Core Server / SERVER-16273

createIndex is not sent to shards with no chunk for ns

    • Type: Bug
    • Resolution: Done
    • Priority: Critical - P2
    • Fix Version/s: 2.8.0-rc3
    • Affects Version/s: 2.7.4, 2.8.0-rc0, 2.8.0-rc2
    • Component/s: Index Maintenance, Sharding
    • Environment: Tested on OSX and Linux64
    • Backwards Compatibility: Fully Compatible
    • Operating System: ALL

This also breaks listIndexes, since listIndexes assumes that the primary shard always has the complete set of indexes for the collection.
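
      For reference, a quick way to see the per-shard discrepancy directly (a sketch only, assuming the localhost:30000/30001 shard ports that ShardingTest uses in the repro below) is to bypass mongos and ask each shard's mongod for its indexes:

      // Connect to each shard mongod directly (ports assumed from the repro's
      // ShardingTest) and compare the index lists for test.repro.
      var shard0 = new Mongo("localhost:30000").getDB("test"); // shard that owns the chunks
      var shard1 = new Mongo("localhost:30001").getDB("test"); // primary shard for "test", owns no chunks
      printjson(shard0.repro.getIndexes()); // reports _id_ and i_1
      printjson(shard1.repro.getIndexes()); // reports only _id_ -- createIndexes was never sent here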

      Original description:

      You can use the following script to reproduce:

      var size = 1;
      var dbname = "test";
      var collname = "repro";
      var ns = dbname + "." + collname;
      var st = new ShardingTest({shards: 2, mongos: 1});
      
      jsTest.log("Enabling sharding");
      assert(st.adminCommand({enablesharding: dbname}));
      assert(st.adminCommand({shardcollection: ns, key: {_id: 1}}));
      
      jsTest.log("presplitting");
      var primary = st.getServer(dbname).getDB(dbname);
      var secondary = st.getOther(primary).getDB(dbname);
      // It looks like this movechunk (which leaves the primary shard with no
      // chunks for this collection) is the problematic command, though it succeeds.
      assert(st.adminCommand({movechunk: ns, find: {_id: size / 2}, to: secondary.getMongo().name}));
      
      jsTest.log("loading data");
      for(var i = 0; i < size; i++) {
          assert.writeOK(st.getDB(dbname)[collname].insert({_id: i}));
      }
      
      jsTest.log("creating index");
      var res = st.getDB(dbname)[collname].ensureIndex({i: 1}, {background: true}); // blocking call
      printjson(res);
      assert.commandWorked(res);
      sleep(5000);
      var indexes = st.s0.getDB(dbname)[collname].getIndexes();
      printjson(indexes);
      jsTest.log("checking index");
      assert.eq(indexes.length, 2);
      

It seems that the pre-splitting step (moving the only chunk off the primary shard, which is arguably a questionable call) is what triggers this problem. Regardless, we should not report success for the index build when the index does not actually exist on every shard.
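
      As a possible manual workaround (a sketch only, not an official procedure): the missing index can be built by connecting directly to the shard that holds no chunks and was skipped by mongos, so that every shard carries i_1 and the index listing is complete again.

      // Sketch of a manual workaround, assuming the same dbname/collname variables
      // as in the repro script and that shard0001 (localhost:30001) was the shard skipped.
      var skippedShard = new Mongo("localhost:30001").getDB(dbname);
      var res = skippedShard[collname].ensureIndex({i: 1}, {background: true});
      assert.commandWorked(res);
      printjson(skippedShard[collname].getIndexes()); // should now include i_1 as well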

Here is the output from a test run of this script:

      MongoDB shell version: 2.8.0-rc1-pre-
      Resetting db path '/data/db/test0'
      2014-11-21T11:22:50.559-0500 I -        shell: started program /Users/ace/neweng/bins/mongodb-osx-x86_64-c0a623cf2bba01c4fbb78bda4a7b2e67c1c8d2d8-2014-11-18/bin/mongod --port 30000 --dbpath /data/db/test0 --setParameter enableTestCommands=1
      2014-11-21T11:22:50.560-0500 W NETWORK  Failed to connect to 127.0.0.1:30000, reason: errno:61 Connection refused
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] MongoDB starting : pid=1045 port=30000 dbpath=/data/db/test0 64-bit host=Andrew-Emil-MacBook-Pro.local
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten]
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] ** WARNING: soft rlimits too low. Number of files is 256, should be at least 1000
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] db version v2.8.0-rc1-pre-
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] git version: c0a623cf2bba01c4fbb78bda4a7b2e67c1c8d2d8
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] build info: Darwin bs-osx108-6 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] allocator: system
       m30000| 2014-11-21T11:22:50.581-0500 I CONTROL  [initandlisten] options: { net: { port: 30000 }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/test0" } }
       m30000| 2014-11-21T11:22:50.583-0500 I JOURNAL  [initandlisten] journal dir=/data/db/test0/journal
       m30000| 2014-11-21T11:22:50.583-0500 I JOURNAL  [initandlisten] recover : no journal files present, no recovery needed
       m30000| 2014-11-21T11:22:50.612-0500 I INDEX    [initandlisten] allocating new ns file /data/db/test0/local.ns, filling with zeroes...
       m30000| 2014-11-21T11:22:50.654-0500 I STORAGE  [FileAllocator] allocating new datafile /data/db/test0/local.0, filling with zeroes...
       m30000| 2014-11-21T11:22:50.654-0500 I STORAGE  [FileAllocator] creating directory /data/db/test0/_tmp
       m30000| 2014-11-21T11:22:50.786-0500 I STORAGE  [FileAllocator] done allocating datafile /data/db/test0/local.0, size: 64MB,  took 0.131 secs
       m30000| 2014-11-21T11:22:50.823-0500 I NETWORK  [initandlisten] waiting for connections on port 30000
       m30000| 2014-11-21T11:22:50.861-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:54997 #1 (1 connection now open)
      Resetting db path '/data/db/test1'
      2014-11-21T11:22:50.866-0500 I -        shell: started program /Users/ace/neweng/bins/mongodb-osx-x86_64-c0a623cf2bba01c4fbb78bda4a7b2e67c1c8d2d8-2014-11-18/bin/mongod --port 30001 --dbpath /data/db/test1 --setParameter enableTestCommands=1
      2014-11-21T11:22:50.866-0500 W NETWORK  Failed to connect to 127.0.0.1:30001, reason: errno:61 Connection refused
       m30001| 2014-11-21T11:22:50.885-0500 I CONTROL  [initandlisten] MongoDB starting : pid=1046 port=30001 dbpath=/data/db/test1 64-bit host=Andrew-Emil-MacBook-Pro.local
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten]
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten] ** WARNING: soft rlimits too low. Number of files is 256, should be at least 1000
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten] db version v2.8.0-rc1-pre-
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten] git version: c0a623cf2bba01c4fbb78bda4a7b2e67c1c8d2d8
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten] build info: Darwin bs-osx108-6 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten] allocator: system
       m30001| 2014-11-21T11:22:50.886-0500 I CONTROL  [initandlisten] options: { net: { port: 30001 }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/test1" } }
       m30001| 2014-11-21T11:22:50.886-0500 I JOURNAL  [initandlisten] journal dir=/data/db/test1/journal
       m30001| 2014-11-21T11:22:50.887-0500 I JOURNAL  [initandlisten] recover : no journal files present, no recovery needed
       m30001| 2014-11-21T11:22:50.916-0500 I INDEX    [initandlisten] allocating new ns file /data/db/test1/local.ns, filling with zeroes...
       m30001| 2014-11-21T11:22:50.962-0500 I STORAGE  [FileAllocator] allocating new datafile /data/db/test1/local.0, filling with zeroes...
       m30001| 2014-11-21T11:22:50.962-0500 I STORAGE  [FileAllocator] creating directory /data/db/test1/_tmp
       m30001| 2014-11-21T11:22:51.252-0500 I STORAGE  [FileAllocator] done allocating datafile /data/db/test1/local.0, size: 64MB,  took 0.29 secs
       m30001| 2014-11-21T11:22:51.304-0500 I NETWORK  [initandlisten] waiting for connections on port 30001
       m30001| 2014-11-21T11:22:51.363-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:54999 #1 (1 connection now open)
      "localhost:30000"
       m30000| 2014-11-21T11:22:51.363-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55000 #2 (2 connections now open)
      ShardingTest test :
      {
              "config" : "localhost:30000",
              "shards" : [
                      connection to localhost:30000,
                      connection to localhost:30001
              ]
      }
      2014-11-21T11:22:51.365-0500 I -        shell: started program /Users/ace/neweng/bins/mongodb-osx-x86_64-c0a623cf2bba01c4fbb78bda4a7b2e67c1c8d2d8-2014-11-18/bin/mongos --port 30999 --configdb localhost:30000 --chunkSize 50 --setParameter enableTestCommands=1
      2014-11-21T11:22:51.366-0500 W NETWORK  Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused
       m30999| 2014-11-21T11:22:51.389-0500 W SHARDING running with 1 config server should be done only for testing purposes and is not recommended for production
       m30999| 2014-11-21T11:22:51.393-0500 I SHARDING [mongosMain] MongoS version 2.8.0-rc1-pre- starting: pid=1047 port=30999 64-bit host=Andrew-Emil-MacBook-Pro.local (--help for usage)
       m30999| 2014-11-21T11:22:51.393-0500 I CONTROL  [mongosMain] db version v2.8.0-rc1-pre-
       m30999| 2014-11-21T11:22:51.393-0500 I CONTROL  [mongosMain] git version: c0a623cf2bba01c4fbb78bda4a7b2e67c1c8d2d8
       m30999| 2014-11-21T11:22:51.393-0500 I CONTROL  [mongosMain] build info: Darwin bs-osx108-6 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49
       m30999| 2014-11-21T11:22:51.393-0500 I CONTROL  [mongosMain] allocator: system
       m30999| 2014-11-21T11:22:51.393-0500 I CONTROL  [mongosMain] options: { net: { port: 30999 }, setParameter: { enableTestCommands: "1" }, sharding: { chunkSize: 50, configDB: "localhost:30000" } }
       m30000| 2014-11-21T11:22:51.394-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55002 #3 (3 connections now open)
       m30000| 2014-11-21T11:22:51.394-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55003 #4 (4 connections now open)
       m30000| 2014-11-21T11:22:51.395-0500 I STORAGE  [conn4] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.399-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55004 #5 (5 connections now open)
       m30999| 2014-11-21T11:22:51.400-0500 I SHARDING [LockPinger] creating distributed lock ping thread for localhost:30000 and process Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807 (sleeping for 30000ms)
       m30000| 2014-11-21T11:22:51.410-0500 I INDEX    [conn3] allocating new ns file /data/db/test0/config.ns, filling with zeroes...
       m30000| 2014-11-21T11:22:51.452-0500 I STORAGE  [FileAllocator] allocating new datafile /data/db/test0/config.0, filling with zeroes...
      2014-11-21T11:22:51.566-0500 W NETWORK  Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused
       m30000| 2014-11-21T11:22:51.636-0500 I STORAGE  [FileAllocator] done allocating datafile /data/db/test0/config.0, size: 64MB,  took 0.183 secs
       m30000| 2014-11-21T11:22:51.662-0500 I WRITE    [conn3] update config.lockpings query: { _id: "Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807" } update: { $set: { ping: new Date(1416586971400) } } nscanned:0 nscannedObjects:0 nMatched:1 nModified:1 upsert:1 keyUpdates:0 numYields:0  251ms
       m30999| 2014-11-21T11:22:51.664-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:22:51 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30999| 2014-11-21T11:22:51.664-0500 I SHARDING [mongosMain] distributed lock 'configUpgrade/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807' acquired, ts : 546f66db3d42fcf10e8c8bbd
       m30999| 2014-11-21T11:22:51.664-0500 I SHARDING [mongosMain] starting upgrade of config server from v0 to v6
       m30999| 2014-11-21T11:22:51.664-0500 I SHARDING [mongosMain] starting next upgrade step from v0 to v6
       m30999| 2014-11-21T11:22:51.665-0500 I SHARDING [mongosMain] about to log new metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:51-546f66db3d42fcf10e8c8bbe", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "N/A", time: new Date(1416586971665), what: "starting upgrade of config database", ns: "config.version", details: { from: 0, to: 6 } }
       m30000| 2014-11-21T11:22:51.683-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55006 #6 (6 connections now open)
       m30000| 2014-11-21T11:22:51.683-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:51.691-0500 I SHARDING [mongosMain] writing initial config version at v6
       m30000| 2014-11-21T11:22:51.691-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:51.699-0500 I SHARDING [mongosMain] about to log new metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:51-546f66db3d42fcf10e8c8bc0", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "N/A", time: new Date(1416586971699), what: "finished upgrade of config database", ns: "config.version", details: { from: 0, to: 6 } }
       m30000| 2014-11-21T11:22:51.699-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:51.708-0500 I SHARDING [mongosMain] upgrade of config server to v6 successful
       m30999| 2014-11-21T11:22:51.708-0500 I SHARDING [mongosMain] distributed lock 'configUpgrade/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807' unlocked.
       m30000| 2014-11-21T11:22:51.708-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.716-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.726-0500 I INDEX    [conn6] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, min: 1 }, name: "ns_1_min_1", ns: "config.chunks" }
       m30000| 2014-11-21T11:22:51.726-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.726-0500 I INDEX    [conn6] build index done.  scanned 0 total records. 0 secs
       m30000| 2014-11-21T11:22:51.726-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.734-0500 I INDEX    [conn6] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, shard: 1, min: 1 }, name: "ns_1_shard_1_min_1", ns: "config.chunks" }
       m30000| 2014-11-21T11:22:51.734-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.734-0500 I INDEX    [conn6] build index done.  scanned 0 total records. 0 secs
       m30000| 2014-11-21T11:22:51.734-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.743-0500 I INDEX    [conn6] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, lastmod: 1 }, name: "ns_1_lastmod_1", ns: "config.chunks" }
       m30000| 2014-11-21T11:22:51.743-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.743-0500 I INDEX    [conn6] build index done.  scanned 0 total records. 0 secs
       m30000| 2014-11-21T11:22:51.743-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.764-0500 I INDEX    [conn6] build index on: config.shards properties: { v: 1, unique: true, key: { host: 1 }, name: "host_1", ns: "config.shards" }
       m30000| 2014-11-21T11:22:51.764-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.764-0500 I INDEX    [conn6] build index done.  scanned 0 total records. 0 secs
       m30000| 2014-11-21T11:22:51.764-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
      2014-11-21T11:22:51.767-0500 W NETWORK  Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused
       m30000| 2014-11-21T11:22:51.772-0500 I INDEX    [conn6] build index on: config.locks properties: { v: 1, unique: true, key: { ts: 1 }, name: "ts_1", ns: "config.locks" }
       m30000| 2014-11-21T11:22:51.772-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.772-0500 I INDEX    [conn6] build index done.  scanned 1 total records. 0 secs
       m30000| 2014-11-21T11:22:51.772-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.779-0500 I INDEX    [conn6] build index on: config.locks properties: { v: 1, key: { state: 1, process: 1 }, name: "state_1_process_1", ns: "config.locks" }
       m30000| 2014-11-21T11:22:51.779-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.779-0500 I INDEX    [conn6] build index done.  scanned 1 total records. 0 secs
       m30000| 2014-11-21T11:22:51.779-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:51.789-0500 I INDEX    [conn6] build index on: config.lockpings properties: { v: 1, key: { ping: 1 }, name: "ping_1", ns: "config.lockpings" }
       m30000| 2014-11-21T11:22:51.789-0500 I INDEX    [conn6]         building index using bulk method
       m30000| 2014-11-21T11:22:51.789-0500 I INDEX    [conn6] build index done.  scanned 1 total records. 0 secs
       m30999| 2014-11-21T11:22:51.789-0500 I SHARDING [Balancer] about to contact config servers and shards
       m30999| 2014-11-21T11:22:51.789-0500 I NETWORK  [mongosMain] waiting for connections on port 30999
       m30999| 2014-11-21T11:22:51.789-0500 I SHARDING [Balancer] config servers and shards contacted successfully
       m30999| 2014-11-21T11:22:51.789-0500 I SHARDING [Balancer] balancer id: Andrew-Emil-MacBook-Pro.local:30999 started at Nov 21 11:22:51
       m30999| 2014-11-21T11:22:51.793-0500 I SHARDING [Balancer] distributed lock 'balancer/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807' acquired, ts : 546f66db3d42fcf10e8c8bc3
       m30000| 2014-11-21T11:22:51.795-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:51.803-0500 I SHARDING [Balancer] distributed lock 'balancer/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807' unlocked.
       m30999| 2014-11-21T11:22:51.969-0500 I NETWORK  [mongosMain] connection accepted from 127.0.0.1:55008 #1 (1 connection now open)
       m30999| 2014-11-21T11:22:51.971-0500 I SHARDING [conn1] couldn't find database [admin] in config db
       m30000| 2014-11-21T11:22:51.971-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:51.978-0500 I SHARDING [conn1]         put [admin] on: config:localhost:30000
       m30000| 2014-11-21T11:22:51.981-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
      Waiting for active hosts...
      Waiting for the balancer lock...
      Waiting again for active hosts after balancer is off...
      ShardingTest undefined going to add shard : localhost:30000
       m30999| 2014-11-21T11:22:51.994-0500 I SHARDING [conn1] going to add shard: { _id: "shard0000", host: "localhost:30000" }
       m30000| 2014-11-21T11:22:51.995-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:52.001-0500 I SHARDING [conn1] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc3d42fcf10e8c8bc5", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "N/A", time: new Date(1416586972001), what: "addShard", ns: "", details: { name: "shard0000", host: "localhost:30000" } }
       m30000| 2014-11-21T11:22:52.001-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
      { "shardAdded" : "shard0000", "ok" : 1 }
      ShardingTest undefined going to add shard : localhost:30001
       m30001| 2014-11-21T11:22:52.008-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55009 #2 (2 connections now open)
       m30999| 2014-11-21T11:22:52.009-0500 I SHARDING [conn1] going to add shard: { _id: "shard0001", host: "localhost:30001" }
       m30000| 2014-11-21T11:22:52.009-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:52.018-0500 I SHARDING [conn1] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc3d42fcf10e8c8bc6", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "N/A", time: new Date(1416586972018), what: "addShard", ns: "", details: { name: "shard0001", host: "localhost:30001" } }
       m30000| 2014-11-21T11:22:52.018-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
      { "shardAdded" : "shard0001", "ok" : 1 }
      
      
      ----
      Enabling sharding
      ----
      
      
       m30999| 2014-11-21T11:22:52.026-0500 I SHARDING [conn1] couldn't find database [test] in config db
       m30000| 2014-11-21T11:22:52.027-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:52.033-0500 I SHARDING [conn1]         put [test] on: shard0001:localhost:30001
       m30999| 2014-11-21T11:22:52.033-0500 I COMMAND  [conn1] enabling sharding on: test
       m30000| 2014-11-21T11:22:52.033-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30001| 2014-11-21T11:22:52.041-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55010 #3 (3 connections now open)
       m30001| 2014-11-21T11:22:52.051-0500 I INDEX    [conn3] allocating new ns file /data/db/test1/test.ns, filling with zeroes...
       m30001| 2014-11-21T11:22:52.108-0500 I STORAGE  [FileAllocator] allocating new datafile /data/db/test1/test.0, filling with zeroes...
       m30001| 2014-11-21T11:22:52.289-0500 I STORAGE  [FileAllocator] done allocating datafile /data/db/test1/test.0, size: 64MB,  took 0.18 secs
       m30001| 2014-11-21T11:22:52.323-0500 I WRITE    [conn3] insert test.system.indexes query: { ns: "test.repro", key: { _id: 1.0 }, name: "_id_1" } ninserted:0 keyUpdates:0 numYields:0  282ms
       m30001| 2014-11-21T11:22:52.323-0500 I QUERY    [conn3] command test.$cmd command: insert { insert: "system.indexes", documents: [ { ns: "test.repro", key: { _id: 1.0 }, name: "_id_1" } ], ordered: true, metadata: { shardName: "shard0001", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 keyUpdates:0 numYields:0  reslen:40 282ms
       m30999| 2014-11-21T11:22:52.324-0500 I COMMAND  [conn1] CMD: shardcollection: { shardcollection: "test.repro", key: { _id: 1.0 } }
       m30999| 2014-11-21T11:22:52.324-0500 I SHARDING [conn1] enable sharding on: test.repro with shard key: { _id: 1.0 }
       m30999| 2014-11-21T11:22:52.324-0500 I SHARDING [conn1] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc3d42fcf10e8c8bc7", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "N/A", time: new Date(1416586972324), what: "shardCollection.start", ns: "test.repro", details: { shardKey: { _id: 1.0 }, collection: "test.repro", primary: "shard0001:localhost:30001", initShards: [], numChunks: 1 } }
       m30000| 2014-11-21T11:22:52.324-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:52.331-0500 I SHARDING [conn1] going to create 1 chunk(s) for: test.repro using new epoch 546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:52.331-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30999| 2014-11-21T11:22:52.338-0500 I SHARDING [conn1] ChunkManager: time to load chunks for test.repro: 0ms sequenceNumber: 2 version: 1|0||546f66dc3d42fcf10e8c8bc8 based on: (empty)
       m30000| 2014-11-21T11:22:52.338-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:52.345-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
       m30001| 2014-11-21T11:22:52.355-0500 I SHARDING [conn3] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
       m30001| 2014-11-21T11:22:52.355-0500 I SHARDING [conn3] remote client 127.0.0.1:55010 initialized this host (localhost:30001) as shard shard0001
       m30001| 2014-11-21T11:22:52.355-0500 I SHARDING [conn3] remotely refreshing metadata for test.repro with requested shard version 1|0||546f66dc3d42fcf10e8c8bc8, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
       m30000| 2014-11-21T11:22:52.356-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55011 #7 (7 connections now open)
       m30001| 2014-11-21T11:22:52.356-0500 I SHARDING [conn3] collection test.repro was previously unsharded, new metadata loaded with shard version 1|0||546f66dc3d42fcf10e8c8bc8
       m30001| 2014-11-21T11:22:52.356-0500 I SHARDING [conn3] collection version was loaded at version 1|0||546f66dc3d42fcf10e8c8bc8, took 0ms
       m30999| 2014-11-21T11:22:52.357-0500 I SHARDING [conn1] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc3d42fcf10e8c8bc9", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "N/A", time: new Date(1416586972356), what: "shardCollection", ns: "test.repro", details: { version: "1|0||546f66dc3d42fcf10e8c8bc8" } }
       m30000| 2014-11-21T11:22:52.357-0500 I STORAGE  [conn6] CMD fsync: sync:1 lock:0
      
      
      ----
      presplitting
      ----
      
      
       m30999| 2014-11-21T11:22:52.365-0500 I COMMAND  [conn1] CMD: movechunk: { movechunk: "test.repro", find: { _id: 200000.0 }, to: "localhost:30000" }
       m30999| 2014-11-21T11:22:52.365-0500 I SHARDING [conn1] moving chunk ns: test.repro moving ( ns: test.repro, shard: shard0001:localhost:30001, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: MaxKey }) shard0001:localhost:30001 -> shard0000:localhost:30000
       m30001| 2014-11-21T11:22:52.365-0500 I SHARDING [conn2] received moveChunk request: { moveChunk: "test.repro", from: "localhost:30001", to: "localhost:30000", fromShard: "shard0001", toShard: "shard0000", min: { _id: MinKey }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, shardId: "test.repro-_id_MinKey", configdb: "localhost:30000", secondaryThrottle: true, waitForDelete: false, maxTimeMS: 0, epoch: ObjectId('546f66dc3d42fcf10e8c8bc8') }
       m30000| 2014-11-21T11:22:52.366-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55012 #8 (8 connections now open)
       m30001| 2014-11-21T11:22:52.366-0500 I SHARDING [LockPinger] creating distributed lock ping thread for localhost:30000 and process Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061 (sleeping for 30000ms)
       m30000| 2014-11-21T11:22:52.367-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55013 #9 (9 connections now open)
       m30001| 2014-11-21T11:22:52.367-0500 I SHARDING [conn2] distributed lock 'test.repro/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061' acquired, ts : 546f66dc930422ac86fda94d
       m30001| 2014-11-21T11:22:52.368-0500 I SHARDING [conn2] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc930422ac86fda94e", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55009", time: new Date(1416586972368), what: "moveChunk.start", ns: "test.repro", details: { min: { _id: MinKey }, max: { _id: MaxKey }, from: "shard0001", to: "shard0000" } }
       m30001| 2014-11-21T11:22:52.368-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:22:52 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:22:52.368-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55014 #10 (10 connections now open)
       m30000| 2014-11-21T11:22:52.368-0500 I STORAGE  [conn10] CMD fsync: sync:1 lock:0
       m30001| 2014-11-21T11:22:52.377-0500 I SHARDING [conn2] remotely refreshing metadata for test.repro based on current shard version 1|0||546f66dc3d42fcf10e8c8bc8, current metadata version is 1|0||546f66dc3d42fcf10e8c8bc8
       m30001| 2014-11-21T11:22:52.377-0500 I SHARDING [conn2] metadata of collection test.repro already up to date (shard version : 1|0||546f66dc3d42fcf10e8c8bc8, took 0ms)
       m30001| 2014-11-21T11:22:52.377-0500 I SHARDING [conn2] moveChunk request accepted at version 1|0||546f66dc3d42fcf10e8c8bc8
       m30001| 2014-11-21T11:22:52.377-0500 I SHARDING [conn2] moveChunk number of documents: 0
       m30000| 2014-11-21T11:22:52.377-0500 I SHARDING [conn8] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
       m30000| 2014-11-21T11:22:52.377-0500 I SHARDING [conn8] remote client 127.0.0.1:55012 initialized this host as shard shard0000
       m30000| 2014-11-21T11:22:52.378-0500 I SHARDING [conn8] remotely refreshing metadata for test.repro, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
       m30000| 2014-11-21T11:22:52.378-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55015 #11 (11 connections now open)
       m30000| 2014-11-21T11:22:52.378-0500 I SHARDING [conn8] collection test.repro was previously unsharded, new metadata loaded with shard version 0|0||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:52.378-0500 I SHARDING [conn8] collection version was loaded at version 1|0||546f66dc3d42fcf10e8c8bc8, took 0ms
       m30000| 2014-11-21T11:22:52.379-0500 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: MaxKey } for collection test.repro from localhost:30001 at epoch 546f66dc3d42fcf10e8c8bc8
       m30001| 2014-11-21T11:22:52.379-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55016 #4 (4 connections now open)
       m30001| 2014-11-21T11:22:52.380-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30001| 2014-11-21T11:22:52.382-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30001| 2014-11-21T11:22:52.387-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30000| 2014-11-21T11:22:52.388-0500 I INDEX    [migrateThread] allocating new ns file /data/db/test0/test.ns, filling with zeroes...
       m30001| 2014-11-21T11:22:52.396-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30001| 2014-11-21T11:22:52.414-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30001| 2014-11-21T11:22:52.447-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30000| 2014-11-21T11:22:52.451-0500 I STORAGE  [FileAllocator] allocating new datafile /data/db/test0/test.0, filling with zeroes...
       m30001| 2014-11-21T11:22:52.512-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30001| 2014-11-21T11:22:52.641-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30000| 2014-11-21T11:22:52.714-0500 I STORAGE  [FileAllocator] done allocating datafile /data/db/test0/test.0, size: 64MB,  took 0.263 secs
       m30000| 2014-11-21T11:22:52.745-0500 I INDEX    [migrateThread] build index on: test.repro properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "test.repro" }
       m30000| 2014-11-21T11:22:52.745-0500 I INDEX    [migrateThread]         building index using bulk method
       m30000| 2014-11-21T11:22:52.745-0500 I INDEX    [migrateThread] build index done.  scanned 0 total records. 0 secs
       m30000| 2014-11-21T11:22:52.745-0500 I SHARDING [migrateThread] Deleter starting delete for: test.repro from { _id: MinKey } -> { _id: MaxKey }, with opId: 245
       m30000| 2014-11-21T11:22:52.745-0500 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for test.repro from { _id: MinKey } -> { _id: MaxKey }
       m30000| 2014-11-21T11:22:52.745-0500 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
       m30000| 2014-11-21T11:22:52.745-0500 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'test.repro' { _id: MinKey } -> { _id: MaxKey }
       m30000| 2014-11-21T11:22:52.746-0500 I SHARDING [migrateThread] migrate commit flushed to journal for 'test.repro' { _id: MinKey } -> { _id: MaxKey }
       m30001| 2014-11-21T11:22:52.898-0500 I SHARDING [conn2] moveChunk data transfer progress: { active: true, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
       m30001| 2014-11-21T11:22:52.898-0500 I SHARDING [conn2] About to check if it is safe to enter critical section
       m30001| 2014-11-21T11:22:52.899-0500 I SHARDING [conn2] About to enter migrate critical section
       m30001| 2014-11-21T11:22:52.899-0500 I SHARDING [conn2] moveChunk setting version to: 2|0||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:52.899-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55017 #12 (12 connections now open)
       m30000| 2014-11-21T11:22:52.909-0500 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'test.repro' { _id: MinKey } -> { _id: MaxKey }
       m30000| 2014-11-21T11:22:52.910-0500 I SHARDING [migrateThread] migrate commit flushed to journal for 'test.repro' { _id: MinKey } -> { _id: MaxKey }
       m30000| 2014-11-21T11:22:52.910-0500 I SHARDING [migrateThread] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dcf867457471ec2329", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: ":27017", time: new Date(1416586972910), what: "moveChunk.to", ns: "test.repro", details: { min: { _id: MinKey }, max: { _id: MaxKey }, step 1 of 5: 366, step 2 of 5: 0, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 164, note: "success" } }
       m30000| 2014-11-21T11:22:52.911-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55018 #13 (13 connections now open)
       m30000| 2014-11-21T11:22:52.911-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30001| 2014-11-21T11:22:52.920-0500 I SHARDING [conn2] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "test.repro", from: "localhost:30001", min: { _id: MinKey }, max: { _id: MaxKey }, shardKeyPattern: { _id: 1.0 }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
       m30001| 2014-11-21T11:22:52.920-0500 I SHARDING [conn2] moveChunk moved last chunk out for collection 'test.repro'
       m30000| 2014-11-21T11:22:52.921-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55019 #14 (14 connections now open)
       m30001| 2014-11-21T11:22:52.921-0500 I SHARDING [conn2] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc930422ac86fda94f", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55009", time: new Date(1416586972921), what: "moveChunk.commit", ns: "test.repro", details: { min: { _id: MinKey }, max: { _id: MaxKey }, from: "shard0001", to: "shard0000", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
       m30000| 2014-11-21T11:22:52.921-0500 I STORAGE  [conn10] CMD fsync: sync:1 lock:0
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [conn2] MigrateFromStatus::done About to acquire global write lock to exit critical section
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [conn2] MigrateFromStatus::done coll lock for test.repro acquired
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [conn2] forking for cleanup of chunk data
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [conn2] MigrateFromStatus::done About to acquire global write lock to exit critical section
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [conn2] MigrateFromStatus::done coll lock for test.repro acquired
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [RangeDeleter] Deleter starting delete for: test.repro from { _id: MinKey } -> { _id: MaxKey }, with opId: 7
       m30001| 2014-11-21T11:22:52.929-0500 I SHARDING [RangeDeleter] rangeDeleter deleted 0 documents for test.repro from { _id: MinKey } -> { _id: MaxKey }
       m30001| 2014-11-21T11:22:52.930-0500 I SHARDING [conn2] distributed lock 'test.repro/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061' unlocked.
       m30001| 2014-11-21T11:22:52.930-0500 I SHARDING [conn2] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:52-546f66dc930422ac86fda950", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55009", time: new Date(1416586972930), what: "moveChunk.from", ns: "test.repro", details: { min: { _id: MinKey }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 11, step 3 of 6: 1, step 4 of 6: 519, step 5 of 6: 30, step 6 of 6: 0, to: "shard0000", from: "shard0001", note: "success" } }
       m30000| 2014-11-21T11:22:52.930-0500 I STORAGE  [conn10] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:53.048-0500 I QUERY    [conn10] command admin.$cmd command: fsync { fsync: true } ntoreturn:1 keyUpdates:0 numYields:0  reslen:51 117ms
       m30001| 2014-11-21T11:22:53.048-0500 I QUERY    [conn2] command admin.$cmd command: moveChunk { moveChunk: "test.repro", from: "localhost:30001", to: "localhost:30000", fromShard: "shard0001", toShard: "shard0000", min: { _id: MinKey }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, shardId: "test.repro-_id_MinKey", configdb: "localhost:30000", secondaryThrottle: true, waitForDelete: false, maxTimeMS: 0, epoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } ntoreturn:1 keyUpdates:0 numYields:0  reslen:37 683ms
       m30999| 2014-11-21T11:22:53.049-0500 I SHARDING [conn1] ChunkManager: time to load chunks for test.repro: 0ms sequenceNumber: 3 version: 2|0||546f66dc3d42fcf10e8c8bc8 based on: 1|0||546f66dc3d42fcf10e8c8bc8
      
      
      ----
      loading data
      ----
      
      
       m30000| 2014-11-21T11:22:53.050-0500 I SHARDING [conn6] remotely refreshing metadata for test.repro with requested shard version 2|0||546f66dc3d42fcf10e8c8bc8 based on current shard version 0|0||546f66dc3d42fcf10e8c8bc8, current metadata version is 1|0||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:53.051-0500 I SHARDING [conn6] updating metadata for test.repro from shard version 0|0||546f66dc3d42fcf10e8c8bc8 to shard version 2|0||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:53.051-0500 I SHARDING [conn6] collection version was loaded at version 2|0||546f66dc3d42fcf10e8c8bc8, took 0ms
       m30999| 2014-11-21T11:22:53.053-0500 I SHARDING [conn1] ChunkManager: time to load chunks for test.repro: 0ms sequenceNumber: 4 version: 2|0||546f66dc3d42fcf10e8c8bc8 based on: (empty)
       m30999| 2014-11-21T11:22:53.053-0500 I SHARDING [conn1] ChunkManager: time to load chunks for test.repro: 0ms sequenceNumber: 5 version: 2|0||546f66dc3d42fcf10e8c8bc8 based on: 2|0||546f66dc3d42fcf10e8c8bc8
       m30999| 2014-11-21T11:22:53.053-0500 W SHARDING [conn1] chunk manager reload forced for collection 'test.repro', config version is 2|0||546f66dc3d42fcf10e8c8bc8
       m30999| 2014-11-21T11:22:53.053-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:22:53.054-0500 I NETWORK  [conn4] end connection 127.0.0.1:55003 (13 connections now open)
       m30999| 2014-11-21T11:22:53.065-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:22:53.065-0500 I NETWORK  [conn3] end connection 127.0.0.1:55002 (12 connections now open)
       m30000| 2014-11-21T11:22:53.073-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55020 #15 (13 connections now open)
       m30999| 2014-11-21T11:22:53.074-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:22:53.074-0500 I NETWORK  [conn15] end connection 127.0.0.1:55020 (12 connections now open)
       m30000| 2014-11-21T11:22:53.074-0500 I SHARDING [conn5] request split points lookup for chunk test.repro { : MinKey } -->> { : MaxKey }
       m30000| 2014-11-21T11:22:53.074-0500 I SHARDING [conn5] received splitChunk request: { splitChunk: "test.repro", keyPattern: { _id: 1.0 }, min: { _id: MinKey }, max: { _id: MaxKey }, from: "shard0000", splitKeys: [ { _id: 0.0 }, { _id: 21.0 } ], shardId: "test.repro-_id_MinKey", configdb: "localhost:30000", epoch: ObjectId('546f66dc3d42fcf10e8c8bc8') }
       m30000| 2014-11-21T11:22:53.074-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55021 #16 (13 connections now open)
       m30000| 2014-11-21T11:22:53.075-0500 I SHARDING [LockPinger] creating distributed lock ping thread for localhost:30000 and process Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196 (sleeping for 30000ms)
       m30000| 2014-11-21T11:22:53.075-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55022 #17 (14 connections now open)
       m30000| 2014-11-21T11:22:53.076-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:22:53 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30000| 2014-11-21T11:22:53.076-0500 I SHARDING [conn5] distributed lock 'test.repro/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196' acquired, ts : 546f66ddf867457471ec232a
       m30000| 2014-11-21T11:22:53.077-0500 I SHARDING [conn5] remotely refreshing metadata for test.repro based on current shard version 2|0||546f66dc3d42fcf10e8c8bc8, current metadata version is 2|0||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:53.077-0500 I SHARDING [conn5] metadata of collection test.repro already up to date (shard version : 2|0||546f66dc3d42fcf10e8c8bc8, took 0ms)
       m30000| 2014-11-21T11:22:53.077-0500 I SHARDING [conn5] splitChunk accepted at version 2|0||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:22:53.078-0500 I SHARDING [conn5] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:53-546f66ddf867457471ec232b", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55004", time: new Date(1416586973078), what: "multi-split", ns: "test.repro", details: { before: { min: { _id: MinKey }, max: { _id: MaxKey } }, number: 1, of: 3, chunk: { min: { _id: MinKey }, max: { _id: 0.0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } } }
       m30000| 2014-11-21T11:22:53.078-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:53.087-0500 I SHARDING [conn5] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:53-546f66ddf867457471ec232c", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55004", time: new Date(1416586973087), what: "multi-split", ns: "test.repro", details: { before: { min: { _id: MinKey }, max: { _id: MaxKey } }, number: 2, of: 3, chunk: { min: { _id: 0.0 }, max: { _id: 21.0 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } } }
       m30000| 2014-11-21T11:22:53.087-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:53.096-0500 I SHARDING [conn5] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:22:53-546f66ddf867457471ec232d", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55004", time: new Date(1416586973096), what: "multi-split", ns: "test.repro", details: { before: { min: { _id: MinKey }, max: { _id: MaxKey } }, number: 3, of: 3, chunk: { min: { _id: 21.0 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } } }
       m30000| 2014-11-21T11:22:53.096-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:22:53.103-0500 I SHARDING [conn5] distributed lock 'test.repro/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196' unlocked.
       m30000| 2014-11-21T11:22:53.104-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55023 #18 (15 connections now open)
       m30999| 2014-11-21T11:22:53.104-0500 I SHARDING [conn1] ChunkManager: time to load chunks for test.repro: 0ms sequenceNumber: 6 version: 2|3||546f66dc3d42fcf10e8c8bc8 based on: 2|0||546f66dc3d42fcf10e8c8bc8
       m30999| 2014-11-21T11:22:53.105-0500 I SHARDING [conn1] autosplitted test.repro shard: ns: test.repro, shard: shard0000:localhost:30000, lastmod: 2|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: MaxKey } into 3 (splitThreshold 921)
       m30999| 2014-11-21T11:22:53.105-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:22:53.106-0500 I NETWORK  [conn18] end connection 127.0.0.1:55023 (14 connections now open)
       m30000| 2014-11-21T11:23:01.804-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55026 #19 (15 connections now open)
       m30000| 2014-11-21T11:23:01.805-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55027 #20 (16 connections now open)
       m30000| 2014-11-21T11:23:01.806-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55028 #21 (17 connections now open)
       m30999| 2014-11-21T11:23:21.665-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:23:21 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30999| 2014-11-21T11:23:21.789-0500 I ACCESS   [UserCacheInvalidator] User cache generation changed from 546f66db3d42fcf10e8c8bc2 to 546f66daf867457471ec2321; invalidating user cache
       m30001| 2014-11-21T11:23:22.368-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:23:22 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:23:23.077-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:23:23 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:23:51.666-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:23:51 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30001| 2014-11-21T11:23:52.369-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:23:52 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:23:53.078-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:23:53 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:24:10.080-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:24:10.080-0500 I NETWORK  [conn20] end connection 127.0.0.1:55027 (16 connections now open)
       m30000| 2014-11-21T11:24:11.813-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55062 #22 (17 connections now open)
       m30999| 2014-11-21T11:24:21.667-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:24:21 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30001| 2014-11-21T11:24:22.370-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:24:22 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:24:23.079-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:24:23 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:24:51.668-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:24:51 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30001| 2014-11-21T11:24:52.371-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:24:52 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:24:53.080-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:24:53 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:25:21.668-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:25:21 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30001| 2014-11-21T11:25:22.371-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:25:22 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:25:23.081-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:25:23 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:25:27.265-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:25:27.265-0500 I NETWORK  [conn22] end connection 127.0.0.1:55062 (16 connections now open)
       m30000| 2014-11-21T11:25:31.826-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55083 #23 (17 connections now open)
       m30999| 2014-11-21T11:25:51.669-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:25:51 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30001| 2014-11-21T11:25:52.372-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:25:52 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:25:53.081-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:25:53 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:26:21.670-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:26:21 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30001| 2014-11-21T11:26:22.373-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:26:22 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061', sleeping for 30000ms
       m30000| 2014-11-21T11:26:23.082-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:26:23 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196', sleeping for 30000ms
       m30999| 2014-11-21T11:26:44.475-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:26:44.475-0500 I NETWORK  [conn23] end connection 127.0.0.1:55083 (16 connections now open)
       m30000| 2014-11-21T11:26:44.475-0500 I SHARDING [conn5] request split points lookup for chunk test.repro { : 21.0 } -->> { : MaxKey }
       m30000| 2014-11-21T11:26:44.773-0500 W SHARDING [conn5] Finding the split vector for test.repro over { _id: 1.0 } keyCount: 136533 numSplits: 2 lookedAt: 120151 took 298ms
       m30000| 2014-11-21T11:26:44.774-0500 I QUERY    [conn5] command admin.$cmd command: splitVector { splitVector: "test.repro", keyPattern: { _id: 1.0 }, min: { _id: 21.0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 keyUpdates:0 numYields:3072  reslen:111 298ms
       m30000| 2014-11-21T11:26:44.774-0500 I SHARDING [conn5] received splitChunk request: { splitChunk: "test.repro", keyPattern: { _id: 1.0 }, min: { _id: 21.0 }, max: { _id: MaxKey }, from: "shard0000", splitKeys: [ { _id: 136554.0 }, { _id: 393239.0 } ], shardId: "test.repro-_id_21.0", configdb: "localhost:30000", epoch: ObjectId('546f66dc3d42fcf10e8c8bc8') }
       m30000| 2014-11-21T11:26:44.776-0500 I SHARDING [conn5] distributed lock 'test.repro/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196' acquired, ts : 546f67c4f867457471ec232e
       m30000| 2014-11-21T11:26:44.776-0500 I SHARDING [conn5] remotely refreshing metadata for test.repro based on current shard version 2|3||546f66dc3d42fcf10e8c8bc8, current metadata version is 2|3||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:26:44.776-0500 I SHARDING [conn5] metadata of collection test.repro already up to date (shard version : 2|3||546f66dc3d42fcf10e8c8bc8, took 0ms)
       m30000| 2014-11-21T11:26:44.776-0500 I SHARDING [conn5] splitChunk accepted at version 2|3||546f66dc3d42fcf10e8c8bc8
       m30000| 2014-11-21T11:26:44.777-0500 I SHARDING [conn5] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:26:44-546f67c4f867457471ec232f", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55004", time: new Date(1416587204777), what: "multi-split", ns: "test.repro", details: { before: { min: { _id: 21.0 }, max: { _id: MaxKey } }, number: 1, of: 3, chunk: { min: { _id: 21.0 }, max: { _id: 136554.0 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } } }
       m30000| 2014-11-21T11:26:44.777-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:26:44.818-0500 I SHARDING [conn5] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:26:44-546f67c4f867457471ec2330", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55004", time: new Date(1416587204818), what: "multi-split", ns: "test.repro", details: { before: { min: { _id: 21.0 }, max: { _id: MaxKey } }, number: 2, of: 3, chunk: { min: { _id: 136554.0 }, max: { _id: 393239.0 }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } } }
       m30000| 2014-11-21T11:26:44.818-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:26:44.830-0500 I SHARDING [conn5] about to log metadata event: { _id: "Andrew-Emil-MacBook-Pro.local-2014-11-21T16:26:44-546f67c4f867457471ec2331", server: "Andrew-Emil-MacBook-Pro.local", clientAddr: "127.0.0.1:55004", time: new Date(1416587204830), what: "multi-split", ns: "test.repro", details: { before: { min: { _id: 21.0 }, max: { _id: MaxKey } }, number: 3, of: 3, chunk: { min: { _id: 393239.0 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|6, lastmodEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') } } }
       m30000| 2014-11-21T11:26:44.830-0500 I STORAGE  [conn13] CMD fsync: sync:1 lock:0
       m30000| 2014-11-21T11:26:44.840-0500 I SHARDING [conn5] distributed lock 'test.repro/Andrew-Emil-MacBook-Pro.local:30000:1416586973:398782196' unlocked.
       m30999| 2014-11-21T11:26:44.840-0500 I SHARDING [conn1] ChunkManager: time to load chunks for test.repro: 0ms sequenceNumber: 7 version: 2|6||546f66dc3d42fcf10e8c8bc8 based on: 2|3||546f66dc3d42fcf10e8c8bc8
       m30999| 2014-11-21T11:26:44.840-0500 I SHARDING [conn1] autosplitted test.repro shard: ns: test.repro, shard: shard0000:localhost:30000, lastmod: 2|3||000000000000000000000000, min: { _id: 21.0 }, max: { _id: MaxKey } into 3 (splitThreshold 11796480) (migrate suggested, but no migrations allowed)
       m30999| 2014-11-21T11:26:44.841-0500 I NETWORK  [conn1] scoped connection to localhost:30000 not being returned to the pool
       m30000| 2014-11-21T11:26:44.841-0500 I NETWORK  [conn19] end connection 127.0.0.1:55026 (15 connections now open)
       m30000| 2014-11-21T11:26:44.841-0500 I SHARDING [conn5] request split points lookup for chunk test.repro { : 393239.0 } -->> { : MaxKey }
      
      
      ----
      creating index
      ----
      
      
       m30000| 2014-11-21T11:26:48.863-0500 I INDEX    [conn6] build index on: test.repro properties: { v: 1, key: { i: 1.0 }, name: "i_1", ns: "test.repro", background: true }
       m30000| 2014-11-21T11:26:49.505-0500 I STORAGE  [FileAllocator] allocating new datafile /data/db/test0/test.1, filling with zeroes...
       m30000| 2014-11-21T11:26:49.825-0500 I STORAGE  [FileAllocator] done allocating datafile /data/db/test0/test.1, size: 128MB,  took 0.32 secs
       m30000| 2014-11-21T11:26:50.981-0500 I INDEX    [conn6] build index done.  scanned 400000 total records. 2 secs
       m30000| 2014-11-21T11:26:50.981-0500 I QUERY    [conn6] command test.$cmd command: createIndexes { createIndexes: "repro", indexes: [ { key: { i: 1.0 }, name: "i_1", background: true } ] } keyUpdates:0 numYields:3128  reslen:113 2119ms
      {
              "raw" : {
                      "localhost:30000" : {
                              "createdCollectionAutomatically" : false,
                              "numIndexesBefore" : 1,
                              "numIndexesAfter" : 2,
                              "ok" : 1
                      }
              },
              "ok" : 1
      }
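Note that the raw response above lists only localhost:30000, so the other shard apparently never received the createIndexes command, yet the overall result is still "ok" : 1. One way to confirm the discrepancy (a sketch only; the host:port values are assumptions based on the ShardingTest layout used in the repro script) is to compare the index lists on each shard directly:

// Hypothetical per-shard check, run in the mongo shell after the repro script.
// Assumes the two shards listen on localhost:30000 and localhost:30001.
var shard0 = new Mongo("localhost:30000").getDB("test");
var shard1 = new Mongo("localhost:30001").getDB("test");
printjson(shard0.repro.getIndexes()); // the shard that ran the build should list _id_ and i_1
printjson(shard1.repro.getIndexes()); // the shard with no chunks likely lists only _id_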
       m30000| 2014-11-21T11:26:51.394-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55085 #24 (16 connections now open)
       m30999| 2014-11-21T11:26:51.671-0500 I SHARDING [LockPinger] cluster localhost:30000 pinged successfully at Fri Nov 21 11:26:51 2014 by distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30999:1416586971:16807', sleeping for 30000ms
       m30000| 2014-11-21T11:26:51.838-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55086 #25 (17 connections now open)
       m30001| 2014-11-21T11:26:51.983-0500 I COMMAND  [conn3] assertion 13388 [test.repro] shard version not ok: this shard no longer contains chunks for test.repro, the collection may have been dropped ( ns : test.repro, received : 1|0||546f66dc3d42fcf10e8c8bc8, wanted : 0|0||546f66dc3d42fcf10e8c8bc8, send ) ( ns : test.repro, received : 1|0||546f66dc3d42fcf10e8c8bc8, wanted : 0|0||546f66dc3d42fcf10e8c8bc8, send ) ns:test.$cmd query:{ listIndexes: "repro" }
       m30001| 2014-11-21T11:26:51.983-0500 I COMMAND  [conn3]  ntoskip:0 ntoreturn:1
       m30001| 2014-11-21T11:26:51.983-0500 I COMMAND  [conn3] stale version detected during query over test.$cmd : { $err: "[test.repro] shard version not ok: this shard no longer contains chunks for test.repro, the collection may have been dropped ( ns : test.repro, receiv...", code: 13388, ns: "test.repro", vReceived: Timestamp 1000|0, vReceivedEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8'), vWanted: Timestamp 0|0, vWantedEpoch: ObjectId('546f66dc3d42fcf10e8c8bc8') }
       m30999| 2014-11-21T11:26:51.984-0500 I SHARDING [conn1] sharded connection to localhost:30001 not being returned to the pool
       m30999| 2014-11-21T11:26:51.984-0500 I SHARDING [conn1] retrying command: { listIndexes: "repro" }
       m30001| 2014-11-21T11:26:51.984-0500 I NETWORK  [conn3] end connection 127.0.0.1:55010 (3 connections now open)
       m30001| 2014-11-21T11:26:51.984-0500 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:55087 #5 (4 connections now open)
      [
              {
                      "v" : 1,
                      "key" : {
                              "_id" : 1
                      },
                      "name" : "_id_",
                      "ns" : "test.repro"
              }
      ]
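Through mongos, getIndexes() here reports only the _id index even though the build succeeded on localhost:30000 above, which is why the assertion below fails. As a stop-gap (a hedged sketch, not the fix for this ticket; hosts are again assumed from the ShardingTest layout), the index can be built against each shard explicitly so that every shard's index list agrees:

// Hypothetical workaround sketch: issue createIndexes on each shard directly.
["localhost:30000", "localhost:30001"].forEach(function(host) {
    var res = new Mongo(host).getDB("test").runCommand({
        createIndexes: "repro",
        indexes: [{key: {i: 1}, name: "i_1", background: true}]
    });
    printjson(res); // each shard should end up with numIndexesAfter: 2
});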
      
      
      ----
      checking index
      ----
      
      
      assert: [1] != [2] are not equal : undefined
      Error: [1] != [2] are not equal : undefined
          at Error (<anonymous>)
          at doassert (src/mongo/shell/assert.js:11:14)
          at Function.assert.eq (src/mongo/shell/assert.js:38:5)
          at /Users/ace/neweng/mongo/jstests/noPassthrough/repro.js:30:8
      2014-11-21T11:26:51.987-0500 I QUERY    Error: [1] != [2] are not equal : undefined
          at Error (<anonymous>)
          at doassert (src/mongo/shell/assert.js:11:14)
          at Function.assert.eq (src/mongo/shell/assert.js:38:5)
          at /Users/ace/neweng/mongo/jstests/noPassthrough/repro.js:30:8 at src/mongo/shell/assert.js:13
      failed to load: /Users/ace/neweng/mongo/jstests/noPassthrough/repro.js
       m30000| 2014-11-21T11:26:51.987-0500 I CONTROL  [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
       m30000| 2014-11-21T11:26:51.987-0500 I COMMAND  [signalProcessingThread] now exiting
       m30000| 2014-11-21T11:26:51.987-0500 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
       m30000| 2014-11-21T11:26:51.987-0500 I NETWORK  [signalProcessingThread] closing listening socket: 7
       m30000| 2014-11-21T11:26:51.987-0500 I NETWORK  [signalProcessingThread] closing listening socket: 8
       m30000| 2014-11-21T11:26:51.987-0500 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-30000.sock
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
       m30000| 2014-11-21T11:26:51.988-0500 I STORAGE  [signalProcessingThread] shutdown: waiting for fs preallocator...
       m30000| 2014-11-21T11:26:51.988-0500 I STORAGE  [signalProcessingThread] shutdown: final commit...
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn5] end connection 127.0.0.1:55004 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn9] end connection 127.0.0.1:55013 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn24] end connection 127.0.0.1:55085 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn13] end connection 127.0.0.1:55018 (16 connections now open)
       m30001| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn4] end connection 127.0.0.1:55016 (3 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn25] end connection 127.0.0.1:55086 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn16] end connection 127.0.0.1:55021 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn17] end connection 127.0.0.1:55022 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn2] end connection 127.0.0.1:55000 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn6] end connection 127.0.0.1:55006 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn11] end connection 127.0.0.1:55015 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn12] end connection 127.0.0.1:55017 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn1] end connection 127.0.0.1:54997 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn14] end connection 127.0.0.1:55019 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn21] end connection 127.0.0.1:55028 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn7] end connection 127.0.0.1:55011 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn8] end connection 127.0.0.1:55012 (16 connections now open)
       m30000| 2014-11-21T11:26:51.988-0500 I NETWORK  [conn10] end connection 127.0.0.1:55014 (16 connections now open)
       m30000| 2014-11-21T11:26:52.006-0500 I STORAGE  [signalProcessingThread] shutdown: closing all files...
       m30000| 2014-11-21T11:26:52.008-0500 I STORAGE  [signalProcessingThread] closeAllFiles() finished
       m30000| 2014-11-21T11:26:52.008-0500 I JOURNAL  [signalProcessingThread] journalCleanup...
       m30000| 2014-11-21T11:26:52.008-0500 I JOURNAL  [signalProcessingThread] removeJournalFiles
       m30000| 2014-11-21T11:26:52.010-0500 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
       m30000| 2014-11-21T11:26:52.010-0500 I COMMAND  [signalProcessingThread] dbexit:  rc: 0
       m30001| 2014-11-21T11:26:52.372-0500 I NETWORK  [LockPinger] DBClientCursor::init call() failed
       m30001| 2014-11-21T11:26:52.374-0500 I NETWORK  [LockPinger] Detected bad connection created at 1416586972356016 microSec, clearing pool for localhost:30000 of 1 connections
       m30001| 2014-11-21T11:26:52.374-0500 W SHARDING [LockPinger] distributed lock pinger 'localhost:30000/Andrew-Emil-MacBook-Pro.local:30001:1416586972:270892061' detected an exception while pinging. :: caused by :: DBClientBase::findN: transport error: localhost:30000 ns: admin.$cmd query: { getlasterror: 1 }
       m30001| 2014-11-21T11:26:52.988-0500 I CONTROL  [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
       m30001| 2014-11-21T11:26:52.988-0500 I COMMAND  [signalProcessingThread] now exiting
       m30001| 2014-11-21T11:26:52.988-0500 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
       m30001| 2014-11-21T11:26:52.988-0500 I NETWORK  [signalProcessingThread] closing listening socket: 10
       m30001| 2014-11-21T11:26:52.988-0500 I NETWORK  [signalProcessingThread] closing listening socket: 11
       m30001| 2014-11-21T11:26:52.989-0500 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-30001.sock
       m30001| 2014-11-21T11:26:52.989-0500 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
       m30001| 2014-11-21T11:26:52.989-0500 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
       m30001| 2014-11-21T11:26:52.989-0500 I STORAGE  [signalProcessingThread] shutdown: waiting for fs preallocator...
       m30001| 2014-11-21T11:26:52.989-0500 I STORAGE  [signalProcessingThread] shutdown: final commit...
       m30001| 2014-11-21T11:26:52.989-0500 I NETWORK  [conn1] end connection 127.0.0.1:54999 (2 connections now open)
       m30001| 2014-11-21T11:26:52.989-0500 I NETWORK  [conn2] end connection 127.0.0.1:55009 (2 connections now open)
       m30001| 2014-11-21T11:26:52.989-0500 I NETWORK  [conn5] end connection 127.0.0.1:55087 (2 connections now open)
       m30001| 2014-11-21T11:26:52.993-0500 I STORAGE  [signalProcessingThread] shutdown: closing all files...
       m30001| 2014-11-21T11:26:52.994-0500 I STORAGE  [signalProcessingThread] closeAllFiles() finished
       m30001| 2014-11-21T11:26:52.994-0500 I JOURNAL  [signalProcessingThread] journalCleanup...
       m30001| 2014-11-21T11:26:52.994-0500 I JOURNAL  [signalProcessingThread] removeJournalFiles
       m30001| 2014-11-21T11:26:52.994-0500 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
       m30001| 2014-11-21T11:26:52.994-0500 I COMMAND  [signalProcessingThread] dbexit:  rc: 0
       m30999| 2014-11-21T11:26:53.989-0500 I CONTROL  [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
       m30999| 2014-11-21T11:26:53.989-0500 I SHARDING [signalProcessingThread] dbexit:  rc:0
      

      Assignee: Randolph Tan (randolph@mongodb.com)
      Reporter: Andrew Emil (andrew.emil@10gen.com) (Inactive)