Core Server / SERVER-17070

maxSize test not valid when running WiredTiger


    Details

    • Operating System:
      ALL
    • Steps To Reproduce:

      function getNumberOfChunks(configDB) {
          return configDB.chunks.count();
      }
       
      var st = new ShardingTest("maxSize", 2, 1, 1, {shards: 2, chunksize: 1, manualAddShard: true});
      var dbName = "test";
      var collName = "topchunk";
      var db = st.getDB(dbName);
      var coll = db[collName];
      var configDB = st.s.getDB('config');
       
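      // Start the balancer, then enable the skipBalanceRound failpoint so no balancing rounds actually run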
      st.startBalancer();
      db.adminCommand({configureFailPoint: 'skipBalanceRound', mode: 'alwaysOn'});
       
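      // Manually add the two shards with maxSize limits of 5MB and 1MB (maxSize is given in megabytes)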
      st.adminCommand({addshard: st.getConnNames()[0], maxSize: 5});
      st.adminCommand({addshard: st.getConnNames()[1], maxSize: 1});
       
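      // Enable sharding on the test database, make shard0000 its primary, and shard the collection on {x: 1}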
      db.adminCommand({enableSharding: dbName});
      db.adminCommand({movePrimary: dbName, to: 'shard0000'});
      db.adminCommand({shardCollection: coll+"", key: {x: 1}});
       
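      // Pre-split into three chunks: place [1000, MaxKey) on shard0000 and [0, 1000) on shard0001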
      db.adminCommand({split: coll+"", middle: {x: 1000}});
      db.adminCommand({moveChunk: coll+"", find: {x: 1000}, to: 'shard0000'});
       
      db.adminCommand({split: coll+"", middle: {x: 0}});
      db.adminCommand({moveChunk: coll+"", find: {x: 0}, to: 'shard0001'});
       
      db.printShardingStatus(true);
       
      // Auto split on writes to the top chunk
      var largeStr = new Array(1000).join('x');
       
      // Number of chunks before auto-split
      var numChunks = getNumberOfChunks(configDB);
       
      // Insert one doc at a time until first auto-split occurs on top chunk
      var xval = 2000;
      do {
          var doc = {x: xval, val: largeStr};
          coll.insert(doc);
          xval++;
      } while (getNumberOfChunks(configDB) <= numChunks);
       
      db.printShardingStatus(true);
      st.stop();

    • Sprint:
      Sharding 1 04/03/15, Sharding 2 04/24/15, Sharding 3 05/15/15, Sharding 4 06/05/15, Sharding 5 06/26/16

      Description

      Original title: Auto split moves chunk to shard node running WiredTiger, despite exceeding maxSize

      Original description:

      The auto split of a chunk should not move the new chunk to a shard whose maxSize is already exceeded. This behavior works as expected with mmapv1, but fails when the shards run WiredTiger.
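
      A quick way to inspect the outcome (a sketch, assuming the coll and configDB variables from the repro script above are still in scope; run it just before st.stop()): print which shard owns the top chunk after the auto split, alongside each shard's configured maxSize from config.shards, so the placement can be compared between mmapv1 and WiredTiger runs.

      // Sketch of a post-repro check; assumes coll and configDB from the script above
      var ns = coll + "";

      // The chunk with the highest minimum bound is the top chunk created by the auto split
      var topChunk = configDB.chunks.find({ns: ns}).sort({"min.x": -1}).limit(1).next();
      print("Top chunk " + tojson(topChunk.min) + " -> " + tojson(topChunk.max) +
            " is owned by " + topChunk.shard);

      // maxSize (in MB) is recorded on each shard's document in config.shards
      configDB.shards.find().forEach(function(s) {
          print("Shard " + s._id + " maxSize: " + tojson(s.maxSize));
      });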


              People

               Assignee: Backlog - Sharding Team (backlog-server-sharding)
               Reporter: Jonathan Abrahams (jonathan.abrahams)
               Votes: 0
               Watchers: 7

                Dates

                Created:
                Updated:
                Resolved: