// Cluster setup: add two shards, make shard0 the primary shard for the
// "test" database, enable sharding on it, and print the cluster state.
sh.addShard("shard0/ip-10-102-151-133:30000")
sh.addShard("shard1/ip-10-203-173-62:30000")
db.adminCommand({ movePrimary: "test", to: "shard0" })
sh.enableSharding("test")
sh.status()
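
// Optional check, not part of the original steps: the load below relies on
// chunk migrations, so confirm the balancer is on (it is by default).
sh.getBalancerState()      // expect true
sh.setBalancerState(true)  // enable it if it was off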

// ~20KB filler string; each document slices its value from this.
var largeValue = new Array(2048).join('Largevalue');

// Set the chunk size to 2MB (default is 64MB) so the load below creates
// many chunks and forces frequent splits and migrations.
db.getSiblingDB("config").settings.save({"_id" : "chunksize", "value" : 2});
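
// Optional read-back, not part of the original steps: verify the setting took.
db.getSiblingDB("config").settings.find({"_id" : "chunksize"})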

// Shard the collection on a hashed key so inserts spread across both shards.
var coll = db.cap1833hashed;
coll.ensureIndex({idx: "hashed"});
sh.shardCollection("test.cap1833hashed", {idx: "hashed"});

// Secondary (non-shard-key) index on the same value.
coll.ensureIndex({idx2: 1});
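
// Optional check, not part of the original steps: hashed sharding pre-splits
// the empty collection, so chunks should already exist on both shards.
db.getSiblingDB("config").chunks.find({ns: "test.cap1833hashed"}).count()
coll.getShardDistribution()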

// Pre-load step
// Average doc size: ~1KB
// Chunk size: 2MB (~2000 docs per chunk)
// Total data: ~1GB (~500 chunks, 1,000,000 docs)
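
// Sanity check of the sizing math above (illustrative only; the numbers are
// the assumptions stated in the comments, not measured values):
// 1,000,000 docs * ~1KB = ~1GB; 1GB / 2MB per chunk = ~500 chunks.
print("expected chunks:", Math.round((1000000 * 1) / (2 * 1024)));  // ~488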
var numDocs = 1000000;
var startInsert = new Date();

// Unordered bulk insert; the shell batches the queued writes internally.
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
    // ~400 bytes of per-doc variance: value size uniform in [600, 1000)
    var arraySize = 600 + Math.floor(Math.random() * 400);
    var randomVal = Math.floor(Math.random() * numDocs);
    var doc = {idx: randomVal, idx2: randomVal, val: largeValue.slice(0, arraySize)};
    bulk.insert(doc);
}
var res = bulk.execute();
printjson(res);
var doneInsert = new Date();
print("Insert time (s):", (doneInsert - startInsert) / 1000);
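
// Post-load verification, not part of the original steps: report the doc
// count and how chunks ended up distributed across the two shards. Note
// count() can over-count while chunk migrations are still in flight.
print("doc count:", coll.count());
coll.getShardDistribution();
sh.status();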