[SERVER-5872] autosplit would be triggered before data size touch chunksize. The return message "updatedExisting" of update is missed Created: 18/May/12  Updated: 11/Jul/16  Resolved: 15/Jun/12

Status: Closed
Project: Core Server
Component/s: Sharding
Affects Version/s: 2.0.2
Fix Version/s: 2.0.7, 2.1.2

Type: Bug Priority: Minor - P4
Reporter: eason lin Assignee: siddharth.singh@10gen.com
Resolution: Done Votes: 0
Labels: None
Remaining Estimate: Not Specified
Time Spent: Not Specified
Original Estimate: Not Specified
Environment:

singleShard
mongos> db.shards.find()

{ "_id" : "P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw", "host" : "P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018" }

mongos> db.settings.find()

{ "_id" : "chunksize", "value" : 64 }

Operating System: Linux
Participants:

 Description   

chunksize was 64, but autosplit would be triggered before the data size touches it.
In my test, autosplit would be triggered after 16 times update with upsert flag.
When the autosplit occurred, 'err' was None but n was 0 in the return message, and "u'updatedExisting': False" was missing.

===Python Test Script===

from pymongo import Connection
_SYSTEM_INDEX_COLLECTION = u'system.indexes'
def get_conn():
    """Open a pymongo Connection to the mongos entry point used by this repro."""
    mongos_host = "P1.trend.lava.tw"
    return Connection(mongos_host)
def main():
    """Reproduce SERVER-5872.

    Repeatedly upsert into a freshly sharded collection until mongos
    triggers an autosplit; on the update that coincides with the split,
    the getLastError reply comes back with n=0 and without the
    'updatedExisting' field (see the ticket's test output).
    """
    db = "testsplit"
    coll = "coll_5"
    conn = get_conn()
    # Start from a clean database: drop every user collection, keeping
    # only system.indexes (which cannot be dropped directly).
    colls = conn[db].collection_names()
    if _SYSTEM_INDEX_COLLECTION in colls:
        colls.remove(_SYSTEM_INDEX_COLLECTION)
    for coll in colls:
        conn[db].drop_collection(coll)
    # NOTE(review): the loop above rebinds 'coll', so the namespace passed
    # to shardcollection below is the last collection dropped (or "coll_5"
    # only when the database was already empty) — confirm this is intended.
    admin = conn['admin']
    # Idempotent setup: tolerate re-runs against an already-sharded setup.
    admin.command('enablesharding', db, allowable_errors=['already enabled'])
    admin.command('shardcollection', "%s.%s"%(db,coll),allowable_errors=['already sharded'], key={"shardkey2":1,"shardkey1":1}, unique=False)
    # 15 safe upserts (range(1,16)); each hits a distinct shard key so the
    # chunk grows until mongos autosplits, reproducing the missing field.
    for id in range(1,16):
        mesg = conn[db][coll].update({"shardkey1":"test%s"%id,"shardkey2":"test%s"%id}, {"$set":{"test_upsert":"new_upsert_value3"}}, safe=True, upsert=True, multi=False)
        print "mesg=%s" % mesg
# Run the reproduction only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

===Test Script Output===
mesg={u'ok': 1.0, u'err': None, u'shards': [u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019', u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018'], u'n': 1, u'updatedExisting': False, u'shardRawGLE': {u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019':

{u'connectionId': 5705, u'ok': 1.0, u'err': None, u'n': 0}

, u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018':

{u'ok': 1.0, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a8f'), u'err': None, u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352771L}

}}
mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a90'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352772L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a91'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352773L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a92'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352774L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a93'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352775L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a94'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352776L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a95'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352777L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a96'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352778L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a97'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352779L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a98'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352780L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a99'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352781L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a9a'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352782L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a9b'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352783L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a9c'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352784L}

mesg={u'shardRawGLE': {u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019':

{u'connectionId': 5705, u'ok': 1.0, u'err': None, u'n': 0}

, u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018': {u'connectionId': 48862, u'lastOp': 5743692655800352785L, u'ok': 1.0, u'err': None, u'n': 0}}, u'ok': 1.0, u'shards': [u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019', u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018'], u'err': None, u'n': 0}

===The changelog after test===
{ "_id" : "p1.trend.lava.tw-2012-05-18T02:20:58-142", "server" : "p1.trend.lava.tw", "clientAddr" : "N/A", "time" : ISODate("2012-05-18T02:20:58.095Z"), "what" : "dropCollection.start", "ns" : "testsplit.coll_5", "details" : { } }
{ "_id" : "p1.trend.lava.tw-2012-05-18T02:20:58-143", "server" : "p1.trend.lava.tw", "clientAddr" : "N/A", "time" : ISODate("2012-05-18T02:20:58.242Z"), "what" : "dropCollection", "ns" : "testsplit.coll_5", "details" : { } }
{ "_id" : "p3.trend.lava.tw-2012-05-18T02:20:58-21", "server" : "p3.trend.lava.tw", "clientAddr" : "172.16.2.149:41322", "time" : ISODate("2012-05-18T02:20:58.665Z"), "what" : "split", "ns" : "testsplit.coll_5", "details" : { "before" : { "min" : { "shardkey2" :

{ $minKey : 1 }

, "shardkey1" :

{ $minKey : 1 }

}, "max" : { "shardkey2" :

{ $maxKey : 1 }

, "shardkey1" :

{ $maxKey : 1 }

}, "lastmod" :

{ "t" : 1000, "i" : 0 }

}, "left" : { "min" : { "shardkey2" :

{ $minKey : 1 }

, "shardkey1" :

{ $minKey : 1 }

}, "max" :

{ "shardkey2" : "test1", "shardkey1" : "test1" }

, "lastmod" :

{ "t" : 1000, "i" : 1 }

}, "right" : { "min" :

{ "shardkey2" : "test1", "shardkey1" : "test1" }

, "max" : { "shardkey2" :

{ $maxKey : 1 }

, "shardkey1" :

{ $maxKey : 1 }

}, "lastmod" :

{ "t" : 1000, "i" : 2 }

} } }

===Log of mongos.log after test===
Fri May 18 03:02:37 [mongosMain] connection accepted from 172.16.2.60:50953 #8299
Fri May 18 03:02:37 [conn8299] DROP: testsplit.coll_5
Fri May 18 03:02:37 [conn8299] about to log metadata event: { _id: "p1.trend.lava.tw-2012-05-18T03:02:37-144", server: "p1.trend.lava.tw", clientAddr: "N/A", time: new Date(1337310157712), what: "dropCollection.start", ns: "testsplit.coll_5", details: {} }
Fri May 18 03:02:37 [conn8299] distributed lock 'testsplit.coll_5/p1.trend.lava.tw:27017:1287157484:1804289383' acquired, ts : 4fb5bbcd0cf1d1e54a301387
Fri May 18 03:02:37 [conn8299] about to log metadata event: { _id: "p1.trend.lava.tw-2012-05-18T03:02:37-145", server: "p1.trend.lava.tw", clientAddr: "N/A", time: new Date(1337310157971), what: "dropCollection", ns: "testsplit.coll_5", details: {} }
Fri May 18 03:02:38 [conn8299] distributed lock 'testsplit.coll_5/p1.trend.lava.tw:27017:1287157484:1804289383' unlocked.
Fri May 18 03:02:38 [conn8299] CMD: shardcollection: { shardcollection: "testsplit.coll_5", unique: false, key:

{ shardkey2: 1, shardkey1: 1 }

}
Fri May 18 03:02:38 [conn8299] enable sharding on: testsplit.coll_5 with shard key:

{ shardkey2: 1, shardkey1: 1 }

Fri May 18 03:02:38 [conn8299] created new distributed lock for testsplit.coll_5 on P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019 ( lock timeout : 900000, ping interval : 30000, process : 0 )
Fri May 18 03:02:38 [conn8299] ChunkManager: time to load chunks for testsplit.coll_5: 0ms sequenceNumber: 147 version: 0|0
Fri May 18 03:02:38 [conn8299] going to create 1 chunk(s) for: testsplit.coll_5
Fri May 18 03:02:38 [conn8299] warning: version 0 found when reloading chunk manager, collection 'testsplit.coll_5' initially detected as sharded
Fri May 18 03:02:38 [conn8299] created new distributed lock for testsplit.coll_5 on P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019 ( lock timeout : 900000, ping interval : 30000, process : 0 )
Fri May 18 03:02:38 [conn8299] ChunkManager: time to load chunks for testsplit.coll_5: 0ms sequenceNumber: 148 version: 1|0
Fri May 18 03:02:38 [conn8299] SyncClusterConnection connecting to [P1.trend.lava.tw:27019]
Fri May 18 03:02:38 [conn8299] SyncClusterConnection connecting to [P2.trend.lava.tw:27019]
Fri May 18 03:02:38 [conn8299] SyncClusterConnection connecting to [P3.trend.lava.tw:27019]
Fri May 18 03:02:38 [conn8299] created new distributed lock for testsplit.coll_5 on P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019 ( lock timeout : 900000, ping interval : 30000, process : 0 )
Fri May 18 03:02:38 [conn8299] ChunkManager: time to load chunks for testsplit.coll_5: 0ms sequenceNumber: 149 version: 1|2
Fri May 18 03:02:38 [conn8299] autosplitted testsplit.coll_5 shard: ns:testsplit.coll_5 at: P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw:P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018 lastmod: 1|0 min:

{ shardkey2: MinKey, shardkey1: MinKey }

max:

{ shardkey2: MaxKey, shardkey1: MaxKey }

on:

{ shardkey2: "test1", shardkey1: "test1" }

(splitThreshold 921)
Fri May 18 03:02:38 [conn8299] update will be retried b/c sharding config info is stale, left:4 ns: testsplit.coll_5 query:

{ shardkey2: "test14", shardkey1: "test14" }

Fri May 18 03:02:38 [conn8299] end connection 172.16.2.60:50953



 Comments   
Comment by auto [ 09/Jul/12 ]

Author:

{u'date': u'2012-07-09T10:59:09-07:00', u'name': u'Siddharth Singh', u'email': u'singhsiddharth@gmail.com'}

Message: SERVER-5872 Test updatedExisting not missing

When autosplit, 'updatedExisting': False is missing.
Branch: master
https://github.com/mongodb/mongo/commit/a056a5dfd7840ad7bed1ab4ff6f9c9bc5c49885e

Comment by auto [ 03/Jul/12 ]

Author:

{u'date': u'2012-07-03T08:42:18-07:00', u'email': u'milkie@10gen.com', u'name': u'Eric Milkie'}

Message: SERVER-5872 use ScopedDBConnection when we talk to other shards during autosplit
Branch: v2.0
https://github.com/mongodb/mongo/commit/16fdca349817b8be0e6a0b5e9c37a2e1d294cf43

Comment by auto [ 15/Jun/12 ]

Author:

{u'date': u'2012-06-15T08:47:39-07:00', u'email': u'singhsiddharth@gmail.com', u'name': u'Siddharth Singh'}

Message: SERVER-5872 Use internalScopedDBConnection

Use internalScopedDBConnection when we talk to other shards during
autosplit.
Branch: master
https://github.com/mongodb/mongo/commit/dc39b4ee2a53aa47849f8e4b0c91df2c86ec7228

Comment by siddharth.singh@10gen.com [ 15/Jun/12 ]

Found the problem source. During autosplits, mongos talks to the shards. The existing code, however, was using the same connection (the one over which we were doing upserts) to communicate with the shards. getLastError command in those cases was reporting the information from the last database operation which was mongos querying the shard and hence the updatedExisting message was missing. Will push the fix to the master branch soon. Thanks for reporting this to us.

Comment by eason lin [ 14/Jun/12 ]

Thank you for your update.

What I meant by 'the updatedExisting field of the return message is missing' is that the first 15 insertions were fine, but there was no updatedExisting field on the 16th insertion.

I copied the Test Script Output and highlighted the relevant part in red.

(Excuse me, I couldn't find a way to edit the description, so I had to copy it here.)

===Test Script Output===
mesg={u'ok': 1.0, u'err': None, u'shards': [u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019', u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018'], u'n': 1, u'updatedExisting': False, u'shardRawGLE': {u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019':

{u'connectionId': 5705, u'ok': 1.0, u'err': None, u'n': 0}

, u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018':

{u'ok': 1.0, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a8f'), u'err': None, u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352771L}

}}
mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a90'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352772L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a91'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352773L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a92'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352774L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a93'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352775L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a94'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352776L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a95'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352777L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a96'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352778L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a97'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352779L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a98'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352780L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a99'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352781L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a9a'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352782L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a9b'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352783L}

mesg=

{u'ok': 1.0, u'err': None, u'upserted': ObjectId('4fb5b20ab7487b8bf2693a9c'), u'singleShard': u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018', u'connectionId': 48862, u'n': 1, u'updatedExisting': False, u'lastOp': 5743692655800352784L}

mesg={u'shardRawGLE': {u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019': {u'connectionId': 5705, u'ok': 1.0, u'err': None, u'n': 0}, u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018': {u'connectionId': 48862, u'lastOp': 5743692655800352785L, u'ok': 1.0, u'err': None, u'n': 0}}, u'ok': 1.0, u'shards': [u'P1.trend.lava.tw:27019,P2.trend.lava.tw:27019,P3.trend.lava.tw:27019', u'P1.trend.lava.tw_P2.trend.lava.tw_P3.trend.lava.tw/P1.trend.lava.tw:27018,P3.trend.lava.tw:27018,P2.trend.lava.tw:27018'], u'err': None, u'n': 0}

Comment by siddharth.singh@10gen.com [ 13/Jun/12 ]

It is normal for the autosplits to be triggered before the data size touches the chunksize. updatedExisting field has a boolean value and in the logs that you have posted it does show up as 'updatedExisting': False so I am not sure what did you mean by 'return message of updatedExisting is missed' ?

Generated at Thu Feb 08 03:10:06 UTC 2024 using Jira 9.7.1#970001-sha1:2222b88b221c4928ef0de3161136cc90c8356a66.