[SERVER-12827] can't log in again after running mongorestore? Created: 21/Feb/14  Updated: 10/Dec/14  Resolved: 26/Feb/14

Status: Closed
Project: Core Server
Component/s: Tools
Affects Version/s: None
Fix Version/s: None

Type: Bug Priority: Major - P3
Reporter: Michael O'Brien Assignee: Spencer Brody (Inactive)
Resolution: Cannot Reproduce Votes: 0
Labels: 26qa
Remaining Estimate: Not Specified
Time Spent: Not Specified
Original Estimate: Not Specified

Issue Links:
Related
related to SERVER-12854 Prevent mongorestore from restoring u... Closed
Operating System: ALL
Participants:

 Description   

Setup is as follows:

1. Start up the source replset on 2.4 and create the user {user: "user", pwd: "pwd"}, then upgrade the set to 2.6. Log in as "user" and insert a number of additional test users with various roles into the admin db.

2. Start up the target replset on 2.4 and create the user {user: "user", pwd: "pwd"}, then upgrade it to 2.6.

At this point both source and target are still on the old auth schema, and both contain a user with username:password "user":"pwd", but the source has a number of additional users that don't exist in the target yet.

3. Run mongodump on the source.

4. Run mongorestore into the target.

5. I can no longer log in as "user":"pwd" on the target. The logs suggest that the user was deleted, and trying to auth as it fails (log below; a short verification sketch follows the log):

   Running mongorestore to target database (2.6) using mongorestore version: 2.4
2014-02-21T15:06:03.972-0500 shell: started program mongorestore-2.4 --host rs3/Michaels-MacBook-Pro.local:50030,Michaels-MacBook-Pro.local:50031,Michaels-MacBook-Pro.local:50032 --username __system --password abcdefghijklmnopqrstuvwxyz --authenticationDatabase local
sh84050| Fri Feb 21 15:06:03.992 starting new replica set monitor for replica set rs3 with seed of Michaels-MacBook-Pro.local:50030,Michaels-MacBook-Pro.local:50031,Michaels-MacBook-Pro.local:50032
sh84050| Fri Feb 21 15:06:03.993 successfully connected to seed Michaels-MacBook-Pro.local:50030 for replica set rs3
 m50030| 2014-02-21T15:06:03.993-0500 [initandlisten] connection accepted from 10.4.122.245:65337 #2 (2 connections now open)
sh84050| Fri Feb 21 15:06:03.993 changing hosts to { 0: "Michaels-MacBook-Pro.local:50030", 1: "Michaels-MacBook-Pro.local:50032", 2: "Michaels-MacBook-Pro.local:50031" } from rs3/
sh84050| Fri Feb 21 15:06:03.993 trying to add new host Michaels-MacBook-Pro.local:50030 to replica set rs3
sh84050| Fri Feb 21 15:06:03.994 successfully connected to new host Michaels-MacBook-Pro.local:50030 in replica set rs3
 m50030| 2014-02-21T15:06:03.994-0500 [initandlisten] connection accepted from 10.4.122.245:65338 #3 (3 connections now open)
sh84050| Fri Feb 21 15:06:03.994 trying to add new host Michaels-MacBook-Pro.local:50031 to replica set rs3
sh84050| Fri Feb 21 15:06:03.994 successfully connected to new host Michaels-MacBook-Pro.local:50031 in replica set rs3
sh84050| Fri Feb 21 15:06:03.994 trying to add new host Michaels-MacBook-Pro.local:50032 to replica set rs3
 m50031| 2014-02-21T15:06:03.994-0500 [initandlisten] connection accepted from 10.4.122.245:65339 #6 (3 connections now open)
sh84050| Fri Feb 21 15:06:03.994 successfully connected to new host Michaels-MacBook-Pro.local:50032 in replica set rs3
 m50032| 2014-02-21T15:06:03.994-0500 [initandlisten] connection accepted from 10.4.122.245:65340 #5 (3 connections now open)
 m50030| 2014-02-21T15:06:03.995-0500 [initandlisten] connection accepted from 10.4.122.245:65341 #4 (4 connections now open)
 m50031| 2014-02-21T15:06:03.995-0500 [initandlisten] connection accepted from 10.4.122.245:65342 #7 (4 connections now open)
sh84050| Fri Feb 21 15:06:03.996 Primary for replica set rs3 changed to Michaels-MacBook-Pro.local:50032
 m50031| 2014-02-21T15:06:03.996-0500 [conn7] Unauthorized not authorized on admin to execute command { replSetGetStatus: 1 }
 m50030| 2014-02-21T15:06:03.995-0500 [conn4] Unauthorized not authorized on admin to execute command { replSetGetStatus: 1 }
 m50030| 2014-02-21T15:06:03.995-0500 [conn2] end connection 10.4.122.245:65337 (3 connections now open)
 m50030| 2014-02-21T15:06:03.995-0500 [conn4] Unauthorized not authorized on admin to execute command { replSetGetStatus: 1 }
 m50032| 2014-02-21T15:06:03.996-0500 [initandlisten] connection accepted from 10.4.122.245:65343 #6 (4 connections now open)
 m50032| 2014-02-21T15:06:03.996-0500 [conn6] Unauthorized not authorized on admin to execute command { replSetGetStatus: 1 }
 m50032| 2014-02-21T15:06:03.996-0500 [conn6] Unauthorized not authorized on admin to execute command { replSetGetStatus: 1 }
sh84050| Fri Feb 21 15:06:03.996 replica set monitor for replica set rs3 started, address is rs3/Michaels-MacBook-Pro.local:50030,Michaels-MacBook-Pro.local:50031,Michaels-MacBook-Pro.local:50032
sh84050| connected to: rs3/Michaels-MacBook-Pro.local:50030,Michaels-MacBook-Pro.local:50031,Michaels-MacBook-Pro.local:50032
sh84050| Fri Feb 21 15:06:03.996 [ReplicaSetMonitorWatcher] starting
 m50032| 2014-02-21T15:06:03.997-0500 [initandlisten] connection accepted from 10.4.122.245:65344 #7 (5 connections now open)
 m50032| 2014-02-21T15:06:03.997-0500 [conn7]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
sh84050| Fri Feb 21 15:06:03.998 dump/admin/system.users.bson
sh84050| Fri Feb 21 15:06:03.998        going into namespace [admin.system.users]
sh84050| Fri Feb 21 15:06:03.998 warning: Restoring to admin.system.users without dropping. Restored data will be inserted without raising errors; check your server log
sh84050| 10 objects found
sh84050| Fri Feb 21 15:06:03.999        Creating index: { key: { _id: 1 }, ns: "admin.system.users", name: "_id_" }
sh84050| Fri Feb 21 15:06:03.999        Creating index: { key: { user: 1, userSource: 1 }, unique: true, ns: "admin.system.users", name: "user_1_userSource_1" }
MONGORESTORE is done.
sh84050| Fri Feb 21 15:06:03.999 dump/admin/system.version.bson
sh84050| Fri Feb 21 15:06:03.999        going into namespace [admin.system.version]
sh84050| 1 objects found
sh84050| Fri Feb 21 15:06:03.999        Creating index: { key: { _id: 1 }, name: "_id_", ns: "admin.system.version" }
authing to target db
 m50031| 2014-02-21T15:06:03.998-0500 [initandlisten] connection accepted from 10.4.122.245:65345 #8 (5 connections now open)
 m50030| 2014-02-21T15:06:04.002-0500 [conn4] end connection 10.4.122.245:65341 (2 connections now open)
 m50031| 2014-02-21T15:06:03.998-0500 [conn8]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
Error: 18 { ok: 0.0, errmsg: "auth failed", code: 18 }
 m50031| 2014-02-21T15:06:04.002-0500 [conn8] end connection 10.4.122.245:65345 (4 connections now open)
 m50031| 2014-02-21T15:06:04.002-0500 [conn6] end connection 10.4.122.245:65339 (3 connections now open)
0
 m50030| 2014-02-21T15:06:04.003-0500 [conn3] end connection 10.4.122.245:65338 (1 connection now open)
 m50031| 2014-02-21T15:06:04.003-0500 [conn7] end connection 10.4.122.245:65342 (2 connections now open)
 m50032| 2014-02-21T15:06:03.999-0500 [conn7] build index on: admin.system.version properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "admin.system.version" }
 m50032| 2014-02-21T15:06:03.999-0500 [conn7]    added index to empty collection
 m50032| 2014-02-21T15:06:04.002-0500 [conn7] end connection 10.4.122.245:65344 (4 connections now open)
 m50032| 2014-02-21T15:06:04.002-0500 [conn6] end connection 10.4.122.245:65343 (4 connections now open)
 m50032| 2014-02-21T15:06:04.002-0500 [conn5] end connection 10.4.122.245:65340 (3 connections now open)
 m50032| 2014-02-21T15:06:04.004-0500 [conn1] Removed deleted user user@admin from session cache of user information.
 m50032| 2014-02-21T15:06:04.004-0500 [conn1]  authenticate db: admin { authenticate: 1, nonce: "xxx", user: "user", key: "xxx" }
 m50032| 2014-02-21T15:06:04.004-0500 [conn1] Failed to authenticate user@admin with mechanism MONGODB-CR: AuthenticationFailed UserNotFound Could not find user user@admin
 m50031| 2014-02-21T15:06:04.509-0500 [rsHealthPoll] replSet member Michaels-MacBook-Pro.local:50032 is now in state PRIMARY
 m50030| 2014-02-21T15:06:04.510-0500 [initandlisten] connection accepted from 10.4.122.245:65346 #5 (2 connections now open)
 m50030| 2014-02-21T15:06:04.511-0500 [conn5]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50031| 2014-02-21T15:06:04.511-0500 [rsHealthPoll] replset info Michaels-MacBook-Pro.local:50030 thinks that we are down
 m50031| 2014-02-21T15:06:05.509-0500 [rsBackgroundSync] replSet syncing to: Michaels-MacBook-Pro.local:50032
 m50032| 2014-02-21T15:06:05.510-0500 [initandlisten] connection accepted from 10.4.122.245:65347 #8 (3 connections now open)
 m50032| 2014-02-21T15:06:05.511-0500 [conn8]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50032| 2014-02-21T15:06:05.512-0500 [initandlisten] connection accepted from 10.4.122.245:65348 #9 (4 connections now open)
 m50032| 2014-02-21T15:06:05.512-0500 [conn9]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50031| 2014-02-21T15:06:05.513-0500 [repl writer worker 1] build index on: admin.system.version properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "admin.system.version" }
 m50031| 2014-02-21T15:06:05.513-0500 [repl writer worker 1]     added index to empty collection
 m50030| 2014-02-21T15:06:05.716-0500 [initandlisten] connection accepted from 10.4.122.245:65349 #6 (3 connections now open)
 m50030| 2014-02-21T15:06:05.716-0500 [conn6]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50032| 2014-02-21T15:06:05.717-0500 [rsHealthPoll] replset info Michaels-MacBook-Pro.local:50030 thinks that we are down
 m50032| 2014-02-21T15:06:05.804-0500 [initandlisten] connection accepted from 10.4.122.245:65350 #10 (5 connections now open)
 m50031| 2014-02-21T15:06:05.804-0500 [initandlisten] connection accepted from 10.4.122.245:65351 #9 (3 connections now open)
 m50031| 2014-02-21T15:06:05.805-0500 [conn9]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50032| 2014-02-21T15:06:05.805-0500 [conn10]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50030| 2014-02-21T15:06:05.805-0500 [rsHealthPoll] replSet member Michaels-MacBook-Pro.local:50031 is up
 m50030| 2014-02-21T15:06:05.805-0500 [rsHealthPoll] replSet member Michaels-MacBook-Pro.local:50032 is up
 m50030| 2014-02-21T15:06:05.805-0500 [rsHealthPoll] replSet member Michaels-MacBook-Pro.local:50031 is now in state SECONDARY
 m50030| 2014-02-21T15:06:05.805-0500 [rsHealthPoll] replSet member Michaels-MacBook-Pro.local:50032 is now in state PRIMARY
 m50032| 2014-02-21T15:06:06.528-0500 [slaveTracking] build index on: local.slaves properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.slaves" }
 m50032| 2014-02-21T15:06:06.528-0500 [slaveTracking]    added index to empty collection
 m40021| 2014-02-21T15:06:07.049-0500 [conn5] end connection 10.4.122.245:65302 (6 connections now open)
 m40021| 2014-02-21T15:06:07.049-0500 [initandlisten] connection accepted from 10.4.122.245:65352 #13 (7 connections now open)
 m40021| 2014-02-21T15:06:07.050-0500 [conn13]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50030| 2014-02-21T15:06:08.514-0500 [conn5] end connection 10.4.122.245:65346 (2 connections now open)
 m50030| 2014-02-21T15:06:08.514-0500 [initandlisten] connection accepted from 10.4.122.245:65353 #7 (3 connections now open)
 m50030| 2014-02-21T15:06:08.515-0500 [conn7]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50030| 2014-02-21T15:06:09.719-0500 [conn6] end connection 10.4.122.245:65349 (2 connections now open)
 m50030| 2014-02-21T15:06:09.720-0500 [initandlisten] connection accepted from 10.4.122.245:65354 #8 (3 connections now open)
 m50030| 2014-02-21T15:06:09.720-0500 [conn8]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50030| 2014-02-21T15:06:10.808-0500 [rsBackgroundSync] replSet syncing to: Michaels-MacBook-Pro.local:50031
 m50031| 2014-02-21T15:06:10.809-0500 [initandlisten] connection accepted from 10.4.122.245:65355 #10 (4 connections now open)
 m50031| 2014-02-21T15:06:10.809-0500 [conn10]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50031| 2014-02-21T15:06:10.810-0500 [initandlisten] connection accepted from 10.4.122.245:65356 #11 (5 connections now open)
 m50031| 2014-02-21T15:06:10.810-0500 [conn11]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50030| 2014-02-21T15:06:10.811-0500 [rsSyncNotifier] replset setting oplog notifier to Michaels-MacBook-Pro.local:50031
 m50031| 2014-02-21T15:06:10.811-0500 [conn11] end connection 10.4.122.245:65356 (4 connections now open)
 m50031| 2014-02-21T15:06:10.811-0500 [initandlisten] connection accepted from 10.4.122.245:65357 #12 (5 connections now open)
 m50031| 2014-02-21T15:06:10.811-0500 [conn12]  authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" }
 m50030| 2014-02-21T15:06:10.812-0500 [rsSync] build index on: local.replset.minvalid properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.replset.minvalid" }
 m50030| 2014-02-21T15:06:10.812-0500 [rsSync]   added index to empty collection
 m50030| 2014-02-21T15:06:10.813-0500 [repl writer worker 1] build index on: admin.system.version properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "admin.system.version" }
 m50030| 2014-02-21T15:06:10.813-0500 [repl writer worker 1]     added index to empty collection
 m50031| 2014-02-21T15:06:12.315-0500 [slaveTracking] build index on: local.slaves properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.slaves" }
 m50031| 2014-02-21T15:06:12.315-0500 [slaveTracking]    added index to empty collection
 m50032| 2014-02-21T15:06:14.007-0500 [conn1] assertion 13 not authorized for query on admin.system.users ns:admin.system.users query:{}
2014-02-21T15:06:14.008-0500 error: { "$err" : "not authorized for query on admin.system.users", "code" : 13 } at src/mongo/shell/query.js:131
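 
For reference, here is a minimal way to inspect what state the target ended up in after the restore. This is a sketch for the mongo shell, not part of the original repro; it assumes a connection with credentials that can still read the admin db (e.g. the __system keyfile user, which is what the repro script below runs the tools as), and it assumes the 2.6 layout of the auth schema version document:

var admin = db.getSiblingDB("admin")
 
// Which auth schema is the target on? 2.6 records this in
// admin.system.version under _id "authSchema"; if the document is
// absent, the server infers the schema from the existing
// system.users format (the old 2.4 schema in this scenario).
printjson(admin.system.version.findOne({_id: "authSchema"}))
 
// Under the old schema, credentials live directly in
// admin.system.users. Did "user" survive the restore?
printjson(admin.system.users.findOne({user: "user"}))
 
// The failing login itself: auth() returns 1 on success, 0 on failure.
print(admin.auth("user", "pwd"))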
 
 Comments   
Comment by Spencer Brody (Inactive) [ 26/Feb/14 ]

Got the test running, but I cannot reproduce the original behavior.

Comment by Michael O'Brien [ 24/Feb/14 ]

Spencer: you can get it from ./jstests/multiVersion/libs/multi_rs.js

Comment by Spencer Brody (Inactive) [ 24/Feb/14 ]

I can't run your repro script because I don't have multi_rs.js.

Comment by Michael O'Brien [ 21/Feb/14 ]

Here is my repro script. This was on rc0, by the way.

load('multi_rs.js')
 
//Set of sample users to create on each test run
users = [
  {user:"user1", roles: ["read"], pwd:"pwd"},
  {user:"user2", roles: ["readWrite"], pwd:"pwd"},
  {user:"user3", roles: ["dbAdmin"], pwd:"pwd"},
  {user:"user4", roles: ["userAdmin"], pwd:"pwd"},
  {user:"user5", roles: ["clusterAdmin"], pwd:"pwd"},
  {user:"user6", roles: ["readAnyDatabase"], pwd:"pwd"},
  {user:"user7", roles: ["readWriteAnyDatabase"], pwd:"pwd"},
  {user:"user8", roles: ["userAdminAnyDatabase"], pwd:"pwd"},
  {user:"user9", roles: ["dbAdminAnyDatabase"], pwd:"pwd"},
]
 
rscount = 1
//Helper function to generate a new name + port number for each test.
function newReplSetInfo(){
  rscount++
  return {name:"rs" + rscount, portOffset: rscount*10}
}
 
//Helper function to insert the test users into a replset.
//if "useOld" is true, it calls addUser
//otherwise, it uses createUser
function addTestUsers(replset, useOld){
  //Create a bunch of users in our source database.
  print("Adding users")
  for(var i=0;i<users.length;i++){
    if(useOld){
        replset.getPrimary().getDB("admin").addUser(users[i])
    }else{
        replset.getPrimary().getDB("admin").createUser(users[i])
    }
  }
  print("Done adding users")
}
 
function runDumpRestoreTest(options, asserts){
  print("TESTING WITH VERSIONS:")
  printjson(options)
  var sourceVersion = options.s
  var targetVersion = options.t
  var dumpVersion = options.d
  var restoreVersion = options.r
  var oldAuth = options.oldSchema
 
  var sourceInfo = newReplSetInfo()
  var source = new ReplSetTest({name:sourceInfo.name, nodes:3,   keyFile:"testKeyFile", startPort:40000 + sourceInfo.portOffset })
 
  var targetInfo = newReplSetInfo()
  var target = new ReplSetTest({name:targetInfo.name, nodes:3,   keyFile:"testKeyFile", startPort:50000 + targetInfo.portOffset})
  var sourceStartVersion = sourceVersion
  var targetStartVersion = targetVersion
 
  //If we want to use the old auth schema, we may need to start with 2.4 binaries even if the source/target is 2.6
  if(sourceStartVersion == "2.6" && oldAuth){
    sourceStartVersion = "2.4"
  }
  if(targetStartVersion == "2.6" && oldAuth){
    targetStartVersion = "2.4"
  }
  print("source version " + sourceVersion + " source start version " + sourceStartVersion)
  print("target version " + targetVersion + " target start version " + targetStartVersion)
 
  source.startSet({binVersion:sourceStartVersion})
  source.initiate()
  target.startSet({binVersion:targetStartVersion})
  target.initiate()
 
  //Set up the all-powerful user we will run the dump/restore programs as.
  //Need to pick command to use (createUser vs. addUser) based on version, since createUser doesn't exist in 2.4.x.
  print("Creating users in source database (" + sourceVersion + ")")
  if(sourceVersion != "2.4"){
    if(oldAuth){ //start the cluster on 2.4, add the user then upgrade the binaries so the auth schema is still old.
      print("old auth source version")
      source.getPrimary().getDB("admin").addUser({user:"user", roles:["userAdminAnyDatabase", "readWriteAnyDatabase", "clusterAdmin"], pwd:"pwd"})
      source.getPrimary().getDB("admin").auth("user","pwd")
      addTestUsers(source, true)
      source.upgradeSet(sourceVersion, {auth:{"user":"user","pwd":"pwd"}})
    } else{
      print("not old auth source version")
      source.getPrimary().getDB("admin").createUser({user:"user", roles:["userAdminAnyDatabase", "readWriteAnyDatabase", "clusterAdmin"], pwd:"pwd"})
      source.getPrimary().getDB("admin").auth("user","pwd")
      addTestUsers(source, false)
    }
  }else{
    source.getPrimary().getDB("admin").addUser({user:"user", roles:["userAdminAnyDatabase", "readWriteAnyDatabase", "clusterAdmin"], pwd:"pwd"})
    source.getPrimary().getDB("admin").auth("user","pwd")
    addTestUsers(source, true)
  }
  source.getPrimary().getDB("admin").auth("user", "pwd")
 
  if(targetVersion != "2.4"){
    if(oldAuth){ //start the cluster on 2.4, add the user then upgrade the binaries so the auth schema is still old.
      print("old auth target version")
      print("calling adduser right here2")
      target.getPrimary().getDB("admin").addUser({user:"user", roles:["userAdminAnyDatabase", "readWriteAnyDatabase", "clusterAdmin"], pwd:"pwd"})
      target.getPrimary().getDB("admin").auth("user","pwd")
      target.upgradeSet(targetVersion, {auth:{"user":"user", "pwd":"pwd"}})
    } else{
    print("not old auth target version")
      print("calling createuser right here2")
      target.getPrimary().getDB("admin").createUser({user:"user", roles:["userAdminAnyDatabase", "readWriteAnyDatabase", "clusterAdmin"], pwd:"pwd"})
    }
  }else{
    print("calling adduser right here")
    target.getPrimary().getDB("admin").addUser({user:"user", roles:["userAdminAnyDatabase", "readWriteAnyDatabase", "clusterAdmin"], pwd:"pwd"})
  }
  target.getPrimary().getDB("admin").auth("user", "pwd")
 
  print("done initializing replsets for:")
  printjson(options)
 
  print("SOURCE VERSION: ")
  printjson(source.getPrimary().getDB("admin").system.version.find().toArray())
  print("TARGET VERSION: ")
  printjson(target.getPrimary().getDB("admin").system.version.find().toArray())
  print("SOURCE DB VER:  "+ source.getPrimary().getDB("admin").version())
  print("TARGET DB VER:  "+ target.getPrimary().getDB("admin").version())
 
/*
  //Create a bunch of users in our source database.
  for(var i=0;i<users.length;i++){
    var func = source.getPrimary().getDB("admin").addUser
    if(sourceVersion != "2.4"){
      if(oldAuth){
        print("trying adduser1")
        source.getPrimary().getDB("admin").addUser(users[i])
      }else{
        print("trying createuser")
        source.getPrimary().getDB("admin").createUser(users[i])
      }
    }else{
      print("trying adduser")
      source.getPrimary().getDB("admin").addUser(users[i])
    }
  }
  print("Done adding users")
  */
 
  //Dump the data from the source server.
  print("Running mongodump from source database (" + sourceVersion + ") using mongodump version: " + dumpVersion)
  MongoRunner.runMongoTool("mongodump-" + dumpVersion, {host:source.getURL(), authenticationDatabase: "admin", username:"user", password:"pwd" })
  //Restore the data into the target server. Note that the restore tool
  //authenticates as the internal __system user against the "local"
  //database (keyfile credentials), not as a regular admin user.
  var targetAdmin = target.getPrimary().getDB("admin")
  printjson(targetAdmin.auth("user", "pwd"))
  print("Running mongorestore to target database (" + targetVersion + ") using mongorestore version: " + restoreVersion)
  MongoRunner.runMongoTool("mongorestore-" + restoreVersion, {host:target.getURL() , username:"__system", password:"abcdefghijklmnopqrstuvwxyz", authenticationDatabase:"local"})
 
  print("MONGORESTORE is done.")
 
  print("authing to target db")
  try{
    printjson(targetAdmin.auth("user", "pwd"))
  }catch(e){
    print("couldn't log in!")
  }
 
  sleep(10000)
  var targetUsers = targetAdmin.system.users.find().toArray()
  var sourceUsers = source.getPrimary().getDB("admin").system.users.find().toArray()
 
  print("RUNNING ASSERTIONS FOR TEST CONFIG")
  printjson(options)
  printjson(asserts)
  printjson(sourceUsers)
  printjson(targetUsers)
  //Check users existence.
  var assertFunc = asserts.usersShouldExist ? assert.neq : assert.eq
  for(var i=0;i<users.length;i++){
    var userDoc = targetAdmin.system.users.findOne({user:users[i].user})
    assertFunc(userDoc, null)
  }
 
  //Check that logins work.
  var assertFunc = asserts.loginsShouldWork ? assert.neq : assert.eq
  for(var i=0;i<users.length;i++){
    targetAdmin.logout()
    assertFunc(targetAdmin.auth(users[i].user, users[i].pwd), 0)
  }
  /*
 
  //For cases where source is 2.4 and target is 2.6,
  //or vice versa
  if(sourceVersion != targetVersion){
    if(oldAuth){
      //If the old auth schema was used, all users should have been restored
      printjson(options)
      assert.eq(targetUsers.length, sourceUsers.length,  "target users count was expected to match source")
    }else{
      //If the *new* auth schema was used,
      //inserts against the differing auth schema should have been dropped.
      //So the only user in "targetUsers" is the one we created at start
      printjson(options)
      printjson(targetUsers)
      printjson(sourceUsers)
      assert.eq(targetUsers.length, 1, "target users count was expected to be 1")
    }
  }else{
    //Going from either 2.4 -> 2.4 or 2.6 -> 2.6
    //So all users should definitely have been dumped/restored.
    printjson(options)
    assert.eq(targetUsers.length, sourceUsers.length, "user counts did not match")
  }
  */
  source.stopSet()
  target.stopSet()
  print("DONE TESTING FOR:")
  printjson(options)
}
 
assertions = {usersShouldExist:false, loginsShouldWork:false}
runDumpRestoreTest({s:"2.6",t: "2.6",d: "2.4",r: "2.4", oldSchema:true}, assertions)
 
