commit a0e5a6bfd85d872b3100965b081a4d05e10fb887 Author: Charlie Swanson Date: Thu Jan 22 13:21:02 2015 -0500 Yielding tests, after squashing. diff --git a/jstests/concurrency/fsm_all.js b/jstests/concurrency/fsm_all.js index 795f2e0..b0a8771 100644 --- a/jstests/concurrency/fsm_all.js +++ b/jstests/concurrency/fsm_all.js @@ -8,6 +8,7 @@ var blacklist = [ // Disabled due to known bugs 'agg_sort_external.js', // SERVER-16700 Deadlock on WiredTiger LSM 'findAndModify_update_grow.js', // SERVER-17021 Perf. Regression for WT overflow items + 'yield_sort.js', // SERVER-17011 Cursor can return objects out of order if updated during query // Disabled due to MongoDB restrictions and/or workload restrictions @@ -26,6 +26,7 @@ var blacklist = [ 'create_capped_collection_maxdocs.js', ].map(function(file) { return dir + '/' + file; }); -runWorkloadsSerially(ls(dir).filter(function(file) { +runWorkloadsSerially([dir + '/yield_and_hashed.js']); + /*ls(dir).filter(function(file) { return !Array.contains(blacklist, file); -})); +})); */ diff --git a/jstests/concurrency/fsm_all_composed.js b/jstests/concurrency/fsm_all_composed.js index 2c57748..dbdda0f 100644 --- a/jstests/concurrency/fsm_all_composed.js +++ b/jstests/concurrency/fsm_all_composed.js @@ -8,6 +8,7 @@ var blacklist = [ // Disabled due to known bugs 'agg_sort_external.js', // SERVER-16700 Deadlock on WiredTiger LSM 'findAndModify_update_grow.js', // SERVER-17021 Perf. Regression for WT overflow items + 'yield_sort.js', // SERVER-17011 Cursor can return objects out of order if updated during query // Disabled due to MongoDB restrictions and/or workload restrictions diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js index 81e0d4f..0f2e517 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js @@ -40,7 +40,8 @@ var $config = (function() { function setup(db, collName) { var ixSpec = {}; ixSpec[this.indexedField] = 'text'; - assertAlways.commandWorked(db[collName].ensureIndex(ixSpec)); + // Only allowed to create one text index, other tests may create one. + assertWhenOwnColl.commandWorked(db[collName].ensureIndex(ixSpec)); } var text = [ diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js new file mode 100644 index 0000000..216ab78 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/yield.js @@ -0,0 +1,181 @@ +'use strict'; + +/** + * yield.js + * + * Designed to execute queries and make them yield as much as possible while also updating and + * removing documents that they operate on. + */ +var $config = (function() { + + var data = { + // Number of docs to insert at the beginning. + nDocs: 200, + // Batch size of queries to introduce more saving and restoring of states. + batchSize: 3, + // The words that can be found in the collection. + words: ['these', 'are', 'test', 'words'], + /* + * Helper function to advance a cursor, and verify that the documents that come out are + * what we'd expect. + */ + advanceCursor: function advanceCursor(cursor, verifier) { + // Keep track of the previous doc in case the verifier is trying to verify a sorted + // query. 
+ var prevDoc = null; + var doc = null; + while (cursor.hasNext()) { + prevDoc = doc; + doc = cursor.next(); + assertAlways(verifier(doc, prevDoc), + 'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' + + 'Query plan: ' + tojson(cursor.explain()) + '\n' + + 'Previous doc: ' + tojson(prevDoc) + '\n' + + 'This doc: ' + tojson(doc)); + } + assertAlways.eq(cursor.itcount(), 0); + }, + /* + * Many subclasses will override the update function. To prevent lots of retyping, here is + * a prototype update that accepts a function which produces an update doc + * (e.g. function getUpdateDoc() { return { $set: { c: Random.randInt(data.nDocs) } }; }) + * and returns a function which updates a random doc from the collection using that + * update function. The returned function can be used to replace the update state. + */ + genUpdateFunction: function genUpdateFunction(getUpdateDoc) { + return function updateFunction(db, collName) { + var id = Random.randInt(this.nDocs); + var randDoc = db[collName].findOne({ _id: id }); + if (randDoc === null) { + return; + } + assertAlways.writeOK(db[collName].update(randDoc, getUpdateDoc())); + }; + } + }; + + var states = { + /* + * Update a random document from the collection. + */ + update: function update(db, collName) { + var id = Random.randInt(this.nDocs); + var randDoc = db[collName].findOne({ _id: id }); + if (randDoc === null) { + return; + } + var randVal = Random.randInt(this.nDocs); + assertAlways.writeOK(db[collName].update(randDoc, { $set: { a: randVal } })); + }, + + /* + * Remove a random document from the collection, then re-insert one to prevent losing + * documents. + */ + remove: function remove(db, collName) { + var id = Random.randInt(this.nDocs); + var doc = db[collName].findOne({ _id: id }); + if (doc !== null) { + var res = db[collName].remove({ _id: id }); + assertAlways.writeOK(res); + if (res.nRemoved > 0) { + assertAlways.writeOK(db[collName].insert(doc)); + } + } + }, + + /* + * Issue a query that will potentially yield and resume while documents are being updated. + * Subclasses will implement this differently + */ + query: function collScan(db, collName) { + var nMatches = 100; + var cursor = db[collName].find({ a: { $lt: nMatches } }) + .batchSize(2); + var collScanVerifier = function collScanVerifier(doc, prevDoc) { + return doc.a < nMatches; + }; + + this.advanceCursor(cursor, collScanVerifier); + } + }; + + /* + * Visual of FSM: + * + * _ + * / \ + * V / + * remove + * ^ ^ + * / \ + * v v + * -->update<---->query + * ^ \ ^ \ + * \_/ \_/ + * + */ + var transitions = { + update: { update: 0.334, remove: 0.333, query: 0.333 }, + remove: { update: 0.333, remove: 0.334, query: 0.333 }, + query: { update: 0.333, remove: 0.333, query: 0.334 } + }; + + /* + * Sets up the indices, sets a failpoint and lowers some yielding parameters to encourage + * more yielding, and inserts the documents to be used. + */ + function setup(db, collName) { + // Enable this failpoint to trigger more yields. In MMAPV1, if a record fetch is about to + // page fault, the query will yield. This failpoint will mock page faulting on such + // fetches every other time. + assertAlways.commandWorked(db.adminCommand({ configureFailPoint: 'recordNeedsFetchFail', + mode: 'alwaysOn' })); + + // Lower the following parameters to force even more yields. 
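+        // internalQueryExecYieldIterations is the number of PlanExecutor work cycles between
+        // yield checks, and internalQueryExecYieldPeriodMS is the elapsed time after which a
+        // yield is attempted; teardown() below restores what are assumed to be the defaults
+        // (128 iterations and 10 ms).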
+ assertAlways.commandWorked( + db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 5 }) + ); + assertAlways.commandWorked( + db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 1 }) + ); + // Set up some data to query. + var N = this.nDocs; + var bulk = db[collName].initializeUnorderedBulkOp(); + for (var i = 0; i < N; i++) { + // Give each doc some word of text + var word = this.words[i % this.words.length]; + bulk.find({ _id: i }).upsert().updateOne( + { $set: { a: i, b: N - i, c: i, d: N - i, yield_text: word } } + ); + } + assertAlways.writeOK(bulk.execute()); + } + + /* + * Reset parameters and disable failpoint. + */ + function teardown(db, collName) { + assertAlways.commandWorked( + db.adminCommand({ configureFailPoint: 'recordNeedsFetchFail', mode: 'off'}) + ); + assertAlways.commandWorked( + db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 128 }) + ); + assertAlways.commandWorked( + db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 10 }) + ); + } + + return { + threadCount: 5, + iterations: 50, + startState: 'update', + states: states, + transitions: transitions, + setup: setup, + teardown: teardown, + data: data + }; + +})(); diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js new file mode 100644 index 0000000..91e22e9 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js @@ -0,0 +1,65 @@ +'use strict'; + +load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +load('jstests/concurrency/fsm_workloads/yield.js'); // for $config + +/* + * yield_and_hashed.js (extends yield.js) + * + * Intersperse queries which use the AND_HASH stage with updates and deletes of documents they may + * match. + */ +var $config = extendWorkload($config, function($config, $super) { + + /* + * Issue a query that will use the AND_HASH stage. This is a little tricky, so use + * stagedebug to force it to happen. Unfortunately this means it can't be batched. + */ + $config.states.query = function andHash(db, collName) { + var nMatches = 100; + assertAlways.lte(nMatches, this.nDocs); + // Construct the query plan: two ixscans under an andHashed. + // Scan c <= nMatches + var ixscan1 = { ixscan: { args: { name: 'stages_and_hashed', keyPattern:{ c: 1 }, + startKey: { '': nMatches }, endKey: {}, + endKeyInclusive: true, direction: -1 } } }; + + // Scan d >= this.nDocs - nMatches + var ixscan2 = { ixscan: { args: { name: 'stages_and_hashed', keyPattern:{ d: 1 }, + startKey: { '': this.nDocs - nMatches }, endKey: {}, + endKeyInclusive: true, direction: 1 } } }; + + var andix1ix2 = { andHash: { args: { nodes: [ixscan1, ixscan2] } } }; + var res = db.runCommand({ stageDebug: { plan: andix1ix2, collection: collName } }); + assertAlways.commandWorked(res); + for (var i = 0; i < res.results.length; i++) { + var result = res.results[i]; + assertAlways.lte(result.c, nMatches); + assertAlways.gte(result.d, this.nDocs - nMatches); + } + }; + + /* + * A function which produces a random update doc (e.g. { $set: { c: 2, d: 10 } }), that can + * be used to update a random doc which may match the query. 
+     */
+    function getUpdateDoc() {
+        var newC = Random.randInt($config.data.nDocs);
+        var newD = Random.randInt($config.data.nDocs);
+        return { $set: { c: newC, d: newD } };
+    }
+
+    $config.states.update = $config.data.genUpdateFunction(getUpdateDoc);
+
+    $config.setup = function(db, collName) {
+        $super.setup.apply(this, arguments);
+
+        assertAlways.commandWorked(db[collName].ensureIndex({ c: 1 }));
+        assertAlways.commandWorked(db[collName].ensureIndex({ d: 1 }));
+    };
+
+    return $config;
+});
+
+
+
diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
new file mode 100644
index 0000000..de7bc24
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
@@ -0,0 +1,65 @@
+'use strict';
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+/*
+ * yield_and_sorted.js (extends yield.js)
+ *
+ * Intersperse queries which use the AND_SORTED stage with updates and deletes of documents they
+ * may match.
+ */
+var $config = extendWorkload($config, function($config, $super) {
+
+    /*
+     * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
+     * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+     */
+    $config.states.query = function andSorted(db, collName) {
+        // Not very many docs returned in this, so loop to increase chances of yielding in the
+        // middle.
+        for (var i = 0; i < 100; i++) {
+            // Construct the query plan: two ixscans under an andSorted.
+            // Scan c == 0
+            var ixscan1 = { ixscan: { args: { name: 'stages_and_sorted', keyPattern: { c: 1 },
+                                              startKey: { '': 0 }, endKey: { '': 0 },
+                                              endKeyInclusive: false, direction: 1 } } };
+            // Scan d == this.nDocs
+            var ixscan2 = { ixscan: { args: { name: 'stages_and_sorted', keyPattern: { d: 1 },
+                                              startKey: { '': this.nDocs },
+                                              endKey: { '': this.nDocs },
+                                              endKeyInclusive: false, direction: -1 } } };
+
+            // Intersect the two
+            var andix1ix2 = { andSorted: { args: { nodes: [ixscan1, ixscan2] } } };
+            var res = db.runCommand({ stageDebug: { collection: collName, plan: andix1ix2 } });
+            assertAlways.commandWorked(res);
+            for (var j = 0; j < res.results.length; j++) {
+                var result = res.results[j];
+                assertAlways.eq(result.c, 0);
+                assertAlways.eq(result.d, this.nDocs);
+            }
+        }
+    };
+
+    /*
+     * A function which produces a random update doc (e.g. { $set: { c: 2, d: 10 } }), that can
+     * be used to update a random doc which may match the query.
+     */
+    function getUpdateDoc() {
+        var newC = Random.randInt($config.data.nDocs);
+        var newD = Random.randInt($config.data.nDocs);
+        return { $set: { c: newC, d: newD } };
+    }
+
+    $config.states.update = $config.data.genUpdateFunction(getUpdateDoc);
+
+    $config.setup = function(db, collName) {
+        $super.setup.apply(this, arguments);
+
+        assertAlways.commandWorked(db[collName].ensureIndex({ c: 1 }));
+        assertAlways.commandWorked(db[collName].ensureIndex({ d: 1 }));
+    };
+
+    return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js
new file mode 100644
index 0000000..558afd0
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_fetch.js
@@ -0,0 +1,50 @@
+'use strict';
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+/*
+ * yield_fetch.js (extends yield.js)
+ *
+ * Intersperse queries which use the FETCH stage with updates and deletes of documents they may
+ * match.
+ */
+var $config = extendWorkload($config, function($config, $super) {
+
+    /*
+     * Issue a query that will use the FETCH stage.
+     */
+    $config.states.query = function fetch(db, collName) {
+        var nMatches = 100;
+
+        var cursor = db[collName].find({ a: { $lt: nMatches } })
+                                 .batchSize(this.batchSize);
+
+        var verifier = function fetchVerifier(doc, prevDoc) {
+            return doc.a < nMatches;
+        };
+
+        this.advanceCursor(cursor, verifier);
+    };
+
+    /*
+     * A function which produces a random update doc (e.g. { $set: { a: 10 } }), that can be used
+     * to update a random doc which may match the query.
+     */
+    function getUpdateDoc() {
+        var newVal = Random.randInt($config.data.nDocs);
+        return { $set: { a: newVal } };
+    }
+
+    $config.states.update = $config.data.genUpdateFunction(getUpdateDoc);
+
+    $config.setup = function(db, collName) {
+        $super.setup.apply(this, arguments);
+
+        assertAlways.commandWorked(db[collName].ensureIndex({ a: 1, b: 1 }));
+    };
+
+    return $config;
+});
+
+
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js
new file mode 100644
index 0000000..5969e37
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js
@@ -0,0 +1,103 @@
+'use strict';
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+/*
+ * yield_geo_near.js (extends yield.js)
+ *
+ * Intersperse geo $near queries with updates and deletes of documents they may match.
+ */
+var $config = extendWorkload($config, function($config, $super) {
+
+    /*
+     * Use a geo $near query to find points near the origin.
+     */
+    $config.states.query = function geoNear(db, collName) {
+        // A $maxDistance of 5 returns roughly 80 of the grid points inserted in setup()
+        // around the origin.
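+        // (Back-of-the-envelope check: setup() inserts a unit-spaced grid of points, and
+        // roughly pi * 5^2 ~= 78 grid points lie within distance 5 of the origin.)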
+ var maxDistance = 5; + + var cursor = db[collName].find({ geo: { $near: [0,0], $maxDistance: maxDistance } }) + .batchSize(this.batchSize); + + var verifier = function geoNearVerifier(doc, prevDoc) { + var nx = doc.geo[0]; + var ny = doc.geo[1]; + var newDistance = Math.sqrt(Math.pow(nx, 2) + Math.pow(ny, 2)); + var correctOrder = true; + if (prevDoc !== null) { + var ox = prevDoc.geo[0]; + var oy = prevDoc.geo[1]; + var oldDistance = Math.sqrt(Math.pow(ox, 2) + Math.pow(oy, 2)); + correctOrder = (oldDistance <= newDistance); + } + return newDistance <= maxDistance && correctOrder; + }; + + this.advanceCursor(cursor, verifier); + }; + + /* + * A function which produces a random update doc (e.g. { $set: { geo: [0,10] } }), that can be + * used to update a random doc which may match the query. + */ + function getUpdateDoc() { + var P = Math.floor(Math.sqrt($config.data.nDocs)); + var newX = Random.randInt(P); + var newY = Random.randInt(P); + return { $set: { geo: [newX, newY] } }; + } + + $config.states.update = $config.data.genUpdateFunction(getUpdateDoc); + + /* + * We'll need to modify the advanceCursor function, since someone could conceivably remove the + * 2d index out from under us. In which case, the query would fail, but not until the first + * next() is called. + */ + $config.data.advanceCursor = function advanceCursor(cursor, verifier) { + // Keep track of the previous doc in case the verifier is trying to verify a sorted query. + var prevDoc = null; + var doc = null; + while (cursor.hasNext()) { + prevDoc = doc; + try { + doc = cursor.next(); + } catch(e) { + var errorText = "Error trying to get next doc out of geo query. "; + errorText += "Someone dropped the 2d index out from under query?\n"; + jsTestLog(errorText + e); + } + assertAlways(verifier(doc, prevDoc), + 'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' + + 'Query plan: ' + tojson(cursor.explain()) + '\n' + + 'Previous doc: ' + tojson(prevDoc) + '\n' + + 'This doc: ' + tojson(doc)); + } + assertAlways.eq(cursor.itcount(), 0); + }; + + /* + * Insert some docs in geo form and make a 2d index. + */ + $config.setup = function(db, collName) { + $super.setup.apply(this, arguments); + + + var P = Math.floor(Math.sqrt($config.data.nDocs)); + var i = this.nDocs; + // Set up some points to query (in a PxP grid around 0,0). + var bulk = db[collName].initializeUnorderedBulkOp(); + for (var x = -P; x < P; x++) { + for (var y = -P; y < P; y++) { + bulk.find({ _id: i }).upsert().replaceOne({ _id: i, geo: [x,y] }); + i++; + } + } + assertAlways.writeOK(bulk.execute()); + assertAlways.commandWorked(db[collName].ensureIndex({ geo: '2d' })); + }; + + return $config; +}); diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js new file mode 100644 index 0000000..f22a517 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js @@ -0,0 +1,34 @@ +'use strict'; + +load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +load('jstests/concurrency/fsm_workloads/yield.js'); // for $config + +/* + * yield_id_hack.js (extends yield.js) + * + * Intersperse queries which use the ID_HACK stage with updates and deletes of documents they may + * match. + */ +var $config = extendWorkload($config, function($config, $super) { + + /* + * Issue a query that will use the ID_HACK stage. This cannot be batched, so issue a + * number of them to increase the chances of yielding between getting the key and looking + * up its value. 
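+     * (ID_HACK is the fast path for exact _id equality queries: each one does a single _id
+     * index lookup followed by a single document fetch.)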
+     */
+    $config.states.query = function idHack(db, collName) {
+        var nQueries = 100;
+        for (var i = 0; i < nQueries; i++) {
+            assertAlways.lte(db[collName].find({ _id: i }).itcount(), 1);
+            var res = db[collName].findOne({ _id: i });
+            if (res !== null) {
+                assertAlways.eq(i, res._id);
+            }
+        }
+    };
+
+    return $config;
+});
+
+
+
diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
new file mode 100644
index 0000000..18af4d7
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
@@ -0,0 +1,57 @@
+'use strict';
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+/*
+ * yield_rooted_or.js (extends yield.js)
+ *
+ * Intersperse queries which use a rooted OR stage with updates and deletes of documents they may
+ * match.
+ */
+var $config = extendWorkload($config, function($config, $super) {
+
+    /*
+     * Issue a query with an OR stage at the root of the plan, unioning index scans over the
+     * { c: 1 } and { d: 1 } indexes created in setup().
+     */
+    $config.states.query = function rootedOr(db, collName) {
+        var nMatches = 100;
+
+        var cursor = db[collName].find({ $or: [ { c: { $lte: nMatches/2 } },
+                                                { d: { $lte: nMatches/2 } } ]
+                                       })
+                                 .batchSize(this.batchSize);
+
+        var verifier = function rootedOrVerifier(doc, prevDoc) {
+            return (doc.c <= nMatches/2 || doc.d <= nMatches/2);
+        };
+
+        this.advanceCursor(cursor, verifier);
+    };
+
+    /*
+     * A function which produces a random update doc (e.g. { $set: { c: 2, d: 10 } }), that can
+     * be used to update a random doc which may match the query.
+     */
+    function getUpdateDoc() {
+        var newC = Random.randInt($config.data.nDocs);
+        var newD = Random.randInt($config.data.nDocs);
+        return { $set: { c: newC, d: newD } };
+    }
+
+    $config.states.update = $config.data.genUpdateFunction(getUpdateDoc);
+
+    $config.setup = function(db, collName) {
+        $super.setup.apply(this, arguments);
+
+        assertAlways.commandWorked(db[collName].ensureIndex({ c: 1 }));
+        assertAlways.commandWorked(db[collName].ensureIndex({ d: 1 }));
+    };
+
+    return $config;
+});
+
+
+
+
diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js
new file mode 100644
index 0000000..a56fa15
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_sort.js
@@ -0,0 +1,57 @@
+'use strict';
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+/*
+ * yield_sort.js (extends yield.js)
+ *
+ * Intersperse queries which use the SORT stage with updates and deletes of documents they may
+ * match.
+ * This workload is blacklisted until SERVER-17011 is resolved: updates during the SORT stage can
+ * cause docs to be returned out of order when the query is unindexed and has a non-default batch size.
+ */
+var $config = extendWorkload($config, function($config, $super) {
+
+    /*
+     * Execute a query that will use the SORT stage.
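+     * (The { a: 1, b: 1 } index built in setup() cannot provide the { b: -1 } order across a
+     * range of 'a' values, so the plan needs a blocking SORT stage.)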
+ */ + $config.states.query = function sort(db, collName) { + var nMatches = 100; + var cursor = db[collName].find({ a: { $lt: nMatches } }) + .sort({ b: -1 }) + .batchSize(this.batchSize); + + var verifier = function sortVerifier(doc, prevDoc) { + var correctOrder = true; + if (prevDoc !== null) { + correctOrder = (doc._id <= prevDoc._id); + } + return doc.a < nMatches && correctOrder; + }; + + this.advanceCursor(cursor, verifier); + }; + + /* + * A function which produces a random update doc (e.g. { $set: { a: 2, b: 10 } }), that can + * be used to update a random doc which may match the query. + */ + function getUpdateDoc() { + var newA = Random.randInt($config.data.nDocs); + var newB = Random.randInt($config.data.nDocs); + return { $set: { a: newA, b: newB } }; + } + + $config.states.update = $config.data.genUpdateFunction(getUpdateDoc); + + $config.setup = function(db, collName) { + $super.setup.apply(this, arguments); + + assertAlways.commandWorked(db[collName].ensureIndex({ a: 1, b: 1 })); + }; + + return $config; +}); + + diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js new file mode 100644 index 0000000..58683e9 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js @@ -0,0 +1,61 @@ +'use strict'; + +load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +load('jstests/concurrency/fsm_workloads/yield.js'); // for $config + +/* + * yield_sort_merge.js (extends yield.js) + * + * Intersperse queries which use the SORT_MERGE stage with updates and deletes of documents they + * may match. + */ +var $config = extendWorkload($config, function($config, $super) { + + /* + * Execute a query that will use the SORT_MERGE stage. + */ + $config.states.query = function sortMerge(db, collName) { + var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected. + + // Build an array [0, nMatches). + var matches = []; + for (var i = 0; i < nMatches; i++) { + matches.push(i); + } + + var cursor = db[collName].find({ a: { $in: matches } }) + .sort({ b: -1 }) + .batchSize(this.batchSize); + + var verifier = function sortMergeVerifier(doc, prevDoc) { + var correctOrder = true; + if (prevDoc !== null) { + correctOrder = (doc.b <= prevDoc.b); + } + return doc.a < nMatches && correctOrder; + }; + + this.advanceCursor(cursor, verifier); + }; + + /* + * A function which produces a random update doc (e.g. { $set: { a: 2, b: 10 } }), that can + * be used to update a random doc which may match the query. 
+ */ + function getUpdateDoc() { + var newA = Random.randInt($config.data.nDocs); + var newB = Random.randInt($config.data.nDocs); + return { $set: { a: newA, b: newB } }; + } + + $config.states.update = $config.data.genUpdateFunction(getUpdateDoc); + + $config.setup = function(db, collName) { + $super.setup.apply(this, arguments); + + assertAlways.commandWorked(db[collName].ensureIndex({ a: 1, b: 1 })); + }; + + return $config; +}); + diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js new file mode 100644 index 0000000..1338ee3 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/yield_text.js @@ -0,0 +1,76 @@ +'use strict'; + +load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +load('jstests/concurrency/fsm_workloads/yield.js'); // for $config + +/* + * yield_text.js (extends yield.js) + * + * Intersperse queries which use the TEXT stage with updates and deletes of documents they may + * match. + */ +var $config = extendWorkload($config, function($config, $super) { + + /* + * Pick a random word and search for it using full text search. + */ + $config.states.query = function text(db, collName) { + var word = this.words[Random.randInt(this.words.length)]; + + var cursor = db[collName].find({ $text: { $search: word }, + yield_text: { $exists: true } }) + .batchSize(this.batchSize); + + var verifier = function textVerifier(doc, prevDoc) { + return doc.yield_text.indexOf(word) !== -1; + }; + + this.advanceCursor(cursor, verifier); + }; + + /* + * A function which produces a random update doc (e.g. { $set: { yield_text: 'test' } }), + * that can be used to update a random doc which may match the query. + */ + function getUpdateDoc() { + var newWord = $config.data.words[Random.randInt($config.data.words.length)]; + return { $set: { yield_text: newWord } }; + } + + $config.states.update = $config.data.genUpdateFunction(getUpdateDoc); + + /* + * We'll need to modify the advanceCursor function, since someone could conceivably remove the + * text index out from under us. In which case, the query would fail, but not until the first + * next() is called. + */ + $config.data.advanceCursor = function advanceCursor(cursor, verifier) { + // Keep track of the previous doc in case the verifier is trying to verify a sorted query. + var prevDoc = null; + var doc = null; + while (cursor.hasNext()) { + prevDoc = doc; + try { + doc = cursor.next(); + } catch(e) { + var errorText = "Error trying to get next doc out of text query. "; + errorText += "Someone dropped the text index out from under query?\n"; + jsTestLog(errorText + e); + } + assertAlways(verifier(doc, prevDoc), + 'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' + + 'Query plan: ' + tojson(cursor.explain()) + '\n' + + 'Previous doc: ' + tojson(prevDoc) + '\n' + + 'This doc: ' + tojson(doc)); + } + assertAlways.eq(cursor.itcount(), 0); + }; + + $config.setup = function(db, collName) { + $super.setup.apply(this, arguments); + + assertWhenOwnColl.commandWorked(db[collName].ensureIndex({ yield_text: 'text' })); + }; + + return $config; +}); diff --git a/jstests/core/getmore_invalidation.js b/jstests/core/getmore_invalidation.js index 51a6d8d..58104aa 100644 --- a/jstests/core/getmore_invalidation.js +++ b/jstests/core/getmore_invalidation.js @@ -191,4 +191,39 @@ assert.neq([15, 15], nextDoc.geo); assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0); + // Case #10: sort with deletion invalidation. 
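+    // Return the first two results of a query sorted by 'b' (batchSize 2), delete one of the
+    // matching documents, then check that the getmore still returns the final doc ({b: 3}).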
+    t.drop();
+    t.ensureIndex({a: 1});
+    t.insert({a: 1, b: 2});
+    t.insert({a: 3, b: 3});
+    t.insert({a: 2, b: 1});
+
+    cursor = t.find({a: {$in: [1,2,3]}}).sort({b: 1}).batchSize(2);
+    cursor.next();
+    cursor.next();
+
+    assert.writeOK(t.remove({a: 2}));
+
+    if (cursor.hasNext()) {
+        assert.eq(cursor.next().b, 3);
+    }
+
+    // Case #11: sort with mutation invalidation.
+    t.drop();
+    t.ensureIndex({a: 1});
+    t.insert({a: 1, b: 2});
+    t.insert({a: 3, b: 3});
+    t.insert({a: 2, b: 1});
+
+    cursor = t.find({a: {$in: [1,2,3]}}).sort({b: 1}).batchSize(2);
+    cursor.next();
+    cursor.next();
+
+    assert.writeOK(t.update({a: 2}, {$set: {a: 4}}));
+
+    count = cursor.itcount();
+    if (cursor.hasNext()) {
+        assert.eq(cursor.next().b, 3);
+    }
+
 })();
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index d93a463..7b58115 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -154,7 +154,7 @@ namespace mongo {
             PlanExecutor* rawExec;
             Status execStatus = PlanExecutor::make(txn, ws.release(), rootFetch, collection,
-                                                   PlanExecutor::YIELD_MANUAL, &rawExec);
+                                                   PlanExecutor::YIELD_AUTO, &rawExec);
             fassert(28536, execStatus);
             boost::scoped_ptr<PlanExecutor> exec(rawExec);