(function() {
"use strict";

const memoryLimitMb = 1;
const memoryLimitBytes = memoryLimitMb * 1024 * 1024;

// Start mongod with a reduced $group memory limit for both the classic and SBE engines.
const conn = MongoRunner.runMongod({
    setParameter: {
        internalDocumentSourceGroupMaxMemoryBytes: memoryLimitBytes,
        internalQuerySlotBasedExecutionHashAggApproxMemoryUseInBytesBeforeSpill: memoryLimitBytes
    }
});
const testDb = conn.getDB(jsTestName());

// Create a collection whose total data size exceeds the $group memory limit.
testDb.largeColl.drop();
const largeStr = "A".repeat(1024 * 1024);  // 1MB string.
for (let i = 0; i < memoryLimitMb + 1; ++i) {
    assert.commandWorked(testDb.largeColl.insert({x: i, largeStr: largeStr + i}));
}

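// Optional sanity check (a sketch): the collection's uncompressed data size, as reported by
// dataSize(), should exceed the configured memory limit so that the $group below must spill.
assert.gt(testDb.largeColl.dataSize(), memoryLimitBytes);
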
const pipeline = [{$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];
// Make sure that the pipeline needs to spill to disk: it must fail when disk use is disallowed.
assert.throwsWithCode(() => testDb.largeColl.aggregate(pipeline, {allowDiskUse: false}),
                      ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
// With spilling to disk allowed, the same pipeline should succeed.
testDb.largeColl.aggregate(pipeline, {allowDiskUse: true});

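// Optional sanity check (a sketch): every document has a unique largeStr value, so the $group
// should produce one group per document even when it spills to disk.
const numGroups = testDb.largeColl.aggregate(pipeline, {allowDiskUse: true}).itcount();
assert.eq(numGroups, memoryLimitMb + 1, "unexpected number of groups when spilling is allowed");
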
MongoRunner.stopMongod(conn);
})();