Repro
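(The collection setup is not included in the ticket; the following is an assumed minimal setup consistent with the explain output below, i.e. a time-series collection with timeField "time" and metaField "mm". The inserted document is only an example.)

> // Assumed setup, not from the original report; field names taken from the explain output below.
> db.createCollection("ts3", {timeseries: {timeField: "time", metaField: "mm"}});
> db.ts3.insertOne({time: ISODate(), mm: 1, x: 1, y: 2});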
> db.ts3.explain().aggregate([{$project: {y: 1}}, {$match: {x: {$lt: 0}}}]);
{
  "explainVersion" : "1",
  "stages" : [
    {
      "$cursor" : {
        "queryPlanner" : {
          "namespace" : "test.system.buckets.ts3",
          "indexFilterSet" : false,
          "parsedQuery" : { },
          "queryHash" : "FCBE9F38",
          "planCacheKey" : "FCBE9F38",
          "maxIndexedOrSolutionsReached" : false,
          "maxIndexedAndSolutionsReached" : false,
          "maxScansToExplodeReached" : false,
          "winningPlan" : {
            "stage" : "COLLSCAN",
            "direction" : "forward"
          },
          "rejectedPlans" : [ ]
        }
      }
    },
    {
      "$_internalUnpackBucket" : {
        "include" : [
          "_id",
          "y"
        ],
        "timeField" : "time",
        "metaField" : "mm",
        "bucketMaxSpanSeconds" : 3600,
        "assumeNoMixedSchemaData" : true,
        "eventFilter" : {
          "x" : {
            "$lt" : 0
          }
        }
      }
    }
  ],
The eventFilter contains the discarded field 'x'. If any top-level field of the eventFilter is not in the bucketSpec's fieldSet, it is a discarded field and the stage cannot be pushed down, because the SBE filter generator cannot refer to slot(s) for discarded field(s), which are not returned from the 'block_to_row' stage. It would be better to optimize such an event filter further so that it does not reference any discarded fields.
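As an illustration only (this snippet is not part of the repro, just a conceptual sketch in the shell), the discarded-field condition can be seen by comparing the unpack stage's 'include' list with the fields referenced by the eventFilter:

// Illustrative sketch; values copied from the explain output above.
const include = ["_id", "y"];                          // bucketSpec fieldSet after {$project: {y: 1}}
const eventFilterFields = Object.keys({x: {$lt: 0}});  // ["x"]
const referencesDiscardedField = eventFilterFields.some(f => !include.includes(f));
// referencesDiscardedField === true, so SBE pushdown is blocked for this eventFilter.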