Core Server / SERVER-70810

SHARDING_FILTER stage missing on shards from cluster count command explain with query predicate

    • Type: Bug
    • Resolution: Unresolved
    • Priority: Major - P3
    • Affects Version/s: None
    • Component/s: None
    • Labels: None
    • Assigned Teams: Query Optimization
    • Operating System: ALL

      This bug has varying severity depending on the version. The query is

      db.coll.count({predicate}) 
      

      with secondary reads.
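
      For example, with a connection-level secondary read preference (the collection name and predicate below are illustrative placeholders):

      // Illustrative shell session; "coll" and {z: "z"} stand in for any sharded
      // collection and predicate.
      db.getMongo().setReadPref("secondary");
      db.coll.count({z: "z"});           // count routed to secondaries
      db.coll.explain().count({z: "z"}); // explain that should report SHARDING_FILTER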

      On 4.4, the result is incorrect and includes orphan documents. A SHARDING_FILTER stage is also missing from the explain plan.

      On 5.0 and 6.0, orphan documents are filtered out and the count is correct, but the SHARDING_FILTER stage is still not reported in the explain plan. This suggests the explain output is inaccurate and that the stage is actually present in the executed plan (see the check sketched after the repro script below).

      Repro script (based on shard_filtering.js):

       (function() {
          "use strict";
          
          load("jstests/libs/analyze_plan.js");
          
          // Deliberately inserts orphans outside of migration.
          TestData.skipCheckOrphans = true;
          const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
          const collName = "test.shardfilter";
          const mongosDb = st.s.getDB("test");
          const mongosColl = st.s.getCollection(collName);
          
          assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
          st.ensurePrimaryShard("test", st.shard1.name);
          assert.commandWorked(
              st.s.adminCommand({shardCollection: collName, key: {a: 1, "b.c": 1, "d.e.f": 1}}));
          
          // Put a chunk with no data onto shard0 in order to make sure that both shards get targeted.
          assert.commandWorked(st.s.adminCommand({split: collName, middle: {a: 20, "b.c": 0, "d.e.f": 0}}));
          assert.commandWorked(st.s.adminCommand({split: collName, middle: {a: 30, "b.c": 0, "d.e.f": 0}}));
          assert.commandWorked(st.s.adminCommand(
              {moveChunk: collName, find: {a: 25, "b.c": 0, "d.e.f": 0}, to: st.shard0.shardName}));
          
          // Insert some docs with complete shard keys.
          const docs = [
              {_id: 0, a: 1, b: {c: 1}, d: {e: {f: 1}}, g: 100, z: "z"},
              {_id: 1, a: 1, b: {c: 2}, d: {e: {f: 2}}, g: 100.9, z: "z"},
              {_id: 2, a: 1, b: {c: 3}, d: {e: {f: 3}}, g: "a", z: "z"},
              {_id: 3, a: 1, b: {c: 3}, d: {e: {f: 3}}, g: [1, 2, 3], z: "z"},
              {_id: 4, a: "a", b: {c: "b"}, d: {e: {f: "c"}}, g: null, z: "z"},
              {_id: 5, a: 1.0, b: {c: "b"}, d: {e: {f: Infinity}}, g: NaN, z: "z"},
          ];
          assert.commandWorked(mongosColl.insert(docs));
          assert.eq(mongosColl.find().itcount(), 6);
          
          // Insert some documents with valid partial shard keys to both shards. The versions of these
          // documents on shard0 are orphans, since all of the data is owned by shard1.
          const docsWithMissingAndNullKeys = [
              {_id: 6, a: "missingParts", z: "z"},
              {_id: 7, a: null, b: {c: 1}, d: {e: {f: 1}}, z: "z"},
              {_id: 8, a: "null", b: {c: null}, d: {e: {f: 1}}, z: "z"},
              {_id: 9, a: "deepNull", b: {c: 1}, d: {e: {f: null}}, z: "z"},
          ];
          assert.commandWorked(st.shard0.getCollection(collName).insert(docsWithMissingAndNullKeys));
          assert.commandWorked(st.shard1.getCollection(collName).insert(docsWithMissingAndNullKeys));
          
          // Insert orphan docs without missing or null shard keys onto shard0 and test that they get filtered
          // out.
          const orphanDocs = [
              {_id: 10, a: 100, b: {c: 10}, d: {e: {f: 999}}, g: "a", z: "z"},
              {_id: 11, a: 101, b: {c: 11}, d: {e: {f: 1000}}, g: "b", z: "z"}
          ];
          assert.commandWorked(st.shard0.getCollection(collName).insert(orphanDocs));
          assert.eq(mongosColl.find().itcount(), 10);
      
          // With primary read pref, count with predicate filters out orphans
          assert.eq(mongosColl.count({z: "z"}), 10);
          // The explain plan includes a sharding filter
          jsTestLog(mongosColl.explain().count({z: "z"}));
      
          mongosDb.shardfilter.getMongo().setReadPref("secondary");
          // With secondary read pref, count with predicate still filters out orphans
          assert.eq(mongosColl.count({z: "z"}), 10);
          // The following explain doesn't include a sharding filter
          jsTestLog(mongosColl.explain().count({z: "z"}));
          
          st.stop();
      })();
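
      To check programmatically whether the stage is reported, something like the following could be added to the repro just before st.stop() (a sketch only; it searches the serialized explain output instead of assuming an exact plan shape, since the nesting of per-shard winning plans varies across versions):

          // Hypothetical helper, not part of the original repro: returns true if the
          // serialized explain output mentions the SHARDING_FILTER stage anywhere.
          // planHasStage() from analyze_plan.js would be an alternative.
          function explainReportsShardingFilter(coll, predicate) {
              return tojson(coll.explain().count(predicate)).includes("SHARDING_FILTER");
          }

          // With the repro's read preference still set to secondary, this is expected
          // to print false on 5.0/6.0 even though orphans are filtered correctly;
          // under primary reads it prints true.
          jsTestLog(explainReportsShardingFilter(mongosColl, {z: "z"}));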
      

            Assignee: Backlog - Query Optimization (backlog-query-optimization)
            Reporter: Matt Boros (matt.boros@mongodb.com)
            Votes: 0
            Watchers: 10
