Uploaded image for project: 'Go Driver'
  1. Go Driver
  2. GODRIVER-2401

report error when connect to mongodb inside k8s

    • Type: Icon: Bug Bug
    • Resolution: Gone away
    • Priority: Icon: Major - P3 Major - P3
    • None
    • Affects Version/s: None
    • Component/s: None
    • Labels:

      Summary

      An application written with the Go driver reports an error when connecting to MongoDB inside k8s. If we change to directConnection=true, no error is reported, but there is a performance issue.

      Please provide the version of the driver. If applicable, please provide the MongoDB server version and topology (standalone, replica set, or sharded cluster).

      go 1.18

      require (
          github.com/urfave/cli/v2 v2.5.0
          go.mongodb.org/mongo-driver v1.9.0
      )

      MongoDB server version: 5.0.6

       

      How to Reproduce

      Steps to reproduce. If possible, please include a Short, Self Contained, Correct (Compilable), Example.

      We set up MongoDB as a standalone deployment in k8s, and then used a
      Go application to connect to MongoDB, but found a problem.
      If you connect directly to MongoDB, an error is reported. The
      error message is as follows:
      ````
      2022/04/14 09:30:00 server selection error: context deadline exceeded,
      current topology: { Type: ReplicaSetNoPrimary, Servers: [
      { Addr: mongo-a9f01-replica0-0-0.mongo-a9f01-replica0-headless.qfusion-admin:27017, Type: Unknown, Last error: connection() error occurred during connection handshake: dial tcp: lookup mongo-a9f01-replica0-0-0.mongo-a9f01-replica0-headless.qfusion-admin on 192.168.65.5:53: no such host },
      ] }
      ````

      The 192.168.65.5:53 here is the DNS server address inside k8s; an
      application outside the cluster cannot reach it, so asking
      192.168.65.5:53 to resolve the domain name
      mongo-a9f01-replica0-0-0.mongo-a9f01-replica0-headless.qfusion-admin
      fails with an error.

      I googled for a solution, and verified that using
      directConnection=true can solve this problem without reporting an
      error.

      However, when doing multiple concurrent data reads or data
      inserts, I found that increasing the number of concurrent threads has
      no effect and performance does not improve. Using the same
      application against a non-k8s deployment, performance can reach 600M
      without directConnection, but when connected with directConnection=true
      the performance drops to only 100M.

      I also tested the C driver; no error is reported even without directConnection=true.

      Additional Background

      Please provide any additional background information that may be helpful in diagnosing the bug.

      programs to test connect and performance issue 

      ```

      package main

      import (
          "context"
          "flag"
          "fmt"
          "net/http"
          _ "net/http/pprof" // registers /debug/pprof handlers on http.DefaultServeMux (used when -pprof is set)
          "sync"
          "time"

          "go.mongodb.org/mongo-driver/bson"
          "go.mongodb.org/mongo-driver/mongo"
          "go.mongodb.org/mongo-driver/mongo/options"
      )

      // Command-line configuration, populated by init via the flag package.
      var (
          srcUri      string // source mongodb connection URI
          srcDb       string // source database name
          srcColl     string // source collection name
          threadCount int    // number of concurrent insert goroutines
          docCount    int    // total number of documents to insert
          batchSize   int    // documents per InsertMany call
          pprof       bool   // whether to serve net/http/pprof on :6060
      )

      // init registers all command-line flags with their default values.
      func init() {
          flag.StringVar(&srcUri, "srcuri", "mongodb://root:dbmotion#123@10.10.150.207:27717", "source mongodb uri")
          flag.StringVar(&srcDb, "srcdb", "db1", "srouce db")
          flag.StringVar(&srcColl, "srccoll", "t1", "srouce collection")
          flag.IntVar(&threadCount, "nt", 1, "goroutine count")
          flag.IntVar(&docCount, "ndoc", 1000000, "total docs to be inserted")
          flag.IntVar(&batchSize, "batch", 512, "insert batch size")
          flag.BoolVar(&pprof, "pprof", false, "start net/http/pprof")
      }

      func main() {
          flag.Parse()

          fmt.Printf("%s %d threads insert into %s/%s.%s %d docs, batch size: %d\n",
              nowStr(), threadCount, srcUri, srcDb, srcColl, docCount, batchSize)

          connOpt := options.Client().ApplyURI(srcUri)
          conn, err := mongo.Connect(nil, connOpt)
          if err != nil

      {         fmt.Println(err)         return     }
          defer conn.Disconnect(nil)

          if err := conn.Ping(nil, nil); err != nil {         fmt.Println(err)         return     }

          if err := conn.Database(srcDb).Collection(srcColl).Drop(nil); err != nil

      {         fmt.Println(err)         return     }

          begin := time.Now()

          nDoc := docCount / threadCount
          nIns := make([]int, threadCount)

          var wg sync.WaitGroup
          wg.Add(threadCount)

          for i := 0; i < threadCount; i++ {
              pRs := &nIns[i]
              *pRs = 0
              go func()

      {             insert(conn, srcDb, srcColl, nDoc, batchSize, pRs)             wg.Done()         }

      ()
          }

          go printStat(nIns, 10)

          if pprof {
              go func()

      {             http.ListenAndServe(":6060", nil)         }

      ()
              fmt.Printf("pprof on :6060\n")
          }
          wg.Wait()

          elapse := time.Since(begin)
          totalDoc := 0
          for _, val := range nIns

      {         totalDoc += val     }

          totalMB := 1.0 * float64(totalDoc) * 10 / 1024
          fmt.Printf("%s total insert %d docs, %3.f doc/s, %.3f MB/s\n", nowStr(),
              totalDoc, float64(totalDoc)/elapse.Seconds(), totalMB/elapse.Seconds())
      }

      func insert(conn *mongo.Client, dbName string, collName string, nDoc int, batchSize int, nInserted *int) {
          var binData []byte
          for i := 0; i < 1024; i++

      {         binData = append(binData, byte('A'+i%26))     }

          var doc bson.D
          for i := 0; i < 10; i++

      {         colName := fmt.Sprintf("c%d", i)         doc = append(doc, bson.E\{Key: colName, Value: binData}

      )
          }

          coll := conn.Database(dbName).Collection(collName)
          optIns := options.InsertMany() //.SetBypassDocumentValidation(true)

          var docs []interface{}

          for i := 0; i < nDoc; i++ {
              docs = append(docs, doc)
              if len(docs) >= batchSize {
                  if _, err := coll.InsertMany(nil, docs, optIns); err != nil

      {                 fmt.Println(err)                 return             }

                  *nInserted += len(docs)
                  docs = nil
              }
          }

          if len(docs) >= batchSize {
              if _, err := coll.InsertMany(nil, docs, optIns); err != nil

      {             fmt.Println(err)             return         }

              *nInserted += len(docs)
          }
      }

      // nowStr returns the current local time rendered as
      // "YYYY-MM-DD hh:mm:ss", used to prefix every log line.
      func nowStr() string {
          const layout = "2006-01-02 15:04:05"
          return time.Now().Format(layout)
      }

      func printStat(nIns []int, itvS int) {
          old := make([]int, len(nIns))

          tiker := time.NewTicker(time.Duration(itvS) * time.Second)
          for {
              copy(old, nIns)
              <-tiker.C

              totalDoc := 0
              totalMB := 0.0
              for i := 0; i < len(nIns); i++

      {             deltaIns := nIns[i] - old[i]             deltaMB := float64(deltaIns * 10 / 1024)             totalDoc += deltaIns             totalMB += deltaMB             fmt.Printf("%s t-%d insert %d docs, %3.f doc/s, %.3f MB/s\n", nowStr(), i,                 deltaIns, float64(deltaIns)/float64(itvS), deltaMB/float64(itvS))         }

              fmt.Printf("%s all insert %d docs, %3.f doc/s, %.3f MB/s\n", nowStr(),
                  totalDoc, float64(totalDoc)/float64(itvS), totalMB/float64(itvS))
          }
      }

      ```

       

      run the programs with './insert -srcuri="mongodb://root:dbmotion#123@10.10.150.208:27717/?directConnection=false" -nt 1  -trace'

       

            Assignee:
            matt.dale@mongodb.com Matt Dale
            Reporter:
            pickup112@gmail.com pickup li
            Votes:
            0 Vote for this issue
            Watchers:
            3 Start watching this issue

              Created:
              Updated:
              Resolved: