[SERVER-5627] mongodb hangs when doing mapreduce Created: 17/Apr/12  Updated: 11/Jul/16  Resolved: 13/Jun/12

Status: Closed
Project: Core Server
Component/s: JavaScript, MapReduce
Affects Version/s: 2.0.2
Fix Version/s: 2.1.2

Type: Bug Priority: Major - P3
Reporter: Ping Yin Assignee: Antoine Girbal
Resolution: Done Votes: 4
Labels: None
Remaining Estimate: Not Specified
Time Spent: Not Specified
Original Estimate: Not Specified
Environment:

Distributor ID: Ubuntu
Description: Ubuntu 10.04.3 LTS
Release: 10.04
Codename: lucid
Linux data2 2.6.32-33-generic #72-Ubuntu SMP Fri Jul 29 21:07:13 UTC 2011 x86_64 GNU/Linux


Operating System: Linux

 Description   

MongoDB hung this weekend and last weekend while doing a mapreduce, at a very
early stage of the job. Following is the related log (I think); the full log is
at https://gist.github.com/2395867

The db is fully locked without yielding, and all collection-related operations hang. A deadlock, maybe?

The mapreduce code itself should be fine: after I restarted mongod and
reran the mapreduce, it finished in less than 10 minutes.

Following are the log and code. Unfortunately, the mongod log was lost when I restarted mongod.

mongostat --port 27018 5
connected to: 127.0.0.1:27018
insert  query update delete getmore command flushes mapped  vsize    res faults locked % idx miss %     qr|qw   ar|aw  netIn netOut  conn        set repl       time 
     0      0      0      0       0       1       0    75g   151g   9.1g      0        0          0       5|3     5|2   113b   424b    16 rs_default    M   08:58:12 
     0      0      0      0       0       1       0    75g   151g   9.1g      0        0          0       5|3     5|2   163b   480b    16 rs_default    M   08:58:17 
 
PRIMARY> db.currentOp()
{
       "inprog" : [
               {
                       "opid" : 15710450,
                       "active" : true,
                       "lockType" : "read",
                       "waitingForLock" : true,
                       "secs_running" : 17965,
                       "op" : "getmore",
                       "ns" : "local.oplog.rs",
                       "query" : {
 
                       },
                       "client" : "192.168.0.21:55171",
                       "desc" : "conn",
                       "threadId" : "0x7f1e8d1f3700",
                       "connectionId" : 5,
                       "numYields" : 0
               },
               {
                       "opid" : 15710430,
                       "active" : true,
                       "lockType" : "read",
                       "waitingForLock" : false,
                       "secs_running" : 17966,
                       "op" : "query",
                       "ns" : "app_wowsearch.events.day",
                       "query" : {
                               "$msg" : "query not recording (too large)"
                       },
                       "client" : "127.0.0.1:33900",
                       "desc" : "conn",
                       "threadId" : "0x7f1e8daf5700",
                       "connectionId" : 19963,
                       "msg" : "m/r: (1/3) emit phase 0/835941 0%",
                       "progress" : {
                               "done" : 0,
                               "total" : 835941
                       },
                       "numYields" : 3
               },
 
 
db.serverStatus()
{
       "host" : "data2:27018",
       "version" : "2.0.2",
       "process" : "mongod",
       "uptime" : 602434,
       "uptimeEstimate" : 594271,
       "localTime" : ISODate("2012-04-16T00:59:47.391Z"),
       "globalLock" : {
               "totalTime" : 602434261059,
               "lockTime" : 56141056582,
               "ratio" : 0.09319034492379538,
               "currentQueue" : {
                       "total" : 8,
                       "readers" : 5,
                       "writers" : 3
               },
               "activeClients" : {
                       "total" : 7,
                       "readers" : 5,
                       "writers" : 2
               }
       },

Here is the map-reduce code; a few utility functions are not included. If you want them, I can provide them too.

function rollup_cycle_users(cycle, firstday) { //{{{
        var m = function() { //{{{
                var l = this.l || {};
 
                var mk = {id: this.id, l: {}};
                var keys = ['s', 'rt', 'rc', 'rf', 'va'];
                keys.forEach(function(k) {
                        mk.l[k] = l[k] || null;
                });
 
                var mv = extend(copy_obj(this, ['model', 'ch', 'city', 'province', 'avn', 'resolution']),
                                {
                                        pvs: 0,
                                        downloads: 0,
                                        searchs: 0,
                                        video_pls: 0,
                                        days: {},
                                });
                var e2dayname = {
                        search: 'sedays',   
                        download: 'dldays', 
                        pv: 'pvdays',       
                        video_play: 'vpdays',
                };
                switch(this.e){
                        case 'download' : mv.downloads += this.c || 1; break;
                        case 'pv' : mv.pvs += this.c || 1; break;
                        case 'search' : mv.searchs += this.c || 1; break;
                        case 'video_play' : mv.video_pls += this.c || 1; break;
                }
                var day_name = e2dayname[this.e];
                if (day_name) {
                        var d = 1 + (this.ts.getTime() - base_ts.getTime()) / (24 * 3600000); // start from 1
                        mv.days[day_name] = {};
                        mv.days[day_name][d] = true;
                        emit(mk, mv);       
                }
        }; //}}}
 
        var r = function(key, emits) { //{{{
                var total0 = {downloads: 0, pvs: 0, video_pls:0, searchs:0, days: {}};
                if (emits.length == 0) {    
                        return total0;      
                }
                var total = extend(emits[0], total0);
 
                var tdays = total.days;     
                emits.forEach(function(emit) {
                        for (var day_name in emit.days) {
                                // extend() already merges all of this emit's day flags
                                tdays[day_name] = extend(tdays[day_name] || {}, emit.days[day_name]);
                        }
                        total.downloads += emit.downloads;
                        total.pvs += emit.pvs;
                        total.video_pls += emit.video_pls;
                        total.searchs += emit.searchs;
                });
                return total;
        }; //}}}
 
        var f = function(key, total) { //{{{
                function count(days) {      
                        days = days || [];  
                        return days.length; 
                }
                function max_continuous_count(days) { //{{{
                        days = days || [];  
                        if (!days.length) return 0;
 
                        var max_ccount = 0; 
                        var ccount = 0;     
                        days = days.concat([-1]); // guard
                        for (i in days) {   
                                if (days[i] - days[i-1] != 1 || i == 0) {
                                        max_ccount = Math.max(max_ccount, ccount);
                                        ccount = 1;
                                }
                                else {      
                                        ccount += 1;
                                }
                        }
 
                        return max_ccount;  
                } //}}}
                function max_delta(days) {
                        var deltas = delta(days);
                        if (deltas.length == 0) return 0;
                        return Math.max.apply(null, deltas);
                }
                function min_delta(days) {
                        var deltas = delta(days);
                        if (deltas.length == 0) return 0;
                        return Math.min.apply(null, deltas);
                }
 
 
                for (d in total.days) {     
                        total.days[d] = keys(total.days[d]).sort(function(a, b) {return a-b;});
                }
                var days2al = {pvdays: 'pv', dldays: 'dl', vpdays: 'vp', sedays: 'se'}; // see e2dayname in the map function: days with visits, downloads, plays, searches
 
                var pvdays = total.days.pvdays || [];
                var dldays = total.days.dldays || [];
                var aldays = {};
                for (var cdi in total.days) {
                        var cd = total.days[cdi];
                        if (!cd) continue;  
                        aldays[days2al[cdi]] = {
                                d: count(cd),
                                contd: max_continuous_count(cd),
                                mindd: min_delta(cd),
                                maxdd: max_delta(cd),
                        };
                }
 
                var min_dl_day = -1;
                if (dldays.length) {
                        min_dl_day = Math.min.apply(null, dldays); // plain Math.min(array) would return NaN
                }
                var dlpvdays = filter(pvdays, function(d) {
                        return d > min_dl_day;
                });
                var dlpvd = count(dlpvdays);
 
                total = extend(total, {dlpvd: dlpvd});
                total = extend(total, aldays);
 
                return total;
        } //}}}
 
        var cycle2delta = {
                week: '7d',
                month: '1m',
        };
 
        var colname = get_scale_col_name(cycle, firstday);
        var mr_colname = 'mr.' + colname;   
        var ts0 = get_china_day(firstday);  
        var ts1 = date_add(ts0, cycle2delta[cycle]);
        var query = {ts: {$gte: ts0, $lt: ts1}}; // [ts0, ts1)
        db.events.day.mapReduce(
                        m, r, {
                                verbose: true,
                                out: mr_colname,
                                finalize: f,
                                query: query,
                                scope: {    
                                        base_ts: ts0,
                                        copy_obj: copy_obj,
                                        filter: filter,
                                        extend: extend,
                                        delta: delta,
                                        keys: keys,
                                },
                                jsMode: true,
                        }
                        );
 
        db[mr_colname].find().forEach(function(doc) {
                var ndoc = extend(doc._id, doc.value);
                db[colname].insert(ndoc);   
        });
 
} //}}}
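
For reference, a typical invocation looks like this (the cycle and date here are illustrative):

rollup_cycle_users('week', '2012-04-16');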



 Comments   
Comment by Antoine Girbal [ 13/Jun/12 ]

Ideally the behavior should be:

  • yield based on time elapsed instead of number of items (mr.cpp line 1101), as sketched below.
    This way it would help the db if MR is in a tough spot and every item processed takes a while.
    The problem is that the yielding code cannot easily be replaced with yieldSometimes(), since it tries to do operations while outside the lock (it calls checkSize, which usually does reduces).
  • throw away the JS context based on its size / number of objects.
    I could not find an easy way to get stats on the context; typically, if it is small there is no reason to throw it away.
    Otherwise we could try to use the overall heap size.

Based on the above 2 items, we may need another ticket scheduled for a later version.
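
A minimal sketch of the first idea, in shell-style JS rather than the actual mr.cpp C++ (yieldLock() is a hypothetical stand-in for the real lock-yielding internals):

var lastYield = Date.now();
function maybeYield() {
    // yield on elapsed time rather than item count, so that slow per-item JS
    // still lets other operations through
    if (Date.now() - lastYield >= 10) { // e.g. a 10 ms budget
        yieldLock();                    // hypothetical: release and reacquire the read lock
        lastYield = Date.now();
    }
}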

Comment by Antoine Girbal [ 13/Jun/12 ]

For now the following was done to mitigate the issue (a rough JS-flavoured sketch of both knobs follows):

  • yield the lock every 100 items instead of every 1000 when processing map or reduce
  • get rid of the JS context after every 10 uses instead of every 100
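
In shell-style JS, the shape of both knobs (the helpers are hypothetical stand-ins for the mr.cpp internals; only the constants come from the bullets above):

var YIELD_EVERY = 100;           // was 1000
var RECYCLE_CONTEXT_EVERY = 10;  // was 100
var processed = 0, contextUses = 0;

while (cursor.hasNext()) {
    mapOrReduceOne(cursor.next());                   // hypothetical
    if (++processed % YIELD_EVERY == 0) yieldLock(); // hypothetical: yield more often
}
if (++contextUses % RECYCLE_CONTEXT_EVERY == 0)
    freeJsContext();                                 // hypothetical: recycle sooner
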
Comment by auto [ 13/Jun/12 ]

Author: agirbal <antoine@10gen.com> (2012-06-12T19:24:37-07:00)
Message: SERVER-5627: mongodb hangs when doing mapreduce
Branch: master
https://github.com/mongodb/mongo/commit/bbc3ee201c0cd0a01d15c486ddfba0fb353f46e9

Comment by Ping Yin [ 13/May/12 ]

> This is because the group command and map/reduce both grab the js global interpreter lock. So only one of them can be running at a time.

@Randolph Tan The global lock is OK for me. But why do they block each other forever? Can't the deadlock be avoided? I.e., the group runs first (with the map-reduce waiting for the lock), and then the map-reduce runs.

Comment by Brett Kiefer [ 12/May/12 ]

Thanks, then that stopgap might make sense for us, since no single query should be generating a lot of objects for GC. If this is indeed our problem, then we've seen up to 90-second garbage collection cycles. Read and write locks aside, that does seem like a long time for the JS engine to be tied up, so I like your solution 3 above, though it sounds like 1 or 2 would be a good idea, too. Thanks!

Comment by Randolph Tan [ 12/May/12 ]

@Brett Closing several connections can indeed trigger long garbage collections. In fact, we have seen this in some of our back traces. And yes, the idea was to clean up the garbage of an m/r job as soon as possible instead of accumulating it. However, this trick will only be fool-proof if there can be only one active map-reduce on the server at any time.

Comment by Brett Kiefer [ 12/May/12 ]

The GC theory seems fairly sound from my perspective, if it is the case that closing several active connections is likely to kick off a lengthy GC run. Is that the case? If so, then we can try working around the problem using your one connection per JS-executing command (e.g. MapReduce or regex search) approach. If I understand you, the idea is to avoid long GC runs by causing immediate GC by closing connections right after executing their commands. Is that correct?

Comment by Randolph Tan [ 12/May/12 ]

@Ping Yin This is because the group command and map/reduce both grab the js global interpreter lock. So only one of them can be running at a time.

Comment by Ping Yin [ 12/May/12 ]

> this is probably due to the GC kicking in while MR is holding the read lock.
> If there are writes coming in, then all write / read ops will be stuck until the GC finishes.

The db hangs forever ("secs_running" : 17965), so is it still a GC problem? It seems my problem is: when a group task is running, a newly started map-reduce task hangs at the very beginning (done == 0; not started yet?).

 
"msg" : "m/r: (1/3) emit phase 0/835941 0%",
                       "progress" : {
                               "done" : 0,
                               "total" : 835941
                       },
                       "numYields" : 3
 

I am sure it is the group task that causes the map-reduce hang. I changed the crontab to schedule the group and map-reduce scripts at different times, and the hang has not happened again for two weeks. Before this change, the hang always happened at the weekend, when both the group and the map-reduce ran at the same time.

Comment by Daniel Pasette (Inactive) [ 11/May/12 ]

For those in non-sharded deployments, can you try creating a new connection to the server for each invocation of m/r to see if this mitigates the GC issues and let us know if that remedies the hangups?
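
From the mongo shell, that would look roughly like this (host and port taken from this report; m, r, and query are whatever you already pass):

// open a dedicated connection just for this m/r invocation
var conn = new Mongo("localhost:27018");
var mrdb = conn.getDB("app_wowsearch");
mrdb.events.day.mapReduce(m, r, { out: "mr.test", query: query }); // out name illustrative
// once this connection goes away, the server can discard its JS state
// instead of accumulating garbage across m/r runs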

Comment by Antoine Girbal [ 11/May/12 ]

This is probably due to the GC kicking in while MR is holding the read lock.
If there are writes coming in, then all write / read ops will be stuck until the GC finishes.

Possible solutions:
1. release the read lock before executing any JS (every record)
2. pull a batch of docs from the cursor faster, yield, and then run the JS functions
3. figure out ways to make GC smoother, e.g. by getting rid of contexts more often. We could throw away a context if too many objects were created with it. That still doesn't help for a single large run.
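
A sketch of option 2, again in shell-style JS with hypothetical lock helpers:

while (cursor.hasNext()) {
    // pull a batch of docs while holding the read lock...
    var batch = [];
    for (var n = 0; n < 100 && cursor.hasNext(); n++) {
        batch.push(cursor.next());
    }
    // ...then run the JS map function outside the lock
    releaseReadLock();                                // hypothetical
    batch.forEach(function(doc) { map.call(doc); });  // map as defined for the m/r job
    reacquireReadLock();                              // hypothetical
}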

Comment by Bruno Furtado [ 07/May/12 ]

@Dan

In my case I'm running a single replica set (not sharded) on a Xen VM. The workload was around 50 write ops per second before everything stopped working due to locking. Nothing that would obviously explain this, I think.

I don't have the exact stats from when it happened (and I had to take the M/R job off production because everything else was hanging) but it should be something close to this:

{
        "ns" : "*************",
        "count" : 2467701,
        "size" : 157932864,
        "avgObjSize" : 64,
        "storageSize" : 368488448,
        "numExtents" : 21,
        "nindexes" : 1,
        "lastExtentSize" : 64696320,
        "paddingFactor" : 1,
        "flags" : 1,
        "totalIndexSize" : 80410960,
        "indexSizes" : { "_id_" : 80410960 },
        "ok" : 1
}

Comment by Ping Yin [ 07/May/12 ]

The OS runs on bare hardware, no VM; no sharding; a replica set with 2 nodes and 1 arbiter.

Comment by Daniel Pasette (Inactive) [ 07/May/12 ]

As there are a few of you experiencing this, could you each provide some more details of the environment you're running in? Are you running in EC2 or on your own hardware? Could you each post collection stats as well? What sort of workload is on the server when this happens, in addition to the m/r? Are you running in a sharded cluster?

Thanks.

Comment by Bruno Furtado [ 04/May/12 ]

I'm also experiencing this issue on v2.0.1 on Debian. Just like Brett, we can still run currentOp. I ran it multiple times and noticed that the following op was "stalled" (its progress doesn't change at all, despite the fact that the op is marked as active). This has happened to me twice in over 50 runs of the job. I have no idea how to reproduce it, though.

{
        "opid" : 11213705,
        "active" : true,
        "lockType" : "read",
        "waitingForLock" : false,
        "secs_running" : 778,
        "op" : "query",
        "ns" : "trackerservice.events",
        "query" : { "$msg" : "query not recording (too large)" },
        "client" : "10.80.150.114:64845",
        "desc" : "conn",
        "threadId" : "0x600efb70",
        "connectionId" : 273,
        "msg" : "m/r: (1/3) emit phase 11329/205506 5%",
        "progress" : { "done" : 11329, "total" : 205506 },
        "killed" : true,
        "numYields" : 11
}

Comment by Brett Kiefer [ 04/May/12 ]

Yes, that would be consistent with what we're seeing in currentOp and with our understanding of MongoDB locking. We see a mapreduce op holding a lock for a long time. The mystery is why this happens right after we have disconnected a few open connections that had been active, when usually it holds the lock for a hundred milliseconds, max. We're only talking about closing 4 of our 144 open connections.

Today we stopped using the mapreduce, and we're seeing a much smaller version of the same problem, but with regex search queries. Normally they run quickly and with no problems, but when we disconnect a bunch of other active connections while one of these is running, it holds the lock for a few seconds.

Comment by Randolph Tan [ 04/May/12 ]

@Brett: this sounds like an operation is holding the write lock for a long time. The reason why you can still run currentOp is because it doesn't need a lock.

Comment by Brett Kiefer [ 04/May/12 ]

We're seeing something similar in production on v2.0.1 on Debian when we close active connections while a mapreduce job is running. Reads and writes go to 0 in mongostat, but we can still run currentOp. The DB comes back after several seconds - sometimes as much as a minute - and then processes its query backlog and goes on. We have no simple repro outside of our production environment.

Comment by Ping Yin [ 23/Apr/12 ]

generate_mail_data = function() {
  /*
    return mail_data =
          'launch_pre_five_province' =
            [
              [ 启动用户地区前五明细 (top-5 provinces of launching users), 17, 18]
              [ 广东 ,   1678 ,   393]
              [ 北京 ,   1482 ,   423]
            ]
          'launch_pre_five_device' =
            [
              [ 启动用户设备前五明细 (top-5 devices of launching users), 17, 18]
              [Dell Streak 10 Pro,     1445,    278]
              [Kindle Fire   ,         1095,    360]
            ]
          'statistic_times' =
            [
              [统计项目 (statistic item), 17, 18]
              [资源下载次数 (resource downloads), 10003, 10002]
              [软件下载次数 (software downloads), 10003, 10002]
              [游戏下载次数 (game downloads), 10003, 10002]
              [视频播放次数 (video plays), 10003, 10002]
            ]
          'video_pre_five_album' =
            [
              [视频播放前五明细 (top-5 video plays), 17,18]
              [郭德纲爆笑相声选集,  174, 52]
              [大尺度你懂得,    164, 63]
            ]
  */
  var be_yesterday, data, index, now, opt, opt_indexs, opts_count_events, raw_data, report, report_indexs, reports_group_visit, result, times, today, yesterday, _i, _j, _len, _len2;
  now = new Date;
  today = new Date(now.getFullYear(), now.getMonth(), now.getDate());
  yesterday = date_add(today, '-1d');
  be_yesterday = date_add(today, '-2d');
  reports_group_visit = {
    launch_pre_five_province: {
      name: '启动用户地区前五明细', // top-5 provinces of launching users
      type: 'group_visit',
      cond: {
        'e': 'l'
      },
      scale: 'day',
      period: [yesterday, today],
      groupby: ['province'],
      rows: {
        t: 'visitors'
      }
    },
    launch_pre_five_device: {
      name: '启动用户设备前五明细', // top-5 devices of launching users
      type: 'group_visit',
      cond: {
        'e': 'l'
      },
      scale: 'day',
      period: [yesterday, today],
      groupby: ['model'],
      rows: {
        t: 'visitors'
      }
    },
    video_pre_five_album: {
      name: '视频播放专辑前五明细', // top-5 video-play albums
      type: 'group_visit',
      cond: {
        'e': 'video_play'
      },
      scale: 'day',
      period: [yesterday, today],
      groupby: ['l.va'],
      rows: {
        t: 'visits'
      }
    }
  };
  opts_count_events = {
    res_download: {
      name: '资源下载次数', // resource download count
      cond: {
        "e": "download"
      },
      groupby: ['ts'],
      period: [be_yesterday, today],
      scale: 'day'
    },
    res_soft_download: {
      name: '应用下载次数', // app download count
      cond: {
        "e": "download",
        "l.rt": "soft"
      },
      groupby: ['ts'],
      period: [be_yesterday, today],
      scale: 'day'
    },
    res_game_download: {
      name: '游戏下载次数', // game download count
      cond: {
        "e": "download",
        "l.rt": "game"
      },
      groupby: ['ts'],
      period: [be_yesterday, today],
      scale: 'day'
    },
    video_plays: {
      name: '视频播放次数', // video play count
      cond: {
        "e": "video_play"
      },
      groupby: ['ts'],
      period: [be_yesterday, today],
      scale: 'day'
    }
  };
  result = {};
  report_indexs = ['launch_pre_five_province', 'launch_pre_five_device', 'video_pre_five_album'];
  opt_indexs = ['res_download', 'res_soft_download', 'res_game_download', 'video_plays'];
  for (_i = 0, _len = report_indexs.length; _i < _len; _i++) {
    index = report_indexs[_i];
    report = reports_group_visit[index];
    raw_data = generate_report_group_visit(report);
    if (raw_data === null) continue;
    data = [];
    raw_data[0][0] = report.name;
    data.push(raw_data[0]);
    raw_data.shift();
    raw_data.sort(function(a, b) {
      return b[1] - a[1];
    });
    data = data.concat(raw_data.slice(0, 5));
    result[index] = data;
  }
  times = [['统计条目', be_yesterday.getDate(), yesterday.getDate(), '增加/减少']]; // header: "statistic item", day before yesterday, yesterday, "increase/decrease"
  for (_j = 0, _len2 = opt_indexs.length; _j < _len2; _j++) {
    opt = opt_indexs[_j];
    opt = opts_count_events[opt];
    data = [];
    data.push(opt.name);
    raw_data = count_event(opt);
    if (!(raw_data && raw_data.length >= 2)) continue;
    if (raw_data[0].ts.getTime() > raw_data[1].ts.getTime()) {
      data = data.concat([raw_data[1].visits, raw_data[0].visits]);
    } else {
      data = data.concat([raw_data[0].visits, raw_data[1].visits]);
    }
    data.push(data[2] - data[1]);
    times.push(data);
  }
  result['statistic_times'] = times;
  return result;
};

Comment by Ping Yin [ 23/Apr/12 ]

Here is the log

At 4:00, this map-reduce job (at localhost) and a group job (at 192.168.0.21) started running simultaneously on the same collection (events.day).

Mon Apr 23 04:00:01 [initandlisten] connection accepted from 127.0.0.1:57571 #20062
Mon Apr 23 04:00:01 [conn20062] CMD: drop app_wowsearch.tmp.mr.events.day_1_inc
Mon Apr 23 04:00:01 [conn20062] build index app_wowsearch.tmp.mr.events.day_1_inc { 0: 1 }
Mon Apr 23 04:00:01 [conn20062] build index done 0 records 0.002 secs
Mon Apr 23 04:00:01 [conn20062] CMD: drop app_wowsearch.tmp.mr.events.day_1
Mon Apr 23 04:00:01 [conn20062] build index app_wowsearch.tmp.mr.events.day_1 { _id: 1 }
Mon Apr 23 04:00:01 [conn20062] build index done 0 records 0.002 secs
Mon Apr 23 04:00:02 [initandlisten] connection accepted from 192.168.0.21:44201 #20063
Mon Apr 23 04:00:02 [conn20063] command app_wowsearch.$cmd command: { group: { key: { ts: true, province: true }, cond: { e: "l", ts: { $gte: new Date(1335024000000),
$lt: new Date(1335110400000) } }, finalize: function cf__2348__f_(out) {
    delete out.uids;
}, initial: { visits: 0.0, visitors: {} }, ns: "events.day", $reduce: function cf__2347__f_(doc, out) {
    if (out.uids && !(doc.id in out.... } } ntoreturn:1 reslen:152703 530ms
Mon Apr 23 04:00:03 [conn20063] command app_wowsearch.$cmd command: { group: { key: { ts: true, model: true }, cond: { e: "l", ts: { $gte: new Date(1335024000000), $lt
: new Date(1335110400000) } }, finalize: function cf__2348__f_(out) {
    delete out.uids;
}, initial: { visits: 0.0, visitors: {} }, ns: "events.day", $reduce: function cf__2347__f_(doc, out) {
    if (out.uids && !(doc.id in out.... } } ntoreturn:1 reslen:185570 506ms
Mon Apr 23 04:00:23 [conn20061] end connection 192.168.0.11:57110
Mon Apr 23 04:00:23 [initandlisten] connection accepted from 192.168.0.11:57122 #20064
Mon Apr 23 04:00:53 [conn20064] end connection 192.168.0.11:57122
Mon Apr 23 04:00:53 [initandlisten] connection accepted from 192.168.0.11:57135 #20065
Mon Apr 23 04:01:23 [conn20065] end connection 192.168.0.11:57135
Mon Apr 23 04:01:23 [initandlisten] connection accepted from 192.168.0.11:57151 #20066
Mon Apr 23 04:01:53 [conn20066] end connection 192.168.0.11:57151
Mon Apr 23 04:01:53 [initandlisten] connection accepted from 192.168.0.11:57161 #20067

The group job is run via eval with nolock:

    import pymongo
    conn = pymongo.Connection('localhost', 27018)  # assumed connection setup; port from this report

    db = 'app_wowsearch'
    cmd = {
            '$eval' : 'generate_mail_data()',
            'nolock' : True,
            }
    data = conn[db].command(cmd, check=False)['retval']
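
For reference, the equivalent call from the mongo shell should be roughly:

db.runCommand({ $eval: 'generate_mail_data()', nolock: true })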

Comment by Ping Yin [ 19/Apr/12 ]

All the utility functions used:

 
function copy_obj(obj, keys, ignore_empty) {
    var nobj = {};
    for (i in keys) {
        var k = keys[i];
        if (k in obj) {
            if (obj[k] || ! ignore_empty) {
                nobj[k] = obj[k];
            }
        }
    }
    return nobj;
}
function filter(arr, pred) { //{{{
    var narr = [];
    arr.forEach(function(ele) {
        if (pred.call(null, ele)) {
            narr.push(ele);
        }
    });
    return narr;
} //}}}
function extend(obj, obj1) {
    var nobj = {};
    for (k in obj) {
        nobj[k] = obj[k];
    }
    for (k in obj1) {
        nobj[k] = obj1[k];
    }
    return nobj;
}
function delta(days) {
    var ds = [];
    for (i in days) {
        if (i == 0) continue;
        ds.push(days[i] - days[i-1]);
    }
    return ds;
}
function keys(obj) {
    var arr = [];
    for (k in obj) {
        arr.push(k);
    }
    return arr;
}
 
function get_china_day(day) {
    var d = new ISODate(day); // 2011-01-02T00Z
    d = date_add(d, '-8h'); // 2011-01-01T16Z
    return d;
}
 
function pad (val, len) {
    val = String(val);
    len = len || 2;
    while (val.length < len) val = "0" + val;
    return val;
}
function date_isoday(d) {
    // return - 2011-01-01 (local time)
    return [d.getFullYear(), pad(d.getMonth() + 1, 2), pad(d.getDate(), 2)].join('-');
}
function date_add(d, deltas) {
    /**
     * delta - 1y, 1m, 1d, -1H, 1M, 1S, 1L, 1w or '1y,1m,1d'
     */
    var nd = new Date(d);
    var m = {
        y: 'FullYear',
        m: 'Month',
        d: 'Date',
        H: 'Hours',
        h: 'Hours',
        M: 'Minutes',
        S: 'Seconds',
        s: 'Seconds',
        L: 'Milliseconds',
    };
    deltas.split(',').forEach(function(delta) {
        var lc = delta[delta.length - 1];
        var n = parseInt(delta);
        var ratio = 1;
        if (lc == 'w') { // 1w = 7d
            lc = 'd';
            ratio = 7;
        }
        var nm = m[lc];
        nd['set' + nm](d['get' + nm]() + n * ratio);
    });
 
    return nd;
}
 
function get_scale_col_name(scale, firstday, prev) { //{{{
    /**
     * scale - week/month
     * firstday - 2011-01-01
     * prev - true / false
     */
    if (prev) {
        var d = new ISODate(firstday);
        if (scale == 'week') {
            d = date_add(d, '-7d');
        }
        else {
            d = date_add(d, '-1m');
        }
        firstday = date_isoday(d);
    }
    return 'users.' + scale + firstday.replace(/-/g, '');
} //}}}
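
For example (dates illustrative):

get_scale_col_name('week', '2012-04-16');    // -> "users.week20120416"
date_add(get_china_day('2012-04-16'), '7d'); // one week after the China-local start of that day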

Comment by Ping Yin [ 17/Apr/12 ]

It seems I can't edit my issue, so I'll just add comments.

PRIMARY> rs.conf()
{
        "_id" : "rs_default",
        "version" : 19,
        "members" : [
                {
                        "_id" : 2,
                        "host" : "db1:27018",
                        "arbiterOnly" : true
                },
                {
                        "_id" : 3,
                        "host" : "data2:27018",
                        "priority" : 2
                },
                {
                        "_id" : 4,
                        "host" : "data1:27018"
                }
        ]
}
