Don't put docs that are already in S3 through the archive job

This fixes the bug we saw with 'RangeError: Maximum call stack size exceeded'.
If lots of docs are already in S3, cb() gets called synchronously many times in
quick succession, which can cause the exception. I am not sure where the
recursion is; it may be inside async.
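
For illustration only, a minimal sketch of that failure mode, assuming the
async.series behaviour of the time: a job that calls its callback synchronously
makes the next job run on the same, ever-deeper call stack.

async = require "async"

# Sketch only: every job completes synchronously, so async.series starts the
# next job before the earlier stack frames have returned. With enough jobs the
# stack keeps growing until Node throws
# 'RangeError: Maximum call stack size exceeded'
# (the exact behaviour depends on the async version in use).
syncJobs = [1..200000].map -> (cb) -> cb()

async.series syncJobs, (err) ->
  console.log "finished", err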

Calling setImmediate(cb) also fixes the issue, as I believe it gives the
process a chance to clear the stack, similar to process.nextTick.
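
For illustration only (this is not what the commit does): the job-building code
from the diff below, with the deferred callback instead of the up-front filter.
_, docs, project_id and DocArchive are assumed from that surrounding
archiveAllDocs code.

# Alternative fix, not the one taken by this commit: keep the inS3 check but
# defer the no-op callback so the stack can unwind between jobs;
# process.nextTick(cb) would behave similarly.
jobs = _.map docs, (doc) ->
  (cb)->
    if doc.inS3
      return setImmediate(cb)   # was: return cb(), which completed synchronously
    else
      DocArchive.archiveDoc project_id, doc, cb
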
Henry Oswald 2015-09-07 14:07:37 +01:00
parent ae75b855b4
commit 9cfa4b3f84
2 changed files with 4 additions and 6 deletions

@@ -16,12 +16,10 @@ module.exports = DocArchive =
         return callback(err)
       else if !docs?
         return callback new Errors.NotFoundError("No docs for project #{project_id}")
+      docs = _.filter docs, (doc)-> doc.inS3 != true
       jobs = _.map docs, (doc) ->
-        (cb)->
-          if doc.inS3
-            return cb()
-          else
-            DocArchive.archiveDoc project_id, doc, cb
+        (cb)->
+          DocArchive.archiveDoc project_id, doc, cb
       async.series jobs, callback

@@ -168,7 +168,7 @@ describe "DocArchiveManager", ->
     it "should not throw and error", (done)->
       @DocArchiveManager.archiveAllDocs @project_id, (err)=>
-        err.should.not.exist
+        should.not.exist err
         done()