# overleaf/services/track-changes/app/coffee/MongoManager.coffee
{db, ObjectId} = require "./mongojs"
2015-02-17 06:14:13 -05:00
PackManager = require "./PackManager"
async = require "async"
module.exports = MongoManager =
getLastCompressedUpdate: (doc_id, callback = (error, update) ->) ->
db.docHistory
.find(doc_id: ObjectId(doc_id.toString()))
2014-03-07 09:02:16 -05:00
.sort( v: -1 )
.limit(1)
.toArray (error, compressedUpdates) ->
return callback(error) if error?
if compressedUpdates[0]?.pack?
# cannot pop from a pack, throw error
error = new Error("last compressed update is a pack")
return callback error, null
return callback null, compressedUpdates[0] or null
2015-10-08 11:10:48 -04:00
peekLastCompressedUpdate: (doc_id, callback = (error, update, version) ->) ->
# under normal use we pass back the last update as
# callback(null,update,version).
#
# when we have an existing last update but want to force a new one
# to start, we pass it back as callback(null,null,version), just
# giving the version so we can check consistency.
MongoManager.getLastCompressedUpdate doc_id, (error, update) ->
return callback(error) if error?
if update?
if update.broken
# the update is marked as broken so we will force a new op
return callback null, null
else if update.pack?
return callback null, update, update.pack[0]?.v
else
return callback null, update, update.v
else
MongoManager.getArchivedDocStatus doc_id, (error, status) ->
return callback(error) if error?
return callback(null, null, status.lastVersion) if status?.inS3? and status?.lastVersion?
callback null, null
insertCompressedUpdates: (project_id, doc_id, updates, temporary, callback = (error) ->) ->
jobs = []
for update in updates
do (update) ->
jobs.push (callback) -> MongoManager.insertCompressedUpdate project_id, doc_id, update, temporary, callback
async.series jobs, (err, results) ->
if not temporary
# keep track of updates to be packed
db.docHistoryStats.update {doc_id:ObjectId(doc_id)}, {
$inc:{update_count:updates.length},
$currentDate:{last_update:true}
}, {upsert:true}, () ->
callback(err,results)
else
callback(err,results)
2015-10-08 11:10:48 -04:00
modifyCompressedUpdate: (lastUpdate, newUpdate, callback = (error) ->) ->
return callback() if not newUpdate?
db.docHistory.findAndModify
query: lastUpdate,
update:
$set :
op: newUpdate.op
meta: newUpdate.meta
v: newUpdate.v
new: true
, (err, result, lastErrorObject) ->
return callback(error) if error?
return new Error("could not modify existing op") if not result?
callback(err, result)
insertCompressedUpdate: (project_id, doc_id, update, temporary, callback = (error) ->) ->
update = {
doc_id: ObjectId(doc_id.toString())
2014-03-19 12:40:55 -04:00
project_id: ObjectId(project_id.toString())
op: update.op
meta: update.meta
v: update.v
}
2015-08-09 18:52:32 -04:00
if temporary
seconds = 1000
minutes = 60 * seconds
hours = 60 * minutes
days = 24 * hours
update.expiresAt = new Date(Date.now() + 7 * days)
2015-02-05 11:36:41 -05:00
# may need to roll over a pack here if we are inserting packs
db.docHistory.insert update, callback
getDocUpdates:(doc_id, options = {}, callback = (error, updates) ->) ->
2014-03-04 09:05:17 -05:00
query =
doc_id: ObjectId(doc_id.toString())
if options.from?
query["v"] ||= {}
query["v"]["$gte"] = options.from
if options.to?
query["v"] ||= {}
query["v"]["$lte"] = options.to
PackManager.findDocResults(db.docHistory, query, options.limit, callback)
getProjectUpdates: (project_id, options = {}, callback = (error, updates) ->) ->
query =
project_id: ObjectId(project_id.toString())
if options.before?
query["meta.end_ts"] = { $lt: options.before }
PackManager.findProjectResults(db.docHistory, query, options.limit, callback)
backportProjectId: (project_id, doc_id, callback = (error) ->) ->
db.docHistory.update {
doc_id: ObjectId(doc_id.toString())
project_id: { $exists: false }
}, {
$set: { project_id: ObjectId(project_id.toString()) }
}, {
multi: true
}, callback
getProjectMetaData: (project_id, callback = (error, metadata) ->) ->
db.projectHistoryMetaData.find {
project_id: ObjectId(project_id.toString())
}, (error, results) ->
return callback(error) if error?
callback null, results[0]
setProjectMetaData: (project_id, metadata, callback = (error) ->) ->
db.projectHistoryMetaData.update {
project_id: ObjectId(project_id)
}, {
$set: metadata
}, {
upsert: true
}, callback
ensureIndices: () ->
# For finding all updates that go into a diff for a doc
2014-05-16 12:00:30 -04:00
db.docHistory.ensureIndex { doc_id: 1, v: 1 }, { background: true }
# For finding all updates that affect a project
2014-05-16 12:00:30 -04:00
db.docHistory.ensureIndex { project_id: 1, "meta.end_ts": 1 }, { background: true }
# For finding all packs that affect a project (use a sparse index so only packs are included)
db.docHistory.ensureIndex { project_id: 1, "pack.0.meta.end_ts": 1, "meta.end_ts": 1}, { background: true, sparse: true }
2015-08-31 17:13:18 -04:00
# For finding updates that don't yet have a project_id and need it inserting
2014-05-16 12:00:30 -04:00
db.docHistory.ensureIndex { doc_id: 1, project_id: 1 }, { background: true }
# For finding project meta-data
2014-05-16 12:00:30 -04:00
db.projectHistoryMetaData.ensureIndex { project_id: 1 }, { background: true }
# TTL index for auto deleting week old temporary ops
2014-05-16 12:00:30 -04:00
db.docHistory.ensureIndex { expiresAt: 1 }, { expireAfterSeconds: 0, background: true }
# For finding documents which need packing
db.docHistoryStats.ensureIndex { doc_id: 1 }, { background: true }
db.docHistoryStats.ensureIndex { updates: -1, doc_id: 1 }, { background: true }
2015-08-06 10:11:43 -04:00
getArchivedDocStatus: (doc_id, callback)->
db.docHistoryStats.findOne {doc_id: ObjectId(doc_id.toString()), inS3: {$exists:true}}, {inS3: true, lastVersion: true}, callback
2015-08-06 16:09:36 -04:00
getDocChangesCount: (doc_id, callback)->
db.docHistory.count { doc_id : ObjectId(doc_id.toString())}, callback
2015-08-09 16:50:15 -04:00
markDocHistoryAsArchiveInProgress: (doc_id, lastVersion, callback) ->
db.docHistoryStats.update {doc_id: ObjectId(doc_id.toString())}, {$set : {inS3: false, lastVersion: lastVersion}}, {upsert:true}, callback
clearDocHistoryAsArchiveInProgress: (doc_id, update, callback) ->
db.docHistoryStats.update {doc_id: ObjectId(doc_id.toString())}, {$unset : {inS3: true, lastVersion: true}}, callback
markDocHistoryAsArchived: (doc_id, lastVersion, callback)->
db.docHistoryStats.update {doc_id: ObjectId(doc_id.toString())}, {$set : {inS3: true}}, {upsert:true}, (error)->
return callback(error) if error?
# clear the archived entries from the docHistory now we have finally succeeded
db.docHistory.remove { doc_id : ObjectId(doc_id.toString()), v: {$lte : lastVersion}, expiresAt: {$exists : false} }, (error)->
2015-08-09 16:50:15 -04:00
return callback(error) if error?
callback(error)
2015-08-09 16:50:15 -04:00
markDocHistoryAsUnarchived: (doc_id, callback)->
# note this removes any inS3 field, regardless of its value (true/false/null)
db.docHistoryStats.update {doc_id: ObjectId(doc_id.toString())}, { $unset : { inS3: true, lastVersion: true} }, (error)->
2015-08-09 16:50:15 -04:00
callback(error)