MongoManager = require "./MongoManager"
RedisManager = require "./RedisManager"
UpdateCompressor = require "./UpdateCompressor"
LockManager = require "./LockManager"
WebApiManager = require "./WebApiManager"
logger = require "logger-sharelatex"
async = require "async"

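# UpdatesManager drains raw document updates out of Redis, compresses them
# with UpdateCompressor, and stores the compressed history in Mongo. It also
# exposes helpers for reading that history back, optionally annotated with
# user info fetched from the web API.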
module.exports = UpdatesManager =
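	# Compress a contiguous batch of raw updates on top of the last compressed
	# update already stored in Mongo, and write the merged result back to Mongo.
	# Bails with an error if the raw updates do not follow on from the last
	# compressed version.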
	compressAndSaveRawUpdates: (doc_id, rawUpdates, callback = (error) ->) ->
		length = rawUpdates.length
		if length == 0
			return callback()

		MongoManager.popLastCompressedUpdate doc_id, (error, lastCompressedUpdate) ->
			return callback(error) if error?
			logger.log doc_id: doc_id, "popped last update"

			# Ensure that raw updates start where lastCompressedUpdate left off
			if lastCompressedUpdate?
				rawUpdates = rawUpdates.slice(0)
				while rawUpdates[0]? and rawUpdates[0].v <= lastCompressedUpdate.v
					rawUpdates.shift()

				if rawUpdates[0]? and rawUpdates[0].v != lastCompressedUpdate.v + 1
					error = new Error("Tried to apply raw op at version #{rawUpdates[0].v} to last compressed update with version #{lastCompressedUpdate.v}")
					logger.error err: error, doc_id: doc_id, "inconsistent doc versions"
					# Push the update back into Mongo - catching errors at this
					# point is useless, we're already bailing
					MongoManager.insertCompressedUpdates doc_id, [lastCompressedUpdate], () ->
						return callback error
					return

			compressedUpdates = UpdateCompressor.compressRawUpdates lastCompressedUpdate, rawUpdates
			MongoManager.insertCompressedUpdates doc_id, compressedUpdates, (error) ->
				return callback(error) if error?
				logger.log doc_id: doc_id, rawUpdatesLength: length, compressedUpdatesLength: compressedUpdates.length, "compressed doc updates"
				callback()

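	# Maximum number of raw updates to read out of Redis in one batch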
	REDIS_READ_BATCH_SIZE: 100

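	# Read raw updates for a doc out of Redis in batches, compress each batch
	# into Mongo, then delete it from Redis once it has been saved. If a batch
	# comes back full there may be more updates, so another pass is scheduled
	# via setTimeout; otherwise processing is complete.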
	processUncompressedUpdates: (doc_id, callback = (error) ->) ->
		logger.log "processUncompressedUpdates"
		RedisManager.getOldestRawUpdates doc_id, UpdatesManager.REDIS_READ_BATCH_SIZE, (error, rawUpdates) ->
			return callback(error) if error?
			length = rawUpdates.length
			logger.log doc_id: doc_id, length: length, "got raw updates from redis"
			UpdatesManager.compressAndSaveRawUpdates doc_id, rawUpdates, (error) ->
				return callback(error) if error?
				logger.log doc_id: doc_id, "compressed and saved doc updates"
				RedisManager.deleteOldestRawUpdates doc_id, length, (error) ->
					return callback(error) if error?
					if length == UpdatesManager.REDIS_READ_BATCH_SIZE
						# There might be more updates
						logger.log doc_id: doc_id, "continuing processing updates"
						setTimeout () ->
							UpdatesManager.processUncompressedUpdates doc_id, callback
						, 0
					else
						logger.log doc_id: doc_id, "all raw updates processed"
						callback()

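	# As processUncompressedUpdates, but holds the doc's HistoryLock while
	# processing, so only one worker compresses a given doc's updates at a time.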
	processUncompressedUpdatesWithLock: (doc_id, callback = (error) ->) ->
		LockManager.runWithLock(
			"HistoryLock:#{doc_id}",
			(releaseLock) ->
				UpdatesManager.processUncompressedUpdates doc_id, releaseLock
			callback
		)

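	# Flush any raw updates still in Redis (under the lock), then return the
	# compressed updates stored in Mongo for this doc.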
	getUpdates: (doc_id, options = {}, callback = (error, updates) ->) ->
		UpdatesManager.processUncompressedUpdatesWithLock doc_id, (error) ->
			return callback(error) if error?
			MongoManager.getUpdates doc_id, options, callback

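	# As getUpdates, but with each update's meta.user_id replaced by the full
	# user info fetched from the web API. A hypothetical caller might look like
	# the sketch below; the supported options are whatever MongoManager.getUpdates
	# accepts (a limit is assumed here for illustration):
	#
	#   UpdatesManager.getUpdatesWithUserInfo doc_id, { limit: 50 }, (error, updates) ->
	#     throw error if error?
	#     console.log updates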
	getUpdatesWithUserInfo: (doc_id, options = {}, callback = (error, updates) ->) ->
		UpdatesManager.getUpdates doc_id, options, (error, updates) ->
			return callback(error) if error?
			UpdatesManager.fillUserInfo updates, (error, updates) ->
				return callback(error) if error?
				callback null, updates

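	# Collect the distinct user_ids referenced by the updates, look each one up
	# once via WebApiManager (the lookups run one at a time through async.series),
	# then replace meta.user_id with the full user record on every update.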
	fillUserInfo: (updates, callback = (error, updates) ->) ->
		users = {}
		for update in updates
			users[update.meta.user_id] = true

		jobs = []
		for user_id, _ of users
			do (user_id) ->
				jobs.push (callback) ->
					WebApiManager.getUserInfo user_id, (error, userInfo) ->
						return callback(error) if error?
						users[user_id] = userInfo
						callback()

		async.series jobs, (error) ->
			return callback(error) if error?
			for update in updates
				user_id = update.meta.user_id
				delete update.meta.user_id
				update.meta.user = users[user_id]
			callback null, updates