2014-02-12 05:40:42 -05:00
|
|
|
|
LockManager = require "./LockManager"
|
|
|
|
|
RedisManager = require "./RedisManager"
|
2017-05-02 10:38:33 -04:00
|
|
|
|
RealTimeRedisManager = require "./RealTimeRedisManager"
|
2014-02-12 05:40:42 -05:00
|
|
|
|
ShareJsUpdateManager = require "./ShareJsUpdateManager"
|
2016-11-28 05:14:42 -05:00
|
|
|
|
HistoryManager = require "./HistoryManager"
|
2014-02-12 05:40:42 -05:00
|
|
|
|
Settings = require('settings-sharelatex')
|
2017-09-29 07:57:27 -04:00
|
|
|
|
_ = require("underscore")
|
2014-02-12 05:40:42 -05:00
|
|
|
|
async = require("async")
|
|
|
|
|
logger = require('logger-sharelatex')
|
|
|
|
|
Metrics = require "./Metrics"
|
2016-11-28 05:14:42 -05:00
|
|
|
|
Errors = require "./Errors"
|
|
|
|
|
DocumentManager = require "./DocumentManager"
|
2016-12-08 07:31:43 -05:00
|
|
|
|
RangesManager = require "./RangesManager"
|
2017-05-18 06:00:07 -04:00
|
|
|
|
Profiler = require "./Profiler"
|
2014-02-12 05:40:42 -05:00
|
|
|
|
|
|
|
|
|
module.exports = UpdateManager =
|
2016-06-01 06:28:23 -04:00
|
|
|
|
processOutstandingUpdates: (project_id, doc_id, callback = (error) ->) ->
    # Drain all queued updates for one doc, timing the whole pass.
    timer = new Metrics.Timer("updateManager.processOutstandingUpdates")
    UpdateManager.fetchAndApplyUpdates project_id, doc_id, (error) ->
        timer.done()
        if error?
            callback(error)
        else
            callback()
|
2014-02-12 05:40:42 -05:00
|
|
|
|
|
|
|
|
|
processOutstandingUpdatesWithLock: (project_id, doc_id, callback = (error) ->) ->
    # Try to take the doc lock and drain the update queue. If another
    # worker already holds the lock we return immediately — the holder
    # is responsible for processing whatever is queued.
    profile = new Profiler("processOutstandingUpdatesWithLock", {project_id, doc_id})
    LockManager.tryLock doc_id, (error, acquired, lockToken) =>
        if error?
            return callback(error)
        if not acquired
            return callback()
        profile.log("tryLock")
        UpdateManager.processOutstandingUpdates project_id, doc_id, (error) ->
            if error?
                # Release the lock before surfacing the error.
                return UpdateManager._handleErrorInsideLock(doc_id, lockToken, error, callback)
            profile.log("processOutstandingUpdates")
            LockManager.releaseLock doc_id, lockToken, (error) =>
                if error?
                    return callback(error)
                profile.log("releaseLock").end()
                # More updates may have arrived while we held the lock.
                UpdateManager.continueProcessingUpdatesWithLock project_id, doc_id, callback
|
|
|
|
|
|
|
|
|
|
continueProcessingUpdatesWithLock: (project_id, doc_id, callback = (error) ->) ->
    # Check whether anything is left on the pending-updates queue and
    # only re-enter the locked processing path if there is real work.
    RealTimeRedisManager.getUpdatesLength doc_id, (error, length) =>
        return callback(error) if error?
        unless length > 0
            return callback()
        UpdateManager.processOutstandingUpdatesWithLock project_id, doc_id, callback
|
|
|
|
|
|
|
|
|
|
fetchAndApplyUpdates: (project_id, doc_id, callback = (error) ->) ->
    # Pull every queued realtime update for this doc out of Redis and
    # apply them strictly one at a time, in arrival order.
    profile = new Profiler("fetchAndApplyUpdates", {project_id, doc_id})
    RealTimeRedisManager.getPendingUpdatesForDoc doc_id, (error, updates) =>
        return callback(error) if error?
        logger.log {project_id: project_id, doc_id: doc_id, count: updates.length}, "processing updates"
        return callback() if updates.length == 0
        profile.log("getPendingUpdatesForDoc")
        applyOne = (update, cb) ->
            # Apply a single update, noting how long it took.
            UpdateManager.applyUpdate project_id, doc_id, update, (err) ->
                profile.log("applyUpdate")
                cb(err)
        allDone = (err) ->
            profile.log("async done").end()
            callback(err)
        async.eachSeries updates, applyOne, allDone
|
2014-02-12 05:40:42 -05:00
|
|
|
|
|
2017-01-10 10:58:11 -05:00
|
|
|
|
applyUpdate: (project_id, doc_id, update, _callback = (error) ->) ->
    # Apply a single realtime update to a doc: sanitize it, run it
    # through ShareJS OT, update tracked-changes ranges, write the new
    # doc back to Redis and queue the applied ops for the history
    # services. On any failure, notify the project's connected clients
    # (so they can resync) before reporting the error to the caller.
    callback = (error) ->
        if error?
            # Tell clients the update failed; fall back to the raw error
            # value when it has no .message (e.g. a plain string).
            RealTimeRedisManager.sendData {project_id, doc_id, error: error.message || error}
            profile.log("sendData")
        profile.end()
        _callback(error)

    profile = new Profiler("applyUpdate", {project_id, doc_id})
    UpdateManager._sanitizeUpdate update
    profile.log("sanitizeUpdate")
    DocumentManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId) ->
        profile.log("getDoc")
        return callback(error) if error?
        if !lines? or !version?
            return callback(new Errors.NotFoundError("document not found: #{doc_id}"))
        ShareJsUpdateManager.applyUpdate project_id, doc_id, update, lines, version, (error, updatedDocLines, version, appliedOps) ->
            profile.log("sharejs.applyUpdate")
            return callback(error) if error?
            RangesManager.applyUpdate project_id, doc_id, ranges, appliedOps, updatedDocLines, (error, new_ranges) ->
                profile.log("RangesManager.applyUpdate")
                # BUG FIX: bail out on a ranges error *before* touching
                # appliedOps. Previously _addProjectHistoryMetadataToOps
                # ran first; on failure appliedOps may be undefined and
                # that call would throw, masking the real error.
                return callback(error) if error?
                UpdateManager._addProjectHistoryMetadataToOps(appliedOps, pathname, projectHistoryId, lines)
                RedisManager.updateDocument project_id, doc_id, updatedDocLines, version, appliedOps, new_ranges, (error, doc_ops_length, project_ops_length) ->
                    profile.log("RedisManager.updateDocument")
                    return callback(error) if error?
                    HistoryManager.recordAndFlushHistoryOps project_id, doc_id, appliedOps, doc_ops_length, project_ops_length, (error) ->
                        profile.log("recordAndFlushHistoryOps")
                        callback(error)
|
2014-02-12 05:40:42 -05:00
|
|
|
|
|
|
|
|
|
# Acquire the doc lock (blocking), flush any queued realtime updates so
# `method` sees an up-to-date doc, run `method`, then release the lock
# before forwarding `method`'s results to `callback`.
# `args...` are extra positional arguments forwarded to `method`.
lockUpdatesAndDo: (method, project_id, doc_id, args..., callback) ->
    profile = new Profiler("lockUpdatesAndDo", {project_id, doc_id})
    LockManager.getLock doc_id, (error, lockValue) ->
        profile.log("getLock")
        return callback(error) if error?
        # Drain pending updates first, while we hold the lock.
        UpdateManager.processOutstandingUpdates project_id, doc_id, (error) ->
            # On failure, release the lock and report the original error.
            return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback) if error?
            profile.log("processOutstandingUpdates")
            method project_id, doc_id, args..., (error, response_args...) ->
                return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback) if error?
                profile.log("method")
                LockManager.releaseLock doc_id, lockValue, (error) ->
                    return callback(error) if error?
                    profile.log("releaseLock").end()
                    callback null, response_args...
                    # We held the lock for a while so updates might have queued up
                    # (fire-and-forget: intentionally no callback passed here).
                    UpdateManager.continueProcessingUpdatesWithLock project_id, doc_id
|
|
|
|
|
|
2016-04-13 06:59:56 -04:00
|
|
|
|
_handleErrorInsideLock: (doc_id, lockValue, original_error, callback = (error) ->) ->
    # Always hand the lock back before reporting failure. Any error from
    # releaseLock itself is deliberately dropped so the caller sees the
    # error that actually aborted the operation.
    LockManager.releaseLock doc_id, lockValue, (_releaseError) ->
        callback(original_error)
|
2017-09-29 07:57:27 -04:00
|
|
|
|
|
2015-06-12 05:14:35 -04:00
|
|
|
|
_sanitizeUpdate: (update) ->
    # JavaScript strings are sequences of 16-bit code units; characters
    # outside the Basic Multilingual Plane are stored as a surrogate
    # pair — one High Surrogate (U+D800-U+DBFF) plus one Low Surrogate
    # (U+DC00-U+DFFF). A lone surrogate code unit is never a valid
    # character on its own.
    # See http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
    #
    # Some clients manage to split such a pair and send us a standalone
    # surrogate — the main offender seen is \uD835, the first half of a
    # blackboard bold letter
    # (http://www.fileformat.info/info/unicode/char/1d400/index.htm) —
    # presumably due to client-side encoding/splitting going wrong.
    #
    # Swap every surrogate code unit in inserted text for the Unicode
    # 'replacement character' (\uFFFD) so we never store broken data.
    for op in update.op or []
        op.i = op.i.replace(/[\uD800-\uDFFF]/g, "\uFFFD") if op.i?
    return update
|
2014-02-12 05:40:42 -05:00
|
|
|
|
|
2018-04-23 07:08:04 -04:00
|
|
|
|
_addProjectHistoryMetadataToOps: (updates, pathname, projectHistoryId, lines) ->
    # Stamp each update with the metadata the project-history service
    # needs: the history id, the doc's pathname, and the doc length (in
    # characters, counting the newlines between lines) as it stood just
    # before that update was applied.
    doc_length = 0
    doc_length += line.length for line in lines
    doc_length += lines.length - 1 # count newline characters
    for update in updates
        update.projectHistoryId = projectHistoryId
        update.meta ||= {}
        update.meta.pathname = pathname
        update.meta.doc_length = doc_length
        # Each update may contain multiple ops, i.e.
        # [{
        #   ops: [{i: "foo", p: 4}, {d: "bar", p:8}]
        # }, {
        #   ops: [{d: "baz", p: 40}, {i: "qux", p:8}]
        # }]
        # doc_length above reflects the doc *before* this update's ops
        # were applied, so roll the ops into the running length for the
        # next iteration.
        for op in update.op
            doc_length += op.i.length if op.i?
            doc_length -= op.d.length if op.d?
    # Explicit bare return: callers ignore the result, and this keeps
    # the return value undefined (matching the previous forEach form)
    # instead of a collected loop array.
    return
|