2014-02-12 05:40:42 -05:00
|
|
|
Settings = require('settings-sharelatex')
|
2017-04-12 09:53:03 -04:00
|
|
|
rclient = require("redis-sharelatex").createClient(Settings.redis.documentupdater)
|
2014-02-12 05:40:42 -05:00
|
|
|
logger = require('logger-sharelatex')
|
|
|
|
metrics = require('./Metrics')
|
2016-05-31 08:24:19 -04:00
|
|
|
Errors = require "./Errors"
|
2017-02-14 11:11:43 -05:00
|
|
|
crypto = require "crypto"
|
2019-09-30 08:41:47 -04:00
|
|
|
async = require "async"
|
2018-03-09 09:14:14 -05:00
|
|
|
ProjectHistoryRedisManager = require "./ProjectHistoryRedisManager"
|
2015-03-25 12:53:20 -04:00
|
|
|
|
2017-06-07 06:34:42 -04:00
|
|
|
# Sometimes Redis calls take an unexpectedly long time. We have to be
# quick with Redis calls because we're holding a lock that expires
# after 30 seconds. We can't let any errors in the rest of the stack
# hold us up, and need to bail out quickly if there is a problem.
MAX_REDIS_REQUEST_LENGTH = 5000 # 5 seconds

# Make times easy to read
minutes = 60 # seconds for Redis expire

# Optional per-deployment logging of sha1 hash mismatches on reads.
logHashErrors = Settings.documentupdater?.logHashErrors
logHashReadErrors = logHashErrors?.read

# Cap on the serialized size of a doc's tracked-changes ranges.
MEGABYTES = 1024 * 1024
MAX_RANGES_SIZE = 3 * MEGABYTES

# Key-name builders for the documentupdater and history redis instances.
keys = Settings.redis.documentupdater.key_schema
historyKeys = Settings.redis.history.key_schema
2014-02-10 10:17:08 -05:00
|
|
|
module.exports = RedisManager =
|
2016-06-23 13:00:03 -04:00
|
|
|
rclient: rclient
|
|
|
|
|
2018-04-23 07:08:04 -04:00
|
|
|
putDocInMemory : (project_id, doc_id, docLines, version, ranges, pathname, projectHistoryId, _callback)->
|
2014-02-12 05:40:42 -05:00
|
|
|
timer = new metrics.Timer("redis.put-doc")
|
2016-06-08 11:42:09 -04:00
|
|
|
callback = (error) ->
|
2014-02-12 05:40:42 -05:00
|
|
|
timer.done()
|
2016-06-08 11:42:09 -04:00
|
|
|
_callback(error)
|
2017-02-14 11:11:43 -05:00
|
|
|
docLines = JSON.stringify(docLines)
|
2017-02-27 03:55:04 -05:00
|
|
|
if docLines.indexOf("\u0000") != -1
|
2017-02-27 04:06:24 -05:00
|
|
|
error = new Error("null bytes found in doc lines")
|
2020-01-30 10:17:13 -05:00
|
|
|
# this check was added to catch memory corruption in JSON.stringify.
|
|
|
|
# It sometimes returned null bytes at the end of the string.
|
2017-12-29 03:13:16 -05:00
|
|
|
logger.error {err: error, doc_id: doc_id, docLines: docLines}, error.message
|
2017-02-27 04:06:24 -05:00
|
|
|
return callback(error)
|
2017-02-14 11:11:43 -05:00
|
|
|
docHash = RedisManager._computeHash(docLines)
|
2018-04-23 07:08:04 -04:00
|
|
|
logger.log {project_id, doc_id, version, docHash, pathname, projectHistoryId}, "putting doc in redis"
|
2017-02-27 08:34:20 -05:00
|
|
|
RedisManager._serializeRanges ranges, (error, ranges) ->
|
|
|
|
if error?
|
|
|
|
logger.error {err: error, doc_id, project_id}, error.message
|
|
|
|
return callback(error)
|
|
|
|
multi = rclient.multi()
|
2019-09-09 10:27:58 -04:00
|
|
|
multi.set keys.docLines(doc_id:doc_id), docLines
|
2017-02-27 08:34:20 -05:00
|
|
|
multi.set keys.projectKey({doc_id:doc_id}), project_id
|
|
|
|
multi.set keys.docVersion(doc_id:doc_id), version
|
|
|
|
multi.set keys.docHash(doc_id:doc_id), docHash
|
|
|
|
if ranges?
|
|
|
|
multi.set keys.ranges(doc_id:doc_id), ranges
|
|
|
|
else
|
|
|
|
multi.del keys.ranges(doc_id:doc_id)
|
2017-09-29 05:54:48 -04:00
|
|
|
multi.set keys.pathname(doc_id:doc_id), pathname
|
2018-04-23 07:08:04 -04:00
|
|
|
multi.set keys.projectHistoryId(doc_id:doc_id), projectHistoryId
|
2017-02-27 08:34:20 -05:00
|
|
|
multi.exec (error, result) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
# update docsInProject set
|
|
|
|
rclient.sadd keys.docsInProject(project_id:project_id), doc_id, callback
|
2014-02-12 05:40:42 -05:00
|
|
|
|
2016-06-08 11:42:09 -04:00
|
|
|
removeDocFromMemory : (project_id, doc_id, _callback)->
|
2014-02-12 05:40:42 -05:00
|
|
|
logger.log project_id:project_id, doc_id:doc_id, "removing doc from redis"
|
2016-06-08 11:42:09 -04:00
|
|
|
callback = (err) ->
|
2014-02-12 05:40:42 -05:00
|
|
|
if err?
|
|
|
|
logger.err project_id:project_id, doc_id:doc_id, err:err, "error removing doc from redis"
|
2016-06-08 11:42:09 -04:00
|
|
|
_callback(err)
|
2014-02-12 05:40:42 -05:00
|
|
|
else
|
2015-03-25 12:54:36 -04:00
|
|
|
logger.log project_id:project_id, doc_id:doc_id, "removed doc from redis"
|
2016-06-08 11:42:09 -04:00
|
|
|
_callback()
|
|
|
|
|
|
|
|
multi = rclient.multi()
|
|
|
|
multi.del keys.docLines(doc_id:doc_id)
|
|
|
|
multi.del keys.projectKey(doc_id:doc_id)
|
|
|
|
multi.del keys.docVersion(doc_id:doc_id)
|
2017-02-14 11:11:43 -05:00
|
|
|
multi.del keys.docHash(doc_id:doc_id)
|
2016-12-08 07:31:43 -05:00
|
|
|
multi.del keys.ranges(doc_id:doc_id)
|
2017-09-29 05:54:48 -04:00
|
|
|
multi.del keys.pathname(doc_id:doc_id)
|
2018-04-23 07:08:04 -04:00
|
|
|
multi.del keys.projectHistoryId(doc_id:doc_id)
|
2019-11-15 11:53:16 -05:00
|
|
|
multi.del keys.projectHistoryType(doc_id:doc_id)
|
2017-10-06 07:23:23 -04:00
|
|
|
multi.del keys.unflushedTime(doc_id:doc_id)
|
2019-04-22 20:02:48 -04:00
|
|
|
multi.del keys.lastUpdatedAt(doc_id: doc_id)
|
|
|
|
multi.del keys.lastUpdatedBy(doc_id: doc_id)
|
2016-06-08 11:42:09 -04:00
|
|
|
multi.exec (error) ->
|
|
|
|
return callback(error) if error?
|
2017-08-03 10:03:30 -04:00
|
|
|
multi = rclient.multi()
|
|
|
|
multi.srem keys.docsInProject(project_id:project_id), doc_id
|
2017-08-09 11:45:08 -04:00
|
|
|
multi.del keys.projectState(project_id:project_id)
|
2017-08-03 10:03:30 -04:00
|
|
|
multi.exec callback
|
2014-02-12 05:40:42 -05:00
|
|
|
|
2017-08-07 09:43:28 -04:00
|
|
|
checkOrSetProjectState: (project_id, newState, callback = (error, stateChanged) ->) ->
|
2017-08-09 11:45:08 -04:00
|
|
|
multi = rclient.multi()
|
|
|
|
multi.getset keys.projectState(project_id:project_id), newState
|
|
|
|
multi.expire keys.projectState(project_id:project_id), 30 * minutes
|
|
|
|
multi.exec (error, response) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
logger.log project_id: project_id, newState:newState, oldState: response[0], "checking project state"
|
|
|
|
callback(null, response[0] isnt newState)
|
2017-08-07 09:43:28 -04:00
|
|
|
|
2017-09-08 10:50:26 -04:00
|
|
|
clearProjectState: (project_id, callback = (error) ->) ->
|
|
|
|
rclient.del keys.projectState(project_id:project_id), callback
|
|
|
|
|
2018-04-23 07:08:04 -04:00
|
|
|
getDoc : (project_id, doc_id, callback = (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime) ->)->
|
2014-02-12 05:40:42 -05:00
|
|
|
timer = new metrics.Timer("redis.get-doc")
|
2016-06-01 05:53:50 -04:00
|
|
|
multi = rclient.multi()
|
|
|
|
multi.get keys.docLines(doc_id:doc_id)
|
2014-02-12 05:40:42 -05:00
|
|
|
multi.get keys.docVersion(doc_id:doc_id)
|
2017-02-14 11:11:43 -05:00
|
|
|
multi.get keys.docHash(doc_id:doc_id)
|
2016-09-02 09:47:41 -04:00
|
|
|
multi.get keys.projectKey(doc_id:doc_id)
|
2016-12-08 07:31:43 -05:00
|
|
|
multi.get keys.ranges(doc_id:doc_id)
|
2017-09-29 05:54:48 -04:00
|
|
|
multi.get keys.pathname(doc_id:doc_id)
|
2018-04-23 07:08:04 -04:00
|
|
|
multi.get keys.projectHistoryId(doc_id:doc_id)
|
2017-10-06 07:23:23 -04:00
|
|
|
multi.get keys.unflushedTime(doc_id:doc_id)
|
2019-04-22 20:02:48 -04:00
|
|
|
multi.get keys.lastUpdatedAt(doc_id: doc_id)
|
|
|
|
multi.get keys.lastUpdatedBy(doc_id: doc_id)
|
|
|
|
multi.exec (error, [docLines, version, storedHash, doc_project_id, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy])->
|
2017-06-07 06:34:42 -04:00
|
|
|
timeSpan = timer.done()
|
2014-02-12 05:40:42 -05:00
|
|
|
return callback(error) if error?
|
2017-06-07 06:34:42 -04:00
|
|
|
# check if request took too long and bail out. only do this for
|
|
|
|
# get, because it is the first call in each update, so if this
|
|
|
|
# passes we'll assume others have a reasonable chance to succeed.
|
2017-06-26 11:36:05 -04:00
|
|
|
if timeSpan > MAX_REDIS_REQUEST_LENGTH
|
|
|
|
error = new Error("redis getDoc exceeded timeout")
|
|
|
|
return callback(error)
|
2017-02-15 09:12:36 -05:00
|
|
|
# check sha1 hash value if present
|
|
|
|
if docLines? and storedHash?
|
2017-02-14 11:11:43 -05:00
|
|
|
computedHash = RedisManager._computeHash(docLines)
|
2017-02-22 09:32:51 -05:00
|
|
|
if logHashReadErrors and computedHash isnt storedHash
|
2017-03-03 10:57:44 -05:00
|
|
|
logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, computedHash: computedHash, storedHash: storedHash, docLines:docLines, "hash mismatch on retrieved document"
|
2017-02-14 11:11:43 -05:00
|
|
|
|
2016-06-01 05:53:50 -04:00
|
|
|
try
|
2016-12-02 06:04:21 -05:00
|
|
|
docLines = JSON.parse docLines
|
2016-12-08 07:31:43 -05:00
|
|
|
ranges = RedisManager._deserializeRanges(ranges)
|
2016-06-01 05:53:50 -04:00
|
|
|
catch e
|
|
|
|
return callback(e)
|
2016-12-08 07:31:43 -05:00
|
|
|
|
2016-12-02 06:04:21 -05:00
|
|
|
version = parseInt(version or 0, 10)
|
2016-09-02 09:47:41 -04:00
|
|
|
# check doc is in requested project
|
|
|
|
if doc_project_id? and doc_project_id isnt project_id
|
|
|
|
logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, "doc not in project"
|
|
|
|
return callback(new Errors.NotFoundError("document not found"))
|
2017-05-31 10:33:59 -04:00
|
|
|
|
2018-04-23 10:19:06 -04:00
|
|
|
if projectHistoryId?
|
|
|
|
projectHistoryId = parseInt(projectHistoryId)
|
|
|
|
|
2017-05-31 10:33:59 -04:00
|
|
|
# doc is not in redis, bail out
|
2017-05-31 11:08:33 -04:00
|
|
|
if !docLines?
|
2019-04-22 20:02:48 -04:00
|
|
|
return callback null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy
|
2017-05-31 10:33:59 -04:00
|
|
|
|
|
|
|
# doc should be in project set, check if missing (workaround for missing docs from putDoc)
|
|
|
|
rclient.sadd keys.docsInProject(project_id:project_id), doc_id, (error, result) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
if result isnt 0 # doc should already be in set
|
|
|
|
logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, "doc missing from docsInProject set"
|
2019-04-22 20:02:48 -04:00
|
|
|
callback null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy
|
2014-02-12 05:40:42 -05:00
|
|
|
|
2019-11-15 11:53:16 -05:00
|
|
|
getDocVersion: (doc_id, callback = (error, version, projectHistoryType) ->) ->
|
2019-11-25 08:28:36 -05:00
|
|
|
rclient.mget keys.docVersion(doc_id: doc_id), keys.projectHistoryType(doc_id:doc_id), (error, result) ->
|
2014-02-12 05:40:42 -05:00
|
|
|
return callback(error) if error?
|
2019-11-25 08:28:36 -05:00
|
|
|
[version, projectHistoryType] = result || []
|
2014-02-12 05:40:42 -05:00
|
|
|
version = parseInt(version, 10)
|
2019-11-15 11:53:16 -05:00
|
|
|
callback null, version, projectHistoryType
|
2014-02-12 05:40:42 -05:00
|
|
|
|
2017-07-28 11:34:32 -04:00
|
|
|
getDocLines: (doc_id, callback = (error, version) ->) ->
|
|
|
|
rclient.get keys.docLines(doc_id: doc_id), (error, docLines) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
callback null, docLines
|
|
|
|
|
2014-02-12 05:40:42 -05:00
|
|
|
getPreviousDocOps: (doc_id, start, end, callback = (error, jsonOps) ->) ->
|
2017-06-23 10:50:21 -04:00
|
|
|
timer = new metrics.Timer("redis.get-prev-docops")
|
2014-02-12 05:40:42 -05:00
|
|
|
rclient.llen keys.docOps(doc_id: doc_id), (error, length) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
rclient.get keys.docVersion(doc_id: doc_id), (error, version) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
version = parseInt(version, 10)
|
|
|
|
first_version_in_redis = version - length
|
|
|
|
|
|
|
|
if start < first_version_in_redis or end > version
|
2016-05-31 08:24:19 -04:00
|
|
|
error = new Errors.OpRangeNotAvailableError("doc ops range is not loaded in redis")
|
|
|
|
logger.warn {err: error, doc_id, length, version, start, end}, "doc ops range is not loaded in redis"
|
2014-02-12 05:40:42 -05:00
|
|
|
return callback(error)
|
|
|
|
|
|
|
|
start = start - first_version_in_redis
|
|
|
|
if end > -1
|
|
|
|
end = end - first_version_in_redis
|
|
|
|
|
|
|
|
if isNaN(start) or isNaN(end)
|
|
|
|
error = new Error("inconsistent version or lengths")
|
2016-05-31 08:24:19 -04:00
|
|
|
logger.error {err: error, doc_id, length, version, start, end}, "inconsistent version or length"
|
2014-02-12 05:40:42 -05:00
|
|
|
return callback(error)
|
|
|
|
|
|
|
|
rclient.lrange keys.docOps(doc_id: doc_id), start, end, (error, jsonOps) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
try
|
|
|
|
ops = jsonOps.map (jsonOp) -> JSON.parse jsonOp
|
|
|
|
catch e
|
|
|
|
return callback(e)
|
2017-06-23 10:50:21 -04:00
|
|
|
timeSpan = timer.done()
|
2017-06-26 11:36:05 -04:00
|
|
|
if timeSpan > MAX_REDIS_REQUEST_LENGTH
|
|
|
|
error = new Error("redis getPreviousDocOps exceeded timeout")
|
|
|
|
return callback(error)
|
2014-02-12 05:40:42 -05:00
|
|
|
callback null, ops
|
|
|
|
|
2019-11-15 11:53:16 -05:00
|
|
|
getHistoryType: (doc_id, callback = (error, projectHistoryType) ->) ->
|
|
|
|
rclient.get keys.projectHistoryType(doc_id:doc_id), (error, projectHistoryType) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
callback null, projectHistoryType
|
|
|
|
|
|
|
|
setHistoryType: (doc_id, projectHistoryType, callback = (error) ->) ->
|
|
|
|
rclient.set keys.projectHistoryType(doc_id:doc_id), projectHistoryType, callback
|
|
|
|
|
2014-02-10 10:17:08 -05:00
|
|
|
DOC_OPS_TTL: 60 * minutes
|
|
|
|
DOC_OPS_MAX_LENGTH: 100
|
2019-04-22 20:02:48 -04:00
|
|
|
updateDocument : (project_id, doc_id, docLines, newVersion, appliedOps = [], ranges, updateMeta, callback = (error) ->)->
|
2019-11-15 11:53:16 -05:00
|
|
|
RedisManager.getDocVersion doc_id, (error, currentVersion, projectHistoryType) ->
|
2014-02-12 05:40:42 -05:00
|
|
|
return callback(error) if error?
|
2016-08-23 11:00:46 -04:00
|
|
|
if currentVersion + appliedOps.length != newVersion
|
|
|
|
error = new Error("Version mismatch. '#{doc_id}' is corrupted.")
|
|
|
|
logger.error {err: error, doc_id, currentVersion, newVersion, opsLength: appliedOps.length}, "version mismatch"
|
|
|
|
return callback(error)
|
2017-02-27 04:06:24 -05:00
|
|
|
|
2016-08-23 11:00:46 -04:00
|
|
|
jsonOps = appliedOps.map (op) -> JSON.stringify op
|
2017-05-22 10:20:28 -04:00
|
|
|
for op in jsonOps
|
|
|
|
if op.indexOf("\u0000") != -1
|
|
|
|
error = new Error("null bytes found in jsonOps")
|
2020-01-30 10:17:13 -05:00
|
|
|
# this check was added to catch memory corruption in JSON.stringify
|
2017-12-29 03:13:16 -05:00
|
|
|
logger.error {err: error, doc_id: doc_id, jsonOps: jsonOps}, error.message
|
2017-05-22 10:20:28 -04:00
|
|
|
return callback(error)
|
2017-05-22 09:58:38 -04:00
|
|
|
|
2017-02-14 11:11:43 -05:00
|
|
|
newDocLines = JSON.stringify(docLines)
|
2017-02-27 03:55:04 -05:00
|
|
|
if newDocLines.indexOf("\u0000") != -1
|
2017-02-27 04:06:24 -05:00
|
|
|
error = new Error("null bytes found in doc lines")
|
2020-01-30 10:17:13 -05:00
|
|
|
# this check was added to catch memory corruption in JSON.stringify
|
2017-12-29 03:13:16 -05:00
|
|
|
logger.error {err: error, doc_id: doc_id, newDocLines: newDocLines}, error.message
|
2017-02-27 04:06:24 -05:00
|
|
|
return callback(error)
|
2017-02-14 11:11:43 -05:00
|
|
|
newHash = RedisManager._computeHash(newDocLines)
|
2017-05-08 10:56:02 -04:00
|
|
|
|
|
|
|
opVersions = appliedOps.map (op) -> op?.v
|
|
|
|
logger.log doc_id: doc_id, version: newVersion, hash: newHash, op_versions: opVersions, "updating doc in redis"
|
|
|
|
|
2017-02-27 08:34:20 -05:00
|
|
|
RedisManager._serializeRanges ranges, (error, ranges) ->
|
|
|
|
if error?
|
|
|
|
logger.error {err: error, doc_id}, error.message
|
|
|
|
return callback(error)
|
2017-05-22 09:58:51 -04:00
|
|
|
if ranges? and ranges.indexOf("\u0000") != -1
|
|
|
|
error = new Error("null bytes found in ranges")
|
2020-01-30 10:17:13 -05:00
|
|
|
# this check was added to catch memory corruption in JSON.stringify
|
2017-05-22 09:58:51 -04:00
|
|
|
logger.error err: error, doc_id: doc_id, ranges: ranges, error.message
|
|
|
|
return callback(error)
|
2017-02-27 08:34:20 -05:00
|
|
|
multi = rclient.multi()
|
2019-09-09 10:27:58 -04:00
|
|
|
multi.set keys.docLines(doc_id:doc_id), newDocLines # index 0
|
2017-05-09 05:34:31 -04:00
|
|
|
multi.set keys.docVersion(doc_id:doc_id), newVersion # index 1
|
|
|
|
multi.set keys.docHash(doc_id:doc_id), newHash # index 2
|
2017-06-08 11:43:06 -04:00
|
|
|
multi.ltrim keys.docOps(doc_id: doc_id), -RedisManager.DOC_OPS_MAX_LENGTH, -1 # index 3
|
2017-02-27 08:34:20 -05:00
|
|
|
if ranges?
|
2017-06-08 11:43:06 -04:00
|
|
|
multi.set keys.ranges(doc_id:doc_id), ranges # index 4
|
2017-02-27 08:34:20 -05:00
|
|
|
else
|
2017-06-08 11:43:06 -04:00
|
|
|
multi.del keys.ranges(doc_id:doc_id) # also index 4
|
|
|
|
# push the ops last so we can get the lengths at fixed index position 7
|
2017-05-09 05:34:31 -04:00
|
|
|
if jsonOps.length > 0
|
2017-06-08 11:43:06 -04:00
|
|
|
multi.rpush keys.docOps(doc_id: doc_id), jsonOps... # index 5
|
|
|
|
# expire must come after rpush since before it will be a no-op if the list is empty
|
|
|
|
multi.expire keys.docOps(doc_id: doc_id), RedisManager.DOC_OPS_TTL # index 6
|
2019-11-15 11:53:16 -05:00
|
|
|
if projectHistoryType is "project-history"
|
2019-11-25 05:51:10 -05:00
|
|
|
metrics.inc 'history-queue', 1, {status: 'skip-track-changes'}
|
|
|
|
logger.log {doc_id}, "skipping push of uncompressed ops for project using project-history"
|
2019-11-15 11:53:16 -05:00
|
|
|
else
|
|
|
|
# project is using old track-changes history service
|
2019-11-25 05:51:10 -05:00
|
|
|
metrics.inc 'history-queue', 1, {status: 'track-changes'}
|
2019-11-15 11:53:16 -05:00
|
|
|
multi.rpush historyKeys.uncompressedHistoryOps(doc_id: doc_id), jsonOps... # index 7
|
2017-10-06 07:23:23 -04:00
|
|
|
# Set the unflushed timestamp to the current time if the doc
|
|
|
|
# hasn't been modified before (the content in mongo has been
|
|
|
|
# valid up to this point). Otherwise leave it alone ("NX" flag).
|
|
|
|
multi.set keys.unflushedTime(doc_id: doc_id), Date.now(), "NX"
|
2019-04-22 20:02:48 -04:00
|
|
|
multi.set keys.lastUpdatedAt(doc_id: doc_id), Date.now() # index 8
|
|
|
|
if updateMeta?.user_id
|
|
|
|
multi.set keys.lastUpdatedBy(doc_id: doc_id), updateMeta.user_id # index 9
|
|
|
|
else
|
|
|
|
multi.del keys.lastUpdatedBy(doc_id: doc_id) # index 9
|
2017-02-27 08:34:20 -05:00
|
|
|
multi.exec (error, result) ->
|
|
|
|
return callback(error) if error?
|
2017-09-29 07:44:36 -04:00
|
|
|
|
2019-11-15 11:53:16 -05:00
|
|
|
if projectHistoryType is 'project-history'
|
|
|
|
docUpdateCount = undefined # only using project history, don't bother with track-changes
|
|
|
|
else
|
|
|
|
# project is using old track-changes history service
|
|
|
|
docUpdateCount = result[7] # length of uncompressedHistoryOps queue (index 7)
|
2017-09-29 07:44:36 -04:00
|
|
|
|
|
|
|
if jsonOps.length > 0 && Settings.apis?.project_history?.enabled
|
2019-11-25 05:51:10 -05:00
|
|
|
metrics.inc 'history-queue', 1, {status: 'project-history'}
|
2018-03-09 09:14:14 -05:00
|
|
|
ProjectHistoryRedisManager.queueOps project_id, jsonOps..., (error, projectUpdateCount) ->
|
2017-09-29 07:44:36 -04:00
|
|
|
callback null, docUpdateCount, projectUpdateCount
|
|
|
|
else
|
|
|
|
callback null, docUpdateCount
|
|
|
|
|
2018-04-23 07:08:04 -04:00
|
|
|
renameDoc: (project_id, doc_id, user_id, update, projectHistoryId, callback = (error) ->) ->
|
2017-11-06 11:14:27 -05:00
|
|
|
RedisManager.getDoc project_id, doc_id, (error, lines, version) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
|
|
|
|
if lines? and version?
|
|
|
|
rclient.set keys.pathname(doc_id:doc_id), update.newPathname, (error) ->
|
|
|
|
return callback(error) if error?
|
2018-04-23 07:08:04 -04:00
|
|
|
ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback
|
2017-11-06 11:14:27 -05:00
|
|
|
else
|
2018-04-23 07:08:04 -04:00
|
|
|
ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback
|
2014-02-12 05:40:42 -05:00
|
|
|
|
2017-10-06 07:23:23 -04:00
|
|
|
clearUnflushedTime: (doc_id, callback = (error) ->) ->
|
|
|
|
rclient.del keys.unflushedTime(doc_id:doc_id), callback
|
|
|
|
|
2014-02-12 05:40:42 -05:00
|
|
|
getDocIdsInProject: (project_id, callback = (error, doc_ids) ->) ->
|
|
|
|
rclient.smembers keys.docsInProject(project_id: project_id), callback
|
2017-07-28 11:34:32 -04:00
|
|
|
|
2019-09-25 11:42:49 -04:00
|
|
|
getDocTimestamps: (doc_ids, callback = (error, result) ->) ->
|
|
|
|
# get lastupdatedat timestamps for an array of doc_ids
|
2019-09-30 08:41:47 -04:00
|
|
|
async.mapSeries doc_ids, (doc_id, cb) ->
|
|
|
|
rclient.get keys.lastUpdatedAt(doc_id: doc_id), cb
|
|
|
|
, callback
|
2019-09-25 11:42:49 -04:00
|
|
|
|
|
|
|
queueFlushAndDeleteProject: (project_id, callback) ->
|
2019-10-01 10:01:53 -04:00
|
|
|
# store the project id in a sorted set ordered by time with a random offset to smooth out spikes
|
|
|
|
SMOOTHING_OFFSET = if Settings.smoothingOffset > 0 then Math.round(Settings.smoothingOffset * Math.random()) else 0
|
|
|
|
rclient.zadd keys.flushAndDeleteQueue(), Date.now() + SMOOTHING_OFFSET, project_id, callback
|
2019-09-25 11:42:49 -04:00
|
|
|
|
|
|
|
getNextProjectToFlushAndDelete: (cutoffTime, callback = (error, key, timestamp)->) ->
|
2019-09-26 05:14:49 -04:00
|
|
|
# find the oldest queued flush that is before the cutoff time
|
2019-09-25 11:42:49 -04:00
|
|
|
rclient.zrangebyscore keys.flushAndDeleteQueue(), 0, cutoffTime, "WITHSCORES", "LIMIT", 0, 1, (err, reply) ->
|
|
|
|
return callback(err) if err?
|
2019-09-26 05:14:49 -04:00
|
|
|
return callback() if !reply?.length # return if no projects ready to be processed
|
|
|
|
# pop the oldest entry (get and remove in a multi)
|
2019-09-25 11:42:49 -04:00
|
|
|
multi = rclient.multi()
|
2019-09-26 10:18:10 -04:00
|
|
|
# Poor man's version of ZPOPMIN, which is only available in Redis 5.
|
2019-09-25 11:42:49 -04:00
|
|
|
multi.zrange keys.flushAndDeleteQueue(), 0, 0, "WITHSCORES"
|
|
|
|
multi.zremrangebyrank keys.flushAndDeleteQueue(), 0, 0
|
2019-09-26 05:14:49 -04:00
|
|
|
multi.zcard keys.flushAndDeleteQueue() # the total length of the queue (for metrics)
|
2019-09-25 11:42:49 -04:00
|
|
|
multi.exec (err, reply) ->
|
|
|
|
return callback(err) if err?
|
|
|
|
return callback() if !reply?.length
|
|
|
|
[key, timestamp] = reply[0]
|
2019-09-25 12:04:36 -04:00
|
|
|
queueLength = reply[2]
|
|
|
|
callback(null, key, timestamp, queueLength)
|
2019-09-25 11:42:49 -04:00
|
|
|
|
2017-02-27 08:34:20 -05:00
|
|
|
_serializeRanges: (ranges, callback = (error, serializedRanges) ->) ->
|
2016-12-08 07:31:43 -05:00
|
|
|
jsonRanges = JSON.stringify(ranges)
|
2017-02-27 08:34:20 -05:00
|
|
|
if jsonRanges? and jsonRanges.length > MAX_RANGES_SIZE
|
|
|
|
return callback new Error("ranges are too large")
|
2016-12-08 07:31:43 -05:00
|
|
|
if jsonRanges == '{}'
|
|
|
|
# Most doc will have empty ranges so don't fill redis with lots of '{}' keys
|
|
|
|
jsonRanges = null
|
2017-02-27 08:34:20 -05:00
|
|
|
return callback null, jsonRanges
|
2017-09-29 07:44:36 -04:00
|
|
|
|
2016-12-08 07:31:43 -05:00
|
|
|
_deserializeRanges: (ranges) ->
|
|
|
|
if !ranges? or ranges == ""
|
|
|
|
return {}
|
|
|
|
else
|
2017-02-14 11:11:43 -05:00
|
|
|
return JSON.parse(ranges)
|
|
|
|
|
|
|
|
_computeHash: (docLines) ->
|
2017-02-21 11:03:06 -05:00
|
|
|
# use sha1 checksum of doclines to detect data corruption.
|
|
|
|
#
|
|
|
|
# note: must specify 'utf8' encoding explicitly, as the default is
|
|
|
|
# binary in node < v5
|
|
|
|
return crypto.createHash('sha1').update(docLines, 'utf8').digest('hex')
|