Settings = require('settings-sharelatex')
rclient = require("redis-sharelatex").createClient(Settings.redis.documentupdater)
_ = require('underscore')
logger = require('logger-sharelatex')
metrics = require('./Metrics')
Errors = require "./Errors"
crypto = require "crypto"
ProjectHistoryRedisManager = require "./ProjectHistoryRedisManager"

# Sometimes Redis calls take an unexpectedly long time. We have to be
# quick with Redis calls because we're holding a lock that expires
# after 30 seconds. We can't let any errors in the rest of the stack
# hold us up, and need to bail out quickly if there is a problem.
MAX_REDIS_REQUEST_LENGTH = 5000 # 5 seconds

# Make times easy to read
minutes = 60 # seconds for Redis expire

# Lua script to write the document and return its hash
# arguments: docLinesKey docLines
setScript = """
	redis.call('set', KEYS[1], ARGV[1])
	return redis.sha1hex(ARGV[1])
"""

logHashErrors = Settings.documentupdater?.logHashErrors
logHashReadErrors = logHashErrors?.read
logHashWriteErrors = logHashErrors?.write

MEGABYTES = 1024 * 1024
MAX_RANGES_SIZE = 3 * MEGABYTES

keys = Settings.redis.documentupdater.key_schema
historyKeys = Settings.redis.history.key_schema

module.exports = RedisManager =
	rclient: rclient
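
	# Store a doc's lines, version, ranges, pathname and projectHistoryId in
	# redis, and add the doc to the project's set of loaded docs.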
	putDocInMemory : (project_id, doc_id, docLines, version, ranges, pathname, projectHistoryId, _callback)->
		timer = new metrics.Timer("redis.put-doc")
		callback = (error) ->
			timer.done()
			_callback(error)
		docLines = JSON.stringify(docLines)
		if docLines.indexOf("\u0000") != -1
			error = new Error("null bytes found in doc lines")
			logger.error {err: error, doc_id: doc_id, docLines: docLines}, error.message
			return callback(error)
		docHash = RedisManager._computeHash(docLines)
		logger.log {project_id, doc_id, version, docHash, pathname, projectHistoryId}, "putting doc in redis"
		RedisManager._serializeRanges ranges, (error, ranges) ->
			if error?
				logger.error {err: error, doc_id, project_id}, error.message
				return callback(error)
			multi = rclient.multi()
			multi.eval setScript, 1, keys.docLines(doc_id:doc_id), docLines
			multi.set keys.projectKey({doc_id:doc_id}), project_id
			multi.set keys.docVersion(doc_id:doc_id), version
			multi.set keys.docHash(doc_id:doc_id), docHash
			if ranges?
				multi.set keys.ranges(doc_id:doc_id), ranges
			else
				multi.del keys.ranges(doc_id:doc_id)
			multi.set keys.pathname(doc_id:doc_id), pathname
			multi.set keys.projectHistoryId(doc_id:doc_id), projectHistoryId
			multi.exec (error, result) ->
				return callback(error) if error?
				# check the hash computed on the redis server
				writeHash = result?[0]
				if logHashWriteErrors and writeHash? and writeHash isnt docHash
					logger.error project_id: project_id, doc_id: doc_id, writeHash: writeHash, origHash: docHash, docLines: docLines, "hash mismatch on putDocInMemory"
				# update docsInProject set
				rclient.sadd keys.docsInProject(project_id:project_id), doc_id, callback
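
	# Delete all the redis keys for a doc, then remove it from the project's
	# doc set and clear the cached project state.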
	removeDocFromMemory : (project_id, doc_id, _callback)->
		logger.log project_id:project_id, doc_id:doc_id, "removing doc from redis"
		callback = (err) ->
			if err?
				logger.err project_id:project_id, doc_id:doc_id, err:err, "error removing doc from redis"
				_callback(err)
			else
				logger.log project_id:project_id, doc_id:doc_id, "removed doc from redis"
				_callback()

		multi = rclient.multi()
		multi.del keys.docLines(doc_id:doc_id)
		multi.del keys.projectKey(doc_id:doc_id)
		multi.del keys.docVersion(doc_id:doc_id)
		multi.del keys.docHash(doc_id:doc_id)
		multi.del keys.ranges(doc_id:doc_id)
		multi.del keys.pathname(doc_id:doc_id)
		multi.del keys.projectHistoryId(doc_id:doc_id)
		multi.del keys.unflushedTime(doc_id:doc_id)
		multi.exec (error) ->
			return callback(error) if error?
			multi = rclient.multi()
			multi.srem keys.docsInProject(project_id:project_id), doc_id
			multi.del keys.projectState(project_id:project_id)
			multi.exec callback
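
	# Atomically swap in the new project state and report whether it differs
	# from the previous one, refreshing the key's 30 minute expiry.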
	checkOrSetProjectState: (project_id, newState, callback = (error, stateChanged) ->) ->
		multi = rclient.multi()
		multi.getset keys.projectState(project_id:project_id), newState
		multi.expire keys.projectState(project_id:project_id), 30 * minutes
		multi.exec (error, response) ->
			return callback(error) if error?
			logger.log project_id: project_id, newState: newState, oldState: response[0], "checking project state"
			callback(null, response[0] isnt newState)

	clearProjectState: (project_id, callback = (error) ->) ->
		rclient.del keys.projectState(project_id:project_id), callback
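
	# Fetch everything about a doc in a single MULTI, verify the hash and
	# project ownership, and bail out if the request exceeded the time limit.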
	getDoc : (project_id, doc_id, callback = (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime) ->)->
		timer = new metrics.Timer("redis.get-doc")
		multi = rclient.multi()
		multi.get keys.docLines(doc_id:doc_id)
		multi.get keys.docVersion(doc_id:doc_id)
		multi.get keys.docHash(doc_id:doc_id)
		multi.get keys.projectKey(doc_id:doc_id)
		multi.get keys.ranges(doc_id:doc_id)
		multi.get keys.pathname(doc_id:doc_id)
		multi.get keys.projectHistoryId(doc_id:doc_id)
		multi.get keys.unflushedTime(doc_id:doc_id)
		multi.exec (error, [docLines, version, storedHash, doc_project_id, ranges, pathname, projectHistoryId, unflushedTime])->
			timeSpan = timer.done()
			return callback(error) if error?
			# check if request took too long and bail out. only do this for
			# get, because it is the first call in each update, so if this
			# passes we'll assume others have a reasonable chance to succeed.
			if timeSpan > MAX_REDIS_REQUEST_LENGTH
				error = new Error("redis getDoc exceeded timeout")
				return callback(error)
			# check sha1 hash value if present
			if docLines? and storedHash?
				computedHash = RedisManager._computeHash(docLines)
				if logHashReadErrors and computedHash isnt storedHash
					logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, computedHash: computedHash, storedHash: storedHash, docLines: docLines, "hash mismatch on retrieved document"

			try
				docLines = JSON.parse docLines
				ranges = RedisManager._deserializeRanges(ranges)
			catch e
				return callback(e)

			version = parseInt(version or 0, 10)
			# check doc is in requested project
			if doc_project_id? and doc_project_id isnt project_id
				logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, "doc not in project"
				return callback(new Errors.NotFoundError("document not found"))

			if projectHistoryId?
				projectHistoryId = parseInt(projectHistoryId)

			# doc is not in redis, bail out
			if !docLines?
				return callback null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime

			# doc should be in project set, check if missing (workaround for missing docs from putDoc)
			rclient.sadd keys.docsInProject(project_id:project_id), doc_id, (error, result) ->
				return callback(error) if error?
				if result isnt 0 # doc should already be in set
					logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, "doc missing from docsInProject set"
				callback null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime

	getDocVersion: (doc_id, callback = (error, version) ->) ->
		rclient.get keys.docVersion(doc_id: doc_id), (error, version) ->
			return callback(error) if error?
			version = parseInt(version, 10)
			callback null, version

	getDocLines: (doc_id, callback = (error, docLines) ->) ->
		rclient.get keys.docLines(doc_id: doc_id), (error, docLines) ->
			return callback(error) if error?
			callback null, docLines
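
	# Fetch ops in the version range [start, end] from the docOps list,
	# translating versions to list offsets via the current doc version.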
	getPreviousDocOps: (doc_id, start, end, callback = (error, jsonOps) ->) ->
		timer = new metrics.Timer("redis.get-prev-docops")
		rclient.llen keys.docOps(doc_id: doc_id), (error, length) ->
			return callback(error) if error?
			rclient.get keys.docVersion(doc_id: doc_id), (error, version) ->
				return callback(error) if error?
				version = parseInt(version, 10)
				first_version_in_redis = version - length

				if start < first_version_in_redis or end > version
					error = new Errors.OpRangeNotAvailableError("doc ops range is not loaded in redis")
					logger.warn {err: error, doc_id, length, version, start, end}, "doc ops range is not loaded in redis"
					return callback(error)

				start = start - first_version_in_redis
				if end > -1
					end = end - first_version_in_redis

				if isNaN(start) or isNaN(end)
					error = new Error("inconsistent version or lengths")
					logger.error {err: error, doc_id, length, version, start, end}, "inconsistent version or length"
					return callback(error)

				rclient.lrange keys.docOps(doc_id: doc_id), start, end, (error, jsonOps) ->
					return callback(error) if error?
					try
						ops = jsonOps.map (jsonOp) -> JSON.parse jsonOp
					catch e
						return callback(e)
					timeSpan = timer.done()
					if timeSpan > MAX_REDIS_REQUEST_LENGTH
						error = new Error("redis getPreviousDocOps exceeded timeout")
						return callback(error)
					callback null, ops

	DOC_OPS_TTL: 60 * minutes
	DOC_OPS_MAX_LENGTH: 100
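
	# Apply an update: verify that the new version is consistent with the
	# current one, then write lines, version, hash, ranges and ops in one
	# MULTI, and queue the ops for the history systems.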
	updateDocument : (project_id, doc_id, docLines, newVersion, appliedOps = [], ranges, callback = (error) ->)->
		RedisManager.getDocVersion doc_id, (error, currentVersion) ->
			return callback(error) if error?
			if currentVersion + appliedOps.length != newVersion
				error = new Error("Version mismatch. '#{doc_id}' is corrupted.")
				logger.error {err: error, doc_id, currentVersion, newVersion, opsLength: appliedOps.length}, "version mismatch"
				return callback(error)

			jsonOps = appliedOps.map (op) -> JSON.stringify op
			for op in jsonOps
				if op.indexOf("\u0000") != -1
					error = new Error("null bytes found in jsonOps")
					logger.error {err: error, doc_id: doc_id, jsonOps: jsonOps}, error.message
					return callback(error)

			newDocLines = JSON.stringify(docLines)
			if newDocLines.indexOf("\u0000") != -1
				error = new Error("null bytes found in doc lines")
				logger.error {err: error, doc_id: doc_id, newDocLines: newDocLines}, error.message
				return callback(error)
			newHash = RedisManager._computeHash(newDocLines)

			opVersions = appliedOps.map (op) -> op?.v
			logger.log doc_id: doc_id, version: newVersion, hash: newHash, op_versions: opVersions, "updating doc in redis"

			RedisManager._serializeRanges ranges, (error, ranges) ->
				if error?
					logger.error {err: error, doc_id}, error.message
					return callback(error)
				if ranges? and ranges.indexOf("\u0000") != -1
					error = new Error("null bytes found in ranges")
					logger.error {err: error, doc_id: doc_id, ranges: ranges}, error.message
					return callback(error)
				multi = rclient.multi()
				multi.eval setScript, 1, keys.docLines(doc_id:doc_id), newDocLines # index 0
				multi.set keys.docVersion(doc_id:doc_id), newVersion # index 1
				multi.set keys.docHash(doc_id:doc_id), newHash # index 2
				multi.ltrim keys.docOps(doc_id: doc_id), -RedisManager.DOC_OPS_MAX_LENGTH, -1 # index 3
				if ranges?
					multi.set keys.ranges(doc_id:doc_id), ranges # index 4
				else
					multi.del keys.ranges(doc_id:doc_id) # also index 4
				# push the ops last so we can get the lengths at fixed index position 7
				if jsonOps.length > 0
					multi.rpush keys.docOps(doc_id: doc_id), jsonOps... # index 5
					# expire must come after rpush since before it will be a no-op if the list is empty
					multi.expire keys.docOps(doc_id: doc_id), RedisManager.DOC_OPS_TTL # index 6
					multi.rpush historyKeys.uncompressedHistoryOps(doc_id: doc_id), jsonOps... # index 7
				# Set the unflushed timestamp to the current time if the doc
				# hasn't been modified before (the content in mongo has been
				# valid up to this point). Otherwise leave it alone ("NX" flag).
				multi.set keys.unflushedTime(doc_id: doc_id), Date.now(), "NX"
				multi.exec (error, result) ->
					return callback(error) if error?
					# check the hash computed on the redis server
					writeHash = result?[0]
					if logHashWriteErrors and writeHash? and writeHash isnt newHash
						logger.error doc_id: doc_id, writeHash: writeHash, origHash: newHash, docLines: newDocLines, "hash mismatch on updateDocument"

					# length of uncompressedHistoryOps queue (index 7)
					docUpdateCount = result[7]

					if jsonOps.length > 0 && Settings.apis?.project_history?.enabled
						ProjectHistoryRedisManager.queueOps project_id, jsonOps..., (error, projectUpdateCount) ->
							callback null, docUpdateCount, projectUpdateCount
					else
						callback null, docUpdateCount
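
	# Update the stored pathname (when the doc is loaded in redis) and queue
	# the rename for the project history.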
	renameDoc: (project_id, doc_id, user_id, update, projectHistoryId, callback = (error) ->) ->
		RedisManager.getDoc project_id, doc_id, (error, lines, version) ->
			return callback(error) if error?

			if lines? and version?
				rclient.set keys.pathname(doc_id:doc_id), update.newPathname, (error) ->
					return callback(error) if error?
					ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback
			else
				ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback

	clearUnflushedTime: (doc_id, callback = (error) ->) ->
		rclient.del keys.unflushedTime(doc_id:doc_id), callback

	getDocIdsInProject: (project_id, callback = (error, doc_ids) ->) ->
		rclient.smembers keys.docsInProject(project_id: project_id), callback
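
	# JSON-encode ranges for storage, rejecting payloads over MAX_RANGES_SIZE
	# and storing nothing for the common empty case.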
	_serializeRanges: (ranges, callback = (error, serializedRanges) ->) ->
		jsonRanges = JSON.stringify(ranges)
		if jsonRanges? and jsonRanges.length > MAX_RANGES_SIZE
			return callback new Error("ranges are too large")
		if jsonRanges == '{}'
			# Most docs will have empty ranges so don't fill redis with lots of '{}' keys
			jsonRanges = null
		return callback null, jsonRanges

	_deserializeRanges: (ranges) ->
		if !ranges? or ranges == ""
			return {}
		else
			return JSON.parse(ranges)

	_computeHash: (docLines) ->
		# use sha1 checksum of doclines to detect data corruption.
		#
		# note: must specify 'utf8' encoding explicitly, as the default is
		# binary in node < v5
		return crypto.createHash('sha1').update(docLines, 'utf8').digest('hex')