2014-02-25 11:27:14 -05:00
|
|
|
Settings = require "settings-sharelatex"
|
2014-09-26 12:21:33 -04:00
|
|
|
redis = require("redis-sharelatex")
|
2017-04-24 09:23:32 -04:00
|
|
|
rclient = redis.createClient(Settings.redis.history)
|
|
|
|
Keys = Settings.redis.history.key_schema
|
2017-05-05 06:30:11 -04:00
|
|
|
async = require "async"
|
2014-02-25 11:27:14 -05:00
|
|
|
|
|
|
|
module.exports = RedisManager =
	# Fetch up to batchSize raw (still JSON-encoded) history updates for a doc,
	# oldest first, without removing them from the redis list.
	# callback receives (error, jsonUpdates) where jsonUpdates is an array of strings.
	getOldestDocUpdates: (doc_id, batchSize, callback = (error, jsonUpdates) ->) ->
		listKey = Keys.uncompressedHistoryOps({doc_id})
		# LRANGE is inclusive on both ends, hence batchSize - 1
		rclient.lrange listKey, 0, batchSize - 1, callback
|
|
|
|
|
|
|
|
expandDocUpdates: (jsonUpdates, callback = (error, rawUpdates) ->) ->
|
|
|
|
try
|
|
|
|
rawUpdates = ( JSON.parse(update) for update in jsonUpdates or [] )
|
|
|
|
catch e
|
|
|
|
return callback(e)
|
|
|
|
callback null, rawUpdates
|
2014-02-25 11:27:14 -05:00
|
|
|
|
2015-10-08 09:40:42 -04:00
|
|
|
	# Remove a batch of already-compressed updates from the doc's pending-ops
	# list, then drop the doc from the project's "has history ops" set.
	# docUpdates must contain the exact JSON strings previously read from the
	# list (LREM matches by value). callback receives (error) only.
	deleteAppliedDocUpdates: (project_id, doc_id, docUpdates, callback = (error) ->) ->
		multi = rclient.multi()
		# Delete all the updates which have been applied (exact match)
		for update in docUpdates or []
			# count=1: remove a single occurrence, scanning from the head (oldest)
			multi.lrem Keys.uncompressedHistoryOps({doc_id}), 1, update
		multi.exec (error, results) ->
			return callback(error) if error?
			# It's ok to delete the doc_id from the set here. Even though the list
			# of updates may not be empty, we will continue to process it until it is.
			rclient.srem Keys.docsWithHistoryOps({project_id}), doc_id, (error) ->
				return callback(error) if error?
				callback null
|
2014-03-21 09:48:14 -04:00
|
|
|
|
|
|
|
getDocIdsWithHistoryOps: (project_id, callback = (error, doc_ids) ->) ->
|
2017-04-24 09:23:32 -04:00
|
|
|
rclient.smembers Keys.docsWithHistoryOps({project_id}), callback
|
2017-05-02 09:19:49 -04:00
|
|
|
|
|
|
|
# iterate over keys asynchronously using redis scan (non-blocking)
|
2017-05-05 06:30:11 -04:00
|
|
|
# handle all the cluster nodes or single redis server
|
2017-05-02 09:19:49 -04:00
|
|
|
_getKeys: (pattern, callback) ->
|
2017-05-05 06:30:11 -04:00
|
|
|
nodes = rclient.nodes?('master') || [ rclient ];
|
|
|
|
doKeyLookupForNode = (node, cb) ->
|
|
|
|
RedisManager._getKeysFromNode node, pattern, cb
|
|
|
|
async.concatSeries nodes, doKeyLookupForNode, callback
|
|
|
|
|
|
|
|
_getKeysFromNode: (node, pattern, callback) ->
|
2017-05-02 09:19:49 -04:00
|
|
|
cursor = 0 # redis iterator
|
|
|
|
keySet = {} # use hash to avoid duplicate results
|
|
|
|
# scan over all keys looking for pattern
|
|
|
|
doIteration = (cb) ->
|
2017-05-05 06:30:11 -04:00
|
|
|
node.scan cursor, "MATCH", pattern, "COUNT", 1000, (error, reply) ->
|
2017-05-02 09:19:49 -04:00
|
|
|
return callback(error) if error?
|
|
|
|
[cursor, keys] = reply
|
|
|
|
for key in keys
|
|
|
|
keySet[key] = true
|
|
|
|
if cursor == '0' # note redis returns string result not numeric
|
|
|
|
return callback(null, Object.keys(keySet))
|
|
|
|
else
|
|
|
|
doIteration()
|
|
|
|
doIteration()
|
|
|
|
|
|
|
|
# extract ids from keys like DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b
|
2017-05-05 06:30:11 -04:00
|
|
|
# or DocsWithHistoryOps:{57fd0b1f53a8396d22b2c24b} (for redis cluster)
|
2017-05-02 09:19:49 -04:00
|
|
|
_extractIds: (keyList) ->
|
2017-05-05 06:30:11 -04:00
|
|
|
ids = for key in keyList
|
|
|
|
m = key.match(/:\{?([0-9a-f]{24})\}?/) # extract object id
|
|
|
|
m[1]
|
2017-05-02 09:19:49 -04:00
|
|
|
return ids
|
|
|
|
|
|
|
|
getProjectIdsWithHistoryOps: (callback = (error, project_ids) ->) ->
|
|
|
|
RedisManager._getKeys Keys.docsWithHistoryOps({project_id:"*"}), (error, project_keys) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
project_ids = RedisManager._extractIds project_keys
|
|
|
|
callback(error, project_ids)
|
|
|
|
|
|
|
|
getAllDocIdsWithHistoryOps: (callback = (error, doc_ids) ->) ->
|
|
|
|
# return all the docids, to find dangling history entries after
|
|
|
|
# everything is flushed.
|
|
|
|
RedisManager._getKeys Keys.uncompressedHistoryOps({doc_id:"*"}), (error, doc_keys) ->
|
|
|
|
return callback(error) if error?
|
|
|
|
doc_ids = RedisManager._extractIds doc_keys
|
|
|
|
callback(error, doc_ids)
|