remove old single redis flushing code

parent 60443f3fa6
commit df38893538

4 changed files with 0 additions and 99 deletions
@@ -50,9 +50,6 @@ app.post "/project/:project_id/doc/:doc_id/version/:version/restore", HttpContro
 app.post '/project/:project_id/doc/:doc_id/push', HttpController.pushDocHistory
 app.post '/project/:project_id/doc/:doc_id/pull', HttpController.pullDocHistory

-app.post '/flush/all', HttpController.flushAll
-app.post '/check/dangling', HttpController.checkDanglingUpdates
-
 packWorker = null # use a single packing worker

 app.post "/pack", (req, res, next) ->
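The two removed routes were operational maintenance endpoints. A rough sketch of how /flush/all could have been invoked follows; Node's http module, the localhost host and the 3015 port are illustrative assumptions, not taken from this diff. limit=0 requests a dry run, per the controller code in the next hunk.

# Hypothetical call to the removed maintenance route; host and port are assumptions.
http = require "http"
req = http.request {method: "POST", host: "localhost", port: 3015, path: "/flush/all?limit=0"}, (res) ->
  res.on "data", (chunk) -> process.stdout.write chunk
req.end()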
@@ -22,32 +22,6 @@ module.exports = HttpController =
       return next(error) if error?
       res.send 204

-  flushAll: (req, res, next = (error) ->) ->
-    # limit on projects to flush or -1 for all (default)
-    limit = if req.query.limit? then parseInt(req.query.limit, 10) else -1
-    logger.log {limit: limit}, "flushing all projects"
-    UpdatesManager.flushAll limit, (error, result) ->
-      return next(error) if error?
-      {failed, succeeded, all} = result
-      status = "#{succeeded.length} succeeded, #{failed.length} failed"
-      if limit == 0
-        res.status(200).send "#{status}\nwould flush:\n#{all.join('\n')}\n"
-      else if failed.length > 0
-        logger.log {failed: failed, succeeded: succeeded}, "error flushing projects"
-        res.status(500).send "#{status}\nfailed to flush:\n#{failed.join('\n')}\n"
-      else
-        res.status(200).send "#{status}\nflushed #{succeeded.length} projects of #{all.length}\n"
-
-  checkDanglingUpdates: (req, res, next = (error) ->) ->
-    logger.log "checking dangling updates"
-    UpdatesManager.getDanglingUpdates (error, result) ->
-      return next(error) if error?
-      if result.length > 0
-        logger.log {dangling: result}, "found dangling updates"
-        res.status(500).send "dangling updates:\n#{result.join('\n')}\n"
-      else
-        res.status(200).send "no dangling updates found\n"
-
   checkDoc: (req, res, next = (error) ->) ->
     doc_id = req.params.doc_id
     project_id = req.params.project_id
@@ -32,41 +32,3 @@ module.exports = RedisManager =

   getDocIdsWithHistoryOps: (project_id, callback = (error, doc_ids) ->) ->
     rclient.smembers docsWithHistoryOpsKey(project_id), callback
-
-  # iterate over keys asynchronously using redis scan (non-blocking)
-  _getKeys: (pattern, callback) ->
-    cursor = 0 # redis iterator
-    keySet = {} # use hash to avoid duplicate results
-    # scan over all keys looking for pattern
-    doIteration = (cb) ->
-      rclient.scan cursor, "MATCH", pattern, "COUNT", 1000, (error, reply) ->
-        return callback(error) if error?
-        [cursor, keys] = reply
-        for key in keys
-          keySet[key] = true
-        if cursor == '0' # note redis returns string result not numeric
-          return callback(null, Object.keys(keySet))
-        else
-          doIteration()
-    doIteration()
-
-  # extract ids from keys like DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b
-  _extractIds: (keyList) ->
-    ids = (key.split(":")[1] for key in keyList)
-    return ids
-
-  # this will only work on single node redis, not redis cluster
-  getProjectIdsWithHistoryOps: (callback = (error, project_ids) ->) ->
-    RedisManager._getKeys docsWithHistoryOpsKey("*"), (error, project_keys) ->
-      return callback(error) if error?
-      project_ids = RedisManager._extractIds project_keys
-      callback(error, project_ids)
-
-  # this will only work on single node redis, not redis cluster
-  getAllDocIdsWithHistoryOps: (callback = (error, doc_ids) ->) ->
-    # return all the docids, to find dangling history entries after
-    # everything is flushed.
-    RedisManager._getKeys rawUpdatesKey("*"), (error, doc_keys) ->
-      return callback(error) if error?
-      doc_ids = RedisManager._extractIds doc_keys
-      callback(error, doc_ids)
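The removed _getKeys helper is a standard Redis SCAN cursor loop. Below is a minimal standalone sketch of the same pattern, assuming the older callback-style node redis client used in the removed code; the client setup and the key pattern are illustrative assumptions. Note that redis returns the cursor as a string, so the loop terminates when it comes back as "0".

# Minimal sketch of a SCAN cursor loop with a callback-style redis client.
redis = require "redis"
rclient = redis.createClient() # assumption: local redis, callback-style client

scanKeys = (pattern, callback) ->
  keySet = {} # dedupe: SCAN may return the same key more than once
  iterate = (cursor) ->
    rclient.scan cursor, "MATCH", pattern, "COUNT", 1000, (error, reply) ->
      return callback(error) if error?
      [cursor, keys] = reply
      keySet[key] = true for key in keys
      if cursor is "0" # redis signals completion with the string "0"
        callback null, Object.keys(keySet)
      else
        iterate cursor
  iterate "0"

scanKeys "DocsWithHistoryOps:*", (error, keys) ->
  console.log keys?.length, "keys found"

As in the removed helpers, this only enumerates keys on a single redis node; it does not work across a redis cluster.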
@@ -144,38 +144,6 @@ module.exports = UpdatesManager =
           UpdatesManager._processUncompressedUpdatesForDocWithLock project_id, doc_id, temporary, cb
       async.parallelLimit jobs, 5, callback

-  # flush all outstanding changes
-  flushAll: (limit, callback = (error, result) ->) ->
-    RedisManager.getProjectIdsWithHistoryOps (error, project_ids) ->
-      return callback(error) if error?
-      logger.log {count: project_ids?.length, project_ids: project_ids}, "found projects"
-      jobs = []
-      project_ids = _.shuffle project_ids # randomise to avoid hitting same projects each time
-      selectedProjects = if limit < 0 then project_ids else project_ids[0...limit]
-      for project_id in selectedProjects
-        do (project_id) ->
-          jobs.push (cb) ->
-            UpdatesManager.processUncompressedUpdatesForProject project_id, (err) ->
-              return cb(null, {failed: err?, project_id: project_id})
-      async.series jobs, (error, result) ->
-        return callback(error) if error?
-        failedProjects = (x.project_id for x in result when x.failed)
-        succeededProjects = (x.project_id for x in result when not x.failed)
-        callback(null, {failed: failedProjects, succeeded: succeededProjects, all: project_ids})
-
-  getDanglingUpdates: (callback = (error, doc_ids) ->) ->
-    RedisManager.getAllDocIdsWithHistoryOps (error, all_doc_ids) ->
-      return callback(error) if error?
-      RedisManager.getProjectIdsWithHistoryOps (error, all_project_ids) ->
-        return callback(error) if error?
-        # function to get doc_ids for each project
-        task = (cb) -> async.concatSeries all_project_ids, RedisManager.getDocIdsWithHistoryOps, cb
-        # find the dangling doc ids
-        task (error, project_doc_ids) ->
-          dangling_doc_ids = _.difference(all_doc_ids, project_doc_ids)
-          logger.log {all_doc_ids: all_doc_ids, all_project_ids: all_project_ids, project_doc_ids: project_doc_ids, dangling_doc_ids: dangling_doc_ids}, "checking for dangling doc ids"
-          callback(null, dangling_doc_ids)
-
   getDocUpdates: (project_id, doc_id, options = {}, callback = (error, updates) ->) ->
     UpdatesManager.processUncompressedUpdatesWithLock project_id, doc_id, (error) ->
       return callback(error) if error?
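The removed flushAll builds one job per project and reports failures as data ({failed: err?, ...}) so async.series keeps going instead of aborting on the first error. A minimal sketch of that error-collection pattern, with doWork and the item list as illustrative stand-ins:

# Sketch: collect failures as results rather than stopping the series early.
async = require "async"

doWork = (item, callback) ->
  # pretend work; fail on "b" to show a mixed outcome
  callback(if item is "b" then new Error("boom") else null)

items = ["a", "b", "c"]
jobs = for item in items
  do (item) ->
    (cb) ->
      doWork item, (err) ->
        cb null, {failed: err?, item: item}

async.series jobs, (error, results) ->
  failed = (r.item for r in results when r.failed)
  console.log "#{failed.length} of #{results.length} failed"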