From df388935386cd4b28eaab56d9e71154d14f5e2c6 Mon Sep 17 00:00:00 2001
From: Brian Gough
Date: Mon, 24 Apr 2017 10:11:23 +0100
Subject: [PATCH] remove old single redis flushing code

The /flush/all and /check/dangling endpoints relied on scanning every
redis key (RedisManager._getKeys) to enumerate projects and docs. As the
deleted comments note, that only works against a single redis node, not
redis cluster, so drop the endpoints together with their RedisManager
and UpdatesManager helpers.
---
 services/track-changes/app.coffee      |  3 --
 .../app/coffee/HttpController.coffee   | 26 -------------
 .../app/coffee/RedisManager.coffee     | 38 -------------------
 .../app/coffee/UpdatesManager.coffee   | 32 ----------------
 4 files changed, 99 deletions(-)

diff --git a/services/track-changes/app.coffee b/services/track-changes/app.coffee
index 43cddca498..0a43cd1503 100644
--- a/services/track-changes/app.coffee
+++ b/services/track-changes/app.coffee
@@ -50,9 +50,6 @@ app.post "/project/:project_id/doc/:doc_id/version/:version/restore", HttpContro
 app.post '/project/:project_id/doc/:doc_id/push', HttpController.pushDocHistory
 app.post '/project/:project_id/doc/:doc_id/pull', HttpController.pullDocHistory
 
-app.post '/flush/all', HttpController.flushAll
-app.post '/check/dangling', HttpController.checkDanglingUpdates
-
 packWorker = null # use a single packing worker
 
 app.post "/pack", (req, res, next) ->
diff --git a/services/track-changes/app/coffee/HttpController.coffee b/services/track-changes/app/coffee/HttpController.coffee
index 9ede86f3ee..eecc618330 100644
--- a/services/track-changes/app/coffee/HttpController.coffee
+++ b/services/track-changes/app/coffee/HttpController.coffee
@@ -22,32 +22,6 @@ module.exports = HttpController =
 			return next(error) if error?
 			res.send 204
 
-	flushAll: (req, res, next = (error) ->) ->
-		# limit on projects to flush or -1 for all (default)
-		limit = if req.query.limit? then parseInt(req.query.limit, 10) else -1
-		logger.log {limit: limit}, "flushing all projects"
-		UpdatesManager.flushAll limit, (error, result) ->
-			return next(error) if error?
-			{failed, succeeded, all} = result
-			status = "#{succeeded.length} succeeded, #{failed.length} failed"
-			if limit == 0
-				res.status(200).send "#{status}\nwould flush:\n#{all.join('\n')}\n"
-			else if failed.length > 0
-				logger.log {failed: failed, succeeded: succeeded}, "error flushing projects"
-				res.status(500).send "#{status}\nfailed to flush:\n#{failed.join('\n')}\n"
-			else
-				res.status(200).send "#{status}\nflushed #{succeeded.length} projects of #{all.length}\n"
-
-	checkDanglingUpdates: (req, res, next = (error) ->) ->
-		logger.log "checking dangling updates"
-		UpdatesManager.getDanglingUpdates (error, result) ->
-			return next(error) if error?
-			if result.length > 0
-				logger.log {dangling: result}, "found dangling updates"
-				res.status(500).send "dangling updates:\n#{result.join('\n')}\n"
-			else
-				res.status(200).send "no dangling updates found\n"
-
 	checkDoc: (req, res, next = (error) ->) ->
 		doc_id = req.params.doc_id
 		project_id = req.params.project_id
diff --git a/services/track-changes/app/coffee/RedisManager.coffee b/services/track-changes/app/coffee/RedisManager.coffee
index b58b99f11f..a634bbfed9 100644
--- a/services/track-changes/app/coffee/RedisManager.coffee
+++ b/services/track-changes/app/coffee/RedisManager.coffee
@@ -32,41 +32,3 @@ module.exports = RedisManager =
 
 	getDocIdsWithHistoryOps: (project_id, callback = (error, doc_ids) ->) ->
 		rclient.smembers docsWithHistoryOpsKey(project_id), callback
-
-	# iterate over keys asynchronously using redis scan (non-blocking)
-	_getKeys: (pattern, callback) ->
-		cursor = 0 # redis iterator
-		keySet = {} # use hash to avoid duplicate results
-		# scan over all keys looking for pattern
-		doIteration = (cb) ->
-			rclient.scan cursor, "MATCH", pattern, "COUNT", 1000, (error, reply) ->
-				return callback(error) if error?
-				[cursor, keys] = reply
-				for key in keys
-					keySet[key] = true
-				if cursor == '0' # note redis returns string result not numeric
-					return callback(null, Object.keys(keySet))
-				else
-					doIteration()
-		doIteration()
-
-	# extract ids from keys like DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b
-	_extractIds: (keyList) ->
-		ids = (key.split(":")[1] for key in keyList)
-		return ids
-
-	# this will only work on single node redis, not redis cluster
-	getProjectIdsWithHistoryOps: (callback = (error, project_ids) ->) ->
-		RedisManager._getKeys docsWithHistoryOpsKey("*"), (error, project_keys) ->
-			return callback(error) if error?
-			project_ids = RedisManager._extractIds project_keys
-			callback(error, project_ids)
-
-	# this will only work on single node redis, not redis cluster
-	getAllDocIdsWithHistoryOps: (callback = (error, doc_ids) ->) ->
-		# return all the docids, to find dangling history entries after
-		# everything is flushed.
-		RedisManager._getKeys rawUpdatesKey("*"), (error, doc_keys) ->
-			return callback(error) if error?
-			doc_ids = RedisManager._extractIds doc_keys
-			callback(error, doc_ids)
diff --git a/services/track-changes/app/coffee/UpdatesManager.coffee b/services/track-changes/app/coffee/UpdatesManager.coffee
index 48423a39ec..f01681e5f0 100644
--- a/services/track-changes/app/coffee/UpdatesManager.coffee
+++ b/services/track-changes/app/coffee/UpdatesManager.coffee
@@ -144,38 +144,6 @@ module.exports = UpdatesManager =
 				UpdatesManager._processUncompressedUpdatesForDocWithLock project_id, doc_id, temporary, cb
 		async.parallelLimit jobs, 5, callback
 
-	# flush all outstanding changes
-	flushAll: (limit, callback = (error, result) ->) ->
-		RedisManager.getProjectIdsWithHistoryOps (error, project_ids) ->
-			return callback(error) if error?
-			logger.log {count: project_ids?.length, project_ids: project_ids}, "found projects"
-			jobs = []
-			project_ids = _.shuffle project_ids # randomise to avoid hitting same projects each time
-			selectedProjects = if limit < 0 then project_ids else project_ids[0...limit]
-			for project_id in selectedProjects
-				do (project_id) ->
-					jobs.push (cb) ->
-						UpdatesManager.processUncompressedUpdatesForProject project_id, (err) ->
-							return cb(null, {failed: err?, project_id: project_id})
-			async.series jobs, (error, result) ->
-				return callback(error) if error?
-				failedProjects = (x.project_id for x in result when x.failed)
-				succeededProjects = (x.project_id for x in result when not x.failed)
-				callback(null, {failed: failedProjects, succeeded: succeededProjects, all: project_ids})
-
-	getDanglingUpdates: (callback = (error, doc_ids) ->) ->
-		RedisManager.getAllDocIdsWithHistoryOps (error, all_doc_ids) ->
-			return callback(error) if error?
-			RedisManager.getProjectIdsWithHistoryOps (error, all_project_ids) ->
-				return callback(error) if error?
-				# function to get doc_ids for each project
-				task = (cb) -> async.concatSeries all_project_ids, RedisManager.getDocIdsWithHistoryOps, cb
-				# find the dangling doc ids
-				task (error, project_doc_ids) ->
-					dangling_doc_ids = _.difference(all_doc_ids, project_doc_ids)
-					logger.log {all_doc_ids: all_doc_ids, all_project_ids: all_project_ids, project_doc_ids: project_doc_ids, dangling_doc_ids: dangling_doc_ids}, "checking for dangling doc ids"
-					callback(null, dangling_doc_ids)
-
 	getDocUpdates: (project_id, doc_id, options = {}, callback = (error, updates) ->) ->
 		UpdatesManager.processUncompressedUpdatesWithLock project_id, doc_id, (error) ->
 			return callback(error) if error?
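
Note for reviewers: if the old behaviour is ever needed against a single
redis node, the SCAN loop this patch deletes can be reproduced as a
standalone script. The sketch below is illustrative only, not part of the
patch; it assumes the callback-style `redis` npm client and the
`DocsWithHistoryOps:<project_id>` key scheme from RedisManager, and, as the
deleted comments warn, scanning all keys only works on single-node redis,
not redis cluster.

# sketch-scan-keys.coffee -- illustration only, not applied by this patch
redis = require "redis"
rclient = redis.createClient()

# iterate over keys with SCAN (non-blocking), collecting unique matches
getKeys = (pattern, callback) ->
	cursor = 0   # redis returns the next cursor as a string
	keySet = {}  # use a hash to deduplicate repeated SCAN results
	doIteration = ->
		rclient.scan cursor, "MATCH", pattern, "COUNT", 1000, (error, reply) ->
			return callback(error) if error?
			[cursor, keys] = reply
			keySet[key] = true for key in keys
			if cursor == '0'  # a cursor of '0' means the scan has completed
				callback null, Object.keys(keySet)
			else
				doIteration()
	doIteration()

# usage: list project ids that still have history ops queued
getKeys "DocsWithHistoryOps:*", (error, keys) ->
	throw error if error?
	console.log (key.split(":")[1] for key in keys)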