From 1fa88826741d47ceb067727863f8f08782620b69 Mon Sep 17 00:00:00 2001 From: decaffeinate Date: Wed, 6 May 2020 12:08:21 +0200 Subject: [PATCH] decaffeinate: Convert DeleteQueueManager.coffee and 58 other files to JS --- .../app/coffee/DeleteQueueManager.js | 165 ++- .../document-updater/app/coffee/DiffCodec.js | 69 +- .../app/coffee/DispatchManager.js | 126 +- .../app/coffee/DocumentManager.js | 513 +++++--- .../document-updater/app/coffee/Errors.js | 64 +- .../app/coffee/HistoryManager.js | 229 ++-- .../app/coffee/HistoryRedisManager.js | 37 +- .../app/coffee/HttpController.js | 519 +++++--- .../app/coffee/LockManager.js | 211 +-- .../app/coffee/LoggerSerializers.js | 58 +- .../document-updater/app/coffee/Metrics.js | 2 +- .../app/coffee/PersistenceManager.js | 208 +-- .../document-updater/app/coffee/Profiler.js | 80 +- .../app/coffee/ProjectFlusher.js | 160 ++- .../app/coffee/ProjectHistoryRedisManager.js | 160 ++- .../app/coffee/ProjectManager.js | 359 ++--- .../app/coffee/RangesManager.js | 170 ++- .../app/coffee/RangesTracker.js | 1169 +++++++++-------- .../app/coffee/RateLimitManager.js | 85 +- .../app/coffee/RealTimeRedisManager.js | 111 +- .../app/coffee/RedisManager.js | 776 ++++++----- .../document-updater/app/coffee/ShareJsDB.js | 92 +- .../app/coffee/ShareJsUpdateManager.js | 160 ++- .../app/coffee/SnapshotManager.js | 88 +- .../document-updater/app/coffee/UpdateKeys.js | 7 +- .../app/coffee/UpdateManager.js | 382 +++--- .../document-updater/app/coffee/mongojs.js | 31 +- .../app/coffee/sharejs/count.js | 40 +- .../app/coffee/sharejs/helpers.js | 126 +- .../app/coffee/sharejs/index.js | 28 +- .../app/coffee/sharejs/json-api.js | 417 +++--- .../app/coffee/sharejs/json.js | 859 ++++++------ .../app/coffee/sharejs/model.js | 1054 ++++++++------- .../app/coffee/sharejs/server/model.js | 1059 ++++++++------- .../app/coffee/sharejs/server/syncqueue.js | 80 +- .../app/coffee/sharejs/simple.js | 74 +- .../app/coffee/sharejs/syncqueue.js | 80 +- .../app/coffee/sharejs/text-api.js | 58 +- .../app/coffee/sharejs/text-composable-api.js | 87 +- .../app/coffee/sharejs/text-composable.js | 480 ++++--- .../app/coffee/sharejs/text-tp2-api.js | 169 ++- .../app/coffee/sharejs/text-tp2.js | 602 +++++---- .../app/coffee/sharejs/text.js | 376 +++--- .../app/coffee/sharejs/types/count.js | 40 +- .../app/coffee/sharejs/types/helpers.js | 126 +- .../app/coffee/sharejs/types/index.js | 28 +- .../app/coffee/sharejs/types/json-api.js | 417 +++--- .../app/coffee/sharejs/types/json.js | 859 ++++++------ .../app/coffee/sharejs/types/model.js | 1054 ++++++++------- .../app/coffee/sharejs/types/simple.js | 74 +- .../app/coffee/sharejs/types/syncqueue.js | 80 +- .../app/coffee/sharejs/types/text-api.js | 58 +- .../sharejs/types/text-composable-api.js | 87 +- .../coffee/sharejs/types/text-composable.js | 480 ++++--- .../app/coffee/sharejs/types/text-tp2-api.js | 169 ++- .../app/coffee/sharejs/types/text-tp2.js | 602 +++++---- .../app/coffee/sharejs/types/text.js | 476 ++++--- .../app/coffee/sharejs/types/web-prelude.js | 10 +- .../app/coffee/sharejs/web-prelude.js | 10 +- 59 files changed, 9218 insertions(+), 6972 deletions(-) diff --git a/services/document-updater/app/coffee/DeleteQueueManager.js b/services/document-updater/app/coffee/DeleteQueueManager.js index 9e3f1c176e..2b6230100a 100644 --- a/services/document-updater/app/coffee/DeleteQueueManager.js +++ b/services/document-updater/app/coffee/DeleteQueueManager.js @@ -1,79 +1,102 @@ -Settings = require('settings-sharelatex') -RedisManager = require 
"./RedisManager" -ProjectManager = require "./ProjectManager" -logger = require "logger-sharelatex" -metrics = require "./Metrics" -async = require "async" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let DeleteQueueManager; +const Settings = require('settings-sharelatex'); +const RedisManager = require("./RedisManager"); +const ProjectManager = require("./ProjectManager"); +const logger = require("logger-sharelatex"); +const metrics = require("./Metrics"); +const async = require("async"); -# Maintain a sorted set of project flushAndDelete requests, ordered by timestamp -# (ZADD), and process them from oldest to newest. A flushAndDelete request comes -# from real-time and is triggered when a user leaves a project. -# -# The aim is to remove the project from redis 5 minutes after the last request -# if there has been no activity (document updates) in that time. If there is -# activity we can expect a further flushAndDelete request when the editing user -# leaves the project. -# -# If a new flushAndDelete request comes in while an existing request is already -# in the queue we update the timestamp as we can postpone flushing further. -# -# Documents are processed by checking the queue, seeing if the first entry is -# older than 5 minutes, and popping it from the queue in that case. +// Maintain a sorted set of project flushAndDelete requests, ordered by timestamp +// (ZADD), and process them from oldest to newest. A flushAndDelete request comes +// from real-time and is triggered when a user leaves a project. +// +// The aim is to remove the project from redis 5 minutes after the last request +// if there has been no activity (document updates) in that time. If there is +// activity we can expect a further flushAndDelete request when the editing user +// leaves the project. +// +// If a new flushAndDelete request comes in while an existing request is already +// in the queue we update the timestamp as we can postpone flushing further. +// +// Documents are processed by checking the queue, seeing if the first entry is +// older than 5 minutes, and popping it from the queue in that case. -module.exports = DeleteQueueManager = - flushAndDeleteOldProjects: (options, callback) -> - startTime = Date.now() - cutoffTime = startTime - options.min_delete_age + 100 * (Math.random() - 0.5) - count = 0 +module.exports = (DeleteQueueManager = { + flushAndDeleteOldProjects(options, callback) { + const startTime = Date.now(); + const cutoffTime = (startTime - options.min_delete_age) + (100 * (Math.random() - 0.5)); + let count = 0; - flushProjectIfNotModified = (project_id, flushTimestamp, cb) -> - ProjectManager.getProjectDocsTimestamps project_id, (err, timestamps) -> - return callback(err) if err? - if timestamps.length == 0 - logger.log {project_id}, "skipping flush of queued project - no timestamps" - return cb() - # are any of the timestamps newer than the time the project was flushed? 
- for timestamp in timestamps when timestamp > flushTimestamp - metrics.inc "queued-delete-skipped" - logger.debug {project_id, timestamps, flushTimestamp}, "found newer timestamp, will skip delete" - return cb() - logger.log {project_id, flushTimestamp}, "flushing queued project" - ProjectManager.flushAndDeleteProjectWithLocks project_id, {skip_history_flush: false}, (err) -> - if err? - logger.err {project_id, err}, "error flushing queued project" - metrics.inc "queued-delete-completed" - return cb(null, true) + const flushProjectIfNotModified = (project_id, flushTimestamp, cb) => ProjectManager.getProjectDocsTimestamps(project_id, function(err, timestamps) { + if (err != null) { return callback(err); } + if (timestamps.length === 0) { + logger.log({project_id}, "skipping flush of queued project - no timestamps"); + return cb(); + } + // are any of the timestamps newer than the time the project was flushed? + for (let timestamp of Array.from(timestamps)) { + if (timestamp > flushTimestamp) { + metrics.inc("queued-delete-skipped"); + logger.debug({project_id, timestamps, flushTimestamp}, "found newer timestamp, will skip delete"); + return cb(); + } + } + logger.log({project_id, flushTimestamp}, "flushing queued project"); + return ProjectManager.flushAndDeleteProjectWithLocks(project_id, {skip_history_flush: false}, function(err) { + if (err != null) { + logger.err({project_id, err}, "error flushing queued project"); + } + metrics.inc("queued-delete-completed"); + return cb(null, true); + }); + }); - flushNextProject = () -> - now = Date.now() - if now - startTime > options.timeout - logger.log "hit time limit on flushing old projects" - return callback(null, count) - if count > options.limit - logger.log "hit count limit on flushing old projects" - return callback(null, count) - RedisManager.getNextProjectToFlushAndDelete cutoffTime, (err, project_id, flushTimestamp, queueLength) -> - return callback(err) if err? - return callback(null, count) if !project_id? 
- logger.log {project_id, queueLength: queueLength}, "flushing queued project" - metrics.globalGauge "queued-flush-backlog", queueLength - flushProjectIfNotModified project_id, flushTimestamp, (err, flushed) -> - count++ if flushed - flushNextProject() + var flushNextProject = function() { + const now = Date.now(); + if ((now - startTime) > options.timeout) { + logger.log("hit time limit on flushing old projects"); + return callback(null, count); + } + if (count > options.limit) { + logger.log("hit count limit on flushing old projects"); + return callback(null, count); + } + return RedisManager.getNextProjectToFlushAndDelete(cutoffTime, function(err, project_id, flushTimestamp, queueLength) { + if (err != null) { return callback(err); } + if ((project_id == null)) { return callback(null, count); } + logger.log({project_id, queueLength}, "flushing queued project"); + metrics.globalGauge("queued-flush-backlog", queueLength); + return flushProjectIfNotModified(project_id, flushTimestamp, function(err, flushed) { + if (flushed) { count++; } + return flushNextProject(); + }); + }); + }; - flushNextProject() + return flushNextProject(); + }, - startBackgroundFlush: () -> - SHORT_DELAY = 10 - LONG_DELAY = 1000 - doFlush = () -> - if Settings.shuttingDown - logger.warn "discontinuing background flush due to shutdown" - return - DeleteQueueManager.flushAndDeleteOldProjects { + startBackgroundFlush() { + const SHORT_DELAY = 10; + const LONG_DELAY = 1000; + var doFlush = function() { + if (Settings.shuttingDown) { + logger.warn("discontinuing background flush due to shutdown"); + return; + } + return DeleteQueueManager.flushAndDeleteOldProjects({ timeout:1000, min_delete_age:3*60*1000, - limit:1000 # high value, to ensure we always flush enough projects - }, (err, flushed) -> - setTimeout doFlush, (if flushed > 10 then SHORT_DELAY else LONG_DELAY) - doFlush() + limit:1000 // high value, to ensure we always flush enough projects + }, (err, flushed) => setTimeout(doFlush, (flushed > 10 ? 
SHORT_DELAY : LONG_DELAY))); + }; + return doFlush(); + } +}); diff --git a/services/document-updater/app/coffee/DiffCodec.js b/services/document-updater/app/coffee/DiffCodec.js index ba5966648e..c5c99b7acc 100644 --- a/services/document-updater/app/coffee/DiffCodec.js +++ b/services/document-updater/app/coffee/DiffCodec.js @@ -1,31 +1,48 @@ -diff_match_patch = require("../lib/diff_match_patch").diff_match_patch -dmp = new diff_match_patch() +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let DiffCodec; +const { + diff_match_patch +} = require("../lib/diff_match_patch"); +const dmp = new diff_match_patch(); -module.exports = DiffCodec = - ADDED: 1 - REMOVED: -1 - UNCHANGED: 0 +module.exports = (DiffCodec = { + ADDED: 1, + REMOVED: -1, + UNCHANGED: 0, - diffAsShareJsOp: (before, after, callback = (error, ops) ->) -> - diffs = dmp.diff_main(before.join("\n"), after.join("\n")) - dmp.diff_cleanupSemantic(diffs) + diffAsShareJsOp(before, after, callback) { + if (callback == null) { callback = function(error, ops) {}; } + const diffs = dmp.diff_main(before.join("\n"), after.join("\n")); + dmp.diff_cleanupSemantic(diffs); - ops = [] - position = 0 - for diff in diffs - type = diff[0] - content = diff[1] - if type == @ADDED - ops.push - i: content + const ops = []; + let position = 0; + for (let diff of Array.from(diffs)) { + const type = diff[0]; + const content = diff[1]; + if (type === this.ADDED) { + ops.push({ + i: content, p: position - position += content.length - else if type == @REMOVED - ops.push - d: content + }); + position += content.length; + } else if (type === this.REMOVED) { + ops.push({ + d: content, p: position - else if type == @UNCHANGED - position += content.length - else - throw "Unknown type" - callback null, ops + }); + } else if (type === this.UNCHANGED) { + position += content.length; + } else { + throw "Unknown type"; + } + } + return callback(null, ops); + } +}); diff --git a/services/document-updater/app/coffee/DispatchManager.js b/services/document-updater/app/coffee/DispatchManager.js index 375f3b98dc..3bf343dd2e 100644 --- a/services/document-updater/app/coffee/DispatchManager.js +++ b/services/document-updater/app/coffee/DispatchManager.js @@ -1,55 +1,81 @@ -Settings = require('settings-sharelatex') -logger = require('logger-sharelatex') -Keys = require('./UpdateKeys') -redis = require("redis-sharelatex") -Errors = require("./Errors") +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS202: Simplify dynamic range loops + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let DispatchManager; +const Settings = require('settings-sharelatex'); +const logger = require('logger-sharelatex'); +const Keys = require('./UpdateKeys'); +const redis = require("redis-sharelatex"); +const Errors = require("./Errors"); -UpdateManager = require('./UpdateManager') -Metrics = require('./Metrics') -RateLimitManager = require('./RateLimitManager') +const UpdateManager = require('./UpdateManager'); +const Metrics = require('./Metrics'); +const RateLimitManager = 
require('./RateLimitManager'); -module.exports = DispatchManager = - createDispatcher: (RateLimiter) -> - client = redis.createClient(Settings.redis.documentupdater) - worker = { - client: client - _waitForUpdateThenDispatchWorker: (callback = (error) ->) -> - timer = new Metrics.Timer "worker.waiting" - worker.client.blpop "pending-updates-list", 0, (error, result) -> - logger.log("getting pending-updates-list", error, result) - timer.done() - return callback(error) if error? - return callback() if !result? - [list_name, doc_key] = result - [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key) - # Dispatch this in the background - backgroundTask = (cb) -> - UpdateManager.processOutstandingUpdatesWithLock project_id, doc_id, (error) -> - # log everything except OpRangeNotAvailable errors, these are normal - if error? - # downgrade OpRangeNotAvailable and "Delete component" errors so they are not sent to sentry - logAsWarning = (error instanceof Errors.OpRangeNotAvailableError) || (error instanceof Errors.DeleteMismatchError) - if logAsWarning - logger.warn err: error, project_id: project_id, doc_id: doc_id, "error processing update" - else - logger.error err: error, project_id: project_id, doc_id: doc_id, "error processing update" - cb() - RateLimiter.run backgroundTask, callback +module.exports = (DispatchManager = { + createDispatcher(RateLimiter) { + const client = redis.createClient(Settings.redis.documentupdater); + var worker = { + client, + _waitForUpdateThenDispatchWorker(callback) { + if (callback == null) { callback = function(error) {}; } + const timer = new Metrics.Timer("worker.waiting"); + return worker.client.blpop("pending-updates-list", 0, function(error, result) { + logger.log("getting pending-updates-list", error, result); + timer.done(); + if (error != null) { return callback(error); } + if ((result == null)) { return callback(); } + const [list_name, doc_key] = Array.from(result); + const [project_id, doc_id] = Array.from(Keys.splitProjectIdAndDocId(doc_key)); + // Dispatch this in the background + const backgroundTask = cb => UpdateManager.processOutstandingUpdatesWithLock(project_id, doc_id, function(error) { + // log everything except OpRangeNotAvailable errors, these are normal + if (error != null) { + // downgrade OpRangeNotAvailable and "Delete component" errors so they are not sent to sentry + const logAsWarning = (error instanceof Errors.OpRangeNotAvailableError) || (error instanceof Errors.DeleteMismatchError); + if (logAsWarning) { + logger.warn({err: error, project_id, doc_id}, "error processing update"); + } else { + logger.error({err: error, project_id, doc_id}, "error processing update"); + } + } + return cb(); + }); + return RateLimiter.run(backgroundTask, callback); + }); + }, - run: () -> - return if Settings.shuttingDown - worker._waitForUpdateThenDispatchWorker (error) => - if error? 
- logger.error err: error, "Error in worker process" - throw error - else - worker.run() - } + run() { + if (Settings.shuttingDown) { return; } + return worker._waitForUpdateThenDispatchWorker(error => { + if (error != null) { + logger.error({err: error}, "Error in worker process"); + throw error; + } else { + return worker.run(); + } + }); + } + }; - return worker + return worker; + }, - createAndStartDispatchers: (number) -> - RateLimiter = new RateLimitManager(number) - for i in [1..number] - worker = DispatchManager.createDispatcher(RateLimiter) - worker.run() + createAndStartDispatchers(number) { + const RateLimiter = new RateLimitManager(number); + return (() => { + const result = []; + for (let i = 1, end = number, asc = 1 <= end; asc ? i <= end : i >= end; asc ? i++ : i--) { + const worker = DispatchManager.createDispatcher(RateLimiter); + result.push(worker.run()); + } + return result; + })(); + } +}); diff --git a/services/document-updater/app/coffee/DocumentManager.js b/services/document-updater/app/coffee/DocumentManager.js index b37d2e9433..c5a9ebb3d1 100644 --- a/services/document-updater/app/coffee/DocumentManager.js +++ b/services/document-updater/app/coffee/DocumentManager.js @@ -1,243 +1,340 @@ -RedisManager = require "./RedisManager" -ProjectHistoryRedisManager = require "./ProjectHistoryRedisManager" -PersistenceManager = require "./PersistenceManager" -DiffCodec = require "./DiffCodec" -logger = require "logger-sharelatex" -Metrics = require "./Metrics" -HistoryManager = require "./HistoryManager" -RealTimeRedisManager = require "./RealTimeRedisManager" -Errors = require "./Errors" -RangesManager = require "./RangesManager" -async = require "async" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let DocumentManager; +const RedisManager = require("./RedisManager"); +const ProjectHistoryRedisManager = require("./ProjectHistoryRedisManager"); +const PersistenceManager = require("./PersistenceManager"); +const DiffCodec = require("./DiffCodec"); +const logger = require("logger-sharelatex"); +const Metrics = require("./Metrics"); +const HistoryManager = require("./HistoryManager"); +const RealTimeRedisManager = require("./RealTimeRedisManager"); +const Errors = require("./Errors"); +const RangesManager = require("./RangesManager"); +const async = require("async"); -MAX_UNFLUSHED_AGE = 300 * 1000 # 5 mins, document should be flushed to mongo this time after a change +const MAX_UNFLUSHED_AGE = 300 * 1000; // 5 mins, document should be flushed to mongo this time after a change -module.exports = DocumentManager = - getDoc: (project_id, doc_id, _callback = (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime, alreadyLoaded) ->) -> - timer = new Metrics.Timer("docManager.getDoc") - callback = (args...) -> - timer.done() - _callback(args...) 
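+// DocumentManager mediates between the redis cache and the persistence API:
+// getDoc serves a doc straight from redis when it is already loaded, and
+// otherwise fetches it from the persistence layer, caches it in redis
+// (together with its history type) and reports alreadyLoaded=false. The
+// *WithLock variants at the bottom of this module wrap the same operations
+// in UpdateManager.lockUpdatesAndDo. Illustrative call (a sketch only):
+//
+//   DocumentManager.getDocWithLock(project_id, doc_id,
+//     (error, lines, version) => { /* ... */ });
+//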
+module.exports = (DocumentManager = { + getDoc(project_id, doc_id, _callback) { + if (_callback == null) { _callback = function(error, lines, version, ranges, pathname, projectHistoryId, unflushedTime, alreadyLoaded) {}; } + const timer = new Metrics.Timer("docManager.getDoc"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - RedisManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime) -> - return callback(error) if error? - if !lines? or !version? - logger.log {project_id, doc_id}, "doc not in redis so getting from persistence API" - PersistenceManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId, projectHistoryType) -> - return callback(error) if error? - logger.log {project_id, doc_id, lines, version, pathname, projectHistoryId, projectHistoryType}, "got doc from persistence API" - RedisManager.putDocInMemory project_id, doc_id, lines, version, ranges, pathname, projectHistoryId, (error) -> - return callback(error) if error? - RedisManager.setHistoryType doc_id, projectHistoryType, (error) -> - return callback(error) if error? - callback null, lines, version, ranges || {}, pathname, projectHistoryId, null, false - else - callback null, lines, version, ranges, pathname, projectHistoryId, unflushedTime, true + return RedisManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId, unflushedTime) { + if (error != null) { return callback(error); } + if ((lines == null) || (version == null)) { + logger.log({project_id, doc_id}, "doc not in redis so getting from persistence API"); + return PersistenceManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId, projectHistoryType) { + if (error != null) { return callback(error); } + logger.log({project_id, doc_id, lines, version, pathname, projectHistoryId, projectHistoryType}, "got doc from persistence API"); + return RedisManager.putDocInMemory(project_id, doc_id, lines, version, ranges, pathname, projectHistoryId, function(error) { + if (error != null) { return callback(error); } + return RedisManager.setHistoryType(doc_id, projectHistoryType, function(error) { + if (error != null) { return callback(error); } + return callback(null, lines, version, ranges || {}, pathname, projectHistoryId, null, false); + }); + }); + }); + } else { + return callback(null, lines, version, ranges, pathname, projectHistoryId, unflushedTime, true); + } + }); + }, - getDocAndRecentOps: (project_id, doc_id, fromVersion, _callback = (error, lines, version, ops, ranges, pathname, projectHistoryId) ->) -> - timer = new Metrics.Timer("docManager.getDocAndRecentOps") - callback = (args...) -> - timer.done() - _callback(args...) + getDocAndRecentOps(project_id, doc_id, fromVersion, _callback) { + if (_callback == null) { _callback = function(error, lines, version, ops, ranges, pathname, projectHistoryId) {}; } + const timer = new Metrics.Timer("docManager.getDocAndRecentOps"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - DocumentManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId) -> - return callback(error) if error? 
- if fromVersion == -1 - callback null, lines, version, [], ranges, pathname, projectHistoryId - else - RedisManager.getPreviousDocOps doc_id, fromVersion, version, (error, ops) -> - return callback(error) if error? - callback null, lines, version, ops, ranges, pathname, projectHistoryId + return DocumentManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId) { + if (error != null) { return callback(error); } + if (fromVersion === -1) { + return callback(null, lines, version, [], ranges, pathname, projectHistoryId); + } else { + return RedisManager.getPreviousDocOps(doc_id, fromVersion, version, function(error, ops) { + if (error != null) { return callback(error); } + return callback(null, lines, version, ops, ranges, pathname, projectHistoryId); + }); + } + }); + }, - setDoc: (project_id, doc_id, newLines, source, user_id, undoing, _callback = (error) ->) -> - timer = new Metrics.Timer("docManager.setDoc") - callback = (args...) -> - timer.done() - _callback(args...) + setDoc(project_id, doc_id, newLines, source, user_id, undoing, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("docManager.setDoc"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - if !newLines? - return callback(new Error("No lines were provided to setDoc")) + if ((newLines == null)) { + return callback(new Error("No lines were provided to setDoc")); + } - UpdateManager = require "./UpdateManager" - DocumentManager.getDoc project_id, doc_id, (error, oldLines, version, ranges, pathname, projectHistoryId, unflushedTime, alreadyLoaded) -> - return callback(error) if error? + const UpdateManager = require("./UpdateManager"); + return DocumentManager.getDoc(project_id, doc_id, function(error, oldLines, version, ranges, pathname, projectHistoryId, unflushedTime, alreadyLoaded) { + if (error != null) { return callback(error); } - if oldLines? and oldLines.length > 0 and oldLines[0].text? - logger.log doc_id: doc_id, project_id: project_id, oldLines: oldLines, newLines: newLines, "document is JSON so not updating" - return callback(null) + if ((oldLines != null) && (oldLines.length > 0) && (oldLines[0].text != null)) { + logger.log({doc_id, project_id, oldLines, newLines}, "document is JSON so not updating"); + return callback(null); + } - logger.log doc_id: doc_id, project_id: project_id, oldLines: oldLines, newLines: newLines, "setting a document via http" - DiffCodec.diffAsShareJsOp oldLines, newLines, (error, op) -> - return callback(error) if error? - if undoing - for o in op or [] - o.u = true # Turn on undo flag for each op for track changes - update = - doc: doc_id - op: op - v: version - meta: - type: "external" - source: source - user_id: user_id - UpdateManager.applyUpdate project_id, doc_id, update, (error) -> - return callback(error) if error? - # If the document was loaded already, then someone has it open - # in a project, and the usual flushing mechanism will happen. - # Otherwise we should remove it immediately since nothing else - # is using it. - if alreadyLoaded - DocumentManager.flushDocIfLoaded project_id, doc_id, (error) -> - return callback(error) if error? 
- callback null - else - DocumentManager.flushAndDeleteDoc project_id, doc_id, {}, (error) -> - # There is no harm in flushing project history if the previous - # call failed and sometimes it is required - HistoryManager.flushProjectChangesAsync project_id + logger.log({doc_id, project_id, oldLines, newLines}, "setting a document via http"); + return DiffCodec.diffAsShareJsOp(oldLines, newLines, function(error, op) { + if (error != null) { return callback(error); } + if (undoing) { + for (let o of Array.from(op || [])) { + o.u = true; + } // Turn on undo flag for each op for track changes + } + const update = { + doc: doc_id, + op, + v: version, + meta: { + type: "external", + source, + user_id + } + }; + return UpdateManager.applyUpdate(project_id, doc_id, update, function(error) { + if (error != null) { return callback(error); } + // If the document was loaded already, then someone has it open + // in a project, and the usual flushing mechanism will happen. + // Otherwise we should remove it immediately since nothing else + // is using it. + if (alreadyLoaded) { + return DocumentManager.flushDocIfLoaded(project_id, doc_id, function(error) { + if (error != null) { return callback(error); } + return callback(null); + }); + } else { + return DocumentManager.flushAndDeleteDoc(project_id, doc_id, {}, function(error) { + // There is no harm in flushing project history if the previous + // call failed and sometimes it is required + HistoryManager.flushProjectChangesAsync(project_id); - return callback(error) if error? - callback null + if (error != null) { return callback(error); } + return callback(null); + }); + } + }); + }); + }); + }, - flushDocIfLoaded: (project_id, doc_id, _callback = (error) ->) -> - timer = new Metrics.Timer("docManager.flushDocIfLoaded") - callback = (args...) -> - timer.done() - _callback(args...) - RedisManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy) -> - return callback(error) if error? - if !lines? or !version? - logger.log project_id: project_id, doc_id: doc_id, "doc is not loaded so not flushing" - callback null # TODO: return a flag to bail out, as we go on to remove doc from memory? - else - logger.log project_id: project_id, doc_id: doc_id, version: version, "flushing doc" - PersistenceManager.setDoc project_id, doc_id, lines, version, ranges, lastUpdatedAt, lastUpdatedBy, (error) -> - return callback(error) if error? - RedisManager.clearUnflushedTime doc_id, callback + flushDocIfLoaded(project_id, doc_id, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("docManager.flushDocIfLoaded"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; + return RedisManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy) { + if (error != null) { return callback(error); } + if ((lines == null) || (version == null)) { + logger.log({project_id, doc_id}, "doc is not loaded so not flushing"); + return callback(null); // TODO: return a flag to bail out, as we go on to remove doc from memory? 
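+        // Flushing is a no-op in this branch; note that callers such as
+        // flushAndDeleteDoc still go on to remove the doc from memory.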
+ } else { + logger.log({project_id, doc_id, version}, "flushing doc"); + return PersistenceManager.setDoc(project_id, doc_id, lines, version, ranges, lastUpdatedAt, lastUpdatedBy, function(error) { + if (error != null) { return callback(error); } + return RedisManager.clearUnflushedTime(doc_id, callback); + }); + } + }); + }, - flushAndDeleteDoc: (project_id, doc_id, options, _callback) -> - timer = new Metrics.Timer("docManager.flushAndDeleteDoc") - callback = (args...) -> - timer.done() - _callback(args...) + flushAndDeleteDoc(project_id, doc_id, options, _callback) { + const timer = new Metrics.Timer("docManager.flushAndDeleteDoc"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - DocumentManager.flushDocIfLoaded project_id, doc_id, (error) -> - if error? - if options.ignoreFlushErrors - logger.warn {project_id: project_id, doc_id: doc_id, err: error}, "ignoring flush error while deleting document" - else - return callback(error) + return DocumentManager.flushDocIfLoaded(project_id, doc_id, function(error) { + if (error != null) { + if (options.ignoreFlushErrors) { + logger.warn({project_id, doc_id, err: error}, "ignoring flush error while deleting document"); + } else { + return callback(error); + } + } - # Flush in the background since it requires a http request - HistoryManager.flushDocChangesAsync project_id, doc_id + // Flush in the background since it requires a http request + HistoryManager.flushDocChangesAsync(project_id, doc_id); - RedisManager.removeDocFromMemory project_id, doc_id, (error) -> - return callback(error) if error? - callback null + return RedisManager.removeDocFromMemory(project_id, doc_id, function(error) { + if (error != null) { return callback(error); } + return callback(null); + }); + }); + }, - acceptChanges: (project_id, doc_id, change_ids = [], _callback = (error) ->) -> - timer = new Metrics.Timer("docManager.acceptChanges") - callback = (args...) -> - timer.done() - _callback(args...) + acceptChanges(project_id, doc_id, change_ids, _callback) { + if (change_ids == null) { change_ids = []; } + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("docManager.acceptChanges"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - DocumentManager.getDoc project_id, doc_id, (error, lines, version, ranges) -> - return callback(error) if error? - if !lines? or !version? - return callback(new Errors.NotFoundError("document not found: #{doc_id}")) - RangesManager.acceptChanges change_ids, ranges, (error, new_ranges) -> - return callback(error) if error? - RedisManager.updateDocument project_id, doc_id, lines, version, [], new_ranges, {}, (error) -> - return callback(error) if error? 
- callback() + return DocumentManager.getDoc(project_id, doc_id, function(error, lines, version, ranges) { + if (error != null) { return callback(error); } + if ((lines == null) || (version == null)) { + return callback(new Errors.NotFoundError(`document not found: ${doc_id}`)); + } + return RangesManager.acceptChanges(change_ids, ranges, function(error, new_ranges) { + if (error != null) { return callback(error); } + return RedisManager.updateDocument(project_id, doc_id, lines, version, [], new_ranges, {}, function(error) { + if (error != null) { return callback(error); } + return callback(); + }); + }); + }); + }, - deleteComment: (project_id, doc_id, comment_id, _callback = (error) ->) -> - timer = new Metrics.Timer("docManager.deleteComment") - callback = (args...) -> - timer.done() - _callback(args...) + deleteComment(project_id, doc_id, comment_id, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("docManager.deleteComment"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - DocumentManager.getDoc project_id, doc_id, (error, lines, version, ranges) -> - return callback(error) if error? - if !lines? or !version? - return callback(new Errors.NotFoundError("document not found: #{doc_id}")) - RangesManager.deleteComment comment_id, ranges, (error, new_ranges) -> - return callback(error) if error? - RedisManager.updateDocument project_id, doc_id, lines, version, [], new_ranges, {}, (error) -> - return callback(error) if error? - callback() + return DocumentManager.getDoc(project_id, doc_id, function(error, lines, version, ranges) { + if (error != null) { return callback(error); } + if ((lines == null) || (version == null)) { + return callback(new Errors.NotFoundError(`document not found: ${doc_id}`)); + } + return RangesManager.deleteComment(comment_id, ranges, function(error, new_ranges) { + if (error != null) { return callback(error); } + return RedisManager.updateDocument(project_id, doc_id, lines, version, [], new_ranges, {}, function(error) { + if (error != null) { return callback(error); } + return callback(); + }); + }); + }); + }, - renameDoc: (project_id, doc_id, user_id, update, projectHistoryId, _callback = (error) ->) -> - timer = new Metrics.Timer("docManager.updateProject") - callback = (args...) -> - timer.done() - _callback(args...) + renameDoc(project_id, doc_id, user_id, update, projectHistoryId, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("docManager.updateProject"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - RedisManager.renameDoc project_id, doc_id, user_id, update, projectHistoryId, callback + return RedisManager.renameDoc(project_id, doc_id, user_id, update, projectHistoryId, callback); + }, - getDocAndFlushIfOld: (project_id, doc_id, callback = (error, doc) ->) -> - DocumentManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime, alreadyLoaded) -> - return callback(error) if error? - # if doc was already loaded see if it needs to be flushed - if alreadyLoaded and unflushedTime? and (Date.now() - unflushedTime) > MAX_UNFLUSHED_AGE - DocumentManager.flushDocIfLoaded project_id, doc_id, (error) -> - return callback(error) if error? 
- callback(null, lines, version) - else - callback(null, lines, version) + getDocAndFlushIfOld(project_id, doc_id, callback) { + if (callback == null) { callback = function(error, doc) {}; } + return DocumentManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId, unflushedTime, alreadyLoaded) { + if (error != null) { return callback(error); } + // if doc was already loaded see if it needs to be flushed + if (alreadyLoaded && (unflushedTime != null) && ((Date.now() - unflushedTime) > MAX_UNFLUSHED_AGE)) { + return DocumentManager.flushDocIfLoaded(project_id, doc_id, function(error) { + if (error != null) { return callback(error); } + return callback(null, lines, version); + }); + } else { + return callback(null, lines, version); + } + }); + }, - resyncDocContents: (project_id, doc_id, callback) -> - logger.log {project_id: project_id, doc_id: doc_id}, "start resyncing doc contents" - RedisManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId) -> - return callback(error) if error? + resyncDocContents(project_id, doc_id, callback) { + logger.log({project_id, doc_id}, "start resyncing doc contents"); + return RedisManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId) { + if (error != null) { return callback(error); } - if !lines? or !version? - logger.log {project_id: project_id, doc_id: doc_id}, "resyncing doc contents - not found in redis - retrieving from web" - PersistenceManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId) -> - if error? - logger.error {project_id: project_id, doc_id: doc_id, getDocError: error}, "resyncing doc contents - error retrieving from web" - return callback(error) - ProjectHistoryRedisManager.queueResyncDocContent project_id, projectHistoryId, doc_id, lines, version, pathname, callback - else - logger.log {project_id: project_id, doc_id: doc_id}, "resyncing doc contents - doc in redis - will queue in redis" - ProjectHistoryRedisManager.queueResyncDocContent project_id, projectHistoryId, doc_id, lines, version, pathname, callback + if ((lines == null) || (version == null)) { + logger.log({project_id, doc_id}, "resyncing doc contents - not found in redis - retrieving from web"); + return PersistenceManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId) { + if (error != null) { + logger.error({project_id, doc_id, getDocError: error}, "resyncing doc contents - error retrieving from web"); + return callback(error); + } + return ProjectHistoryRedisManager.queueResyncDocContent(project_id, projectHistoryId, doc_id, lines, version, pathname, callback); + }); + } else { + logger.log({project_id, doc_id}, "resyncing doc contents - doc in redis - will queue in redis"); + return ProjectHistoryRedisManager.queueResyncDocContent(project_id, projectHistoryId, doc_id, lines, version, pathname, callback); + } + }); + }, - getDocWithLock: (project_id, doc_id, callback = (error, lines, version) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.getDoc, project_id, doc_id, callback + getDocWithLock(project_id, doc_id, callback) { + if (callback == null) { callback = function(error, lines, version) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.getDoc, project_id, doc_id, callback); + }, - getDocAndRecentOpsWithLock: (project_id, doc_id, 
fromVersion, callback = (error, lines, version, ops, ranges, pathname, projectHistoryId) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.getDocAndRecentOps, project_id, doc_id, fromVersion, callback + getDocAndRecentOpsWithLock(project_id, doc_id, fromVersion, callback) { + if (callback == null) { callback = function(error, lines, version, ops, ranges, pathname, projectHistoryId) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.getDocAndRecentOps, project_id, doc_id, fromVersion, callback); + }, - getDocAndFlushIfOldWithLock: (project_id, doc_id, callback = (error, doc) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.getDocAndFlushIfOld, project_id, doc_id, callback + getDocAndFlushIfOldWithLock(project_id, doc_id, callback) { + if (callback == null) { callback = function(error, doc) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.getDocAndFlushIfOld, project_id, doc_id, callback); + }, - setDocWithLock: (project_id, doc_id, lines, source, user_id, undoing, callback = (error) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.setDoc, project_id, doc_id, lines, source, user_id, undoing, callback + setDocWithLock(project_id, doc_id, lines, source, user_id, undoing, callback) { + if (callback == null) { callback = function(error) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.setDoc, project_id, doc_id, lines, source, user_id, undoing, callback); + }, - flushDocIfLoadedWithLock: (project_id, doc_id, callback = (error) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.flushDocIfLoaded, project_id, doc_id, callback + flushDocIfLoadedWithLock(project_id, doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.flushDocIfLoaded, project_id, doc_id, callback); + }, - flushAndDeleteDocWithLock: (project_id, doc_id, options, callback) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.flushAndDeleteDoc, project_id, doc_id, options, callback + flushAndDeleteDocWithLock(project_id, doc_id, options, callback) { + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.flushAndDeleteDoc, project_id, doc_id, options, callback); + }, - acceptChangesWithLock: (project_id, doc_id, change_ids, callback = (error) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.acceptChanges, project_id, doc_id, change_ids, callback + acceptChangesWithLock(project_id, doc_id, change_ids, callback) { + if (callback == null) { callback = function(error) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.acceptChanges, project_id, doc_id, change_ids, callback); + }, - deleteCommentWithLock: (project_id, doc_id, thread_id, callback = (error) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.deleteComment, project_id, doc_id, thread_id, callback + deleteCommentWithLock(project_id, doc_id, thread_id, callback) { + if (callback == null) { callback = 
function(error) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.deleteComment, project_id, doc_id, thread_id, callback); + }, - renameDocWithLock: (project_id, doc_id, user_id, update, projectHistoryId, callback = (error) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.renameDoc, project_id, doc_id, user_id, update, projectHistoryId, callback + renameDocWithLock(project_id, doc_id, user_id, update, projectHistoryId, callback) { + if (callback == null) { callback = function(error) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.renameDoc, project_id, doc_id, user_id, update, projectHistoryId, callback); + }, - resyncDocContentsWithLock: (project_id, doc_id, callback = (error) ->) -> - UpdateManager = require "./UpdateManager" - UpdateManager.lockUpdatesAndDo DocumentManager.resyncDocContents, project_id, doc_id, callback + resyncDocContentsWithLock(project_id, doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + const UpdateManager = require("./UpdateManager"); + return UpdateManager.lockUpdatesAndDo(DocumentManager.resyncDocContents, project_id, doc_id, callback); + } +}); diff --git a/services/document-updater/app/coffee/Errors.js b/services/document-updater/app/coffee/Errors.js index e3d08e7641..a8cb2efb1d 100644 --- a/services/document-updater/app/coffee/Errors.js +++ b/services/document-updater/app/coffee/Errors.js @@ -1,33 +1,39 @@ -NotFoundError = (message) -> - error = new Error(message) - error.name = "NotFoundError" - error.__proto__ = NotFoundError.prototype - return error -NotFoundError.prototype.__proto__ = Error.prototype +let Errors; +var NotFoundError = function(message) { + const error = new Error(message); + error.name = "NotFoundError"; + error.__proto__ = NotFoundError.prototype; + return error; +}; +NotFoundError.prototype.__proto__ = Error.prototype; -OpRangeNotAvailableError = (message) -> - error = new Error(message) - error.name = "OpRangeNotAvailableError" - error.__proto__ = OpRangeNotAvailableError.prototype - return error -OpRangeNotAvailableError.prototype.__proto__ = Error.prototype +var OpRangeNotAvailableError = function(message) { + const error = new Error(message); + error.name = "OpRangeNotAvailableError"; + error.__proto__ = OpRangeNotAvailableError.prototype; + return error; +}; +OpRangeNotAvailableError.prototype.__proto__ = Error.prototype; -ProjectStateChangedError = (message) -> - error = new Error(message) - error.name = "ProjectStateChangedError" - error.__proto__ = ProjectStateChangedError.prototype - return error -ProjectStateChangedError.prototype.__proto__ = Error.prototype +var ProjectStateChangedError = function(message) { + const error = new Error(message); + error.name = "ProjectStateChangedError"; + error.__proto__ = ProjectStateChangedError.prototype; + return error; +}; +ProjectStateChangedError.prototype.__proto__ = Error.prototype; -DeleteMismatchError = (message) -> - error = new Error(message) - error.name = "DeleteMismatchError" - error.__proto__ = DeleteMismatchError.prototype - return error -DeleteMismatchError.prototype.__proto__ = Error.prototype +var DeleteMismatchError = function(message) { + const error = new Error(message); + error.name = "DeleteMismatchError"; + error.__proto__ = DeleteMismatchError.prototype; + return error; +}; +DeleteMismatchError.prototype.__proto__ = Error.prototype; -module.exports = Errors = 
- NotFoundError: NotFoundError - OpRangeNotAvailableError: OpRangeNotAvailableError - ProjectStateChangedError: ProjectStateChangedError - DeleteMismatchError: DeleteMismatchError +module.exports = (Errors = { + NotFoundError, + OpRangeNotAvailableError, + ProjectStateChangedError, + DeleteMismatchError +}); diff --git a/services/document-updater/app/coffee/HistoryManager.js b/services/document-updater/app/coffee/HistoryManager.js index 183ac268f3..ac9ba9a706 100644 --- a/services/document-updater/app/coffee/HistoryManager.js +++ b/services/document-updater/app/coffee/HistoryManager.js @@ -1,107 +1,144 @@ -async = require "async" -logger = require "logger-sharelatex" -request = require "request" -Settings = require "settings-sharelatex" -HistoryRedisManager = require "./HistoryRedisManager" -ProjectHistoryRedisManager = require "./ProjectHistoryRedisManager" -RedisManager = require "./RedisManager" -metrics = require "./Metrics" +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let HistoryManager; +const async = require("async"); +const logger = require("logger-sharelatex"); +const request = require("request"); +const Settings = require("settings-sharelatex"); +const HistoryRedisManager = require("./HistoryRedisManager"); +const ProjectHistoryRedisManager = require("./ProjectHistoryRedisManager"); +const RedisManager = require("./RedisManager"); +const metrics = require("./Metrics"); -module.exports = HistoryManager = - flushDocChangesAsync: (project_id, doc_id) -> - if !Settings.apis?.trackchanges? - logger.warn { doc_id }, "track changes API is not configured, so not flushing" - return - RedisManager.getHistoryType doc_id, (err, projectHistoryType) -> - if err? - logger.warn {err, doc_id}, "error getting history type" - # if there's an error continue and flush to track-changes for safety - if Settings.disableDoubleFlush and projectHistoryType is "project-history" - logger.debug {doc_id, projectHistoryType}, "skipping track-changes flush" - else - metrics.inc 'history-flush', 1, { status: 'track-changes'} - url = "#{Settings.apis.trackchanges.url}/project/#{project_id}/doc/#{doc_id}/flush" - logger.log { project_id, doc_id, url, projectHistoryType }, "flushing doc in track changes api" - request.post url, (error, res, body)-> - if error? - logger.error { error, doc_id, project_id}, "track changes doc to track changes api" - else if res.statusCode < 200 and res.statusCode >= 300 - logger.error { doc_id, project_id }, "track changes api returned a failure status code: #{res.statusCode}" +module.exports = (HistoryManager = { + flushDocChangesAsync(project_id, doc_id) { + if (((Settings.apis != null ? 
Settings.apis.trackchanges : undefined) == null)) { + logger.warn({ doc_id }, "track changes API is not configured, so not flushing"); + return; + } + return RedisManager.getHistoryType(doc_id, function(err, projectHistoryType) { + if (err != null) { + logger.warn({err, doc_id}, "error getting history type"); + } + // if there's an error continue and flush to track-changes for safety + if (Settings.disableDoubleFlush && (projectHistoryType === "project-history")) { + return logger.debug({doc_id, projectHistoryType}, "skipping track-changes flush"); + } else { + metrics.inc('history-flush', 1, { status: 'track-changes'}); + const url = `${Settings.apis.trackchanges.url}/project/${project_id}/doc/${doc_id}/flush`; + logger.log({ project_id, doc_id, url, projectHistoryType }, "flushing doc in track changes api"); + return request.post(url, function(error, res, body){ + if (error != null) { + return logger.error({ error, doc_id, project_id}, "track changes doc to track changes api"); + } else if ((res.statusCode < 200) && (res.statusCode >= 300)) { + return logger.error({ doc_id, project_id }, `track changes api returned a failure status code: ${res.statusCode}`); + } + }); + } + }); + }, - # flush changes in the background - flushProjectChangesAsync: (project_id) -> - return if !Settings.apis?.project_history?.enabled - HistoryManager.flushProjectChanges project_id, {background:true}, -> + // flush changes in the background + flushProjectChangesAsync(project_id) { + if (!__guard__(Settings.apis != null ? Settings.apis.project_history : undefined, x => x.enabled)) { return; } + return HistoryManager.flushProjectChanges(project_id, {background:true}, function() {}); + }, - # flush changes and callback (for when we need to know the queue is flushed) - flushProjectChanges: (project_id, options, callback = (error) ->) -> - return callback() if !Settings.apis?.project_history?.enabled - if options.skip_history_flush - logger.log {project_id}, "skipping flush of project history" - return callback() - metrics.inc 'history-flush', 1, { status: 'project-history'} - url = "#{Settings.apis.project_history.url}/project/#{project_id}/flush" - qs = {} - qs.background = true if options.background # pass on the background flush option if present - logger.log { project_id, url, qs }, "flushing doc in project history api" - request.post {url: url, qs: qs}, (error, res, body)-> - if error? - logger.error { error, project_id}, "project history doc to track changes api" - return callback(error) - else if res.statusCode < 200 and res.statusCode >= 300 - logger.error { project_id }, "project history api returned a failure status code: #{res.statusCode}" - return callback(error) - else - return callback() + // flush changes and callback (for when we need to know the queue is flushed) + flushProjectChanges(project_id, options, callback) { + if (callback == null) { callback = function(error) {}; } + if (!__guard__(Settings.apis != null ? 
Settings.apis.project_history : undefined, x => x.enabled)) { return callback(); } + if (options.skip_history_flush) { + logger.log({project_id}, "skipping flush of project history"); + return callback(); + } + metrics.inc('history-flush', 1, { status: 'project-history'}); + const url = `${Settings.apis.project_history.url}/project/${project_id}/flush`; + const qs = {}; + if (options.background) { qs.background = true; } // pass on the background flush option if present + logger.log({ project_id, url, qs }, "flushing doc in project history api"); + return request.post({url, qs}, function(error, res, body){ + if (error != null) { + logger.error({ error, project_id}, "project history doc to track changes api"); + return callback(error); + } else if ((res.statusCode < 200) && (res.statusCode >= 300)) { + logger.error({ project_id }, `project history api returned a failure status code: ${res.statusCode}`); + return callback(error); + } else { + return callback(); + } + }); + }, - FLUSH_DOC_EVERY_N_OPS: 100 - FLUSH_PROJECT_EVERY_N_OPS: 500 + FLUSH_DOC_EVERY_N_OPS: 100, + FLUSH_PROJECT_EVERY_N_OPS: 500, - recordAndFlushHistoryOps: (project_id, doc_id, ops = [], doc_ops_length, project_ops_length, callback = (error) ->) -> - if ops.length == 0 - return callback() + recordAndFlushHistoryOps(project_id, doc_id, ops, doc_ops_length, project_ops_length, callback) { + if (ops == null) { ops = []; } + if (callback == null) { callback = function(error) {}; } + if (ops.length === 0) { + return callback(); + } - # record updates for project history - if Settings.apis?.project_history?.enabled - if HistoryManager.shouldFlushHistoryOps(project_ops_length, ops.length, HistoryManager.FLUSH_PROJECT_EVERY_N_OPS) - # Do this in the background since it uses HTTP and so may be too - # slow to wait for when processing a doc update. - logger.log { project_ops_length, project_id }, "flushing project history api" - HistoryManager.flushProjectChangesAsync project_id + // record updates for project history + if (__guard__(Settings.apis != null ? Settings.apis.project_history : undefined, x => x.enabled)) { + if (HistoryManager.shouldFlushHistoryOps(project_ops_length, ops.length, HistoryManager.FLUSH_PROJECT_EVERY_N_OPS)) { + // Do this in the background since it uses HTTP and so may be too + // slow to wait for when processing a doc update. + logger.log({ project_ops_length, project_id }, "flushing project history api"); + HistoryManager.flushProjectChangesAsync(project_id); + } + } - # if the doc_ops_length is undefined it means the project is not using track-changes - # so we can bail out here - if typeof(doc_ops_length) is 'undefined' - logger.debug { project_id, doc_id}, "skipping flush to track-changes, only using project-history" - return callback() + // if the doc_ops_length is undefined it means the project is not using track-changes + // so we can bail out here + if (typeof(doc_ops_length) === 'undefined') { + logger.debug({ project_id, doc_id}, "skipping flush to track-changes, only using project-history"); + return callback(); + } - # record updates for track-changes - HistoryRedisManager.recordDocHasHistoryOps project_id, doc_id, ops, (error) -> - return callback(error) if error? - if HistoryManager.shouldFlushHistoryOps(doc_ops_length, ops.length, HistoryManager.FLUSH_DOC_EVERY_N_OPS) - # Do this in the background since it uses HTTP and so may be too - # slow to wait for when processing a doc update. 
- logger.log { doc_ops_length, doc_id, project_id }, "flushing track changes api" - HistoryManager.flushDocChangesAsync project_id, doc_id - callback() + // record updates for track-changes + return HistoryRedisManager.recordDocHasHistoryOps(project_id, doc_id, ops, function(error) { + if (error != null) { return callback(error); } + if (HistoryManager.shouldFlushHistoryOps(doc_ops_length, ops.length, HistoryManager.FLUSH_DOC_EVERY_N_OPS)) { + // Do this in the background since it uses HTTP and so may be too + // slow to wait for when processing a doc update. + logger.log({ doc_ops_length, doc_id, project_id }, "flushing track changes api"); + HistoryManager.flushDocChangesAsync(project_id, doc_id); + } + return callback(); + }); + }, - shouldFlushHistoryOps: (length, ops_length, threshold) -> - return false if !length # don't flush unless we know the length - # We want to flush every 100 ops, i.e. 100, 200, 300, etc - # Find out which 'block' (i.e. 0-99, 100-199) we were in before and after pushing these - # ops. If we've changed, then we've gone over a multiple of 100 and should flush. - # (Most of the time, we will only hit 100 and then flushing will put us back to 0) - previousLength = length - ops_length - prevBlock = Math.floor(previousLength / threshold) - newBlock = Math.floor(length / threshold) - return newBlock != prevBlock + shouldFlushHistoryOps(length, ops_length, threshold) { + if (!length) { return false; } // don't flush unless we know the length + // We want to flush every 100 ops, i.e. 100, 200, 300, etc + // Find out which 'block' (i.e. 0-99, 100-199) we were in before and after pushing these + // ops. If we've changed, then we've gone over a multiple of 100 and should flush. + // (Most of the time, we will only hit 100 and then flushing will put us back to 0) + const previousLength = length - ops_length; + const prevBlock = Math.floor(previousLength / threshold); + const newBlock = Math.floor(length / threshold); + return newBlock !== prevBlock; + }, - MAX_PARALLEL_REQUESTS: 4 + MAX_PARALLEL_REQUESTS: 4, - resyncProjectHistory: (project_id, projectHistoryId, docs, files, callback) -> - ProjectHistoryRedisManager.queueResyncProjectStructure project_id, projectHistoryId, docs, files, (error) -> - return callback(error) if error? - DocumentManager = require "./DocumentManager" - resyncDoc = (doc, cb) -> - DocumentManager.resyncDocContentsWithLock project_id, doc.doc, cb - async.eachLimit docs, HistoryManager.MAX_PARALLEL_REQUESTS, resyncDoc, callback + resyncProjectHistory(project_id, projectHistoryId, docs, files, callback) { + return ProjectHistoryRedisManager.queueResyncProjectStructure(project_id, projectHistoryId, docs, files, function(error) { + if (error != null) { return callback(error); } + const DocumentManager = require("./DocumentManager"); + const resyncDoc = (doc, cb) => DocumentManager.resyncDocContentsWithLock(project_id, doc.doc, cb); + return async.eachLimit(docs, HistoryManager.MAX_PARALLEL_REQUESTS, resyncDoc, callback); + }); + } +}); + +function __guard__(value, transform) { + return (typeof value !== 'undefined' && value !== null) ? 
transform(value) : undefined; +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/HistoryRedisManager.js b/services/document-updater/app/coffee/HistoryRedisManager.js index d9a99a09aa..6e2aba403c 100644 --- a/services/document-updater/app/coffee/HistoryRedisManager.js +++ b/services/document-updater/app/coffee/HistoryRedisManager.js @@ -1,13 +1,26 @@ -Settings = require('settings-sharelatex') -rclient = require("redis-sharelatex").createClient(Settings.redis.history) -Keys = Settings.redis.history.key_schema -logger = require('logger-sharelatex') +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let HistoryRedisManager; +const Settings = require('settings-sharelatex'); +const rclient = require("redis-sharelatex").createClient(Settings.redis.history); +const Keys = Settings.redis.history.key_schema; +const logger = require('logger-sharelatex'); -module.exports = HistoryRedisManager = - recordDocHasHistoryOps: (project_id, doc_id, ops = [], callback = (error) ->) -> - if ops.length == 0 - return callback(new Error("cannot push no ops")) # This should never be called with no ops, but protect against a redis error if we sent an empty array to rpush - logger.log project_id: project_id, doc_id: doc_id, "marking doc in project for history ops" - rclient.sadd Keys.docsWithHistoryOps({project_id}), doc_id, (error) -> - return callback(error) if error? - callback() +module.exports = (HistoryRedisManager = { + recordDocHasHistoryOps(project_id, doc_id, ops, callback) { + if (ops == null) { ops = []; } + if (callback == null) { callback = function(error) {}; } + if (ops.length === 0) { + return callback(new Error("cannot push no ops")); // This should never be called with no ops, but protect against a redis error if we sent an empty array to rpush + } + logger.log({project_id, doc_id}, "marking doc in project for history ops"); + return rclient.sadd(Keys.docsWithHistoryOps({project_id}), doc_id, function(error) { + if (error != null) { return callback(error); } + return callback(); + }); + } +}); diff --git a/services/document-updater/app/coffee/HttpController.js b/services/document-updater/app/coffee/HttpController.js index 67d247ab97..dfc749eeb9 100644 --- a/services/document-updater/app/coffee/HttpController.js +++ b/services/document-updater/app/coffee/HttpController.js @@ -1,231 +1,336 @@ -DocumentManager = require "./DocumentManager" -HistoryManager = require "./HistoryManager" -ProjectManager = require "./ProjectManager" -Errors = require "./Errors" -logger = require "logger-sharelatex" -Metrics = require "./Metrics" -ProjectFlusher = require("./ProjectFlusher") -DeleteQueueManager = require("./DeleteQueueManager") -async = require "async" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let HttpController; +const DocumentManager = require("./DocumentManager"); +const HistoryManager = require("./HistoryManager"); +const ProjectManager = require("./ProjectManager"); +const Errors = require("./Errors"); +const logger = require("logger-sharelatex"); +const Metrics 
= require("./Metrics"); +const ProjectFlusher = require("./ProjectFlusher"); +const DeleteQueueManager = require("./DeleteQueueManager"); +const async = require("async"); -TWO_MEGABYTES = 2 * 1024 * 1024 +const TWO_MEGABYTES = 2 * 1024 * 1024; -module.exports = HttpController = - getDoc: (req, res, next = (error) ->) -> - doc_id = req.params.doc_id - project_id = req.params.project_id - logger.log project_id: project_id, doc_id: doc_id, "getting doc via http" - timer = new Metrics.Timer("http.getDoc") +module.exports = (HttpController = { + getDoc(req, res, next) { + let fromVersion; + if (next == null) { next = function(error) {}; } + const { + doc_id + } = req.params; + const { + project_id + } = req.params; + logger.log({project_id, doc_id}, "getting doc via http"); + const timer = new Metrics.Timer("http.getDoc"); - if req.query?.fromVersion? - fromVersion = parseInt(req.query.fromVersion, 10) - else - fromVersion = -1 + if ((req.query != null ? req.query.fromVersion : undefined) != null) { + fromVersion = parseInt(req.query.fromVersion, 10); + } else { + fromVersion = -1; + } - DocumentManager.getDocAndRecentOpsWithLock project_id, doc_id, fromVersion, (error, lines, version, ops, ranges, pathname) -> - timer.done() - return next(error) if error? - logger.log project_id: project_id, doc_id: doc_id, "got doc via http" - if !lines? or !version? - return next(new Errors.NotFoundError("document not found")) - res.json - id: doc_id - lines: lines - version: version - ops: ops - ranges: ranges - pathname: pathname + return DocumentManager.getDocAndRecentOpsWithLock(project_id, doc_id, fromVersion, function(error, lines, version, ops, ranges, pathname) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id, doc_id}, "got doc via http"); + if ((lines == null) || (version == null)) { + return next(new Errors.NotFoundError("document not found")); + } + return res.json({ + id: doc_id, + lines, + version, + ops, + ranges, + pathname + }); + }); + }, - _getTotalSizeOfLines: (lines) -> - size = 0 - for line in lines - size += (line.length + 1) - return size + _getTotalSizeOfLines(lines) { + let size = 0; + for (let line of Array.from(lines)) { + size += (line.length + 1); + } + return size; + }, - getProjectDocsAndFlushIfOld: (req, res, next = (error) ->) -> - project_id = req.params.project_id - projectStateHash = req.query?.state - # exclude is string of existing docs "id:version,id:version,..." - excludeItems = req.query?.exclude?.split(',') or [] - logger.log project_id: project_id, exclude: excludeItems, "getting docs via http" - timer = new Metrics.Timer("http.getAllDocs") - excludeVersions = {} - for item in excludeItems - [id,version] = item?.split(':') - excludeVersions[id] = version - logger.log {project_id: project_id, projectStateHash: projectStateHash, excludeVersions: excludeVersions}, "excluding versions" - ProjectManager.getProjectDocsAndFlushIfOld project_id, projectStateHash, excludeVersions, (error, result) -> - timer.done() - if error instanceof Errors.ProjectStateChangedError - res.sendStatus 409 # conflict - else if error? - return next(error) - else - logger.log project_id: project_id, result: ("#{doc._id}:#{doc.v}" for doc in result), "got docs via http" - res.send result + getProjectDocsAndFlushIfOld(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + project_id + } = req.params; + const projectStateHash = req.query != null ? 
req.query.state : undefined; + // exclude is string of existing docs "id:version,id:version,..." + const excludeItems = __guard__(req.query != null ? req.query.exclude : undefined, x => x.split(',')) || []; + logger.log({project_id, exclude: excludeItems}, "getting docs via http"); + const timer = new Metrics.Timer("http.getAllDocs"); + const excludeVersions = {}; + for (let item of Array.from(excludeItems)) { + const [id,version] = Array.from(item != null ? item.split(':') : undefined); + excludeVersions[id] = version; + } + logger.log({project_id, projectStateHash, excludeVersions}, "excluding versions"); + return ProjectManager.getProjectDocsAndFlushIfOld(project_id, projectStateHash, excludeVersions, function(error, result) { + timer.done(); + if (error instanceof Errors.ProjectStateChangedError) { + return res.sendStatus(409); // conflict + } else if (error != null) { + return next(error); + } else { + logger.log({project_id, result: ((Array.from(result).map((doc) => `${doc._id}:${doc.v}`)))}, "got docs via http"); + return res.send(result); + } + }); + }, - clearProjectState: (req, res, next = (error) ->) -> - project_id = req.params.project_id - timer = new Metrics.Timer("http.clearProjectState") - logger.log project_id: project_id, "clearing project state via http" - ProjectManager.clearProjectState project_id, (error) -> - timer.done() - if error? - return next(error) - else - res.sendStatus 200 + clearProjectState(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + project_id + } = req.params; + const timer = new Metrics.Timer("http.clearProjectState"); + logger.log({project_id}, "clearing project state via http"); + return ProjectManager.clearProjectState(project_id, function(error) { + timer.done(); + if (error != null) { + return next(error); + } else { + return res.sendStatus(200); + } + }); + }, - setDoc: (req, res, next = (error) ->) -> - doc_id = req.params.doc_id - project_id = req.params.project_id - {lines, source, user_id, undoing} = req.body - lineSize = HttpController._getTotalSizeOfLines(lines) - if lineSize > TWO_MEGABYTES - logger.log {project_id, doc_id, source, lineSize, user_id}, "document too large, returning 406 response" - return res.sendStatus 406 - logger.log {project_id, doc_id, lines, source, user_id, undoing}, "setting doc via http" - timer = new Metrics.Timer("http.setDoc") - DocumentManager.setDocWithLock project_id, doc_id, lines, source, user_id, undoing, (error) -> - timer.done() - return next(error) if error? 
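
// A small sketch of the `exclude` query-string format handled by
// getProjectDocsAndFlushIfOld above: a comma-separated list of "id:version"
// pairs parsed into a lookup map from doc id to version. The sample id is
// made up for illustration.
function parseExcludeVersions(exclude) {
  const excludeVersions = {};
  for (const item of (exclude ? exclude.split(',') : [])) {
    const [id, version] = item.split(':');
    excludeVersions[id] = version;
  }
  return excludeVersions;
}

console.log(parseExcludeVersions("57fd0b1f53a8396d22b2c24b:42"));
// => { '57fd0b1f53a8396d22b2c24b': '42' }
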
- logger.log project_id: project_id, doc_id: doc_id, "set doc via http" - res.sendStatus 204 # No Content + setDoc(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + doc_id + } = req.params; + const { + project_id + } = req.params; + const {lines, source, user_id, undoing} = req.body; + const lineSize = HttpController._getTotalSizeOfLines(lines); + if (lineSize > TWO_MEGABYTES) { + logger.log({project_id, doc_id, source, lineSize, user_id}, "document too large, returning 406 response"); + return res.sendStatus(406); + } + logger.log({project_id, doc_id, lines, source, user_id, undoing}, "setting doc via http"); + const timer = new Metrics.Timer("http.setDoc"); + return DocumentManager.setDocWithLock(project_id, doc_id, lines, source, user_id, undoing, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id, doc_id}, "set doc via http"); + return res.sendStatus(204); + }); + }, // No Content - flushDocIfLoaded: (req, res, next = (error) ->) -> - doc_id = req.params.doc_id - project_id = req.params.project_id - logger.log project_id: project_id, doc_id: doc_id, "flushing doc via http" - timer = new Metrics.Timer("http.flushDoc") - DocumentManager.flushDocIfLoadedWithLock project_id, doc_id, (error) -> - timer.done() - return next(error) if error? - logger.log project_id: project_id, doc_id: doc_id, "flushed doc via http" - res.sendStatus 204 # No Content + flushDocIfLoaded(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + doc_id + } = req.params; + const { + project_id + } = req.params; + logger.log({project_id, doc_id}, "flushing doc via http"); + const timer = new Metrics.Timer("http.flushDoc"); + return DocumentManager.flushDocIfLoadedWithLock(project_id, doc_id, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id, doc_id}, "flushed doc via http"); + return res.sendStatus(204); + }); + }, // No Content - deleteDoc: (req, res, next = (error) ->) -> - doc_id = req.params.doc_id - project_id = req.params.project_id - ignoreFlushErrors = req.query.ignore_flush_errors == 'true' - timer = new Metrics.Timer("http.deleteDoc") - logger.log project_id: project_id, doc_id: doc_id, "deleting doc via http" - DocumentManager.flushAndDeleteDocWithLock project_id, doc_id, { ignoreFlushErrors: ignoreFlushErrors }, (error) -> - timer.done() - # There is no harm in flushing project history if the previous call - # failed and sometimes it is required - HistoryManager.flushProjectChangesAsync project_id + deleteDoc(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + doc_id + } = req.params; + const { + project_id + } = req.params; + const ignoreFlushErrors = req.query.ignore_flush_errors === 'true'; + const timer = new Metrics.Timer("http.deleteDoc"); + logger.log({project_id, doc_id}, "deleting doc via http"); + return DocumentManager.flushAndDeleteDocWithLock(project_id, doc_id, { ignoreFlushErrors }, function(error) { + timer.done(); + // There is no harm in flushing project history if the previous call + // failed and sometimes it is required + HistoryManager.flushProjectChangesAsync(project_id); - return next(error) if error? 
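
// A brief sketch of the document-size guard used by setDoc above: the byte
// estimate counts each line's length plus one for its trailing newline, and
// payloads over TWO_MEGABYTES are rejected with a 406 before any work is done.
function totalSizeOfLines(lines) {
  let size = 0;
  for (const line of lines) {
    size += line.length + 1; // +1 for the newline that joins the lines
  }
  return size;
}

console.log(totalSizeOfLines(["hello", "world"])); // 12 = (5+1) + (5+1)
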
- logger.log project_id: project_id, doc_id: doc_id, "deleted doc via http" - res.sendStatus 204 # No Content + if (error != null) { return next(error); } + logger.log({project_id, doc_id}, "deleted doc via http"); + return res.sendStatus(204); + }); + }, // No Content - flushProject: (req, res, next = (error) ->) -> - project_id = req.params.project_id - logger.log project_id: project_id, "flushing project via http" - timer = new Metrics.Timer("http.flushProject") - ProjectManager.flushProjectWithLocks project_id, (error) -> - timer.done() - return next(error) if error? - logger.log project_id: project_id, "flushed project via http" - res.sendStatus 204 # No Content + flushProject(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + project_id + } = req.params; + logger.log({project_id}, "flushing project via http"); + const timer = new Metrics.Timer("http.flushProject"); + return ProjectManager.flushProjectWithLocks(project_id, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id}, "flushed project via http"); + return res.sendStatus(204); + }); + }, // No Content - deleteProject: (req, res, next = (error) ->) -> - project_id = req.params.project_id - logger.log project_id: project_id, "deleting project via http" - options = {} - options.background = true if req.query?.background # allow non-urgent flushes to be queued - options.skip_history_flush = true if req.query?.shutdown # don't flush history when realtime shuts down - if req.query?.background - ProjectManager.queueFlushAndDeleteProject project_id, (error) -> - return next(error) if error? - logger.log project_id: project_id, "queue delete of project via http" - res.sendStatus 204 # No Content - else - timer = new Metrics.Timer("http.deleteProject") - ProjectManager.flushAndDeleteProjectWithLocks project_id, options, (error) -> - timer.done() - return next(error) if error? - logger.log project_id: project_id, "deleted project via http" - res.sendStatus 204 # No Content + deleteProject(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + project_id + } = req.params; + logger.log({project_id}, "deleting project via http"); + const options = {}; + if (req.query != null ? req.query.background : undefined) { options.background = true; } // allow non-urgent flushes to be queued + if (req.query != null ? req.query.shutdown : undefined) { options.skip_history_flush = true; } // don't flush history when realtime shuts down + if (req.query != null ? 
req.query.background : undefined) { + return ProjectManager.queueFlushAndDeleteProject(project_id, function(error) { + if (error != null) { return next(error); } + logger.log({project_id}, "queue delete of project via http"); + return res.sendStatus(204); + }); // No Content + } else { + const timer = new Metrics.Timer("http.deleteProject"); + return ProjectManager.flushAndDeleteProjectWithLocks(project_id, options, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id}, "deleted project via http"); + return res.sendStatus(204); + }); + } + }, // No Content - deleteMultipleProjects: (req, res, next = (error) ->) -> - project_ids = req.body?.project_ids || [] - logger.log project_ids: project_ids, "deleting multiple projects via http" - async.eachSeries project_ids, (project_id, cb) -> - logger.log project_id: project_id, "queue delete of project via http" - ProjectManager.queueFlushAndDeleteProject project_id, cb - , (error) -> - return next(error) if error? - res.sendStatus 204 # No Content + deleteMultipleProjects(req, res, next) { + if (next == null) { next = function(error) {}; } + const project_ids = (req.body != null ? req.body.project_ids : undefined) || []; + logger.log({project_ids}, "deleting multiple projects via http"); + return async.eachSeries(project_ids, function(project_id, cb) { + logger.log({project_id}, "queue delete of project via http"); + return ProjectManager.queueFlushAndDeleteProject(project_id, cb); + } + , function(error) { + if (error != null) { return next(error); } + return res.sendStatus(204); + }); + }, // No Content - acceptChanges: (req, res, next = (error) ->) -> - {project_id, doc_id} = req.params - change_ids = req.body?.change_ids - if !change_ids? - change_ids = [ req.params.change_id ] - logger.log {project_id, doc_id}, "accepting #{ change_ids.length } changes via http" - timer = new Metrics.Timer("http.acceptChanges") - DocumentManager.acceptChangesWithLock project_id, doc_id, change_ids, (error) -> - timer.done() - return next(error) if error? - logger.log {project_id, doc_id}, "accepted #{ change_ids.length } changes via http" - res.sendStatus 204 # No Content + acceptChanges(req, res, next) { + if (next == null) { next = function(error) {}; } + const {project_id, doc_id} = req.params; + let change_ids = req.body != null ? req.body.change_ids : undefined; + if ((change_ids == null)) { + change_ids = [ req.params.change_id ]; + } + logger.log({project_id, doc_id}, `accepting ${ change_ids.length } changes via http`); + const timer = new Metrics.Timer("http.acceptChanges"); + return DocumentManager.acceptChangesWithLock(project_id, doc_id, change_ids, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id, doc_id}, `accepted ${ change_ids.length } changes via http`); + return res.sendStatus(204); + }); + }, // No Content - deleteComment: (req, res, next = (error) ->) -> - {project_id, doc_id, comment_id} = req.params - logger.log {project_id, doc_id, comment_id}, "deleting comment via http" - timer = new Metrics.Timer("http.deleteComment") - DocumentManager.deleteCommentWithLock project_id, doc_id, comment_id, (error) -> - timer.done() - return next(error) if error? 
- logger.log {project_id, doc_id, comment_id}, "deleted comment via http" - res.sendStatus 204 # No Content + deleteComment(req, res, next) { + if (next == null) { next = function(error) {}; } + const {project_id, doc_id, comment_id} = req.params; + logger.log({project_id, doc_id, comment_id}, "deleting comment via http"); + const timer = new Metrics.Timer("http.deleteComment"); + return DocumentManager.deleteCommentWithLock(project_id, doc_id, comment_id, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id, doc_id, comment_id}, "deleted comment via http"); + return res.sendStatus(204); + }); + }, // No Content - updateProject: (req, res, next = (error) ->) -> - timer = new Metrics.Timer("http.updateProject") - project_id = req.params.project_id - {projectHistoryId, userId, docUpdates, fileUpdates, version} = req.body - logger.log {project_id, docUpdates, fileUpdates, version}, "updating project via http" + updateProject(req, res, next) { + if (next == null) { next = function(error) {}; } + const timer = new Metrics.Timer("http.updateProject"); + const { + project_id + } = req.params; + const {projectHistoryId, userId, docUpdates, fileUpdates, version} = req.body; + logger.log({project_id, docUpdates, fileUpdates, version}, "updating project via http"); - ProjectManager.updateProjectWithLocks project_id, projectHistoryId, userId, docUpdates, fileUpdates, version, (error) -> - timer.done() - return next(error) if error? - logger.log project_id: project_id, "updated project via http" - res.sendStatus 204 # No Content + return ProjectManager.updateProjectWithLocks(project_id, projectHistoryId, userId, docUpdates, fileUpdates, version, function(error) { + timer.done(); + if (error != null) { return next(error); } + logger.log({project_id}, "updated project via http"); + return res.sendStatus(204); + }); + }, // No Content - resyncProjectHistory: (req, res, next = (error) ->) -> - project_id = req.params.project_id - {projectHistoryId, docs, files} = req.body + resyncProjectHistory(req, res, next) { + if (next == null) { next = function(error) {}; } + const { + project_id + } = req.params; + const {projectHistoryId, docs, files} = req.body; - logger.log {project_id, docs, files}, "queuing project history resync via http" - HistoryManager.resyncProjectHistory project_id, projectHistoryId, docs, files, (error) -> - return next(error) if error? - logger.log {project_id}, "queued project history resync via http" - res.sendStatus 204 + logger.log({project_id, docs, files}, "queuing project history resync via http"); + return HistoryManager.resyncProjectHistory(project_id, projectHistoryId, docs, files, function(error) { + if (error != null) { return next(error); } + logger.log({project_id}, "queued project history resync via http"); + return res.sendStatus(204); + }); + }, - flushAllProjects: (req, res, next = (error)-> )-> - res.setTimeout(5 * 60 * 1000) - options = - limit : req.query.limit || 1000 - concurrency : req.query.concurrency || 5 + flushAllProjects(req, res, next ){ + if (next == null) { next = function(error){}; } + res.setTimeout(5 * 60 * 1000); + const options = { + limit : req.query.limit || 1000, + concurrency : req.query.concurrency || 5, dryRun : req.query.dryRun || false - ProjectFlusher.flushAllProjects options, (err, project_ids)-> - if err? 
- logger.err err:err, "error bulk flushing projects" - res.sendStatus 500 - else - res.send project_ids + }; + return ProjectFlusher.flushAllProjects(options, function(err, project_ids){ + if (err != null) { + logger.err({err}, "error bulk flushing projects"); + return res.sendStatus(500); + } else { + return res.send(project_ids); + } + }); + }, - flushQueuedProjects: (req, res, next = (error) ->) -> - res.setTimeout(10 * 60 * 1000) - options = - limit : req.query.limit || 1000 - timeout: 5 * 60 * 1000 - min_delete_age: req.query.min_delete_age || 5 * 60 * 1000 - DeleteQueueManager.flushAndDeleteOldProjects options, (err, flushed)-> - if err? - logger.err err:err, "error flushing old projects" - res.sendStatus 500 - else - logger.log {flushed: flushed}, "flush of queued projects completed" - res.send {flushed: flushed} + flushQueuedProjects(req, res, next) { + if (next == null) { next = function(error) {}; } + res.setTimeout(10 * 60 * 1000); + const options = { + limit : req.query.limit || 1000, + timeout: 5 * 60 * 1000, + min_delete_age: req.query.min_delete_age || (5 * 60 * 1000) + }; + return DeleteQueueManager.flushAndDeleteOldProjects(options, function(err, flushed){ + if (err != null) { + logger.err({err}, "error flushing old projects"); + return res.sendStatus(500); + } else { + logger.log({flushed}, "flush of queued projects completed"); + return res.send({flushed}); + } + }); + } +}); + +function __guard__(value, transform) { + return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined; +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/LockManager.js b/services/document-updater/app/coffee/LockManager.js index 8f62e46ccb..2b278c31e4 100644 --- a/services/document-updater/app/coffee/LockManager.js +++ b/services/document-updater/app/coffee/LockManager.js @@ -1,102 +1,131 @@ -metrics = require('./Metrics') -Settings = require('settings-sharelatex') -redis = require("redis-sharelatex") -rclient = redis.createClient(Settings.redis.lock) -keys = Settings.redis.lock.key_schema -logger = require "logger-sharelatex" -os = require "os" -crypto = require "crypto" +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let LockManager; +const metrics = require('./Metrics'); +const Settings = require('settings-sharelatex'); +const redis = require("redis-sharelatex"); +const rclient = redis.createClient(Settings.redis.lock); +const keys = Settings.redis.lock.key_schema; +const logger = require("logger-sharelatex"); +const os = require("os"); +const crypto = require("crypto"); -Profiler = require "./Profiler" +const Profiler = require("./Profiler"); -HOST = os.hostname() -PID = process.pid -RND = crypto.randomBytes(4).toString('hex') -COUNT = 0 +const HOST = os.hostname(); +const PID = process.pid; +const RND = crypto.randomBytes(4).toString('hex'); +let COUNT = 0; -MAX_REDIS_REQUEST_LENGTH = 5000 # 5 seconds +const MAX_REDIS_REQUEST_LENGTH = 5000; // 5 seconds -module.exports = LockManager = - LOCK_TEST_INTERVAL: 50 # 50ms between each test of the lock - MAX_TEST_INTERVAL: 1000 # back off to 1s between each test of the lock - MAX_LOCK_WAIT_TIME: 10000 # 10s maximum time to spend trying to get the lock - LOCK_TTL: 30 # seconds. Time until lock auto expires in redis. 
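
// A hedged sketch of the locking pattern implemented below: acquire with a
// unique value via SET NX EX so the lock auto-expires if the holder crashes,
// and release with a compare-and-delete Lua script so only the current holder
// can unlock. `client` is assumed to be a connected node_redis client; the
// key and value here are illustrative stand-ins for the real key schema.
const UNLOCK_SCRIPT = 'if redis.call("get", KEYS[1]) == ARGV[1] then return redis.call("del", KEYS[1]) else return 0 end';

function acquireLock(client, key, value, ttlSeconds, callback) {
  // NX: set only if absent; EX: expire so a dead holder cannot block forever
  client.set(key, value, "EX", ttlSeconds, "NX", (err, reply) =>
    callback(err, reply === "OK"));
}

function releaseLock(client, key, value, callback) {
  // deletes the key only if it still holds our value (exactly one key freed)
  client.eval(UNLOCK_SCRIPT, 1, key, value, callback);
}
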
+module.exports = (LockManager = { + LOCK_TEST_INTERVAL: 50, // 50ms between each test of the lock + MAX_TEST_INTERVAL: 1000, // back off to 1s between each test of the lock + MAX_LOCK_WAIT_TIME: 10000, // 10s maximum time to spend trying to get the lock + LOCK_TTL: 30, // seconds. Time until lock auto expires in redis. - # Use a signed lock value as described in - # http://redis.io/topics/distlock#correct-implementation-with-a-single-instance - # to prevent accidental unlocking by multiple processes - randomLock : () -> - time = Date.now() - return "locked:host=#{HOST}:pid=#{PID}:random=#{RND}:time=#{time}:count=#{COUNT++}" + // Use a signed lock value as described in + // http://redis.io/topics/distlock#correct-implementation-with-a-single-instance + // to prevent accidental unlocking by multiple processes + randomLock() { + const time = Date.now(); + return `locked:host=${HOST}:pid=${PID}:random=${RND}:time=${time}:count=${COUNT++}`; + }, - unlockScript: 'if redis.call("get", KEYS[1]) == ARGV[1] then return redis.call("del", KEYS[1]) else return 0 end'; + unlockScript: 'if redis.call("get", KEYS[1]) == ARGV[1] then return redis.call("del", KEYS[1]) else return 0 end', - tryLock : (doc_id, callback = (err, isFree)->)-> - lockValue = LockManager.randomLock() - key = keys.blockingKey(doc_id:doc_id) - profile = new Profiler("tryLock", {doc_id, key, lockValue}) - rclient.set key, lockValue, "EX", @LOCK_TTL, "NX", (err, gotLock)-> - return callback(err) if err? - if gotLock == "OK" - metrics.inc "doc-not-blocking" - timeTaken = profile.log("got lock").end() - if timeTaken > MAX_REDIS_REQUEST_LENGTH - # took too long, so try to free the lock - LockManager.releaseLock doc_id, lockValue, (err, result) -> - return callback(err) if err? # error freeing lock - callback null, false # tell caller they didn't get the lock - else - callback null, true, lockValue - else - metrics.inc "doc-blocking" - profile.log("doc is locked").end() - callback null, false + tryLock(doc_id, callback){ + if (callback == null) { callback = function(err, isFree){}; } + const lockValue = LockManager.randomLock(); + const key = keys.blockingKey({doc_id}); + const profile = new Profiler("tryLock", {doc_id, key, lockValue}); + return rclient.set(key, lockValue, "EX", this.LOCK_TTL, "NX", function(err, gotLock){ + if (err != null) { return callback(err); } + if (gotLock === "OK") { + metrics.inc("doc-not-blocking"); + const timeTaken = profile.log("got lock").end(); + if (timeTaken > MAX_REDIS_REQUEST_LENGTH) { + // took too long, so try to free the lock + return LockManager.releaseLock(doc_id, lockValue, function(err, result) { + if (err != null) { return callback(err); } // error freeing lock + return callback(null, false); + }); // tell caller they didn't get the lock + } else { + return callback(null, true, lockValue); + } + } else { + metrics.inc("doc-blocking"); + profile.log("doc is locked").end(); + return callback(null, false); + } + }); + }, - getLock: (doc_id, callback = (error, lockValue) ->) -> - startTime = Date.now() - testInterval = LockManager.LOCK_TEST_INTERVAL - profile = new Profiler("getLock", {doc_id}) - do attempt = () -> - if Date.now() - startTime > LockManager.MAX_LOCK_WAIT_TIME - e = new Error("Timeout") - e.doc_id = doc_id - profile.log("timeout").end() - return callback(e) + getLock(doc_id, callback) { + let attempt; + if (callback == null) { callback = function(error, lockValue) {}; } + const startTime = Date.now(); + let testInterval = LockManager.LOCK_TEST_INTERVAL; + const profile = new 
Profiler("getLock", {doc_id}); + return (attempt = function() { + if ((Date.now() - startTime) > LockManager.MAX_LOCK_WAIT_TIME) { + const e = new Error("Timeout"); + e.doc_id = doc_id; + profile.log("timeout").end(); + return callback(e); + } - LockManager.tryLock doc_id, (error, gotLock, lockValue) -> - return callback(error) if error? - profile.log("tryLock") - if gotLock - profile.end() - callback(null, lockValue) - else - setTimeout attempt, testInterval - # back off when the lock is taken to avoid overloading - testInterval = Math.min(testInterval * 2, LockManager.MAX_TEST_INTERVAL) + return LockManager.tryLock(doc_id, function(error, gotLock, lockValue) { + if (error != null) { return callback(error); } + profile.log("tryLock"); + if (gotLock) { + profile.end(); + return callback(null, lockValue); + } else { + setTimeout(attempt, testInterval); + // back off when the lock is taken to avoid overloading + return testInterval = Math.min(testInterval * 2, LockManager.MAX_TEST_INTERVAL); + } + }); + })(); + }, - checkLock: (doc_id, callback = (err, isFree)->)-> - key = keys.blockingKey(doc_id:doc_id) - rclient.exists key, (err, exists) -> - return callback(err) if err? - exists = parseInt exists - if exists == 1 - metrics.inc "doc-blocking" - callback null, false - else - metrics.inc "doc-not-blocking" - callback null, true + checkLock(doc_id, callback){ + if (callback == null) { callback = function(err, isFree){}; } + const key = keys.blockingKey({doc_id}); + return rclient.exists(key, function(err, exists) { + if (err != null) { return callback(err); } + exists = parseInt(exists); + if (exists === 1) { + metrics.inc("doc-blocking"); + return callback(null, false); + } else { + metrics.inc("doc-not-blocking"); + return callback(null, true); + } + }); + }, - releaseLock: (doc_id, lockValue, callback)-> - key = keys.blockingKey(doc_id:doc_id) - profile = new Profiler("releaseLock", {doc_id, key, lockValue}) - rclient.eval LockManager.unlockScript, 1, key, lockValue, (err, result) -> - if err? - return callback(err) - else if result? 
and result isnt 1 # successful unlock should release exactly one key - profile.log("unlockScript:expired-lock").end() - logger.error {doc_id:doc_id, key:key, lockValue:lockValue, redis_err:err, redis_result:result}, "unlocking error" - metrics.inc "unlock-error" - return callback(new Error("tried to release timed out lock")) - else - profile.log("unlockScript:ok").end() - callback(null,result) + releaseLock(doc_id, lockValue, callback){ + const key = keys.blockingKey({doc_id}); + const profile = new Profiler("releaseLock", {doc_id, key, lockValue}); + return rclient.eval(LockManager.unlockScript, 1, key, lockValue, function(err, result) { + if (err != null) { + return callback(err); + } else if ((result != null) && (result !== 1)) { // successful unlock should release exactly one key + profile.log("unlockScript:expired-lock").end(); + logger.error({doc_id, key, lockValue, redis_err:err, redis_result:result}, "unlocking error"); + metrics.inc("unlock-error"); + return callback(new Error("tried to release timed out lock")); + } else { + profile.log("unlockScript:ok").end(); + return callback(null,result); + } + }); + } +}); diff --git a/services/document-updater/app/coffee/LoggerSerializers.js b/services/document-updater/app/coffee/LoggerSerializers.js index 437f49e074..87696abf3a 100644 --- a/services/document-updater/app/coffee/LoggerSerializers.js +++ b/services/document-updater/app/coffee/LoggerSerializers.js @@ -1,25 +1,41 @@ -_ = require('lodash') +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +const _ = require('lodash'); -showLength = (thing) -> - if thing?.length then thing.length else thing +const showLength = function(thing) { + if ((thing != null ? thing.length : undefined)) { return thing.length; } else { return thing; } +}; -showUpdateLength = (update) -> - if update?.op instanceof Array - copy = _.cloneDeep(update) - copy.op.forEach (element, index) -> - copy.op[index].i = element.i.length if element?.i?.length? - copy.op[index].d = element.d.length if element?.d?.length? - copy.op[index].c = element.c.length if element?.c?.length? - copy - else - update +const showUpdateLength = function(update) { + if ((update != null ? update.op : undefined) instanceof Array) { + const copy = _.cloneDeep(update); + copy.op.forEach(function(element, index) { + if (__guard__(element != null ? element.i : undefined, x => x.length) != null) { copy.op[index].i = element.i.length; } + if (__guard__(element != null ? element.d : undefined, x1 => x1.length) != null) { copy.op[index].d = element.d.length; } + if (__guard__(element != null ? element.c : undefined, x2 => x2.length) != null) { return copy.op[index].c = element.c.length; } + }); + return copy; + } else { + return update; + } +}; -module.exports = - # replace long values with their length - lines: showLength - oldLines: showLength - newLines: showLength - docLines: showLength - newDocLines: showLength - ranges: showLength +module.exports = { + // replace long values with their length + lines: showLength, + oldLines: showLength, + newLines: showLength, + docLines: showLength, + newDocLines: showLength, + ranges: showLength, update: showUpdateLength +}; + +function __guard__(value, transform) { + return (typeof value !== 'undefined' && value !== null) ? 
transform(value) : undefined; +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/Metrics.js b/services/document-updater/app/coffee/Metrics.js index 4bf5c6dba5..8a46f7aa83 100644 --- a/services/document-updater/app/coffee/Metrics.js +++ b/services/document-updater/app/coffee/Metrics.js @@ -1 +1 @@ -module.exports = require "metrics-sharelatex" \ No newline at end of file +module.exports = require("metrics-sharelatex"); \ No newline at end of file diff --git a/services/document-updater/app/coffee/PersistenceManager.js b/services/document-updater/app/coffee/PersistenceManager.js index 88b44fd1de..f981f6bf90 100644 --- a/services/document-updater/app/coffee/PersistenceManager.js +++ b/services/document-updater/app/coffee/PersistenceManager.js @@ -1,100 +1,134 @@ -Settings = require "settings-sharelatex" -Errors = require "./Errors" -Metrics = require "./Metrics" -logger = require "logger-sharelatex" -request = (require("requestretry")).defaults({ - maxAttempts: 2 +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let PersistenceManager; +const Settings = require("settings-sharelatex"); +const Errors = require("./Errors"); +const Metrics = require("./Metrics"); +const logger = require("logger-sharelatex"); +const request = (require("requestretry")).defaults({ + maxAttempts: 2, retryDelay: 10 -}) +}); -# We have to be quick with HTTP calls because we're holding a lock that -# expires after 30 seconds. We can't let any errors in the rest of the stack -# hold us up, and need to bail out quickly if there is a problem. -MAX_HTTP_REQUEST_LENGTH = 5000 # 5 seconds +// We have to be quick with HTTP calls because we're holding a lock that +// expires after 30 seconds. We can't let any errors in the rest of the stack +// hold us up, and need to bail out quickly if there is a problem. +const MAX_HTTP_REQUEST_LENGTH = 5000; // 5 seconds -updateMetric = (method, error, response) -> - # find the status, with special handling for connection timeouts - # https://github.com/request/request#timeouts - status = if error?.connect is true - "#{error.code} (connect)" - else if error? - error.code - else if response? - response.statusCode - Metrics.inc method, 1, {status: status} - if error?.attempts > 1 - Metrics.inc "#{method}-retries", 1, {status: 'error'} - if response?.attempts > 1 - Metrics.inc "#{method}-retries", 1, {status: 'success'} +const updateMetric = function(method, error, response) { + // find the status, with special handling for connection timeouts + // https://github.com/request/request#timeouts + const status = (() => { + if ((error != null ? error.connect : undefined) === true) { + return `${error.code} (connect)`; + } else if (error != null) { + return error.code; + } else if (response != null) { + return response.statusCode; + } + })(); + Metrics.inc(method, 1, {status}); + if ((error != null ? error.attempts : undefined) > 1) { + Metrics.inc(`${method}-retries`, 1, {status: 'error'}); + } + if ((response != null ? 
response.attempts : undefined) > 1) {
+		return Metrics.inc(`${method}-retries`, 1, {status: 'success'});
+	}
+};

-module.exports = PersistenceManager =
-	getDoc: (project_id, doc_id, _callback = (error, lines, version, ranges, pathname, projectHistoryId, projectHistoryType) ->) ->
-		timer = new Metrics.Timer("persistenceManager.getDoc")
-		callback = (args...) ->
-			timer.done()
-			_callback(args...)
+module.exports = (PersistenceManager = {
+	getDoc(project_id, doc_id, _callback) {
+		if (_callback == null) { _callback = function(error, lines, version, ranges, pathname, projectHistoryId, projectHistoryType) {}; }
+		const timer = new Metrics.Timer("persistenceManager.getDoc");
+		const callback = function(...args) {
+			timer.done();
+			return _callback(...Array.from(args || []));
+		};

-		url = "#{Settings.apis.web.url}/project/#{project_id}/doc/#{doc_id}"
-		request {
-			url: url
-			method: "GET"
-			headers:
+		const url = `${Settings.apis.web.url}/project/${project_id}/doc/${doc_id}`;
+		return request({
+			url,
+			method: "GET",
+			headers: {
 				"accept": "application/json"
-			auth:
-				user: Settings.apis.web.user
-				pass: Settings.apis.web.pass
+			},
+			auth: {
+				user: Settings.apis.web.user,
+				pass: Settings.apis.web.pass,
 				sendImmediately: true
-			jar: false
+			},
+			jar: false,
 			timeout: MAX_HTTP_REQUEST_LENGTH
-		}, (error, res, body) ->
-			updateMetric('getDoc', error, res)
-			return callback(error) if error?
-			if res.statusCode >= 200 and res.statusCode < 300
-				try
-					body = JSON.parse body
-				catch e
-					return callback(e)
-				if !body.lines?
-					return callback(new Error("web API response had no doc lines"))
-				if !body.version? or not body.version instanceof Number
-					return callback(new Error("web API response had no valid doc version"))
-				if !body.pathname?
-					return callback(new Error("web API response had no valid doc pathname"))
-				return callback null, body.lines, body.version, body.ranges, body.pathname, body.projectHistoryId, body.projectHistoryType
-			else if res.statusCode == 404
-				return callback(new Errors.NotFoundError("doc not not found: #{url}"))
-			else
-				return callback(new Error("error accessing web API: #{url} #{res.statusCode}"))
+		}, function(error, res, body) {
+			updateMetric('getDoc', error, res);
+			if (error != null) { return callback(error); }
+			if ((res.statusCode >= 200) && (res.statusCode < 300)) {
+				try {
+					body = JSON.parse(body);
+				} catch (e) {
+					return callback(e);
+				}
+				if ((body.lines == null)) {
+					return callback(new Error("web API response had no doc lines"));
+				}
+				if ((body.version == null) || !(body.version instanceof Number)) {
+					return callback(new Error("web API response had no valid doc version"));
+				}
+				if ((body.pathname == null)) {
+					return callback(new Error("web API response had no valid doc pathname"));
+				}
+				return callback(null, body.lines, body.version, body.ranges, body.pathname, body.projectHistoryId, body.projectHistoryType);
+			} else if (res.statusCode === 404) {
+				return callback(new Errors.NotFoundError(`doc not found: ${url}`));
+			} else {
+				return callback(new Error(`error accessing web API: ${url} ${res.statusCode}`));
+			}
+		});
+	},

-	setDoc: (project_id, doc_id, lines, version, ranges, lastUpdatedAt, lastUpdatedBy,_callback = (error) ->) ->
-		timer = new Metrics.Timer("persistenceManager.setDoc")
-		callback = (args...) ->
-			timer.done()
-			_callback(args...)
+	setDoc(project_id, doc_id, lines, version, ranges, lastUpdatedAt, lastUpdatedBy,_callback) {
+		if (_callback == null) { _callback = function(error) {}; }
+		const timer = new Metrics.Timer("persistenceManager.setDoc");
+		const callback = function(...args) {
+			timer.done();
+			return _callback(...Array.from(args || []));
+		};

-		url = "#{Settings.apis.web.url}/project/#{project_id}/doc/#{doc_id}"
-		request {
-			url: url
-			method: "POST"
-			json:
-				lines: lines
-				ranges: ranges
-				version: version
-				lastUpdatedBy: lastUpdatedBy
-				lastUpdatedAt: lastUpdatedAt
-			auth:
-				user: Settings.apis.web.user
-				pass: Settings.apis.web.pass
+		const url = `${Settings.apis.web.url}/project/${project_id}/doc/${doc_id}`;
+		return request({
+			url,
+			method: "POST",
+			json: {
+				lines,
+				ranges,
+				version,
+				lastUpdatedBy,
+				lastUpdatedAt
+			},
+			auth: {
+				user: Settings.apis.web.user,
+				pass: Settings.apis.web.pass,
 				sendImmediately: true
-			jar: false
+			},
+			jar: false,
 			timeout: MAX_HTTP_REQUEST_LENGTH
-		}, (error, res, body) ->
-			updateMetric('setDoc', error, res)
-			return callback(error) if error?
-			if res.statusCode >= 200 and res.statusCode < 300
-				return callback null
-			else if res.statusCode == 404
-				return callback(new Errors.NotFoundError("doc not not found: #{url}"))
-			else
-				return callback(new Error("error accessing web API: #{url} #{res.statusCode}"))
+		}, function(error, res, body) {
+			updateMetric('setDoc', error, res);
+			if (error != null) { return callback(error); }
+			if ((res.statusCode >= 200) && (res.statusCode < 300)) {
+				return callback(null);
+			} else if (res.statusCode === 404) {
+				return callback(new Errors.NotFoundError(`doc not found: ${url}`));
+			} else {
+				return callback(new Error(`error accessing web API: ${url} ${res.statusCode}`));
+			}
+		});
+	}
+});
diff --git a/services/document-updater/app/coffee/Profiler.js b/services/document-updater/app/coffee/Profiler.js
index 1d85f9bd98..2ca3484496 100644
--- a/services/document-updater/app/coffee/Profiler.js
+++ b/services/document-updater/app/coffee/Profiler.js
@@ -1,34 +1,56 @@
-Settings = require('settings-sharelatex')
-logger = require('logger-sharelatex')
+/*
+ * decaffeinate suggestions:
+ * DS206: Consider reworking classes to avoid initClass
+ * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
+ */
+let Profiler;
+const Settings = require('settings-sharelatex');
+const logger = require('logger-sharelatex');

-deltaMs = (ta, tb) ->
-	nanoSeconds = (ta[0]-tb[0])*1e9 + (ta[1]-tb[1])
-	milliSeconds = Math.floor(nanoSeconds*1e-6)
-	return milliSeconds
+const deltaMs = function(ta, tb) {
+	const nanoSeconds = ((ta[0]-tb[0])*1e9) + (ta[1]-tb[1]);
+	const milliSeconds = Math.floor(nanoSeconds*1e-6);
+	return milliSeconds;
+};

-module.exports = class Profiler
-	LOG_CUTOFF_TIME: 1000
+module.exports = (Profiler = (function() {
+	Profiler = class Profiler {
+		static initClass() {
+			this.prototype.LOG_CUTOFF_TIME = 1000;
+		}

-	constructor: (@name, @args) ->
-		@t0 = @t = process.hrtime()
-		@start = new Date()
-		@updateTimes = []
+		constructor(name, args) {
+			this.name = name;
+			this.args = args;
+			this.t0 = (this.t = process.hrtime());
+			this.start = new Date();
+			this.updateTimes = [];
+		}

-	log: (label) ->
-		t1 = process.hrtime()
-		dtMilliSec = deltaMs(t1, @t)
-		@t = t1
-		@updateTimes.push [label, dtMilliSec] # timings in ms
-		return @ # make it chainable
+		log(label) {
+			const t1 = process.hrtime();
+			const dtMilliSec = deltaMs(t1, this.t);
+			this.t = t1;
+			this.updateTimes.push([label, dtMilliSec]); // 
timings in ms + return this; // make it chainable + } - end: (message) -> - totalTime = deltaMs(@t, @t0) - if totalTime > @LOG_CUTOFF_TIME # log anything greater than cutoff - args = {} - for k,v of @args - args[k] = v - args.updateTimes = @updateTimes - args.start = @start - args.end = new Date() - logger.log args, @name - return totalTime + end(message) { + const totalTime = deltaMs(this.t, this.t0); + if (totalTime > this.LOG_CUTOFF_TIME) { // log anything greater than cutoff + const args = {}; + for (let k in this.args) { + const v = this.args[k]; + args[k] = v; + } + args.updateTimes = this.updateTimes; + args.start = this.start; + args.end = new Date(); + logger.log(args, this.name); + } + return totalTime; + } + }; + Profiler.initClass(); + return Profiler; +})()); diff --git a/services/document-updater/app/coffee/ProjectFlusher.js b/services/document-updater/app/coffee/ProjectFlusher.js index e1ead3759c..d42eb59531 100644 --- a/services/document-updater/app/coffee/ProjectFlusher.js +++ b/services/document-updater/app/coffee/ProjectFlusher.js @@ -1,73 +1,101 @@ -request = require("request") -Settings = require('settings-sharelatex') -RedisManager = require("./RedisManager") -rclient = RedisManager.rclient -docUpdaterKeys = Settings.redis.documentupdater.key_schema -async = require("async") -ProjectManager = require("./ProjectManager") -_ = require("lodash") -logger = require("logger-sharelatex") +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +const request = require("request"); +const Settings = require('settings-sharelatex'); +const RedisManager = require("./RedisManager"); +const { + rclient +} = RedisManager; +const docUpdaterKeys = Settings.redis.documentupdater.key_schema; +const async = require("async"); +const ProjectManager = require("./ProjectManager"); +const _ = require("lodash"); +const logger = require("logger-sharelatex"); -ProjectFlusher = +var ProjectFlusher = { - # iterate over keys asynchronously using redis scan (non-blocking) - # handle all the cluster nodes or single redis server - _getKeys: (pattern, limit, callback) -> - nodes = rclient.nodes?('master') || [ rclient ]; - doKeyLookupForNode = (node, cb) -> - ProjectFlusher._getKeysFromNode node, pattern, limit, cb - async.concatSeries nodes, doKeyLookupForNode, callback + // iterate over keys asynchronously using redis scan (non-blocking) + // handle all the cluster nodes or single redis server + _getKeys(pattern, limit, callback) { + const nodes = (typeof rclient.nodes === 'function' ? rclient.nodes('master') : undefined) || [ rclient ]; + const doKeyLookupForNode = (node, cb) => ProjectFlusher._getKeysFromNode(node, pattern, limit, cb); + return async.concatSeries(nodes, doKeyLookupForNode, callback); + }, - _getKeysFromNode: (node, pattern, limit = 1000, callback) -> - cursor = 0 # redis iterator - keySet = {} # use hash to avoid duplicate results - batchSize = if limit? then Math.min(limit, 1000) else 1000 - # scan over all keys looking for pattern - doIteration = (cb) -> - node.scan cursor, "MATCH", pattern, "COUNT", batchSize, (error, reply) -> - return callback(error) if error? 
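
// A brief usage sketch for the Profiler class above: deltas are recorded per
// label, log() is chainable, and end() only emits a log line when the total
// exceeds LOG_CUTOFF_TIME (1000ms). The operation name, args, and stand-in
// work loops below are illustrative only.
const ProfilerExample = require("./Profiler");

const profile = new ProfilerExample("exampleOperation", { doc_id: "illustrative-id" });
for (let i = 0; i < 1e6; i += 1) { /* stand-in work */ }
profile.log("step1"); // records the time since construction under "step1"
for (let i = 0; i < 1e6; i += 1) { /* more stand-in work */ }
const totalMs = profile.log("step2").end(); // logs updateTimes/start/end if slow
console.log(totalMs); // total elapsed milliseconds, returned either way
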
- [cursor, keys] = reply - for key in keys - keySet[key] = true - keys = Object.keys(keySet) - noResults = cursor == "0" # redis returns string results not numeric - limitReached = (limit? && keys.length >= limit) - if noResults || limitReached - return callback(null, keys) - else - setTimeout doIteration, 10 # avoid hitting redis too hard - doIteration() + _getKeysFromNode(node, pattern, limit, callback) { + if (limit == null) { limit = 1000; } + let cursor = 0; // redis iterator + const keySet = {}; // use hash to avoid duplicate results + const batchSize = (limit != null) ? Math.min(limit, 1000) : 1000; + // scan over all keys looking for pattern + var doIteration = cb => // avoid hitting redis too hard + node.scan(cursor, "MATCH", pattern, "COUNT", batchSize, function(error, reply) { + let keys; + if (error != null) { return callback(error); } + [cursor, keys] = Array.from(reply); + for (let key of Array.from(keys)) { + keySet[key] = true; + } + keys = Object.keys(keySet); + const noResults = cursor === "0"; // redis returns string results not numeric + const limitReached = ((limit != null) && (keys.length >= limit)); + if (noResults || limitReached) { + return callback(null, keys); + } else { + return setTimeout(doIteration, 10); + } + }); + return doIteration(); + }, - # extract ids from keys like DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b - # or docsInProject:{57fd0b1f53a8396d22b2c24b} (for redis cluster) - _extractIds: (keyList) -> - ids = for key in keyList - m = key.match(/:\{?([0-9a-f]{24})\}?/) # extract object id - m[1] - return ids + // extract ids from keys like DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b + // or docsInProject:{57fd0b1f53a8396d22b2c24b} (for redis cluster) + _extractIds(keyList) { + const ids = (() => { + const result = []; + for (let key of Array.from(keyList)) { + const m = key.match(/:\{?([0-9a-f]{24})\}?/); // extract object id + result.push(m[1]); + } + return result; + })(); + return ids; + }, - flushAllProjects: (options, callback)-> - logger.log options:options, "flushing all projects" - ProjectFlusher._getKeys docUpdaterKeys.docsInProject({project_id:"*"}), options.limit, (error, project_keys) -> - if error? - logger.err err:error, "error getting keys for flushing" - return callback(error) - project_ids = ProjectFlusher._extractIds(project_keys) - if options.dryRun - return callback(null, project_ids) - jobs = _.map project_ids, (project_id)-> - return (cb)-> - ProjectManager.flushAndDeleteProjectWithLocks project_id, {background:true}, cb - async.parallelLimit async.reflectAll(jobs), options.concurrency, (error, results)-> - success = [] - failure = [] - _.each results, (result, i)-> - if result.error? 
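
// A quick sketch of the id-extraction regex used by _extractIds above: it
// accepts both the plain key format and the hash-tagged cluster format named
// in the comment. The 24-hex-character object id shown is illustrative.
const KEY_ID_REGEX = /:\{?([0-9a-f]{24})\}?/;

console.log("DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b".match(KEY_ID_REGEX)[1]);
// => "57fd0b1f53a8396d22b2c24b"
console.log("docsInProject:{57fd0b1f53a8396d22b2c24b}".match(KEY_ID_REGEX)[1]);
// => "57fd0b1f53a8396d22b2c24b"
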
- failure.push(project_ids[i]) - else - success.push(project_ids[i]) - logger.log success:success, failure:failure, "finished flushing all projects" - return callback(error, {success:success, failure:failure}) + flushAllProjects(options, callback){ + logger.log({options}, "flushing all projects"); + return ProjectFlusher._getKeys(docUpdaterKeys.docsInProject({project_id:"*"}), options.limit, function(error, project_keys) { + if (error != null) { + logger.err({err:error}, "error getting keys for flushing"); + return callback(error); + } + const project_ids = ProjectFlusher._extractIds(project_keys); + if (options.dryRun) { + return callback(null, project_ids); + } + const jobs = _.map(project_ids, project_id => cb => ProjectManager.flushAndDeleteProjectWithLocks(project_id, {background:true}, cb)); + return async.parallelLimit(async.reflectAll(jobs), options.concurrency, function(error, results){ + const success = []; + const failure = []; + _.each(results, function(result, i){ + if (result.error != null) { + return failure.push(project_ids[i]); + } else { + return success.push(project_ids[i]); + } + }); + logger.log({success, failure}, "finished flushing all projects"); + return callback(error, {success, failure}); + }); + }); + } +}; -module.exports = ProjectFlusher \ No newline at end of file +module.exports = ProjectFlusher; \ No newline at end of file diff --git a/services/document-updater/app/coffee/ProjectHistoryRedisManager.js b/services/document-updater/app/coffee/ProjectHistoryRedisManager.js index af75487a90..cccacba2d2 100644 --- a/services/document-updater/app/coffee/ProjectHistoryRedisManager.js +++ b/services/document-updater/app/coffee/ProjectHistoryRedisManager.js @@ -1,79 +1,111 @@ -Settings = require('settings-sharelatex') -projectHistoryKeys = Settings.redis?.project_history?.key_schema -rclient = require("redis-sharelatex").createClient(Settings.redis.project_history) -logger = require('logger-sharelatex') -metrics = require('./Metrics') +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS201: Simplify complex destructure assignments + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let ProjectHistoryRedisManager; +const Settings = require('settings-sharelatex'); +const projectHistoryKeys = __guard__(Settings.redis != null ? Settings.redis.project_history : undefined, x => x.key_schema); +const rclient = require("redis-sharelatex").createClient(Settings.redis.project_history); +const logger = require('logger-sharelatex'); +const metrics = require('./Metrics'); -module.exports = ProjectHistoryRedisManager = - queueOps: (project_id, ops..., callback = (error, projectUpdateCount) ->) -> - # Record metric for ops pushed onto queue - for op in ops - metrics.summary "redis.projectHistoryOps", op.length, {status: "push"} - multi = rclient.multi() - # Push the ops onto the project history queue - multi.rpush projectHistoryKeys.projectHistoryOps({project_id}), ops... - # To record the age of the oldest op on the queue set a timestamp if not - # already present (SETNX). - multi.setnx projectHistoryKeys.projectHistoryFirstOpTimestamp({project_id}), Date.now() - multi.exec (error, result) -> - return callback(error) if error? 
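
// A hedged sketch of the queueing pattern in queueOps above: ops are RPUSHed
// onto a per-project list and, in the same MULTI transaction, SETNX stamps
// when the oldest pending op was queued, so queue age can be monitored.
// `rclient` is assumed to be a connected redis-sharelatex client; the key
// names are illustrative stand-ins for the configured key schema.
function queueOpsSketch(rclient, project_id, ops, callback) {
  const multi = rclient.multi();
  multi.rpush(`ProjectHistory:Ops:{${project_id}}`, ...ops);
  // only sets the timestamp if no ops are already waiting on the queue
  multi.setnx(`ProjectHistory:FirstOpTimestamp:{${project_id}}`, Date.now());
  multi.exec((error, result) => {
    if (error != null) { return callback(error); }
    callback(null, result[0]); // RPUSH reply: queue length after the push
  });
}
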
- # return the number of entries pushed onto the project history queue - callback null, result[0] +module.exports = (ProjectHistoryRedisManager = { + queueOps(project_id, ...rest) { + // Record metric for ops pushed onto queue + const adjustedLength = Math.max(rest.length, 1), ops = rest.slice(0, adjustedLength - 1), val = rest[adjustedLength - 1], callback = val != null ? val : function(error, projectUpdateCount) {}; + for (let op of Array.from(ops)) { + metrics.summary("redis.projectHistoryOps", op.length, {status: "push"}); + } + const multi = rclient.multi(); + // Push the ops onto the project history queue + multi.rpush(projectHistoryKeys.projectHistoryOps({project_id}), ...Array.from(ops)); + // To record the age of the oldest op on the queue set a timestamp if not + // already present (SETNX). + multi.setnx(projectHistoryKeys.projectHistoryFirstOpTimestamp({project_id}), Date.now()); + return multi.exec(function(error, result) { + if (error != null) { return callback(error); } + // return the number of entries pushed onto the project history queue + return callback(null, result[0]);}); + }, - queueRenameEntity: (project_id, projectHistoryId, entity_type, entity_id, user_id, projectUpdate, callback) -> - projectUpdate = - pathname: projectUpdate.pathname - new_pathname: projectUpdate.newPathname - meta: - user_id: user_id + queueRenameEntity(project_id, projectHistoryId, entity_type, entity_id, user_id, projectUpdate, callback) { + projectUpdate = { + pathname: projectUpdate.pathname, + new_pathname: projectUpdate.newPathname, + meta: { + user_id, ts: new Date() - version: projectUpdate.version - projectHistoryId: projectHistoryId - projectUpdate[entity_type] = entity_id + }, + version: projectUpdate.version, + projectHistoryId + }; + projectUpdate[entity_type] = entity_id; - logger.log {project_id, projectUpdate}, "queue rename operation to project-history" - jsonUpdate = JSON.stringify(projectUpdate) + logger.log({project_id, projectUpdate}, "queue rename operation to project-history"); + const jsonUpdate = JSON.stringify(projectUpdate); - ProjectHistoryRedisManager.queueOps project_id, jsonUpdate, callback + return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback); + }, - queueAddEntity: (project_id, projectHistoryId, entity_type, entitiy_id, user_id, projectUpdate, callback = (error) ->) -> - projectUpdate = - pathname: projectUpdate.pathname - docLines: projectUpdate.docLines - url: projectUpdate.url - meta: - user_id: user_id + queueAddEntity(project_id, projectHistoryId, entity_type, entitiy_id, user_id, projectUpdate, callback) { + if (callback == null) { callback = function(error) {}; } + projectUpdate = { + pathname: projectUpdate.pathname, + docLines: projectUpdate.docLines, + url: projectUpdate.url, + meta: { + user_id, ts: new Date() - version: projectUpdate.version - projectHistoryId: projectHistoryId - projectUpdate[entity_type] = entitiy_id + }, + version: projectUpdate.version, + projectHistoryId + }; + projectUpdate[entity_type] = entitiy_id; - logger.log {project_id, projectUpdate}, "queue add operation to project-history" - jsonUpdate = JSON.stringify(projectUpdate) + logger.log({project_id, projectUpdate}, "queue add operation to project-history"); + const jsonUpdate = JSON.stringify(projectUpdate); - ProjectHistoryRedisManager.queueOps project_id, jsonUpdate, callback + return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback); + }, - queueResyncProjectStructure: (project_id, projectHistoryId, docs, files, callback) -> - 
logger.log {project_id, docs, files}, "queue project structure resync" - projectUpdate = - resyncProjectStructure: { docs, files } - projectHistoryId: projectHistoryId - meta: + queueResyncProjectStructure(project_id, projectHistoryId, docs, files, callback) { + logger.log({project_id, docs, files}, "queue project structure resync"); + const projectUpdate = { + resyncProjectStructure: { docs, files }, + projectHistoryId, + meta: { ts: new Date() - jsonUpdate = JSON.stringify projectUpdate - ProjectHistoryRedisManager.queueOps project_id, jsonUpdate, callback + } + }; + const jsonUpdate = JSON.stringify(projectUpdate); + return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback); + }, - queueResyncDocContent: (project_id, projectHistoryId, doc_id, lines, version, pathname, callback) -> - logger.log {project_id, doc_id, lines, version, pathname}, "queue doc content resync" - projectUpdate = - resyncDocContent: + queueResyncDocContent(project_id, projectHistoryId, doc_id, lines, version, pathname, callback) { + logger.log({project_id, doc_id, lines, version, pathname}, "queue doc content resync"); + const projectUpdate = { + resyncDocContent: { content: lines.join("\n"), - version: version - projectHistoryId: projectHistoryId - path: pathname - doc: doc_id - meta: + version + }, + projectHistoryId, + path: pathname, + doc: doc_id, + meta: { ts: new Date() - jsonUpdate = JSON.stringify projectUpdate - ProjectHistoryRedisManager.queueOps project_id, jsonUpdate, callback + } + }; + const jsonUpdate = JSON.stringify(projectUpdate); + return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback); + } +}); + +function __guard__(value, transform) { + return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined; +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/ProjectManager.js b/services/document-updater/app/coffee/ProjectManager.js index b60bb98d5e..8b45b7d32d 100644 --- a/services/document-updater/app/coffee/ProjectManager.js +++ b/services/document-updater/app/coffee/ProjectManager.js @@ -1,168 +1,225 @@ -RedisManager = require "./RedisManager" -ProjectHistoryRedisManager = require "./ProjectHistoryRedisManager" -DocumentManager = require "./DocumentManager" -HistoryManager = require "./HistoryManager" -async = require "async" -logger = require "logger-sharelatex" -Metrics = require "./Metrics" -Errors = require "./Errors" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let ProjectManager; +const RedisManager = require("./RedisManager"); +const ProjectHistoryRedisManager = require("./ProjectHistoryRedisManager"); +const DocumentManager = require("./DocumentManager"); +const HistoryManager = require("./HistoryManager"); +const async = require("async"); +const logger = require("logger-sharelatex"); +const Metrics = require("./Metrics"); +const Errors = require("./Errors"); -module.exports = ProjectManager = - flushProjectWithLocks: (project_id, _callback = (error) ->) -> - timer = new Metrics.Timer("projectManager.flushProjectWithLocks") - callback = (args...) -> - timer.done() - _callback(args...) 
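// The ProjectManager methods below wrap their `_callback` so the metrics
// timer is stopped before the caller sees the result. A minimal sketch of
// that pattern, assuming the Metrics module required above; `withTimer` is
// a stand-in name, not part of this patch:
function withTimer(metricName, _callback) {
  const timer = new Metrics.Timer(metricName);
  return function (...args) {
    timer.done();              // record the elapsed time once
    return _callback(...args); // then forward all arguments unchanged
  };
}
// e.g. const callback = withTimer("projectManager.flushProjectWithLocks", _callback);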
+module.exports = (ProjectManager = { + flushProjectWithLocks(project_id, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("projectManager.flushProjectWithLocks"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - RedisManager.getDocIdsInProject project_id, (error, doc_ids) -> - return callback(error) if error? - jobs = [] - errors = [] - for doc_id in (doc_ids or []) - do (doc_id) -> - jobs.push (callback) -> - DocumentManager.flushDocIfLoadedWithLock project_id, doc_id, (error) -> - if error? and error instanceof Errors.NotFoundError - logger.warn err: error, project_id: project_id, doc_id: doc_id, "found deleted doc when flushing" - callback() - else if error? - logger.error err: error, project_id: project_id, doc_id: doc_id, "error flushing doc" - errors.push(error) - callback() - else - callback() + return RedisManager.getDocIdsInProject(project_id, function(error, doc_ids) { + if (error != null) { return callback(error); } + const jobs = []; + const errors = []; + for (let doc_id of Array.from((doc_ids || []))) { + ((doc_id => jobs.push(callback => DocumentManager.flushDocIfLoadedWithLock(project_id, doc_id, function(error) { + if ((error != null) && error instanceof Errors.NotFoundError) { + logger.warn({err: error, project_id, doc_id}, "found deleted doc when flushing"); + return callback(); + } else if (error != null) { + logger.error({err: error, project_id, doc_id}, "error flushing doc"); + errors.push(error); + return callback(); + } else { + return callback(); + } + }))))(doc_id); + } - logger.log project_id: project_id, doc_ids: doc_ids, "flushing docs" - async.series jobs, () -> - if errors.length > 0 - callback new Error("Errors flushing docs. See log for details") - else - callback(null) + logger.log({project_id, doc_ids}, "flushing docs"); + return async.series(jobs, function() { + if (errors.length > 0) { + return callback(new Error("Errors flushing docs. See log for details")); + } else { + return callback(null); + } + }); + }); + }, - flushAndDeleteProjectWithLocks: (project_id, options, _callback = (error) ->) -> - timer = new Metrics.Timer("projectManager.flushAndDeleteProjectWithLocks") - callback = (args...) -> - timer.done() - _callback(args...) + flushAndDeleteProjectWithLocks(project_id, options, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("projectManager.flushAndDeleteProjectWithLocks"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - RedisManager.getDocIdsInProject project_id, (error, doc_ids) -> - return callback(error) if error? - jobs = [] - errors = [] - for doc_id in (doc_ids or []) - do (doc_id) -> - jobs.push (callback) -> - DocumentManager.flushAndDeleteDocWithLock project_id, doc_id, {}, (error) -> - if error? 
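// The `((doc_id => ...))(doc_id)` wrappers above are decaffeinate's
// rendering of CoffeeScript's `do (doc_id) ->`, which captured the loop
// variable per iteration. With the block-scoped `let` of the for-of loop
// the IIFE is redundant; an equivalent sketch:
function buildFlushJobs(project_id, doc_ids) {
  const jobs = [];
  for (let doc_id of doc_ids || []) {
    // `let` gives each iteration its own binding, so the closure is safe
    jobs.push(cb => DocumentManager.flushDocIfLoadedWithLock(project_id, doc_id, cb));
  }
  return jobs;
}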
- logger.error err: error, project_id: project_id, doc_id: doc_id, "error deleting doc" - errors.push(error) - callback() + return RedisManager.getDocIdsInProject(project_id, function(error, doc_ids) { + if (error != null) { return callback(error); } + const jobs = []; + const errors = []; + for (let doc_id of Array.from((doc_ids || []))) { + ((doc_id => jobs.push(callback => DocumentManager.flushAndDeleteDocWithLock(project_id, doc_id, {}, function(error) { + if (error != null) { + logger.error({err: error, project_id, doc_id}, "error deleting doc"); + errors.push(error); + } + return callback(); + }))))(doc_id); + } - logger.log project_id: project_id, doc_ids: doc_ids, "deleting docs" - async.series jobs, () -> - # When deleting the project here we want to ensure that project - # history is completely flushed because the project may be - # deleted in web after this call completes, and so further - # attempts to flush would fail after that. - HistoryManager.flushProjectChanges project_id, options, (error) -> - if errors.length > 0 - callback new Error("Errors deleting docs. See log for details") - else if error? - callback(error) - else - callback(null) + logger.log({project_id, doc_ids}, "deleting docs"); + return async.series(jobs, () => // When deleting the project here we want to ensure that project + // history is completely flushed because the project may be + // deleted in web after this call completes, and so further + // attempts to flush would fail after that. + HistoryManager.flushProjectChanges(project_id, options, function(error) { + if (errors.length > 0) { + return callback(new Error("Errors deleting docs. See log for details")); + } else if (error != null) { + return callback(error); + } else { + return callback(null); + } + })); + }); + }, - queueFlushAndDeleteProject: (project_id, callback = (error) ->) -> - RedisManager.queueFlushAndDeleteProject project_id, (error) -> - if error? - logger.error {project_id: project_id, error:error}, "error adding project to flush and delete queue" - return callback(error) - Metrics.inc "queued-delete" - callback() + queueFlushAndDeleteProject(project_id, callback) { + if (callback == null) { callback = function(error) {}; } + return RedisManager.queueFlushAndDeleteProject(project_id, function(error) { + if (error != null) { + logger.error({project_id, error}, "error adding project to flush and delete queue"); + return callback(error); + } + Metrics.inc("queued-delete"); + return callback(); + }); + }, - getProjectDocsTimestamps: (project_id, callback = (error) ->) -> - RedisManager.getDocIdsInProject project_id, (error, doc_ids) -> - return callback(error) if error? - return callback(null, []) if !doc_ids?.length - RedisManager.getDocTimestamps doc_ids, (error, timestamps) -> - return callback(error) if error? - callback(null, timestamps) + getProjectDocsTimestamps(project_id, callback) { + if (callback == null) { callback = function(error) {}; } + return RedisManager.getDocIdsInProject(project_id, function(error, doc_ids) { + if (error != null) { return callback(error); } + if (!(doc_ids != null ? 
doc_ids.length : undefined)) { return callback(null, []); } + return RedisManager.getDocTimestamps(doc_ids, function(error, timestamps) { + if (error != null) { return callback(error); } + return callback(null, timestamps); + }); + }); + }, - getProjectDocsAndFlushIfOld: (project_id, projectStateHash, excludeVersions = {}, _callback = (error, docs) ->) -> - timer = new Metrics.Timer("projectManager.getProjectDocsAndFlushIfOld") - callback = (args...) -> - timer.done() - _callback(args...) + getProjectDocsAndFlushIfOld(project_id, projectStateHash, excludeVersions, _callback) { + if (excludeVersions == null) { excludeVersions = {}; } + if (_callback == null) { _callback = function(error, docs) {}; } + const timer = new Metrics.Timer("projectManager.getProjectDocsAndFlushIfOld"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - RedisManager.checkOrSetProjectState project_id, projectStateHash, (error, projectStateChanged) -> - if error? - logger.error err: error, project_id: project_id, "error getting/setting project state in getProjectDocsAndFlushIfOld" - return callback(error) - # we can't return docs if project structure has changed - if projectStateChanged - return callback Errors.ProjectStateChangedError("project state changed") - # project structure hasn't changed, return doc content from redis - RedisManager.getDocIdsInProject project_id, (error, doc_ids) -> - if error? - logger.error err: error, project_id: project_id, "error getting doc ids in getProjectDocs" - return callback(error) - jobs = [] - for doc_id in doc_ids or [] - do (doc_id) -> - jobs.push (cb) -> - # get the doc lines from redis - DocumentManager.getDocAndFlushIfOldWithLock project_id, doc_id, (err, lines, version) -> - if err? - logger.error err:err, project_id: project_id, doc_id: doc_id, "error getting project doc lines in getProjectDocsAndFlushIfOld" - return cb(err) - doc = {_id:doc_id, lines:lines, v:version} # create a doc object to return - cb(null, doc) - async.series jobs, (error, docs) -> - return callback(error) if error? 
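// `(doc_ids != null ? doc_ids.length : undefined)` above is decaffeinate's
// expansion of CoffeeScript's `doc_ids?.length` (suggestion DS207). On a
// runtime with optional chaining the same guard can be written directly;
// both sketches below treat null/undefined as "no docs":
const hasDocs = doc_ids => (doc_ids != null ? doc_ids.length : undefined) > 0;
const hasDocsModern = doc_ids => (doc_ids?.length ?? 0) > 0;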
- callback(null, docs) + return RedisManager.checkOrSetProjectState(project_id, projectStateHash, function(error, projectStateChanged) { + if (error != null) { + logger.error({err: error, project_id}, "error getting/setting project state in getProjectDocsAndFlushIfOld"); + return callback(error); + } + // we can't return docs if project structure has changed + if (projectStateChanged) { + return callback(Errors.ProjectStateChangedError("project state changed")); + } + // project structure hasn't changed, return doc content from redis + return RedisManager.getDocIdsInProject(project_id, function(error, doc_ids) { + if (error != null) { + logger.error({err: error, project_id}, "error getting doc ids in getProjectDocs"); + return callback(error); + } + const jobs = []; + for (let doc_id of Array.from(doc_ids || [])) { + ((doc_id => jobs.push(cb => // get the doc lines from redis + DocumentManager.getDocAndFlushIfOldWithLock(project_id, doc_id, function(err, lines, version) { + if (err != null) { + logger.error({err, project_id, doc_id}, "error getting project doc lines in getProjectDocsAndFlushIfOld"); + return cb(err); + } + const doc = {_id:doc_id, lines, v:version}; // create a doc object to return + return cb(null, doc); + }))))(doc_id); + } + return async.series(jobs, function(error, docs) { + if (error != null) { return callback(error); } + return callback(null, docs); + }); + }); + }); + }, - clearProjectState: (project_id, callback = (error) ->) -> - RedisManager.clearProjectState project_id, callback + clearProjectState(project_id, callback) { + if (callback == null) { callback = function(error) {}; } + return RedisManager.clearProjectState(project_id, callback); + }, - updateProjectWithLocks: (project_id, projectHistoryId, user_id, docUpdates, fileUpdates, version, _callback = (error) ->) -> - timer = new Metrics.Timer("projectManager.updateProject") - callback = (args...) -> - timer.done() - _callback(args...) + updateProjectWithLocks(project_id, projectHistoryId, user_id, docUpdates, fileUpdates, version, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const timer = new Metrics.Timer("projectManager.updateProject"); + const callback = function(...args) { + timer.done(); + return _callback(...Array.from(args || [])); + }; - project_version = version - project_subversion = 0 # project versions can have multiple operations + const project_version = version; + let project_subversion = 0; // project versions can have multiple operations - project_ops_length = 0 + let project_ops_length = 0; - handleDocUpdate = (projectUpdate, cb) -> - doc_id = projectUpdate.id - projectUpdate.version = "#{project_version}.#{project_subversion++}" - if projectUpdate.docLines? 
- ProjectHistoryRedisManager.queueAddEntity project_id, projectHistoryId, 'doc', doc_id, user_id, projectUpdate, (error, count) -> - project_ops_length = count - cb(error) - else - DocumentManager.renameDocWithLock project_id, doc_id, user_id, projectUpdate, projectHistoryId, (error, count) -> - project_ops_length = count - cb(error) + const handleDocUpdate = function(projectUpdate, cb) { + const doc_id = projectUpdate.id; + projectUpdate.version = `${project_version}.${project_subversion++}`; + if (projectUpdate.docLines != null) { + return ProjectHistoryRedisManager.queueAddEntity(project_id, projectHistoryId, 'doc', doc_id, user_id, projectUpdate, function(error, count) { + project_ops_length = count; + return cb(error); + }); + } else { + return DocumentManager.renameDocWithLock(project_id, doc_id, user_id, projectUpdate, projectHistoryId, function(error, count) { + project_ops_length = count; + return cb(error); + }); + } + }; - handleFileUpdate = (projectUpdate, cb) -> - file_id = projectUpdate.id - projectUpdate.version = "#{project_version}.#{project_subversion++}" - if projectUpdate.url? - ProjectHistoryRedisManager.queueAddEntity project_id, projectHistoryId, 'file', file_id, user_id, projectUpdate, (error, count) -> - project_ops_length = count - cb(error) - else - ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'file', file_id, user_id, projectUpdate, (error, count) -> - project_ops_length = count - cb(error) + const handleFileUpdate = function(projectUpdate, cb) { + const file_id = projectUpdate.id; + projectUpdate.version = `${project_version}.${project_subversion++}`; + if (projectUpdate.url != null) { + return ProjectHistoryRedisManager.queueAddEntity(project_id, projectHistoryId, 'file', file_id, user_id, projectUpdate, function(error, count) { + project_ops_length = count; + return cb(error); + }); + } else { + return ProjectHistoryRedisManager.queueRenameEntity(project_id, projectHistoryId, 'file', file_id, user_id, projectUpdate, function(error, count) { + project_ops_length = count; + return cb(error); + }); + } + }; - async.eachSeries docUpdates, handleDocUpdate, (error) -> - return callback(error) if error? - async.eachSeries fileUpdates, handleFileUpdate, (error) -> - return callback(error) if error? 
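// Each structure operation above is stamped with a version of the form
// "<project_version>.<subversion>", the subversion incrementing per
// operation within one update. A worked sketch with illustrative values:
function versionOps(project_version, projectUpdates) {
  let project_subversion = 0;
  for (let projectUpdate of projectUpdates) {
    projectUpdate.version = `${project_version}.${project_subversion++}`;
  }
  return projectUpdates;
}
// versionOps(42, [{id: "a"}, {id: "b"}]) stamps versions "42.0" and "42.1"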
- if HistoryManager.shouldFlushHistoryOps(project_ops_length, docUpdates.length + fileUpdates.length, HistoryManager.FLUSH_PROJECT_EVERY_N_OPS) - HistoryManager.flushProjectChangesAsync project_id - callback() + return async.eachSeries(docUpdates, handleDocUpdate, function(error) { + if (error != null) { return callback(error); } + return async.eachSeries(fileUpdates, handleFileUpdate, function(error) { + if (error != null) { return callback(error); } + if (HistoryManager.shouldFlushHistoryOps(project_ops_length, docUpdates.length + fileUpdates.length, HistoryManager.FLUSH_PROJECT_EVERY_N_OPS)) { + HistoryManager.flushProjectChangesAsync(project_id); + } + return callback(); + }); + }); + } +}); diff --git a/services/document-updater/app/coffee/RangesManager.js b/services/document-updater/app/coffee/RangesManager.js index bcb16a39c9..83523f33b5 100644 --- a/services/document-updater/app/coffee/RangesManager.js +++ b/services/document-updater/app/coffee/RangesManager.js @@ -1,76 +1,112 @@ -RangesTracker = require "./RangesTracker" -logger = require "logger-sharelatex" -_ = require "lodash" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let RangesManager; +const RangesTracker = require("./RangesTracker"); +const logger = require("logger-sharelatex"); +const _ = require("lodash"); -module.exports = RangesManager = - MAX_COMMENTS: 500 - MAX_CHANGES: 2000 +module.exports = (RangesManager = { + MAX_COMMENTS: 500, + MAX_CHANGES: 2000, - applyUpdate: (project_id, doc_id, entries = {}, updates = [], newDocLines, callback = (error, new_entries, ranges_were_collapsed) ->) -> - {changes, comments} = _.cloneDeep(entries) - rangesTracker = new RangesTracker(changes, comments) - emptyRangeCountBefore = RangesManager._emptyRangesCount(rangesTracker) - for update in updates - rangesTracker.track_changes = !!update.meta.tc - if !!update.meta.tc - rangesTracker.setIdSeed(update.meta.tc) - for op in update.op - try - rangesTracker.applyOp(op, { user_id: update.meta?.user_id }) - catch error - return callback(error) + applyUpdate(project_id, doc_id, entries, updates, newDocLines, callback) { + let error; + if (entries == null) { entries = {}; } + if (updates == null) { updates = []; } + if (callback == null) { callback = function(error, new_entries, ranges_were_collapsed) {}; } + const {changes, comments} = _.cloneDeep(entries); + const rangesTracker = new RangesTracker(changes, comments); + const emptyRangeCountBefore = RangesManager._emptyRangesCount(rangesTracker); + for (let update of Array.from(updates)) { + rangesTracker.track_changes = !!update.meta.tc; + if (!!update.meta.tc) { + rangesTracker.setIdSeed(update.meta.tc); + } + for (let op of Array.from(update.op)) { + try { + rangesTracker.applyOp(op, { user_id: (update.meta != null ? update.meta.user_id : undefined) }); + } catch (error1) { + error = error1; + return callback(error); + } + } + } - if rangesTracker.changes?.length > RangesManager.MAX_CHANGES or rangesTracker.comments?.length > RangesManager.MAX_COMMENTS - return callback new Error("too many comments or tracked changes") + if (((rangesTracker.changes != null ? rangesTracker.changes.length : undefined) > RangesManager.MAX_CHANGES) || ((rangesTracker.comments != null ? 
rangesTracker.comments.length : undefined) > RangesManager.MAX_COMMENTS)) { + return callback(new Error("too many comments or tracked changes")); + } - try - # This is a consistency check that all of our ranges and - # comments still match the corresponding text - rangesTracker.validate(newDocLines.join("\n")) - catch error - logger.error {err: error, project_id, doc_id, newDocLines, updates}, "error validating ranges" - return callback(error) + try { + // This is a consistency check that all of our ranges and + // comments still match the corresponding text + rangesTracker.validate(newDocLines.join("\n")); + } catch (error2) { + error = error2; + logger.error({err: error, project_id, doc_id, newDocLines, updates}, "error validating ranges"); + return callback(error); + } - emptyRangeCountAfter = RangesManager._emptyRangesCount(rangesTracker) - rangesWereCollapsed = emptyRangeCountAfter > emptyRangeCountBefore - response = RangesManager._getRanges rangesTracker - logger.log {project_id, doc_id, changesCount: response.changes?.length, commentsCount: response.comments?.length, rangesWereCollapsed}, "applied updates to ranges" - callback null, response, rangesWereCollapsed + const emptyRangeCountAfter = RangesManager._emptyRangesCount(rangesTracker); + const rangesWereCollapsed = emptyRangeCountAfter > emptyRangeCountBefore; + const response = RangesManager._getRanges(rangesTracker); + logger.log({project_id, doc_id, changesCount: (response.changes != null ? response.changes.length : undefined), commentsCount: (response.comments != null ? response.comments.length : undefined), rangesWereCollapsed}, "applied updates to ranges"); + return callback(null, response, rangesWereCollapsed); + }, - acceptChanges: (change_ids, ranges, callback = (error, ranges) ->) -> - {changes, comments} = ranges - logger.log "accepting #{ change_ids.length } changes in ranges" - rangesTracker = new RangesTracker(changes, comments) - rangesTracker.removeChangeIds(change_ids) - response = RangesManager._getRanges(rangesTracker) - callback null, response + acceptChanges(change_ids, ranges, callback) { + if (callback == null) { callback = function(error, ranges) {}; } + const {changes, comments} = ranges; + logger.log(`accepting ${ change_ids.length } changes in ranges`); + const rangesTracker = new RangesTracker(changes, comments); + rangesTracker.removeChangeIds(change_ids); + const response = RangesManager._getRanges(rangesTracker); + return callback(null, response); + }, - deleteComment: (comment_id, ranges, callback = (error, ranges) ->) -> - {changes, comments} = ranges - logger.log {comment_id}, "deleting comment in ranges" - rangesTracker = new RangesTracker(changes, comments) - rangesTracker.removeCommentId(comment_id) - response = RangesManager._getRanges(rangesTracker) - callback null, response + deleteComment(comment_id, ranges, callback) { + if (callback == null) { callback = function(error, ranges) {}; } + const {changes, comments} = ranges; + logger.log({comment_id}, "deleting comment in ranges"); + const rangesTracker = new RangesTracker(changes, comments); + rangesTracker.removeCommentId(comment_id); + const response = RangesManager._getRanges(rangesTracker); + return callback(null, response); + }, - _getRanges: (rangesTracker) -> - # Return the minimal data structure needed, since most documents won't have any - # changes or comments - response = {} - if rangesTracker.changes?.length > 0 - response ?= {} - response.changes = rangesTracker.changes - if rangesTracker.comments?.length > 0 - response ?= {} 
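// applyUpdate above reports, via the third callback argument, whether any
// tracked range collapsed to the empty string while the updates were
// applied (empty-range counts are compared before and after). A usage
// sketch; `handleError` and `persistRanges` are hypothetical callers:
RangesManager.applyUpdate(project_id, doc_id, ranges, updates, newDocLines,
  function (error, newRanges, rangesWereCollapsed) {
    if (error != null) { return handleError(error); }
    if (rangesWereCollapsed) {
      // the caller may want to log or resync here; this branch is illustrative
    }
    return persistRanges(newRanges);
  });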
- response.comments = rangesTracker.comments - return response + _getRanges(rangesTracker) { + // Return the minimal data structure needed, since most documents won't have any + // changes or comments + let response = {}; + if ((rangesTracker.changes != null ? rangesTracker.changes.length : undefined) > 0) { + if (response == null) { response = {}; } + response.changes = rangesTracker.changes; + } + if ((rangesTracker.comments != null ? rangesTracker.comments.length : undefined) > 0) { + if (response == null) { response = {}; } + response.comments = rangesTracker.comments; + } + return response; + }, - _emptyRangesCount: (ranges) -> - count = 0 - for comment in (ranges.comments or []) - if comment.op.c == "" - count++ - for change in (ranges.changes or []) when change.op.i? - if change.op.i == "" - count++ - return count \ No newline at end of file + _emptyRangesCount(ranges) { + let count = 0; + for (let comment of Array.from((ranges.comments || []))) { + if (comment.op.c === "") { + count++; + } + } + for (let change of Array.from((ranges.changes || []))) { + if (change.op.i != null) { + if (change.op.i === "") { + count++; + } + } + } + return count; + } +}); \ No newline at end of file diff --git a/services/document-updater/app/coffee/RangesTracker.js b/services/document-updater/app/coffee/RangesTracker.js index 869d63159b..de7e885c5c 100644 --- a/services/document-updater/app/coffee/RangesTracker.js +++ b/services/document-updater/app/coffee/RangesTracker.js @@ -1,576 +1,717 @@ -# This file is shared between document-updater and web, so that the server and client share -# an identical track changes implementation. Do not edit it directly in web or document-updater, -# instead edit it at https://github.com/sharelatex/ranges-tracker, where it has a suite of tests -load = () -> - class RangesTracker - # The purpose of this class is to track a set of inserts and deletes to a document, like - # track changes in Word. We store these as a set of ShareJs style ranges: - # {i: "foo", p: 42} # Insert 'foo' at offset 42 - # {d: "bar", p: 37} # Delete 'bar' at offset 37 - # We only track the inserts and deletes, not the whole document, but by being given all - # updates that are applied to a document, we can update these appropriately. - # - # Note that the set of inserts and deletes we store applies to the document as-is at the moment. - # So inserts correspond to text which is in the document, while deletes correspond to text which - # is no longer there, so their lengths do not affect the position of later offsets. - # E.g. - # this is the current text of the document - # |-----| | - # {i: "current ", p:12} -^ ^- {d: "old ", p: 31} - # - # Track changes rules (should be consistent with Word): - # * When text is inserted at a delete, the text goes to the left of the delete - # I.e. "foo|bar" -> "foobaz|bar", where | is the delete, and 'baz' is inserted - # * Deleting content flagged as 'inserted' does not create a new delete marker, it only - # removes the insert marker. E.g. - # * "abdefghijkl" -> "abfghijkl" when 'de' is deleted. No delete marker added - # |---| <- inserted |-| <- inserted - # * Deletes overlapping regular text and inserted text will insert a delete marker for the - # regular text: - # "abcdefghijkl" -> "abcdejkl" when 'fghi' is deleted - # |----| |--|| - # ^- inserted 'bcdefg' \ ^- deleted 'hi' - # \--inserted 'bcde' - # * Deletes overlapping other deletes are merged. E.g. 
- # "abcghijkl" -> "ahijkl" when 'bcg is deleted' - # | <- delete 'def' | <- delete 'bcdefg' - # * Deletes by another user will consume deletes by the first user - # * Inserts by another user will not combine with inserts by the first user. If they are in the - # middle of a previous insert by the first user, the original insert will be split into two. - constructor: (@changes = [], @comments = []) -> - @setIdSeed(RangesTracker.generateIdSeed()) - @resetDirtyState() +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This file is shared between document-updater and web, so that the server and client share +// an identical track changes implementation. Do not edit it directly in web or document-updater, +// instead edit it at https://github.com/sharelatex/ranges-tracker, where it has a suite of tests +const load = function() { + let RangesTracker; + return RangesTracker = class RangesTracker { + // The purpose of this class is to track a set of inserts and deletes to a document, like + // track changes in Word. We store these as a set of ShareJs style ranges: + // {i: "foo", p: 42} # Insert 'foo' at offset 42 + // {d: "bar", p: 37} # Delete 'bar' at offset 37 + // We only track the inserts and deletes, not the whole document, but by being given all + // updates that are applied to a document, we can update these appropriately. + // + // Note that the set of inserts and deletes we store applies to the document as-is at the moment. + // So inserts correspond to text which is in the document, while deletes correspond to text which + // is no longer there, so their lengths do not affect the position of later offsets. + // E.g. + // this is the current text of the document + // |-----| | + // {i: "current ", p:12} -^ ^- {d: "old ", p: 31} + // + // Track changes rules (should be consistent with Word): + // * When text is inserted at a delete, the text goes to the left of the delete + // I.e. "foo|bar" -> "foobaz|bar", where | is the delete, and 'baz' is inserted + // * Deleting content flagged as 'inserted' does not create a new delete marker, it only + // removes the insert marker. E.g. + // * "abdefghijkl" -> "abfghijkl" when 'de' is deleted. No delete marker added + // |---| <- inserted |-| <- inserted + // * Deletes overlapping regular text and inserted text will insert a delete marker for the + // regular text: + // "abcdefghijkl" -> "abcdejkl" when 'fghi' is deleted + // |----| |--|| + // ^- inserted 'bcdefg' \ ^- deleted 'hi' + // \--inserted 'bcde' + // * Deletes overlapping other deletes are merged. E.g. + // "abcghijkl" -> "ahijkl" when 'bcg is deleted' + // | <- delete 'def' | <- delete 'bcdefg' + // * Deletes by another user will consume deletes by the first user + // * Inserts by another user will not combine with inserts by the first user. If they are in the + // middle of a previous insert by the first user, the original insert will be split into two. 
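// A worked example of the representation described above, against the
// document text used in the diagram:
const text = "this is the current text of the document";
const insert = { i: "current ", p: 12 }; // insert is still present in `text`
const del = { d: "old ", p: 31 };        // deleted text occupies no width
// An insert's content must match the document at its offset:
console.log(text.slice(insert.p, insert.p + insert.i.length)); // "current "
// The delete only remembers what was removed; later offsets are unaffected.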
+ constructor(changes, comments) { + if (changes == null) { changes = []; } + this.changes = changes; + if (comments == null) { comments = []; } + this.comments = comments; + this.setIdSeed(RangesTracker.generateIdSeed()); + this.resetDirtyState(); + } - getIdSeed: () -> - return @id_seed + getIdSeed() { + return this.id_seed; + } - setIdSeed: (seed) -> - @id_seed = seed - @id_increment = 0 + setIdSeed(seed) { + this.id_seed = seed; + return this.id_increment = 0; + } - @generateIdSeed: () -> - # Generate a the first 18 characters of Mongo ObjectId, leaving 6 for the increment part - # Reference: https://github.com/dreampulse/ObjectId.js/blob/master/src/main/javascript/Objectid.js - pid = Math.floor(Math.random() * (32767)).toString(16) - machine = Math.floor(Math.random() * (16777216)).toString(16) - timestamp = Math.floor(new Date().valueOf() / 1000).toString(16) + static generateIdSeed() { + // Generate a the first 18 characters of Mongo ObjectId, leaving 6 for the increment part + // Reference: https://github.com/dreampulse/ObjectId.js/blob/master/src/main/javascript/Objectid.js + const pid = Math.floor(Math.random() * (32767)).toString(16); + const machine = Math.floor(Math.random() * (16777216)).toString(16); + const timestamp = Math.floor(new Date().valueOf() / 1000).toString(16); return '00000000'.substr(0, 8 - timestamp.length) + timestamp + '000000'.substr(0, 6 - machine.length) + machine + - '0000'.substr(0, 4 - pid.length) + pid + '0000'.substr(0, 4 - pid.length) + pid; + } - @generateId: () -> - @generateIdSeed() + "000001" + static generateId() { + return this.generateIdSeed() + "000001"; + } - newId: () -> - @id_increment++ - increment = @id_increment.toString(16) - id = @id_seed + '000000'.substr(0, 6 - increment.length) + increment; - return id + newId() { + this.id_increment++; + const increment = this.id_increment.toString(16); + const id = this.id_seed + '000000'.substr(0, 6 - increment.length) + increment; + return id; + } - getComment: (comment_id) -> - comment = null - for c in @comments - if c.id == comment_id - comment = c - break - return comment + getComment(comment_id) { + let comment = null; + for (let c of Array.from(this.comments)) { + if (c.id === comment_id) { + comment = c; + break; + } + } + return comment; + } - removeCommentId: (comment_id) -> - comment = @getComment(comment_id) - return if !comment? 
- @comments = @comments.filter (c) -> c.id != comment_id - @_markAsDirty comment, "comment", "removed" + removeCommentId(comment_id) { + const comment = this.getComment(comment_id); + if ((comment == null)) { return; } + this.comments = this.comments.filter(c => c.id !== comment_id); + return this._markAsDirty(comment, "comment", "removed"); + } - moveCommentId: (comment_id, position, text) -> - for comment in @comments - if comment.id == comment_id - comment.op.p = position - comment.op.c = text - @_markAsDirty comment, "comment", "moved" + moveCommentId(comment_id, position, text) { + return (() => { + const result = []; + for (let comment of Array.from(this.comments)) { + if (comment.id === comment_id) { + comment.op.p = position; + comment.op.c = text; + result.push(this._markAsDirty(comment, "comment", "moved")); + } else { + result.push(undefined); + } + } + return result; + })(); + } - getChange: (change_id) -> - change = null - for c in @changes - if c.id == change_id - change = c - break - return change + getChange(change_id) { + let change = null; + for (let c of Array.from(this.changes)) { + if (c.id === change_id) { + change = c; + break; + } + } + return change; + } - getChanges: (change_ids) -> - changes_response = [] - ids_map = {} + getChanges(change_ids) { + const changes_response = []; + const ids_map = {}; - for change_id in change_ids - ids_map[change_id] = true + for (let change_id of Array.from(change_ids)) { + ids_map[change_id] = true; + } - for change in @changes - if ids_map[change.id] - delete ids_map[change.id] - changes_response.push change + for (let change of Array.from(this.changes)) { + if (ids_map[change.id]) { + delete ids_map[change.id]; + changes_response.push(change); + } + } - return changes_response + return changes_response; + } - removeChangeId: (change_id) -> - change = @getChange(change_id) - return if !change? - @_removeChange(change) + removeChangeId(change_id) { + const change = this.getChange(change_id); + if ((change == null)) { return; } + return this._removeChange(change); + } - removeChangeIds: (change_to_remove_ids) -> - return if !change_to_remove_ids?.length > 0 - i = @changes.length - remove_change_id = {} - for change_id in change_to_remove_ids - remove_change_id[change_id] = true + removeChangeIds(change_to_remove_ids) { + if (!(change_to_remove_ids != null ? change_to_remove_ids.length : undefined) > 0) { return; } + const i = this.changes.length; + const remove_change_id = {}; + for (let change_id of Array.from(change_to_remove_ids)) { + remove_change_id[change_id] = true; + } - remaining_changes = [] + const remaining_changes = []; - for change in @changes - if remove_change_id[change.id] - delete remove_change_id[change.id] - @_markAsDirty change, "change", "removed" - else - remaining_changes.push change + for (let change of Array.from(this.changes)) { + if (remove_change_id[change.id]) { + delete remove_change_id[change.id]; + this._markAsDirty(change, "change", "removed"); + } else { + remaining_changes.push(change); + } + } - @changes = remaining_changes + return this.changes = remaining_changes; + } - validate: (text) -> - for change in @changes - if change.op.i? 
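// The guard in removeChangeIds above looks inverted but is faithful to the
// CoffeeScript `return if !change_to_remove_ids?.length > 0`: `!` binds
// tighter than `>`, so the test is `(!length) > 0`, which is true exactly
// when the list is missing or empty. A sketch of the evaluation:
const shouldReturnEarly = ids => !(ids != null ? ids.length : undefined) > 0;
console.log(shouldReturnEarly(null));    // true  -> early return
console.log(shouldReturnEarly([]));      // true  -> early return (!0 is true)
console.log(shouldReturnEarly(["id1"])); // false -> proceed   (!1 is false)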
- content = text.slice(change.op.p, change.op.p + change.op.i.length) - if content != change.op.i - throw new Error("Change (#{JSON.stringify(change)}) doesn't match text (#{JSON.stringify(content)})") - for comment in @comments - content = text.slice(comment.op.p, comment.op.p + comment.op.c.length) - if content != comment.op.c - throw new Error("Comment (#{JSON.stringify(comment)}) doesn't match text (#{JSON.stringify(content)})") - return true + validate(text) { + let content; + for (let change of Array.from(this.changes)) { + if (change.op.i != null) { + content = text.slice(change.op.p, change.op.p + change.op.i.length); + if (content !== change.op.i) { + throw new Error(`Change (${JSON.stringify(change)}) doesn't match text (${JSON.stringify(content)})`); + } + } + } + for (let comment of Array.from(this.comments)) { + content = text.slice(comment.op.p, comment.op.p + comment.op.c.length); + if (content !== comment.op.c) { + throw new Error(`Comment (${JSON.stringify(comment)}) doesn't match text (${JSON.stringify(content)})`); + } + } + return true; + } - applyOp: (op, metadata = {}) -> - metadata.ts ?= new Date() - # Apply an op that has been applied to the document to our changes to keep them up to date - if op.i? - @applyInsertToChanges(op, metadata) - @applyInsertToComments(op) - else if op.d? - @applyDeleteToChanges(op, metadata) - @applyDeleteToComments(op) - else if op.c? - @addComment(op, metadata) - else - throw new Error("unknown op type") + applyOp(op, metadata) { + if (metadata == null) { metadata = {}; } + if (metadata.ts == null) { metadata.ts = new Date(); } + // Apply an op that has been applied to the document to our changes to keep them up to date + if (op.i != null) { + this.applyInsertToChanges(op, metadata); + return this.applyInsertToComments(op); + } else if (op.d != null) { + this.applyDeleteToChanges(op, metadata); + return this.applyDeleteToComments(op); + } else if (op.c != null) { + return this.addComment(op, metadata); + } else { + throw new Error("unknown op type"); + } + } - applyOps: (ops, metadata = {}) -> - for op in ops - @applyOp(op, metadata) + applyOps(ops, metadata) { + if (metadata == null) { metadata = {}; } + return Array.from(ops).map((op) => + this.applyOp(op, metadata)); + } - addComment: (op, metadata) -> - existing = @getComment(op.t) - if existing? - @moveCommentId(op.t, op.p, op.c) - return existing - else - @comments.push comment = { - id: op.t or @newId() - op: # Copy because we'll modify in place - c: op.c - p: op.p + addComment(op, metadata) { + const existing = this.getComment(op.t); + if (existing != null) { + this.moveCommentId(op.t, op.p, op.c); + return existing; + } else { + let comment; + this.comments.push(comment = { + id: op.t || this.newId(), + op: { // Copy because we'll modify in place + c: op.c, + p: op.p, t: op.t + }, metadata - } - @_markAsDirty comment, "comment", "added" - return comment + }); + this._markAsDirty(comment, "comment", "added"); + return comment; + } + } - applyInsertToComments: (op) -> - for comment in @comments - if op.p <= comment.op.p - comment.op.p += op.i.length - @_markAsDirty comment, "comment", "moved" - else if op.p < comment.op.p + comment.op.c.length - offset = op.p - comment.op.p - comment.op.c = comment.op.c[0..(offset-1)] + op.i + comment.op.c[offset...] 
- @_markAsDirty comment, "comment", "moved" + applyInsertToComments(op) { + return (() => { + const result = []; + for (let comment of Array.from(this.comments)) { + if (op.p <= comment.op.p) { + comment.op.p += op.i.length; + result.push(this._markAsDirty(comment, "comment", "moved")); + } else if (op.p < (comment.op.p + comment.op.c.length)) { + const offset = op.p - comment.op.p; + comment.op.c = comment.op.c.slice(0, +(offset-1) + 1 || undefined) + op.i + comment.op.c.slice(offset); + result.push(this._markAsDirty(comment, "comment", "moved")); + } else { + result.push(undefined); + } + } + return result; + })(); + } - applyDeleteToComments: (op) -> - op_start = op.p - op_length = op.d.length - op_end = op.p + op_length - for comment in @comments - comment_start = comment.op.p - comment_end = comment.op.p + comment.op.c.length - comment_length = comment_end - comment_start - if op_end <= comment_start - # delete is fully before comment - comment.op.p -= op_length - @_markAsDirty comment, "comment", "moved" - else if op_start >= comment_end - # delete is fully after comment, nothing to do - else - # delete and comment overlap - if op_start <= comment_start - remaining_before = "" - else - remaining_before = comment.op.c.slice(0, op_start - comment_start) - if op_end >= comment_end - remaining_after = "" - else - remaining_after = comment.op.c.slice(op_end - comment_start) + applyDeleteToComments(op) { + const op_start = op.p; + const op_length = op.d.length; + const op_end = op.p + op_length; + return (() => { + const result = []; + for (let comment of Array.from(this.comments)) { + const comment_start = comment.op.p; + const comment_end = comment.op.p + comment.op.c.length; + const comment_length = comment_end - comment_start; + if (op_end <= comment_start) { + // delete is fully before comment + comment.op.p -= op_length; + result.push(this._markAsDirty(comment, "comment", "moved")); + } else if (op_start >= comment_end) { + // delete is fully after comment, nothing to do + } else { + // delete and comment overlap + var remaining_after, remaining_before; + if (op_start <= comment_start) { + remaining_before = ""; + } else { + remaining_before = comment.op.c.slice(0, op_start - comment_start); + } + if (op_end >= comment_end) { + remaining_after = ""; + } else { + remaining_after = comment.op.c.slice(op_end - comment_start); + } - # Check deleted content matches delete op - deleted_comment = comment.op.c.slice(remaining_before.length, comment_length - remaining_after.length) - offset = Math.max(0, comment_start - op_start) - deleted_op_content = op.d.slice(offset).slice(0, deleted_comment.length) - if deleted_comment != deleted_op_content - throw new Error("deleted content does not match comment content") + // Check deleted content matches delete op + const deleted_comment = comment.op.c.slice(remaining_before.length, comment_length - remaining_after.length); + const offset = Math.max(0, comment_start - op_start); + const deleted_op_content = op.d.slice(offset).slice(0, deleted_comment.length); + if (deleted_comment !== deleted_op_content) { + throw new Error("deleted content does not match comment content"); + } - comment.op.p = Math.min(comment_start, op_start) - comment.op.c = remaining_before + remaining_after - @_markAsDirty comment, "comment", "moved" + comment.op.p = Math.min(comment_start, op_start); + comment.op.c = remaining_before + remaining_after; + result.push(this._markAsDirty(comment, "comment", "moved")); + } + } + return result; + })(); + } - applyInsertToChanges: 
(op, metadata) -> - op_start = op.p - op_length = op.i.length - op_end = op.p + op_length - undoing = !!op.u + applyInsertToChanges(op, metadata) { + let change; + const op_start = op.p; + const op_length = op.i.length; + const op_end = op.p + op_length; + const undoing = !!op.u; - already_merged = false - previous_change = null - moved_changes = [] - remove_changes = [] - new_changes = [] + let already_merged = false; + let previous_change = null; + const moved_changes = []; + const remove_changes = []; + const new_changes = []; - for change, i in @changes - change_start = change.op.p + for (let i = 0; i < this.changes.length; i++) { + change = this.changes[i]; + const change_start = change.op.p; - if change.op.d? - # Shift any deletes after this along by the length of this insert - if op_start < change_start - change.op.p += op_length - moved_changes.push change - else if op_start == change_start - # If we are undoing, then we want to cancel any existing delete ranges if we can. - # Check if the insert matches the start of the delete, and just remove it from the delete instead if so. - if undoing and change.op.d.length >= op.i.length and change.op.d.slice(0, op.i.length) == op.i - change.op.d = change.op.d.slice(op.i.length) - change.op.p += op.i.length - if change.op.d == "" - remove_changes.push change - else - moved_changes.push change - already_merged = true - else - change.op.p += op_length - moved_changes.push change - else if change.op.i? - change_end = change_start + change.op.i.length - is_change_overlapping = (op_start >= change_start and op_start <= change_end) - - # Only merge inserts if they are from the same user - is_same_user = metadata.user_id == change.metadata.user_id - - # If we are undoing, then our changes will be removed from any delete ops just after. In that case, if there is also - # an insert op just before, then we shouldn't append it to this insert, but instead only cancel the following delete. - # E.g. - # foo|<--- about to insert 'b' here - # inserted 'foo' --^ ^-- deleted 'bar' - # should become just 'foo' not 'foob' (with the delete marker becoming just 'ar'), . - next_change = @changes[i+1] - is_op_adjacent_to_next_delete = next_change? and next_change.op.d? and op.p == change_end and next_change.op.p == op.p - will_op_cancel_next_delete = undoing and is_op_adjacent_to_next_delete and next_change.op.d.slice(0, op.i.length) == op.i - - # If there is a delete at the start of the insert, and we're inserting - # at the start, we SHOULDN'T merge since the delete acts as a partition. - # The previous op will be the delete, but it's already been shifted by this insert - # - # I.e. - # Originally: |-- existing insert --| - # | <- existing delete at same offset - # - # Now: |-- existing insert --| <- not shifted yet - # |-- this insert --|| <- existing delete shifted along to end of this op - # - # After: |-- existing insert --| - # |-- this insert --|| <- existing delete - # - # Without the delete, the inserts would be merged. - is_insert_blocked_by_delete = (previous_change? and previous_change.op.d? and previous_change.op.p == op_end) - - # If the insert is overlapping another insert, either at the beginning in the middle or touching the end, - # then we merge them into one. 
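// A worked example of the merge rule described above: a user inserting
// inside their own earlier insert extends that range in place instead of
// creating a second one. Offsets are illustrative:
const change = { op: { i: "hello", p: 10 }, metadata: { user_id: "u1" } };
const op = { i: "XY", p: 12 };      // lands inside [10, 15), same user
const offset = op.p - change.op.p;  // 2
change.op.i = change.op.i.slice(0, offset) + op.i + change.op.i.slice(offset);
console.log(change.op.i);           // "heXYllo" -- still a single insert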
- if @track_changes and - is_change_overlapping and - !is_insert_blocked_by_delete and - !already_merged and - !will_op_cancel_next_delete and - is_same_user - offset = op_start - change_start - change.op.i = change.op.i.slice(0, offset) + op.i + change.op.i.slice(offset) - change.metadata.ts = metadata.ts - already_merged = true - moved_changes.push change - else if op_start <= change_start - # If we're fully before the other insert we can just shift the other insert by our length. - # If they are touching, and should have been merged, they will have been above. - # If not merged above, then it must be blocked by a delete, and will be after this insert, so we shift it along as well - change.op.p += op_length - moved_changes.push change - else if (!is_same_user or !@track_changes) and change_start < op_start < change_end - # This user is inserting inside a change by another user, so we need to split the - # other user's change into one before and after this one. - offset = op_start - change_start - before_content = change.op.i.slice(0, offset) - after_content = change.op.i.slice(offset) - - # The existing change can become the 'before' change - change.op.i = before_content - moved_changes.push change - - # Create a new op afterwards - after_change = { - op: { - i: after_content - p: change_start + offset + op_length + if (change.op.d != null) { + // Shift any deletes after this along by the length of this insert + if (op_start < change_start) { + change.op.p += op_length; + moved_changes.push(change); + } else if (op_start === change_start) { + // If we are undoing, then we want to cancel any existing delete ranges if we can. + // Check if the insert matches the start of the delete, and just remove it from the delete instead if so. + if (undoing && (change.op.d.length >= op.i.length) && (change.op.d.slice(0, op.i.length) === op.i)) { + change.op.d = change.op.d.slice(op.i.length); + change.op.p += op.i.length; + if (change.op.d === "") { + remove_changes.push(change); + } else { + moved_changes.push(change); } - metadata: {} + already_merged = true; + } else { + change.op.p += op_length; + moved_changes.push(change); } - after_change.metadata[key] = value for key, value of change.metadata - new_changes.push after_change + } + } else if (change.op.i != null) { + var offset; + const change_end = change_start + change.op.i.length; + const is_change_overlapping = ((op_start >= change_start) && (op_start <= change_end)); + + // Only merge inserts if they are from the same user + const is_same_user = metadata.user_id === change.metadata.user_id; + + // If we are undoing, then our changes will be removed from any delete ops just after. In that case, if there is also + // an insert op just before, then we shouldn't append it to this insert, but instead only cancel the following delete. + // E.g. + // foo|<--- about to insert 'b' here + // inserted 'foo' --^ ^-- deleted 'bar' + // should become just 'foo' not 'foob' (with the delete marker becoming just 'ar'), . + const next_change = this.changes[i+1]; + const is_op_adjacent_to_next_delete = (next_change != null) && (next_change.op.d != null) && (op.p === change_end) && (next_change.op.p === op.p); + const will_op_cancel_next_delete = undoing && is_op_adjacent_to_next_delete && (next_change.op.d.slice(0, op.i.length) === op.i); + + // If there is a delete at the start of the insert, and we're inserting + // at the start, we SHOULDN'T merge since the delete acts as a partition. 
+ // The previous op will be the delete, but it's already been shifted by this insert + // + // I.e. + // Originally: |-- existing insert --| + // | <- existing delete at same offset + // + // Now: |-- existing insert --| <- not shifted yet + // |-- this insert --|| <- existing delete shifted along to end of this op + // + // After: |-- existing insert --| + // |-- this insert --|| <- existing delete + // + // Without the delete, the inserts would be merged. + const is_insert_blocked_by_delete = ((previous_change != null) && (previous_change.op.d != null) && (previous_change.op.p === op_end)); + + // If the insert is overlapping another insert, either at the beginning in the middle or touching the end, + // then we merge them into one. + if (this.track_changes && + is_change_overlapping && + !is_insert_blocked_by_delete && + !already_merged && + !will_op_cancel_next_delete && + is_same_user) { + offset = op_start - change_start; + change.op.i = change.op.i.slice(0, offset) + op.i + change.op.i.slice(offset); + change.metadata.ts = metadata.ts; + already_merged = true; + moved_changes.push(change); + } else if (op_start <= change_start) { + // If we're fully before the other insert we can just shift the other insert by our length. + // If they are touching, and should have been merged, they will have been above. + // If not merged above, then it must be blocked by a delete, and will be after this insert, so we shift it along as well + change.op.p += op_length; + moved_changes.push(change); + } else if ((!is_same_user || !this.track_changes) && (change_start < op_start && op_start < change_end)) { + // This user is inserting inside a change by another user, so we need to split the + // other user's change into one before and after this one. + offset = op_start - change_start; + const before_content = change.op.i.slice(0, offset); + const after_content = change.op.i.slice(offset); - previous_change = change + // The existing change can become the 'before' change + change.op.i = before_content; + moved_changes.push(change); + + // Create a new op afterwards + const after_change = { + op: { + i: after_content, + p: change_start + offset + op_length + }, + metadata: {} + }; + for (let key in change.metadata) { const value = change.metadata[key]; after_change.metadata[key] = value; } + new_changes.push(after_change); + } + } + + previous_change = change; + } - if @track_changes and !already_merged - @_addOp op, metadata - for {op, metadata} in new_changes - @_addOp op, metadata + if (this.track_changes && !already_merged) { + this._addOp(op, metadata); + } + for ({op, metadata} of Array.from(new_changes)) { + this._addOp(op, metadata); + } - for change in remove_changes - @_removeChange change + for (change of Array.from(remove_changes)) { + this._removeChange(change); + } - for change in moved_changes - @_markAsDirty change, "change", "moved" + return (() => { + const result = []; + for (change of Array.from(moved_changes)) { + result.push(this._markAsDirty(change, "change", "moved")); + } + return result; + })(); + } - applyDeleteToChanges: (op, metadata) -> - op_start = op.p - op_length = op.d.length - op_end = op.p + op_length - remove_changes = [] - moved_changes = [] + applyDeleteToChanges(op, metadata) { + let change; + const op_start = op.p; + const op_length = op.d.length; + const op_end = op.p + op_length; + const remove_changes = []; + let moved_changes = []; - # We might end up modifying our delete op if it merges with existing deletes, or cancels out - # with an existing insert. 
Since we might do multiple modifications, we record them and do - # all the modifications after looping through the existing changes, so as not to mess up the - # offset indexes as we go. - op_modifications = [] - for change in @changes - if change.op.i? - change_start = change.op.p - change_end = change_start + change.op.i.length - if op_end <= change_start - # Shift ops after us back by our length - change.op.p -= op_length - moved_changes.push change - else if op_start >= change_end - # Delete is after insert, nothing to do - else - # When the new delete overlaps an insert, we should remove the part of the insert that - # is now deleted, and also remove the part of the new delete that overlapped. I.e. - # the two cancel out where they overlap. - if op_start >= change_start - # |-- existing insert --| - # insert_remaining_before -> |.....||-- new delete --| - delete_remaining_before = "" - insert_remaining_before = change.op.i.slice(0, op_start - change_start) - else - # delete_remaining_before -> |.....||-- existing insert --| - # |-- new delete --| - delete_remaining_before = op.d.slice(0, change_start - op_start) - insert_remaining_before = "" - - if op_end <= change_end - # |-- existing insert --| - # |-- new delete --||.....| <- insert_remaining_after - delete_remaining_after = "" - insert_remaining_after = change.op.i.slice(op_end - change_start) - else - # |-- existing insert --||.....| <- delete_remaining_after - # |-- new delete --| - delete_remaining_after = op.d.slice(change_end - op_start) - insert_remaining_after = "" - - insert_remaining = insert_remaining_before + insert_remaining_after - if insert_remaining.length > 0 - change.op.i = insert_remaining - change.op.p = Math.min(change_start, op_start) - change.metadata.ts = metadata.ts - moved_changes.push change - else - remove_changes.push change - - # We know what we want to preserve of our delete op before (delete_remaining_before) and what we want to preserve - # afterwards (delete_remaining_before). Now we need to turn that into a modification which deletes the - # chunk in the middle not covered by these. - delete_removed_length = op.d.length - delete_remaining_before.length - delete_remaining_after.length - delete_removed_start = delete_remaining_before.length - modification = { - d: op.d.slice(delete_removed_start, delete_removed_start + delete_removed_length) - p: delete_removed_start + // We might end up modifying our delete op if it merges with existing deletes, or cancels out + // with an existing insert. Since we might do multiple modifications, we record them and do + // all the modifications after looping through the existing changes, so as not to mess up the + // offset indexes as we go. + const op_modifications = []; + for (change of Array.from(this.changes)) { + var change_start; + if (change.op.i != null) { + change_start = change.op.p; + const change_end = change_start + change.op.i.length; + if (op_end <= change_start) { + // Shift ops after us back by our length + change.op.p -= op_length; + moved_changes.push(change); + } else if (op_start >= change_end) { + // Delete is after insert, nothing to do + } else { + // When the new delete overlaps an insert, we should remove the part of the insert that + // is now deleted, and also remove the part of the new delete that overlapped. I.e. + // the two cancel out where they overlap. 
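// A worked example of the cancellation described above: a delete that
// overlaps the tail of an existing insert removes the overlapped part of
// the insert, and only the non-overlapping tail of the delete survives.
// Offsets are illustrative:
const ins = { i: "abcdef", p: 10 };                  // insert spans [10, 16)
const del = { d: "efgh", p: 14 };                    // delete spans [14, 18)
const insert_remaining = ins.i.slice(0, del.p - ins.p);             // "abcd"
const delete_remaining = del.d.slice(ins.p + ins.i.length - del.p); // "gh"
console.log(insert_remaining, delete_remaining); // overlapping "ef" cancels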
+ var delete_remaining_after, delete_remaining_before, insert_remaining_after, insert_remaining_before; + if (op_start >= change_start) { + // |-- existing insert --| + // insert_remaining_before -> |.....||-- new delete --| + delete_remaining_before = ""; + insert_remaining_before = change.op.i.slice(0, op_start - change_start); + } else { + // delete_remaining_before -> |.....||-- existing insert --| + // |-- new delete --| + delete_remaining_before = op.d.slice(0, change_start - op_start); + insert_remaining_before = ""; } - if modification.d.length > 0 - op_modifications.push modification - else if change.op.d? - change_start = change.op.p - if op_end < change_start or (!@track_changes and op_end == change_start) - # Shift ops after us back by our length. - # If we're tracking changes, it must be strictly before, since we'll merge - # below if they are touching. Otherwise, touching is fine. - change.op.p -= op_length - moved_changes.push change - else if op_start <= change_start <= op_end - if @track_changes - # If we overlap a delete, add it in our content, and delete the existing change. - # It's easier to do it this way, rather than modifying the existing delete in case - # we overlap many deletes and we'd need to track that. We have a workaround to - # update the delete in place if possible below. - offset = change_start - op_start - op_modifications.push { i: change.op.d, p: offset } - remove_changes.push change - else - change.op.p = op_start - moved_changes.push change - # Copy rather than modify because we still need to apply it to comments + if (op_end <= change_end) { + // |-- existing insert --| + // |-- new delete --||.....| <- insert_remaining_after + delete_remaining_after = ""; + insert_remaining_after = change.op.i.slice(op_end - change_start); + } else { + // |-- existing insert --||.....| <- delete_remaining_after + // |-- new delete --| + delete_remaining_after = op.d.slice(change_end - op_start); + insert_remaining_after = ""; + } + + const insert_remaining = insert_remaining_before + insert_remaining_after; + if (insert_remaining.length > 0) { + change.op.i = insert_remaining; + change.op.p = Math.min(change_start, op_start); + change.metadata.ts = metadata.ts; + moved_changes.push(change); + } else { + remove_changes.push(change); + } + + // We know what we want to preserve of our delete op before (delete_remaining_before) and what we want to preserve + // afterwards (delete_remaining_before). Now we need to turn that into a modification which deletes the + // chunk in the middle not covered by these. + const delete_removed_length = op.d.length - delete_remaining_before.length - delete_remaining_after.length; + const delete_removed_start = delete_remaining_before.length; + const modification = { + d: op.d.slice(delete_removed_start, delete_removed_start + delete_removed_length), + p: delete_removed_start + }; + if (modification.d.length > 0) { + op_modifications.push(modification); + } + } + } else if (change.op.d != null) { + change_start = change.op.p; + if ((op_end < change_start) || (!this.track_changes && (op_end === change_start))) { + // Shift ops after us back by our length. + // If we're tracking changes, it must be strictly before, since we'll merge + // below if they are touching. Otherwise, touching is fine. + change.op.p -= op_length; + moved_changes.push(change); + } else if (op_start <= change_start && change_start <= op_end) { + if (this.track_changes) { + // If we overlap a delete, add it in our content, and delete the existing change. 
+              // It's easier to do it this way, rather than modifying the existing delete in case
+              // we overlap many deletes and we'd need to track that. We have a workaround to
+              // update the delete in place if possible below.
+              const offset = change_start - op_start;
+              op_modifications.push({ i: change.op.d, p: offset });
+              remove_changes.push(change);
+            } else {
+              change.op.p = op_start;
+              moved_changes.push(change);
+            }
+          }
+        }
+      }
+
+      // Copy rather than modify because we still need to apply it to comments
 		op = {
-			p: op.p
-			d: @_applyOpModifications(op.d, op_modifications)
+        p: op.p,
+        d: this._applyOpModifications(op.d, op_modifications)
+      };
+
+      for (change of Array.from(remove_changes)) {
+        // This is a bit of a hack to avoid removing one delete and replacing it with another.
+        // If we don't do this, it causes the UI to flicker
+        if ((op.d.length > 0) && (change.op.d != null) && (op.p <= change.op.p && change.op.p <= op.p + op.d.length)) {
+          change.op.p = op.p;
+          change.op.d = op.d;
+          change.metadata = metadata;
+          moved_changes.push(change);
+          op.d = ""; // stop it being added
+        } else {
+          this._removeChange(change);
+        }
       }
-		for change in remove_changes
-			# This is a bit of hack to avoid removing one delete and replacing it with another.
-			# If we don't do this, it causes the UI to flicker
-			if op.d.length > 0 and change.op.d? and op.p <= change.op.p <= op.p + op.d.length
-				change.op.p = op.p
-				change.op.d = op.d
-				change.metadata = metadata
-				moved_changes.push change
-				op.d = "" # stop it being added
-			else
-				@_removeChange change
-
-		if @track_changes and op.d.length > 0
-			@_addOp op, metadata
-		else
-			# It's possible that we deleted an insert between two other inserts. I.e.
-			# If we delete 'user_2 insert' in:
-			#   |-- user_1 insert --||-- user_2 insert --||-- user_1 insert --|
-			# it becomes:
-			#   |-- user_1 insert --||-- user_1 insert --|
-			# We need to merge these together again
-			results = @_scanAndMergeAdjacentUpdates()
-			moved_changes = moved_changes.concat(results.moved_changes)
-			for change in results.remove_changes
-				@_removeChange change
-				moved_changes = moved_changes.filter (c) -> c != change
-
-		for change in moved_changes
-			@_markAsDirty change, "change", "moved"
-
-	_addOp: (op, metadata) ->
-		change = {
-			id: @newId()
-			op: @_clone(op) # Don't take a reference to the existing op since we'll modify this in place with future changes
-			metadata: @_clone(metadata)
+      if (this.track_changes && (op.d.length > 0)) {
+        this._addOp(op, metadata);
+      } else {
+        // It's possible that we deleted an insert between two other inserts. I.e.
+        // If we delete 'user_2 insert' in:
+        //   |-- user_1 insert --||-- user_2 insert --||-- user_1 insert --|
+        // it becomes:
+        //   |-- user_1 insert --||-- user_1 insert --|
+        // We need to merge these together again
+        const results = this._scanAndMergeAdjacentUpdates();
+        moved_changes = moved_changes.concat(results.moved_changes);
+        for (change of Array.from(results.remove_changes)) {
+          this._removeChange(change);
+          moved_changes = moved_changes.filter(c => c !== change);
+        }
       }
-		@changes.push change
-
-		# Keep ops in order of offset, with deletes before inserts
-		@changes.sort (c1, c2) ->
-			result = c1.op.p - c2.op.p
-			if result != 0
-				return result
-			else if c1.op.i? and c2.op.d?
- return 1 - else - return -1 - - @_markAsDirty(change, "change", "added") - - _removeChange: (change) -> - @changes = @changes.filter (c) -> c.id != change.id - @_markAsDirty change, "change", "removed" - _applyOpModifications: (content, op_modifications) -> - # Put in descending position order, with deleting first if at the same offset - # (Inserting first would modify the content that the delete will delete) - op_modifications.sort (a, b) -> - result = b.p - a.p - if result != 0 - return result - else if a.i? and b.d? - return 1 - else - return -1 + return (() => { + const result = []; + for (change of Array.from(moved_changes)) { + result.push(this._markAsDirty(change, "change", "moved")); + } + return result; + })(); + } - for modification in op_modifications - if modification.i? - content = content.slice(0, modification.p) + modification.i + content.slice(modification.p) - else if modification.d? - if content.slice(modification.p, modification.p + modification.d.length) != modification.d - throw new Error("deleted content does not match. content: #{JSON.stringify(content)}; modification: #{JSON.stringify(modification)}") - content = content.slice(0, modification.p) + content.slice(modification.p + modification.d.length) - return content + _addOp(op, metadata) { + const change = { + id: this.newId(), + op: this._clone(op), // Don't take a reference to the existing op since we'll modify this in place with future changes + metadata: this._clone(metadata) + }; + this.changes.push(change); + + // Keep ops in order of offset, with deletes before inserts + this.changes.sort(function(c1, c2) { + const result = c1.op.p - c2.op.p; + if (result !== 0) { + return result; + } else if ((c1.op.i != null) && (c2.op.d != null)) { + return 1; + } else { + return -1; + } + }); + + return this._markAsDirty(change, "change", "added"); + } - _scanAndMergeAdjacentUpdates: () -> - # This should only need calling when deleting an update between two - # other updates. There's no other way to get two adjacent updates from the - # same user, since they would be merged on insert. - previous_change = null - remove_changes = [] - moved_changes = [] - for change in @changes - if previous_change?.op.i? and change.op.i? - previous_change_end = previous_change.op.p + previous_change.op.i.length - previous_change_user_id = previous_change.metadata.user_id - change_start = change.op.p - change_user_id = change.metadata.user_id - if previous_change_end == change_start and previous_change_user_id == change_user_id - remove_changes.push change - previous_change.op.i += change.op.i - moved_changes.push previous_change - else if previous_change?.op.d? and change.op.d? and previous_change.op.p == change.op.p - # Merge adjacent deletes - previous_change.op.d += change.op.d - remove_changes.push change - moved_changes.push previous_change - else # Only update to the current change if we haven't removed it. 
- previous_change = change - return { moved_changes, remove_changes } + _removeChange(change) { + this.changes = this.changes.filter(c => c.id !== change.id); + return this._markAsDirty(change, "change", "removed"); + } + + _applyOpModifications(content, op_modifications) { + // Put in descending position order, with deleting first if at the same offset + // (Inserting first would modify the content that the delete will delete) + op_modifications.sort(function(a, b) { + const result = b.p - a.p; + if (result !== 0) { + return result; + } else if ((a.i != null) && (b.d != null)) { + return 1; + } else { + return -1; + } + }); + + for (let modification of Array.from(op_modifications)) { + if (modification.i != null) { + content = content.slice(0, modification.p) + modification.i + content.slice(modification.p); + } else if (modification.d != null) { + if (content.slice(modification.p, modification.p + modification.d.length) !== modification.d) { + throw new Error(`deleted content does not match. content: ${JSON.stringify(content)}; modification: ${JSON.stringify(modification)}`); + } + content = content.slice(0, modification.p) + content.slice(modification.p + modification.d.length); + } + } + return content; + } - resetDirtyState: () -> - @_dirtyState = { + _scanAndMergeAdjacentUpdates() { + // This should only need calling when deleting an update between two + // other updates. There's no other way to get two adjacent updates from the + // same user, since they would be merged on insert. + let previous_change = null; + const remove_changes = []; + const moved_changes = []; + for (let change of Array.from(this.changes)) { + if (((previous_change != null ? previous_change.op.i : undefined) != null) && (change.op.i != null)) { + const previous_change_end = previous_change.op.p + previous_change.op.i.length; + const previous_change_user_id = previous_change.metadata.user_id; + const change_start = change.op.p; + const change_user_id = change.metadata.user_id; + if ((previous_change_end === change_start) && (previous_change_user_id === change_user_id)) { + remove_changes.push(change); + previous_change.op.i += change.op.i; + moved_changes.push(previous_change); + } + } else if (((previous_change != null ? previous_change.op.d : undefined) != null) && (change.op.d != null) && (previous_change.op.p === change.op.p)) { + // Merge adjacent deletes + previous_change.op.d += change.op.d; + remove_changes.push(change); + moved_changes.push(previous_change); + } else { // Only update to the current change if we haven't removed it. + previous_change = change; + } + } + return { moved_changes, remove_changes }; + } + + resetDirtyState() { + return this._dirtyState = { comment: { - moved: {} - removed: {} + moved: {}, + removed: {}, added: {} - } + }, change: { - moved: {} - removed: {} + moved: {}, + removed: {}, added: {} } - } + }; + } - getDirtyState: () -> - return @_dirtyState + getDirtyState() { + return this._dirtyState; + } - _markAsDirty: (object, type, action) -> - @_dirtyState[type][action][object.id] = object + _markAsDirty(object, type, action) { + return this._dirtyState[type][action][object.id] = object; + } - _clone: (object) -> - clone = {} - (clone[k] = v for k,v of object) - return clone + _clone(object) { + const clone = {}; + for (let k in object) { const v = object[k]; clone[k] = v; } + return clone; + } + }; +}; -if define? 
- define [], load -else - module.exports = load() +if (typeof define !== 'undefined' && define !== null) { + define([], load); +} else { + module.exports = load(); +} diff --git a/services/document-updater/app/coffee/RateLimitManager.js b/services/document-updater/app/coffee/RateLimitManager.js index 7128b5d988..534fdade92 100644 --- a/services/document-updater/app/coffee/RateLimitManager.js +++ b/services/document-updater/app/coffee/RateLimitManager.js @@ -1,39 +1,58 @@ -Settings = require('settings-sharelatex') -logger = require('logger-sharelatex') -Metrics = require('./Metrics') +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let RateLimiter; +const Settings = require('settings-sharelatex'); +const logger = require('logger-sharelatex'); +const Metrics = require('./Metrics'); -module.exports = class RateLimiter +module.exports = (RateLimiter = class RateLimiter { - constructor: (number = 10) -> - @ActiveWorkerCount = 0 - @CurrentWorkerLimit = number - @BaseWorkerCount = number + constructor(number) { + if (number == null) { number = 10; } + this.ActiveWorkerCount = 0; + this.CurrentWorkerLimit = number; + this.BaseWorkerCount = number; + } - _adjustLimitUp: () -> - @CurrentWorkerLimit += 0.1 # allow target worker limit to increase gradually - Metrics.gauge "currentLimit", Math.ceil(@CurrentWorkerLimit) + _adjustLimitUp() { + this.CurrentWorkerLimit += 0.1; // allow target worker limit to increase gradually + return Metrics.gauge("currentLimit", Math.ceil(this.CurrentWorkerLimit)); + } - _adjustLimitDown: () -> - @CurrentWorkerLimit = Math.max @BaseWorkerCount, (@CurrentWorkerLimit * 0.9) - logger.log {currentLimit: Math.ceil(@CurrentWorkerLimit)}, "reducing rate limit" - Metrics.gauge "currentLimit", Math.ceil(@CurrentWorkerLimit) + _adjustLimitDown() { + this.CurrentWorkerLimit = Math.max(this.BaseWorkerCount, (this.CurrentWorkerLimit * 0.9)); + logger.log({currentLimit: Math.ceil(this.CurrentWorkerLimit)}, "reducing rate limit"); + return Metrics.gauge("currentLimit", Math.ceil(this.CurrentWorkerLimit)); + } - _trackAndRun: (task, callback = () ->) -> - @ActiveWorkerCount++ - Metrics.gauge "processingUpdates", @ActiveWorkerCount - task (err) => - @ActiveWorkerCount-- - Metrics.gauge "processingUpdates", @ActiveWorkerCount - callback(err) + _trackAndRun(task, callback) { + if (callback == null) { callback = function() {}; } + this.ActiveWorkerCount++; + Metrics.gauge("processingUpdates", this.ActiveWorkerCount); + return task(err => { + this.ActiveWorkerCount--; + Metrics.gauge("processingUpdates", this.ActiveWorkerCount); + return callback(err); + }); + } - run: (task, callback) -> - if @ActiveWorkerCount < @CurrentWorkerLimit - @_trackAndRun task # below the limit, just put the task in the background - callback() # return immediately - if @CurrentWorkerLimit > @BaseWorkerCount - @_adjustLimitDown() - else - logger.log {active: @ActiveWorkerCount, currentLimit: Math.ceil(@CurrentWorkerLimit)}, "hit rate limit" - @_trackAndRun task, (err) => - @_adjustLimitUp() if !err? 
# don't increment rate limit if there was an error - callback(err) # only return after task completes + run(task, callback) { + if (this.ActiveWorkerCount < this.CurrentWorkerLimit) { + this._trackAndRun(task); // below the limit, just put the task in the background + callback(); // return immediately + if (this.CurrentWorkerLimit > this.BaseWorkerCount) { + return this._adjustLimitDown(); + } + } else { + logger.log({active: this.ActiveWorkerCount, currentLimit: Math.ceil(this.CurrentWorkerLimit)}, "hit rate limit"); + return this._trackAndRun(task, err => { + if ((err == null)) { this._adjustLimitUp(); } // don't increment rate limit if there was an error + return callback(err); + }); // only return after task completes + } + } +}); diff --git a/services/document-updater/app/coffee/RealTimeRedisManager.js b/services/document-updater/app/coffee/RealTimeRedisManager.js index d26bf8ff8f..b3d7a65680 100644 --- a/services/document-updater/app/coffee/RealTimeRedisManager.js +++ b/services/document-updater/app/coffee/RealTimeRedisManager.js @@ -1,52 +1,73 @@ -Settings = require('settings-sharelatex') -rclient = require("redis-sharelatex").createClient(Settings.redis.documentupdater) -pubsubClient = require("redis-sharelatex").createClient(Settings.redis.pubsub) -Keys = Settings.redis.documentupdater.key_schema -logger = require('logger-sharelatex') -os = require "os" -crypto = require "crypto" -metrics = require('./Metrics') +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let RealTimeRedisManager; +const Settings = require('settings-sharelatex'); +const rclient = require("redis-sharelatex").createClient(Settings.redis.documentupdater); +const pubsubClient = require("redis-sharelatex").createClient(Settings.redis.pubsub); +const Keys = Settings.redis.documentupdater.key_schema; +const logger = require('logger-sharelatex'); +const os = require("os"); +const crypto = require("crypto"); +const metrics = require('./Metrics'); -HOST = os.hostname() -RND = crypto.randomBytes(4).toString('hex') # generate a random key for this process -COUNT = 0 +const HOST = os.hostname(); +const RND = crypto.randomBytes(4).toString('hex'); // generate a random key for this process +let COUNT = 0; -MAX_OPS_PER_ITERATION = 8 # process a limited number of ops for safety +const MAX_OPS_PER_ITERATION = 8; // process a limited number of ops for safety -module.exports = RealTimeRedisManager = - getPendingUpdatesForDoc : (doc_id, callback)-> - multi = rclient.multi() - multi.lrange Keys.pendingUpdates({doc_id}), 0, (MAX_OPS_PER_ITERATION-1) - multi.ltrim Keys.pendingUpdates({doc_id}), MAX_OPS_PER_ITERATION, -1 - multi.exec (error, replys) -> - return callback(error) if error? 
- jsonUpdates = replys[0] - for jsonUpdate in jsonUpdates - # record metric for each update removed from queue - metrics.summary "redis.pendingUpdates", jsonUpdate.length, {status: "pop"} - updates = [] - for jsonUpdate in jsonUpdates - try - update = JSON.parse jsonUpdate - catch e - return callback e - updates.push update - callback error, updates +module.exports = (RealTimeRedisManager = { + getPendingUpdatesForDoc(doc_id, callback){ + const multi = rclient.multi(); + multi.lrange(Keys.pendingUpdates({doc_id}), 0, (MAX_OPS_PER_ITERATION-1)); + multi.ltrim(Keys.pendingUpdates({doc_id}), MAX_OPS_PER_ITERATION, -1); + return multi.exec(function(error, replys) { + let jsonUpdate; + if (error != null) { return callback(error); } + const jsonUpdates = replys[0]; + for (jsonUpdate of Array.from(jsonUpdates)) { + // record metric for each update removed from queue + metrics.summary("redis.pendingUpdates", jsonUpdate.length, {status: "pop"}); + } + const updates = []; + for (jsonUpdate of Array.from(jsonUpdates)) { + var update; + try { + update = JSON.parse(jsonUpdate); + } catch (e) { + return callback(e); + } + updates.push(update); + } + return callback(error, updates); + }); + }, - getUpdatesLength: (doc_id, callback)-> - rclient.llen Keys.pendingUpdates({doc_id}), callback + getUpdatesLength(doc_id, callback){ + return rclient.llen(Keys.pendingUpdates({doc_id}), callback); + }, - sendData: (data) -> - # create a unique message id using a counter - message_id = "doc:#{HOST}:#{RND}-#{COUNT++}" - data?._id = message_id + sendData(data) { + // create a unique message id using a counter + const message_id = `doc:${HOST}:${RND}-${COUNT++}`; + if (data != null) { + data._id = message_id; + } - blob = JSON.stringify(data) - metrics.summary "redis.publish.applied-ops", blob.length + const blob = JSON.stringify(data); + metrics.summary("redis.publish.applied-ops", blob.length); - # publish on separate channels for individual projects and docs when - # configured (needs realtime to be configured for this too). - if Settings.publishOnIndividualChannels - pubsubClient.publish "applied-ops:#{data.doc_id}", blob - else - pubsubClient.publish "applied-ops", blob + // publish on separate channels for individual projects and docs when + // configured (needs realtime to be configured for this too). 
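+    // e.g. an update for a doc with id "abc123" goes out on the channel
+    // "applied-ops:abc123" instead of the shared "applied-ops" channel.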
+ if (Settings.publishOnIndividualChannels) { + return pubsubClient.publish(`applied-ops:${data.doc_id}`, blob); + } else { + return pubsubClient.publish("applied-ops", blob); + } + } +}); diff --git a/services/document-updater/app/coffee/RedisManager.js b/services/document-updater/app/coffee/RedisManager.js index 3eeed78ffb..f434dfc9d4 100644 --- a/services/document-updater/app/coffee/RedisManager.js +++ b/services/document-updater/app/coffee/RedisManager.js @@ -1,376 +1,484 @@ -Settings = require('settings-sharelatex') -rclient = require("redis-sharelatex").createClient(Settings.redis.documentupdater) -logger = require('logger-sharelatex') -metrics = require('./Metrics') -Errors = require "./Errors" -crypto = require "crypto" -async = require "async" -ProjectHistoryRedisManager = require "./ProjectHistoryRedisManager" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS201: Simplify complex destructure assignments + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let RedisManager; +const Settings = require('settings-sharelatex'); +const rclient = require("redis-sharelatex").createClient(Settings.redis.documentupdater); +const logger = require('logger-sharelatex'); +const metrics = require('./Metrics'); +const Errors = require("./Errors"); +const crypto = require("crypto"); +const async = require("async"); +const ProjectHistoryRedisManager = require("./ProjectHistoryRedisManager"); -# Sometimes Redis calls take an unexpectedly long time. We have to be -# quick with Redis calls because we're holding a lock that expires -# after 30 seconds. We can't let any errors in the rest of the stack -# hold us up, and need to bail out quickly if there is a problem. -MAX_REDIS_REQUEST_LENGTH = 5000 # 5 seconds +// Sometimes Redis calls take an unexpectedly long time. We have to be +// quick with Redis calls because we're holding a lock that expires +// after 30 seconds. We can't let any errors in the rest of the stack +// hold us up, and need to bail out quickly if there is a problem. +const MAX_REDIS_REQUEST_LENGTH = 5000; // 5 seconds -# Make times easy to read -minutes = 60 # seconds for Redis expire +// Make times easy to read +const minutes = 60; // seconds for Redis expire -logHashErrors = Settings.documentupdater?.logHashErrors -logHashReadErrors = logHashErrors?.read +const logHashErrors = Settings.documentupdater != null ? Settings.documentupdater.logHashErrors : undefined; +const logHashReadErrors = logHashErrors != null ? 
logHashErrors.read : undefined; -MEGABYTES = 1024 * 1024 -MAX_RANGES_SIZE = 3 * MEGABYTES +const MEGABYTES = 1024 * 1024; +const MAX_RANGES_SIZE = 3 * MEGABYTES; -keys = Settings.redis.documentupdater.key_schema -historyKeys = Settings.redis.history.key_schema # note: this is track changes, not project-history +const keys = Settings.redis.documentupdater.key_schema; +const historyKeys = Settings.redis.history.key_schema; // note: this is track changes, not project-history -module.exports = RedisManager = - rclient: rclient +module.exports = (RedisManager = { + rclient, - putDocInMemory : (project_id, doc_id, docLines, version, ranges, pathname, projectHistoryId, _callback)-> - timer = new metrics.Timer("redis.put-doc") - callback = (error) -> - timer.done() - _callback(error) - docLines = JSON.stringify(docLines) - if docLines.indexOf("\u0000") != -1 - error = new Error("null bytes found in doc lines") - # this check was added to catch memory corruption in JSON.stringify. - # It sometimes returned null bytes at the end of the string. - logger.error {err: error, doc_id: doc_id, docLines: docLines}, error.message - return callback(error) - docHash = RedisManager._computeHash(docLines) - # record bytes sent to redis - metrics.summary "redis.docLines", docLines.length, {status: "set"} - logger.log {project_id, doc_id, version, docHash, pathname, projectHistoryId}, "putting doc in redis" - RedisManager._serializeRanges ranges, (error, ranges) -> - if error? - logger.error {err: error, doc_id, project_id}, error.message - return callback(error) - multi = rclient.multi() - multi.set keys.docLines(doc_id:doc_id), docLines - multi.set keys.projectKey({doc_id:doc_id}), project_id - multi.set keys.docVersion(doc_id:doc_id), version - multi.set keys.docHash(doc_id:doc_id), docHash - if ranges? - multi.set keys.ranges(doc_id:doc_id), ranges - else - multi.del keys.ranges(doc_id:doc_id) - multi.set keys.pathname(doc_id:doc_id), pathname - multi.set keys.projectHistoryId(doc_id:doc_id), projectHistoryId - multi.exec (error, result) -> - return callback(error) if error? - # update docsInProject set - rclient.sadd keys.docsInProject(project_id:project_id), doc_id, callback + putDocInMemory(project_id, doc_id, docLines, version, ranges, pathname, projectHistoryId, _callback){ + const timer = new metrics.Timer("redis.put-doc"); + const callback = function(error) { + timer.done(); + return _callback(error); + }; + docLines = JSON.stringify(docLines); + if (docLines.indexOf("\u0000") !== -1) { + const error = new Error("null bytes found in doc lines"); + // this check was added to catch memory corruption in JSON.stringify. + // It sometimes returned null bytes at the end of the string. 
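+      // e.g. a corrupted stringify could produce '["some text"]\u0000', which
+      // would be written to redis as-is and then fail to JSON.parse on read.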
+ logger.error({err: error, doc_id, docLines}, error.message); + return callback(error); + } + const docHash = RedisManager._computeHash(docLines); + // record bytes sent to redis + metrics.summary("redis.docLines", docLines.length, {status: "set"}); + logger.log({project_id, doc_id, version, docHash, pathname, projectHistoryId}, "putting doc in redis"); + return RedisManager._serializeRanges(ranges, function(error, ranges) { + if (error != null) { + logger.error({err: error, doc_id, project_id}, error.message); + return callback(error); + } + const multi = rclient.multi(); + multi.set(keys.docLines({doc_id}), docLines); + multi.set(keys.projectKey({doc_id}), project_id); + multi.set(keys.docVersion({doc_id}), version); + multi.set(keys.docHash({doc_id}), docHash); + if (ranges != null) { + multi.set(keys.ranges({doc_id}), ranges); + } else { + multi.del(keys.ranges({doc_id})); + } + multi.set(keys.pathname({doc_id}), pathname); + multi.set(keys.projectHistoryId({doc_id}), projectHistoryId); + return multi.exec(function(error, result) { + if (error != null) { return callback(error); } + // update docsInProject set + return rclient.sadd(keys.docsInProject({project_id}), doc_id, callback); + }); + }); + }, - removeDocFromMemory : (project_id, doc_id, _callback)-> - logger.log project_id:project_id, doc_id:doc_id, "removing doc from redis" - callback = (err) -> - if err? - logger.err project_id:project_id, doc_id:doc_id, err:err, "error removing doc from redis" - _callback(err) - else - logger.log project_id:project_id, doc_id:doc_id, "removed doc from redis" - _callback() + removeDocFromMemory(project_id, doc_id, _callback){ + logger.log({project_id, doc_id}, "removing doc from redis"); + const callback = function(err) { + if (err != null) { + logger.err({project_id, doc_id, err}, "error removing doc from redis"); + return _callback(err); + } else { + logger.log({project_id, doc_id}, "removed doc from redis"); + return _callback(); + } + }; - multi = rclient.multi() - multi.strlen keys.docLines(doc_id:doc_id) - multi.del keys.docLines(doc_id:doc_id) - multi.del keys.projectKey(doc_id:doc_id) - multi.del keys.docVersion(doc_id:doc_id) - multi.del keys.docHash(doc_id:doc_id) - multi.del keys.ranges(doc_id:doc_id) - multi.del keys.pathname(doc_id:doc_id) - multi.del keys.projectHistoryId(doc_id:doc_id) - multi.del keys.projectHistoryType(doc_id:doc_id) - multi.del keys.unflushedTime(doc_id:doc_id) - multi.del keys.lastUpdatedAt(doc_id: doc_id) - multi.del keys.lastUpdatedBy(doc_id: doc_id) - multi.exec (error, response) -> - return callback(error) if error? - length = response?[0] - if length > 0 - # record bytes freed in redis - metrics.summary "redis.docLines", length, {status: "del"} - multi = rclient.multi() - multi.srem keys.docsInProject(project_id:project_id), doc_id - multi.del keys.projectState(project_id:project_id) - multi.exec callback + let multi = rclient.multi(); + multi.strlen(keys.docLines({doc_id})); + multi.del(keys.docLines({doc_id})); + multi.del(keys.projectKey({doc_id})); + multi.del(keys.docVersion({doc_id})); + multi.del(keys.docHash({doc_id})); + multi.del(keys.ranges({doc_id})); + multi.del(keys.pathname({doc_id})); + multi.del(keys.projectHistoryId({doc_id})); + multi.del(keys.projectHistoryType({doc_id})); + multi.del(keys.unflushedTime({doc_id})); + multi.del(keys.lastUpdatedAt({doc_id})); + multi.del(keys.lastUpdatedBy({doc_id})); + return multi.exec(function(error, response) { + if (error != null) { return callback(error); } + const length = response != null ? 
response[0] : undefined; + if (length > 0) { + // record bytes freed in redis + metrics.summary("redis.docLines", length, {status: "del"}); + } + multi = rclient.multi(); + multi.srem(keys.docsInProject({project_id}), doc_id); + multi.del(keys.projectState({project_id})); + return multi.exec(callback); + }); + }, - checkOrSetProjectState: (project_id, newState, callback = (error, stateChanged) ->) -> - multi = rclient.multi() - multi.getset keys.projectState(project_id:project_id), newState - multi.expire keys.projectState(project_id:project_id), 30 * minutes - multi.exec (error, response) -> - return callback(error) if error? - logger.log project_id: project_id, newState:newState, oldState: response[0], "checking project state" - callback(null, response[0] isnt newState) + checkOrSetProjectState(project_id, newState, callback) { + if (callback == null) { callback = function(error, stateChanged) {}; } + const multi = rclient.multi(); + multi.getset(keys.projectState({project_id}), newState); + multi.expire(keys.projectState({project_id}), 30 * minutes); + return multi.exec(function(error, response) { + if (error != null) { return callback(error); } + logger.log({project_id, newState, oldState: response[0]}, "checking project state"); + return callback(null, response[0] !== newState); + }); + }, - clearProjectState: (project_id, callback = (error) ->) -> - rclient.del keys.projectState(project_id:project_id), callback + clearProjectState(project_id, callback) { + if (callback == null) { callback = function(error) {}; } + return rclient.del(keys.projectState({project_id}), callback); + }, - getDoc : (project_id, doc_id, callback = (error, lines, version, ranges, pathname, projectHistoryId, unflushedTime) ->)-> - timer = new metrics.Timer("redis.get-doc") - multi = rclient.multi() - multi.get keys.docLines(doc_id:doc_id) - multi.get keys.docVersion(doc_id:doc_id) - multi.get keys.docHash(doc_id:doc_id) - multi.get keys.projectKey(doc_id:doc_id) - multi.get keys.ranges(doc_id:doc_id) - multi.get keys.pathname(doc_id:doc_id) - multi.get keys.projectHistoryId(doc_id:doc_id) - multi.get keys.unflushedTime(doc_id:doc_id) - multi.get keys.lastUpdatedAt(doc_id: doc_id) - multi.get keys.lastUpdatedBy(doc_id: doc_id) - multi.exec (error, [docLines, version, storedHash, doc_project_id, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy])-> - timeSpan = timer.done() - return callback(error) if error? - # check if request took too long and bail out. only do this for - # get, because it is the first call in each update, so if this - # passes we'll assume others have a reasonable chance to succeed. - if timeSpan > MAX_REDIS_REQUEST_LENGTH - error = new Error("redis getDoc exceeded timeout") - return callback(error) - # record bytes loaded from redis - if docLines? - metrics.summary "redis.docLines", docLines.length, {status: "get"} - # check sha1 hash value if present - if docLines? and storedHash? 
- computedHash = RedisManager._computeHash(docLines) - if logHashReadErrors and computedHash isnt storedHash - logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, computedHash: computedHash, storedHash: storedHash, docLines:docLines, "hash mismatch on retrieved document" + getDoc(project_id, doc_id, callback){ + if (callback == null) { callback = function(error, lines, version, ranges, pathname, projectHistoryId, unflushedTime) {}; } + const timer = new metrics.Timer("redis.get-doc"); + const multi = rclient.multi(); + multi.get(keys.docLines({doc_id})); + multi.get(keys.docVersion({doc_id})); + multi.get(keys.docHash({doc_id})); + multi.get(keys.projectKey({doc_id})); + multi.get(keys.ranges({doc_id})); + multi.get(keys.pathname({doc_id})); + multi.get(keys.projectHistoryId({doc_id})); + multi.get(keys.unflushedTime({doc_id})); + multi.get(keys.lastUpdatedAt({doc_id})); + multi.get(keys.lastUpdatedBy({doc_id})); + return multi.exec(function(error, ...rest){ + let [docLines, version, storedHash, doc_project_id, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy] = Array.from(rest[0]); + const timeSpan = timer.done(); + if (error != null) { return callback(error); } + // check if request took too long and bail out. only do this for + // get, because it is the first call in each update, so if this + // passes we'll assume others have a reasonable chance to succeed. + if (timeSpan > MAX_REDIS_REQUEST_LENGTH) { + error = new Error("redis getDoc exceeded timeout"); + return callback(error); + } + // record bytes loaded from redis + if (docLines != null) { + metrics.summary("redis.docLines", docLines.length, {status: "get"}); + } + // check sha1 hash value if present + if ((docLines != null) && (storedHash != null)) { + const computedHash = RedisManager._computeHash(docLines); + if (logHashReadErrors && (computedHash !== storedHash)) { + logger.error({project_id, doc_id, doc_project_id, computedHash, storedHash, docLines}, "hash mismatch on retrieved document"); + } + } - try - docLines = JSON.parse docLines - ranges = RedisManager._deserializeRanges(ranges) - catch e - return callback(e) + try { + docLines = JSON.parse(docLines); + ranges = RedisManager._deserializeRanges(ranges); + } catch (e) { + return callback(e); + } - version = parseInt(version or 0, 10) - # check doc is in requested project - if doc_project_id? and doc_project_id isnt project_id - logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, "doc not in project" - return callback(new Errors.NotFoundError("document not found")) + version = parseInt(version || 0, 10); + // check doc is in requested project + if ((doc_project_id != null) && (doc_project_id !== project_id)) { + logger.error({project_id, doc_id, doc_project_id}, "doc not in project"); + return callback(new Errors.NotFoundError("document not found")); + } - if projectHistoryId? - projectHistoryId = parseInt(projectHistoryId) + if (projectHistoryId != null) { + projectHistoryId = parseInt(projectHistoryId); + } - # doc is not in redis, bail out - if !docLines? 
- return callback null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy + // doc is not in redis, bail out + if ((docLines == null)) { + return callback(null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy); + } - # doc should be in project set, check if missing (workaround for missing docs from putDoc) - rclient.sadd keys.docsInProject(project_id:project_id), doc_id, (error, result) -> - return callback(error) if error? - if result isnt 0 # doc should already be in set - logger.error project_id: project_id, doc_id: doc_id, doc_project_id: doc_project_id, "doc missing from docsInProject set" - callback null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy + // doc should be in project set, check if missing (workaround for missing docs from putDoc) + return rclient.sadd(keys.docsInProject({project_id}), doc_id, function(error, result) { + if (error != null) { return callback(error); } + if (result !== 0) { // doc should already be in set + logger.error({project_id, doc_id, doc_project_id}, "doc missing from docsInProject set"); + } + return callback(null, docLines, version, ranges, pathname, projectHistoryId, unflushedTime, lastUpdatedAt, lastUpdatedBy); + }); + }); + }, - getDocVersion: (doc_id, callback = (error, version, projectHistoryType) ->) -> - rclient.mget keys.docVersion(doc_id: doc_id), keys.projectHistoryType(doc_id:doc_id), (error, result) -> - return callback(error) if error? - [version, projectHistoryType] = result || [] - version = parseInt(version, 10) - callback null, version, projectHistoryType + getDocVersion(doc_id, callback) { + if (callback == null) { callback = function(error, version, projectHistoryType) {}; } + return rclient.mget(keys.docVersion({doc_id}), keys.projectHistoryType({doc_id}), function(error, result) { + if (error != null) { return callback(error); } + let [version, projectHistoryType] = Array.from(result || []); + version = parseInt(version, 10); + return callback(null, version, projectHistoryType); + }); + }, - getDocLines: (doc_id, callback = (error, version) ->) -> - rclient.get keys.docLines(doc_id: doc_id), (error, docLines) -> - return callback(error) if error? - callback null, docLines + getDocLines(doc_id, callback) { + if (callback == null) { callback = function(error, version) {}; } + return rclient.get(keys.docLines({doc_id}), function(error, docLines) { + if (error != null) { return callback(error); } + return callback(null, docLines); + }); + }, - getPreviousDocOps: (doc_id, start, end, callback = (error, jsonOps) ->) -> - timer = new metrics.Timer("redis.get-prev-docops") - rclient.llen keys.docOps(doc_id: doc_id), (error, length) -> - return callback(error) if error? - rclient.get keys.docVersion(doc_id: doc_id), (error, version) -> - return callback(error) if error? 
- version = parseInt(version, 10) - first_version_in_redis = version - length + getPreviousDocOps(doc_id, start, end, callback) { + if (callback == null) { callback = function(error, jsonOps) {}; } + const timer = new metrics.Timer("redis.get-prev-docops"); + return rclient.llen(keys.docOps({doc_id}), function(error, length) { + if (error != null) { return callback(error); } + return rclient.get(keys.docVersion({doc_id}), function(error, version) { + if (error != null) { return callback(error); } + version = parseInt(version, 10); + const first_version_in_redis = version - length; - if start < first_version_in_redis or end > version - error = new Errors.OpRangeNotAvailableError("doc ops range is not loaded in redis") - logger.warn {err: error, doc_id, length, version, start, end}, "doc ops range is not loaded in redis" - return callback(error) + if ((start < first_version_in_redis) || (end > version)) { + error = new Errors.OpRangeNotAvailableError("doc ops range is not loaded in redis"); + logger.warn({err: error, doc_id, length, version, start, end}, "doc ops range is not loaded in redis"); + return callback(error); + } - start = start - first_version_in_redis - if end > -1 - end = end - first_version_in_redis + start = start - first_version_in_redis; + if (end > -1) { + end = end - first_version_in_redis; + } - if isNaN(start) or isNaN(end) - error = new Error("inconsistent version or lengths") - logger.error {err: error, doc_id, length, version, start, end}, "inconsistent version or length" - return callback(error) + if (isNaN(start) || isNaN(end)) { + error = new Error("inconsistent version or lengths"); + logger.error({err: error, doc_id, length, version, start, end}, "inconsistent version or length"); + return callback(error); + } - rclient.lrange keys.docOps(doc_id: doc_id), start, end, (error, jsonOps) -> - return callback(error) if error? - try - ops = jsonOps.map (jsonOp) -> JSON.parse jsonOp - catch e - return callback(e) - timeSpan = timer.done() - if timeSpan > MAX_REDIS_REQUEST_LENGTH - error = new Error("redis getPreviousDocOps exceeded timeout") - return callback(error) - callback null, ops + return rclient.lrange(keys.docOps({doc_id}), start, end, function(error, jsonOps) { + let ops; + if (error != null) { return callback(error); } + try { + ops = jsonOps.map(jsonOp => JSON.parse(jsonOp)); + } catch (e) { + return callback(e); + } + const timeSpan = timer.done(); + if (timeSpan > MAX_REDIS_REQUEST_LENGTH) { + error = new Error("redis getPreviousDocOps exceeded timeout"); + return callback(error); + } + return callback(null, ops); + }); + }); + }); + }, - getHistoryType: (doc_id, callback = (error, projectHistoryType) ->) -> - rclient.get keys.projectHistoryType(doc_id:doc_id), (error, projectHistoryType) -> - return callback(error) if error? 
- callback null, projectHistoryType + getHistoryType(doc_id, callback) { + if (callback == null) { callback = function(error, projectHistoryType) {}; } + return rclient.get(keys.projectHistoryType({doc_id}), function(error, projectHistoryType) { + if (error != null) { return callback(error); } + return callback(null, projectHistoryType); + }); + }, - setHistoryType: (doc_id, projectHistoryType, callback = (error) ->) -> - rclient.set keys.projectHistoryType(doc_id:doc_id), projectHistoryType, callback + setHistoryType(doc_id, projectHistoryType, callback) { + if (callback == null) { callback = function(error) {}; } + return rclient.set(keys.projectHistoryType({doc_id}), projectHistoryType, callback); + }, - DOC_OPS_TTL: 60 * minutes - DOC_OPS_MAX_LENGTH: 100 - updateDocument : (project_id, doc_id, docLines, newVersion, appliedOps = [], ranges, updateMeta, callback = (error) ->)-> - RedisManager.getDocVersion doc_id, (error, currentVersion, projectHistoryType) -> - return callback(error) if error? - if currentVersion + appliedOps.length != newVersion - error = new Error("Version mismatch. '#{doc_id}' is corrupted.") - logger.error {err: error, doc_id, currentVersion, newVersion, opsLength: appliedOps.length}, "version mismatch" - return callback(error) + DOC_OPS_TTL: 60 * minutes, + DOC_OPS_MAX_LENGTH: 100, + updateDocument(project_id, doc_id, docLines, newVersion, appliedOps, ranges, updateMeta, callback){ + if (appliedOps == null) { appliedOps = []; } + if (callback == null) { callback = function(error) {}; } + return RedisManager.getDocVersion(doc_id, function(error, currentVersion, projectHistoryType) { + if (error != null) { return callback(error); } + if ((currentVersion + appliedOps.length) !== newVersion) { + error = new Error(`Version mismatch. 
'${doc_id}' is corrupted.`); + logger.error({err: error, doc_id, currentVersion, newVersion, opsLength: appliedOps.length}, "version mismatch"); + return callback(error); + } - jsonOps = appliedOps.map (op) -> JSON.stringify op - for op in jsonOps - if op.indexOf("\u0000") != -1 - error = new Error("null bytes found in jsonOps") - # this check was added to catch memory corruption in JSON.stringify - logger.error {err: error, doc_id: doc_id, jsonOps: jsonOps}, error.message - return callback(error) + const jsonOps = appliedOps.map(op => JSON.stringify(op)); + for (let op of Array.from(jsonOps)) { + if (op.indexOf("\u0000") !== -1) { + error = new Error("null bytes found in jsonOps"); + // this check was added to catch memory corruption in JSON.stringify + logger.error({err: error, doc_id, jsonOps}, error.message); + return callback(error); + } + } - newDocLines = JSON.stringify(docLines) - if newDocLines.indexOf("\u0000") != -1 - error = new Error("null bytes found in doc lines") - # this check was added to catch memory corruption in JSON.stringify - logger.error {err: error, doc_id: doc_id, newDocLines: newDocLines}, error.message - return callback(error) - newHash = RedisManager._computeHash(newDocLines) + const newDocLines = JSON.stringify(docLines); + if (newDocLines.indexOf("\u0000") !== -1) { + error = new Error("null bytes found in doc lines"); + // this check was added to catch memory corruption in JSON.stringify + logger.error({err: error, doc_id, newDocLines}, error.message); + return callback(error); + } + const newHash = RedisManager._computeHash(newDocLines); - opVersions = appliedOps.map (op) -> op?.v - logger.log doc_id: doc_id, version: newVersion, hash: newHash, op_versions: opVersions, "updating doc in redis" - # record bytes sent to redis in update - metrics.summary "redis.docLines", newDocLines.length, {status: "update"} - RedisManager._serializeRanges ranges, (error, ranges) -> - if error? - logger.error {err: error, doc_id}, error.message - return callback(error) - if ranges? and ranges.indexOf("\u0000") != -1 - error = new Error("null bytes found in ranges") - # this check was added to catch memory corruption in JSON.stringify - logger.error err: error, doc_id: doc_id, ranges: ranges, error.message - return callback(error) - multi = rclient.multi() - multi.set keys.docLines(doc_id:doc_id), newDocLines # index 0 - multi.set keys.docVersion(doc_id:doc_id), newVersion # index 1 - multi.set keys.docHash(doc_id:doc_id), newHash # index 2 - multi.ltrim keys.docOps(doc_id: doc_id), -RedisManager.DOC_OPS_MAX_LENGTH, -1 # index 3 - if ranges? - multi.set keys.ranges(doc_id:doc_id), ranges # index 4 - else - multi.del keys.ranges(doc_id:doc_id) # also index 4 - # push the ops last so we can get the lengths at fixed index position 7 - if jsonOps.length > 0 - multi.rpush keys.docOps(doc_id: doc_id), jsonOps... # index 5 - # expire must come after rpush since before it will be a no-op if the list is empty - multi.expire keys.docOps(doc_id: doc_id), RedisManager.DOC_OPS_TTL # index 6 - if projectHistoryType is "project-history" - metrics.inc 'history-queue', 1, {status: 'skip-track-changes'} - logger.log {doc_id}, "skipping push of uncompressed ops for project using project-history" - else - # project is using old track-changes history service - metrics.inc 'history-queue', 1, {status: 'track-changes'} - multi.rpush historyKeys.uncompressedHistoryOps(doc_id: doc_id), jsonOps... 
# index 7 - # Set the unflushed timestamp to the current time if the doc - # hasn't been modified before (the content in mongo has been - # valid up to this point). Otherwise leave it alone ("NX" flag). - multi.set keys.unflushedTime(doc_id: doc_id), Date.now(), "NX" - multi.set keys.lastUpdatedAt(doc_id: doc_id), Date.now() # index 8 - if updateMeta?.user_id - multi.set keys.lastUpdatedBy(doc_id: doc_id), updateMeta.user_id # index 9 - else - multi.del keys.lastUpdatedBy(doc_id: doc_id) # index 9 - multi.exec (error, result) -> - return callback(error) if error? + const opVersions = appliedOps.map(op => op != null ? op.v : undefined); + logger.log({doc_id, version: newVersion, hash: newHash, op_versions: opVersions}, "updating doc in redis"); + // record bytes sent to redis in update + metrics.summary("redis.docLines", newDocLines.length, {status: "update"}); + return RedisManager._serializeRanges(ranges, function(error, ranges) { + if (error != null) { + logger.error({err: error, doc_id}, error.message); + return callback(error); + } + if ((ranges != null) && (ranges.indexOf("\u0000") !== -1)) { + error = new Error("null bytes found in ranges"); + // this check was added to catch memory corruption in JSON.stringify + logger.error({err: error, doc_id, ranges}, error.message); + return callback(error); + } + const multi = rclient.multi(); + multi.set(keys.docLines({doc_id}), newDocLines); // index 0 + multi.set(keys.docVersion({doc_id}), newVersion); // index 1 + multi.set(keys.docHash({doc_id}), newHash); // index 2 + multi.ltrim(keys.docOps({doc_id}), -RedisManager.DOC_OPS_MAX_LENGTH, -1); // index 3 + if (ranges != null) { + multi.set(keys.ranges({doc_id}), ranges); // index 4 + } else { + multi.del(keys.ranges({doc_id})); // also index 4 + } + // push the ops last so we can get the lengths at fixed index position 7 + if (jsonOps.length > 0) { + multi.rpush(keys.docOps({doc_id}), ...Array.from(jsonOps)); // index 5 + // expire must come after rpush since before it will be a no-op if the list is empty + multi.expire(keys.docOps({doc_id}), RedisManager.DOC_OPS_TTL); // index 6 + if (projectHistoryType === "project-history") { + metrics.inc('history-queue', 1, {status: 'skip-track-changes'}); + logger.log({doc_id}, "skipping push of uncompressed ops for project using project-history"); + } else { + // project is using old track-changes history service + metrics.inc('history-queue', 1, {status: 'track-changes'}); + multi.rpush(historyKeys.uncompressedHistoryOps({doc_id}), ...Array.from(jsonOps)); // index 7 + } + // Set the unflushed timestamp to the current time if the doc + // hasn't been modified before (the content in mongo has been + // valid up to this point). Otherwise leave it alone ("NX" flag). + multi.set(keys.unflushedTime({doc_id}), Date.now(), "NX"); + multi.set(keys.lastUpdatedAt({doc_id}), Date.now()); // index 8 + if ((updateMeta != null ? 
updateMeta.user_id : undefined)) { + multi.set(keys.lastUpdatedBy({doc_id}), updateMeta.user_id); // index 9 + } else { + multi.del(keys.lastUpdatedBy({doc_id})); // index 9 + } + } + return multi.exec(function(error, result) { + let docUpdateCount; + if (error != null) { return callback(error); } - if projectHistoryType is 'project-history' - docUpdateCount = undefined # only using project history, don't bother with track-changes - else - # project is using old track-changes history service - docUpdateCount = result[7] # length of uncompressedHistoryOps queue (index 7) + if (projectHistoryType === 'project-history') { + docUpdateCount = undefined; // only using project history, don't bother with track-changes + } else { + // project is using old track-changes history service + docUpdateCount = result[7]; // length of uncompressedHistoryOps queue (index 7) + } - if jsonOps.length > 0 && Settings.apis?.project_history?.enabled - metrics.inc 'history-queue', 1, {status: 'project-history'} - ProjectHistoryRedisManager.queueOps project_id, jsonOps..., (error, projectUpdateCount) -> - callback null, docUpdateCount, projectUpdateCount - else - callback null, docUpdateCount + if ((jsonOps.length > 0) && __guard__(Settings.apis != null ? Settings.apis.project_history : undefined, x => x.enabled)) { + metrics.inc('history-queue', 1, {status: 'project-history'}); + return ProjectHistoryRedisManager.queueOps(project_id, ...Array.from(jsonOps), (error, projectUpdateCount) => callback(null, docUpdateCount, projectUpdateCount)); + } else { + return callback(null, docUpdateCount); + } + }); + }); + }); + }, - renameDoc: (project_id, doc_id, user_id, update, projectHistoryId, callback = (error) ->) -> - RedisManager.getDoc project_id, doc_id, (error, lines, version) -> - return callback(error) if error? + renameDoc(project_id, doc_id, user_id, update, projectHistoryId, callback) { + if (callback == null) { callback = function(error) {}; } + return RedisManager.getDoc(project_id, doc_id, function(error, lines, version) { + if (error != null) { return callback(error); } - if lines? and version? - rclient.set keys.pathname(doc_id:doc_id), update.newPathname, (error) -> - return callback(error) if error? 
- ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback - else - ProjectHistoryRedisManager.queueRenameEntity project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback + if ((lines != null) && (version != null)) { + return rclient.set(keys.pathname({doc_id}), update.newPathname, function(error) { + if (error != null) { return callback(error); } + return ProjectHistoryRedisManager.queueRenameEntity(project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback); + }); + } else { + return ProjectHistoryRedisManager.queueRenameEntity(project_id, projectHistoryId, 'doc', doc_id, user_id, update, callback); + } + }); + }, - clearUnflushedTime: (doc_id, callback = (error) ->) -> - rclient.del keys.unflushedTime(doc_id:doc_id), callback + clearUnflushedTime(doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + return rclient.del(keys.unflushedTime({doc_id}), callback); + }, - getDocIdsInProject: (project_id, callback = (error, doc_ids) ->) -> - rclient.smembers keys.docsInProject(project_id: project_id), callback + getDocIdsInProject(project_id, callback) { + if (callback == null) { callback = function(error, doc_ids) {}; } + return rclient.smembers(keys.docsInProject({project_id}), callback); + }, - getDocTimestamps: (doc_ids, callback = (error, result) ->) -> - # get lastupdatedat timestamps for an array of doc_ids - async.mapSeries doc_ids, (doc_id, cb) -> - rclient.get keys.lastUpdatedAt(doc_id: doc_id), cb - , callback + getDocTimestamps(doc_ids, callback) { + // get lastupdatedat timestamps for an array of doc_ids + if (callback == null) { callback = function(error, result) {}; } + return async.mapSeries(doc_ids, (doc_id, cb) => rclient.get(keys.lastUpdatedAt({doc_id}), cb) + , callback); + }, - queueFlushAndDeleteProject: (project_id, callback) -> - # store the project id in a sorted set ordered by time with a random offset to smooth out spikes - SMOOTHING_OFFSET = if Settings.smoothingOffset > 0 then Math.round(Settings.smoothingOffset * Math.random()) else 0 - rclient.zadd keys.flushAndDeleteQueue(), Date.now() + SMOOTHING_OFFSET, project_id, callback + queueFlushAndDeleteProject(project_id, callback) { + // store the project id in a sorted set ordered by time with a random offset to smooth out spikes + const SMOOTHING_OFFSET = Settings.smoothingOffset > 0 ? Math.round(Settings.smoothingOffset * Math.random()) : 0; + return rclient.zadd(keys.flushAndDeleteQueue(), Date.now() + SMOOTHING_OFFSET, project_id, callback); + }, - getNextProjectToFlushAndDelete: (cutoffTime, callback = (error, key, timestamp)->) -> - # find the oldest queued flush that is before the cutoff time - rclient.zrangebyscore keys.flushAndDeleteQueue(), 0, cutoffTime, "WITHSCORES", "LIMIT", 0, 1, (err, reply) -> - return callback(err) if err? - return callback() if !reply?.length # return if no projects ready to be processed - # pop the oldest entry (get and remove in a multi) - multi = rclient.multi() - # Poor man's version of ZPOPMIN, which is only available in Redis 5. - multi.zrange keys.flushAndDeleteQueue(), 0, 0, "WITHSCORES" - multi.zremrangebyrank keys.flushAndDeleteQueue(), 0, 0 - multi.zcard keys.flushAndDeleteQueue() # the total length of the queue (for metrics) - multi.exec (err, reply) -> - return callback(err) if err? 
-				return callback() if !reply?.length
-				[key, timestamp] = reply[0]
-				queueLength = reply[2]
-				callback(null, key, timestamp, queueLength)
+  getNextProjectToFlushAndDelete(cutoffTime, callback) {
+    // find the oldest queued flush that is before the cutoff time
+    if (callback == null) { callback = function(error, key, timestamp){}; }
+    return rclient.zrangebyscore(keys.flushAndDeleteQueue(), 0, cutoffTime, "WITHSCORES", "LIMIT", 0, 1, function(err, reply) {
+      if (err != null) { return callback(err); }
+      if (!(reply != null ? reply.length : undefined)) { return callback(); } // return if no projects ready to be processed
+      // pop the oldest entry (get and remove in a multi)
+      const multi = rclient.multi();
+      // Poor man's version of ZPOPMIN, which is only available in Redis 5.
+      multi.zrange(keys.flushAndDeleteQueue(), 0, 0, "WITHSCORES");
+      multi.zremrangebyrank(keys.flushAndDeleteQueue(), 0, 0);
+      multi.zcard(keys.flushAndDeleteQueue()); // the total length of the queue (for metrics)
+      return multi.exec(function(err, reply) {
+        if (err != null) { return callback(err); }
+        if (!(reply != null ? reply.length : undefined)) { return callback(); }
+        const [key, timestamp] = Array.from(reply[0]);
+        const queueLength = reply[2];
+        return callback(null, key, timestamp, queueLength);
+      });
+    });
+  },
 
-	_serializeRanges: (ranges, callback = (error, serializedRanges) ->) ->
-		jsonRanges = JSON.stringify(ranges)
-		if jsonRanges? and jsonRanges.length > MAX_RANGES_SIZE
-			return callback new Error("ranges are too large")
-		if jsonRanges == '{}'
-			# Most doc will have empty ranges so don't fill redis with lots of '{}' keys
-			jsonRanges = null
-		return callback null, jsonRanges
+  _serializeRanges(ranges, callback) {
+    if (callback == null) { callback = function(error, serializedRanges) {}; }
+    let jsonRanges = JSON.stringify(ranges);
+    if ((jsonRanges != null) && (jsonRanges.length > MAX_RANGES_SIZE)) {
+      return callback(new Error("ranges are too large"));
+    }
+    if (jsonRanges === '{}') {
+      // Most docs will have empty ranges so don't fill redis with lots of '{}' keys
+      jsonRanges = null;
+    }
+    return callback(null, jsonRanges);
+  },
 
-	_deserializeRanges: (ranges) ->
-		if !ranges? or ranges == ""
-			return {}
-		else
-			return JSON.parse(ranges)
+  _deserializeRanges(ranges) {
+    if ((ranges == null) || (ranges === "")) {
+      return {};
+    } else {
+      return JSON.parse(ranges);
+    }
+  },
 
-	_computeHash: (docLines) ->
-		# use sha1 checksum of doclines to detect data corruption.
-		#
-		# note: must specify 'utf8' encoding explicitly, as the default is
-		# binary in node < v5
-		return crypto.createHash('sha1').update(docLines, 'utf8').digest('hex')
+  _computeHash(docLines) {
+    // use sha1 checksum of doclines to detect data corruption.
+    //
+    // note: must specify 'utf8' encoding explicitly, as the default is
+    // binary in node < v5
+    return crypto.createHash('sha1').update(docLines, 'utf8').digest('hex');
+  }
+});
+
+function __guard__(value, transform) {
+  return (typeof value !== 'undefined' && value !== null) ?
transform(value) : undefined;
+}
\ No newline at end of file
diff --git a/services/document-updater/app/coffee/ShareJsDB.js b/services/document-updater/app/coffee/ShareJsDB.js
index 3e5dfe303f..5b313cee96 100644
--- a/services/document-updater/app/coffee/ShareJsDB.js
+++ b/services/document-updater/app/coffee/ShareJsDB.js
@@ -1,44 +1,64 @@
-Keys = require('./UpdateKeys')
-RedisManager = require "./RedisManager"
-Errors = require "./Errors"
+/*
+ * decaffeinate suggestions:
+ * DS101: Remove unnecessary use of Array.from
+ * DS102: Remove unnecessary code created because of implicit returns
+ * DS207: Consider shorter variations of null checks
+ * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
+ */
+let ShareJsDB;
+const Keys = require('./UpdateKeys');
+const RedisManager = require("./RedisManager");
+const Errors = require("./Errors");
 
-module.exports = class ShareJsDB
-	constructor: (@project_id, @doc_id, @lines, @version) ->
-		@appliedOps = {}
-		# ShareJS calls this detacted from the instance, so we need
-		# bind it to keep our context that can access @appliedOps
-		@writeOp = @_writeOp.bind(@)
+module.exports = (ShareJsDB = class ShareJsDB {
+  constructor(project_id, doc_id, lines, version) {
+    this.project_id = project_id;
+    this.doc_id = doc_id;
+    this.lines = lines;
+    this.version = version;
+    this.appliedOps = {};
+    // ShareJS calls this detached from the instance, so we need to
+    // bind it to keep our context that can access @appliedOps
+    this.writeOp = this._writeOp.bind(this);
+  }
 
-	getOps: (doc_key, start, end, callback) ->
-		if start == end
-			return callback null, []
+  getOps(doc_key, start, end, callback) {
+    if (start === end) {
+      return callback(null, []);
+    }
 
-		# In redis, lrange values are inclusive.
-		if end?
-			end--
-		else
-			end = -1
+    // In redis, lrange values are inclusive.
+    if (end != null) {
+      end--;
+    } else {
+      end = -1;
+    }
 
-		[project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key)
-		RedisManager.getPreviousDocOps doc_id, start, end, callback
+    const [project_id, doc_id] = Array.from(Keys.splitProjectIdAndDocId(doc_key));
+    return RedisManager.getPreviousDocOps(doc_id, start, end, callback);
+  }
 
-	_writeOp: (doc_key, opData, callback) ->
-		@appliedOps[doc_key] ?= []
-		@appliedOps[doc_key].push opData
-		callback()
+  _writeOp(doc_key, opData, callback) {
+    if (this.appliedOps[doc_key] == null) { this.appliedOps[doc_key] = []; }
+    this.appliedOps[doc_key].push(opData);
+    return callback();
+  }
 
-	getSnapshot: (doc_key, callback) ->
-		if doc_key != Keys.combineProjectIdAndDocId(@project_id, @doc_id)
-			return callback(new Errors.NotFoundError("unexpected doc_key #{doc_key}, expected #{Keys.combineProjectIdAndDocId(@project_id, @doc_id)}"))
-		else
-			return callback null, {
-				snapshot: @lines.join("\n")
-				v: parseInt(@version, 10)
+  getSnapshot(doc_key, callback) {
+    if (doc_key !== Keys.combineProjectIdAndDocId(this.project_id, this.doc_id)) {
+      return callback(new Errors.NotFoundError(`unexpected doc_key ${doc_key}, expected ${Keys.combineProjectIdAndDocId(this.project_id, this.doc_id)}`));
+    } else {
+      return callback(null, {
+        snapshot: this.lines.join("\n"),
+        v: parseInt(this.version, 10),
        type: "text"
-			}
+      });
+    }
+  }
 
-	# To be able to remove a doc from the ShareJS memory
-	# we need to called Model::delete, which calls this
-	# method on the database.
However, we will handle removing
-  # it from Redis ourselves
-  delete: (docName, dbMeta, callback) -> callback()
+  // To be able to remove a doc from the ShareJS memory
+  // we need to call Model::delete, which calls this
+  // method on the database. However, we will handle removing
+  // it from Redis ourselves
+  delete(docName, dbMeta, callback) { return callback(); }
+});
diff --git a/services/document-updater/app/coffee/ShareJsUpdateManager.js b/services/document-updater/app/coffee/ShareJsUpdateManager.js
index 856a4d4a37..82eb6923b0 100644
--- a/services/document-updater/app/coffee/ShareJsUpdateManager.js
+++ b/services/document-updater/app/coffee/ShareJsUpdateManager.js
@@ -1,80 +1,102 @@
-ShareJsModel = require "./sharejs/server/model"
-ShareJsDB = require "./ShareJsDB"
-logger = require "logger-sharelatex"
-Settings = require('settings-sharelatex')
-Keys = require "./UpdateKeys"
-{EventEmitter} = require "events"
-util = require "util"
-RealTimeRedisManager = require "./RealTimeRedisManager"
-crypto = require "crypto"
-metrics = require('./Metrics')
-Errors = require("./Errors")
+/*
+ * decaffeinate suggestions:
+ * DS101: Remove unnecessary use of Array.from
+ * DS102: Remove unnecessary code created because of implicit returns
+ * DS207: Consider shorter variations of null checks
+ * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
+ */
+let ShareJsUpdateManager;
+const ShareJsModel = require("./sharejs/server/model");
+const ShareJsDB = require("./ShareJsDB");
+const logger = require("logger-sharelatex");
+const Settings = require('settings-sharelatex');
+const Keys = require("./UpdateKeys");
+const {EventEmitter} = require("events");
+const util = require("util");
+const RealTimeRedisManager = require("./RealTimeRedisManager");
+const crypto = require("crypto");
+const metrics = require('./Metrics');
+const Errors = require("./Errors");

-ShareJsModel:: = {}
-util.inherits ShareJsModel, EventEmitter
+ShareJsModel.prototype = {};
+util.inherits(ShareJsModel, EventEmitter);

-MAX_AGE_OF_OP = 80
+const MAX_AGE_OF_OP = 80;

-module.exports = ShareJsUpdateManager =
-  getNewShareJsModel: (project_id, doc_id, lines, version) ->
-    db = new ShareJsDB(project_id, doc_id, lines, version)
-    model = new ShareJsModel(db, maxDocLength: Settings.max_doc_length, maximumAge: MAX_AGE_OF_OP)
-    model.db = db
-    return model
+module.exports = (ShareJsUpdateManager = {
+  getNewShareJsModel(project_id, doc_id, lines, version) {
+    const db = new ShareJsDB(project_id, doc_id, lines, version);
+    const model = new ShareJsModel(db, {maxDocLength: Settings.max_doc_length, maximumAge: MAX_AGE_OF_OP});
+    model.db = db;
+    return model;
+  },

-  applyUpdate: (project_id, doc_id, update, lines, version, callback = (error, updatedDocLines) ->) ->
-    logger.log project_id: project_id, doc_id: doc_id, update: update, "applying sharejs updates"
-    jobs = []
-    # record the update version before it is modified
-    incomingUpdateVersion = update.v
-    # We could use a global model for all docs, but we're hitting issues with the
-    # internal state of ShareJS not being accessible for clearing caches, and
-    # getting stuck due to queued callbacks (line 260 of sharejs/server/model.coffee)
-    # This adds a small but hopefully acceptable overhead (~12ms per 1000 updates on
-    # my 2009 MBP).
-    model = @getNewShareJsModel(project_id, doc_id, lines, version)
-    @_listenForOps(model)
-    doc_key = Keys.combineProjectIdAndDocId(project_id, doc_id)
-    model.applyOp doc_key, update, (error) ->
-      if error?
- if error == "Op already submitted" - metrics.inc "sharejs.already-submitted" - logger.warn {project_id, doc_id, update}, "op has already been submitted" - update.dup = true - ShareJsUpdateManager._sendOp(project_id, doc_id, update) - else if /^Delete component/.test(error) - metrics.inc "sharejs.delete-mismatch" - logger.warn {project_id, doc_id, update, shareJsErr: error}, "sharejs delete does not match" - error = new Errors.DeleteMismatchError("Delete component does not match") - return callback(error) - else - metrics.inc "sharejs.other-error" - return callback(error) - logger.log project_id: project_id, doc_id: doc_id, error: error, "applied update" - model.getSnapshot doc_key, (error, data) => - return callback(error) if error? - # only check hash when present and no other updates have been applied - if update.hash? and incomingUpdateVersion == version - ourHash = ShareJsUpdateManager._computeHash(data.snapshot) - if ourHash != update.hash - metrics.inc "sharejs.hash-fail" - return callback(new Error("Invalid hash")) - else - metrics.inc "sharejs.hash-pass", 0.001 - docLines = data.snapshot.split(/\r\n|\n|\r/) - callback(null, docLines, data.v, model.db.appliedOps[doc_key] or []) + applyUpdate(project_id, doc_id, update, lines, version, callback) { + if (callback == null) { callback = function(error, updatedDocLines) {}; } + logger.log({project_id, doc_id, update}, "applying sharejs updates"); + const jobs = []; + // record the update version before it is modified + const incomingUpdateVersion = update.v; + // We could use a global model for all docs, but we're hitting issues with the + // internal state of ShareJS not being accessible for clearing caches, and + // getting stuck due to queued callbacks (line 260 of sharejs/server/model.coffee) + // This adds a small but hopefully acceptable overhead (~12ms per 1000 updates on + // my 2009 MBP). 
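+    // As a rough sketch of the lifecycle below (all ids and values here are
+    // made up for illustration, not taken from real traffic): a model seeded
+    // with ["hello"] at version 0 applies a text insert, and the updated
+    // snapshot is then read back for the hash check:
+    //   const m = ShareJsUpdateManager.getNewShareJsModel("p1", "d1", ["hello"], 0);
+    //   m.applyOp("p1:d1", {op: [{i: "!", p: 5}], v: 0}, cb);
+    //   // afterwards m.getSnapshot("p1:d1", ...) yields {snapshot: "hello!", v: 1}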
+ const model = this.getNewShareJsModel(project_id, doc_id, lines, version); + this._listenForOps(model); + const doc_key = Keys.combineProjectIdAndDocId(project_id, doc_id); + return model.applyOp(doc_key, update, function(error) { + if (error != null) { + if (error === "Op already submitted") { + metrics.inc("sharejs.already-submitted"); + logger.warn({project_id, doc_id, update}, "op has already been submitted"); + update.dup = true; + ShareJsUpdateManager._sendOp(project_id, doc_id, update); + } else if (/^Delete component/.test(error)) { + metrics.inc("sharejs.delete-mismatch"); + logger.warn({project_id, doc_id, update, shareJsErr: error}, "sharejs delete does not match"); + error = new Errors.DeleteMismatchError("Delete component does not match"); + return callback(error); + } else { + metrics.inc("sharejs.other-error"); + return callback(error); + } + } + logger.log({project_id, doc_id, error}, "applied update"); + return model.getSnapshot(doc_key, (error, data) => { + if (error != null) { return callback(error); } + // only check hash when present and no other updates have been applied + if ((update.hash != null) && (incomingUpdateVersion === version)) { + const ourHash = ShareJsUpdateManager._computeHash(data.snapshot); + if (ourHash !== update.hash) { + metrics.inc("sharejs.hash-fail"); + return callback(new Error("Invalid hash")); + } else { + metrics.inc("sharejs.hash-pass", 0.001); + } + } + const docLines = data.snapshot.split(/\r\n|\n|\r/); + return callback(null, docLines, data.v, model.db.appliedOps[doc_key] || []); + }); + }); + }, - _listenForOps: (model) -> - model.on "applyOp", (doc_key, opData) -> - [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key) - ShareJsUpdateManager._sendOp(project_id, doc_id, opData) + _listenForOps(model) { + return model.on("applyOp", function(doc_key, opData) { + const [project_id, doc_id] = Array.from(Keys.splitProjectIdAndDocId(doc_key)); + return ShareJsUpdateManager._sendOp(project_id, doc_id, opData); + }); + }, - _sendOp: (project_id, doc_id, op) -> - RealTimeRedisManager.sendData {project_id, doc_id, op} + _sendOp(project_id, doc_id, op) { + return RealTimeRedisManager.sendData({project_id, doc_id, op}); + }, - _computeHash: (content) -> + _computeHash(content) { return crypto.createHash('sha1') .update("blob " + content.length + "\x00") .update(content, 'utf8') - .digest('hex') + .digest('hex'); + } +}); diff --git a/services/document-updater/app/coffee/SnapshotManager.js b/services/document-updater/app/coffee/SnapshotManager.js index 86670b648d..5f998096af 100644 --- a/services/document-updater/app/coffee/SnapshotManager.js +++ b/services/document-updater/app/coffee/SnapshotManager.js @@ -1,42 +1,62 @@ -{db, ObjectId} = require "./mongojs" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let SnapshotManager; +const {db, ObjectId} = require("./mongojs"); -module.exports = SnapshotManager = - recordSnapshot: (project_id, doc_id, version, pathname, lines, ranges, callback) -> - try - project_id = ObjectId(project_id) - doc_id = ObjectId(doc_id) - catch error - return callback(error) - db.docSnapshots.insert { +module.exports = (SnapshotManager = { + recordSnapshot(project_id, doc_id, version, pathname, lines, ranges, callback) { + try { + project_id = ObjectId(project_id); + 
doc_id = ObjectId(doc_id);
+    } catch (error) {
+      return callback(error);
+    }
+    return db.docSnapshots.insert({
       project_id, doc_id, version, lines, pathname,
       ranges: SnapshotManager.jsonRangesToMongo(ranges),
       ts: new Date()
-    }, callback
-    # Suggested indexes:
-    # db.docSnapshots.createIndex({project_id:1, doc_id:1})
-    # db.docSnapshots.createIndex({ts:1},{expiresAfterSeconds: 30*24*3600)) # expires after 30 days
+    }, callback);
+  },
+  // Suggested indexes:
+  // db.docSnapshots.createIndex({project_id:1, doc_id:1})
+  // db.docSnapshots.createIndex({ts:1}, {expireAfterSeconds: 30*24*3600}) // expires after 30 days

-  jsonRangesToMongo: (ranges) ->
-    return null if !ranges?
+  jsonRangesToMongo(ranges) {
+    if ((ranges == null)) { return null; }

-    updateMetadata = (metadata) ->
-      if metadata?.ts?
-        metadata.ts = new Date(metadata.ts)
-      if metadata?.user_id?
-        metadata.user_id = SnapshotManager._safeObjectId(metadata.user_id)
+    const updateMetadata = function(metadata) {
+      if ((metadata != null ? metadata.ts : undefined) != null) {
+        metadata.ts = new Date(metadata.ts);
+      }
+      if ((metadata != null ? metadata.user_id : undefined) != null) {
+        return metadata.user_id = SnapshotManager._safeObjectId(metadata.user_id);
+      }
+    };

-    for change in ranges.changes or []
-      change.id = SnapshotManager._safeObjectId(change.id)
-      updateMetadata(change.metadata)
-    for comment in ranges.comments or []
-      comment.id = SnapshotManager._safeObjectId(comment.id)
-      if comment.op?.t?
-        comment.op.t = SnapshotManager._safeObjectId(comment.op.t)
-      updateMetadata(comment.metadata)
-    return ranges
+    for (let change of Array.from(ranges.changes || [])) {
+      change.id = SnapshotManager._safeObjectId(change.id);
+      updateMetadata(change.metadata);
+    }
+    for (let comment of Array.from(ranges.comments || [])) {
+      comment.id = SnapshotManager._safeObjectId(comment.id);
+      if ((comment.op != null ?
comment.op.t : undefined) != null) { + comment.op.t = SnapshotManager._safeObjectId(comment.op.t); + } + updateMetadata(comment.metadata); + } + return ranges; + }, - _safeObjectId: (data) -> - try - return ObjectId(data) - catch error - return data + _safeObjectId(data) { + try { + return ObjectId(data); + } catch (error) { + return data; + } + } +}); diff --git a/services/document-updater/app/coffee/UpdateKeys.js b/services/document-updater/app/coffee/UpdateKeys.js index 7d1f279495..470be0ce4a 100644 --- a/services/document-updater/app/coffee/UpdateKeys.js +++ b/services/document-updater/app/coffee/UpdateKeys.js @@ -1,3 +1,4 @@ -module.exports = - combineProjectIdAndDocId: (project_id, doc_id) -> "#{project_id}:#{doc_id}" - splitProjectIdAndDocId: (project_and_doc_id) -> project_and_doc_id.split(":") +module.exports = { + combineProjectIdAndDocId(project_id, doc_id) { return `${project_id}:${doc_id}`; }, + splitProjectIdAndDocId(project_and_doc_id) { return project_and_doc_id.split(":"); } +}; diff --git a/services/document-updater/app/coffee/UpdateManager.js b/services/document-updater/app/coffee/UpdateManager.js index e5ede11173..5151dfb4e7 100644 --- a/services/document-updater/app/coffee/UpdateManager.js +++ b/services/document-updater/app/coffee/UpdateManager.js @@ -1,170 +1,232 @@ -LockManager = require "./LockManager" -RedisManager = require "./RedisManager" -RealTimeRedisManager = require "./RealTimeRedisManager" -ShareJsUpdateManager = require "./ShareJsUpdateManager" -HistoryManager = require "./HistoryManager" -Settings = require('settings-sharelatex') -_ = require("lodash") -async = require("async") -logger = require('logger-sharelatex') -Metrics = require "./Metrics" -Errors = require "./Errors" -DocumentManager = require "./DocumentManager" -RangesManager = require "./RangesManager" -SnapshotManager = require "./SnapshotManager" -Profiler = require "./Profiler" +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS201: Simplify complex destructure assignments + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +let UpdateManager; +const LockManager = require("./LockManager"); +const RedisManager = require("./RedisManager"); +const RealTimeRedisManager = require("./RealTimeRedisManager"); +const ShareJsUpdateManager = require("./ShareJsUpdateManager"); +const HistoryManager = require("./HistoryManager"); +const Settings = require('settings-sharelatex'); +const _ = require("lodash"); +const async = require("async"); +const logger = require('logger-sharelatex'); +const Metrics = require("./Metrics"); +const Errors = require("./Errors"); +const DocumentManager = require("./DocumentManager"); +const RangesManager = require("./RangesManager"); +const SnapshotManager = require("./SnapshotManager"); +const Profiler = require("./Profiler"); -module.exports = UpdateManager = - processOutstandingUpdates: (project_id, doc_id, callback = (error) ->) -> - timer = new Metrics.Timer("updateManager.processOutstandingUpdates") - UpdateManager.fetchAndApplyUpdates project_id, doc_id, (error) -> - timer.done() - return callback(error) if error? 
- callback() +module.exports = (UpdateManager = { + processOutstandingUpdates(project_id, doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + const timer = new Metrics.Timer("updateManager.processOutstandingUpdates"); + return UpdateManager.fetchAndApplyUpdates(project_id, doc_id, function(error) { + timer.done(); + if (error != null) { return callback(error); } + return callback(); + }); + }, - processOutstandingUpdatesWithLock: (project_id, doc_id, callback = (error) ->) -> - profile = new Profiler("processOutstandingUpdatesWithLock", {project_id, doc_id}) - LockManager.tryLock doc_id, (error, gotLock, lockValue) => - return callback(error) if error? - return callback() if !gotLock - profile.log("tryLock") - UpdateManager.processOutstandingUpdates project_id, doc_id, (error) -> - return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback) if error? - profile.log("processOutstandingUpdates") - LockManager.releaseLock doc_id, lockValue, (error) => - return callback(error) if error? - profile.log("releaseLock").end() - UpdateManager.continueProcessingUpdatesWithLock project_id, doc_id, callback + processOutstandingUpdatesWithLock(project_id, doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + const profile = new Profiler("processOutstandingUpdatesWithLock", {project_id, doc_id}); + return LockManager.tryLock(doc_id, (error, gotLock, lockValue) => { + if (error != null) { return callback(error); } + if (!gotLock) { return callback(); } + profile.log("tryLock"); + return UpdateManager.processOutstandingUpdates(project_id, doc_id, function(error) { + if (error != null) { return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback); } + profile.log("processOutstandingUpdates"); + return LockManager.releaseLock(doc_id, lockValue, error => { + if (error != null) { return callback(error); } + profile.log("releaseLock").end(); + return UpdateManager.continueProcessingUpdatesWithLock(project_id, doc_id, callback); + }); + }); + }); + }, - continueProcessingUpdatesWithLock: (project_id, doc_id, callback = (error) ->) -> - RealTimeRedisManager.getUpdatesLength doc_id, (error, length) => - return callback(error) if error? - if length > 0 - UpdateManager.processOutstandingUpdatesWithLock project_id, doc_id, callback - else - callback() + continueProcessingUpdatesWithLock(project_id, doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + return RealTimeRedisManager.getUpdatesLength(doc_id, (error, length) => { + if (error != null) { return callback(error); } + if (length > 0) { + return UpdateManager.processOutstandingUpdatesWithLock(project_id, doc_id, callback); + } else { + return callback(); + } + }); + }, - fetchAndApplyUpdates: (project_id, doc_id, callback = (error) ->) -> - profile = new Profiler("fetchAndApplyUpdates", {project_id, doc_id}) - RealTimeRedisManager.getPendingUpdatesForDoc doc_id, (error, updates) => - return callback(error) if error? 
- logger.log {project_id: project_id, doc_id: doc_id, count: updates.length}, "processing updates" - if updates.length == 0 - return callback() - profile.log("getPendingUpdatesForDoc") - doUpdate = (update, cb)-> - UpdateManager.applyUpdate project_id, doc_id, update, (err) -> - profile.log("applyUpdate") - cb(err) - finalCallback = (err) -> - profile.log("async done").end() - callback(err) - async.eachSeries updates, doUpdate, finalCallback + fetchAndApplyUpdates(project_id, doc_id, callback) { + if (callback == null) { callback = function(error) {}; } + const profile = new Profiler("fetchAndApplyUpdates", {project_id, doc_id}); + return RealTimeRedisManager.getPendingUpdatesForDoc(doc_id, (error, updates) => { + if (error != null) { return callback(error); } + logger.log({project_id, doc_id, count: updates.length}, "processing updates"); + if (updates.length === 0) { + return callback(); + } + profile.log("getPendingUpdatesForDoc"); + const doUpdate = (update, cb) => UpdateManager.applyUpdate(project_id, doc_id, update, function(err) { + profile.log("applyUpdate"); + return cb(err); + }); + const finalCallback = function(err) { + profile.log("async done").end(); + return callback(err); + }; + return async.eachSeries(updates, doUpdate, finalCallback); + }); + }, - applyUpdate: (project_id, doc_id, update, _callback = (error) ->) -> - callback = (error) -> - if error? - RealTimeRedisManager.sendData {project_id, doc_id, error: error.message || error} - profile.log("sendData") - profile.end() - _callback(error) + applyUpdate(project_id, doc_id, update, _callback) { + if (_callback == null) { _callback = function(error) {}; } + const callback = function(error) { + if (error != null) { + RealTimeRedisManager.sendData({project_id, doc_id, error: error.message || error}); + profile.log("sendData"); + } + profile.end(); + return _callback(error); + }; - profile = new Profiler("applyUpdate", {project_id, doc_id}) - UpdateManager._sanitizeUpdate update - profile.log("sanitizeUpdate") - DocumentManager.getDoc project_id, doc_id, (error, lines, version, ranges, pathname, projectHistoryId) -> - profile.log("getDoc") - return callback(error) if error? - if !lines? or !version? - return callback(new Errors.NotFoundError("document not found: #{doc_id}")) - previousVersion = version - ShareJsUpdateManager.applyUpdate project_id, doc_id, update, lines, version, (error, updatedDocLines, version, appliedOps) -> - profile.log("sharejs.applyUpdate") - return callback(error) if error? - RangesManager.applyUpdate project_id, doc_id, ranges, appliedOps, updatedDocLines, (error, new_ranges, ranges_were_collapsed) -> - UpdateManager._addProjectHistoryMetadataToOps(appliedOps, pathname, projectHistoryId, lines) - profile.log("RangesManager.applyUpdate") - return callback(error) if error? - RedisManager.updateDocument project_id, doc_id, updatedDocLines, version, appliedOps, new_ranges, update.meta, (error, doc_ops_length, project_ops_length) -> - profile.log("RedisManager.updateDocument") - return callback(error) if error? - HistoryManager.recordAndFlushHistoryOps project_id, doc_id, appliedOps, doc_ops_length, project_ops_length, (error) -> - profile.log("recordAndFlushHistoryOps") - return callback(error) if error? 
- if ranges_were_collapsed - logger.log {project_id, doc_id, previousVersion, lines, ranges, update}, "update collapsed some ranges, snapshotting previous content" - # Do this last, since it's a mongo call, and so potentially longest running - # If it overruns the lock, it's ok, since all of our redis work is done - SnapshotManager.recordSnapshot project_id, doc_id, previousVersion, pathname, lines, ranges, (error) -> - if error? - logger.error {err: error, project_id, doc_id, version, lines, ranges}, "error recording snapshot" - return callback(error) - else - callback() - else - callback() + var profile = new Profiler("applyUpdate", {project_id, doc_id}); + UpdateManager._sanitizeUpdate(update); + profile.log("sanitizeUpdate"); + return DocumentManager.getDoc(project_id, doc_id, function(error, lines, version, ranges, pathname, projectHistoryId) { + profile.log("getDoc"); + if (error != null) { return callback(error); } + if ((lines == null) || (version == null)) { + return callback(new Errors.NotFoundError(`document not found: ${doc_id}`)); + } + const previousVersion = version; + return ShareJsUpdateManager.applyUpdate(project_id, doc_id, update, lines, version, function(error, updatedDocLines, version, appliedOps) { + profile.log("sharejs.applyUpdate"); + if (error != null) { return callback(error); } + return RangesManager.applyUpdate(project_id, doc_id, ranges, appliedOps, updatedDocLines, function(error, new_ranges, ranges_were_collapsed) { + UpdateManager._addProjectHistoryMetadataToOps(appliedOps, pathname, projectHistoryId, lines); + profile.log("RangesManager.applyUpdate"); + if (error != null) { return callback(error); } + return RedisManager.updateDocument(project_id, doc_id, updatedDocLines, version, appliedOps, new_ranges, update.meta, function(error, doc_ops_length, project_ops_length) { + profile.log("RedisManager.updateDocument"); + if (error != null) { return callback(error); } + return HistoryManager.recordAndFlushHistoryOps(project_id, doc_id, appliedOps, doc_ops_length, project_ops_length, function(error) { + profile.log("recordAndFlushHistoryOps"); + if (error != null) { return callback(error); } + if (ranges_were_collapsed) { + logger.log({project_id, doc_id, previousVersion, lines, ranges, update}, "update collapsed some ranges, snapshotting previous content"); + // Do this last, since it's a mongo call, and so potentially longest running + // If it overruns the lock, it's ok, since all of our redis work is done + return SnapshotManager.recordSnapshot(project_id, doc_id, previousVersion, pathname, lines, ranges, function(error) { + if (error != null) { + logger.error({err: error, project_id, doc_id, version, lines, ranges}, "error recording snapshot"); + return callback(error); + } else { + return callback(); + } + }); + } else { + return callback(); + } + }); + }); + }); + }); + }); + }, - lockUpdatesAndDo: (method, project_id, doc_id, args..., callback) -> - profile = new Profiler("lockUpdatesAndDo", {project_id, doc_id}) - LockManager.getLock doc_id, (error, lockValue) -> - profile.log("getLock") - return callback(error) if error? - UpdateManager.processOutstandingUpdates project_id, doc_id, (error) -> - return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback) if error? - profile.log("processOutstandingUpdates") - method project_id, doc_id, args..., (error, response_args...) -> - return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback) if error? 
- profile.log("method") - LockManager.releaseLock doc_id, lockValue, (error) -> - return callback(error) if error? - profile.log("releaseLock").end() - callback null, response_args... - # We held the lock for a while so updates might have queued up - UpdateManager.continueProcessingUpdatesWithLock project_id, doc_id + lockUpdatesAndDo(method, project_id, doc_id, ...rest) { + const adjustedLength = Math.max(rest.length, 1), args = rest.slice(0, adjustedLength - 1), callback = rest[adjustedLength - 1]; + const profile = new Profiler("lockUpdatesAndDo", {project_id, doc_id}); + return LockManager.getLock(doc_id, function(error, lockValue) { + profile.log("getLock"); + if (error != null) { return callback(error); } + return UpdateManager.processOutstandingUpdates(project_id, doc_id, function(error) { + if (error != null) { return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback); } + profile.log("processOutstandingUpdates"); + return method(project_id, doc_id, ...Array.from(args), function(error, ...response_args) { + if (error != null) { return UpdateManager._handleErrorInsideLock(doc_id, lockValue, error, callback); } + profile.log("method"); + return LockManager.releaseLock(doc_id, lockValue, function(error) { + if (error != null) { return callback(error); } + profile.log("releaseLock").end(); + callback(null, ...Array.from(response_args)); + // We held the lock for a while so updates might have queued up + return UpdateManager.continueProcessingUpdatesWithLock(project_id, doc_id); + }); + }); + }); + }); + }, - _handleErrorInsideLock: (doc_id, lockValue, original_error, callback = (error) ->) -> - LockManager.releaseLock doc_id, lockValue, (lock_error) -> - callback(original_error) + _handleErrorInsideLock(doc_id, lockValue, original_error, callback) { + if (callback == null) { callback = function(error) {}; } + return LockManager.releaseLock(doc_id, lockValue, lock_error => callback(original_error)); + }, - _sanitizeUpdate: (update) -> - # In Javascript, characters are 16-bits wide. It does not understand surrogates as characters. - # - # From Wikipedia (http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane): - # "The High Surrogates (U+D800–U+DBFF) and Low Surrogate (U+DC00–U+DFFF) codes are reserved - # for encoding non-BMP characters in UTF-16 by using a pair of 16-bit codes: one High Surrogate - # and one Low Surrogate. A single surrogate code point will never be assigned a character."" - # - # The main offender seems to be \uD835 as a stand alone character, which would be the first - # 16-bit character of a blackboard bold character (http://www.fileformat.info/info/unicode/char/1d400/index.htm). - # Something must be going on client side that is screwing up the encoding and splitting the - # two 16-bit characters so that \uD835 is standalone. - for op in update.op or [] - if op.i? - # Replace high and low surrogate characters with 'replacement character' (\uFFFD) - op.i = op.i.replace(/[\uD800-\uDFFF]/g, "\uFFFD") - return update + _sanitizeUpdate(update) { + // In Javascript, characters are 16-bits wide. It does not understand surrogates as characters. + // + // From Wikipedia (http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane): + // "The High Surrogates (U+D800–U+DBFF) and Low Surrogate (U+DC00–U+DFFF) codes are reserved + // for encoding non-BMP characters in UTF-16 by using a pair of 16-bit codes: one High Surrogate + // and one Low Surrogate. 
A single surrogate code point will never be assigned a character."
+    //
+    // The main offender seems to be \uD835 as a standalone character, which would be the first
+    // 16-bit character of a blackboard bold character (http://www.fileformat.info/info/unicode/char/1d400/index.htm).
+    // Something must be going on client side that is screwing up the encoding and splitting the
+    // two 16-bit characters so that \uD835 is standalone.
+    for (let op of Array.from(update.op || [])) {
+      if (op.i != null) {
+        // Replace high and low surrogate characters with 'replacement character' (\uFFFD)
+        op.i = op.i.replace(/[\uD800-\uDFFF]/g, "\uFFFD");
+      }
+    }
+    return update;
+  },

-  _addProjectHistoryMetadataToOps: (updates, pathname, projectHistoryId, lines) ->
-    doc_length = _.reduce lines,
-      (chars, line) -> chars + line.length,
-      0
-    doc_length += lines.length - 1 # count newline characters
-    updates.forEach (update) ->
-      update.projectHistoryId = projectHistoryId
-      update.meta ||= {}
-      update.meta.pathname = pathname
-      update.meta.doc_length = doc_length
-      # Each update may contain multiple ops, i.e.
-      # [{
-      #   ops: [{i: "foo", p: 4}, {d: "bar", p:8}]
-      # }, {
-      #   ops: [{d: "baz", p: 40}, {i: "qux", p:8}]
-      # }]
-      # We want to include the doc_length at the start of each update,
-      # before it's ops are applied. However, we need to track any
-      # changes to it for the next update.
-      for op in update.op
-        if op.i?
-          doc_length += op.i.length
-        if op.d?
-          doc_length -= op.d.length
+  _addProjectHistoryMetadataToOps(updates, pathname, projectHistoryId, lines) {
+    let doc_length = _.reduce(lines,
+      (chars, line) => chars + line.length,
+      0);
+    doc_length += lines.length - 1; // count newline characters
+    return updates.forEach(function(update) {
+      update.projectHistoryId = projectHistoryId;
+      if (!update.meta) { update.meta = {}; }
+      update.meta.pathname = pathname;
+      update.meta.doc_length = doc_length;
+      // Each update may contain multiple ops, i.e.
+      // [{
+      //   ops: [{i: "foo", p: 4}, {d: "bar", p:8}]
+      // }, {
+      //   ops: [{d: "baz", p: 40}, {i: "qux", p:8}]
+      // }]
+      // We want to include the doc_length at the start of each update,
+      // before its ops are applied. However, we need to track any
+      // changes to it for the next update.
+      return (() => {
+        const result = [];
+        for (let op of Array.from(update.op)) {
+          if (op.i != null) {
+            doc_length += op.i.length;
+          }
+          if (op.d != null) {
+            result.push(doc_length -= op.d.length);
+          } else {
+            result.push(undefined);
+          }
+        }
+        return result;
+      })();
+    });
+  }
+});
diff --git a/services/document-updater/app/coffee/mongojs.js b/services/document-updater/app/coffee/mongojs.js
index dfeebb788f..daf6fbed6d 100644
--- a/services/document-updater/app/coffee/mongojs.js
+++ b/services/document-updater/app/coffee/mongojs.js
@@ -1,12 +1,21 @@
-Settings = require "settings-sharelatex"
-mongojs = require "mongojs"
-db = mongojs(Settings.mongo.url, ["docSnapshots"])
+/*
+ * decaffeinate suggestions:
+ * DS102: Remove unnecessary code created because of implicit returns
+ * DS207: Consider shorter variations of null checks
+ * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
+ */
+const Settings = require("settings-sharelatex");
+const mongojs = require("mongojs");
+const db = mongojs(Settings.mongo.url, ["docSnapshots"]);

-module.exports =
-  db: db
-  ObjectId: mongojs.ObjectId
-  healthCheck: (callback) ->
-    db.runCommand {ping: 1}, (err, res) ->
-      return callback(err) if err?
- return callback(new Error("failed mongo ping")) if !res.ok - callback() +module.exports = { + db, + ObjectId: mongojs.ObjectId, + healthCheck(callback) { + return db.runCommand({ping: 1}, function(err, res) { + if (err != null) { return callback(err); } + if (!res.ok) { return callback(new Error("failed mongo ping")); } + return callback(); + }); + } +}; diff --git a/services/document-updater/app/coffee/sharejs/count.js b/services/document-updater/app/coffee/sharejs/count.js index da28355efb..ffc3337ac7 100644 --- a/services/document-updater/app/coffee/sharejs/count.js +++ b/services/document-updater/app/coffee/sharejs/count.js @@ -1,22 +1,30 @@ -# This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment] +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment] -exports.name = 'count' -exports.create = -> 1 +exports.name = 'count'; +exports.create = () => 1; -exports.apply = (snapshot, op) -> - [v, inc] = op - throw new Error "Op #{v} != snapshot #{snapshot}" unless snapshot == v - snapshot + inc +exports.apply = function(snapshot, op) { + const [v, inc] = Array.from(op); + if (snapshot !== v) { throw new Error(`Op ${v} != snapshot ${snapshot}`); } + return snapshot + inc; +}; -# transform op1 by op2. Return transformed version of op1. -exports.transform = (op1, op2) -> - throw new Error "Op1 #{op1[0]} != op2 #{op2[0]}" unless op1[0] == op2[0] - [op1[0] + op2[1], op1[1]] +// transform op1 by op2. Return transformed version of op1. +exports.transform = function(op1, op2) { + if (op1[0] !== op2[0]) { throw new Error(`Op1 ${op1[0]} != op2 ${op2[0]}`); } + return [op1[0] + op2[1], op1[1]]; +}; -exports.compose = (op1, op2) -> - throw new Error "Op1 #{op1} + 1 != op2 #{op2}" unless op1[0] + op1[1] == op2[0] - [op1[0], op1[1] + op2[1]] +exports.compose = function(op1, op2) { + if ((op1[0] + op1[1]) !== op2[0]) { throw new Error(`Op1 ${op1} + 1 != op2 ${op2}`); } + return [op1[0], op1[1] + op2[1]]; +}; -exports.generateRandomOp = (doc) -> - [[doc, 1], doc + 1] +exports.generateRandomOp = doc => [[doc, 1], doc + 1]; diff --git a/services/document-updater/app/coffee/sharejs/helpers.js b/services/document-updater/app/coffee/sharejs/helpers.js index 093b32e1bb..81a561de03 100644 --- a/services/document-updater/app/coffee/sharejs/helpers.js +++ b/services/document-updater/app/coffee/sharejs/helpers.js @@ -1,65 +1,87 @@ -# These methods let you build a transform function from a transformComponent function -# for OT types like text and JSON in which operations are lists of components -# and transforming them requires N^2 work. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// These methods let you build a transform function from a transformComponent function +// for OT types like text and JSON in which operations are lists of components +// and transforming them requires N^2 work. -# Add transform and transformX functions for an OT type which has transformComponent defined. 
-# transformComponent(destination array, component, other component, side) -exports['_bt'] = bootstrapTransform = (type, transformComponent, checkValidOp, append) -> - transformComponentX = (left, right, destLeft, destRight) -> - transformComponent destLeft, left, right, 'left' - transformComponent destRight, right, left, 'right' +// Add transform and transformX functions for an OT type which has transformComponent defined. +// transformComponent(destination array, component, other component, side) +let bootstrapTransform; +exports['_bt'] = (bootstrapTransform = function(type, transformComponent, checkValidOp, append) { + let transformX; + const transformComponentX = function(left, right, destLeft, destRight) { + transformComponent(destLeft, left, right, 'left'); + return transformComponent(destRight, right, left, 'right'); + }; - # Transforms rightOp by leftOp. Returns ['rightOp', clientOp'] - type.transformX = type['transformX'] = transformX = (leftOp, rightOp) -> - checkValidOp leftOp - checkValidOp rightOp + // Transforms rightOp by leftOp. Returns ['rightOp', clientOp'] + type.transformX = (type['transformX'] = (transformX = function(leftOp, rightOp) { + checkValidOp(leftOp); + checkValidOp(rightOp); - newRightOp = [] + const newRightOp = []; - for rightComponent in rightOp - # Generate newLeftOp by composing leftOp by rightComponent - newLeftOp = [] + for (let rightComponent of Array.from(rightOp)) { + // Generate newLeftOp by composing leftOp by rightComponent + const newLeftOp = []; - k = 0 - while k < leftOp.length - nextC = [] - transformComponentX leftOp[k], rightComponent, newLeftOp, nextC - k++ + let k = 0; + while (k < leftOp.length) { + var l; + const nextC = []; + transformComponentX(leftOp[k], rightComponent, newLeftOp, nextC); + k++; - if nextC.length == 1 - rightComponent = nextC[0] - else if nextC.length == 0 - append newLeftOp, l for l in leftOp[k..] - rightComponent = null - break - else - # Recurse. - [l_, r_] = transformX leftOp[k..], nextC - append newLeftOp, l for l in l_ - append newRightOp, r for r in r_ - rightComponent = null - break + if (nextC.length === 1) { + rightComponent = nextC[0]; + } else if (nextC.length === 0) { + for (l of Array.from(leftOp.slice(k))) { append(newLeftOp, l); } + rightComponent = null; + break; + } else { + // Recurse. + const [l_, r_] = Array.from(transformX(leftOp.slice(k), nextC)); + for (l of Array.from(l_)) { append(newLeftOp, l); } + for (let r of Array.from(r_)) { append(newRightOp, r); } + rightComponent = null; + break; + } + } - append newRightOp, rightComponent if rightComponent? - leftOp = newLeftOp + if (rightComponent != null) { append(newRightOp, rightComponent); } + leftOp = newLeftOp; + } - [leftOp, newRightOp] + return [leftOp, newRightOp]; + })); - # Transforms op with specified type ('left' or 'right') by otherOp. - type.transform = type['transform'] = (op, otherOp, type) -> - throw new Error "type must be 'left' or 'right'" unless type == 'left' or type == 'right' + // Transforms op with specified type ('left' or 'right') by otherOp. + return type.transform = (type['transform'] = function(op, otherOp, type) { + let _; + if ((type !== 'left') && (type !== 'right')) { throw new Error("type must be 'left' or 'right'"); } - return op if otherOp.length == 0 + if (otherOp.length === 0) { return op; } - # TODO: Benchmark with and without this line. I _think_ it'll make a big difference...? 
- return transformComponent [], op[0], otherOp[0], type if op.length == 1 and otherOp.length == 1 + // TODO: Benchmark with and without this line. I _think_ it'll make a big difference...? + if ((op.length === 1) && (otherOp.length === 1)) { return transformComponent([], op[0], otherOp[0], type); } - if type == 'left' - [left, _] = transformX op, otherOp - left - else - [_, right] = transformX otherOp, op - right + if (type === 'left') { + let left; + [left, _] = Array.from(transformX(op, otherOp)); + return left; + } else { + let right; + [_, right] = Array.from(transformX(otherOp, op)); + return right; + } + }); +}); -if typeof WEB is 'undefined' - exports.bootstrapTransform = bootstrapTransform +if (typeof WEB === 'undefined') { + exports.bootstrapTransform = bootstrapTransform; +} diff --git a/services/document-updater/app/coffee/sharejs/index.js b/services/document-updater/app/coffee/sharejs/index.js index 6f3bb8ec20..bf681de7cd 100644 --- a/services/document-updater/app/coffee/sharejs/index.js +++ b/services/document-updater/app/coffee/sharejs/index.js @@ -1,15 +1,21 @@ +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ -register = (file) -> - type = require file - exports[type.name] = type - try require "#{file}-api" +const register = function(file) { + const type = require(file); + exports[type.name] = type; + try { return require(`${file}-api`); } catch (error) {} +}; -# Import all the built-in types. -register './simple' -register './count' +// Import all the built-in types. +register('./simple'); +register('./count'); -register './text' -register './text-composable' -register './text-tp2' +register('./text'); +register('./text-composable'); +register('./text-tp2'); -register './json' +register('./json'); diff --git a/services/document-updater/app/coffee/sharejs/json-api.js b/services/document-updater/app/coffee/sharejs/json-api.js index 8819dee798..1c7c2633ba 100644 --- a/services/document-updater/app/coffee/sharejs/json-api.js +++ b/services/document-updater/app/coffee/sharejs/json-api.js @@ -1,180 +1,273 @@ -# API for JSON OT +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// API for JSON OT -json = require './json' if typeof WEB is 'undefined' +let json; +if (typeof WEB === 'undefined') { json = require('./json'); } -if WEB? - extendDoc = exports.extendDoc - exports.extendDoc = (name, fn) -> - SubDoc::[name] = fn - extendDoc name, fn +if (typeof WEB !== 'undefined' && WEB !== null) { + const { + extendDoc + } = exports; + exports.extendDoc = function(name, fn) { + SubDoc.prototype[name] = fn; + return extendDoc(name, fn); + }; +} -depath = (path) -> - if path.length == 1 and path[0].constructor == Array - path[0] - else path +const depath = function(path) { + if ((path.length === 1) && (path[0].constructor === Array)) { + return path[0]; + } else { return path; } +}; -class SubDoc - constructor: (@doc, @path) -> - at: (path...) -> @doc.at @path.concat depath path - get: -> @doc.getAt @path - # for objects and lists - set: (value, cb) -> @doc.setAt @path, value, cb - # for strings and lists. 
- insert: (pos, value, cb) -> @doc.insertAt @path, pos, value, cb - # for strings - del: (pos, length, cb) -> @doc.deleteTextAt @path, length, pos, cb - # for objects and lists - remove: (cb) -> @doc.removeAt @path, cb - push: (value, cb) -> @insert @get().length, value, cb - move: (from, to, cb) -> @doc.moveAt @path, from, to, cb - add: (amount, cb) -> @doc.addAt @path, amount, cb - on: (event, cb) -> @doc.addListener @path, event, cb - removeListener: (l) -> @doc.removeListener l +class SubDoc { + constructor(doc, path) { + this.doc = doc; + this.path = path; + } + at(...path) { return this.doc.at(this.path.concat(depath(path))); } + get() { return this.doc.getAt(this.path); } + // for objects and lists + set(value, cb) { return this.doc.setAt(this.path, value, cb); } + // for strings and lists. + insert(pos, value, cb) { return this.doc.insertAt(this.path, pos, value, cb); } + // for strings + del(pos, length, cb) { return this.doc.deleteTextAt(this.path, length, pos, cb); } + // for objects and lists + remove(cb) { return this.doc.removeAt(this.path, cb); } + push(value, cb) { return this.insert(this.get().length, value, cb); } + move(from, to, cb) { return this.doc.moveAt(this.path, from, to, cb); } + add(amount, cb) { return this.doc.addAt(this.path, amount, cb); } + on(event, cb) { return this.doc.addListener(this.path, event, cb); } + removeListener(l) { return this.doc.removeListener(l); } - # text API compatibility - getLength: -> @get().length - getText: -> @get() + // text API compatibility + getLength() { return this.get().length; } + getText() { return this.get(); } +} -traverse = (snapshot, path) -> - container = data:snapshot - key = 'data' - elem = container - for p in path - elem = elem[key] - key = p - throw new Error 'bad path' if typeof elem == 'undefined' - {elem, key} +const traverse = function(snapshot, path) { + const container = {data:snapshot}; + let key = 'data'; + let elem = container; + for (let p of Array.from(path)) { + elem = elem[key]; + key = p; + if (typeof elem === 'undefined') { throw new Error('bad path'); } + } + return {elem, key}; +}; -pathEquals = (p1, p2) -> - return false if p1.length != p2.length - for e,i in p1 - return false if e != p2[i] - true +const pathEquals = function(p1, p2) { + if (p1.length !== p2.length) { return false; } + for (let i = 0; i < p1.length; i++) { + const e = p1[i]; + if (e !== p2[i]) { return false; } + } + return true; +}; -json.api = - provides: {json:true} +json.api = { + provides: {json:true}, - at: (path...) 
-> new SubDoc this, depath path + at(...path) { return new SubDoc(this, depath(path)); }, - get: -> @snapshot - set: (value, cb) -> @setAt [], value, cb + get() { return this.snapshot; }, + set(value, cb) { return this.setAt([], value, cb); }, - getAt: (path) -> - {elem, key} = traverse @snapshot, path - return elem[key] + getAt(path) { + const {elem, key} = traverse(this.snapshot, path); + return elem[key]; + }, - setAt: (path, value, cb) -> - {elem, key} = traverse @snapshot, path - op = {p:path} - if elem.constructor == Array - op.li = value - op.ld = elem[key] if typeof elem[key] != 'undefined' - else if typeof elem == 'object' - op.oi = value - op.od = elem[key] if typeof elem[key] != 'undefined' - else throw new Error 'bad path' - @submitOp [op], cb + setAt(path, value, cb) { + const {elem, key} = traverse(this.snapshot, path); + const op = {p:path}; + if (elem.constructor === Array) { + op.li = value; + if (typeof elem[key] !== 'undefined') { op.ld = elem[key]; } + } else if (typeof elem === 'object') { + op.oi = value; + if (typeof elem[key] !== 'undefined') { op.od = elem[key]; } + } else { throw new Error('bad path'); } + return this.submitOp([op], cb); + }, - removeAt: (path, cb) -> - {elem, key} = traverse @snapshot, path - throw new Error 'no element at that path' unless typeof elem[key] != 'undefined' - op = {p:path} - if elem.constructor == Array - op.ld = elem[key] - else if typeof elem == 'object' - op.od = elem[key] - else throw new Error 'bad path' - @submitOp [op], cb + removeAt(path, cb) { + const {elem, key} = traverse(this.snapshot, path); + if (typeof elem[key] === 'undefined') { throw new Error('no element at that path'); } + const op = {p:path}; + if (elem.constructor === Array) { + op.ld = elem[key]; + } else if (typeof elem === 'object') { + op.od = elem[key]; + } else { throw new Error('bad path'); } + return this.submitOp([op], cb); + }, - insertAt: (path, pos, value, cb) -> - {elem, key} = traverse @snapshot, path - op = {p:path.concat pos} - if elem[key].constructor == Array - op.li = value - else if typeof elem[key] == 'string' - op.si = value - @submitOp [op], cb + insertAt(path, pos, value, cb) { + const {elem, key} = traverse(this.snapshot, path); + const op = {p:path.concat(pos)}; + if (elem[key].constructor === Array) { + op.li = value; + } else if (typeof elem[key] === 'string') { + op.si = value; + } + return this.submitOp([op], cb); + }, - moveAt: (path, from, to, cb) -> - op = [{p:path.concat(from), lm:to}] - @submitOp op, cb + moveAt(path, from, to, cb) { + const op = [{p:path.concat(from), lm:to}]; + return this.submitOp(op, cb); + }, - addAt: (path, amount, cb) -> - op = [{p:path, na:amount}] - @submitOp op, cb + addAt(path, amount, cb) { + const op = [{p:path, na:amount}]; + return this.submitOp(op, cb); + }, - deleteTextAt: (path, length, pos, cb) -> - {elem, key} = traverse @snapshot, path - op = [{p:path.concat(pos), sd:elem[key][pos...(pos + length)]}] - @submitOp op, cb + deleteTextAt(path, length, pos, cb) { + const {elem, key} = traverse(this.snapshot, path); + const op = [{p:path.concat(pos), sd:elem[key].slice(pos, (pos + length))}]; + return this.submitOp(op, cb); + }, - addListener: (path, event, cb) -> - l = {path, event, cb} - @_listeners.push l - l - removeListener: (l) -> - i = @_listeners.indexOf l - return false if i < 0 - @_listeners.splice i, 1 - return true - _register: -> - @_listeners = [] - @on 'change', (op) -> - for c in op - if c.na != undefined or c.si != undefined or c.sd != undefined - # no change to structure - 
continue - to_remove = [] - for l, i in @_listeners - # Transform a dummy op by the incoming op to work out what - # should happen to the listener. - dummy = {p:l.path, na:0} - xformed = @type.transformComponent [], dummy, c, 'left' - if xformed.length == 0 - # The op was transformed to noop, so we should delete the listener. - to_remove.push i - else if xformed.length == 1 - # The op remained, so grab its new path into the listener. - l.path = xformed[0].p - else - throw new Error "Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components." - to_remove.sort (a, b) -> b - a - for i in to_remove - @_listeners.splice i, 1 - @on 'remoteop', (op) -> - for c in op - match_path = if c.na == undefined then c.p[...c.p.length-1] else c.p - for {path, event, cb} in @_listeners - if pathEquals path, match_path - switch event - when 'insert' - if c.li != undefined and c.ld == undefined - cb(c.p[c.p.length-1], c.li) - else if c.oi != undefined and c.od == undefined - cb(c.p[c.p.length-1], c.oi) - else if c.si != undefined - cb(c.p[c.p.length-1], c.si) - when 'delete' - if c.li == undefined and c.ld != undefined - cb(c.p[c.p.length-1], c.ld) - else if c.oi == undefined and c.od != undefined - cb(c.p[c.p.length-1], c.od) - else if c.sd != undefined - cb(c.p[c.p.length-1], c.sd) - when 'replace' - if c.li != undefined and c.ld != undefined - cb(c.p[c.p.length-1], c.ld, c.li) - else if c.oi != undefined and c.od != undefined - cb(c.p[c.p.length-1], c.od, c.oi) - when 'move' - if c.lm != undefined - cb(c.p[c.p.length-1], c.lm) - when 'add' - if c.na != undefined - cb(c.na) - else if (common = @type.commonPath match_path, path)? - if event == 'child op' - if match_path.length == path.length == common - throw new Error "paths match length and have commonality, but aren't equal?" - child_path = c.p[common+1..] - cb(child_path, c) + addListener(path, event, cb) { + const l = {path, event, cb}; + this._listeners.push(l); + return l; + }, + removeListener(l) { + const i = this._listeners.indexOf(l); + if (i < 0) { return false; } + this._listeners.splice(i, 1); + return true; + }, + _register() { + this._listeners = []; + this.on('change', function(op) { + return (() => { + const result = []; + for (let c of Array.from(op)) { + var i; + if ((c.na !== undefined) || (c.si !== undefined) || (c.sd !== undefined)) { + // no change to structure + continue; + } + var to_remove = []; + for (i = 0; i < this._listeners.length; i++) { + // Transform a dummy op by the incoming op to work out what + // should happen to the listener. + const l = this._listeners[i]; + const dummy = {p:l.path, na:0}; + const xformed = this.type.transformComponent([], dummy, c, 'left'); + if (xformed.length === 0) { + // The op was transformed to noop, so we should delete the listener. + to_remove.push(i); + } else if (xformed.length === 1) { + // The op remained, so grab its new path into the listener. + l.path = xformed[0].p; + } else { + throw new Error("Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components."); + } + } + to_remove.sort((a, b) => b - a); + result.push((() => { + const result1 = []; + for (i of Array.from(to_remove)) { + result1.push(this._listeners.splice(i, 1)); + } + return result1; + })()); + } + return result; + })(); + }); + return this.on('remoteop', function(op) { + return (() => { + const result = []; + for (var c of Array.from(op)) { + var match_path = c.na === undefined ? 
c.p.slice(0, c.p.length-1) : c.p; + result.push((() => { + const result1 = []; + for (let {path, event, cb} of Array.from(this._listeners)) { + var common; + if (pathEquals(path, match_path)) { + switch (event) { + case 'insert': + if ((c.li !== undefined) && (c.ld === undefined)) { + result1.push(cb(c.p[c.p.length-1], c.li)); + } else if ((c.oi !== undefined) && (c.od === undefined)) { + result1.push(cb(c.p[c.p.length-1], c.oi)); + } else if (c.si !== undefined) { + result1.push(cb(c.p[c.p.length-1], c.si)); + } else { + result1.push(undefined); + } + break; + case 'delete': + if ((c.li === undefined) && (c.ld !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.ld)); + } else if ((c.oi === undefined) && (c.od !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.od)); + } else if (c.sd !== undefined) { + result1.push(cb(c.p[c.p.length-1], c.sd)); + } else { + result1.push(undefined); + } + break; + case 'replace': + if ((c.li !== undefined) && (c.ld !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.ld, c.li)); + } else if ((c.oi !== undefined) && (c.od !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.od, c.oi)); + } else { + result1.push(undefined); + } + break; + case 'move': + if (c.lm !== undefined) { + result1.push(cb(c.p[c.p.length-1], c.lm)); + } else { + result1.push(undefined); + } + break; + case 'add': + if (c.na !== undefined) { + result1.push(cb(c.na)); + } else { + result1.push(undefined); + } + break; + default: + result1.push(undefined); + } + } else if ((common = this.type.commonPath(match_path, path)) != null) { + if (event === 'child op') { + if (match_path.length === path.length && path.length === common) { + throw new Error("paths match length and have commonality, but aren't equal?"); + } + const child_path = c.p.slice(common+1); + result1.push(cb(child_path, c)); + } else { + result1.push(undefined); + } + } else { + result1.push(undefined); + } + } + return result1; + })()); + } + return result; + })(); + }); + } +}; diff --git a/services/document-updater/app/coffee/sharejs/json.js b/services/document-updater/app/coffee/sharejs/json.js index b03b0947ef..3e3bee79d9 100644 --- a/services/document-updater/app/coffee/sharejs/json.js +++ b/services/document-updater/app/coffee/sharejs/json.js @@ -1,441 +1,534 @@ -# This is the implementation of the JSON OT type. -# -# Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This is the implementation of the JSON OT type. +// +// Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations -if WEB? 
- text = exports.types.text -else - text = require './text' +let text; +if (typeof WEB !== 'undefined' && WEB !== null) { + ({ + text + } = exports.types); +} else { + text = require('./text'); +} -json = {} +const json = {}; -json.name = 'json' +json.name = 'json'; -json.create = -> null +json.create = () => null; -json.invertComponent = (c) -> - c_ = {p: c.p} - c_.sd = c.si if c.si != undefined - c_.si = c.sd if c.sd != undefined - c_.od = c.oi if c.oi != undefined - c_.oi = c.od if c.od != undefined - c_.ld = c.li if c.li != undefined - c_.li = c.ld if c.ld != undefined - c_.na = -c.na if c.na != undefined - if c.lm != undefined - c_.lm = c.p[c.p.length-1] - c_.p = c.p[0...c.p.length - 1].concat([c.lm]) - c_ +json.invertComponent = function(c) { + const c_ = {p: c.p}; + if (c.si !== undefined) { c_.sd = c.si; } + if (c.sd !== undefined) { c_.si = c.sd; } + if (c.oi !== undefined) { c_.od = c.oi; } + if (c.od !== undefined) { c_.oi = c.od; } + if (c.li !== undefined) { c_.ld = c.li; } + if (c.ld !== undefined) { c_.li = c.ld; } + if (c.na !== undefined) { c_.na = -c.na; } + if (c.lm !== undefined) { + c_.lm = c.p[c.p.length-1]; + c_.p = c.p.slice(0, c.p.length - 1).concat([c.lm]); + } + return c_; +}; -json.invert = (op) -> json.invertComponent c for c in op.slice().reverse() +json.invert = op => Array.from(op.slice().reverse()).map((c) => json.invertComponent(c)); -json.checkValidOp = (op) -> +json.checkValidOp = function(op) {}; -isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]' -json.checkList = (elem) -> - throw new Error 'Referenced element not a list' unless isArray(elem) +const isArray = o => Object.prototype.toString.call(o) === '[object Array]'; +json.checkList = function(elem) { + if (!isArray(elem)) { throw new Error('Referenced element not a list'); } +}; -json.checkObj = (elem) -> - throw new Error "Referenced element not an object (it was #{JSON.stringify elem})" unless elem.constructor is Object +json.checkObj = function(elem) { + if (elem.constructor !== Object) { throw new Error(`Referenced element not an object (it was ${JSON.stringify(elem)})`); } +}; -json.apply = (snapshot, op) -> - json.checkValidOp op - op = clone op +json.apply = function(snapshot, op) { + json.checkValidOp(op); + op = clone(op); - container = {data: clone snapshot} + const container = {data: clone(snapshot)}; - try - for c, i in op - parent = null - parentkey = null - elem = container - key = 'data' + try { + for (let i = 0; i < op.length; i++) { + const c = op[i]; + let parent = null; + let parentkey = null; + let elem = container; + let key = 'data'; - for p in c.p - parent = elem - parentkey = key - elem = elem[key] - key = p + for (let p of Array.from(c.p)) { + parent = elem; + parentkey = key; + elem = elem[key]; + key = p; - throw new Error 'Path invalid' unless parent? + if (parent == null) { throw new Error('Path invalid'); } + } - if c.na != undefined - # Number add - throw new Error 'Referenced element not a number' unless typeof elem[key] is 'number' - elem[key] += c.na + if (c.na !== undefined) { + // Number add + if (typeof elem[key] !== 'number') { throw new Error('Referenced element not a number'); } + elem[key] += c.na; - else if c.si != undefined - # String insert - throw new Error "Referenced element not a string (it was #{JSON.stringify elem})" unless typeof elem is 'string' - parent[parentkey] = elem[...key] + c.si + elem[key..] 
- else if c.sd != undefined - # String delete - throw new Error 'Referenced element not a string' unless typeof elem is 'string' - throw new Error 'Deleted string does not match' unless elem[key...key + c.sd.length] == c.sd - parent[parentkey] = elem[...key] + elem[key + c.sd.length..] + } else if (c.si !== undefined) { + // String insert + if (typeof elem !== 'string') { throw new Error(`Referenced element not a string (it was ${JSON.stringify(elem)})`); } + parent[parentkey] = elem.slice(0, key) + c.si + elem.slice(key); + } else if (c.sd !== undefined) { + // String delete + if (typeof elem !== 'string') { throw new Error('Referenced element not a string'); } + if (elem.slice(key, key + c.sd.length) !== c.sd) { throw new Error('Deleted string does not match'); } + parent[parentkey] = elem.slice(0, key) + elem.slice(key + c.sd.length); - else if c.li != undefined && c.ld != undefined - # List replace - json.checkList elem + } else if ((c.li !== undefined) && (c.ld !== undefined)) { + // List replace + json.checkList(elem); - # Should check the list element matches c.ld - elem[key] = c.li - else if c.li != undefined - # List insert - json.checkList elem + // Should check the list element matches c.ld + elem[key] = c.li; + } else if (c.li !== undefined) { + // List insert + json.checkList(elem); - elem.splice key, 0, c.li - else if c.ld != undefined - # List delete - json.checkList elem + elem.splice(key, 0, c.li); + } else if (c.ld !== undefined) { + // List delete + json.checkList(elem); - # Should check the list element matches c.ld here too. - elem.splice key, 1 - else if c.lm != undefined - # List move - json.checkList elem - if c.lm != key - e = elem[key] - # Remove it... - elem.splice key, 1 - # And insert it back. - elem.splice c.lm, 0, e + // Should check the list element matches c.ld here too. + elem.splice(key, 1); + } else if (c.lm !== undefined) { + // List move + json.checkList(elem); + if (c.lm !== key) { + const e = elem[key]; + // Remove it... + elem.splice(key, 1); + // And insert it back. + elem.splice(c.lm, 0, e); + } - else if c.oi != undefined - # Object insert / replace - json.checkObj elem + } else if (c.oi !== undefined) { + // Object insert / replace + json.checkObj(elem); - # Should check that elem[key] == c.od - elem[key] = c.oi - else if c.od != undefined - # Object delete - json.checkObj elem + // Should check that elem[key] == c.od + elem[key] = c.oi; + } else if (c.od !== undefined) { + // Object delete + json.checkObj(elem); - # Should check that elem[key] == c.od - delete elem[key] - else - throw new Error 'invalid / missing instruction in op' - catch error - # TODO: Roll back all already applied changes. Write tests before implementing this code. - throw error + // Should check that elem[key] == c.od + delete elem[key]; + } else { + throw new Error('invalid / missing instruction in op'); + } + } + } catch (error) { + // TODO: Roll back all already applied changes. Write tests before implementing this code. + throw error; + } - container.data + return container.data; +}; -# Checks if two paths, p1 and p2 match. -json.pathMatches = (p1, p2, ignoreLast) -> - return false unless p1.length == p2.length +// Checks if two paths, p1 and p2 match. 
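// For example (illustrative): pathMatches(['a', 1], ['a', 1]) is true,
// pathMatches(['a', 1], ['a', 2]) is false, and
// pathMatches(['a', 1], ['a', 2], true) is true, because ignoreLast skips
// the comparison of the final path segment.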
+json.pathMatches = function(p1, p2, ignoreLast) { + if (p1.length !== p2.length) { return false; } - for p, i in p1 - return false if p != p2[i] and (!ignoreLast or i != p1.length - 1) + for (let i = 0; i < p1.length; i++) { + const p = p1[i]; + if ((p !== p2[i]) && (!ignoreLast || (i !== (p1.length - 1)))) { return false; } + } - true + return true; +}; -json.append = (dest, c) -> - c = clone c - if dest.length != 0 and json.pathMatches c.p, (last = dest[dest.length - 1]).p - if last.na != undefined and c.na != undefined - dest[dest.length - 1] = { p: last.p, na: last.na + c.na } - else if last.li != undefined and c.li == undefined and c.ld == last.li - # insert immediately followed by delete becomes a noop. - if last.ld != undefined - # leave the delete part of the replace - delete last.li - else - dest.pop() - else if last.od != undefined and last.oi == undefined and - c.oi != undefined and c.od == undefined - last.oi = c.oi - else if c.lm != undefined and c.p[c.p.length-1] == c.lm - null # don't do anything - else - dest.push c - else - dest.push c +json.append = function(dest, c) { + let last; + c = clone(c); + if ((dest.length !== 0) && json.pathMatches(c.p, (last = dest[dest.length - 1]).p)) { + if ((last.na !== undefined) && (c.na !== undefined)) { + return dest[dest.length - 1] = { p: last.p, na: last.na + c.na }; + } else if ((last.li !== undefined) && (c.li === undefined) && (c.ld === last.li)) { + // insert immediately followed by delete becomes a noop. + if (last.ld !== undefined) { + // leave the delete part of the replace + return delete last.li; + } else { + return dest.pop(); + } + } else if ((last.od !== undefined) && (last.oi === undefined) && + (c.oi !== undefined) && (c.od === undefined)) { + return last.oi = c.oi; + } else if ((c.lm !== undefined) && (c.p[c.p.length-1] === c.lm)) { + return null; // don't do anything + } else { + return dest.push(c); + } + } else { + return dest.push(c); + } +}; -json.compose = (op1, op2) -> - json.checkValidOp op1 - json.checkValidOp op2 +json.compose = function(op1, op2) { + json.checkValidOp(op1); + json.checkValidOp(op2); - newOp = clone op1 - json.append newOp, c for c in op2 + const newOp = clone(op1); + for (let c of Array.from(op2)) { json.append(newOp, c); } - newOp + return newOp; +}; -json.normalize = (op) -> - newOp = [] +json.normalize = function(op) { + const newOp = []; - op = [op] unless isArray op + if (!isArray(op)) { op = [op]; } - for c in op - c.p ?= [] - json.append newOp, c + for (let c of Array.from(op)) { + if (c.p == null) { c.p = []; } + json.append(newOp, c); + } - newOp + return newOp; +}; -# hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming -# we have browser support for JSON. -# http://jsperf.com/cloning-an-object/12 -clone = (o) -> JSON.parse(JSON.stringify o) +// hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming +// we have browser support for JSON. 
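// (JSON round-tripping drops functions and undefined values and throws on
// cyclic structures, which is acceptable here: OT snapshots and op
// components are plain JSON data by construction.)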
+// http://jsperf.com/cloning-an-object/12 +var clone = o => JSON.parse(JSON.stringify(o)); -json.commonPath = (p1, p2) -> - p1 = p1.slice() - p2 = p2.slice() - p1.unshift('data') - p2.unshift('data') - p1 = p1[...p1.length-1] - p2 = p2[...p2.length-1] - return -1 if p2.length == 0 - i = 0 - while p1[i] == p2[i] && i < p1.length - i++ - if i == p2.length - return i-1 - return +json.commonPath = function(p1, p2) { + p1 = p1.slice(); + p2 = p2.slice(); + p1.unshift('data'); + p2.unshift('data'); + p1 = p1.slice(0, p1.length-1); + p2 = p2.slice(0, p2.length-1); + if (p2.length === 0) { return -1; } + let i = 0; + while ((p1[i] === p2[i]) && (i < p1.length)) { + i++; + if (i === p2.length) { + return i-1; + } + } +}; -# transform c so it applies to a document with otherC applied. -json.transformComponent = (dest, c, otherC, type) -> - c = clone c - c.p.push(0) if c.na != undefined - otherC.p.push(0) if otherC.na != undefined +// transform c so it applies to a document with otherC applied. +json.transformComponent = function(dest, c, otherC, type) { + let oc; + c = clone(c); + if (c.na !== undefined) { c.p.push(0); } + if (otherC.na !== undefined) { otherC.p.push(0); } - common = json.commonPath c.p, otherC.p - common2 = json.commonPath otherC.p, c.p + const common = json.commonPath(c.p, otherC.p); + const common2 = json.commonPath(otherC.p, c.p); - cplength = c.p.length - otherCplength = otherC.p.length + const cplength = c.p.length; + const otherCplength = otherC.p.length; - c.p.pop() if c.na != undefined # hax - otherC.p.pop() if otherC.na != undefined + if (c.na !== undefined) { c.p.pop(); } // hax + if (otherC.na !== undefined) { otherC.p.pop(); } - if otherC.na - if common2? && otherCplength >= cplength && otherC.p[common2] == c.p[common2] - if c.ld != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.ld = json.apply clone(c.ld), [oc] - else if c.od != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.od = json.apply clone(c.od), [oc] - json.append dest, c - return dest + if (otherC.na) { + if ((common2 != null) && (otherCplength >= cplength) && (otherC.p[common2] === c.p[common2])) { + if (c.ld !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.ld = json.apply(clone(c.ld), [oc]); + } else if (c.od !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.od = json.apply(clone(c.od), [oc]); + } + } + json.append(dest, c); + return dest; + } - if common2? && otherCplength > cplength && c.p[common2] == otherC.p[common2] - # transform based on c - if c.ld != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.ld = json.apply clone(c.ld), [oc] - else if c.od != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.od = json.apply clone(c.od), [oc] + if ((common2 != null) && (otherCplength > cplength) && (c.p[common2] === otherC.p[common2])) { + // transform based on c + if (c.ld !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.ld = json.apply(clone(c.ld), [oc]); + } else if (c.od !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.od = json.apply(clone(c.od), [oc]); + } + } - if common? 
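// Illustrative trace of the commonPath() values tested below: with
// c.p = ['list', 0] and otherC.p = ['list', 1], commonPath returns 1, i.e.
// both ops share the parent ['list'] and position 1 of each path holds the
// op's index within that list (c.p[common] === 0, otherC.p[common] === 1).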
- commonOperand = cplength == otherCplength - # transform based on otherC - if otherC.na != undefined - # this case is handled above due to icky path hax - else if otherC.si != undefined || otherC.sd != undefined - # String op vs string op - pass through to text type - if c.si != undefined || c.sd != undefined - throw new Error("must be a string?") unless commonOperand + if (common != null) { + let from, p, to; + const commonOperand = cplength === otherCplength; + // transform based on otherC + if (otherC.na !== undefined) { + // this case is handled above due to icky path hax + } else if ((otherC.si !== undefined) || (otherC.sd !== undefined)) { + // String op vs string op - pass through to text type + if ((c.si !== undefined) || (c.sd !== undefined)) { + if (!commonOperand) { throw new Error("must be a string?"); } - # Convert an op component to a text op component - convert = (component) -> - newC = p:component.p[component.p.length - 1] - if component.si - newC.i = component.si - else - newC.d = component.sd - newC + // Convert an op component to a text op component + const convert = function(component) { + const newC = {p:component.p[component.p.length - 1]}; + if (component.si) { + newC.i = component.si; + } else { + newC.d = component.sd; + } + return newC; + }; - tc1 = convert c - tc2 = convert otherC + const tc1 = convert(c); + const tc2 = convert(otherC); - res = [] - text._tc res, tc1, tc2, type - for tc in res - jc = { p: c.p[...common] } - jc.p.push(tc.p) - jc.si = tc.i if tc.i? - jc.sd = tc.d if tc.d? - json.append dest, jc - return dest - else if otherC.li != undefined && otherC.ld != undefined - if otherC.p[common] == c.p[common] - # noop - if !commonOperand - # we're below the deleted element, so -> noop - return dest - else if c.ld != undefined - # we're trying to delete the same element, -> noop - if c.li != undefined and type == 'left' - # we're both replacing one element with another. only one can - # survive! - c.ld = clone otherC.li - else - return dest - else if otherC.li != undefined - if c.li != undefined and c.ld == undefined and commonOperand and c.p[common] == otherC.p[common] - # in li vs. li, left wins. - if type == 'right' - c.p[common]++ - else if otherC.p[common] <= c.p[common] - c.p[common]++ + const res = []; + text._tc(res, tc1, tc2, type); + for (let tc of Array.from(res)) { + const jc = { p: c.p.slice(0, common) }; + jc.p.push(tc.p); + if (tc.i != null) { jc.si = tc.i; } + if (tc.d != null) { jc.sd = tc.d; } + json.append(dest, jc); + } + return dest; + } + } else if ((otherC.li !== undefined) && (otherC.ld !== undefined)) { + if (otherC.p[common] === c.p[common]) { + // noop + if (!commonOperand) { + // we're below the deleted element, so -> noop + return dest; + } else if (c.ld !== undefined) { + // we're trying to delete the same element, -> noop + if ((c.li !== undefined) && (type === 'left')) { + // we're both replacing one element with another. only one can + // survive! + c.ld = clone(otherC.li); + } else { + return dest; + } + } + } + } else if (otherC.li !== undefined) { + if ((c.li !== undefined) && (c.ld === undefined) && commonOperand && (c.p[common] === otherC.p[common])) { + // in li vs. li, left wins. + if (type === 'right') { + c.p[common]++; + } + } else if (otherC.p[common] <= c.p[common]) { + c.p[common]++; + } - if c.lm != undefined - if commonOperand - # otherC edits the same list we edit - if otherC.p[common] <= c.lm - c.lm++ - # changing c.from is handled above. 
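// A hedged check of the li-vs-li tie-break above, via the transform() that
// bootstrapTransform() attaches to this type at the bottom of the file:
const opA = [{p: [0], li: 'a'}]; // "left" client inserts at index 0
const opB = [{p: [0], li: 'b'}]; // "right" client inserts at index 0
console.log(json.transform(opB, opA, 'right')); // [{p: [1], li: 'b'}] - shifted past the winner
console.log(json.transform(opA, opB, 'left'));  // [{p: [0], li: 'a'}] - left wins, unchanged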
- else if otherC.ld != undefined - if c.lm != undefined - if commonOperand - if otherC.p[common] == c.p[common] - # they deleted the thing we're trying to move - return dest - # otherC edits the same list we edit - p = otherC.p[common] - from = c.p[common] - to = c.lm - if p < to || (p == to && from < to) - c.lm-- + if (c.lm !== undefined) { + if (commonOperand) { + // otherC edits the same list we edit + if (otherC.p[common] <= c.lm) { + c.lm++; + } + } + } + // changing c.from is handled above. + } else if (otherC.ld !== undefined) { + if (c.lm !== undefined) { + if (commonOperand) { + if (otherC.p[common] === c.p[common]) { + // they deleted the thing we're trying to move + return dest; + } + // otherC edits the same list we edit + p = otherC.p[common]; + from = c.p[common]; + to = c.lm; + if ((p < to) || ((p === to) && (from < to))) { + c.lm--; + } + } + } - if otherC.p[common] < c.p[common] - c.p[common]-- - else if otherC.p[common] == c.p[common] - if otherCplength < cplength - # we're below the deleted element, so -> noop - return dest - else if c.ld != undefined - if c.li != undefined - # we're replacing, they're deleting. we become an insert. - delete c.ld - else - # we're trying to delete the same element, -> noop - return dest - else if otherC.lm != undefined - if c.lm != undefined and cplength == otherCplength - # lm vs lm, here we go! - from = c.p[common] - to = c.lm - otherFrom = otherC.p[common] - otherTo = otherC.lm - if otherFrom != otherTo - # if otherFrom == otherTo, we don't need to change our op. + if (otherC.p[common] < c.p[common]) { + c.p[common]--; + } else if (otherC.p[common] === c.p[common]) { + if (otherCplength < cplength) { + // we're below the deleted element, so -> noop + return dest; + } else if (c.ld !== undefined) { + if (c.li !== undefined) { + // we're replacing, they're deleting. we become an insert. + delete c.ld; + } else { + // we're trying to delete the same element, -> noop + return dest; + } + } + } + } else if (otherC.lm !== undefined) { + if ((c.lm !== undefined) && (cplength === otherCplength)) { + // lm vs lm, here we go! + from = c.p[common]; + to = c.lm; + const otherFrom = otherC.p[common]; + const otherTo = otherC.lm; + if (otherFrom !== otherTo) { + // if otherFrom == otherTo, we don't need to change our op. - # where did my thing go? - if from == otherFrom - # they moved it! tie break. - if type == 'left' - c.p[common] = otherTo - if from == to # ugh - c.lm = otherTo - else - return dest - else - # they moved around it - if from > otherFrom - c.p[common]-- - if from > otherTo - c.p[common]++ - else if from == otherTo - if otherFrom > otherTo - c.p[common]++ - if from == to # ugh, again - c.lm++ + // where did my thing go? + if (from === otherFrom) { + // they moved it! tie break. + if (type === 'left') { + c.p[common] = otherTo; + if (from === to) { // ugh + c.lm = otherTo; + } + } else { + return dest; + } + } else { + // they moved around it + if (from > otherFrom) { + c.p[common]--; + } + if (from > otherTo) { + c.p[common]++; + } else if (from === otherTo) { + if (otherFrom > otherTo) { + c.p[common]++; + if (from === to) { // ugh, again + c.lm++; + } + } + } - # step 2: where am i going to put it? 
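// (The two steps mirror each other: step 1 re-derives where this op's item
//  now sits after the other move, step 2 re-derives where this op's own
//  destination index lands.)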
- if to > otherFrom - c.lm-- - else if to == otherFrom - if to > from - c.lm-- - if to > otherTo - c.lm++ - else if to == otherTo - # if we're both moving in the same direction, tie break - if (otherTo > otherFrom and to > from) or - (otherTo < otherFrom and to < from) - if type == 'right' - c.lm++ - else - if to > from - c.lm++ - else if to == otherFrom - c.lm-- - else if c.li != undefined and c.ld == undefined and commonOperand - # li - from = otherC.p[common] - to = otherC.lm - p = c.p[common] - if p > from - c.p[common]-- - if p > to - c.p[common]++ - else - # ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath - # the lm - # - # i.e. things care about where their item is after the move. - from = otherC.p[common] - to = otherC.lm - p = c.p[common] - if p == from - c.p[common] = to - else - if p > from - c.p[common]-- - if p > to - c.p[common]++ - else if p == to - if from > to - c.p[common]++ - else if otherC.oi != undefined && otherC.od != undefined - if c.p[common] == otherC.p[common] - if c.oi != undefined and commonOperand - # we inserted where someone else replaced - if type == 'right' - # left wins - return dest - else - # we win, make our op replace what they inserted - c.od = otherC.oi - else - # -> noop if the other component is deleting the same object (or any - # parent) - return dest - else if otherC.oi != undefined - if c.oi != undefined and c.p[common] == otherC.p[common] - # left wins if we try to insert at the same place - if type == 'left' - json.append dest, {p:c.p, od:otherC.oi} - else - return dest - else if otherC.od != undefined - if c.p[common] == otherC.p[common] - return dest if !commonOperand - if c.oi != undefined - delete c.od - else - return dest + // step 2: where am i going to put it? + if (to > otherFrom) { + c.lm--; + } else if (to === otherFrom) { + if (to > from) { + c.lm--; + } + } + if (to > otherTo) { + c.lm++; + } else if (to === otherTo) { + // if we're both moving in the same direction, tie break + if (((otherTo > otherFrom) && (to > from)) || + ((otherTo < otherFrom) && (to < from))) { + if (type === 'right') { + c.lm++; + } + } else { + if (to > from) { + c.lm++; + } else if (to === otherFrom) { + c.lm--; + } + } + } + } + } + } else if ((c.li !== undefined) && (c.ld === undefined) && commonOperand) { + // li + from = otherC.p[common]; + to = otherC.lm; + p = c.p[common]; + if (p > from) { + c.p[common]--; + } + if (p > to) { + c.p[common]++; + } + } else { + // ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath + // the lm + // + // i.e. things care about where their item is after the move. 
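// (Concretely: an op addressing index 4 of a list in which element 1 was
//  moved to 6 is re-addressed to index 3, since everything between the two
//  positions shifts down by one.)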
+ from = otherC.p[common]; + to = otherC.lm; + p = c.p[common]; + if (p === from) { + c.p[common] = to; + } else { + if (p > from) { + c.p[common]--; + } + if (p > to) { + c.p[common]++; + } else if (p === to) { + if (from > to) { + c.p[common]++; + } + } + } + } + } else if ((otherC.oi !== undefined) && (otherC.od !== undefined)) { + if (c.p[common] === otherC.p[common]) { + if ((c.oi !== undefined) && commonOperand) { + // we inserted where someone else replaced + if (type === 'right') { + // left wins + return dest; + } else { + // we win, make our op replace what they inserted + c.od = otherC.oi; + } + } else { + // -> noop if the other component is deleting the same object (or any + // parent) + return dest; + } + } + } else if (otherC.oi !== undefined) { + if ((c.oi !== undefined) && (c.p[common] === otherC.p[common])) { + // left wins if we try to insert at the same place + if (type === 'left') { + json.append(dest, {p:c.p, od:otherC.oi}); + } else { + return dest; + } + } + } else if (otherC.od !== undefined) { + if (c.p[common] === otherC.p[common]) { + if (!commonOperand) { return dest; } + if (c.oi !== undefined) { + delete c.od; + } else { + return dest; + } + } + } + } - json.append dest, c - return dest + json.append(dest, c); + return dest; +}; -if WEB? - exports.types ||= {} +if (typeof WEB !== 'undefined' && WEB !== null) { + if (!exports.types) { exports.types = {}; } - # This is kind of awful - come up with a better way to hook this helper code up. - exports._bt(json, json.transformComponent, json.checkValidOp, json.append) + // This is kind of awful - come up with a better way to hook this helper code up. + exports._bt(json, json.transformComponent, json.checkValidOp, json.append); - # [] is used to prevent closure from renaming types.text - exports.types.json = json -else - module.exports = json + // [] is used to prevent closure from renaming types.text + exports.types.json = json; +} else { + module.exports = json; - require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append) + require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append); +} diff --git a/services/document-updater/app/coffee/sharejs/model.js b/services/document-updater/app/coffee/sharejs/model.js index 284d6fd770..9b6e65effd 100644 --- a/services/document-updater/app/coffee/sharejs/model.js +++ b/services/document-updater/app/coffee/sharejs/model.js @@ -1,603 +1,699 @@ -# The model of all the ops. Responsible for applying & transforming remote deltas -# and managing the storage layer. -# -# Actual storage is handled by the database wrappers in db/*, wrapped by DocCache +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS104: Avoid inline assignments + * DS204: Change includes calls to have a more natural evaluation order + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// The model of all the ops. Responsible for applying & transforming remote deltas +// and managing the storage layer. 
+// +// Actual storage is handled by the database wrappers in db/*, wrapped by DocCache -{EventEmitter} = require 'events' +let Model; +const {EventEmitter} = require('events'); -queue = require './syncqueue' -types = require '../types' +const queue = require('./syncqueue'); +const types = require('../types'); -isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]' +const isArray = o => Object.prototype.toString.call(o) === '[object Array]'; -# This constructor creates a new Model object. There will be one model object -# per server context. -# -# The model object is responsible for a lot of things: -# -# - It manages the interactions with the database -# - It maintains (in memory) a set of all active documents -# - It calls out to the OT functions when necessary -# -# The model is an event emitter. It emits the following events: -# -# create(docName, data): A document has been created with the specified name & data -module.exports = Model = (db, options) -> - # db can be null if the user doesn't want persistance. +// This constructor creates a new Model object. There will be one model object +// per server context. +// +// The model object is responsible for a lot of things: +// +// - It manages the interactions with the database +// - It maintains (in memory) a set of all active documents +// - It calls out to the OT functions when necessary +// +// The model is an event emitter. It emits the following events: +// +// create(docName, data): A document has been created with the specified name & data +module.exports = (Model = function(db, options) { + // db can be null if the user doesn't want persistance. - return new Model(db, options) if !(this instanceof Model) + let getOps; + if (!(this instanceof Model)) { return new Model(db, options); } - model = this + const model = this; - options ?= {} + if (options == null) { options = {}; } - # This is a cache of 'live' documents. - # - # The cache is a map from docName -> { - # ops:[{op, meta}] - # snapshot - # type - # v - # meta - # eventEmitter - # reapTimer - # committedVersion: v - # snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant - # dbMeta: database specific data - # opQueue: syncQueue for processing ops - # } - # - # The ops list contains the document's last options.numCachedOps ops. (Or all - # of them if we're using a memory store). - # - # Documents are stored in this set so long as the document has been accessed in - # the last few seconds (options.reapTime) OR at least one client has the document - # open. I don't know if I should keep open (but not being edited) documents live - - # maybe if a client has a document open but the document isn't being edited, I should - # flush it from the cache. - # - # In any case, the API to model is designed such that if we want to change that later - # it should be pretty easy to do so without any external-to-the-model code changes. - docs = {} + // This is a cache of 'live' documents. + // + // The cache is a map from docName -> { + // ops:[{op, meta}] + // snapshot + // type + // v + // meta + // eventEmitter + // reapTimer + // committedVersion: v + // snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant + // dbMeta: database specific data + // opQueue: syncQueue for processing ops + // } + // + // The ops list contains the document's last options.numCachedOps ops. (Or all + // of them if we're using a memory store). 
+ // + // Documents are stored in this set so long as the document has been accessed in + // the last few seconds (options.reapTime) OR at least one client has the document + // open. I don't know if I should keep open (but not being edited) documents live - + // maybe if a client has a document open but the document isn't being edited, I should + // flush it from the cache. + // + // In any case, the API to model is designed such that if we want to change that later + // it should be pretty easy to do so without any external-to-the-model code changes. + const docs = {}; - # This is a map from docName -> [callback]. It is used when a document hasn't been - # cached and multiple getSnapshot() / getVersion() requests come in. All requests - # are added to the callback list and called when db.getSnapshot() returns. - # - # callback(error, snapshot data) - awaitingGetSnapshot = {} + // This is a map from docName -> [callback]. It is used when a document hasn't been + // cached and multiple getSnapshot() / getVersion() requests come in. All requests + // are added to the callback list and called when db.getSnapshot() returns. + // + // callback(error, snapshot data) + const awaitingGetSnapshot = {}; - # The time that documents which no clients have open will stay in the cache. - # Should be > 0. - options.reapTime ?= 3000 + // The time that documents which no clients have open will stay in the cache. + // Should be > 0. + if (options.reapTime == null) { options.reapTime = 3000; } - # The number of operations the cache holds before reusing the space - options.numCachedOps ?= 10 + // The number of operations the cache holds before reusing the space + if (options.numCachedOps == null) { options.numCachedOps = 10; } - # This option forces documents to be reaped, even when there's no database backend. - # This is useful when you don't care about persistance and don't want to gradually - # fill memory. - # - # You might want to set reapTime to a day or something. - options.forceReaping ?= false + // This option forces documents to be reaped, even when there's no database backend. + // This is useful when you don't care about persistance and don't want to gradually + // fill memory. + // + // You might want to set reapTime to a day or something. + if (options.forceReaping == null) { options.forceReaping = false; } - # Until I come up with a better strategy, we'll save a copy of the document snapshot - # to the database every ~20 submitted ops. - options.opsBeforeCommit ?= 20 + // Until I come up with a better strategy, we'll save a copy of the document snapshot + // to the database every ~20 submitted ops. + if (options.opsBeforeCommit == null) { options.opsBeforeCommit = 20; } - # It takes some processing time to transform client ops. The server will punt ops back to the - # client to transform if they're too old. - options.maximumAge ?= 40 + // It takes some processing time to transform client ops. The server will punt ops back to the + // client to transform if they're too old. + if (options.maximumAge == null) { options.maximumAge = 40; } - # **** Cache API methods + // **** Cache API methods - # Its important that all ops are applied in order. This helper method creates the op submission queue - # for a single document. This contains the logic for transforming & applying ops. 
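// A minimal sketch of the serial-queue contract assumed from ./syncqueue
// (names here are illustrative): enqueue items, process strictly one at a
// time, and expose the `busy` flag that the reaper below checks.
function serialQueue(process) {
  const pending = [];
  const enqueue = (data, callback) => {
    pending.push([data, callback]);
    if (!enqueue.busy) next();
  };
  enqueue.busy = false;
  const next = () => {
    if (pending.length === 0) { enqueue.busy = false; return; }
    enqueue.busy = true;
    const [data, callback] = pending.shift();
    process(data, (...args) => {
      if (callback) { callback(...args); }
      next(); // only start the next item once this one has fully finished
    });
  };
  return enqueue;
}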
- makeOpQueue = (docName, doc) -> queue (opData, callback) -> - return callback 'Version missing' unless opData.v >= 0 - return callback 'Op at future version' if opData.v > doc.v + // Its important that all ops are applied in order. This helper method creates the op submission queue + // for a single document. This contains the logic for transforming & applying ops. + const makeOpQueue = (docName, doc) => queue(function(opData, callback) { + if (!(opData.v >= 0)) { return callback('Version missing'); } + if (opData.v > doc.v) { return callback('Op at future version'); } - # Punt the transforming work back to the client if the op is too old. - return callback 'Op too old' if opData.v + options.maximumAge < doc.v + // Punt the transforming work back to the client if the op is too old. + if ((opData.v + options.maximumAge) < doc.v) { return callback('Op too old'); } - opData.meta ||= {} - opData.meta.ts = Date.now() + if (!opData.meta) { opData.meta = {}; } + opData.meta.ts = Date.now(); - # We'll need to transform the op to the current version of the document. This - # calls the callback immediately if opVersion == doc.v. - getOps docName, opData.v, doc.v, (error, ops) -> - return callback error if error + // We'll need to transform the op to the current version of the document. This + // calls the callback immediately if opVersion == doc.v. + return getOps(docName, opData.v, doc.v, function(error, ops) { + let snapshot; + if (error) { return callback(error); } - unless doc.v - opData.v == ops.length - # This should never happen. It indicates that we didn't get all the ops we - # asked for. Its important that the submitted op is correctly transformed. - console.error "Could not get old ops in model for document #{docName}" - console.error "Expected ops #{opData.v} to #{doc.v} and got #{ops.length} ops" - return callback 'Internal error' + if ((doc.v - opData.v) !== ops.length) { + // This should never happen. It indicates that we didn't get all the ops we + // asked for. Its important that the submitted op is correctly transformed. + console.error(`Could not get old ops in model for document ${docName}`); + console.error(`Expected ops ${opData.v} to ${doc.v} and got ${ops.length} ops`); + return callback('Internal error'); + } - if ops.length > 0 - try - # If there's enough ops, it might be worth spinning this out into a webworker thread. - for oldOp in ops - # Dup detection works by sending the id(s) the op has been submitted with previously. - # If the id matches, we reject it. The client can also detect the op has been submitted - # already if it sees its own previous id in the ops it sees when it does catchup. - if oldOp.meta.source and opData.dupIfSource and oldOp.meta.source in opData.dupIfSource - return callback 'Op already submitted' + if (ops.length > 0) { + try { + // If there's enough ops, it might be worth spinning this out into a webworker thread. + for (let oldOp of Array.from(ops)) { + // Dup detection works by sending the id(s) the op has been submitted with previously. + // If the id matches, we reject it. The client can also detect the op has been submitted + // already if it sees its own previous id in the ops it sees when it does catchup. 
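// CoffeeScript's `a in list` comes out as Array.from(list).includes(a); on a
// plain array the Array.from wrapper is redundant (decaffeinate's DS101):
console.log(['user-1', 'user-2'].includes('user-1')); // true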
+ if (oldOp.meta.source && opData.dupIfSource && Array.from(opData.dupIfSource).includes(oldOp.meta.source)) { + return callback('Op already submitted'); + } - opData.op = doc.type.transform opData.op, oldOp.op, 'left' - opData.v++ - catch error - console.error error.stack - return callback error.message + opData.op = doc.type.transform(opData.op, oldOp.op, 'left'); + opData.v++; + } + } catch (error1) { + error = error1; + console.error(error.stack); + return callback(error.message); + } + } - try - snapshot = doc.type.apply doc.snapshot, opData.op - catch error - console.error error.stack - return callback error.message + try { + snapshot = doc.type.apply(doc.snapshot, opData.op); + } catch (error2) { + error = error2; + console.error(error.stack); + return callback(error.message); + } - # The op data should be at the current version, and the new document data should be at - # the next version. - # - # This should never happen in practice, but its a nice little check to make sure everything - # is hunky-dory. - unless opData.v == doc.v - # This should never happen. - console.error "Version mismatch detected in model. File a ticket - this is a bug." - console.error "Expecting #{opData.v} == #{doc.v}" - return callback 'Internal error' + // The op data should be at the current version, and the new document data should be at + // the next version. + // + // This should never happen in practice, but its a nice little check to make sure everything + // is hunky-dory. + if (opData.v !== doc.v) { + // This should never happen. + console.error("Version mismatch detected in model. File a ticket - this is a bug."); + console.error(`Expecting ${opData.v} == ${doc.v}`); + return callback('Internal error'); + } - #newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta} - writeOp = db?.writeOp or (docName, newOpData, callback) -> callback() + //newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta} + const writeOp = (db != null ? db.writeOp : undefined) || ((docName, newOpData, callback) => callback()); - writeOp docName, opData, (error) -> - if error - # The user should probably know about this. - console.warn "Error writing ops to database: #{error}" - return callback error + return writeOp(docName, opData, function(error) { + if (error) { + // The user should probably know about this. + console.warn(`Error writing ops to database: ${error}`); + return callback(error); + } - options.stats?.writeOp?() + __guardMethod__(options.stats, 'writeOp', o => o.writeOp()); - # This is needed when we emit the 'change' event, below. - oldSnapshot = doc.snapshot + // This is needed when we emit the 'change' event, below. + const oldSnapshot = doc.snapshot; - # All the heavy lifting is now done. Finally, we'll update the cache with the new data - # and (maybe!) save a new document snapshot to the database. + // All the heavy lifting is now done. Finally, we'll update the cache with the new data + // and (maybe!) save a new document snapshot to the database. 
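// (Concretely: an op submitted at v = 5 against a doc at v = 8 is
//  transformed over cached ops 5..7, applied with opData.v === 8, and
//  leaves doc.v === 9.)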
- doc.v = opData.v + 1 - doc.snapshot = snapshot + doc.v = opData.v + 1; + doc.snapshot = snapshot; - doc.ops.push opData - doc.ops.shift() if db and doc.ops.length > options.numCachedOps + doc.ops.push(opData); + if (db && (doc.ops.length > options.numCachedOps)) { doc.ops.shift(); } - model.emit 'applyOp', docName, opData, snapshot, oldSnapshot - doc.eventEmitter.emit 'op', opData, snapshot, oldSnapshot + model.emit('applyOp', docName, opData, snapshot, oldSnapshot); + doc.eventEmitter.emit('op', opData, snapshot, oldSnapshot); - # The callback is called with the version of the document at which the op was applied. - # This is the op.v after transformation, and its doc.v - 1. - callback null, opData.v + // The callback is called with the version of the document at which the op was applied. + // This is the op.v after transformation, and its doc.v - 1. + callback(null, opData.v); - # I need a decent strategy here for deciding whether or not to save the snapshot. - # - # The 'right' strategy looks something like "Store the snapshot whenever the snapshot - # is smaller than the accumulated op data". For now, I'll just store it every 20 - # ops or something. (Configurable with doc.committedVersion) - if !doc.snapshotWriteLock and doc.committedVersion + options.opsBeforeCommit <= doc.v - tryWriteSnapshot docName, (error) -> - console.warn "Error writing snapshot #{error}. This is nonfatal" if error + // I need a decent strategy here for deciding whether or not to save the snapshot. + // + // The 'right' strategy looks something like "Store the snapshot whenever the snapshot + // is smaller than the accumulated op data". For now, I'll just store it every 20 + // ops or something. (Configurable with doc.committedVersion) + if (!doc.snapshotWriteLock && ((doc.committedVersion + options.opsBeforeCommit) <= doc.v)) { + return tryWriteSnapshot(docName, function(error) { + if (error) { return console.warn(`Error writing snapshot ${error}. This is nonfatal`); } + }); + } + }); + }); + }); - # Add the data for the given docName to the cache. The named document shouldn't already - # exist in the doc set. - # - # Returns the new doc. - add = (docName, error, data, committedVersion, ops, dbMeta) -> - callbacks = awaitingGetSnapshot[docName] - delete awaitingGetSnapshot[docName] + // Add the data for the given docName to the cache. The named document shouldn't already + // exist in the doc set. + // + // Returns the new doc. + const add = function(docName, error, data, committedVersion, ops, dbMeta) { + let callback, doc; + const callbacks = awaitingGetSnapshot[docName]; + delete awaitingGetSnapshot[docName]; - if error - callback error for callback in callbacks if callbacks - else - doc = docs[docName] = - snapshot: data.snapshot - v: data.v - type: data.type - meta: data.meta + if (error) { + if (callbacks) { for (callback of Array.from(callbacks)) { callback(error); } } + } else { + doc = (docs[docName] = { + snapshot: data.snapshot, + v: data.v, + type: data.type, + meta: data.meta, - # Cache of ops - ops: ops or [] + // Cache of ops + ops: ops || [], - eventEmitter: new EventEmitter + eventEmitter: new EventEmitter, - # Timer before the document will be invalidated from the cache (if the document has no - # listeners) - reapTimer: null + // Timer before the document will be invalidated from the cache (if the document has no + // listeners) + reapTimer: null, - # Version of the snapshot thats in the database - committedVersion: committedVersion ? 
data.v - snapshotWriteLock: false - dbMeta: dbMeta + // Version of the snapshot thats in the database + committedVersion: committedVersion != null ? committedVersion : data.v, + snapshotWriteLock: false, + dbMeta + }); - doc.opQueue = makeOpQueue docName, doc + doc.opQueue = makeOpQueue(docName, doc); - refreshReapingTimeout docName - model.emit 'add', docName, data - callback null, doc for callback in callbacks if callbacks + refreshReapingTimeout(docName); + model.emit('add', docName, data); + if (callbacks) { for (callback of Array.from(callbacks)) { callback(null, doc); } } + } - doc + return doc; + }; - # This is a little helper wrapper around db.getOps. It does two things: - # - # - If there's no database set, it returns an error to the callback - # - It adds version numbers to each op returned from the database - # (These can be inferred from context so the DB doesn't store them, but its useful to have them). - getOpsInternal = (docName, start, end, callback) -> - return callback? 'Document does not exist' unless db + // This is a little helper wrapper around db.getOps. It does two things: + // + // - If there's no database set, it returns an error to the callback + // - It adds version numbers to each op returned from the database + // (These can be inferred from context so the DB doesn't store them, but its useful to have them). + const getOpsInternal = function(docName, start, end, callback) { + if (!db) { return (typeof callback === 'function' ? callback('Document does not exist') : undefined); } - db.getOps docName, start, end, (error, ops) -> - return callback? error if error + return db.getOps(docName, start, end, function(error, ops) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - v = start - op.v = v++ for op in ops + let v = start; + for (let op of Array.from(ops)) { op.v = v++; } - callback? null, ops + return (typeof callback === 'function' ? callback(null, ops) : undefined); + }); + }; - # Load the named document into the cache. This function is re-entrant. - # - # The callback is called with (error, doc) - load = (docName, callback) -> - if docs[docName] - # The document is already loaded. Return immediately. - options.stats?.cacheHit? 'getSnapshot' - return callback null, docs[docName] + // Load the named document into the cache. This function is re-entrant. + // + // The callback is called with (error, doc) + const load = function(docName, callback) { + if (docs[docName]) { + // The document is already loaded. Return immediately. + __guardMethod__(options.stats, 'cacheHit', o => o.cacheHit('getSnapshot')); + return callback(null, docs[docName]); + } - # We're a memory store. If we don't have it, nobody does. - return callback 'Document does not exist' unless db + // We're a memory store. If we don't have it, nobody does. + if (!db) { return callback('Document does not exist'); } - callbacks = awaitingGetSnapshot[docName] + const callbacks = awaitingGetSnapshot[docName]; - # The document is being loaded already. Add ourselves as a callback. - return callbacks.push callback if callbacks + // The document is being loaded already. Add ourselves as a callback. + if (callbacks) { return callbacks.push(callback); } - options.stats?.cacheMiss? 'getSnapshot' + __guardMethod__(options.stats, 'cacheMiss', o1 => o1.cacheMiss('getSnapshot')); - # The document isn't loaded and isn't being loaded. Load it. 
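// The request-coalescing pattern used by load(), in miniature (a sketch
// assuming any node-style fetch(key, cb)):
const waiting = {};
function coalesce(key, fetch, cb) {
  if (waiting[key]) { return waiting[key].push(cb); } // a load is already in flight
  waiting[key] = [cb];
  fetch(key, (err, value) => {
    const cbs = waiting[key];
    delete waiting[key];
    for (const c of cbs) { c(err, value); } // answer every queued caller at once
  });
}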
- awaitingGetSnapshot[docName] = [callback] - db.getSnapshot docName, (error, data, dbMeta) -> - return add docName, error if error + // The document isn't loaded and isn't being loaded. Load it. + awaitingGetSnapshot[docName] = [callback]; + return db.getSnapshot(docName, function(error, data, dbMeta) { + if (error) { return add(docName, error); } - type = types[data.type] - unless type - console.warn "Type '#{data.type}' missing" - return callback "Type not found" - data.type = type + const type = types[data.type]; + if (!type) { + console.warn(`Type '${data.type}' missing`); + return callback("Type not found"); + } + data.type = type; - committedVersion = data.v + const committedVersion = data.v; - # The server can close without saving the most recent document snapshot. - # In this case, there are extra ops which need to be applied before - # returning the snapshot. - getOpsInternal docName, data.v, null, (error, ops) -> - return callback error if error + // The server can close without saving the most recent document snapshot. + // In this case, there are extra ops which need to be applied before + // returning the snapshot. + return getOpsInternal(docName, data.v, null, function(error, ops) { + if (error) { return callback(error); } - if ops.length > 0 - console.log "Catchup #{docName} #{data.v} -> #{data.v + ops.length}" + if (ops.length > 0) { + console.log(`Catchup ${docName} ${data.v} -> ${data.v + ops.length}`); - try - for op in ops - data.snapshot = type.apply data.snapshot, op.op - data.v++ - catch e - # This should never happen - it indicates that whats in the - # database is invalid. - console.error "Op data invalid for #{docName}: #{e.stack}" - return callback 'Op data invalid' + try { + for (let op of Array.from(ops)) { + data.snapshot = type.apply(data.snapshot, op.op); + data.v++; + } + } catch (e) { + // This should never happen - it indicates that whats in the + // database is invalid. + console.error(`Op data invalid for ${docName}: ${e.stack}`); + return callback('Op data invalid'); + } + } - model.emit 'load', docName, data - add docName, error, data, committedVersion, ops, dbMeta + model.emit('load', docName, data); + return add(docName, error, data, committedVersion, ops, dbMeta); + }); + }); + }; - # This makes sure the cache contains a document. If the doc cache doesn't contain - # a document, it is loaded from the database and stored. - # - # Documents are stored so long as either: - # - They have been accessed within the past #{PERIOD} - # - At least one client has the document open - refreshReapingTimeout = (docName) -> - doc = docs[docName] - return unless doc + // This makes sure the cache contains a document. If the doc cache doesn't contain + // a document, it is loaded from the database and stored. + // + // Documents are stored so long as either: + // - They have been accessed within the past #{PERIOD} + // - At least one client has the document open + var refreshReapingTimeout = function(docName) { + const doc = docs[docName]; + if (!doc) { return; } - # I want to let the clients list be updated before this is called. - process.nextTick -> - # This is an awkward way to find out the number of clients on a document. If this - # causes performance issues, add a numClients field to the document. - # - # The first check is because its possible that between refreshReapingTimeout being called and this - # event being fired, someone called delete() on the document and hence the doc is something else now. 
- if doc == docs[docName] and - doc.eventEmitter.listeners('op').length == 0 and - (db or options.forceReaping) and - doc.opQueue.busy is false + // I want to let the clients list be updated before this is called. + return process.nextTick(function() { + // This is an awkward way to find out the number of clients on a document. If this + // causes performance issues, add a numClients field to the document. + // + // The first check is because its possible that between refreshReapingTimeout being called and this + // event being fired, someone called delete() on the document and hence the doc is something else now. + if ((doc === docs[docName]) && + (doc.eventEmitter.listeners('op').length === 0) && + (db || options.forceReaping) && + (doc.opQueue.busy === false)) { - clearTimeout doc.reapTimer - doc.reapTimer = reapTimer = setTimeout -> - tryWriteSnapshot docName, -> - # If the reaping timeout has been refreshed while we're writing the snapshot, or if we're - # in the middle of applying an operation, don't reap. - delete docs[docName] if docs[docName].reapTimer is reapTimer and doc.opQueue.busy is false - , options.reapTime + let reapTimer; + clearTimeout(doc.reapTimer); + return doc.reapTimer = (reapTimer = setTimeout(() => tryWriteSnapshot(docName, function() { + // If the reaping timeout has been refreshed while we're writing the snapshot, or if we're + // in the middle of applying an operation, don't reap. + if ((docs[docName].reapTimer === reapTimer) && (doc.opQueue.busy === false)) { return delete docs[docName]; } + }) + , options.reapTime)); + } + }); + }; - tryWriteSnapshot = (docName, callback) -> - return callback?() unless db + var tryWriteSnapshot = function(docName, callback) { + if (!db) { return (typeof callback === 'function' ? callback() : undefined); } - doc = docs[docName] + const doc = docs[docName]; - # The doc is closed - return callback?() unless doc + // The doc is closed + if (!doc) { return (typeof callback === 'function' ? callback() : undefined); } - # The document is already saved. - return callback?() if doc.committedVersion is doc.v + // The document is already saved. + if (doc.committedVersion === doc.v) { return (typeof callback === 'function' ? callback() : undefined); } - return callback? 'Another snapshot write is in progress' if doc.snapshotWriteLock + if (doc.snapshotWriteLock) { return (typeof callback === 'function' ? callback('Another snapshot write is in progress') : undefined); } - doc.snapshotWriteLock = true + doc.snapshotWriteLock = true; - options.stats?.writeSnapshot?() + __guardMethod__(options.stats, 'writeSnapshot', o => o.writeSnapshot()); - writeSnapshot = db?.writeSnapshot or (docName, docData, dbMeta, callback) -> callback() + const writeSnapshot = (db != null ? db.writeSnapshot : undefined) || ((docName, docData, dbMeta, callback) => callback()); - data = - v: doc.v - meta: doc.meta - snapshot: doc.snapshot - # The database doesn't know about object types. + const data = { + v: doc.v, + meta: doc.meta, + snapshot: doc.snapshot, + // The database doesn't know about object types. type: doc.type.name + }; - # Commit snapshot. - writeSnapshot docName, data, doc.dbMeta, (error, dbMeta) -> - doc.snapshotWriteLock = false + // Commit snapshot. + return writeSnapshot(docName, data, doc.dbMeta, function(error, dbMeta) { + doc.snapshotWriteLock = false; - # We have to use data.v here because the version in the doc could - # have been updated between the call to writeSnapshot() and now. 
- doc.committedVersion = data.v - doc.dbMeta = dbMeta + // We have to use data.v here because the version in the doc could + // have been updated between the call to writeSnapshot() and now. + doc.committedVersion = data.v; + doc.dbMeta = dbMeta; - callback? error + return (typeof callback === 'function' ? callback(error) : undefined); + }); + }; - # *** Model interface methods + // *** Model interface methods - # Create a new document. - # - # data should be {snapshot, type, [meta]}. The version of a new document is 0. - @create = (docName, type, meta, callback) -> - [meta, callback] = [{}, meta] if typeof meta is 'function' + // Create a new document. + // + // data should be {snapshot, type, [meta]}. The version of a new document is 0. + this.create = function(docName, type, meta, callback) { + if (typeof meta === 'function') { [meta, callback] = Array.from([{}, meta]); } - return callback? 'Invalid document name' if docName.match /\// - return callback? 'Document already exists' if docs[docName] + if (docName.match(/\//)) { return (typeof callback === 'function' ? callback('Invalid document name') : undefined); } + if (docs[docName]) { return (typeof callback === 'function' ? callback('Document already exists') : undefined); } - type = types[type] if typeof type == 'string' - return callback? 'Type not found' unless type + if (typeof type === 'string') { type = types[type]; } + if (!type) { return (typeof callback === 'function' ? callback('Type not found') : undefined); } - data = - snapshot:type.create() - type:type.name - meta:meta or {} + const data = { + snapshot:type.create(), + type:type.name, + meta:meta || {}, v:0 + }; - done = (error, dbMeta) -> - # dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something. - return callback? error if error + const done = function(error, dbMeta) { + // dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something. + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - # From here on we'll store the object version of the type name. - data.type = type - add docName, null, data, 0, [], dbMeta - model.emit 'create', docName, data - callback?() + // From here on we'll store the object version of the type name. + data.type = type; + add(docName, null, data, 0, [], dbMeta); + model.emit('create', docName, data); + return (typeof callback === 'function' ? callback() : undefined); + }; - if db - db.create docName, data, done - else - done() + if (db) { + return db.create(docName, data, done); + } else { + return done(); + } + }; - # Perminantly deletes the specified document. - # If listeners are attached, they are removed. - # - # The callback is called with (error) if there was an error. If error is null / undefined, the - # document was deleted. - # - # WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the - # deletion. Subsequent op submissions will fail). - @delete = (docName, callback) -> - doc = docs[docName] + // Perminantly deletes the specified document. + // If listeners are attached, they are removed. + // + // The callback is called with (error) if there was an error. If error is null / undefined, the + // document was deleted. + // + // WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the + // deletion. Subsequent op submissions will fail). 
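// A hedged usage sketch of the public API converted here (in-memory model,
// db === null; assumes '../types' registers the 'text' type):
const Model = require('./model');
const model = new Model(null);
model.create('demo', 'text', {}, (err) => {
  if (err) { throw new Error(err); }
  model.getSnapshot('demo', (err2, data) => {
    console.log(data.v); // 0 - new documents start at version 0
  });
});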
+ this.delete = function(docName, callback) { + const doc = docs[docName]; - if doc - clearTimeout doc.reapTimer - delete docs[docName] + if (doc) { + clearTimeout(doc.reapTimer); + delete docs[docName]; + } - done = (error) -> - model.emit 'delete', docName unless error - callback? error + const done = function(error) { + if (!error) { model.emit('delete', docName); } + return (typeof callback === 'function' ? callback(error) : undefined); + }; - if db - db.delete docName, doc?.dbMeta, done - else - done (if !doc then 'Document does not exist') + if (db) { + return db.delete(docName, doc != null ? doc.dbMeta : undefined, done); + } else { + return done((!doc ? 'Document does not exist' : undefined)); + } + }; - # This gets all operations from [start...end]. (That is, its not inclusive.) - # - # end can be null. This means 'get me all ops from start'. - # - # Each op returned is in the form {op:o, meta:m, v:version}. - # - # Callback is called with (error, [ops]) - # - # If the document does not exist, getOps doesn't necessarily return an error. This is because - # its awkward to figure out whether or not the document exists for things - # like the redis database backend. I guess its a bit gross having this inconsistant - # with the other DB calls, but its certainly convenient. - # - # Use getVersion() to determine if a document actually exists, if thats what you're - # after. - @getOps = getOps = (docName, start, end, callback) -> - # getOps will only use the op cache if its there. It won't fill the op cache in. - throw new Error 'start must be 0+' unless start >= 0 + // This gets all operations from [start...end]. (That is, its not inclusive.) + // + // end can be null. This means 'get me all ops from start'. + // + // Each op returned is in the form {op:o, meta:m, v:version}. + // + // Callback is called with (error, [ops]) + // + // If the document does not exist, getOps doesn't necessarily return an error. This is because + // its awkward to figure out whether or not the document exists for things + // like the redis database backend. I guess its a bit gross having this inconsistant + // with the other DB calls, but its certainly convenient. + // + // Use getVersion() to determine if a document actually exists, if thats what you're + // after. + this.getOps = (getOps = function(docName, start, end, callback) { + // getOps will only use the op cache if its there. It won't fill the op cache in. + if (!(start >= 0)) { throw new Error('start must be 0+'); } - [end, callback] = [null, end] if typeof end is 'function' + if (typeof end === 'function') { [end, callback] = Array.from([null, end]); } - ops = docs[docName]?.ops + const ops = docs[docName] != null ? docs[docName].ops : undefined; - if ops - version = docs[docName].v + if (ops) { + const version = docs[docName].v; - # Ops contains an array of ops. The last op in the list is the last op applied - end ?= version - start = Math.min start, end + // Ops contains an array of ops. The last op in the list is the last op applied + if (end == null) { end = version; } + start = Math.min(start, end); - return callback null, [] if start == end + if (start === end) { return callback(null, []); } - # Base is the version number of the oldest op we have cached - base = version - ops.length + // Base is the version number of the oldest op we have cached + const base = version - ops.length; - # If the database is null, we'll trim to the ops we do have and hope thats enough. 
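// (e.g. with doc.v === 30 and 10 cached ops, base === 20; a request for
//  ops [25, 28) is answered from ops.slice(5, 8) without touching the db.)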
- if start >= base or db is null - refreshReapingTimeout docName - options.stats?.cacheHit 'getOps' + // If the database is null, we'll trim to the ops we do have and hope thats enough. + if ((start >= base) || (db === null)) { + refreshReapingTimeout(docName); + if (options.stats != null) { + options.stats.cacheHit('getOps'); + } - return callback null, ops[(start - base)...(end - base)] + return callback(null, ops.slice((start - base), (end - base))); + } + } - options.stats?.cacheMiss 'getOps' + if (options.stats != null) { + options.stats.cacheMiss('getOps'); + } - getOpsInternal docName, start, end, callback + return getOpsInternal(docName, start, end, callback); + }); - # Gets the snapshot data for the specified document. - # getSnapshot(docName, callback) - # Callback is called with (error, {v: , type: , snapshot: , meta: }) - @getSnapshot = (docName, callback) -> - load docName, (error, doc) -> - callback error, if doc then {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta} + // Gets the snapshot data for the specified document. + // getSnapshot(docName, callback) + // Callback is called with (error, {v: , type: , snapshot: , meta: }) + this.getSnapshot = (docName, callback) => load(docName, (error, doc) => callback(error, doc ? {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta} : undefined)); - # Gets the latest version # of the document. - # getVersion(docName, callback) - # callback is called with (error, version). - @getVersion = (docName, callback) -> - load docName, (error, doc) -> callback error, doc?.v + // Gets the latest version # of the document. + // getVersion(docName, callback) + // callback is called with (error, version). + this.getVersion = (docName, callback) => load(docName, (error, doc) => callback(error, doc != null ? doc.v : undefined)); - # Apply an op to the specified document. - # The callback is passed (error, applied version #) - # opData = {op:op, v:v, meta:metadata} - # - # Ops are queued before being applied so that the following code applies op C before op B: - # model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB - # model.applyOp 'doc', OPC - @applyOp = (docName, opData, callback) -> - # All the logic for this is in makeOpQueue, above. - load docName, (error, doc) -> - return callback error if error + // Apply an op to the specified document. + // The callback is passed (error, applied version #) + // opData = {op:op, v:v, meta:metadata} + // + // Ops are queued before being applied so that the following code applies op C before op B: + // model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB + // model.applyOp 'doc', OPC + this.applyOp = (docName, opData, callback) => // All the logic for this is in makeOpQueue, above. + load(docName, function(error, doc) { + if (error) { return callback(error); } - process.nextTick -> doc.opQueue opData, (error, newVersion) -> - refreshReapingTimeout docName - callback? error, newVersion + return process.nextTick(() => doc.opQueue(opData, function(error, newVersion) { + refreshReapingTimeout(docName); + return (typeof callback === 'function' ? 
callback(error, newVersion) : undefined); + })); + }); - # TODO: store (some) metadata in DB - # TODO: op and meta should be combineable in the op that gets sent - @applyMetaOp = (docName, metaOpData, callback) -> - {path, value} = metaOpData.meta + // TODO: store (some) metadata in DB + // TODO: op and meta should be combineable in the op that gets sent + this.applyMetaOp = function(docName, metaOpData, callback) { + const {path, value} = metaOpData.meta; - return callback? "path should be an array" unless isArray path + if (!isArray(path)) { return (typeof callback === 'function' ? callback("path should be an array") : undefined); } - load docName, (error, doc) -> - if error? - callback? error - else - applied = false - switch path[0] - when 'shout' - doc.eventEmitter.emit 'op', metaOpData - applied = true + return load(docName, function(error, doc) { + if (error != null) { + return (typeof callback === 'function' ? callback(error) : undefined); + } else { + let applied = false; + switch (path[0]) { + case 'shout': + doc.eventEmitter.emit('op', metaOpData); + applied = true; + break; + } - model.emit 'applyMetaOp', docName, path, value if applied - callback? null, doc.v + if (applied) { model.emit('applyMetaOp', docName, path, value); } + return (typeof callback === 'function' ? callback(null, doc.v) : undefined); + } + }); + }; - # Listen to all ops from the specified version. If version is in the past, all - # ops since that version are sent immediately to the listener. - # - # The callback is called once the listener is attached, but before any ops have been passed - # to the listener. - # - # This will _not_ edit the document metadata. - # - # If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour - # might change in a future version. - # - # version is the document version at which the document is opened. It can be left out if you want to open - # the document at the most recent version. - # - # listener is called with (opData) each time an op is applied. - # - # callback(error, openedVersion) - @listen = (docName, version, listener, callback) -> - [version, listener, callback] = [null, version, listener] if typeof version is 'function' + // Listen to all ops from the specified version. If version is in the past, all + // ops since that version are sent immediately to the listener. + // + // The callback is called once the listener is attached, but before any ops have been passed + // to the listener. + // + // This will _not_ edit the document metadata. + // + // If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour + // might change in a future version. + // + // version is the document version at which the document is opened. It can be left out if you want to open + // the document at the most recent version. + // + // listener is called with (opData) each time an op is applied. + // + // callback(error, openedVersion) + this.listen = function(docName, version, listener, callback) { + if (typeof version === 'function') { [version, listener, callback] = Array.from([null, version, listener]); } - load docName, (error, doc) -> - return callback? error if error + return load(docName, function(error, doc) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - clearTimeout doc.reapTimer + clearTimeout(doc.reapTimer); - if version? - getOps docName, version, null, (error, data) -> - return callback? 
error if error + if (version != null) { + return getOps(docName, version, null, function(error, data) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - doc.eventEmitter.on 'op', listener - callback? null, version - for op in data - listener op + doc.eventEmitter.on('op', listener); + if (typeof callback === 'function') { + callback(null, version); + } + return (() => { + const result = []; + for (let op of Array.from(data)) { + var needle; + listener(op); - # The listener may well remove itself during the catchup phase. If this happens, break early. - # This is done in a quite inefficient way. (O(n) where n = #listeners on doc) - break unless listener in doc.eventEmitter.listeners 'op' + // The listener may well remove itself during the catchup phase. If this happens, break early. + // This is done in a quite inefficient way. (O(n) where n = #listeners on doc) + if ((needle = listener, !Array.from(doc.eventEmitter.listeners('op')).includes(needle))) { break; } else { + result.push(undefined); + } + } + return result; + })(); + }); - else # Version is null / undefined. Just add the listener. - doc.eventEmitter.on 'op', listener - callback? null, doc.v + } else { // Version is null / undefined. Just add the listener. + doc.eventEmitter.on('op', listener); + return (typeof callback === 'function' ? callback(null, doc.v) : undefined); + } + }); + }; - # Remove a listener for a particular document. - # - # removeListener(docName, listener) - # - # This is synchronous. - @removeListener = (docName, listener) -> - # The document should already be loaded. - doc = docs[docName] - throw new Error 'removeListener called but document not loaded' unless doc + // Remove a listener for a particular document. + // + // removeListener(docName, listener) + // + // This is synchronous. + this.removeListener = function(docName, listener) { + // The document should already be loaded. + const doc = docs[docName]; + if (!doc) { throw new Error('removeListener called but document not loaded'); } - doc.eventEmitter.removeListener 'op', listener - refreshReapingTimeout docName + doc.eventEmitter.removeListener('op', listener); + return refreshReapingTimeout(docName); + }; - # Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed - - # sharejs will happily replay uncommitted ops when documents are re-opened anyway. - @flush = (callback) -> - return callback?() unless db + // Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed - + // sharejs will happily replay uncommitted ops when documents are re-opened anyway. + this.flush = function(callback) { + if (!db) { return (typeof callback === 'function' ? callback() : undefined); } - pendingWrites = 0 + let pendingWrites = 0; - for docName, doc of docs - if doc.committedVersion < doc.v - pendingWrites++ - # I'm hoping writeSnapshot will always happen in another thread. - tryWriteSnapshot docName, -> - process.nextTick -> - pendingWrites-- - callback?() if pendingWrites is 0 + for (let docName in docs) { + const doc = docs[docName]; + if (doc.committedVersion < doc.v) { + pendingWrites++; + // I'm hoping writeSnapshot will always happen in another thread. + tryWriteSnapshot(docName, () => process.nextTick(function() { + pendingWrites--; + if (pendingWrites === 0) { return (typeof callback === 'function' ? callback() : undefined); } + })); + } + } - # If nothing was queued, terminate immediately. 
- callback?() if pendingWrites is 0 + // If nothing was queued, terminate immediately. + if (pendingWrites === 0) { return (typeof callback === 'function' ? callback() : undefined); } + }; - # Close the database connection. This is needed so nodejs can shut down cleanly. - @closeDb = -> - db?.close?() - db = null + // Close the database connection. This is needed so nodejs can shut down cleanly. + this.closeDb = function() { + __guardMethod__(db, 'close', o => o.close()); + return db = null; + }; - return +}); -# Model inherits from EventEmitter. -Model:: = new EventEmitter +// Model inherits from EventEmitter. +Model.prototype = new EventEmitter; + +function __guardMethod__(obj, methodName, transform) { + if (typeof obj !== 'undefined' && obj !== null && typeof obj[methodName] === 'function') { + return transform(obj, methodName); + } else { + return undefined; + } +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/sharejs/server/model.js b/services/document-updater/app/coffee/sharejs/server/model.js index 0e699cce92..42dd7acc64 100644 --- a/services/document-updater/app/coffee/sharejs/server/model.js +++ b/services/document-updater/app/coffee/sharejs/server/model.js @@ -1,606 +1,703 @@ -# The model of all the ops. Responsible for applying & transforming remote deltas -# and managing the storage layer. -# -# Actual storage is handled by the database wrappers in db/*, wrapped by DocCache +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS104: Avoid inline assignments + * DS204: Change includes calls to have a more natural evaluation order + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// The model of all the ops. Responsible for applying & transforming remote deltas +// and managing the storage layer. +// +// Actual storage is handled by the database wrappers in db/*, wrapped by DocCache -{EventEmitter} = require 'events' +let Model; +const {EventEmitter} = require('events'); -queue = require './syncqueue' -types = require '../types' +const queue = require('./syncqueue'); +const types = require('../types'); -isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]' +const isArray = o => Object.prototype.toString.call(o) === '[object Array]'; -# This constructor creates a new Model object. There will be one model object -# per server context. -# -# The model object is responsible for a lot of things: -# -# - It manages the interactions with the database -# - It maintains (in memory) a set of all active documents -# - It calls out to the OT functions when necessary -# -# The model is an event emitter. It emits the following events: -# -# create(docName, data): A document has been created with the specified name & data -module.exports = Model = (db, options) -> - # db can be null if the user doesn't want persistance. +// This constructor creates a new Model object. There will be one model object +// per server context. +// +// The model object is responsible for a lot of things: +// +// - It manages the interactions with the database +// - It maintains (in memory) a set of all active documents +// - It calls out to the OT functions when necessary +// +// The model is an event emitter. 
It emits the following events: +// +// create(docName, data): A document has been created with the specified name & data +module.exports = (Model = function(db, options) { + // db can be null if the user doesn't want persistance. - return new Model(db, options) if !(this instanceof Model) + let getOps; + if (!(this instanceof Model)) { return new Model(db, options); } - model = this + const model = this; - options ?= {} + if (options == null) { options = {}; } - # This is a cache of 'live' documents. - # - # The cache is a map from docName -> { - # ops:[{op, meta}] - # snapshot - # type - # v - # meta - # eventEmitter - # reapTimer - # committedVersion: v - # snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant - # dbMeta: database specific data - # opQueue: syncQueue for processing ops - # } - # - # The ops list contains the document's last options.numCachedOps ops. (Or all - # of them if we're using a memory store). - # - # Documents are stored in this set so long as the document has been accessed in - # the last few seconds (options.reapTime) OR at least one client has the document - # open. I don't know if I should keep open (but not being edited) documents live - - # maybe if a client has a document open but the document isn't being edited, I should - # flush it from the cache. - # - # In any case, the API to model is designed such that if we want to change that later - # it should be pretty easy to do so without any external-to-the-model code changes. - docs = {} + // This is a cache of 'live' documents. + // + // The cache is a map from docName -> { + // ops:[{op, meta}] + // snapshot + // type + // v + // meta + // eventEmitter + // reapTimer + // committedVersion: v + // snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant + // dbMeta: database specific data + // opQueue: syncQueue for processing ops + // } + // + // The ops list contains the document's last options.numCachedOps ops. (Or all + // of them if we're using a memory store). + // + // Documents are stored in this set so long as the document has been accessed in + // the last few seconds (options.reapTime) OR at least one client has the document + // open. I don't know if I should keep open (but not being edited) documents live - + // maybe if a client has a document open but the document isn't being edited, I should + // flush it from the cache. + // + // In any case, the API to model is designed such that if we want to change that later + // it should be pretty easy to do so without any external-to-the-model code changes. + const docs = {}; - # This is a map from docName -> [callback]. It is used when a document hasn't been - # cached and multiple getSnapshot() / getVersion() requests come in. All requests - # are added to the callback list and called when db.getSnapshot() returns. - # - # callback(error, snapshot data) - awaitingGetSnapshot = {} + // This is a map from docName -> [callback]. It is used when a document hasn't been + // cached and multiple getSnapshot() / getVersion() requests come in. All requests + // are added to the callback list and called when db.getSnapshot() returns. + // + // callback(error, snapshot data) + const awaitingGetSnapshot = {}; - # The time that documents which no clients have open will stay in the cache. - # Should be > 0. - options.reapTime ?= 3000 + // The time that documents which no clients have open will stay in the cache. + // Should be > 0. 
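+  // (Illustrative values only: constructing the model as
+  // `new Model(db, {reapTime: 60 * 1000})` would keep idle, unopened
+  // documents cached for a minute before they are reaped.)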
+ if (options.reapTime == null) { options.reapTime = 3000; } - # The number of operations the cache holds before reusing the space - options.numCachedOps ?= 10 + // The number of operations the cache holds before reusing the space + if (options.numCachedOps == null) { options.numCachedOps = 10; } - # This option forces documents to be reaped, even when there's no database backend. - # This is useful when you don't care about persistance and don't want to gradually - # fill memory. - # - # You might want to set reapTime to a day or something. - options.forceReaping ?= false + // This option forces documents to be reaped, even when there's no database backend. + // This is useful when you don't care about persistance and don't want to gradually + // fill memory. + // + // You might want to set reapTime to a day or something. + if (options.forceReaping == null) { options.forceReaping = false; } - # Until I come up with a better strategy, we'll save a copy of the document snapshot - # to the database every ~20 submitted ops. - options.opsBeforeCommit ?= 20 + // Until I come up with a better strategy, we'll save a copy of the document snapshot + // to the database every ~20 submitted ops. + if (options.opsBeforeCommit == null) { options.opsBeforeCommit = 20; } - # It takes some processing time to transform client ops. The server will punt ops back to the - # client to transform if they're too old. - options.maximumAge ?= 40 + // It takes some processing time to transform client ops. The server will punt ops back to the + // client to transform if they're too old. + if (options.maximumAge == null) { options.maximumAge = 40; } - # **** Cache API methods + // **** Cache API methods - # Its important that all ops are applied in order. This helper method creates the op submission queue - # for a single document. This contains the logic for transforming & applying ops. - makeOpQueue = (docName, doc) -> queue (opData, callback) -> - return callback 'Version missing' unless opData.v >= 0 - return callback 'Op at future version' if opData.v > doc.v + // Its important that all ops are applied in order. This helper method creates the op submission queue + // for a single document. This contains the logic for transforming & applying ops. + const makeOpQueue = (docName, doc) => queue(function(opData, callback) { + if (!(opData.v >= 0)) { return callback('Version missing'); } + if (opData.v > doc.v) { return callback('Op at future version'); } - # Punt the transforming work back to the client if the op is too old. - return callback 'Op too old' if opData.v + options.maximumAge < doc.v + // Punt the transforming work back to the client if the op is too old. + if ((opData.v + options.maximumAge) < doc.v) { return callback('Op too old'); } - opData.meta ||= {} - opData.meta.ts = Date.now() + if (!opData.meta) { opData.meta = {}; } + opData.meta.ts = Date.now(); - # We'll need to transform the op to the current version of the document. This - # calls the callback immediately if opVersion == doc.v. - getOps docName, opData.v, doc.v, (error, ops) -> - return callback error if error + // We'll need to transform the op to the current version of the document. This + // calls the callback immediately if opVersion == doc.v. + return getOps(docName, opData.v, doc.v, function(error, ops) { + let snapshot; + if (error) { return callback(error); } - unless doc.v - opData.v == ops.length - # This should never happen. It indicates that we didn't get all the ops we - # asked for. 
Its important that the submitted op is correctly transformed. - console.error "Could not get old ops in model for document #{docName}" - console.error "Expected ops #{opData.v} to #{doc.v} and got #{ops.length} ops" - return callback 'Internal error' + if ((doc.v - opData.v) !== ops.length) { + // This should never happen. It indicates that we didn't get all the ops we + // asked for. Its important that the submitted op is correctly transformed. + console.error(`Could not get old ops in model for document ${docName}`); + console.error(`Expected ops ${opData.v} to ${doc.v} and got ${ops.length} ops`); + return callback('Internal error'); + } - if ops.length > 0 - try - # If there's enough ops, it might be worth spinning this out into a webworker thread. - for oldOp in ops - # Dup detection works by sending the id(s) the op has been submitted with previously. - # If the id matches, we reject it. The client can also detect the op has been submitted - # already if it sees its own previous id in the ops it sees when it does catchup. - if oldOp.meta.source and opData.dupIfSource and oldOp.meta.source in opData.dupIfSource - return callback 'Op already submitted' + if (ops.length > 0) { + try { + // If there's enough ops, it might be worth spinning this out into a webworker thread. + for (let oldOp of Array.from(ops)) { + // Dup detection works by sending the id(s) the op has been submitted with previously. + // If the id matches, we reject it. The client can also detect the op has been submitted + // already if it sees its own previous id in the ops it sees when it does catchup. + if (oldOp.meta.source && opData.dupIfSource && Array.from(opData.dupIfSource).includes(oldOp.meta.source)) { + return callback('Op already submitted'); + } - opData.op = doc.type.transform opData.op, oldOp.op, 'left' - opData.v++ - catch error - console.error error.stack - return callback error.message + opData.op = doc.type.transform(opData.op, oldOp.op, 'left'); + opData.v++; + } + } catch (error1) { + error = error1; + console.error(error.stack); + return callback(error.message); + } + } - try - snapshot = doc.type.apply doc.snapshot, opData.op - catch error - console.error error.stack - return callback error.message + try { + snapshot = doc.type.apply(doc.snapshot, opData.op); + } catch (error2) { + error = error2; + console.error(error.stack); + return callback(error.message); + } - if options.maxDocLength? and doc.snapshot.length > options.maxDocLength - return callback "Update takes doc over max doc size" + if ((options.maxDocLength != null) && (doc.snapshot.length > options.maxDocLength)) { + return callback("Update takes doc over max doc size"); + } - # The op data should be at the current version, and the new document data should be at - # the next version. - # - # This should never happen in practice, but its a nice little check to make sure everything - # is hunky-dory. - unless opData.v == doc.v - # This should never happen. - console.error "Version mismatch detected in model. File a ticket - this is a bug." - console.error "Expecting #{opData.v} == #{doc.v}" - return callback 'Internal error' + // The op data should be at the current version, and the new document data should be at + // the next version. + // + // This should never happen in practice, but its a nice little check to make sure everything + // is hunky-dory. + if (opData.v !== doc.v) { + // This should never happen. + console.error("Version mismatch detected in model. 
File a ticket - this is a bug."); + console.error(`Expecting ${opData.v} == ${doc.v}`); + return callback('Internal error'); + } - #newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta} - writeOp = db?.writeOp or (docName, newOpData, callback) -> callback() + //newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta} + const writeOp = (db != null ? db.writeOp : undefined) || ((docName, newOpData, callback) => callback()); - writeOp docName, opData, (error) -> - if error - # The user should probably know about this. - console.warn "Error writing ops to database: #{error}" - return callback error + return writeOp(docName, opData, function(error) { + if (error) { + // The user should probably know about this. + console.warn(`Error writing ops to database: ${error}`); + return callback(error); + } - options.stats?.writeOp?() + __guardMethod__(options.stats, 'writeOp', o => o.writeOp()); - # This is needed when we emit the 'change' event, below. - oldSnapshot = doc.snapshot + // This is needed when we emit the 'change' event, below. + const oldSnapshot = doc.snapshot; - # All the heavy lifting is now done. Finally, we'll update the cache with the new data - # and (maybe!) save a new document snapshot to the database. + // All the heavy lifting is now done. Finally, we'll update the cache with the new data + // and (maybe!) save a new document snapshot to the database. - doc.v = opData.v + 1 - doc.snapshot = snapshot + doc.v = opData.v + 1; + doc.snapshot = snapshot; - doc.ops.push opData - doc.ops.shift() if db and doc.ops.length > options.numCachedOps + doc.ops.push(opData); + if (db && (doc.ops.length > options.numCachedOps)) { doc.ops.shift(); } - model.emit 'applyOp', docName, opData, snapshot, oldSnapshot - doc.eventEmitter.emit 'op', opData, snapshot, oldSnapshot + model.emit('applyOp', docName, opData, snapshot, oldSnapshot); + doc.eventEmitter.emit('op', opData, snapshot, oldSnapshot); - # The callback is called with the version of the document at which the op was applied. - # This is the op.v after transformation, and its doc.v - 1. - callback null, opData.v + // The callback is called with the version of the document at which the op was applied. + // This is the op.v after transformation, and its doc.v - 1. + callback(null, opData.v); - # I need a decent strategy here for deciding whether or not to save the snapshot. - # - # The 'right' strategy looks something like "Store the snapshot whenever the snapshot - # is smaller than the accumulated op data". For now, I'll just store it every 20 - # ops or something. (Configurable with doc.committedVersion) - if !doc.snapshotWriteLock and doc.committedVersion + options.opsBeforeCommit <= doc.v - tryWriteSnapshot docName, (error) -> - console.warn "Error writing snapshot #{error}. This is nonfatal" if error + // I need a decent strategy here for deciding whether or not to save the snapshot. + // + // The 'right' strategy looks something like "Store the snapshot whenever the snapshot + // is smaller than the accumulated op data". For now, I'll just store it every 20 + // ops or something. (Configurable with doc.committedVersion) + if (!doc.snapshotWriteLock && ((doc.committedVersion + options.opsBeforeCommit) <= doc.v)) { + return tryWriteSnapshot(docName, function(error) { + if (error) { return console.warn(`Error writing snapshot ${error}. This is nonfatal`); } + }); + } + }); + }); + }); - # Add the data for the given docName to the cache. 
The named document shouldn't already - # exist in the doc set. - # - # Returns the new doc. - add = (docName, error, data, committedVersion, ops, dbMeta) -> - callbacks = awaitingGetSnapshot[docName] - delete awaitingGetSnapshot[docName] + // Add the data for the given docName to the cache. The named document shouldn't already + // exist in the doc set. + // + // Returns the new doc. + const add = function(docName, error, data, committedVersion, ops, dbMeta) { + let callback, doc; + const callbacks = awaitingGetSnapshot[docName]; + delete awaitingGetSnapshot[docName]; - if error - callback error for callback in callbacks if callbacks - else - doc = docs[docName] = - snapshot: data.snapshot - v: data.v - type: data.type - meta: data.meta + if (error) { + if (callbacks) { for (callback of Array.from(callbacks)) { callback(error); } } + } else { + doc = (docs[docName] = { + snapshot: data.snapshot, + v: data.v, + type: data.type, + meta: data.meta, - # Cache of ops - ops: ops or [] + // Cache of ops + ops: ops || [], - eventEmitter: new EventEmitter + eventEmitter: new EventEmitter, - # Timer before the document will be invalidated from the cache (if the document has no - # listeners) - reapTimer: null + // Timer before the document will be invalidated from the cache (if the document has no + // listeners) + reapTimer: null, - # Version of the snapshot thats in the database - committedVersion: committedVersion ? data.v - snapshotWriteLock: false - dbMeta: dbMeta + // Version of the snapshot thats in the database + committedVersion: committedVersion != null ? committedVersion : data.v, + snapshotWriteLock: false, + dbMeta + }); - doc.opQueue = makeOpQueue docName, doc + doc.opQueue = makeOpQueue(docName, doc); - refreshReapingTimeout docName - model.emit 'add', docName, data - callback null, doc for callback in callbacks if callbacks + refreshReapingTimeout(docName); + model.emit('add', docName, data); + if (callbacks) { for (callback of Array.from(callbacks)) { callback(null, doc); } } + } - doc + return doc; + }; - # This is a little helper wrapper around db.getOps. It does two things: - # - # - If there's no database set, it returns an error to the callback - # - It adds version numbers to each op returned from the database - # (These can be inferred from context so the DB doesn't store them, but its useful to have them). - getOpsInternal = (docName, start, end, callback) -> - return callback? 'Document does not exist' unless db + // This is a little helper wrapper around db.getOps. It does two things: + // + // - If there's no database set, it returns an error to the callback + // - It adds version numbers to each op returned from the database + // (These can be inferred from context so the DB doesn't store them, but its useful to have them). + const getOpsInternal = function(docName, start, end, callback) { + if (!db) { return (typeof callback === 'function' ? callback('Document does not exist') : undefined); } - db.getOps docName, start, end, (error, ops) -> - return callback? error if error + return db.getOps(docName, start, end, function(error, ops) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - v = start - op.v = v++ for op in ops + let v = start; + for (let op of Array.from(ops)) { op.v = v++; } - callback? null, ops + return (typeof callback === 'function' ? callback(null, ops) : undefined); + }); + }; - # Load the named document into the cache. This function is re-entrant. 
- # - # The callback is called with (error, doc) - load = (docName, callback) -> - if docs[docName] - # The document is already loaded. Return immediately. - options.stats?.cacheHit? 'getSnapshot' - return callback null, docs[docName] + // Load the named document into the cache. This function is re-entrant. + // + // The callback is called with (error, doc) + const load = function(docName, callback) { + if (docs[docName]) { + // The document is already loaded. Return immediately. + __guardMethod__(options.stats, 'cacheHit', o => o.cacheHit('getSnapshot')); + return callback(null, docs[docName]); + } - # We're a memory store. If we don't have it, nobody does. - return callback 'Document does not exist' unless db + // We're a memory store. If we don't have it, nobody does. + if (!db) { return callback('Document does not exist'); } - callbacks = awaitingGetSnapshot[docName] + const callbacks = awaitingGetSnapshot[docName]; - # The document is being loaded already. Add ourselves as a callback. - return callbacks.push callback if callbacks + // The document is being loaded already. Add ourselves as a callback. + if (callbacks) { return callbacks.push(callback); } - options.stats?.cacheMiss? 'getSnapshot' + __guardMethod__(options.stats, 'cacheMiss', o1 => o1.cacheMiss('getSnapshot')); - # The document isn't loaded and isn't being loaded. Load it. - awaitingGetSnapshot[docName] = [callback] - db.getSnapshot docName, (error, data, dbMeta) -> - return add docName, error if error + // The document isn't loaded and isn't being loaded. Load it. + awaitingGetSnapshot[docName] = [callback]; + return db.getSnapshot(docName, function(error, data, dbMeta) { + if (error) { return add(docName, error); } - type = types[data.type] - unless type - console.warn "Type '#{data.type}' missing" - return callback "Type not found" - data.type = type + const type = types[data.type]; + if (!type) { + console.warn(`Type '${data.type}' missing`); + return callback("Type not found"); + } + data.type = type; - committedVersion = data.v + const committedVersion = data.v; - # The server can close without saving the most recent document snapshot. - # In this case, there are extra ops which need to be applied before - # returning the snapshot. - getOpsInternal docName, data.v, null, (error, ops) -> - return callback error if error + // The server can close without saving the most recent document snapshot. + // In this case, there are extra ops which need to be applied before + // returning the snapshot. + return getOpsInternal(docName, data.v, null, function(error, ops) { + if (error) { return callback(error); } - if ops.length > 0 - console.log "Catchup #{docName} #{data.v} -> #{data.v + ops.length}" + if (ops.length > 0) { + console.log(`Catchup ${docName} ${data.v} -> ${data.v + ops.length}`); - try - for op in ops - data.snapshot = type.apply data.snapshot, op.op - data.v++ - catch e - # This should never happen - it indicates that whats in the - # database is invalid. - console.error "Op data invalid for #{docName}: #{e.stack}" - return callback 'Op data invalid' + try { + for (let op of Array.from(ops)) { + data.snapshot = type.apply(data.snapshot, op.op); + data.v++; + } + } catch (e) { + // This should never happen - it indicates that whats in the + // database is invalid. 
+ console.error(`Op data invalid for ${docName}: ${e.stack}`); + return callback('Op data invalid'); + } + } - model.emit 'load', docName, data - add docName, error, data, committedVersion, ops, dbMeta + model.emit('load', docName, data); + return add(docName, error, data, committedVersion, ops, dbMeta); + }); + }); + }; - # This makes sure the cache contains a document. If the doc cache doesn't contain - # a document, it is loaded from the database and stored. - # - # Documents are stored so long as either: - # - They have been accessed within the past #{PERIOD} - # - At least one client has the document open - refreshReapingTimeout = (docName) -> - doc = docs[docName] - return unless doc + // This makes sure the cache contains a document. If the doc cache doesn't contain + // a document, it is loaded from the database and stored. + // + // Documents are stored so long as either: + // - They have been accessed within the past #{PERIOD} + // - At least one client has the document open + var refreshReapingTimeout = function(docName) { + const doc = docs[docName]; + if (!doc) { return; } - # I want to let the clients list be updated before this is called. - process.nextTick -> - # This is an awkward way to find out the number of clients on a document. If this - # causes performance issues, add a numClients field to the document. - # - # The first check is because its possible that between refreshReapingTimeout being called and this - # event being fired, someone called delete() on the document and hence the doc is something else now. - if doc == docs[docName] and - doc.eventEmitter.listeners('op').length == 0 and - (db or options.forceReaping) and - doc.opQueue.busy is false + // I want to let the clients list be updated before this is called. + return process.nextTick(function() { + // This is an awkward way to find out the number of clients on a document. If this + // causes performance issues, add a numClients field to the document. + // + // The first check is because its possible that between refreshReapingTimeout being called and this + // event being fired, someone called delete() on the document and hence the doc is something else now. + if ((doc === docs[docName]) && + (doc.eventEmitter.listeners('op').length === 0) && + (db || options.forceReaping) && + (doc.opQueue.busy === false)) { - clearTimeout doc.reapTimer - doc.reapTimer = reapTimer = setTimeout -> - tryWriteSnapshot docName, -> - # If the reaping timeout has been refreshed while we're writing the snapshot, or if we're - # in the middle of applying an operation, don't reap. - delete docs[docName] if docs[docName].reapTimer is reapTimer and doc.opQueue.busy is false - , options.reapTime + let reapTimer; + clearTimeout(doc.reapTimer); + return doc.reapTimer = (reapTimer = setTimeout(() => tryWriteSnapshot(docName, function() { + // If the reaping timeout has been refreshed while we're writing the snapshot, or if we're + // in the middle of applying an operation, don't reap. + if ((docs[docName].reapTimer === reapTimer) && (doc.opQueue.busy === false)) { return delete docs[docName]; } + }) + , options.reapTime)); + } + }); + }; - tryWriteSnapshot = (docName, callback) -> - return callback?() unless db + var tryWriteSnapshot = function(docName, callback) { + if (!db) { return (typeof callback === 'function' ? callback() : undefined); } - doc = docs[docName] + const doc = docs[docName]; - # The doc is closed - return callback?() unless doc + // The doc is closed + if (!doc) { return (typeof callback === 'function' ? 
callback() : undefined); } - # The document is already saved. - return callback?() if doc.committedVersion is doc.v + // The document is already saved. + if (doc.committedVersion === doc.v) { return (typeof callback === 'function' ? callback() : undefined); } - return callback? 'Another snapshot write is in progress' if doc.snapshotWriteLock + if (doc.snapshotWriteLock) { return (typeof callback === 'function' ? callback('Another snapshot write is in progress') : undefined); } - doc.snapshotWriteLock = true + doc.snapshotWriteLock = true; - options.stats?.writeSnapshot?() + __guardMethod__(options.stats, 'writeSnapshot', o => o.writeSnapshot()); - writeSnapshot = db?.writeSnapshot or (docName, docData, dbMeta, callback) -> callback() + const writeSnapshot = (db != null ? db.writeSnapshot : undefined) || ((docName, docData, dbMeta, callback) => callback()); - data = - v: doc.v - meta: doc.meta - snapshot: doc.snapshot - # The database doesn't know about object types. + const data = { + v: doc.v, + meta: doc.meta, + snapshot: doc.snapshot, + // The database doesn't know about object types. type: doc.type.name + }; - # Commit snapshot. - writeSnapshot docName, data, doc.dbMeta, (error, dbMeta) -> - doc.snapshotWriteLock = false + // Commit snapshot. + return writeSnapshot(docName, data, doc.dbMeta, function(error, dbMeta) { + doc.snapshotWriteLock = false; - # We have to use data.v here because the version in the doc could - # have been updated between the call to writeSnapshot() and now. - doc.committedVersion = data.v - doc.dbMeta = dbMeta + // We have to use data.v here because the version in the doc could + // have been updated between the call to writeSnapshot() and now. + doc.committedVersion = data.v; + doc.dbMeta = dbMeta; - callback? error + return (typeof callback === 'function' ? callback(error) : undefined); + }); + }; - # *** Model interface methods + // *** Model interface methods - # Create a new document. - # - # data should be {snapshot, type, [meta]}. The version of a new document is 0. - @create = (docName, type, meta, callback) -> - [meta, callback] = [{}, meta] if typeof meta is 'function' + // Create a new document. + // + // data should be {snapshot, type, [meta]}. The version of a new document is 0. + this.create = function(docName, type, meta, callback) { + if (typeof meta === 'function') { [meta, callback] = Array.from([{}, meta]); } - return callback? 'Invalid document name' if docName.match /\// - return callback? 'Document already exists' if docs[docName] + if (docName.match(/\//)) { return (typeof callback === 'function' ? callback('Invalid document name') : undefined); } + if (docs[docName]) { return (typeof callback === 'function' ? callback('Document already exists') : undefined); } - type = types[type] if typeof type == 'string' - return callback? 'Type not found' unless type + if (typeof type === 'string') { type = types[type]; } + if (!type) { return (typeof callback === 'function' ? callback('Type not found') : undefined); } - data = - snapshot:type.create() - type:type.name - meta:meta or {} + const data = { + snapshot:type.create(), + type:type.name, + meta:meta || {}, v:0 + }; - done = (error, dbMeta) -> - # dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something. - return callback? error if error + const done = function(error, dbMeta) { + // dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something. 
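+      // (For instance, a hypothetical MongoDB-style backend might hand back
+      // something like {_id: ...} here; the model never inspects dbMeta, it
+      // only stores it on the doc and passes it back to db.delete /
+      // db.writeSnapshot.)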
+ if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - # From here on we'll store the object version of the type name. - data.type = type - add docName, null, data, 0, [], dbMeta - model.emit 'create', docName, data - callback?() + // From here on we'll store the object version of the type name. + data.type = type; + add(docName, null, data, 0, [], dbMeta); + model.emit('create', docName, data); + return (typeof callback === 'function' ? callback() : undefined); + }; - if db - db.create docName, data, done - else - done() + if (db) { + return db.create(docName, data, done); + } else { + return done(); + } + }; - # Perminantly deletes the specified document. - # If listeners are attached, they are removed. - # - # The callback is called with (error) if there was an error. If error is null / undefined, the - # document was deleted. - # - # WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the - # deletion. Subsequent op submissions will fail). - @delete = (docName, callback) -> - doc = docs[docName] + // Perminantly deletes the specified document. + // If listeners are attached, they are removed. + // + // The callback is called with (error) if there was an error. If error is null / undefined, the + // document was deleted. + // + // WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the + // deletion. Subsequent op submissions will fail). + this.delete = function(docName, callback) { + const doc = docs[docName]; - if doc - clearTimeout doc.reapTimer - delete docs[docName] + if (doc) { + clearTimeout(doc.reapTimer); + delete docs[docName]; + } - done = (error) -> - model.emit 'delete', docName unless error - callback? error + const done = function(error) { + if (!error) { model.emit('delete', docName); } + return (typeof callback === 'function' ? callback(error) : undefined); + }; - if db - db.delete docName, doc?.dbMeta, done - else - done (if !doc then 'Document does not exist') + if (db) { + return db.delete(docName, doc != null ? doc.dbMeta : undefined, done); + } else { + return done((!doc ? 'Document does not exist' : undefined)); + } + }; - # This gets all operations from [start...end]. (That is, its not inclusive.) - # - # end can be null. This means 'get me all ops from start'. - # - # Each op returned is in the form {op:o, meta:m, v:version}. - # - # Callback is called with (error, [ops]) - # - # If the document does not exist, getOps doesn't necessarily return an error. This is because - # its awkward to figure out whether or not the document exists for things - # like the redis database backend. I guess its a bit gross having this inconsistant - # with the other DB calls, but its certainly convenient. - # - # Use getVersion() to determine if a document actually exists, if thats what you're - # after. - @getOps = getOps = (docName, start, end, callback) -> - # getOps will only use the op cache if its there. It won't fill the op cache in. - throw new Error 'start must be 0+' unless start >= 0 + // This gets all operations from [start...end]. (That is, its not inclusive.) + // + // end can be null. This means 'get me all ops from start'. + // + // Each op returned is in the form {op:o, meta:m, v:version}. + // + // Callback is called with (error, [ops]) + // + // If the document does not exist, getOps doesn't necessarily return an error. 
This is because + // its awkward to figure out whether or not the document exists for things + // like the redis database backend. I guess its a bit gross having this inconsistant + // with the other DB calls, but its certainly convenient. + // + // Use getVersion() to determine if a document actually exists, if thats what you're + // after. + this.getOps = (getOps = function(docName, start, end, callback) { + // getOps will only use the op cache if its there. It won't fill the op cache in. + if (!(start >= 0)) { throw new Error('start must be 0+'); } - [end, callback] = [null, end] if typeof end is 'function' + if (typeof end === 'function') { [end, callback] = Array.from([null, end]); } - ops = docs[docName]?.ops + const ops = docs[docName] != null ? docs[docName].ops : undefined; - if ops - version = docs[docName].v + if (ops) { + const version = docs[docName].v; - # Ops contains an array of ops. The last op in the list is the last op applied - end ?= version - start = Math.min start, end + // Ops contains an array of ops. The last op in the list is the last op applied + if (end == null) { end = version; } + start = Math.min(start, end); - return callback null, [] if start == end + if (start === end) { return callback(null, []); } - # Base is the version number of the oldest op we have cached - base = version - ops.length + // Base is the version number of the oldest op we have cached + const base = version - ops.length; - # If the database is null, we'll trim to the ops we do have and hope thats enough. - if start >= base or db is null - refreshReapingTimeout docName - options.stats?.cacheHit 'getOps' + // If the database is null, we'll trim to the ops we do have and hope thats enough. + if ((start >= base) || (db === null)) { + refreshReapingTimeout(docName); + if (options.stats != null) { + options.stats.cacheHit('getOps'); + } - return callback null, ops[(start - base)...(end - base)] + return callback(null, ops.slice((start - base), (end - base))); + } + } - options.stats?.cacheMiss 'getOps' + if (options.stats != null) { + options.stats.cacheMiss('getOps'); + } - getOpsInternal docName, start, end, callback + return getOpsInternal(docName, start, end, callback); + }); - # Gets the snapshot data for the specified document. - # getSnapshot(docName, callback) - # Callback is called with (error, {v: , type: , snapshot: , meta: }) - @getSnapshot = (docName, callback) -> - load docName, (error, doc) -> - callback error, if doc then {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta} + // Gets the snapshot data for the specified document. + // getSnapshot(docName, callback) + // Callback is called with (error, {v: , type: , snapshot: , meta: }) + this.getSnapshot = (docName, callback) => load(docName, (error, doc) => callback(error, doc ? {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta} : undefined)); - # Gets the latest version # of the document. - # getVersion(docName, callback) - # callback is called with (error, version). - @getVersion = (docName, callback) -> - load docName, (error, doc) -> callback error, doc?.v + // Gets the latest version # of the document. + // getVersion(docName, callback) + // callback is called with (error, version). + this.getVersion = (docName, callback) => load(docName, (error, doc) => callback(error, doc != null ? doc.v : undefined)); - # Apply an op to the specified document. 
- # The callback is passed (error, applied version #) - # opData = {op:op, v:v, meta:metadata} - # - # Ops are queued before being applied so that the following code applies op C before op B: - # model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB - # model.applyOp 'doc', OPC - @applyOp = (docName, opData, callback) -> - # All the logic for this is in makeOpQueue, above. - load docName, (error, doc) -> - return callback error if error + // Apply an op to the specified document. + // The callback is passed (error, applied version #) + // opData = {op:op, v:v, meta:metadata} + // + // Ops are queued before being applied so that the following code applies op C before op B: + // model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB + // model.applyOp 'doc', OPC + this.applyOp = (docName, opData, callback) => // All the logic for this is in makeOpQueue, above. + load(docName, function(error, doc) { + if (error) { return callback(error); } - process.nextTick -> doc.opQueue opData, (error, newVersion) -> - refreshReapingTimeout docName - callback? error, newVersion + return process.nextTick(() => doc.opQueue(opData, function(error, newVersion) { + refreshReapingTimeout(docName); + return (typeof callback === 'function' ? callback(error, newVersion) : undefined); + })); + }); - # TODO: store (some) metadata in DB - # TODO: op and meta should be combineable in the op that gets sent - @applyMetaOp = (docName, metaOpData, callback) -> - {path, value} = metaOpData.meta + // TODO: store (some) metadata in DB + // TODO: op and meta should be combineable in the op that gets sent + this.applyMetaOp = function(docName, metaOpData, callback) { + const {path, value} = metaOpData.meta; - return callback? "path should be an array" unless isArray path + if (!isArray(path)) { return (typeof callback === 'function' ? callback("path should be an array") : undefined); } - load docName, (error, doc) -> - if error? - callback? error - else - applied = false - switch path[0] - when 'shout' - doc.eventEmitter.emit 'op', metaOpData - applied = true + return load(docName, function(error, doc) { + if (error != null) { + return (typeof callback === 'function' ? callback(error) : undefined); + } else { + let applied = false; + switch (path[0]) { + case 'shout': + doc.eventEmitter.emit('op', metaOpData); + applied = true; + break; + } - model.emit 'applyMetaOp', docName, path, value if applied - callback? null, doc.v + if (applied) { model.emit('applyMetaOp', docName, path, value); } + return (typeof callback === 'function' ? callback(null, doc.v) : undefined); + } + }); + }; - # Listen to all ops from the specified version. If version is in the past, all - # ops since that version are sent immediately to the listener. - # - # The callback is called once the listener is attached, but before any ops have been passed - # to the listener. - # - # This will _not_ edit the document metadata. - # - # If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour - # might change in a future version. - # - # version is the document version at which the document is opened. It can be left out if you want to open - # the document at the most recent version. - # - # listener is called with (opData) each time an op is applied. - # - # callback(error, openedVersion) - @listen = (docName, version, listener, callback) -> - [version, listener, callback] = [null, version, listener] if typeof version is 'function' + // Listen to all ops from the specified version. 
If version is in the past, all + // ops since that version are sent immediately to the listener. + // + // The callback is called once the listener is attached, but before any ops have been passed + // to the listener. + // + // This will _not_ edit the document metadata. + // + // If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour + // might change in a future version. + // + // version is the document version at which the document is opened. It can be left out if you want to open + // the document at the most recent version. + // + // listener is called with (opData) each time an op is applied. + // + // callback(error, openedVersion) + this.listen = function(docName, version, listener, callback) { + if (typeof version === 'function') { [version, listener, callback] = Array.from([null, version, listener]); } - load docName, (error, doc) -> - return callback? error if error + return load(docName, function(error, doc) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - clearTimeout doc.reapTimer + clearTimeout(doc.reapTimer); - if version? - getOps docName, version, null, (error, data) -> - return callback? error if error + if (version != null) { + return getOps(docName, version, null, function(error, data) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - doc.eventEmitter.on 'op', listener - callback? null, version - for op in data - listener op + doc.eventEmitter.on('op', listener); + if (typeof callback === 'function') { + callback(null, version); + } + return (() => { + const result = []; + for (let op of Array.from(data)) { + var needle; + listener(op); - # The listener may well remove itself during the catchup phase. If this happens, break early. - # This is done in a quite inefficient way. (O(n) where n = #listeners on doc) - break unless listener in doc.eventEmitter.listeners 'op' + // The listener may well remove itself during the catchup phase. If this happens, break early. + // This is done in a quite inefficient way. (O(n) where n = #listeners on doc) + if ((needle = listener, !Array.from(doc.eventEmitter.listeners('op')).includes(needle))) { break; } else { + result.push(undefined); + } + } + return result; + })(); + }); - else # Version is null / undefined. Just add the listener. - doc.eventEmitter.on 'op', listener - callback? null, doc.v + } else { // Version is null / undefined. Just add the listener. + doc.eventEmitter.on('op', listener); + return (typeof callback === 'function' ? callback(null, doc.v) : undefined); + } + }); + }; - # Remove a listener for a particular document. - # - # removeListener(docName, listener) - # - # This is synchronous. - @removeListener = (docName, listener) -> - # The document should already be loaded. - doc = docs[docName] - throw new Error 'removeListener called but document not loaded' unless doc + // Remove a listener for a particular document. + // + // removeListener(docName, listener) + // + // This is synchronous. + this.removeListener = function(docName, listener) { + // The document should already be loaded. + const doc = docs[docName]; + if (!doc) { throw new Error('removeListener called but document not loaded'); } - doc.eventEmitter.removeListener 'op', listener - refreshReapingTimeout docName + doc.eventEmitter.removeListener('op', listener); + return refreshReapingTimeout(docName); + }; - # Flush saves all snapshot data to the database. 
I'm not sure whether or not this is actually needed - - # sharejs will happily replay uncommitted ops when documents are re-opened anyway. - @flush = (callback) -> - return callback?() unless db + // Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed - + // sharejs will happily replay uncommitted ops when documents are re-opened anyway. + this.flush = function(callback) { + if (!db) { return (typeof callback === 'function' ? callback() : undefined); } - pendingWrites = 0 + let pendingWrites = 0; - for docName, doc of docs - if doc.committedVersion < doc.v - pendingWrites++ - # I'm hoping writeSnapshot will always happen in another thread. - tryWriteSnapshot docName, -> - process.nextTick -> - pendingWrites-- - callback?() if pendingWrites is 0 + for (let docName in docs) { + const doc = docs[docName]; + if (doc.committedVersion < doc.v) { + pendingWrites++; + // I'm hoping writeSnapshot will always happen in another thread. + tryWriteSnapshot(docName, () => process.nextTick(function() { + pendingWrites--; + if (pendingWrites === 0) { return (typeof callback === 'function' ? callback() : undefined); } + })); + } + } - # If nothing was queued, terminate immediately. - callback?() if pendingWrites is 0 + // If nothing was queued, terminate immediately. + if (pendingWrites === 0) { return (typeof callback === 'function' ? callback() : undefined); } + }; - # Close the database connection. This is needed so nodejs can shut down cleanly. - @closeDb = -> - db?.close?() - db = null + // Close the database connection. This is needed so nodejs can shut down cleanly. + this.closeDb = function() { + __guardMethod__(db, 'close', o => o.close()); + return db = null; + }; - return +}); -# Model inherits from EventEmitter. -Model:: = new EventEmitter +// Model inherits from EventEmitter. +Model.prototype = new EventEmitter; + +function __guardMethod__(obj, methodName, transform) { + if (typeof obj !== 'undefined' && obj !== null && typeof obj[methodName] === 'function') { + return transform(obj, methodName); + } else { + return undefined; + } +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/sharejs/server/syncqueue.js b/services/document-updater/app/coffee/sharejs/server/syncqueue.js index 746450b010..31b2235ee3 100644 --- a/services/document-updater/app/coffee/sharejs/server/syncqueue.js +++ b/services/document-updater/app/coffee/sharejs/server/syncqueue.js @@ -1,42 +1,52 @@ -# A synchronous processing queue. The queue calls process on the arguments, -# ensuring that process() is only executing once at a time. -# -# process(data, callback) _MUST_ eventually call its callback. -# -# Example: -# -# queue = require 'syncqueue' -# -# fn = queue (data, callback) -> -# asyncthing data, -> -# callback(321) -# -# fn(1) -# fn(2) -# fn(3, (result) -> console.log(result)) -# -# ^--- async thing will only be running once at any time. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A synchronous processing queue. The queue calls process on the arguments, +// ensuring that process() is only executing once at a time. +// +// process(data, callback) _MUST_ eventually call its callback. 
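+// (A dropped callback stalls the queue permanently: `enqueue.busy` stays
+// true, and flush() below refuses to start the next item while it is set.)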
+// +// Example: +// +// queue = require 'syncqueue' +// +// fn = queue (data, callback) -> +// asyncthing data, -> +// callback(321) +// +// fn(1) +// fn(2) +// fn(3, (result) -> console.log(result)) +// +// ^--- async thing will only be running once at any time. -module.exports = (process) -> - throw new Error('process is not a function') unless typeof process == 'function' - queue = [] +module.exports = function(process) { + if (typeof process !== 'function') { throw new Error('process is not a function'); } + const queue = []; - enqueue = (data, callback) -> - queue.push [data, callback] - flush() + const enqueue = function(data, callback) { + queue.push([data, callback]); + return flush(); + }; - enqueue.busy = false + enqueue.busy = false; - flush = -> - return if enqueue.busy or queue.length == 0 + var flush = function() { + if (enqueue.busy || (queue.length === 0)) { return; } - enqueue.busy = true - [data, callback] = queue.shift() - process data, (result...) -> # TODO: Make this not use varargs - varargs are really slow. - enqueue.busy = false - # This is called after busy = false so a user can check if enqueue.busy is set in the callback. - callback.apply null, result if callback - flush() + enqueue.busy = true; + const [data, callback] = Array.from(queue.shift()); + return process(data, function(...result) { // TODO: Make this not use varargs - varargs are really slow. + enqueue.busy = false; + // This is called after busy = false so a user can check if enqueue.busy is set in the callback. + if (callback) { callback.apply(null, result); } + return flush(); + }); + }; - enqueue + return enqueue; +}; diff --git a/services/document-updater/app/coffee/sharejs/simple.js b/services/document-updater/app/coffee/sharejs/simple.js index 996b1a5ddc..57c4934f73 100644 --- a/services/document-updater/app/coffee/sharejs/simple.js +++ b/services/document-updater/app/coffee/sharejs/simple.js @@ -1,38 +1,48 @@ -# This is a really simple OT type. Its not compiled with the web client, but it could be. -# -# Its mostly included for demonstration purposes and its used in a lot of unit tests. -# -# This defines a really simple text OT type which only allows inserts. (No deletes). -# -# Ops look like: -# {position:#, text:"asdf"} -# -# Document snapshots look like: -# {str:string} +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This is a really simple OT type. Its not compiled with the web client, but it could be. +// +// Its mostly included for demonstration purposes and its used in a lot of unit tests. +// +// This defines a really simple text OT type which only allows inserts. (No deletes). +// +// Ops look like: +// {position:#, text:"asdf"} +// +// Document snapshots look like: +// {str:string} -module.exports = - # The name of the OT type. The type is stored in types[type.name]. The name can be - # used in place of the actual type in all the API methods. - name: 'simple' +module.exports = { + // The name of the OT type. The type is stored in types[type.name]. The name can be + // used in place of the actual type in all the API methods. + name: 'simple', - # Create a new document snapshot - create: -> {str:""} + // Create a new document snapshot + create() { return {str:""}; }, - # Apply the given op to the document snapshot. Returns the new snapshot. - # - # The original snapshot should not be modified. 
- apply: (snapshot, op) -> - throw new Error 'Invalid position' unless 0 <= op.position <= snapshot.str.length + // Apply the given op to the document snapshot. Returns the new snapshot. + // + // The original snapshot should not be modified. + apply(snapshot, op) { + if (!(0 <= op.position && op.position <= snapshot.str.length)) { throw new Error('Invalid position'); } - str = snapshot.str - str = str.slice(0, op.position) + op.text + str.slice(op.position) - {str} + let { + str + } = snapshot; + str = str.slice(0, op.position) + op.text + str.slice(op.position); + return {str}; + }, - # transform op1 by op2. Return transformed version of op1. - # sym describes the symmetry of the op. Its 'left' or 'right' depending on whether the - # op being transformed comes from the client or the server. - transform: (op1, op2, sym) -> - pos = op1.position - pos += op2.text.length if op2.position < pos or (op2.position == pos and sym is 'left') + // transform op1 by op2. Return transformed version of op1. + // sym describes the symmetry of the op. Its 'left' or 'right' depending on whether the + // op being transformed comes from the client or the server. + transform(op1, op2, sym) { + let pos = op1.position; + if ((op2.position < pos) || ((op2.position === pos) && (sym === 'left'))) { pos += op2.text.length; } - return {position:pos, text:op1.text} + return {position:pos, text:op1.text}; + } +}; diff --git a/services/document-updater/app/coffee/sharejs/syncqueue.js b/services/document-updater/app/coffee/sharejs/syncqueue.js index 746450b010..31b2235ee3 100644 --- a/services/document-updater/app/coffee/sharejs/syncqueue.js +++ b/services/document-updater/app/coffee/sharejs/syncqueue.js @@ -1,42 +1,52 @@ -# A synchronous processing queue. The queue calls process on the arguments, -# ensuring that process() is only executing once at a time. -# -# process(data, callback) _MUST_ eventually call its callback. -# -# Example: -# -# queue = require 'syncqueue' -# -# fn = queue (data, callback) -> -# asyncthing data, -> -# callback(321) -# -# fn(1) -# fn(2) -# fn(3, (result) -> console.log(result)) -# -# ^--- async thing will only be running once at any time. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A synchronous processing queue. The queue calls process on the arguments, +// ensuring that process() is only executing once at a time. +// +// process(data, callback) _MUST_ eventually call its callback. +// +// Example: +// +// queue = require 'syncqueue' +// +// fn = queue (data, callback) -> +// asyncthing data, -> +// callback(321) +// +// fn(1) +// fn(2) +// fn(3, (result) -> console.log(result)) +// +// ^--- async thing will only be running once at any time. 
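// The CoffeeScript usage example above, as a post-decaffeinate JavaScript
// sketch (`asyncthing` is a stand-in for any async call; require path assumed):
const queue = require('./syncqueue');
const asyncthing = (data, done) => setTimeout(done, 10);

const fn = queue(function(data, callback) {
  asyncthing(data, () => callback(321));
});

fn(1);
fn(2);
fn(3, (result) => console.log(result)); // 321; only one asyncthing runs at a time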
-module.exports = (process) -> - throw new Error('process is not a function') unless typeof process == 'function' - queue = [] +module.exports = function(process) { + if (typeof process !== 'function') { throw new Error('process is not a function'); } + const queue = []; - enqueue = (data, callback) -> - queue.push [data, callback] - flush() + const enqueue = function(data, callback) { + queue.push([data, callback]); + return flush(); + }; - enqueue.busy = false + enqueue.busy = false; - flush = -> - return if enqueue.busy or queue.length == 0 + var flush = function() { + if (enqueue.busy || (queue.length === 0)) { return; } - enqueue.busy = true - [data, callback] = queue.shift() - process data, (result...) -> # TODO: Make this not use varargs - varargs are really slow. - enqueue.busy = false - # This is called after busy = false so a user can check if enqueue.busy is set in the callback. - callback.apply null, result if callback - flush() + enqueue.busy = true; + const [data, callback] = Array.from(queue.shift()); + return process(data, function(...result) { // TODO: Make this not use varargs - varargs are really slow. + enqueue.busy = false; + // This is called after busy = false so a user can check if enqueue.busy is set in the callback. + if (callback) { callback.apply(null, result); } + return flush(); + }); + }; - enqueue + return enqueue; +}; diff --git a/services/document-updater/app/coffee/sharejs/text-api.js b/services/document-updater/app/coffee/sharejs/text-api.js index 96243ceffb..295261ff90 100644 --- a/services/document-updater/app/coffee/sharejs/text-api.js +++ b/services/document-updater/app/coffee/sharejs/text-api.js @@ -1,32 +1,44 @@ -# Text document API for text +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// Text document API for text -text = require './text' if typeof WEB is 'undefined' +let text; +if (typeof WEB === 'undefined') { text = require('./text'); } -text.api = - provides: {text:true} +text.api = { + provides: {text:true}, - # The number of characters in the string - getLength: -> @snapshot.length + // The number of characters in the string + getLength() { return this.snapshot.length; }, - # Get the text contents of a document - getText: -> @snapshot + // Get the text contents of a document + getText() { return this.snapshot; }, - insert: (pos, text, callback) -> - op = [{p:pos, i:text}] + insert(pos, text, callback) { + const op = [{p:pos, i:text}]; - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - del: (pos, length, callback) -> - op = [{p:pos, d:@snapshot[pos...(pos + length)]}] + del(pos, length, callback) { + const op = [{p:pos, d:this.snapshot.slice(pos, (pos + length))}]; - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - _register: -> - @on 'remoteop', (op) -> - for component in op - if component.i != undefined - @emit 'insert', component.p, component.i - else - @emit 'delete', component.p, component.d + _register() { + return this.on('remoteop', function(op) { + return Array.from(op).map((component) => + component.i !== undefined ? 
+ this.emit('insert', component.p, component.i) + : + this.emit('delete', component.p, component.d)); + }); + } +}; diff --git a/services/document-updater/app/coffee/sharejs/text-composable-api.js b/services/document-updater/app/coffee/sharejs/text-composable-api.js index 7b27ac163a..160ab1c46e 100644 --- a/services/document-updater/app/coffee/sharejs/text-composable-api.js +++ b/services/document-updater/app/coffee/sharejs/text-composable-api.js @@ -1,43 +1,64 @@ -# Text document API for text +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// Text document API for text -if WEB? - type = exports.types['text-composable'] -else - type = require './text-composable' +let type; +if (typeof WEB !== 'undefined' && WEB !== null) { + type = exports.types['text-composable']; +} else { + type = require('./text-composable'); +} -type.api = - provides: {'text':true} +type.api = { + provides: {'text':true}, - # The number of characters in the string - 'getLength': -> @snapshot.length + // The number of characters in the string + 'getLength'() { return this.snapshot.length; }, - # Get the text contents of a document - 'getText': -> @snapshot + // Get the text contents of a document + 'getText'() { return this.snapshot; }, - 'insert': (pos, text, callback) -> - op = type.normalize [pos, 'i':text, (@snapshot.length - pos)] + 'insert'(pos, text, callback) { + const op = type.normalize([pos, {'i':text}, (this.snapshot.length - pos)]); - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - 'del': (pos, length, callback) -> - op = type.normalize [pos, 'd':@snapshot[pos...(pos + length)], (@snapshot.length - pos - length)] + 'del'(pos, length, callback) { + const op = type.normalize([pos, {'d':this.snapshot.slice(pos, (pos + length))}, (this.snapshot.length - pos - length)]); - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - _register: -> - @on 'remoteop', (op) -> - pos = 0 - for component in op - if typeof component is 'number' - pos += component - else if component.i != undefined - @emit 'insert', pos, component.i - pos += component.i.length - else - # delete - @emit 'delete', pos, component.d - # We don't increment pos, because the position - # specified is after the delete has happened. + _register() { + return this.on('remoteop', function(op) { + let pos = 0; + return (() => { + const result = []; + for (let component of Array.from(op)) { + if (typeof component === 'number') { + result.push(pos += component); + } else if (component.i !== undefined) { + this.emit('insert', pos, component.i); + result.push(pos += component.i.length); + } else { + // delete + result.push(this.emit('delete', pos, component.d)); + } + } + return result; + })(); + }); + } +}; + // We don't increment pos, because the position + // specified is after the delete has happened. diff --git a/services/document-updater/app/coffee/sharejs/text-composable.js b/services/document-updater/app/coffee/sharejs/text-composable.js index 992b567bf0..4f43f769cd 100644 --- a/services/document-updater/app/coffee/sharejs/text-composable.js +++ b/services/document-updater/app/coffee/sharejs/text-composable.js @@ -1,261 +1,315 @@ -# An alternate composable implementation for text. 
This is much closer -# to the implementation used by google wave. -# -# Ops are lists of components which iterate over the whole document. -# Components are either: -# A number N: Skip N characters in the original document -# {i:'str'}: Insert 'str' at the current position in the document -# {d:'str'}: Delete 'str', which appears at the current position in the document -# -# Eg: [3, {i:'hi'}, 5, {d:'internet'}] -# -# Snapshots are strings. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// An alternate composable implementation for text. This is much closer +// to the implementation used by google wave. +// +// Ops are lists of components which iterate over the whole document. +// Components are either: +// A number N: Skip N characters in the original document +// {i:'str'}: Insert 'str' at the current position in the document +// {d:'str'}: Delete 'str', which appears at the current position in the document +// +// Eg: [3, {i:'hi'}, 5, {d:'internet'}] +// +// Snapshots are strings. -p = -> #require('util').debug -i = -> #require('util').inspect +let makeAppend; +const p = function() {}; //require('util').debug +const i = function() {}; //require('util').inspect -exports = if WEB? then {} else module.exports +const exports = (typeof WEB !== 'undefined' && WEB !== null) ? {} : module.exports; -exports.name = 'text-composable' +exports.name = 'text-composable'; -exports.create = -> '' +exports.create = () => ''; -# -------- Utility methods +// -------- Utility methods -checkOp = (op) -> - throw new Error('Op must be an array of components') unless Array.isArray(op) - last = null - for c in op - if typeof(c) == 'object' - throw new Error("Invalid op component: #{i c}") unless (c.i? && c.i.length > 0) or (c.d? && c.d.length > 0) - else - throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number' - throw new Error('Skip components must be a positive number') unless c > 0 - throw new Error('Adjacent skip components should be added') if typeof(last) == 'number' +const checkOp = function(op) { + if (!Array.isArray(op)) { throw new Error('Op must be an array of components'); } + let last = null; + return (() => { + const result = []; + for (let c of Array.from(op)) { + if (typeof(c) === 'object') { + if (((c.i == null) || !(c.i.length > 0)) && ((c.d == null) || !(c.d.length > 0))) { throw new Error(`Invalid op component: ${i(c)}`); } + } else { + if (typeof(c) !== 'number') { throw new Error('Op components must be objects or numbers'); } + if (!(c > 0)) { throw new Error('Skip components must be a positive number'); } + if (typeof(last) === 'number') { throw new Error('Adjacent skip components should be added'); } + } - last = c + result.push(last = c); + } + return result; + })(); +}; -# Makes a function for appending components to a given op. -# Exported for the randomOpGenerator. -exports._makeAppend = makeAppend = (op) -> (component) -> - if component == 0 || component.i == '' || component.d == '' - return - else if op.length == 0 - op.push component - else if typeof(component) == 'number' && typeof(op[op.length - 1]) == 'number' - op[op.length - 1] += component - else if component.i? && op[op.length - 1].i? 
- op[op.length - 1].i += component.i - else if component.d? && op[op.length - 1].d? - op[op.length - 1].d += component.d - else - op.push component +// Makes a function for appending components to a given op. +// Exported for the randomOpGenerator. +exports._makeAppend = (makeAppend = op => (function(component) { + if ((component === 0) || (component.i === '') || (component.d === '')) { + return; + } else if (op.length === 0) { + return op.push(component); + } else if ((typeof(component) === 'number') && (typeof(op[op.length - 1]) === 'number')) { + return op[op.length - 1] += component; + } else if ((component.i != null) && (op[op.length - 1].i != null)) { + return op[op.length - 1].i += component.i; + } else if ((component.d != null) && (op[op.length - 1].d != null)) { + return op[op.length - 1].d += component.d; + } else { + return op.push(component); + } +})); -# checkOp op +// checkOp op -# Makes 2 functions for taking components from the start of an op, and for peeking -# at the next op that could be taken. -makeTake = (op) -> - # The index of the next component to take - idx = 0 - # The offset into the component - offset = 0 +// Makes 2 functions for taking components from the start of an op, and for peeking +// at the next op that could be taken. +const makeTake = function(op) { + // The index of the next component to take + let idx = 0; + // The offset into the component + let offset = 0; - # Take up to length n from the front of op. If n is null, take the next - # op component. If indivisableField == 'd', delete components won't be separated. - # If indivisableField == 'i', insert components won't be separated. - take = (n, indivisableField) -> - return null if idx == op.length - #assert.notStrictEqual op.length, i, 'The op is too short to traverse the document' + // Take up to length n from the front of op. If n is null, take the next + // op component. If indivisableField == 'd', delete components won't be separated. + // If indivisableField == 'i', insert components won't be separated. + const take = function(n, indivisableField) { + let c; + if (idx === op.length) { return null; } + //assert.notStrictEqual op.length, i, 'The op is too short to traverse the document' - if typeof(op[idx]) == 'number' - if !n? or op[idx] - offset <= n - c = op[idx] - offset - ++idx; offset = 0 - c - else - offset += n - n - else - # Take from the string - field = if op[idx].i then 'i' else 'd' - c = {} - if !n? or op[idx][field].length - offset <= n or field == indivisableField - c[field] = op[idx][field][offset..] - ++idx; offset = 0 - else - c[field] = op[idx][field][offset...(offset + n)] - offset += n - c + if (typeof(op[idx]) === 'number') { + if ((n == null) || ((op[idx] - offset) <= n)) { + c = op[idx] - offset; + ++idx; offset = 0; + return c; + } else { + offset += n; + return n; + } + } else { + // Take from the string + const field = op[idx].i ? 'i' : 'd'; + c = {}; + if ((n == null) || ((op[idx][field].length - offset) <= n) || (field === indivisableField)) { + c[field] = op[idx][field].slice(offset); + ++idx; offset = 0; + } else { + c[field] = op[idx][field].slice(offset, (offset + n)); + offset += n; + } + return c; + } + }; - peekType = () -> - op[idx] + const peekType = () => op[idx]; - [take, peekType] + return [take, peekType]; +}; -# Find and return the length of an op component -componentLength = (component) -> - if typeof(component) == 'number' - component - else if component.i? 
- component.i.length - else - component.d.length +// Find and return the length of an op component +const componentLength = function(component) { + if (typeof(component) === 'number') { + return component; + } else if (component.i != null) { + return component.i.length; + } else { + return component.d.length; + } +}; -# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate -# adjacent inserts and deletes. -exports.normalize = (op) -> - newOp = [] - append = makeAppend newOp - append component for component in op - newOp +// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate +// adjacent inserts and deletes. +exports.normalize = function(op) { + const newOp = []; + const append = makeAppend(newOp); + for (let component of Array.from(op)) { append(component); } + return newOp; +}; -# Apply the op to the string. Returns the new string. -exports.apply = (str, op) -> - p "Applying #{i op} to '#{str}'" - throw new Error('Snapshot should be a string') unless typeof(str) == 'string' - checkOp op +// Apply the op to the string. Returns the new string. +exports.apply = function(str, op) { + p(`Applying ${i(op)} to '${str}'`); + if (typeof(str) !== 'string') { throw new Error('Snapshot should be a string'); } + checkOp(op); - pos = 0 - newDoc = [] + const pos = 0; + const newDoc = []; - for component in op - if typeof(component) == 'number' - throw new Error('The op is too long for this document') if component > str.length - newDoc.push str[...component] - str = str[component..] - else if component.i? - newDoc.push component.i - else - throw new Error("The deleted text '#{component.d}' doesn't match the next characters in the document '#{str[...component.d.length]}'") unless component.d == str[...component.d.length] - str = str[component.d.length..] + for (let component of Array.from(op)) { + if (typeof(component) === 'number') { + if (component > str.length) { throw new Error('The op is too long for this document'); } + newDoc.push(str.slice(0, component)); + str = str.slice(component); + } else if (component.i != null) { + newDoc.push(component.i); + } else { + if (component.d !== str.slice(0, component.d.length)) { throw new Error(`The deleted text '${component.d}' doesn't match the next characters in the document '${str.slice(0, component.d.length)}'`); } + str = str.slice(component.d.length); + } + } - throw new Error("The applied op doesn't traverse the entire document") unless '' == str + if ('' !== str) { throw new Error("The applied op doesn't traverse the entire document"); } - newDoc.join '' + return newDoc.join(''); +}; -# transform op1 by op2. Return transformed version of op1. -# op1 and op2 are unchanged by transform. -exports.transform = (op, otherOp, side) -> - throw new Error "side (#{side} must be 'left' or 'right'" unless side == 'left' or side == 'right' +// transform op1 by op2. Return transformed version of op1. +// op1 and op2 are unchanged by transform. 
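// A concrete check of these semantics (a sketch; relative require path assumed):
const tc = require('./text-composable');
// Two concurrent ops against a 3-character document:
const a1 = [3, {i:'hi'}];      // insert 'hi' at the end
const b1 = [{i:'xx'}, 3];      // insert 'xx' at the start
// The 'right' op is shifted past the other side's insert:
tc.transform(a1, b1, 'right'); // => [5, {i:'hi'}]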
+exports.transform = function(op, otherOp, side) { + let component; + if ((side !== 'left') && (side !== 'right')) { throw new Error(`side (${side} must be 'left' or 'right'`); } - checkOp op - checkOp otherOp - newOp = [] + checkOp(op); + checkOp(otherOp); + const newOp = []; - append = makeAppend newOp - [take, peek] = makeTake op + const append = makeAppend(newOp); + const [take, peek] = Array.from(makeTake(op)); - for component in otherOp - if typeof(component) == 'number' # Skip - length = component - while length > 0 - chunk = take(length, 'i') - throw new Error('The op traverses more elements than the document has') unless chunk != null + for (component of Array.from(otherOp)) { + var chunk, length; + if (typeof(component) === 'number') { // Skip + length = component; + while (length > 0) { + chunk = take(length, 'i'); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - append chunk - length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.i? - else if component.i? # Insert - if side == 'left' - # The left insert should go first. - o = peek() - append take() if o?.i + append(chunk); + if ((typeof(chunk) !== 'object') || (chunk.i == null)) { length -= componentLength(chunk); } + } + } else if (component.i != null) { // Insert + if (side === 'left') { + // The left insert should go first. + const o = peek(); + if (o != null ? o.i : undefined) { append(take()); } + } - # Otherwise, skip the inserted text. - append(component.i.length) - else # Delete. - #assert.ok component.d - length = component.d.length - while length > 0 - chunk = take(length, 'i') - throw new Error('The op traverses more elements than the document has') unless chunk != null + // Otherwise, skip the inserted text. + append(component.i.length); + } else { // Delete. + //assert.ok component.d + ({ + length + } = component.d); + while (length > 0) { + chunk = take(length, 'i'); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - if typeof(chunk) == 'number' - length -= chunk - else if chunk.i? - append(chunk) - else - #assert.ok chunk.d - # The delete is unnecessary now. - length -= chunk.d.length + if (typeof(chunk) === 'number') { + length -= chunk; + } else if (chunk.i != null) { + append(chunk); + } else { + //assert.ok chunk.d + // The delete is unnecessary now. + length -= chunk.d.length; + } + } + } + } - # Append extras from op1 - while (component = take()) - throw new Error "Remaining fragments in the op: #{i component}" unless component?.i? - append component + // Append extras from op1 + while (component = take()) { + if ((component != null ? component.i : undefined) == null) { throw new Error(`Remaining fragments in the op: ${i(component)}`); } + append(component); + } - newOp + return newOp; +}; -# Compose 2 ops into 1 op. -exports.compose = (op1, op2) -> - p "COMPOSE #{i op1} + #{i op2}" - checkOp op1 - checkOp op2 +// Compose 2 ops into 1 op. 
+exports.compose = function(op1, op2) { + let component; + p(`COMPOSE ${i(op1)} + ${i(op2)}`); + checkOp(op1); + checkOp(op2); - result = [] + const result = []; - append = makeAppend result - [take, _] = makeTake op1 + const append = makeAppend(result); + const [take, _] = Array.from(makeTake(op1)); - for component in op2 - if typeof(component) == 'number' # Skip - length = component - while length > 0 - chunk = take(length, 'd') - throw new Error('The op traverses more elements than the document has') unless chunk != null + for (component of Array.from(op2)) { + var chunk, length; + if (typeof(component) === 'number') { // Skip + length = component; + while (length > 0) { + chunk = take(length, 'd'); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - append chunk - length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.d? + append(chunk); + if ((typeof(chunk) !== 'object') || (chunk.d == null)) { length -= componentLength(chunk); } + } - else if component.i? # Insert - append {i:component.i} + } else if (component.i != null) { // Insert + append({i:component.i}); - else # Delete - offset = 0 - while offset < component.d.length - chunk = take(component.d.length - offset, 'd') - throw new Error('The op traverses more elements than the document has') unless chunk != null + } else { // Delete + let offset = 0; + while (offset < component.d.length) { + chunk = take(component.d.length - offset, 'd'); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - # If its delete, append it. If its skip, drop it and decrease length. If its insert, check the strings match, drop it and decrease length. - if typeof(chunk) == 'number' - append {d:component.d[offset...(offset + chunk)]} - offset += chunk - else if chunk.i? - throw new Error("The deleted text doesn't match the inserted text") unless component.d[offset...(offset + chunk.i.length)] == chunk.i - offset += chunk.i.length - # The ops cancel each other out. - else - # Delete - append chunk + // If its delete, append it. If its skip, drop it and decrease length. If its insert, check the strings match, drop it and decrease length. + if (typeof(chunk) === 'number') { + append({d:component.d.slice(offset, (offset + chunk))}); + offset += chunk; + } else if (chunk.i != null) { + if (component.d.slice(offset, (offset + chunk.i.length)) !== chunk.i) { throw new Error("The deleted text doesn't match the inserted text"); } + offset += chunk.i.length; + // The ops cancel each other out. + } else { + // Delete + append(chunk); + } + } + } + } - # Append extras from op1 - while (component = take()) - throw new Error "Trailing stuff in op1 #{i component}" unless component?.d? - append component + // Append extras from op1 + while (component = take()) { + if ((component != null ? component.d : undefined) == null) { throw new Error(`Trailing stuff in op1 ${i(component)}`); } + append(component); + } - result + return result; +}; -invertComponent = (c) -> - if typeof(c) == 'number' - c - else if c.i? 
- {d:c.i} - else - {i:c.d} +const invertComponent = function(c) { + if (typeof(c) === 'number') { + return c; + } else if (c.i != null) { + return {d:c.i}; + } else { + return {i:c.d}; + } +}; -# Invert an op -exports.invert = (op) -> - result = [] - append = makeAppend result +// Invert an op +exports.invert = function(op) { + const result = []; + const append = makeAppend(result); - append(invertComponent component) for component in op + for (let component of Array.from(op)) { append(invertComponent(component)); } - result + return result; +}; -if window? - window.ot ||= {} - window.ot.types ||= {} - window.ot.types.text = exports +if (typeof window !== 'undefined' && window !== null) { + if (!window.ot) { window.ot = {}; } + if (!window.ot.types) { window.ot.types = {}; } + window.ot.types.text = exports; +} diff --git a/services/document-updater/app/coffee/sharejs/text-tp2-api.js b/services/document-updater/app/coffee/sharejs/text-tp2-api.js index d661b5ae37..e3f4f95ea6 100644 --- a/services/document-updater/app/coffee/sharejs/text-tp2-api.js +++ b/services/document-updater/app/coffee/sharejs/text-tp2-api.js @@ -1,89 +1,118 @@ -# Text document API for text-tp2 +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// Text document API for text-tp2 -if WEB? - type = exports.types['text-tp2'] -else - type = require './text-tp2' +let type; +if (typeof WEB !== 'undefined' && WEB !== null) { + type = exports.types['text-tp2']; +} else { + type = require('./text-tp2'); +} -{_takeDoc:takeDoc, _append:append} = type +const {_takeDoc:takeDoc, _append:append} = type; -appendSkipChars = (op, doc, pos, maxlength) -> - while (maxlength == undefined || maxlength > 0) and pos.index < doc.data.length - part = takeDoc doc, pos, maxlength, true - maxlength -= part.length if maxlength != undefined and typeof part is 'string' - append op, (part.length || part) +const appendSkipChars = (op, doc, pos, maxlength) => (() => { + const result = []; + while (((maxlength === undefined) || (maxlength > 0)) && (pos.index < doc.data.length)) { + const part = takeDoc(doc, pos, maxlength, true); + if ((maxlength !== undefined) && (typeof part === 'string')) { maxlength -= part.length; } + result.push(append(op, (part.length || part))); + } + return result; +})(); -type['api'] = - 'provides': {'text':true} +type['api'] = { + 'provides': {'text':true}, - # The number of characters in the string - 'getLength': -> @snapshot.charLength + // The number of characters in the string + 'getLength'() { return this.snapshot.charLength; }, - # Flatten a document into a string - 'getText': -> - strings = (elem for elem in @snapshot.data when typeof elem is 'string') - strings.join '' + // Flatten a document into a string + 'getText'() { + const strings = (Array.from(this.snapshot.data).filter((elem) => typeof elem === 'string')); + return strings.join(''); + }, - 'insert': (pos, text, callback) -> - pos = 0 if pos == undefined + 'insert'(pos, text, callback) { + if (pos === undefined) { pos = 0; } - op = [] - docPos = {index:0, offset:0} + const op = []; + const docPos = {index:0, offset:0}; - appendSkipChars op, @snapshot, docPos, pos - append op, {'i':text} - appendSkipChars op, @snapshot, docPos + appendSkipChars(op, 
this.snapshot, docPos, pos); + append(op, {'i':text}); + appendSkipChars(op, this.snapshot, docPos); - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - 'del': (pos, length, callback) -> - op = [] - docPos = {index:0, offset:0} + 'del'(pos, length, callback) { + const op = []; + const docPos = {index:0, offset:0}; - appendSkipChars op, @snapshot, docPos, pos + appendSkipChars(op, this.snapshot, docPos, pos); - while length > 0 - part = takeDoc @snapshot, docPos, length, true - if typeof part is 'string' - append op, {'d':part.length} - length -= part.length - else - append op, part + while (length > 0) { + const part = takeDoc(this.snapshot, docPos, length, true); + if (typeof part === 'string') { + append(op, {'d':part.length}); + length -= part.length; + } else { + append(op, part); + } + } - appendSkipChars op, @snapshot, docPos + appendSkipChars(op, this.snapshot, docPos); - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - '_register': -> - # Interpret recieved ops + generate more detailed events for them - @on 'remoteop', (op, snapshot) -> - textPos = 0 - docPos = {index:0, offset:0} + '_register'() { + // Interpret recieved ops + generate more detailed events for them + return this.on('remoteop', function(op, snapshot) { + let textPos = 0; + const docPos = {index:0, offset:0}; - for component in op - if typeof component is 'number' - # Skip - remainder = component - while remainder > 0 - part = takeDoc snapshot, docPos, remainder - if typeof part is 'string' - textPos += part.length - remainder -= part.length || part - else if component.i != undefined - # Insert - if typeof component.i is 'string' - @emit 'insert', textPos, component.i - textPos += component.i.length - else - # Delete - remainder = component.d - while remainder > 0 - part = takeDoc snapshot, docPos, remainder - if typeof part is 'string' - @emit 'delete', textPos, part - remainder -= part.length || part + for (let component of Array.from(op)) { + var part, remainder; + if (typeof component === 'number') { + // Skip + remainder = component; + while (remainder > 0) { + part = takeDoc(snapshot, docPos, remainder); + if (typeof part === 'string') { + textPos += part.length; + } + remainder -= part.length || part; + } + } else if (component.i !== undefined) { + // Insert + if (typeof component.i === 'string') { + this.emit('insert', textPos, component.i); + textPos += component.i.length; + } + } else { + // Delete + remainder = component.d; + while (remainder > 0) { + part = takeDoc(snapshot, docPos, remainder); + if (typeof part === 'string') { + this.emit('delete', textPos, part); + } + remainder -= part.length || part; + } + } + } - return + }); + } +}; diff --git a/services/document-updater/app/coffee/sharejs/text-tp2.js b/services/document-updater/app/coffee/sharejs/text-tp2.js index d19cbdcef4..ab123d6ff7 100644 --- a/services/document-updater/app/coffee/sharejs/text-tp2.js +++ b/services/document-updater/app/coffee/sharejs/text-tp2.js @@ -1,322 +1,398 @@ -# A TP2 implementation of text, following this spec: -# http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README -# -# A document is made up of a string and a set of tombstones inserted throughout -# the string. For example, 'some ', (2 tombstones), 'string'. -# -# This is encoded in a document as: {s:'some string', t:[5, -2, 6]} -# -# Ops are lists of components which iterate over the whole document. 
-# Components are either: -# N: Skip N characters in the original document -# {i:'str'}: Insert 'str' at the current position in the document -# {i:N}: Insert N tombstones at the current position in the document -# {d:N}: Delete (tombstone) N characters at the current position in the document -# -# Eg: [3, {i:'hi'}, 5, {d:8}] -# -# Snapshots are lists with characters and tombstones. Characters are stored in strings -# and adjacent tombstones are flattened into numbers. -# -# Eg, the document: 'Hello .....world' ('.' denotes tombstoned (deleted) characters) -# would be represented by a document snapshot of ['Hello ', 5, 'world'] +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A TP2 implementation of text, following this spec: +// http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README +// +// A document is made up of a string and a set of tombstones inserted throughout +// the string. For example, 'some ', (2 tombstones), 'string'. +// +// This is encoded in a document as: {s:'some string', t:[5, -2, 6]} +// +// Ops are lists of components which iterate over the whole document. +// Components are either: +// N: Skip N characters in the original document +// {i:'str'}: Insert 'str' at the current position in the document +// {i:N}: Insert N tombstones at the current position in the document +// {d:N}: Delete (tombstone) N characters at the current position in the document +// +// Eg: [3, {i:'hi'}, 5, {d:8}] +// +// Snapshots are lists with characters and tombstones. Characters are stored in strings +// and adjacent tombstones are flattened into numbers. +// +// Eg, the document: 'Hello .....world' ('.' 
denotes tombstoned (deleted) characters) +// would be represented by a document snapshot of ['Hello ', 5, 'world'] -type = - name: 'text-tp2' - tp2: true - create: -> {charLength:0, totalLength:0, positionCache:[], data:[]} - serialize: (doc) -> - throw new Error 'invalid doc snapshot' unless doc.data - doc.data - deserialize: (data) -> - doc = type.create() - doc.data = data +let append, appendDoc, takeDoc; +var type = { + name: 'text-tp2', + tp2: true, + create() { return {charLength:0, totalLength:0, positionCache:[], data:[]}; }, + serialize(doc) { + if (!doc.data) { throw new Error('invalid doc snapshot'); } + return doc.data; + }, + deserialize(data) { + const doc = type.create(); + doc.data = data; - for component in data - if typeof component is 'string' - doc.charLength += component.length - doc.totalLength += component.length - else - doc.totalLength += component + for (let component of Array.from(data)) { + if (typeof component === 'string') { + doc.charLength += component.length; + doc.totalLength += component.length; + } else { + doc.totalLength += component; + } + } - doc + return doc; + } +}; -checkOp = (op) -> - throw new Error('Op must be an array of components') unless Array.isArray(op) - last = null - for c in op - if typeof(c) == 'object' - if c.i != undefined - throw new Error('Inserts must insert a string or a +ive number') unless (typeof(c.i) == 'string' and c.i.length > 0) or (typeof(c.i) == 'number' and c.i > 0) - else if c.d != undefined - throw new Error('Deletes must be a +ive number') unless typeof(c.d) == 'number' and c.d > 0 - else - throw new Error('Operation component must define .i or .d') - else - throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number' - throw new Error('Skip components must be a positive number') unless c > 0 - throw new Error('Adjacent skip components should be combined') if typeof(last) == 'number' +const checkOp = function(op) { + if (!Array.isArray(op)) { throw new Error('Op must be an array of components'); } + let last = null; + return (() => { + const result = []; + for (let c of Array.from(op)) { + if (typeof(c) === 'object') { + if (c.i !== undefined) { + if (((typeof(c.i) !== 'string') || !(c.i.length > 0)) && ((typeof(c.i) !== 'number') || !(c.i > 0))) { throw new Error('Inserts must insert a string or a +ive number'); } + } else if (c.d !== undefined) { + if ((typeof(c.d) !== 'number') || !(c.d > 0)) { throw new Error('Deletes must be a +ive number'); } + } else { + throw new Error('Operation component must define .i or .d'); + } + } else { + if (typeof(c) !== 'number') { throw new Error('Op components must be objects or numbers'); } + if (!(c > 0)) { throw new Error('Skip components must be a positive number'); } + if (typeof(last) === 'number') { throw new Error('Adjacent skip components should be combined'); } + } - last = c + result.push(last = c); + } + return result; + })(); +}; -# Take the next part from the specified position in a document snapshot. -# position = {index, offset}. It will be updated. -type._takeDoc = takeDoc = (doc, position, maxlength, tombsIndivisible) -> - throw new Error 'Operation goes past the end of the document' if position.index >= doc.data.length +// Take the next part from the specified position in a document snapshot. +// position = {index, offset}. It will be updated. 
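// Sketch of how a snapshot is walked (relative require path assumed):
const tp2 = require('./text-tp2');
const doc = tp2.deserialize(['Hello ', 5, 'world']); // 5 = five tombstones
const pos = {index: 0, offset: 0};
tp2._takeDoc(doc, pos, 3); // 'Hel'  (pos is now {index: 0, offset: 3})
tp2._takeDoc(doc, pos);    // 'lo '  (rest of the string part)
tp2._takeDoc(doc, pos, 2); // 2      (two of the five tombstones)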
+type._takeDoc = (takeDoc = function(doc, position, maxlength, tombsIndivisible) { + if (position.index >= doc.data.length) { throw new Error('Operation goes past the end of the document'); } - part = doc.data[position.index] - # peel off data[0] - result = if typeof(part) == 'string' - if maxlength != undefined - part[position.offset...(position.offset + maxlength)] - else - part[position.offset...] - else - if maxlength == undefined or tombsIndivisible + const part = doc.data[position.index]; + // peel off data[0] + const result = typeof(part) === 'string' ? + maxlength !== undefined ? + part.slice(position.offset, (position.offset + maxlength)) + : + part.slice(position.offset) + : + (maxlength === undefined) || tombsIndivisible ? part - position.offset - else - Math.min(maxlength, part - position.offset) + : + Math.min(maxlength, part - position.offset); - resultLen = result.length || result + const resultLen = result.length || result; - if (part.length || part) - position.offset > resultLen - position.offset += resultLen - else - position.index++ - position.offset = 0 + if (((part.length || part) - position.offset) > resultLen) { + position.offset += resultLen; + } else { + position.index++; + position.offset = 0; + } - result + return result; +}); -# Append a part to the end of a document -type._appendDoc = appendDoc = (doc, p) -> - return if p == 0 or p == '' +// Append a part to the end of a document +type._appendDoc = (appendDoc = function(doc, p) { + if ((p === 0) || (p === '')) { return; } - if typeof p is 'string' - doc.charLength += p.length - doc.totalLength += p.length - else - doc.totalLength += p + if (typeof p === 'string') { + doc.charLength += p.length; + doc.totalLength += p.length; + } else { + doc.totalLength += p; + } - data = doc.data - if data.length == 0 - data.push p - else if typeof(data[data.length - 1]) == typeof(p) - data[data.length - 1] += p - else - data.push p - return + const { + data + } = doc; + if (data.length === 0) { + data.push(p); + } else if (typeof(data[data.length - 1]) === typeof(p)) { + data[data.length - 1] += p; + } else { + data.push(p); + } +}); -# Apply the op to the document. The document is not modified in the process. -type.apply = (doc, op) -> - unless doc.totalLength != undefined and doc.charLength != undefined and doc.data.length != undefined - throw new Error('Snapshot is invalid') +// Apply the op to the document. The document is not modified in the process. 
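// Sketch: a TP2 delete tombstones characters instead of removing them, so
// totalLength is preserved while charLength shrinks (require path assumed):
const tp2b = require('./text-tp2');
const before = tp2b.deserialize(['Hello world']);
const after = tp2b.apply(before, [5, {d:6}]); // tombstone ' world'
// after.data === ['Hello', 6]; after.charLength === 5; after.totalLength === 11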
+type.apply = function(doc, op) { + if ((doc.totalLength === undefined) || (doc.charLength === undefined) || (doc.data.length === undefined)) { + throw new Error('Snapshot is invalid'); + } - checkOp op + checkOp(op); - newDoc = type.create() - position = {index:0, offset:0} + const newDoc = type.create(); + const position = {index:0, offset:0}; - for component in op - if typeof(component) is 'number' - remainder = component - while remainder > 0 - part = takeDoc doc, position, remainder + for (let component of Array.from(op)) { + var part, remainder; + if (typeof(component) === 'number') { + remainder = component; + while (remainder > 0) { + part = takeDoc(doc, position, remainder); - appendDoc newDoc, part - remainder -= part.length || part + appendDoc(newDoc, part); + remainder -= part.length || part; + } - else if component.i != undefined - appendDoc newDoc, component.i - else if component.d != undefined - remainder = component.d - while remainder > 0 - part = takeDoc doc, position, remainder - remainder -= part.length || part - appendDoc newDoc, component.d + } else if (component.i !== undefined) { + appendDoc(newDoc, component.i); + } else if (component.d !== undefined) { + remainder = component.d; + while (remainder > 0) { + part = takeDoc(doc, position, remainder); + remainder -= part.length || part; + } + appendDoc(newDoc, component.d); + } + } - newDoc + return newDoc; +}; -# Append an op component to the end of the specified op. -# Exported for the randomOpGenerator. -type._append = append = (op, component) -> - if component == 0 || component.i == '' || component.i == 0 || component.d == 0 - return - else if op.length == 0 - op.push component - else - last = op[op.length - 1] - if typeof(component) == 'number' && typeof(last) == 'number' - op[op.length - 1] += component - else if component.i != undefined && last.i? && typeof(last.i) == typeof(component.i) - last.i += component.i - else if component.d != undefined && last.d? - last.d += component.d - else - op.push component +// Append an op component to the end of the specified op. +// Exported for the randomOpGenerator. +type._append = (append = function(op, component) { + if ((component === 0) || (component.i === '') || (component.i === 0) || (component.d === 0)) { + return; + } else if (op.length === 0) { + return op.push(component); + } else { + const last = op[op.length - 1]; + if ((typeof(component) === 'number') && (typeof(last) === 'number')) { + return op[op.length - 1] += component; + } else if ((component.i !== undefined) && (last.i != null) && (typeof(last.i) === typeof(component.i))) { + return last.i += component.i; + } else if ((component.d !== undefined) && (last.d != null)) { + return last.d += component.d; + } else { + return op.push(component); + } + } +}); -# Makes 2 functions for taking components from the start of an op, and for peeking -# at the next op that could be taken. -makeTake = (op) -> - # The index of the next component to take - index = 0 - # The offset into the component - offset = 0 +// Makes 2 functions for taking components from the start of an op, and for peeking +// at the next op that could be taken. +const makeTake = function(op) { + // The index of the next component to take + let index = 0; + // The offset into the component + let offset = 0; - # Take up to length maxlength from the op. If maxlength is not defined, there is no max. - # If insertsIndivisible is true, inserts (& insert tombstones) won't be separated. - # - # Returns null when op is fully consumed. 
- take = (maxlength, insertsIndivisible) -> - return null if index == op.length + // Take up to length maxlength from the op. If maxlength is not defined, there is no max. + // If insertsIndivisible is true, inserts (& insert tombstones) won't be separated. + // + // Returns null when op is fully consumed. + const take = function(maxlength, insertsIndivisible) { + let current; + if (index === op.length) { return null; } - e = op[index] - if typeof((current = e)) == 'number' or typeof((current = e.i)) == 'number' or (current = e.d) != undefined - if !maxlength? or current - offset <= maxlength or (insertsIndivisible and e.i != undefined) - # Return the rest of the current element. - c = current - offset - ++index; offset = 0 - else - offset += maxlength - c = maxlength - if e.i != undefined then {i:c} else if e.d != undefined then {d:c} else c - else - # Take from the inserted string - if !maxlength? or e.i.length - offset <= maxlength or insertsIndivisible - result = {i:e.i[offset..]} - ++index; offset = 0 - else - result = {i:e.i[offset...offset + maxlength]} - offset += maxlength - result + const e = op[index]; + if ((typeof((current = e)) === 'number') || (typeof((current = e.i)) === 'number') || ((current = e.d) !== undefined)) { + let c; + if ((maxlength == null) || ((current - offset) <= maxlength) || (insertsIndivisible && (e.i !== undefined))) { + // Return the rest of the current element. + c = current - offset; + ++index; offset = 0; + } else { + offset += maxlength; + c = maxlength; + } + if (e.i !== undefined) { return {i:c}; } else if (e.d !== undefined) { return {d:c}; } else { return c; } + } else { + // Take from the inserted string + let result; + if ((maxlength == null) || ((e.i.length - offset) <= maxlength) || insertsIndivisible) { + result = {i:e.i.slice(offset)}; + ++index; offset = 0; + } else { + result = {i:e.i.slice(offset, offset + maxlength)}; + offset += maxlength; + } + return result; + } + }; - peekType = -> op[index] + const peekType = () => op[index]; - [take, peekType] + return [take, peekType]; +}; -# Find and return the length of an op component -componentLength = (component) -> - if typeof(component) == 'number' - component - else if typeof(component.i) == 'string' - component.i.length - else - # This should work because c.d and c.i must be +ive. - component.d or component.i +// Find and return the length of an op component +const componentLength = function(component) { + if (typeof(component) === 'number') { + return component; + } else if (typeof(component.i) === 'string') { + return component.i.length; + } else { + // This should work because c.d and c.i must be +ive. + return component.d || component.i; + } +}; -# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate -# adjacent inserts and deletes. -type.normalize = (op) -> - newOp = [] - append newOp, component for component in op - newOp +// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate +// adjacent inserts and deletes. +type.normalize = function(op) { + const newOp = []; + for (let component of Array.from(op)) { append(newOp, component); } + return newOp; +}; -# This is a helper method to transform and prune. goForwards is true for transform, false for prune. -transformer = (op, otherOp, goForwards, side) -> - checkOp op - checkOp otherOp - newOp = [] +// This is a helper method to transform and prune. goForwards is true for transform, false for prune. 
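// Sketch of the forward (transform) direction (require path assumed):
const tp2c = require('./text-tp2');
const client = [3, {i:'hi'}];            // client op on a 3-character document
const server = [{i:'xx'}, 3];            // concurrent server op
tp2c.transform(client, server, 'right'); // => [5, {i:'hi'}]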
+const transformer = function(op, otherOp, goForwards, side) { + let component; + checkOp(op); + checkOp(otherOp); + const newOp = []; - [take, peek] = makeTake op + const [take, peek] = Array.from(makeTake(op)); - for component in otherOp - length = componentLength component + for (component of Array.from(otherOp)) { + var chunk; + let length = componentLength(component); - if component.i != undefined # Insert text or tombs - if goForwards # transform - insert skips over inserted parts - if side == 'left' - # The left insert should go first. - append newOp, take() while peek()?.i != undefined + if (component.i !== undefined) { // Insert text or tombs + if (goForwards) { // transform - insert skips over inserted parts + if (side === 'left') { + // The left insert should go first. + while (__guard__(peek(), x => x.i) !== undefined) { append(newOp, take()); } + } - # In any case, skip the inserted text. - append newOp, length + // In any case, skip the inserted text. + append(newOp, length); - else # Prune. Remove skips for inserts. - while length > 0 - chunk = take length, true + } else { // Prune. Remove skips for inserts. + while (length > 0) { + chunk = take(length, true); - throw new Error 'The transformed op is invalid' unless chunk != null - throw new Error 'The transformed op deletes locally inserted characters - it cannot be purged of the insert.' if chunk.d != undefined + if (chunk === null) { throw new Error('The transformed op is invalid'); } + if (chunk.d !== undefined) { throw new Error('The transformed op deletes locally inserted characters - it cannot be purged of the insert.'); } - if typeof chunk is 'number' - length -= chunk - else - append newOp, chunk + if (typeof chunk === 'number') { + length -= chunk; + } else { + append(newOp, chunk); + } + } + } - else # Skip or delete - while length > 0 - chunk = take length, true - throw new Error('The op traverses more elements than the document has') unless chunk != null + } else { // Skip or delete + while (length > 0) { + chunk = take(length, true); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - append newOp, chunk - length -= componentLength chunk unless chunk.i + append(newOp, chunk); + if (!chunk.i) { length -= componentLength(chunk); } + } + } + } - # Append extras from op1 - while (component = take()) - throw new Error "Remaining fragments in the op: #{component}" unless component.i != undefined - append newOp, component + // Append extras from op1 + while (component = take()) { + if (component.i === undefined) { throw new Error(`Remaining fragments in the op: ${component}`); } + append(newOp, component); + } - newOp + return newOp; +}; -# transform op1 by op2. Return transformed version of op1. -# op1 and op2 are unchanged by transform. -# side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op. -type.transform = (op, otherOp, side) -> - throw new Error "side (#{side}) should be 'left' or 'right'" unless side == 'left' or side == 'right' - transformer op, otherOp, true, side +// transform op1 by op2. Return transformed version of op1. +// op1 and op2 are unchanged by transform. +// side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op. +type.transform = function(op, otherOp, side) { + if ((side !== 'left') && (side !== 'right')) { throw new Error(`side (${side}) should be 'left' or 'right'`); } + return transformer(op, otherOp, true, side); +}; -# Prune is the inverse of transform. 
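// Sketch: pruning the transformed op against the same other op recovers the
// original, per the inverse property stated above (require path assumed):
const tp2d = require('./text-tp2');
const orig = [3, {i:'hi'}];
const other = [{i:'xx'}, 3];
const transformed = tp2d.transform(orig, other, 'right'); // [5, {i:'hi'}]
tp2d.prune(transformed, other);                           // => [3, {i:'hi'}]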
-type.prune = (op, otherOp) -> transformer op, otherOp, false +// Prune is the inverse of transform. +type.prune = (op, otherOp) => transformer(op, otherOp, false); -# Compose 2 ops into 1 op. -type.compose = (op1, op2) -> - return op2 if op1 == null or op1 == undefined +// Compose 2 ops into 1 op. +type.compose = function(op1, op2) { + let component; + if ((op1 === null) || (op1 === undefined)) { return op2; } - checkOp op1 - checkOp op2 + checkOp(op1); + checkOp(op2); - result = [] + const result = []; - [take, _] = makeTake op1 + const [take, _] = Array.from(makeTake(op1)); - for component in op2 + for (component of Array.from(op2)) { - if typeof(component) == 'number' # Skip - # Just copy from op1. - length = component - while length > 0 - chunk = take length - throw new Error('The op traverses more elements than the document has') unless chunk != null + var chunk, length; + if (typeof(component) === 'number') { // Skip + // Just copy from op1. + length = component; + while (length > 0) { + chunk = take(length); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - append result, chunk - length -= componentLength chunk + append(result, chunk); + length -= componentLength(chunk); + } - else if component.i != undefined # Insert - append result, {i:component.i} + } else if (component.i !== undefined) { // Insert + append(result, {i:component.i}); - else # Delete - length = component.d - while length > 0 - chunk = take length - throw new Error('The op traverses more elements than the document has') unless chunk != null + } else { // Delete + length = component.d; + while (length > 0) { + chunk = take(length); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - chunkLength = componentLength chunk - if chunk.i != undefined - append result, {i:chunkLength} - else - append result, {d:chunkLength} + const chunkLength = componentLength(chunk); + if (chunk.i !== undefined) { + append(result, {i:chunkLength}); + } else { + append(result, {d:chunkLength}); + } - length -= chunkLength + length -= chunkLength; + } + } + } - # Append extras from op1 - while (component = take()) - throw new Error "Remaining fragments in op1: #{component}" unless component.i != undefined - append result, component + // Append extras from op1 + while (component = take()) { + if (component.i === undefined) { throw new Error(`Remaining fragments in op1: ${component}`); } + append(result, component); + } - result + return result; +}; -if WEB? - exports.types['text-tp2'] = type -else - module.exports = type +if (typeof WEB !== 'undefined' && WEB !== null) { + exports.types['text-tp2'] = type; +} else { + module.exports = type; +} + +function __guard__(value, transform) { + return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined; +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/sharejs/text.js b/services/document-updater/app/coffee/sharejs/text.js index c64b4dfa68..3ecb026c77 100644 --- a/services/document-updater/app/coffee/sharejs/text.js +++ b/services/document-updater/app/coffee/sharejs/text.js @@ -1,209 +1,245 @@ -# A simple text implementation -# -# Operations are lists of components. -# Each component either inserts or deletes at a specified position in the document. 
-# -# Components are either: -# {i:'str', p:100}: Insert 'str' at position 100 in the document -# {d:'str', p:100}: Delete 'str' at position 100 in the document -# -# Components in an operation are executed sequentially, so the position of components -# assumes previous components have already executed. -# -# Eg: This op: -# [{i:'abc', p:0}] -# is equivalent to this op: -# [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}] +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A simple text implementation +// +// Operations are lists of components. +// Each component either inserts or deletes at a specified position in the document. +// +// Components are either: +// {i:'str', p:100}: Insert 'str' at position 100 in the document +// {d:'str', p:100}: Delete 'str' at position 100 in the document +// +// Components in an operation are executed sequentially, so the position of components +// assumes previous components have already executed. +// +// Eg: This op: +// [{i:'abc', p:0}] +// is equivalent to this op: +// [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}] -# NOTE: The global scope here is shared with other sharejs files when built with closure. -# Be careful what ends up in your namespace. +// NOTE: The global scope here is shared with other sharejs files when built with closure. +// Be careful what ends up in your namespace. -text = {} +let append, transformComponent; +const text = {}; -text.name = 'text' +text.name = 'text'; -text.create = -> '' +text.create = () => ''; -strInject = (s1, pos, s2) -> s1[...pos] + s2 + s1[pos..] +const strInject = (s1, pos, s2) => s1.slice(0, pos) + s2 + s1.slice(pos); -checkValidComponent = (c) -> - throw new Error 'component missing position field' if typeof c.p != 'number' +const checkValidComponent = function(c) { + if (typeof c.p !== 'number') { throw new Error('component missing position field'); } - i_type = typeof c.i - d_type = typeof c.d - throw new Error 'component needs an i or d field' unless (i_type == 'string') ^ (d_type == 'string') + const i_type = typeof c.i; + const d_type = typeof c.d; + if (!((i_type === 'string') ^ (d_type === 'string'))) { throw new Error('component needs an i or d field'); } - throw new Error 'position cannot be negative' unless c.p >= 0 + if (!(c.p >= 0)) { throw new Error('position cannot be negative'); } +}; -checkValidOp = (op) -> - checkValidComponent(c) for c in op - true +const checkValidOp = function(op) { + for (let c of Array.from(op)) { checkValidComponent(c); } + return true; +}; -text.apply = (snapshot, op) -> - checkValidOp op - for component in op - if component.i? - snapshot = strInject snapshot, component.p, component.i - else - deleted = snapshot[component.p...(component.p + component.d.length)] - throw new Error "Delete component '#{component.d}' does not match deleted text '#{deleted}'" unless component.d == deleted - snapshot = snapshot[...component.p] + snapshot[(component.p + component.d.length)..] 
+text.apply = function(snapshot, op) { + checkValidOp(op); + for (let component of Array.from(op)) { + if (component.i != null) { + snapshot = strInject(snapshot, component.p, component.i); + } else { + const deleted = snapshot.slice(component.p, (component.p + component.d.length)); + if (component.d !== deleted) { throw new Error(`Delete component '${component.d}' does not match deleted text '${deleted}'`); } + snapshot = snapshot.slice(0, component.p) + snapshot.slice((component.p + component.d.length)); + } + } - snapshot + return snapshot; +}; -# Exported for use by the random op generator. -# -# For simplicity, this version of append does not compress adjacent inserts and deletes of -# the same text. It would be nice to change that at some stage. -text._append = append = (newOp, c) -> - return if c.i == '' or c.d == '' - if newOp.length == 0 - newOp.push c - else - last = newOp[newOp.length - 1] +// Exported for use by the random op generator. +// +// For simplicity, this version of append does not compress adjacent inserts and deletes of +// the same text. It would be nice to change that at some stage. +text._append = (append = function(newOp, c) { + if ((c.i === '') || (c.d === '')) { return; } + if (newOp.length === 0) { + return newOp.push(c); + } else { + const last = newOp[newOp.length - 1]; - # Compose the insert into the previous insert if possible - if last.i? && c.i? and last.p <= c.p <= (last.p + last.i.length) - newOp[newOp.length - 1] = {i:strInject(last.i, c.p - last.p, c.i), p:last.p} - else if last.d? && c.d? and c.p <= last.p <= (c.p + c.d.length) - newOp[newOp.length - 1] = {d:strInject(c.d, last.p - c.p, last.d), p:c.p} - else - newOp.push c + // Compose the insert into the previous insert if possible + if ((last.i != null) && (c.i != null) && (last.p <= c.p && c.p <= (last.p + last.i.length))) { + return newOp[newOp.length - 1] = {i:strInject(last.i, c.p - last.p, c.i), p:last.p}; + } else if ((last.d != null) && (c.d != null) && (c.p <= last.p && last.p <= (c.p + c.d.length))) { + return newOp[newOp.length - 1] = {d:strInject(c.d, last.p - c.p, last.d), p:c.p}; + } else { + return newOp.push(c); + } + } +}); -text.compose = (op1, op2) -> - checkValidOp op1 - checkValidOp op2 +text.compose = function(op1, op2) { + checkValidOp(op1); + checkValidOp(op2); - newOp = op1.slice() - append newOp, c for c in op2 + const newOp = op1.slice(); + for (let c of Array.from(op2)) { append(newOp, c); } - newOp + return newOp; +}; -# Attempt to compress the op components together 'as much as possible'. -# This implementation preserves order and preserves create/delete pairs. -text.compress = (op) -> text.compose [], op +// Attempt to compress the op components together 'as much as possible'. +// This implementation preserves order and preserves create/delete pairs. +text.compress = op => text.compose([], op); -text.normalize = (op) -> - newOp = [] +text.normalize = function(op) { + const newOp = []; - # Normalize should allow ops which are a single (unwrapped) component: - # {i:'asdf', p:23}. - # There's no good way to test if something is an array: - # http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/ - # so this is probably the least bad solution. - op = [op] if op.i? or op.p? + // Normalize should allow ops which are a single (unwrapped) component: + // {i:'asdf', p:23}. 
+ // There's no good way to test if something is an array: + // http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/ + // so this is probably the least bad solution. + if ((op.i != null) || (op.p != null)) { op = [op]; } - for c in op - c.p ?= 0 - append newOp, c + for (let c of Array.from(op)) { + if (c.p == null) { c.p = 0; } + append(newOp, c); + } - newOp + return newOp; +}; -# This helper method transforms a position by an op component. -# -# If c is an insert, insertAfter specifies whether the transform -# is pushed after the insert (true) or before it (false). -# -# insertAfter is optional for deletes. -transformPosition = (pos, c, insertAfter) -> - if c.i? - if c.p < pos || (c.p == pos && insertAfter) - pos + c.i.length - else - pos - else - # I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length)) - # but I think its harder to read that way, and it compiles using ternary operators anyway - # so its no slower written like this. - if pos <= c.p - pos - else if pos <= c.p + c.d.length - c.p - else - pos - c.d.length +// This helper method transforms a position by an op component. +// +// If c is an insert, insertAfter specifies whether the transform +// is pushed after the insert (true) or before it (false). +// +// insertAfter is optional for deletes. +const transformPosition = function(pos, c, insertAfter) { + if (c.i != null) { + if ((c.p < pos) || ((c.p === pos) && insertAfter)) { + return pos + c.i.length; + } else { + return pos; + } + } else { + // I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length)) + // but I think its harder to read that way, and it compiles using ternary operators anyway + // so its no slower written like this. + if (pos <= c.p) { + return pos; + } else if (pos <= (c.p + c.d.length)) { + return c.p; + } else { + return pos - c.d.length; + } + } +}; -# Helper method to transform a cursor position as a result of an op. -# -# Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position -# is pushed after an insert (true) or before it (false). -text.transformCursor = (position, op, side) -> - insertAfter = side == 'right' - position = transformPosition position, c, insertAfter for c in op - position +// Helper method to transform a cursor position as a result of an op. +// +// Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position +// is pushed after an insert (true) or before it (false). +text.transformCursor = function(position, op, side) { + const insertAfter = side === 'right'; + for (let c of Array.from(op)) { position = transformPosition(position, c, insertAfter); } + return position; +}; -# Transform an op component by another op component. Asymmetric. -# The result will be appended to destination. -# -# exported for use in JSON type -text._tc = transformComponent = (dest, c, otherC, side) -> - checkValidOp [c] - checkValidOp [otherC] +// Transform an op component by another op component. Asymmetric. +// The result will be appended to destination. +// +// exported for use in JSON type +text._tc = (transformComponent = function(dest, c, otherC, side) { + checkValidOp([c]); + checkValidOp([otherC]); - if c.i? - append dest, {i:c.i, p:transformPosition(c.p, otherC, side == 'right')} + if (c.i != null) { + append(dest, {i:c.i, p:transformPosition(c.p, otherC, side === 'right')}); - else # Delete - if otherC.i? 
# delete vs insert - s = c.d - if c.p < otherC.p - append dest, {d:s[...otherC.p - c.p], p:c.p} - s = s[(otherC.p - c.p)..] - if s != '' - append dest, {d:s, p:c.p + otherC.i.length} + } else { // Delete + if (otherC.i != null) { // delete vs insert + let s = c.d; + if (c.p < otherC.p) { + append(dest, {d:s.slice(0, otherC.p - c.p), p:c.p}); + s = s.slice((otherC.p - c.p)); + } + if (s !== '') { + append(dest, {d:s, p:c.p + otherC.i.length}); + } - else # Delete vs delete - if c.p >= otherC.p + otherC.d.length - append dest, {d:c.d, p:c.p - otherC.d.length} - else if c.p + c.d.length <= otherC.p - append dest, c - else - # They overlap somewhere. - newC = {d:'', p:c.p} - if c.p < otherC.p - newC.d = c.d[...(otherC.p - c.p)] - if c.p + c.d.length > otherC.p + otherC.d.length - newC.d += c.d[(otherC.p + otherC.d.length - c.p)..] + } else { // Delete vs delete + if (c.p >= (otherC.p + otherC.d.length)) { + append(dest, {d:c.d, p:c.p - otherC.d.length}); + } else if ((c.p + c.d.length) <= otherC.p) { + append(dest, c); + } else { + // They overlap somewhere. + const newC = {d:'', p:c.p}; + if (c.p < otherC.p) { + newC.d = c.d.slice(0, (otherC.p - c.p)); + } + if ((c.p + c.d.length) > (otherC.p + otherC.d.length)) { + newC.d += c.d.slice(((otherC.p + otherC.d.length) - c.p)); + } - # This is entirely optional - just for a check that the deleted - # text in the two ops matches - intersectStart = Math.max c.p, otherC.p - intersectEnd = Math.min c.p + c.d.length, otherC.p + otherC.d.length - cIntersect = c.d[intersectStart - c.p...intersectEnd - c.p] - otherIntersect = otherC.d[intersectStart - otherC.p...intersectEnd - otherC.p] - throw new Error 'Delete ops delete different text in the same region of the document' unless cIntersect == otherIntersect + // This is entirely optional - just for a check that the deleted + // text in the two ops matches + const intersectStart = Math.max(c.p, otherC.p); + const intersectEnd = Math.min(c.p + c.d.length, otherC.p + otherC.d.length); + const cIntersect = c.d.slice(intersectStart - c.p, intersectEnd - c.p); + const otherIntersect = otherC.d.slice(intersectStart - otherC.p, intersectEnd - otherC.p); + if (cIntersect !== otherIntersect) { throw new Error('Delete ops delete different text in the same region of the document'); } - if newC.d != '' - # This could be rewritten similarly to insert v delete, above. - newC.p = transformPosition newC.p, otherC - append dest, newC + if (newC.d !== '') { + // This could be rewritten similarly to insert v delete, above. + newC.p = transformPosition(newC.p, otherC); + append(dest, newC); + } + } + } + } - dest + return dest; +}); -invertComponent = (c) -> - if c.i? - {d:c.i, p:c.p} - else - {i:c.d, p:c.p} +const invertComponent = function(c) { + if (c.i != null) { + return {d:c.i, p:c.p}; + } else { + return {i:c.d, p:c.p}; + } +}; -# No need to use append for invert, because the components won't be able to -# cancel with one another. -text.invert = (op) -> (invertComponent c for c in op.slice().reverse()) +// No need to use append for invert, because the components won't be able to +// cancel with one another. +text.invert = op => Array.from(op.slice().reverse()).map((c) => invertComponent(c)); -if WEB? - exports.types ||= {} +if (typeof WEB !== 'undefined' && WEB !== null) { + if (!exports.types) { exports.types = {}; } - # This is kind of awful - come up with a better way to hook this helper code up. 
- bootstrapTransform(text, transformComponent, checkValidOp, append) + // This is kind of awful - come up with a better way to hook this helper code up. + bootstrapTransform(text, transformComponent, checkValidOp, append); - # [] is used to prevent closure from renaming types.text - exports.types.text = text -else - module.exports = text + // [] is used to prevent closure from renaming types.text + exports.types.text = text; +} else { + module.exports = text; - # The text type really shouldn't need this - it should be possible to define - # an efficient transform function by making a sort of transform map and passing each - # op component through it. - require('./helpers').bootstrapTransform(text, transformComponent, checkValidOp, append) + // The text type really shouldn't need this - it should be possible to define + // an efficient transform function by making a sort of transform map and passing each + // op component through it. + require('./helpers').bootstrapTransform(text, transformComponent, checkValidOp, append); +} diff --git a/services/document-updater/app/coffee/sharejs/types/count.js b/services/document-updater/app/coffee/sharejs/types/count.js index da28355efb..ffc3337ac7 100644 --- a/services/document-updater/app/coffee/sharejs/types/count.js +++ b/services/document-updater/app/coffee/sharejs/types/count.js @@ -1,22 +1,30 @@ -# This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment] +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment] -exports.name = 'count' -exports.create = -> 1 +exports.name = 'count'; +exports.create = () => 1; -exports.apply = (snapshot, op) -> - [v, inc] = op - throw new Error "Op #{v} != snapshot #{snapshot}" unless snapshot == v - snapshot + inc +exports.apply = function(snapshot, op) { + const [v, inc] = Array.from(op); + if (snapshot !== v) { throw new Error(`Op ${v} != snapshot ${snapshot}`); } + return snapshot + inc; +}; -# transform op1 by op2. Return transformed version of op1. -exports.transform = (op1, op2) -> - throw new Error "Op1 #{op1[0]} != op2 #{op2[0]}" unless op1[0] == op2[0] - [op1[0] + op2[1], op1[1]] +// transform op1 by op2. Return transformed version of op1. 
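// (Editorial example, not part of this patch: with ops as
// [expectedSnapshot, increment] pairs, transforming [3, 1] by a concurrent
// [3, 2] yields [5, 1]: the increment is unchanged, but the expected
// snapshot is advanced past op2's increment. Composing [3, 1] with [4, 2]
// yields [3, 3].)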
+exports.transform = function(op1, op2) { + if (op1[0] !== op2[0]) { throw new Error(`Op1 ${op1[0]} != op2 ${op2[0]}`); } + return [op1[0] + op2[1], op1[1]]; +}; -exports.compose = (op1, op2) -> - throw new Error "Op1 #{op1} + 1 != op2 #{op2}" unless op1[0] + op1[1] == op2[0] - [op1[0], op1[1] + op2[1]] +exports.compose = function(op1, op2) { + if ((op1[0] + op1[1]) !== op2[0]) { throw new Error(`Op1 ${op1} + 1 != op2 ${op2}`); } + return [op1[0], op1[1] + op2[1]]; +}; -exports.generateRandomOp = (doc) -> - [[doc, 1], doc + 1] +exports.generateRandomOp = doc => [[doc, 1], doc + 1]; diff --git a/services/document-updater/app/coffee/sharejs/types/helpers.js b/services/document-updater/app/coffee/sharejs/types/helpers.js index 093b32e1bb..81a561de03 100644 --- a/services/document-updater/app/coffee/sharejs/types/helpers.js +++ b/services/document-updater/app/coffee/sharejs/types/helpers.js @@ -1,65 +1,87 @@ -# These methods let you build a transform function from a transformComponent function -# for OT types like text and JSON in which operations are lists of components -# and transforming them requires N^2 work. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// These methods let you build a transform function from a transformComponent function +// for OT types like text and JSON in which operations are lists of components +// and transforming them requires N^2 work. -# Add transform and transformX functions for an OT type which has transformComponent defined. -# transformComponent(destination array, component, other component, side) -exports['_bt'] = bootstrapTransform = (type, transformComponent, checkValidOp, append) -> - transformComponentX = (left, right, destLeft, destRight) -> - transformComponent destLeft, left, right, 'left' - transformComponent destRight, right, left, 'right' +// Add transform and transformX functions for an OT type which has transformComponent defined. +// transformComponent(destination array, component, other component, side) +let bootstrapTransform; +exports['_bt'] = (bootstrapTransform = function(type, transformComponent, checkValidOp, append) { + let transformX; + const transformComponentX = function(left, right, destLeft, destRight) { + transformComponent(destLeft, left, right, 'left'); + return transformComponent(destRight, right, left, 'right'); + }; - # Transforms rightOp by leftOp. Returns ['rightOp', clientOp'] - type.transformX = type['transformX'] = transformX = (leftOp, rightOp) -> - checkValidOp leftOp - checkValidOp rightOp + // Transforms rightOp by leftOp. 
Returns [leftOp', rightOp'].
+  type.transformX = (type['transformX'] = (transformX = function(leftOp, rightOp) {
+    checkValidOp(leftOp);
+    checkValidOp(rightOp);
-    newRightOp = []
+    const newRightOp = [];
-    for rightComponent in rightOp
-      # Generate newLeftOp by composing leftOp by rightComponent
-      newLeftOp = []
+    for (let rightComponent of Array.from(rightOp)) {
+      // Generate newLeftOp by composing leftOp by rightComponent
+      const newLeftOp = [];
-      k = 0
-      while k < leftOp.length
-        nextC = []
-        transformComponentX leftOp[k], rightComponent, newLeftOp, nextC
-        k++
+      let k = 0;
+      while (k < leftOp.length) {
+        var l;
+        const nextC = [];
+        transformComponentX(leftOp[k], rightComponent, newLeftOp, nextC);
+        k++;
-        if nextC.length == 1
-          rightComponent = nextC[0]
-        else if nextC.length == 0
-          append newLeftOp, l for l in leftOp[k..]
-          rightComponent = null
-          break
-        else
-          # Recurse.
-          [l_, r_] = transformX leftOp[k..], nextC
-          append newLeftOp, l for l in l_
-          append newRightOp, r for r in r_
-          rightComponent = null
-          break
+        if (nextC.length === 1) {
+          rightComponent = nextC[0];
+        } else if (nextC.length === 0) {
+          for (l of Array.from(leftOp.slice(k))) { append(newLeftOp, l); }
+          rightComponent = null;
+          break;
+        } else {
+          // Recurse.
+          const [l_, r_] = Array.from(transformX(leftOp.slice(k), nextC));
+          for (l of Array.from(l_)) { append(newLeftOp, l); }
+          for (let r of Array.from(r_)) { append(newRightOp, r); }
+          rightComponent = null;
+          break;
+        }
+      }
-      append newRightOp, rightComponent if rightComponent?
-      leftOp = newLeftOp
+      if (rightComponent != null) { append(newRightOp, rightComponent); }
+      leftOp = newLeftOp;
+    }
-    [leftOp, newRightOp]
+    return [leftOp, newRightOp];
+  }));
-  # Transforms op with specified type ('left' or 'right') by otherOp.
-  type.transform = type['transform'] = (op, otherOp, type) ->
-    throw new Error "type must be 'left' or 'right'" unless type == 'left' or type == 'right'
+  // Transforms op with specified type ('left' or 'right') by otherOp.
+  return type.transform = (type['transform'] = function(op, otherOp, type) {
+    let _;
+    if ((type !== 'left') && (type !== 'right')) { throw new Error("type must be 'left' or 'right'"); }
-    return op if otherOp.length == 0
+    if (otherOp.length === 0) { return op; }
-    # TODO: Benchmark with and without this line. I _think_ it'll make a big difference...?
-    return transformComponent [], op[0], otherOp[0], type if op.length == 1 and otherOp.length == 1
+    // TODO: Benchmark with and without this line. I _think_ it'll make a big difference...?
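// (Editorial example, not part of this patch: on the single-component fast
// path below, with the text type, transform([{i:'x', p:0}], [{i:'y', p:0}], 'left')
// keeps the insert at p:0, while side 'right' shifts it to p:1, so two
// concurrent inserts at the same position are ordered deterministically.)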
+ if ((op.length === 1) && (otherOp.length === 1)) { return transformComponent([], op[0], otherOp[0], type); } - if type == 'left' - [left, _] = transformX op, otherOp - left - else - [_, right] = transformX otherOp, op - right + if (type === 'left') { + let left; + [left, _] = Array.from(transformX(op, otherOp)); + return left; + } else { + let right; + [_, right] = Array.from(transformX(otherOp, op)); + return right; + } + }); +}); -if typeof WEB is 'undefined' - exports.bootstrapTransform = bootstrapTransform +if (typeof WEB === 'undefined') { + exports.bootstrapTransform = bootstrapTransform; +} diff --git a/services/document-updater/app/coffee/sharejs/types/index.js b/services/document-updater/app/coffee/sharejs/types/index.js index 6f3bb8ec20..bf681de7cd 100644 --- a/services/document-updater/app/coffee/sharejs/types/index.js +++ b/services/document-updater/app/coffee/sharejs/types/index.js @@ -1,15 +1,21 @@ +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ -register = (file) -> - type = require file - exports[type.name] = type - try require "#{file}-api" +const register = function(file) { + const type = require(file); + exports[type.name] = type; + try { return require(`${file}-api`); } catch (error) {} +}; -# Import all the built-in types. -register './simple' -register './count' +// Import all the built-in types. +register('./simple'); +register('./count'); -register './text' -register './text-composable' -register './text-tp2' +register('./text'); +register('./text-composable'); +register('./text-tp2'); -register './json' +register('./json'); diff --git a/services/document-updater/app/coffee/sharejs/types/json-api.js b/services/document-updater/app/coffee/sharejs/types/json-api.js index 8819dee798..1c7c2633ba 100644 --- a/services/document-updater/app/coffee/sharejs/types/json-api.js +++ b/services/document-updater/app/coffee/sharejs/types/json-api.js @@ -1,180 +1,273 @@ -# API for JSON OT +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// API for JSON OT -json = require './json' if typeof WEB is 'undefined' +let json; +if (typeof WEB === 'undefined') { json = require('./json'); } -if WEB? - extendDoc = exports.extendDoc - exports.extendDoc = (name, fn) -> - SubDoc::[name] = fn - extendDoc name, fn +if (typeof WEB !== 'undefined' && WEB !== null) { + const { + extendDoc + } = exports; + exports.extendDoc = function(name, fn) { + SubDoc.prototype[name] = fn; + return extendDoc(name, fn); + }; +} -depath = (path) -> - if path.length == 1 and path[0].constructor == Array - path[0] - else path +const depath = function(path) { + if ((path.length === 1) && (path[0].constructor === Array)) { + return path[0]; + } else { return path; } +}; -class SubDoc - constructor: (@doc, @path) -> - at: (path...) -> @doc.at @path.concat depath path - get: -> @doc.getAt @path - # for objects and lists - set: (value, cb) -> @doc.setAt @path, value, cb - # for strings and lists. 
- insert: (pos, value, cb) -> @doc.insertAt @path, pos, value, cb - # for strings - del: (pos, length, cb) -> @doc.deleteTextAt @path, length, pos, cb - # for objects and lists - remove: (cb) -> @doc.removeAt @path, cb - push: (value, cb) -> @insert @get().length, value, cb - move: (from, to, cb) -> @doc.moveAt @path, from, to, cb - add: (amount, cb) -> @doc.addAt @path, amount, cb - on: (event, cb) -> @doc.addListener @path, event, cb - removeListener: (l) -> @doc.removeListener l +class SubDoc { + constructor(doc, path) { + this.doc = doc; + this.path = path; + } + at(...path) { return this.doc.at(this.path.concat(depath(path))); } + get() { return this.doc.getAt(this.path); } + // for objects and lists + set(value, cb) { return this.doc.setAt(this.path, value, cb); } + // for strings and lists. + insert(pos, value, cb) { return this.doc.insertAt(this.path, pos, value, cb); } + // for strings + del(pos, length, cb) { return this.doc.deleteTextAt(this.path, length, pos, cb); } + // for objects and lists + remove(cb) { return this.doc.removeAt(this.path, cb); } + push(value, cb) { return this.insert(this.get().length, value, cb); } + move(from, to, cb) { return this.doc.moveAt(this.path, from, to, cb); } + add(amount, cb) { return this.doc.addAt(this.path, amount, cb); } + on(event, cb) { return this.doc.addListener(this.path, event, cb); } + removeListener(l) { return this.doc.removeListener(l); } - # text API compatibility - getLength: -> @get().length - getText: -> @get() + // text API compatibility + getLength() { return this.get().length; } + getText() { return this.get(); } +} -traverse = (snapshot, path) -> - container = data:snapshot - key = 'data' - elem = container - for p in path - elem = elem[key] - key = p - throw new Error 'bad path' if typeof elem == 'undefined' - {elem, key} +const traverse = function(snapshot, path) { + const container = {data:snapshot}; + let key = 'data'; + let elem = container; + for (let p of Array.from(path)) { + elem = elem[key]; + key = p; + if (typeof elem === 'undefined') { throw new Error('bad path'); } + } + return {elem, key}; +}; -pathEquals = (p1, p2) -> - return false if p1.length != p2.length - for e,i in p1 - return false if e != p2[i] - true +const pathEquals = function(p1, p2) { + if (p1.length !== p2.length) { return false; } + for (let i = 0; i < p1.length; i++) { + const e = p1[i]; + if (e !== p2[i]) { return false; } + } + return true; +}; -json.api = - provides: {json:true} +json.api = { + provides: {json:true}, - at: (path...) 
-> new SubDoc this, depath path + at(...path) { return new SubDoc(this, depath(path)); }, - get: -> @snapshot - set: (value, cb) -> @setAt [], value, cb + get() { return this.snapshot; }, + set(value, cb) { return this.setAt([], value, cb); }, - getAt: (path) -> - {elem, key} = traverse @snapshot, path - return elem[key] + getAt(path) { + const {elem, key} = traverse(this.snapshot, path); + return elem[key]; + }, - setAt: (path, value, cb) -> - {elem, key} = traverse @snapshot, path - op = {p:path} - if elem.constructor == Array - op.li = value - op.ld = elem[key] if typeof elem[key] != 'undefined' - else if typeof elem == 'object' - op.oi = value - op.od = elem[key] if typeof elem[key] != 'undefined' - else throw new Error 'bad path' - @submitOp [op], cb + setAt(path, value, cb) { + const {elem, key} = traverse(this.snapshot, path); + const op = {p:path}; + if (elem.constructor === Array) { + op.li = value; + if (typeof elem[key] !== 'undefined') { op.ld = elem[key]; } + } else if (typeof elem === 'object') { + op.oi = value; + if (typeof elem[key] !== 'undefined') { op.od = elem[key]; } + } else { throw new Error('bad path'); } + return this.submitOp([op], cb); + }, - removeAt: (path, cb) -> - {elem, key} = traverse @snapshot, path - throw new Error 'no element at that path' unless typeof elem[key] != 'undefined' - op = {p:path} - if elem.constructor == Array - op.ld = elem[key] - else if typeof elem == 'object' - op.od = elem[key] - else throw new Error 'bad path' - @submitOp [op], cb + removeAt(path, cb) { + const {elem, key} = traverse(this.snapshot, path); + if (typeof elem[key] === 'undefined') { throw new Error('no element at that path'); } + const op = {p:path}; + if (elem.constructor === Array) { + op.ld = elem[key]; + } else if (typeof elem === 'object') { + op.od = elem[key]; + } else { throw new Error('bad path'); } + return this.submitOp([op], cb); + }, - insertAt: (path, pos, value, cb) -> - {elem, key} = traverse @snapshot, path - op = {p:path.concat pos} - if elem[key].constructor == Array - op.li = value - else if typeof elem[key] == 'string' - op.si = value - @submitOp [op], cb + insertAt(path, pos, value, cb) { + const {elem, key} = traverse(this.snapshot, path); + const op = {p:path.concat(pos)}; + if (elem[key].constructor === Array) { + op.li = value; + } else if (typeof elem[key] === 'string') { + op.si = value; + } + return this.submitOp([op], cb); + }, - moveAt: (path, from, to, cb) -> - op = [{p:path.concat(from), lm:to}] - @submitOp op, cb + moveAt(path, from, to, cb) { + const op = [{p:path.concat(from), lm:to}]; + return this.submitOp(op, cb); + }, - addAt: (path, amount, cb) -> - op = [{p:path, na:amount}] - @submitOp op, cb + addAt(path, amount, cb) { + const op = [{p:path, na:amount}]; + return this.submitOp(op, cb); + }, - deleteTextAt: (path, length, pos, cb) -> - {elem, key} = traverse @snapshot, path - op = [{p:path.concat(pos), sd:elem[key][pos...(pos + length)]}] - @submitOp op, cb + deleteTextAt(path, length, pos, cb) { + const {elem, key} = traverse(this.snapshot, path); + const op = [{p:path.concat(pos), sd:elem[key].slice(pos, (pos + length))}]; + return this.submitOp(op, cb); + }, - addListener: (path, event, cb) -> - l = {path, event, cb} - @_listeners.push l - l - removeListener: (l) -> - i = @_listeners.indexOf l - return false if i < 0 - @_listeners.splice i, 1 - return true - _register: -> - @_listeners = [] - @on 'change', (op) -> - for c in op - if c.na != undefined or c.si != undefined or c.sd != undefined - # no change to structure - 
continue - to_remove = [] - for l, i in @_listeners - # Transform a dummy op by the incoming op to work out what - # should happen to the listener. - dummy = {p:l.path, na:0} - xformed = @type.transformComponent [], dummy, c, 'left' - if xformed.length == 0 - # The op was transformed to noop, so we should delete the listener. - to_remove.push i - else if xformed.length == 1 - # The op remained, so grab its new path into the listener. - l.path = xformed[0].p - else - throw new Error "Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components." - to_remove.sort (a, b) -> b - a - for i in to_remove - @_listeners.splice i, 1 - @on 'remoteop', (op) -> - for c in op - match_path = if c.na == undefined then c.p[...c.p.length-1] else c.p - for {path, event, cb} in @_listeners - if pathEquals path, match_path - switch event - when 'insert' - if c.li != undefined and c.ld == undefined - cb(c.p[c.p.length-1], c.li) - else if c.oi != undefined and c.od == undefined - cb(c.p[c.p.length-1], c.oi) - else if c.si != undefined - cb(c.p[c.p.length-1], c.si) - when 'delete' - if c.li == undefined and c.ld != undefined - cb(c.p[c.p.length-1], c.ld) - else if c.oi == undefined and c.od != undefined - cb(c.p[c.p.length-1], c.od) - else if c.sd != undefined - cb(c.p[c.p.length-1], c.sd) - when 'replace' - if c.li != undefined and c.ld != undefined - cb(c.p[c.p.length-1], c.ld, c.li) - else if c.oi != undefined and c.od != undefined - cb(c.p[c.p.length-1], c.od, c.oi) - when 'move' - if c.lm != undefined - cb(c.p[c.p.length-1], c.lm) - when 'add' - if c.na != undefined - cb(c.na) - else if (common = @type.commonPath match_path, path)? - if event == 'child op' - if match_path.length == path.length == common - throw new Error "paths match length and have commonality, but aren't equal?" - child_path = c.p[common+1..] - cb(child_path, c) + addListener(path, event, cb) { + const l = {path, event, cb}; + this._listeners.push(l); + return l; + }, + removeListener(l) { + const i = this._listeners.indexOf(l); + if (i < 0) { return false; } + this._listeners.splice(i, 1); + return true; + }, + _register() { + this._listeners = []; + this.on('change', function(op) { + return (() => { + const result = []; + for (let c of Array.from(op)) { + var i; + if ((c.na !== undefined) || (c.si !== undefined) || (c.sd !== undefined)) { + // no change to structure + continue; + } + var to_remove = []; + for (i = 0; i < this._listeners.length; i++) { + // Transform a dummy op by the incoming op to work out what + // should happen to the listener. + const l = this._listeners[i]; + const dummy = {p:l.path, na:0}; + const xformed = this.type.transformComponent([], dummy, c, 'left'); + if (xformed.length === 0) { + // The op was transformed to noop, so we should delete the listener. + to_remove.push(i); + } else if (xformed.length === 1) { + // The op remained, so grab its new path into the listener. + l.path = xformed[0].p; + } else { + throw new Error("Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components."); + } + } + to_remove.sort((a, b) => b - a); + result.push((() => { + const result1 = []; + for (i of Array.from(to_remove)) { + result1.push(this._listeners.splice(i, 1)); + } + return result1; + })()); + } + return result; + })(); + }); + return this.on('remoteop', function(op) { + return (() => { + const result = []; + for (var c of Array.from(op)) { + var match_path = c.na === undefined ? 
c.p.slice(0, c.p.length-1) : c.p; + result.push((() => { + const result1 = []; + for (let {path, event, cb} of Array.from(this._listeners)) { + var common; + if (pathEquals(path, match_path)) { + switch (event) { + case 'insert': + if ((c.li !== undefined) && (c.ld === undefined)) { + result1.push(cb(c.p[c.p.length-1], c.li)); + } else if ((c.oi !== undefined) && (c.od === undefined)) { + result1.push(cb(c.p[c.p.length-1], c.oi)); + } else if (c.si !== undefined) { + result1.push(cb(c.p[c.p.length-1], c.si)); + } else { + result1.push(undefined); + } + break; + case 'delete': + if ((c.li === undefined) && (c.ld !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.ld)); + } else if ((c.oi === undefined) && (c.od !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.od)); + } else if (c.sd !== undefined) { + result1.push(cb(c.p[c.p.length-1], c.sd)); + } else { + result1.push(undefined); + } + break; + case 'replace': + if ((c.li !== undefined) && (c.ld !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.ld, c.li)); + } else if ((c.oi !== undefined) && (c.od !== undefined)) { + result1.push(cb(c.p[c.p.length-1], c.od, c.oi)); + } else { + result1.push(undefined); + } + break; + case 'move': + if (c.lm !== undefined) { + result1.push(cb(c.p[c.p.length-1], c.lm)); + } else { + result1.push(undefined); + } + break; + case 'add': + if (c.na !== undefined) { + result1.push(cb(c.na)); + } else { + result1.push(undefined); + } + break; + default: + result1.push(undefined); + } + } else if ((common = this.type.commonPath(match_path, path)) != null) { + if (event === 'child op') { + if (match_path.length === path.length && path.length === common) { + throw new Error("paths match length and have commonality, but aren't equal?"); + } + const child_path = c.p.slice(common+1); + result1.push(cb(child_path, c)); + } else { + result1.push(undefined); + } + } else { + result1.push(undefined); + } + } + return result1; + })()); + } + return result; + })(); + }); + } +}; diff --git a/services/document-updater/app/coffee/sharejs/types/json.js b/services/document-updater/app/coffee/sharejs/types/json.js index b03b0947ef..3e3bee79d9 100644 --- a/services/document-updater/app/coffee/sharejs/types/json.js +++ b/services/document-updater/app/coffee/sharejs/types/json.js @@ -1,441 +1,534 @@ -# This is the implementation of the JSON OT type. -# -# Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This is the implementation of the JSON OT type. +// +// Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations -if WEB? 
- text = exports.types.text -else - text = require './text' +let text; +if (typeof WEB !== 'undefined' && WEB !== null) { + ({ + text + } = exports.types); +} else { + text = require('./text'); +} -json = {} +const json = {}; -json.name = 'json' +json.name = 'json'; -json.create = -> null +json.create = () => null; -json.invertComponent = (c) -> - c_ = {p: c.p} - c_.sd = c.si if c.si != undefined - c_.si = c.sd if c.sd != undefined - c_.od = c.oi if c.oi != undefined - c_.oi = c.od if c.od != undefined - c_.ld = c.li if c.li != undefined - c_.li = c.ld if c.ld != undefined - c_.na = -c.na if c.na != undefined - if c.lm != undefined - c_.lm = c.p[c.p.length-1] - c_.p = c.p[0...c.p.length - 1].concat([c.lm]) - c_ +json.invertComponent = function(c) { + const c_ = {p: c.p}; + if (c.si !== undefined) { c_.sd = c.si; } + if (c.sd !== undefined) { c_.si = c.sd; } + if (c.oi !== undefined) { c_.od = c.oi; } + if (c.od !== undefined) { c_.oi = c.od; } + if (c.li !== undefined) { c_.ld = c.li; } + if (c.ld !== undefined) { c_.li = c.ld; } + if (c.na !== undefined) { c_.na = -c.na; } + if (c.lm !== undefined) { + c_.lm = c.p[c.p.length-1]; + c_.p = c.p.slice(0, c.p.length - 1).concat([c.lm]); + } + return c_; +}; -json.invert = (op) -> json.invertComponent c for c in op.slice().reverse() +json.invert = op => Array.from(op.slice().reverse()).map((c) => json.invertComponent(c)); -json.checkValidOp = (op) -> +json.checkValidOp = function(op) {}; -isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]' -json.checkList = (elem) -> - throw new Error 'Referenced element not a list' unless isArray(elem) +const isArray = o => Object.prototype.toString.call(o) === '[object Array]'; +json.checkList = function(elem) { + if (!isArray(elem)) { throw new Error('Referenced element not a list'); } +}; -json.checkObj = (elem) -> - throw new Error "Referenced element not an object (it was #{JSON.stringify elem})" unless elem.constructor is Object +json.checkObj = function(elem) { + if (elem.constructor !== Object) { throw new Error(`Referenced element not an object (it was ${JSON.stringify(elem)})`); } +}; -json.apply = (snapshot, op) -> - json.checkValidOp op - op = clone op +json.apply = function(snapshot, op) { + json.checkValidOp(op); + op = clone(op); - container = {data: clone snapshot} + const container = {data: clone(snapshot)}; - try - for c, i in op - parent = null - parentkey = null - elem = container - key = 'data' + try { + for (let i = 0; i < op.length; i++) { + const c = op[i]; + let parent = null; + let parentkey = null; + let elem = container; + let key = 'data'; - for p in c.p - parent = elem - parentkey = key - elem = elem[key] - key = p + for (let p of Array.from(c.p)) { + parent = elem; + parentkey = key; + elem = elem[key]; + key = p; - throw new Error 'Path invalid' unless parent? + if (parent == null) { throw new Error('Path invalid'); } + } - if c.na != undefined - # Number add - throw new Error 'Referenced element not a number' unless typeof elem[key] is 'number' - elem[key] += c.na + if (c.na !== undefined) { + // Number add + if (typeof elem[key] !== 'number') { throw new Error('Referenced element not a number'); } + elem[key] += c.na; - else if c.si != undefined - # String insert - throw new Error "Referenced element not a string (it was #{JSON.stringify elem})" unless typeof elem is 'string' - parent[parentkey] = elem[...key] + c.si + elem[key..] 
- else if c.sd != undefined - # String delete - throw new Error 'Referenced element not a string' unless typeof elem is 'string' - throw new Error 'Deleted string does not match' unless elem[key...key + c.sd.length] == c.sd - parent[parentkey] = elem[...key] + elem[key + c.sd.length..] + } else if (c.si !== undefined) { + // String insert + if (typeof elem !== 'string') { throw new Error(`Referenced element not a string (it was ${JSON.stringify(elem)})`); } + parent[parentkey] = elem.slice(0, key) + c.si + elem.slice(key); + } else if (c.sd !== undefined) { + // String delete + if (typeof elem !== 'string') { throw new Error('Referenced element not a string'); } + if (elem.slice(key, key + c.sd.length) !== c.sd) { throw new Error('Deleted string does not match'); } + parent[parentkey] = elem.slice(0, key) + elem.slice(key + c.sd.length); - else if c.li != undefined && c.ld != undefined - # List replace - json.checkList elem + } else if ((c.li !== undefined) && (c.ld !== undefined)) { + // List replace + json.checkList(elem); - # Should check the list element matches c.ld - elem[key] = c.li - else if c.li != undefined - # List insert - json.checkList elem + // Should check the list element matches c.ld + elem[key] = c.li; + } else if (c.li !== undefined) { + // List insert + json.checkList(elem); - elem.splice key, 0, c.li - else if c.ld != undefined - # List delete - json.checkList elem + elem.splice(key, 0, c.li); + } else if (c.ld !== undefined) { + // List delete + json.checkList(elem); - # Should check the list element matches c.ld here too. - elem.splice key, 1 - else if c.lm != undefined - # List move - json.checkList elem - if c.lm != key - e = elem[key] - # Remove it... - elem.splice key, 1 - # And insert it back. - elem.splice c.lm, 0, e + // Should check the list element matches c.ld here too. + elem.splice(key, 1); + } else if (c.lm !== undefined) { + // List move + json.checkList(elem); + if (c.lm !== key) { + const e = elem[key]; + // Remove it... + elem.splice(key, 1); + // And insert it back. + elem.splice(c.lm, 0, e); + } - else if c.oi != undefined - # Object insert / replace - json.checkObj elem + } else if (c.oi !== undefined) { + // Object insert / replace + json.checkObj(elem); - # Should check that elem[key] == c.od - elem[key] = c.oi - else if c.od != undefined - # Object delete - json.checkObj elem + // Should check that elem[key] == c.od + elem[key] = c.oi; + } else if (c.od !== undefined) { + // Object delete + json.checkObj(elem); - # Should check that elem[key] == c.od - delete elem[key] - else - throw new Error 'invalid / missing instruction in op' - catch error - # TODO: Roll back all already applied changes. Write tests before implementing this code. - throw error + // Should check that elem[key] == c.od + delete elem[key]; + } else { + throw new Error('invalid / missing instruction in op'); + } + } + } catch (error) { + // TODO: Roll back all already applied changes. Write tests before implementing this code. + throw error; + } - container.data + return container.data; +}; -# Checks if two paths, p1 and p2 match. -json.pathMatches = (p1, p2, ignoreLast) -> - return false unless p1.length == p2.length +// Checks if two paths, p1 and p2 match. 
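// (Editorial example, not part of this patch: pathMatches(['a', 1], ['a', 2], true)
// returns true, because only the final path segment differs and ignoreLast is
// set; without ignoreLast it returns false.)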
+json.pathMatches = function(p1, p2, ignoreLast) { + if (p1.length !== p2.length) { return false; } - for p, i in p1 - return false if p != p2[i] and (!ignoreLast or i != p1.length - 1) + for (let i = 0; i < p1.length; i++) { + const p = p1[i]; + if ((p !== p2[i]) && (!ignoreLast || (i !== (p1.length - 1)))) { return false; } + } - true + return true; +}; -json.append = (dest, c) -> - c = clone c - if dest.length != 0 and json.pathMatches c.p, (last = dest[dest.length - 1]).p - if last.na != undefined and c.na != undefined - dest[dest.length - 1] = { p: last.p, na: last.na + c.na } - else if last.li != undefined and c.li == undefined and c.ld == last.li - # insert immediately followed by delete becomes a noop. - if last.ld != undefined - # leave the delete part of the replace - delete last.li - else - dest.pop() - else if last.od != undefined and last.oi == undefined and - c.oi != undefined and c.od == undefined - last.oi = c.oi - else if c.lm != undefined and c.p[c.p.length-1] == c.lm - null # don't do anything - else - dest.push c - else - dest.push c +json.append = function(dest, c) { + let last; + c = clone(c); + if ((dest.length !== 0) && json.pathMatches(c.p, (last = dest[dest.length - 1]).p)) { + if ((last.na !== undefined) && (c.na !== undefined)) { + return dest[dest.length - 1] = { p: last.p, na: last.na + c.na }; + } else if ((last.li !== undefined) && (c.li === undefined) && (c.ld === last.li)) { + // insert immediately followed by delete becomes a noop. + if (last.ld !== undefined) { + // leave the delete part of the replace + return delete last.li; + } else { + return dest.pop(); + } + } else if ((last.od !== undefined) && (last.oi === undefined) && + (c.oi !== undefined) && (c.od === undefined)) { + return last.oi = c.oi; + } else if ((c.lm !== undefined) && (c.p[c.p.length-1] === c.lm)) { + return null; // don't do anything + } else { + return dest.push(c); + } + } else { + return dest.push(c); + } +}; -json.compose = (op1, op2) -> - json.checkValidOp op1 - json.checkValidOp op2 +json.compose = function(op1, op2) { + json.checkValidOp(op1); + json.checkValidOp(op2); - newOp = clone op1 - json.append newOp, c for c in op2 + const newOp = clone(op1); + for (let c of Array.from(op2)) { json.append(newOp, c); } - newOp + return newOp; +}; -json.normalize = (op) -> - newOp = [] +json.normalize = function(op) { + const newOp = []; - op = [op] unless isArray op + if (!isArray(op)) { op = [op]; } - for c in op - c.p ?= [] - json.append newOp, c + for (let c of Array.from(op)) { + if (c.p == null) { c.p = []; } + json.append(newOp, c); + } - newOp + return newOp; +}; -# hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming -# we have browser support for JSON. -# http://jsperf.com/cloning-an-object/12 -clone = (o) -> JSON.parse(JSON.stringify o) +// hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming +// we have browser support for JSON. 
+// http://jsperf.com/cloning-an-object/12 +var clone = o => JSON.parse(JSON.stringify(o)); -json.commonPath = (p1, p2) -> - p1 = p1.slice() - p2 = p2.slice() - p1.unshift('data') - p2.unshift('data') - p1 = p1[...p1.length-1] - p2 = p2[...p2.length-1] - return -1 if p2.length == 0 - i = 0 - while p1[i] == p2[i] && i < p1.length - i++ - if i == p2.length - return i-1 - return +json.commonPath = function(p1, p2) { + p1 = p1.slice(); + p2 = p2.slice(); + p1.unshift('data'); + p2.unshift('data'); + p1 = p1.slice(0, p1.length-1); + p2 = p2.slice(0, p2.length-1); + if (p2.length === 0) { return -1; } + let i = 0; + while ((p1[i] === p2[i]) && (i < p1.length)) { + i++; + if (i === p2.length) { + return i-1; + } + } +}; -# transform c so it applies to a document with otherC applied. -json.transformComponent = (dest, c, otherC, type) -> - c = clone c - c.p.push(0) if c.na != undefined - otherC.p.push(0) if otherC.na != undefined +// transform c so it applies to a document with otherC applied. +json.transformComponent = function(dest, c, otherC, type) { + let oc; + c = clone(c); + if (c.na !== undefined) { c.p.push(0); } + if (otherC.na !== undefined) { otherC.p.push(0); } - common = json.commonPath c.p, otherC.p - common2 = json.commonPath otherC.p, c.p + const common = json.commonPath(c.p, otherC.p); + const common2 = json.commonPath(otherC.p, c.p); - cplength = c.p.length - otherCplength = otherC.p.length + const cplength = c.p.length; + const otherCplength = otherC.p.length; - c.p.pop() if c.na != undefined # hax - otherC.p.pop() if otherC.na != undefined + if (c.na !== undefined) { c.p.pop(); } // hax + if (otherC.na !== undefined) { otherC.p.pop(); } - if otherC.na - if common2? && otherCplength >= cplength && otherC.p[common2] == c.p[common2] - if c.ld != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.ld = json.apply clone(c.ld), [oc] - else if c.od != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.od = json.apply clone(c.od), [oc] - json.append dest, c - return dest + if (otherC.na) { + if ((common2 != null) && (otherCplength >= cplength) && (otherC.p[common2] === c.p[common2])) { + if (c.ld !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.ld = json.apply(clone(c.ld), [oc]); + } else if (c.od !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.od = json.apply(clone(c.od), [oc]); + } + } + json.append(dest, c); + return dest; + } - if common2? && otherCplength > cplength && c.p[common2] == otherC.p[common2] - # transform based on c - if c.ld != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.ld = json.apply clone(c.ld), [oc] - else if c.od != undefined - oc = clone otherC - oc.p = oc.p[cplength..] - c.od = json.apply clone(c.od), [oc] + if ((common2 != null) && (otherCplength > cplength) && (c.p[common2] === otherC.p[common2])) { + // transform based on c + if (c.ld !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.ld = json.apply(clone(c.ld), [oc]); + } else if (c.od !== undefined) { + oc = clone(otherC); + oc.p = oc.p.slice(cplength); + c.od = json.apply(clone(c.od), [oc]); + } + } - if common? 
- commonOperand = cplength == otherCplength - # transform based on otherC - if otherC.na != undefined - # this case is handled above due to icky path hax - else if otherC.si != undefined || otherC.sd != undefined - # String op vs string op - pass through to text type - if c.si != undefined || c.sd != undefined - throw new Error("must be a string?") unless commonOperand + if (common != null) { + let from, p, to; + const commonOperand = cplength === otherCplength; + // transform based on otherC + if (otherC.na !== undefined) { + // this case is handled above due to icky path hax + } else if ((otherC.si !== undefined) || (otherC.sd !== undefined)) { + // String op vs string op - pass through to text type + if ((c.si !== undefined) || (c.sd !== undefined)) { + if (!commonOperand) { throw new Error("must be a string?"); } - # Convert an op component to a text op component - convert = (component) -> - newC = p:component.p[component.p.length - 1] - if component.si - newC.i = component.si - else - newC.d = component.sd - newC + // Convert an op component to a text op component + const convert = function(component) { + const newC = {p:component.p[component.p.length - 1]}; + if (component.si) { + newC.i = component.si; + } else { + newC.d = component.sd; + } + return newC; + }; - tc1 = convert c - tc2 = convert otherC + const tc1 = convert(c); + const tc2 = convert(otherC); - res = [] - text._tc res, tc1, tc2, type - for tc in res - jc = { p: c.p[...common] } - jc.p.push(tc.p) - jc.si = tc.i if tc.i? - jc.sd = tc.d if tc.d? - json.append dest, jc - return dest - else if otherC.li != undefined && otherC.ld != undefined - if otherC.p[common] == c.p[common] - # noop - if !commonOperand - # we're below the deleted element, so -> noop - return dest - else if c.ld != undefined - # we're trying to delete the same element, -> noop - if c.li != undefined and type == 'left' - # we're both replacing one element with another. only one can - # survive! - c.ld = clone otherC.li - else - return dest - else if otherC.li != undefined - if c.li != undefined and c.ld == undefined and commonOperand and c.p[common] == otherC.p[common] - # in li vs. li, left wins. - if type == 'right' - c.p[common]++ - else if otherC.p[common] <= c.p[common] - c.p[common]++ + const res = []; + text._tc(res, tc1, tc2, type); + for (let tc of Array.from(res)) { + const jc = { p: c.p.slice(0, common) }; + jc.p.push(tc.p); + if (tc.i != null) { jc.si = tc.i; } + if (tc.d != null) { jc.sd = tc.d; } + json.append(dest, jc); + } + return dest; + } + } else if ((otherC.li !== undefined) && (otherC.ld !== undefined)) { + if (otherC.p[common] === c.p[common]) { + // noop + if (!commonOperand) { + // we're below the deleted element, so -> noop + return dest; + } else if (c.ld !== undefined) { + // we're trying to delete the same element, -> noop + if ((c.li !== undefined) && (type === 'left')) { + // we're both replacing one element with another. only one can + // survive! + c.ld = clone(otherC.li); + } else { + return dest; + } + } + } + } else if (otherC.li !== undefined) { + if ((c.li !== undefined) && (c.ld === undefined) && commonOperand && (c.p[common] === otherC.p[common])) { + // in li vs. li, left wins. + if (type === 'right') { + c.p[common]++; + } + } else if (otherC.p[common] <= c.p[common]) { + c.p[common]++; + } - if c.lm != undefined - if commonOperand - # otherC edits the same list we edit - if otherC.p[common] <= c.lm - c.lm++ - # changing c.from is handled above. 
- else if otherC.ld != undefined - if c.lm != undefined - if commonOperand - if otherC.p[common] == c.p[common] - # they deleted the thing we're trying to move - return dest - # otherC edits the same list we edit - p = otherC.p[common] - from = c.p[common] - to = c.lm - if p < to || (p == to && from < to) - c.lm-- + if (c.lm !== undefined) { + if (commonOperand) { + // otherC edits the same list we edit + if (otherC.p[common] <= c.lm) { + c.lm++; + } + } + } + // changing c.from is handled above. + } else if (otherC.ld !== undefined) { + if (c.lm !== undefined) { + if (commonOperand) { + if (otherC.p[common] === c.p[common]) { + // they deleted the thing we're trying to move + return dest; + } + // otherC edits the same list we edit + p = otherC.p[common]; + from = c.p[common]; + to = c.lm; + if ((p < to) || ((p === to) && (from < to))) { + c.lm--; + } + } + } - if otherC.p[common] < c.p[common] - c.p[common]-- - else if otherC.p[common] == c.p[common] - if otherCplength < cplength - # we're below the deleted element, so -> noop - return dest - else if c.ld != undefined - if c.li != undefined - # we're replacing, they're deleting. we become an insert. - delete c.ld - else - # we're trying to delete the same element, -> noop - return dest - else if otherC.lm != undefined - if c.lm != undefined and cplength == otherCplength - # lm vs lm, here we go! - from = c.p[common] - to = c.lm - otherFrom = otherC.p[common] - otherTo = otherC.lm - if otherFrom != otherTo - # if otherFrom == otherTo, we don't need to change our op. + if (otherC.p[common] < c.p[common]) { + c.p[common]--; + } else if (otherC.p[common] === c.p[common]) { + if (otherCplength < cplength) { + // we're below the deleted element, so -> noop + return dest; + } else if (c.ld !== undefined) { + if (c.li !== undefined) { + // we're replacing, they're deleting. we become an insert. + delete c.ld; + } else { + // we're trying to delete the same element, -> noop + return dest; + } + } + } + } else if (otherC.lm !== undefined) { + if ((c.lm !== undefined) && (cplength === otherCplength)) { + // lm vs lm, here we go! + from = c.p[common]; + to = c.lm; + const otherFrom = otherC.p[common]; + const otherTo = otherC.lm; + if (otherFrom !== otherTo) { + // if otherFrom == otherTo, we don't need to change our op. - # where did my thing go? - if from == otherFrom - # they moved it! tie break. - if type == 'left' - c.p[common] = otherTo - if from == to # ugh - c.lm = otherTo - else - return dest - else - # they moved around it - if from > otherFrom - c.p[common]-- - if from > otherTo - c.p[common]++ - else if from == otherTo - if otherFrom > otherTo - c.p[common]++ - if from == to # ugh, again - c.lm++ + // where did my thing go? + if (from === otherFrom) { + // they moved it! tie break. + if (type === 'left') { + c.p[common] = otherTo; + if (from === to) { // ugh + c.lm = otherTo; + } + } else { + return dest; + } + } else { + // they moved around it + if (from > otherFrom) { + c.p[common]--; + } + if (from > otherTo) { + c.p[common]++; + } else if (from === otherTo) { + if (otherFrom > otherTo) { + c.p[common]++; + if (from === to) { // ugh, again + c.lm++; + } + } + } - # step 2: where am i going to put it? 
- if to > otherFrom - c.lm-- - else if to == otherFrom - if to > from - c.lm-- - if to > otherTo - c.lm++ - else if to == otherTo - # if we're both moving in the same direction, tie break - if (otherTo > otherFrom and to > from) or - (otherTo < otherFrom and to < from) - if type == 'right' - c.lm++ - else - if to > from - c.lm++ - else if to == otherFrom - c.lm-- - else if c.li != undefined and c.ld == undefined and commonOperand - # li - from = otherC.p[common] - to = otherC.lm - p = c.p[common] - if p > from - c.p[common]-- - if p > to - c.p[common]++ - else - # ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath - # the lm - # - # i.e. things care about where their item is after the move. - from = otherC.p[common] - to = otherC.lm - p = c.p[common] - if p == from - c.p[common] = to - else - if p > from - c.p[common]-- - if p > to - c.p[common]++ - else if p == to - if from > to - c.p[common]++ - else if otherC.oi != undefined && otherC.od != undefined - if c.p[common] == otherC.p[common] - if c.oi != undefined and commonOperand - # we inserted where someone else replaced - if type == 'right' - # left wins - return dest - else - # we win, make our op replace what they inserted - c.od = otherC.oi - else - # -> noop if the other component is deleting the same object (or any - # parent) - return dest - else if otherC.oi != undefined - if c.oi != undefined and c.p[common] == otherC.p[common] - # left wins if we try to insert at the same place - if type == 'left' - json.append dest, {p:c.p, od:otherC.oi} - else - return dest - else if otherC.od != undefined - if c.p[common] == otherC.p[common] - return dest if !commonOperand - if c.oi != undefined - delete c.od - else - return dest + // step 2: where am i going to put it? + if (to > otherFrom) { + c.lm--; + } else if (to === otherFrom) { + if (to > from) { + c.lm--; + } + } + if (to > otherTo) { + c.lm++; + } else if (to === otherTo) { + // if we're both moving in the same direction, tie break + if (((otherTo > otherFrom) && (to > from)) || + ((otherTo < otherFrom) && (to < from))) { + if (type === 'right') { + c.lm++; + } + } else { + if (to > from) { + c.lm++; + } else if (to === otherFrom) { + c.lm--; + } + } + } + } + } + } else if ((c.li !== undefined) && (c.ld === undefined) && commonOperand) { + // li + from = otherC.p[common]; + to = otherC.lm; + p = c.p[common]; + if (p > from) { + c.p[common]--; + } + if (p > to) { + c.p[common]++; + } + } else { + // ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath + // the lm + // + // i.e. things care about where their item is after the move. 
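// (Editorial example, not part of this patch: if otherC moves list index 1
// to 3 and c targets index 2, c's index slides down to 1; if c targeted
// index 1 itself, it follows the moved element and becomes 3.)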
+ from = otherC.p[common]; + to = otherC.lm; + p = c.p[common]; + if (p === from) { + c.p[common] = to; + } else { + if (p > from) { + c.p[common]--; + } + if (p > to) { + c.p[common]++; + } else if (p === to) { + if (from > to) { + c.p[common]++; + } + } + } + } + } else if ((otherC.oi !== undefined) && (otherC.od !== undefined)) { + if (c.p[common] === otherC.p[common]) { + if ((c.oi !== undefined) && commonOperand) { + // we inserted where someone else replaced + if (type === 'right') { + // left wins + return dest; + } else { + // we win, make our op replace what they inserted + c.od = otherC.oi; + } + } else { + // -> noop if the other component is deleting the same object (or any + // parent) + return dest; + } + } + } else if (otherC.oi !== undefined) { + if ((c.oi !== undefined) && (c.p[common] === otherC.p[common])) { + // left wins if we try to insert at the same place + if (type === 'left') { + json.append(dest, {p:c.p, od:otherC.oi}); + } else { + return dest; + } + } + } else if (otherC.od !== undefined) { + if (c.p[common] === otherC.p[common]) { + if (!commonOperand) { return dest; } + if (c.oi !== undefined) { + delete c.od; + } else { + return dest; + } + } + } + } - json.append dest, c - return dest + json.append(dest, c); + return dest; +}; -if WEB? - exports.types ||= {} +if (typeof WEB !== 'undefined' && WEB !== null) { + if (!exports.types) { exports.types = {}; } - # This is kind of awful - come up with a better way to hook this helper code up. - exports._bt(json, json.transformComponent, json.checkValidOp, json.append) + // This is kind of awful - come up with a better way to hook this helper code up. + exports._bt(json, json.transformComponent, json.checkValidOp, json.append); - # [] is used to prevent closure from renaming types.text - exports.types.json = json -else - module.exports = json + // [] is used to prevent closure from renaming types.text + exports.types.json = json; +} else { + module.exports = json; - require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append) + require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append); +} diff --git a/services/document-updater/app/coffee/sharejs/types/model.js b/services/document-updater/app/coffee/sharejs/types/model.js index 284d6fd770..9b6e65effd 100644 --- a/services/document-updater/app/coffee/sharejs/types/model.js +++ b/services/document-updater/app/coffee/sharejs/types/model.js @@ -1,603 +1,699 @@ -# The model of all the ops. Responsible for applying & transforming remote deltas -# and managing the storage layer. -# -# Actual storage is handled by the database wrappers in db/*, wrapped by DocCache +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS104: Avoid inline assignments + * DS204: Change includes calls to have a more natural evaluation order + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// The model of all the ops. Responsible for applying & transforming remote deltas +// and managing the storage layer. 
+// +// Actual storage is handled by the database wrappers in db/*, wrapped by DocCache -{EventEmitter} = require 'events' +let Model; +const {EventEmitter} = require('events'); -queue = require './syncqueue' -types = require '../types' +const queue = require('./syncqueue'); +const types = require('../types'); -isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]' +const isArray = o => Object.prototype.toString.call(o) === '[object Array]'; -# This constructor creates a new Model object. There will be one model object -# per server context. -# -# The model object is responsible for a lot of things: -# -# - It manages the interactions with the database -# - It maintains (in memory) a set of all active documents -# - It calls out to the OT functions when necessary -# -# The model is an event emitter. It emits the following events: -# -# create(docName, data): A document has been created with the specified name & data -module.exports = Model = (db, options) -> - # db can be null if the user doesn't want persistance. +// This constructor creates a new Model object. There will be one model object +// per server context. +// +// The model object is responsible for a lot of things: +// +// - It manages the interactions with the database +// - It maintains (in memory) a set of all active documents +// - It calls out to the OT functions when necessary +// +// The model is an event emitter. It emits the following events: +// +// create(docName, data): A document has been created with the specified name & data +module.exports = (Model = function(db, options) { + // db can be null if the user doesn't want persistance. - return new Model(db, options) if !(this instanceof Model) + let getOps; + if (!(this instanceof Model)) { return new Model(db, options); } - model = this + const model = this; - options ?= {} + if (options == null) { options = {}; } - # This is a cache of 'live' documents. - # - # The cache is a map from docName -> { - # ops:[{op, meta}] - # snapshot - # type - # v - # meta - # eventEmitter - # reapTimer - # committedVersion: v - # snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant - # dbMeta: database specific data - # opQueue: syncQueue for processing ops - # } - # - # The ops list contains the document's last options.numCachedOps ops. (Or all - # of them if we're using a memory store). - # - # Documents are stored in this set so long as the document has been accessed in - # the last few seconds (options.reapTime) OR at least one client has the document - # open. I don't know if I should keep open (but not being edited) documents live - - # maybe if a client has a document open but the document isn't being edited, I should - # flush it from the cache. - # - # In any case, the API to model is designed such that if we want to change that later - # it should be pretty easy to do so without any external-to-the-model code changes. - docs = {} + // This is a cache of 'live' documents. + // + // The cache is a map from docName -> { + // ops:[{op, meta}] + // snapshot + // type + // v + // meta + // eventEmitter + // reapTimer + // committedVersion: v + // snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant + // dbMeta: database specific data + // opQueue: syncQueue for processing ops + // } + // + // The ops list contains the document's last options.numCachedOps ops. (Or all + // of them if we're using a memory store). 
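Concretely, one live entry in that cache looks roughly like this (the field names are the ones listed above; the values are illustrative):

    docs['example-doc'] = {
      snapshot: 'Hello world',           // current contents
      type: types['text'],               // resolved OT type object, not its name
      v: 42,                             // current version
      meta: {},
      ops: [/* last numCachedOps {op, meta} pairs */],
      eventEmitter: new EventEmitter(),  // 'op' listeners live here
      reapTimer: null,                   // armed while no client has the doc open
      committedVersion: 40,              // version last persisted to the db
      snapshotWriteLock: false,
      dbMeta: null,                      // backend-specific handle
      opQueue: null                      // assigned from makeOpQueue(docName, doc)
    };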
+ // + // Documents are stored in this set so long as the document has been accessed in + // the last few seconds (options.reapTime) OR at least one client has the document + // open. I don't know if I should keep open (but not being edited) documents live - + // maybe if a client has a document open but the document isn't being edited, I should + // flush it from the cache. + // + // In any case, the API to model is designed such that if we want to change that later + // it should be pretty easy to do so without any external-to-the-model code changes. + const docs = {}; - # This is a map from docName -> [callback]. It is used when a document hasn't been - # cached and multiple getSnapshot() / getVersion() requests come in. All requests - # are added to the callback list and called when db.getSnapshot() returns. - # - # callback(error, snapshot data) - awaitingGetSnapshot = {} + // This is a map from docName -> [callback]. It is used when a document hasn't been + // cached and multiple getSnapshot() / getVersion() requests come in. All requests + // are added to the callback list and called when db.getSnapshot() returns. + // + // callback(error, snapshot data) + const awaitingGetSnapshot = {}; - # The time that documents which no clients have open will stay in the cache. - # Should be > 0. - options.reapTime ?= 3000 + // The time that documents which no clients have open will stay in the cache. + // Should be > 0. + if (options.reapTime == null) { options.reapTime = 3000; } - # The number of operations the cache holds before reusing the space - options.numCachedOps ?= 10 + // The number of operations the cache holds before reusing the space + if (options.numCachedOps == null) { options.numCachedOps = 10; } - # This option forces documents to be reaped, even when there's no database backend. - # This is useful when you don't care about persistance and don't want to gradually - # fill memory. - # - # You might want to set reapTime to a day or something. - options.forceReaping ?= false + // This option forces documents to be reaped, even when there's no database backend. + // This is useful when you don't care about persistance and don't want to gradually + // fill memory. + // + // You might want to set reapTime to a day or something. + if (options.forceReaping == null) { options.forceReaping = false; } - # Until I come up with a better strategy, we'll save a copy of the document snapshot - # to the database every ~20 submitted ops. - options.opsBeforeCommit ?= 20 + // Until I come up with a better strategy, we'll save a copy of the document snapshot + // to the database every ~20 submitted ops. + if (options.opsBeforeCommit == null) { options.opsBeforeCommit = 20; } - # It takes some processing time to transform client ops. The server will punt ops back to the - # client to transform if they're too old. - options.maximumAge ?= 40 + // It takes some processing time to transform client ops. The server will punt ops back to the + // client to transform if they're too old. + if (options.maximumAge == null) { options.maximumAge = 40; } - # **** Cache API methods + // **** Cache API methods - # Its important that all ops are applied in order. This helper method creates the op submission queue - # for a single document. This contains the logic for transforming & applying ops. 
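That queue is reached through applyOp, defined further down: an op submitted at an old version is transformed over everything it missed before being applied. A usage sketch (document name and op illustrative):

    // The document is at v43; a client submits an op it generated at v40.
    model.applyOp('example-doc', {v: 40, op: [{p: 0, i: 'Hi '}]}, function(error, appliedVersion) {
      // The op was transformed over ops 40..42 and applied; the doc is now at
      // appliedVersion + 1. A resubmission can carry opData.dupIfSource so the
      // dup detection below rejects it with 'Op already submitted'.
    });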
- makeOpQueue = (docName, doc) -> queue (opData, callback) -> - return callback 'Version missing' unless opData.v >= 0 - return callback 'Op at future version' if opData.v > doc.v + // Its important that all ops are applied in order. This helper method creates the op submission queue + // for a single document. This contains the logic for transforming & applying ops. + const makeOpQueue = (docName, doc) => queue(function(opData, callback) { + if (!(opData.v >= 0)) { return callback('Version missing'); } + if (opData.v > doc.v) { return callback('Op at future version'); } - # Punt the transforming work back to the client if the op is too old. - return callback 'Op too old' if opData.v + options.maximumAge < doc.v + // Punt the transforming work back to the client if the op is too old. + if ((opData.v + options.maximumAge) < doc.v) { return callback('Op too old'); } - opData.meta ||= {} - opData.meta.ts = Date.now() + if (!opData.meta) { opData.meta = {}; } + opData.meta.ts = Date.now(); - # We'll need to transform the op to the current version of the document. This - # calls the callback immediately if opVersion == doc.v. - getOps docName, opData.v, doc.v, (error, ops) -> - return callback error if error + // We'll need to transform the op to the current version of the document. This + // calls the callback immediately if opVersion == doc.v. + return getOps(docName, opData.v, doc.v, function(error, ops) { + let snapshot; + if (error) { return callback(error); } - unless doc.v - opData.v == ops.length - # This should never happen. It indicates that we didn't get all the ops we - # asked for. Its important that the submitted op is correctly transformed. - console.error "Could not get old ops in model for document #{docName}" - console.error "Expected ops #{opData.v} to #{doc.v} and got #{ops.length} ops" - return callback 'Internal error' + if ((doc.v - opData.v) !== ops.length) { + // This should never happen. It indicates that we didn't get all the ops we + // asked for. Its important that the submitted op is correctly transformed. + console.error(`Could not get old ops in model for document ${docName}`); + console.error(`Expected ops ${opData.v} to ${doc.v} and got ${ops.length} ops`); + return callback('Internal error'); + } - if ops.length > 0 - try - # If there's enough ops, it might be worth spinning this out into a webworker thread. - for oldOp in ops - # Dup detection works by sending the id(s) the op has been submitted with previously. - # If the id matches, we reject it. The client can also detect the op has been submitted - # already if it sees its own previous id in the ops it sees when it does catchup. - if oldOp.meta.source and opData.dupIfSource and oldOp.meta.source in opData.dupIfSource - return callback 'Op already submitted' + if (ops.length > 0) { + try { + // If there's enough ops, it might be worth spinning this out into a webworker thread. + for (let oldOp of Array.from(ops)) { + // Dup detection works by sending the id(s) the op has been submitted with previously. + // If the id matches, we reject it. The client can also detect the op has been submitted + // already if it sees its own previous id in the ops it sees when it does catchup. 
+ if (oldOp.meta.source && opData.dupIfSource && Array.from(opData.dupIfSource).includes(oldOp.meta.source)) { + return callback('Op already submitted'); + } - opData.op = doc.type.transform opData.op, oldOp.op, 'left' - opData.v++ - catch error - console.error error.stack - return callback error.message + opData.op = doc.type.transform(opData.op, oldOp.op, 'left'); + opData.v++; + } + } catch (error1) { + error = error1; + console.error(error.stack); + return callback(error.message); + } + } - try - snapshot = doc.type.apply doc.snapshot, opData.op - catch error - console.error error.stack - return callback error.message + try { + snapshot = doc.type.apply(doc.snapshot, opData.op); + } catch (error2) { + error = error2; + console.error(error.stack); + return callback(error.message); + } - # The op data should be at the current version, and the new document data should be at - # the next version. - # - # This should never happen in practice, but its a nice little check to make sure everything - # is hunky-dory. - unless opData.v == doc.v - # This should never happen. - console.error "Version mismatch detected in model. File a ticket - this is a bug." - console.error "Expecting #{opData.v} == #{doc.v}" - return callback 'Internal error' + // The op data should be at the current version, and the new document data should be at + // the next version. + // + // This should never happen in practice, but its a nice little check to make sure everything + // is hunky-dory. + if (opData.v !== doc.v) { + // This should never happen. + console.error("Version mismatch detected in model. File a ticket - this is a bug."); + console.error(`Expecting ${opData.v} == ${doc.v}`); + return callback('Internal error'); + } - #newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta} - writeOp = db?.writeOp or (docName, newOpData, callback) -> callback() + //newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta} + const writeOp = (db != null ? db.writeOp : undefined) || ((docName, newOpData, callback) => callback()); - writeOp docName, opData, (error) -> - if error - # The user should probably know about this. - console.warn "Error writing ops to database: #{error}" - return callback error + return writeOp(docName, opData, function(error) { + if (error) { + // The user should probably know about this. + console.warn(`Error writing ops to database: ${error}`); + return callback(error); + } - options.stats?.writeOp?() + __guardMethod__(options.stats, 'writeOp', o => o.writeOp()); - # This is needed when we emit the 'change' event, below. - oldSnapshot = doc.snapshot + // This is needed when we emit the 'change' event, below. + const oldSnapshot = doc.snapshot; - # All the heavy lifting is now done. Finally, we'll update the cache with the new data - # and (maybe!) save a new document snapshot to the database. + // All the heavy lifting is now done. Finally, we'll update the cache with the new data + // and (maybe!) save a new document snapshot to the database. 
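The per-op contract used in that loop, shown in isolation (every registered OT type has the same shape; missedOp stands for one cached op fetched via getOps):

    let op = [{p: 0, i: 'Hi '}];                        // incoming text op
    op = doc.type.transform(op, missedOp.op, 'left');   // rebase over a missed op
    const snapshot = doc.type.apply(doc.snapshot, op);  // produce the new snapshot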
- doc.v = opData.v + 1 - doc.snapshot = snapshot + doc.v = opData.v + 1; + doc.snapshot = snapshot; - doc.ops.push opData - doc.ops.shift() if db and doc.ops.length > options.numCachedOps + doc.ops.push(opData); + if (db && (doc.ops.length > options.numCachedOps)) { doc.ops.shift(); } - model.emit 'applyOp', docName, opData, snapshot, oldSnapshot - doc.eventEmitter.emit 'op', opData, snapshot, oldSnapshot + model.emit('applyOp', docName, opData, snapshot, oldSnapshot); + doc.eventEmitter.emit('op', opData, snapshot, oldSnapshot); - # The callback is called with the version of the document at which the op was applied. - # This is the op.v after transformation, and its doc.v - 1. - callback null, opData.v + // The callback is called with the version of the document at which the op was applied. + // This is the op.v after transformation, and its doc.v - 1. + callback(null, opData.v); - # I need a decent strategy here for deciding whether or not to save the snapshot. - # - # The 'right' strategy looks something like "Store the snapshot whenever the snapshot - # is smaller than the accumulated op data". For now, I'll just store it every 20 - # ops or something. (Configurable with doc.committedVersion) - if !doc.snapshotWriteLock and doc.committedVersion + options.opsBeforeCommit <= doc.v - tryWriteSnapshot docName, (error) -> - console.warn "Error writing snapshot #{error}. This is nonfatal" if error + // I need a decent strategy here for deciding whether or not to save the snapshot. + // + // The 'right' strategy looks something like "Store the snapshot whenever the snapshot + // is smaller than the accumulated op data". For now, I'll just store it every 20 + // ops or something. (Configurable with doc.committedVersion) + if (!doc.snapshotWriteLock && ((doc.committedVersion + options.opsBeforeCommit) <= doc.v)) { + return tryWriteSnapshot(docName, function(error) { + if (error) { return console.warn(`Error writing snapshot ${error}. This is nonfatal`); } + }); + } + }); + }); + }); - # Add the data for the given docName to the cache. The named document shouldn't already - # exist in the doc set. - # - # Returns the new doc. - add = (docName, error, data, committedVersion, ops, dbMeta) -> - callbacks = awaitingGetSnapshot[docName] - delete awaitingGetSnapshot[docName] + // Add the data for the given docName to the cache. The named document shouldn't already + // exist in the doc set. + // + // Returns the new doc. + const add = function(docName, error, data, committedVersion, ops, dbMeta) { + let callback, doc; + const callbacks = awaitingGetSnapshot[docName]; + delete awaitingGetSnapshot[docName]; - if error - callback error for callback in callbacks if callbacks - else - doc = docs[docName] = - snapshot: data.snapshot - v: data.v - type: data.type - meta: data.meta + if (error) { + if (callbacks) { for (callback of Array.from(callbacks)) { callback(error); } } + } else { + doc = (docs[docName] = { + snapshot: data.snapshot, + v: data.v, + type: data.type, + meta: data.meta, - # Cache of ops - ops: ops or [] + // Cache of ops + ops: ops || [], - eventEmitter: new EventEmitter + eventEmitter: new EventEmitter, - # Timer before the document will be invalidated from the cache (if the document has no - # listeners) - reapTimer: null + // Timer before the document will be invalidated from the cache (if the document has no + // listeners) + reapTimer: null, - # Version of the snapshot thats in the database - committedVersion: committedVersion ? 
data.v - snapshotWriteLock: false - dbMeta: dbMeta + // Version of the snapshot thats in the database + committedVersion: committedVersion != null ? committedVersion : data.v, + snapshotWriteLock: false, + dbMeta + }); - doc.opQueue = makeOpQueue docName, doc + doc.opQueue = makeOpQueue(docName, doc); - refreshReapingTimeout docName - model.emit 'add', docName, data - callback null, doc for callback in callbacks if callbacks + refreshReapingTimeout(docName); + model.emit('add', docName, data); + if (callbacks) { for (callback of Array.from(callbacks)) { callback(null, doc); } } + } - doc + return doc; + }; - # This is a little helper wrapper around db.getOps. It does two things: - # - # - If there's no database set, it returns an error to the callback - # - It adds version numbers to each op returned from the database - # (These can be inferred from context so the DB doesn't store them, but its useful to have them). - getOpsInternal = (docName, start, end, callback) -> - return callback? 'Document does not exist' unless db + // This is a little helper wrapper around db.getOps. It does two things: + // + // - If there's no database set, it returns an error to the callback + // - It adds version numbers to each op returned from the database + // (These can be inferred from context so the DB doesn't store them, but its useful to have them). + const getOpsInternal = function(docName, start, end, callback) { + if (!db) { return (typeof callback === 'function' ? callback('Document does not exist') : undefined); } - db.getOps docName, start, end, (error, ops) -> - return callback? error if error + return db.getOps(docName, start, end, function(error, ops) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - v = start - op.v = v++ for op in ops + let v = start; + for (let op of Array.from(ops)) { op.v = v++; } - callback? null, ops + return (typeof callback === 'function' ? callback(null, ops) : undefined); + }); + }; - # Load the named document into the cache. This function is re-entrant. - # - # The callback is called with (error, doc) - load = (docName, callback) -> - if docs[docName] - # The document is already loaded. Return immediately. - options.stats?.cacheHit? 'getSnapshot' - return callback null, docs[docName] + // Load the named document into the cache. This function is re-entrant. + // + // The callback is called with (error, doc) + const load = function(docName, callback) { + if (docs[docName]) { + // The document is already loaded. Return immediately. + __guardMethod__(options.stats, 'cacheHit', o => o.cacheHit('getSnapshot')); + return callback(null, docs[docName]); + } - # We're a memory store. If we don't have it, nobody does. - return callback 'Document does not exist' unless db + // We're a memory store. If we don't have it, nobody does. + if (!db) { return callback('Document does not exist'); } - callbacks = awaitingGetSnapshot[docName] + const callbacks = awaitingGetSnapshot[docName]; - # The document is being loaded already. Add ourselves as a callback. - return callbacks.push callback if callbacks + // The document is being loaded already. Add ourselves as a callback. + if (callbacks) { return callbacks.push(callback); } - options.stats?.cacheMiss? 'getSnapshot' + __guardMethod__(options.stats, 'cacheMiss', o1 => o1.cacheMiss('getSnapshot')); - # The document isn't loaded and isn't being loaded. Load it. 
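One consequence of awaitingGetSnapshot worth spelling out: concurrent reads of a cold document coalesce into a single db.getSnapshot call.

    // All three callbacks queue up on awaitingGetSnapshot['example-doc'] and
    // fire together when the one underlying db.getSnapshot returns.
    model.getSnapshot('example-doc', cb1);
    model.getSnapshot('example-doc', cb2);
    model.getSnapshot('example-doc', cb3);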
- awaitingGetSnapshot[docName] = [callback] - db.getSnapshot docName, (error, data, dbMeta) -> - return add docName, error if error + // The document isn't loaded and isn't being loaded. Load it. + awaitingGetSnapshot[docName] = [callback]; + return db.getSnapshot(docName, function(error, data, dbMeta) { + if (error) { return add(docName, error); } - type = types[data.type] - unless type - console.warn "Type '#{data.type}' missing" - return callback "Type not found" - data.type = type + const type = types[data.type]; + if (!type) { + console.warn(`Type '${data.type}' missing`); + return callback("Type not found"); + } + data.type = type; - committedVersion = data.v + const committedVersion = data.v; - # The server can close without saving the most recent document snapshot. - # In this case, there are extra ops which need to be applied before - # returning the snapshot. - getOpsInternal docName, data.v, null, (error, ops) -> - return callback error if error + // The server can close without saving the most recent document snapshot. + // In this case, there are extra ops which need to be applied before + // returning the snapshot. + return getOpsInternal(docName, data.v, null, function(error, ops) { + if (error) { return callback(error); } - if ops.length > 0 - console.log "Catchup #{docName} #{data.v} -> #{data.v + ops.length}" + if (ops.length > 0) { + console.log(`Catchup ${docName} ${data.v} -> ${data.v + ops.length}`); - try - for op in ops - data.snapshot = type.apply data.snapshot, op.op - data.v++ - catch e - # This should never happen - it indicates that whats in the - # database is invalid. - console.error "Op data invalid for #{docName}: #{e.stack}" - return callback 'Op data invalid' + try { + for (let op of Array.from(ops)) { + data.snapshot = type.apply(data.snapshot, op.op); + data.v++; + } + } catch (e) { + // This should never happen - it indicates that whats in the + // database is invalid. + console.error(`Op data invalid for ${docName}: ${e.stack}`); + return callback('Op data invalid'); + } + } - model.emit 'load', docName, data - add docName, error, data, committedVersion, ops, dbMeta + model.emit('load', docName, data); + return add(docName, error, data, committedVersion, ops, dbMeta); + }); + }); + }; - # This makes sure the cache contains a document. If the doc cache doesn't contain - # a document, it is loaded from the database and stored. - # - # Documents are stored so long as either: - # - They have been accessed within the past #{PERIOD} - # - At least one client has the document open - refreshReapingTimeout = (docName) -> - doc = docs[docName] - return unless doc + // This makes sure the cache contains a document. If the doc cache doesn't contain + // a document, it is loaded from the database and stored. + // + // Documents are stored so long as either: + // - They have been accessed within the past #{PERIOD} + // - At least one client has the document open + var refreshReapingTimeout = function(docName) { + const doc = docs[docName]; + if (!doc) { return; } - # I want to let the clients list be updated before this is called. - process.nextTick -> - # This is an awkward way to find out the number of clients on a document. If this - # causes performance issues, add a numClients field to the document. - # - # The first check is because its possible that between refreshReapingTimeout being called and this - # event being fired, someone called delete() on the document and hence the doc is something else now. 
- if doc == docs[docName] and - doc.eventEmitter.listeners('op').length == 0 and - (db or options.forceReaping) and - doc.opQueue.busy is false + // I want to let the clients list be updated before this is called. + return process.nextTick(function() { + // This is an awkward way to find out the number of clients on a document. If this + // causes performance issues, add a numClients field to the document. + // + // The first check is because its possible that between refreshReapingTimeout being called and this + // event being fired, someone called delete() on the document and hence the doc is something else now. + if ((doc === docs[docName]) && + (doc.eventEmitter.listeners('op').length === 0) && + (db || options.forceReaping) && + (doc.opQueue.busy === false)) { - clearTimeout doc.reapTimer - doc.reapTimer = reapTimer = setTimeout -> - tryWriteSnapshot docName, -> - # If the reaping timeout has been refreshed while we're writing the snapshot, or if we're - # in the middle of applying an operation, don't reap. - delete docs[docName] if docs[docName].reapTimer is reapTimer and doc.opQueue.busy is false - , options.reapTime + let reapTimer; + clearTimeout(doc.reapTimer); + return doc.reapTimer = (reapTimer = setTimeout(() => tryWriteSnapshot(docName, function() { + // If the reaping timeout has been refreshed while we're writing the snapshot, or if we're + // in the middle of applying an operation, don't reap. + if ((docs[docName].reapTimer === reapTimer) && (doc.opQueue.busy === false)) { return delete docs[docName]; } + }) + , options.reapTime)); + } + }); + }; - tryWriteSnapshot = (docName, callback) -> - return callback?() unless db + var tryWriteSnapshot = function(docName, callback) { + if (!db) { return (typeof callback === 'function' ? callback() : undefined); } - doc = docs[docName] + const doc = docs[docName]; - # The doc is closed - return callback?() unless doc + // The doc is closed + if (!doc) { return (typeof callback === 'function' ? callback() : undefined); } - # The document is already saved. - return callback?() if doc.committedVersion is doc.v + // The document is already saved. + if (doc.committedVersion === doc.v) { return (typeof callback === 'function' ? callback() : undefined); } - return callback? 'Another snapshot write is in progress' if doc.snapshotWriteLock + if (doc.snapshotWriteLock) { return (typeof callback === 'function' ? callback('Another snapshot write is in progress') : undefined); } - doc.snapshotWriteLock = true + doc.snapshotWriteLock = true; - options.stats?.writeSnapshot?() + __guardMethod__(options.stats, 'writeSnapshot', o => o.writeSnapshot()); - writeSnapshot = db?.writeSnapshot or (docName, docData, dbMeta, callback) -> callback() + const writeSnapshot = (db != null ? db.writeSnapshot : undefined) || ((docName, docData, dbMeta, callback) => callback()); - data = - v: doc.v - meta: doc.meta - snapshot: doc.snapshot - # The database doesn't know about object types. + const data = { + v: doc.v, + meta: doc.meta, + snapshot: doc.snapshot, + // The database doesn't know about object types. type: doc.type.name + }; - # Commit snapshot. - writeSnapshot docName, data, doc.dbMeta, (error, dbMeta) -> - doc.snapshotWriteLock = false + // Commit snapshot. + return writeSnapshot(docName, data, doc.dbMeta, function(error, dbMeta) { + doc.snapshotWriteLock = false; - # We have to use data.v here because the version in the doc could - # have been updated between the call to writeSnapshot() and now. 
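Every tunable in this reap-and-commit dance is a plain constructor option; the defaults are the ones set near the top of this file:

    const model = new Model(db, {
      reapTime: 3000,        // ms an idle, unopened document stays cached
      numCachedOps: 10,      // ops retained per doc for transform/catchup
      forceReaping: false,   // reap even without a database backend
      opsBeforeCommit: 20,   // snapshot roughly every 20 submitted ops
      maximumAge: 40         // ops older than this are punted back to the client
    });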
- doc.committedVersion = data.v - doc.dbMeta = dbMeta + // We have to use data.v here because the version in the doc could + // have been updated between the call to writeSnapshot() and now. + doc.committedVersion = data.v; + doc.dbMeta = dbMeta; - callback? error + return (typeof callback === 'function' ? callback(error) : undefined); + }); + }; - # *** Model interface methods + // *** Model interface methods - # Create a new document. - # - # data should be {snapshot, type, [meta]}. The version of a new document is 0. - @create = (docName, type, meta, callback) -> - [meta, callback] = [{}, meta] if typeof meta is 'function' + // Create a new document. + // + // data should be {snapshot, type, [meta]}. The version of a new document is 0. + this.create = function(docName, type, meta, callback) { + if (typeof meta === 'function') { [meta, callback] = Array.from([{}, meta]); } - return callback? 'Invalid document name' if docName.match /\// - return callback? 'Document already exists' if docs[docName] + if (docName.match(/\//)) { return (typeof callback === 'function' ? callback('Invalid document name') : undefined); } + if (docs[docName]) { return (typeof callback === 'function' ? callback('Document already exists') : undefined); } - type = types[type] if typeof type == 'string' - return callback? 'Type not found' unless type + if (typeof type === 'string') { type = types[type]; } + if (!type) { return (typeof callback === 'function' ? callback('Type not found') : undefined); } - data = - snapshot:type.create() - type:type.name - meta:meta or {} + const data = { + snapshot:type.create(), + type:type.name, + meta:meta || {}, v:0 + }; - done = (error, dbMeta) -> - # dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something. - return callback? error if error + const done = function(error, dbMeta) { + // dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something. + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - # From here on we'll store the object version of the type name. - data.type = type - add docName, null, data, 0, [], dbMeta - model.emit 'create', docName, data - callback?() + // From here on we'll store the object version of the type name. + data.type = type; + add(docName, null, data, 0, [], dbMeta); + model.emit('create', docName, data); + return (typeof callback === 'function' ? callback() : undefined); + }; - if db - db.create docName, data, done - else - done() + if (db) { + return db.create(docName, data, done); + } else { + return done(); + } + }; - # Perminantly deletes the specified document. - # If listeners are attached, they are removed. - # - # The callback is called with (error) if there was an error. If error is null / undefined, the - # document was deleted. - # - # WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the - # deletion. Subsequent op submissions will fail). - @delete = (docName, callback) -> - doc = docs[docName] + // Perminantly deletes the specified document. + // If listeners are attached, they are removed. + // + // The callback is called with (error) if there was an error. If error is null / undefined, the + // document was deleted. + // + // WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the + // deletion. Subsequent op submissions will fail). 
+ this.delete = function(docName, callback) { + const doc = docs[docName]; - if doc - clearTimeout doc.reapTimer - delete docs[docName] + if (doc) { + clearTimeout(doc.reapTimer); + delete docs[docName]; + } - done = (error) -> - model.emit 'delete', docName unless error - callback? error + const done = function(error) { + if (!error) { model.emit('delete', docName); } + return (typeof callback === 'function' ? callback(error) : undefined); + }; - if db - db.delete docName, doc?.dbMeta, done - else - done (if !doc then 'Document does not exist') + if (db) { + return db.delete(docName, doc != null ? doc.dbMeta : undefined, done); + } else { + return done((!doc ? 'Document does not exist' : undefined)); + } + }; - # This gets all operations from [start...end]. (That is, its not inclusive.) - # - # end can be null. This means 'get me all ops from start'. - # - # Each op returned is in the form {op:o, meta:m, v:version}. - # - # Callback is called with (error, [ops]) - # - # If the document does not exist, getOps doesn't necessarily return an error. This is because - # its awkward to figure out whether or not the document exists for things - # like the redis database backend. I guess its a bit gross having this inconsistant - # with the other DB calls, but its certainly convenient. - # - # Use getVersion() to determine if a document actually exists, if thats what you're - # after. - @getOps = getOps = (docName, start, end, callback) -> - # getOps will only use the op cache if its there. It won't fill the op cache in. - throw new Error 'start must be 0+' unless start >= 0 + // This gets all operations from [start...end]. (That is, its not inclusive.) + // + // end can be null. This means 'get me all ops from start'. + // + // Each op returned is in the form {op:o, meta:m, v:version}. + // + // Callback is called with (error, [ops]) + // + // If the document does not exist, getOps doesn't necessarily return an error. This is because + // its awkward to figure out whether or not the document exists for things + // like the redis database backend. I guess its a bit gross having this inconsistant + // with the other DB calls, but its certainly convenient. + // + // Use getVersion() to determine if a document actually exists, if thats what you're + // after. + this.getOps = (getOps = function(docName, start, end, callback) { + // getOps will only use the op cache if its there. It won't fill the op cache in. + if (!(start >= 0)) { throw new Error('start must be 0+'); } - [end, callback] = [null, end] if typeof end is 'function' + if (typeof end === 'function') { [end, callback] = Array.from([null, end]); } - ops = docs[docName]?.ops + const ops = docs[docName] != null ? docs[docName].ops : undefined; - if ops - version = docs[docName].v + if (ops) { + const version = docs[docName].v; - # Ops contains an array of ops. The last op in the list is the last op applied - end ?= version - start = Math.min start, end + // Ops contains an array of ops. The last op in the list is the last op applied + if (end == null) { end = version; } + start = Math.min(start, end); - return callback null, [] if start == end + if (start === end) { return callback(null, []); } - # Base is the version number of the oldest op we have cached - base = version - ops.length + // Base is the version number of the oldest op we have cached + const base = version - ops.length; - # If the database is null, we'll trim to the ops we do have and hope thats enough. 
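A usage sketch of the read side (getOps is end-exclusive, and end may be null for "everything since start"):

    model.create('example-doc', 'text', function(error) {
      // ...some applyOp calls later...
      model.getOps('example-doc', 40, 43, function(error, ops) {
        // ops covers versions 40, 41 and 42; each entry is {op, meta, v}
      });
      model.getVersion('example-doc', (error, v) => console.log('now at', v));
    });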
- if start >= base or db is null - refreshReapingTimeout docName - options.stats?.cacheHit 'getOps' + // If the database is null, we'll trim to the ops we do have and hope thats enough. + if ((start >= base) || (db === null)) { + refreshReapingTimeout(docName); + if (options.stats != null) { + options.stats.cacheHit('getOps'); + } - return callback null, ops[(start - base)...(end - base)] + return callback(null, ops.slice((start - base), (end - base))); + } + } - options.stats?.cacheMiss 'getOps' + if (options.stats != null) { + options.stats.cacheMiss('getOps'); + } - getOpsInternal docName, start, end, callback + return getOpsInternal(docName, start, end, callback); + }); - # Gets the snapshot data for the specified document. - # getSnapshot(docName, callback) - # Callback is called with (error, {v: , type: , snapshot: , meta: }) - @getSnapshot = (docName, callback) -> - load docName, (error, doc) -> - callback error, if doc then {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta} + // Gets the snapshot data for the specified document. + // getSnapshot(docName, callback) + // Callback is called with (error, {v: , type: , snapshot: , meta: }) + this.getSnapshot = (docName, callback) => load(docName, (error, doc) => callback(error, doc ? {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta} : undefined)); - # Gets the latest version # of the document. - # getVersion(docName, callback) - # callback is called with (error, version). - @getVersion = (docName, callback) -> - load docName, (error, doc) -> callback error, doc?.v + // Gets the latest version # of the document. + // getVersion(docName, callback) + // callback is called with (error, version). + this.getVersion = (docName, callback) => load(docName, (error, doc) => callback(error, doc != null ? doc.v : undefined)); - # Apply an op to the specified document. - # The callback is passed (error, applied version #) - # opData = {op:op, v:v, meta:metadata} - # - # Ops are queued before being applied so that the following code applies op C before op B: - # model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB - # model.applyOp 'doc', OPC - @applyOp = (docName, opData, callback) -> - # All the logic for this is in makeOpQueue, above. - load docName, (error, doc) -> - return callback error if error + // Apply an op to the specified document. + // The callback is passed (error, applied version #) + // opData = {op:op, v:v, meta:metadata} + // + // Ops are queued before being applied so that the following code applies op C before op B: + // model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB + // model.applyOp 'doc', OPC + this.applyOp = (docName, opData, callback) => // All the logic for this is in makeOpQueue, above. + load(docName, function(error, doc) { + if (error) { return callback(error); } - process.nextTick -> doc.opQueue opData, (error, newVersion) -> - refreshReapingTimeout docName - callback? error, newVersion + return process.nextTick(() => doc.opQueue(opData, function(error, newVersion) { + refreshReapingTimeout(docName); + return (typeof callback === 'function' ? 
callback(error, newVersion) : undefined); + })); + }); - # TODO: store (some) metadata in DB - # TODO: op and meta should be combineable in the op that gets sent - @applyMetaOp = (docName, metaOpData, callback) -> - {path, value} = metaOpData.meta + // TODO: store (some) metadata in DB + // TODO: op and meta should be combineable in the op that gets sent + this.applyMetaOp = function(docName, metaOpData, callback) { + const {path, value} = metaOpData.meta; - return callback? "path should be an array" unless isArray path + if (!isArray(path)) { return (typeof callback === 'function' ? callback("path should be an array") : undefined); } - load docName, (error, doc) -> - if error? - callback? error - else - applied = false - switch path[0] - when 'shout' - doc.eventEmitter.emit 'op', metaOpData - applied = true + return load(docName, function(error, doc) { + if (error != null) { + return (typeof callback === 'function' ? callback(error) : undefined); + } else { + let applied = false; + switch (path[0]) { + case 'shout': + doc.eventEmitter.emit('op', metaOpData); + applied = true; + break; + } - model.emit 'applyMetaOp', docName, path, value if applied - callback? null, doc.v + if (applied) { model.emit('applyMetaOp', docName, path, value); } + return (typeof callback === 'function' ? callback(null, doc.v) : undefined); + } + }); + }; - # Listen to all ops from the specified version. If version is in the past, all - # ops since that version are sent immediately to the listener. - # - # The callback is called once the listener is attached, but before any ops have been passed - # to the listener. - # - # This will _not_ edit the document metadata. - # - # If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour - # might change in a future version. - # - # version is the document version at which the document is opened. It can be left out if you want to open - # the document at the most recent version. - # - # listener is called with (opData) each time an op is applied. - # - # callback(error, openedVersion) - @listen = (docName, version, listener, callback) -> - [version, listener, callback] = [null, version, listener] if typeof version is 'function' + // Listen to all ops from the specified version. If version is in the past, all + // ops since that version are sent immediately to the listener. + // + // The callback is called once the listener is attached, but before any ops have been passed + // to the listener. + // + // This will _not_ edit the document metadata. + // + // If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour + // might change in a future version. + // + // version is the document version at which the document is opened. It can be left out if you want to open + // the document at the most recent version. + // + // listener is called with (opData) each time an op is applied. + // + // callback(error, openedVersion) + this.listen = function(docName, version, listener, callback) { + if (typeof version === 'function') { [version, listener, callback] = Array.from([null, version, listener]); } - load docName, (error, doc) -> - return callback? error if error + return load(docName, function(error, doc) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - clearTimeout doc.reapTimer + clearTimeout(doc.reapTimer); - if version? - getOps docName, version, null, (error, data) -> - return callback? 
error if error + if (version != null) { + return getOps(docName, version, null, function(error, data) { + if (error) { return (typeof callback === 'function' ? callback(error) : undefined); } - doc.eventEmitter.on 'op', listener - callback? null, version - for op in data - listener op + doc.eventEmitter.on('op', listener); + if (typeof callback === 'function') { + callback(null, version); + } + return (() => { + const result = []; + for (let op of Array.from(data)) { + var needle; + listener(op); - # The listener may well remove itself during the catchup phase. If this happens, break early. - # This is done in a quite inefficient way. (O(n) where n = #listeners on doc) - break unless listener in doc.eventEmitter.listeners 'op' + // The listener may well remove itself during the catchup phase. If this happens, break early. + // This is done in a quite inefficient way. (O(n) where n = #listeners on doc) + if ((needle = listener, !Array.from(doc.eventEmitter.listeners('op')).includes(needle))) { break; } else { + result.push(undefined); + } + } + return result; + })(); + }); - else # Version is null / undefined. Just add the listener. - doc.eventEmitter.on 'op', listener - callback? null, doc.v + } else { // Version is null / undefined. Just add the listener. + doc.eventEmitter.on('op', listener); + return (typeof callback === 'function' ? callback(null, doc.v) : undefined); + } + }); + }; - # Remove a listener for a particular document. - # - # removeListener(docName, listener) - # - # This is synchronous. - @removeListener = (docName, listener) -> - # The document should already be loaded. - doc = docs[docName] - throw new Error 'removeListener called but document not loaded' unless doc + // Remove a listener for a particular document. + // + // removeListener(docName, listener) + // + // This is synchronous. + this.removeListener = function(docName, listener) { + // The document should already be loaded. + const doc = docs[docName]; + if (!doc) { throw new Error('removeListener called but document not loaded'); } - doc.eventEmitter.removeListener 'op', listener - refreshReapingTimeout docName + doc.eventEmitter.removeListener('op', listener); + return refreshReapingTimeout(docName); + }; - # Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed - - # sharejs will happily replay uncommitted ops when documents are re-opened anyway. - @flush = (callback) -> - return callback?() unless db + // Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed - + // sharejs will happily replay uncommitted ops when documents are re-opened anyway. + this.flush = function(callback) { + if (!db) { return (typeof callback === 'function' ? callback() : undefined); } - pendingWrites = 0 + let pendingWrites = 0; - for docName, doc of docs - if doc.committedVersion < doc.v - pendingWrites++ - # I'm hoping writeSnapshot will always happen in another thread. - tryWriteSnapshot docName, -> - process.nextTick -> - pendingWrites-- - callback?() if pendingWrites is 0 + for (let docName in docs) { + const doc = docs[docName]; + if (doc.committedVersion < doc.v) { + pendingWrites++; + // I'm hoping writeSnapshot will always happen in another thread. + tryWriteSnapshot(docName, () => process.nextTick(function() { + pendingWrites--; + if (pendingWrites === 0) { return (typeof callback === 'function' ? callback() : undefined); } + })); + } + } - # If nothing was queued, terminate immediately. 
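Given the catchup behaviour documented above, listen() is the usual client entry point; a sketch:

    const onOp = opData => { /* v40..current replayed first, then live ops */ };
    model.listen('example-doc', 40, onOp, function(error, openedVersion) {
      // Listener attached; openedVersion === 40 here. Detach synchronously
      // later with model.removeListener('example-doc', onOp).
    });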
- callback?() if pendingWrites is 0 + // If nothing was queued, terminate immediately. + if (pendingWrites === 0) { return (typeof callback === 'function' ? callback() : undefined); } + }; - # Close the database connection. This is needed so nodejs can shut down cleanly. - @closeDb = -> - db?.close?() - db = null + // Close the database connection. This is needed so nodejs can shut down cleanly. + this.closeDb = function() { + __guardMethod__(db, 'close', o => o.close()); + return db = null; + }; - return +}); -# Model inherits from EventEmitter. -Model:: = new EventEmitter +// Model inherits from EventEmitter. +Model.prototype = new EventEmitter; + +function __guardMethod__(obj, methodName, transform) { + if (typeof obj !== 'undefined' && obj !== null && typeof obj[methodName] === 'function') { + return transform(obj, methodName); + } else { + return undefined; + } +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/sharejs/types/simple.js b/services/document-updater/app/coffee/sharejs/types/simple.js index 996b1a5ddc..57c4934f73 100644 --- a/services/document-updater/app/coffee/sharejs/types/simple.js +++ b/services/document-updater/app/coffee/sharejs/types/simple.js @@ -1,38 +1,48 @@ -# This is a really simple OT type. Its not compiled with the web client, but it could be. -# -# Its mostly included for demonstration purposes and its used in a lot of unit tests. -# -# This defines a really simple text OT type which only allows inserts. (No deletes). -# -# Ops look like: -# {position:#, text:"asdf"} -# -# Document snapshots look like: -# {str:string} +/* + * decaffeinate suggestions: + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// This is a really simple OT type. Its not compiled with the web client, but it could be. +// +// Its mostly included for demonstration purposes and its used in a lot of unit tests. +// +// This defines a really simple text OT type which only allows inserts. (No deletes). +// +// Ops look like: +// {position:#, text:"asdf"} +// +// Document snapshots look like: +// {str:string} -module.exports = - # The name of the OT type. The type is stored in types[type.name]. The name can be - # used in place of the actual type in all the API methods. - name: 'simple' +module.exports = { + // The name of the OT type. The type is stored in types[type.name]. The name can be + // used in place of the actual type in all the API methods. + name: 'simple', - # Create a new document snapshot - create: -> {str:""} + // Create a new document snapshot + create() { return {str:""}; }, - # Apply the given op to the document snapshot. Returns the new snapshot. - # - # The original snapshot should not be modified. - apply: (snapshot, op) -> - throw new Error 'Invalid position' unless 0 <= op.position <= snapshot.str.length + // Apply the given op to the document snapshot. Returns the new snapshot. + // + // The original snapshot should not be modified. + apply(snapshot, op) { + if (!(0 <= op.position && op.position <= snapshot.str.length)) { throw new Error('Invalid position'); } - str = snapshot.str - str = str.slice(0, op.position) + op.text + str.slice(op.position) - {str} + let { + str + } = snapshot; + str = str.slice(0, op.position) + op.text + str.slice(op.position); + return {str}; + }, - # transform op1 by op2. Return transformed version of op1. - # sym describes the symmetry of the op. 
Its 'left' or 'right' depending on whether the - # op being transformed comes from the client or the server. - transform: (op1, op2, sym) -> - pos = op1.position - pos += op2.text.length if op2.position < pos or (op2.position == pos and sym is 'left') + // transform op1 by op2. Return transformed version of op1. + // sym describes the symmetry of the op. Its 'left' or 'right' depending on whether the + // op being transformed comes from the client or the server. + transform(op1, op2, sym) { + let pos = op1.position; + if ((op2.position < pos) || ((op2.position === pos) && (sym === 'left'))) { pos += op2.text.length; } - return {position:pos, text:op1.text} + return {position:pos, text:op1.text}; + } +}; diff --git a/services/document-updater/app/coffee/sharejs/types/syncqueue.js b/services/document-updater/app/coffee/sharejs/types/syncqueue.js index 746450b010..31b2235ee3 100644 --- a/services/document-updater/app/coffee/sharejs/types/syncqueue.js +++ b/services/document-updater/app/coffee/sharejs/types/syncqueue.js @@ -1,42 +1,52 @@ -# A synchronous processing queue. The queue calls process on the arguments, -# ensuring that process() is only executing once at a time. -# -# process(data, callback) _MUST_ eventually call its callback. -# -# Example: -# -# queue = require 'syncqueue' -# -# fn = queue (data, callback) -> -# asyncthing data, -> -# callback(321) -# -# fn(1) -# fn(2) -# fn(3, (result) -> console.log(result)) -# -# ^--- async thing will only be running once at any time. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A synchronous processing queue. The queue calls process on the arguments, +// ensuring that process() is only executing once at a time. +// +// process(data, callback) _MUST_ eventually call its callback. +// +// Example: +// +// queue = require 'syncqueue' +// +// fn = queue (data, callback) -> +// asyncthing data, -> +// callback(321) +// +// fn(1) +// fn(2) +// fn(3, (result) -> console.log(result)) +// +// ^--- async thing will only be running once at any time. -module.exports = (process) -> - throw new Error('process is not a function') unless typeof process == 'function' - queue = [] +module.exports = function(process) { + if (typeof process !== 'function') { throw new Error('process is not a function'); } + const queue = []; - enqueue = (data, callback) -> - queue.push [data, callback] - flush() + const enqueue = function(data, callback) { + queue.push([data, callback]); + return flush(); + }; - enqueue.busy = false + enqueue.busy = false; - flush = -> - return if enqueue.busy or queue.length == 0 + var flush = function() { + if (enqueue.busy || (queue.length === 0)) { return; } - enqueue.busy = true - [data, callback] = queue.shift() - process data, (result...) -> # TODO: Make this not use varargs - varargs are really slow. - enqueue.busy = false - # This is called after busy = false so a user can check if enqueue.busy is set in the callback. - callback.apply null, result if callback - flush() + enqueue.busy = true; + const [data, callback] = Array.from(queue.shift()); + return process(data, function(...result) { // TODO: Make this not use varargs - varargs are really slow. + enqueue.busy = false; + // This is called after busy = false so a user can check if enqueue.busy is set in the callback. 
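The example from the header comment, restated in the converted idiom:

    const queue = require('./syncqueue');

    const fn = queue(function(data, callback) {
      setTimeout(() => callback(data * 2), 10);  // the async thing; one at a time
    });

    fn(1);
    fn(2);
    fn(3, result => console.log(result));        // logs 6 once 1 and 2 have drained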
+ if (callback) { callback.apply(null, result); } + return flush(); + }); + }; - enqueue + return enqueue; +}; diff --git a/services/document-updater/app/coffee/sharejs/types/text-api.js b/services/document-updater/app/coffee/sharejs/types/text-api.js index 96243ceffb..295261ff90 100644 --- a/services/document-updater/app/coffee/sharejs/types/text-api.js +++ b/services/document-updater/app/coffee/sharejs/types/text-api.js @@ -1,32 +1,44 @@ -# Text document API for text +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// Text document API for text -text = require './text' if typeof WEB is 'undefined' +let text; +if (typeof WEB === 'undefined') { text = require('./text'); } -text.api = - provides: {text:true} +text.api = { + provides: {text:true}, - # The number of characters in the string - getLength: -> @snapshot.length + // The number of characters in the string + getLength() { return this.snapshot.length; }, - # Get the text contents of a document - getText: -> @snapshot + // Get the text contents of a document + getText() { return this.snapshot; }, - insert: (pos, text, callback) -> - op = [{p:pos, i:text}] + insert(pos, text, callback) { + const op = [{p:pos, i:text}]; - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - del: (pos, length, callback) -> - op = [{p:pos, d:@snapshot[pos...(pos + length)]}] + del(pos, length, callback) { + const op = [{p:pos, d:this.snapshot.slice(pos, (pos + length))}]; - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - _register: -> - @on 'remoteop', (op) -> - for component in op - if component.i != undefined - @emit 'insert', component.p, component.i - else - @emit 'delete', component.p, component.d + _register() { + return this.on('remoteop', function(op) { + return Array.from(op).map((component) => + component.i !== undefined ? + this.emit('insert', component.p, component.i) + : + this.emit('delete', component.p, component.d)); + }); + } +}; diff --git a/services/document-updater/app/coffee/sharejs/types/text-composable-api.js b/services/document-updater/app/coffee/sharejs/types/text-composable-api.js index 7b27ac163a..160ab1c46e 100644 --- a/services/document-updater/app/coffee/sharejs/types/text-composable-api.js +++ b/services/document-updater/app/coffee/sharejs/types/text-composable-api.js @@ -1,43 +1,64 @@ -# Text document API for text +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// Text document API for text -if WEB? 
- type = exports.types['text-composable'] -else - type = require './text-composable' +let type; +if (typeof WEB !== 'undefined' && WEB !== null) { + type = exports.types['text-composable']; +} else { + type = require('./text-composable'); +} -type.api = - provides: {'text':true} +type.api = { + provides: {'text':true}, - # The number of characters in the string - 'getLength': -> @snapshot.length + // The number of characters in the string + 'getLength'() { return this.snapshot.length; }, - # Get the text contents of a document - 'getText': -> @snapshot + // Get the text contents of a document + 'getText'() { return this.snapshot; }, - 'insert': (pos, text, callback) -> - op = type.normalize [pos, 'i':text, (@snapshot.length - pos)] + 'insert'(pos, text, callback) { + const op = type.normalize([pos, {'i':text}, (this.snapshot.length - pos)]); - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - 'del': (pos, length, callback) -> - op = type.normalize [pos, 'd':@snapshot[pos...(pos + length)], (@snapshot.length - pos - length)] + 'del'(pos, length, callback) { + const op = type.normalize([pos, {'d':this.snapshot.slice(pos, (pos + length))}, (this.snapshot.length - pos - length)]); - @submitOp op, callback - op + this.submitOp(op, callback); + return op; + }, - _register: -> - @on 'remoteop', (op) -> - pos = 0 - for component in op - if typeof component is 'number' - pos += component - else if component.i != undefined - @emit 'insert', pos, component.i - pos += component.i.length - else - # delete - @emit 'delete', pos, component.d - # We don't increment pos, because the position - # specified is after the delete has happened. + _register() { + return this.on('remoteop', function(op) { + let pos = 0; + return (() => { + const result = []; + for (let component of Array.from(op)) { + if (typeof component === 'number') { + result.push(pos += component); + } else if (component.i !== undefined) { + this.emit('insert', pos, component.i); + result.push(pos += component.i.length); + } else { + // delete + result.push(this.emit('delete', pos, component.d)); + } + } + return result; + })(); + }); + } +}; + // We don't increment pos, because the position + // specified is after the delete has happened. diff --git a/services/document-updater/app/coffee/sharejs/types/text-composable.js b/services/document-updater/app/coffee/sharejs/types/text-composable.js index 992b567bf0..4f43f769cd 100644 --- a/services/document-updater/app/coffee/sharejs/types/text-composable.js +++ b/services/document-updater/app/coffee/sharejs/types/text-composable.js @@ -1,261 +1,315 @@ -# An alternate composable implementation for text. This is much closer -# to the implementation used by google wave. -# -# Ops are lists of components which iterate over the whole document. -# Components are either: -# A number N: Skip N characters in the original document -# {i:'str'}: Insert 'str' at the current position in the document -# {d:'str'}: Delete 'str', which appears at the current position in the document -# -# Eg: [3, {i:'hi'}, 5, {d:'internet'}] -# -# Snapshots are strings. +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// An alternate composable implementation for text. 
This is much closer +// to the implementation used by google wave. +// +// Ops are lists of components which iterate over the whole document. +// Components are either: +// A number N: Skip N characters in the original document +// {i:'str'}: Insert 'str' at the current position in the document +// {d:'str'}: Delete 'str', which appears at the current position in the document +// +// Eg: [3, {i:'hi'}, 5, {d:'internet'}] +// +// Snapshots are strings. -p = -> #require('util').debug -i = -> #require('util').inspect +let makeAppend; +const p = function() {}; //require('util').debug +const i = function() {}; //require('util').inspect -exports = if WEB? then {} else module.exports +const exports = (typeof WEB !== 'undefined' && WEB !== null) ? {} : module.exports; -exports.name = 'text-composable' +exports.name = 'text-composable'; -exports.create = -> '' +exports.create = () => ''; -# -------- Utility methods +// -------- Utility methods -checkOp = (op) -> - throw new Error('Op must be an array of components') unless Array.isArray(op) - last = null - for c in op - if typeof(c) == 'object' - throw new Error("Invalid op component: #{i c}") unless (c.i? && c.i.length > 0) or (c.d? && c.d.length > 0) - else - throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number' - throw new Error('Skip components must be a positive number') unless c > 0 - throw new Error('Adjacent skip components should be added') if typeof(last) == 'number' +const checkOp = function(op) { + if (!Array.isArray(op)) { throw new Error('Op must be an array of components'); } + let last = null; + return (() => { + const result = []; + for (let c of Array.from(op)) { + if (typeof(c) === 'object') { + if (((c.i == null) || !(c.i.length > 0)) && ((c.d == null) || !(c.d.length > 0))) { throw new Error(`Invalid op component: ${i(c)}`); } + } else { + if (typeof(c) !== 'number') { throw new Error('Op components must be objects or numbers'); } + if (!(c > 0)) { throw new Error('Skip components must be a positive number'); } + if (typeof(last) === 'number') { throw new Error('Adjacent skip components should be added'); } + } - last = c + result.push(last = c); + } + return result; + })(); +}; -# Makes a function for appending components to a given op. -# Exported for the randomOpGenerator. -exports._makeAppend = makeAppend = (op) -> (component) -> - if component == 0 || component.i == '' || component.d == '' - return - else if op.length == 0 - op.push component - else if typeof(component) == 'number' && typeof(op[op.length - 1]) == 'number' - op[op.length - 1] += component - else if component.i? && op[op.length - 1].i? - op[op.length - 1].i += component.i - else if component.d? && op[op.length - 1].d? - op[op.length - 1].d += component.d - else - op.push component +// Makes a function for appending components to a given op. +// Exported for the randomOpGenerator. 
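+// As an illustrative sketch (not stated in the original source): with
+// op = [5], append(3) coalesces the two skips, leaving op as [8], and a
+// subsequent append({i:'x'}) pushes a new component, giving [8, {i:'x'}].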
+exports._makeAppend = (makeAppend = op => (function(component) { + if ((component === 0) || (component.i === '') || (component.d === '')) { + return; + } else if (op.length === 0) { + return op.push(component); + } else if ((typeof(component) === 'number') && (typeof(op[op.length - 1]) === 'number')) { + return op[op.length - 1] += component; + } else if ((component.i != null) && (op[op.length - 1].i != null)) { + return op[op.length - 1].i += component.i; + } else if ((component.d != null) && (op[op.length - 1].d != null)) { + return op[op.length - 1].d += component.d; + } else { + return op.push(component); + } +})); -# checkOp op +// checkOp op -# Makes 2 functions for taking components from the start of an op, and for peeking -# at the next op that could be taken. -makeTake = (op) -> - # The index of the next component to take - idx = 0 - # The offset into the component - offset = 0 +// Makes 2 functions for taking components from the start of an op, and for peeking +// at the next op that could be taken. +const makeTake = function(op) { + // The index of the next component to take + let idx = 0; + // The offset into the component + let offset = 0; - # Take up to length n from the front of op. If n is null, take the next - # op component. If indivisableField == 'd', delete components won't be separated. - # If indivisableField == 'i', insert components won't be separated. - take = (n, indivisableField) -> - return null if idx == op.length - #assert.notStrictEqual op.length, i, 'The op is too short to traverse the document' + // Take up to length n from the front of op. If n is null, take the next + // op component. If indivisableField == 'd', delete components won't be separated. + // If indivisableField == 'i', insert components won't be separated. + const take = function(n, indivisableField) { + let c; + if (idx === op.length) { return null; } + //assert.notStrictEqual op.length, i, 'The op is too short to traverse the document' - if typeof(op[idx]) == 'number' - if !n? or op[idx] - offset <= n - c = op[idx] - offset - ++idx; offset = 0 - c - else - offset += n - n - else - # Take from the string - field = if op[idx].i then 'i' else 'd' - c = {} - if !n? or op[idx][field].length - offset <= n or field == indivisableField - c[field] = op[idx][field][offset..] - ++idx; offset = 0 - else - c[field] = op[idx][field][offset...(offset + n)] - offset += n - c + if (typeof(op[idx]) === 'number') { + if ((n == null) || ((op[idx] - offset) <= n)) { + c = op[idx] - offset; + ++idx; offset = 0; + return c; + } else { + offset += n; + return n; + } + } else { + // Take from the string + const field = op[idx].i ? 'i' : 'd'; + c = {}; + if ((n == null) || ((op[idx][field].length - offset) <= n) || (field === indivisableField)) { + c[field] = op[idx][field].slice(offset); + ++idx; offset = 0; + } else { + c[field] = op[idx][field].slice(offset, (offset + n)); + offset += n; + } + return c; + } + }; - peekType = () -> - op[idx] + const peekType = () => op[idx]; - [take, peekType] + return [take, peekType]; +}; -# Find and return the length of an op component -componentLength = (component) -> - if typeof(component) == 'number' - component - else if component.i? 
- component.i.length - else - component.d.length +// Find and return the length of an op component +const componentLength = function(component) { + if (typeof(component) === 'number') { + return component; + } else if (component.i != null) { + return component.i.length; + } else { + return component.d.length; + } +}; -# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate -# adjacent inserts and deletes. -exports.normalize = (op) -> - newOp = [] - append = makeAppend newOp - append component for component in op - newOp +// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate +// adjacent inserts and deletes. +exports.normalize = function(op) { + const newOp = []; + const append = makeAppend(newOp); + for (let component of Array.from(op)) { append(component); } + return newOp; +}; -# Apply the op to the string. Returns the new string. -exports.apply = (str, op) -> - p "Applying #{i op} to '#{str}'" - throw new Error('Snapshot should be a string') unless typeof(str) == 'string' - checkOp op +// Apply the op to the string. Returns the new string. +exports.apply = function(str, op) { + p(`Applying ${i(op)} to '${str}'`); + if (typeof(str) !== 'string') { throw new Error('Snapshot should be a string'); } + checkOp(op); - pos = 0 - newDoc = [] + const pos = 0; + const newDoc = []; - for component in op - if typeof(component) == 'number' - throw new Error('The op is too long for this document') if component > str.length - newDoc.push str[...component] - str = str[component..] - else if component.i? - newDoc.push component.i - else - throw new Error("The deleted text '#{component.d}' doesn't match the next characters in the document '#{str[...component.d.length]}'") unless component.d == str[...component.d.length] - str = str[component.d.length..] + for (let component of Array.from(op)) { + if (typeof(component) === 'number') { + if (component > str.length) { throw new Error('The op is too long for this document'); } + newDoc.push(str.slice(0, component)); + str = str.slice(component); + } else if (component.i != null) { + newDoc.push(component.i); + } else { + if (component.d !== str.slice(0, component.d.length)) { throw new Error(`The deleted text '${component.d}' doesn't match the next characters in the document '${str.slice(0, component.d.length)}'`); } + str = str.slice(component.d.length); + } + } - throw new Error("The applied op doesn't traverse the entire document") unless '' == str + if ('' !== str) { throw new Error("The applied op doesn't traverse the entire document"); } - newDoc.join '' + return newDoc.join(''); +}; -# transform op1 by op2. Return transformed version of op1. -# op1 and op2 are unchanged by transform. -exports.transform = (op, otherOp, side) -> - throw new Error "side (#{side} must be 'left' or 'right'" unless side == 'left' or side == 'right' +// transform op1 by op2. Return transformed version of op1. +// op1 and op2 are unchanged by transform. 
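+// A worked example of the intended semantics (illustrative only, not from
+// the original source): transforming a local insert of 'hi' at position 1
+// against a concurrent insert of 'abc' at position 0 of a 3-character
+// document shifts the local insert right by the inserted length:
+//
+//   exports.transform([1, {i:'hi'}, 2], [{i:'abc'}, 3], 'right')
+//   // => [4, {i:'hi'}, 2]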
+exports.transform = function(op, otherOp, side) {
+  let component;
+  if ((side !== 'left') && (side !== 'right')) { throw new Error(`side (${side}) must be 'left' or 'right'`); }
-  checkOp op
-  checkOp otherOp
-  newOp = []
+  checkOp(op);
+  checkOp(otherOp);
+  const newOp = [];
-  append = makeAppend newOp
-  [take, peek] = makeTake op
+  const append = makeAppend(newOp);
+  const [take, peek] = Array.from(makeTake(op));
-  for component in otherOp
-    if typeof(component) == 'number' # Skip
-      length = component
-      while length > 0
-        chunk = take(length, 'i')
-        throw new Error('The op traverses more elements than the document has') unless chunk != null
+  for (component of Array.from(otherOp)) {
+    var chunk, length;
+    if (typeof(component) === 'number') { // Skip
+      length = component;
+      while (length > 0) {
+        chunk = take(length, 'i');
+        if (chunk === null) { throw new Error('The op traverses more elements than the document has'); }
-        append chunk
-        length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.i?
-    else if component.i? # Insert
-      if side == 'left'
-        # The left insert should go first.
-        o = peek()
-        append take() if o?.i
+        append(chunk);
+        if ((typeof(chunk) !== 'object') || (chunk.i == null)) { length -= componentLength(chunk); }
+      }
+    } else if (component.i != null) { // Insert
+      if (side === 'left') {
+        // The left insert should go first.
+        const o = peek();
+        if (o != null ? o.i : undefined) { append(take()); }
+      }
-      # Otherwise, skip the inserted text.
-      append(component.i.length)
-    else # Delete.
-      #assert.ok component.d
-      length = component.d.length
-      while length > 0
-        chunk = take(length, 'i')
-        throw new Error('The op traverses more elements than the document has') unless chunk != null
+      // Otherwise, skip the inserted text.
+      append(component.i.length);
+    } else { // Delete.
+      //assert.ok component.d
+      ({
+        length
+      } = component.d);
+      while (length > 0) {
+        chunk = take(length, 'i');
+        if (chunk === null) { throw new Error('The op traverses more elements than the document has'); }
-        if typeof(chunk) == 'number'
-          length -= chunk
-        else if chunk.i?
-          append(chunk)
-        else
-          #assert.ok chunk.d
-          # The delete is unnecessary now.
-          length -= chunk.d.length
+        if (typeof(chunk) === 'number') {
+          length -= chunk;
+        } else if (chunk.i != null) {
+          append(chunk);
+        } else {
+          //assert.ok chunk.d
+          // The delete is unnecessary now.
+          length -= chunk.d.length;
+        }
+      }
+    }
+  }
-  # Append extras from op1
-  while (component = take())
-    throw new Error "Remaining fragments in the op: #{i component}" unless component?.i?
-    append component
+  // Append extras from op1
+  while (component = take()) {
+    if ((component != null ? component.i : undefined) == null) { throw new Error(`Remaining fragments in the op: ${i(component)}`); }
+    append(component);
+  }
-  newOp
+  return newOp;
+};
-# Compose 2 ops into 1 op.
-exports.compose = (op1, op2) ->
-  p "COMPOSE #{i op1} + #{i op2}"
-  checkOp op1
-  checkOp op2
+// Compose 2 ops into 1 op.
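+// For instance (an illustrative sketch, not from the original source),
+// composing an insert with a delete over the 2-character document 'xy':
+//
+//   exports.compose([{i:'ab'}, 2], [2, {d:'xy'}])
+//   // => [{i:'ab'}, {d:'xy'}]
+//
+// Applying the result to 'xy' yields 'ab', the same as applying the two
+// ops one after the other.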
+exports.compose = function(op1, op2) {
+  let component;
+  p(`COMPOSE ${i(op1)} + ${i(op2)}`);
+  checkOp(op1);
+  checkOp(op2);
-  result = []
+  const result = [];
-  append = makeAppend result
-  [take, _] = makeTake op1
+  const append = makeAppend(result);
+  const [take, _] = Array.from(makeTake(op1));
-  for component in op2
-    if typeof(component) == 'number' # Skip
-      length = component
-      while length > 0
-        chunk = take(length, 'd')
-        throw new Error('The op traverses more elements than the document has') unless chunk != null
+  for (component of Array.from(op2)) {
+    var chunk, length;
+    if (typeof(component) === 'number') { // Skip
+      length = component;
+      while (length > 0) {
+        chunk = take(length, 'd');
+        if (chunk === null) { throw new Error('The op traverses more elements than the document has'); }
-        append chunk
-        length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.d?
+        append(chunk);
+        if ((typeof(chunk) !== 'object') || (chunk.d == null)) { length -= componentLength(chunk); }
+      }
-    else if component.i? # Insert
-      append {i:component.i}
+    } else if (component.i != null) { // Insert
+      append({i:component.i});
-    else # Delete
-      offset = 0
-      while offset < component.d.length
-        chunk = take(component.d.length - offset, 'd')
-        throw new Error('The op traverses more elements than the document has') unless chunk != null
+    } else { // Delete
+      let offset = 0;
+      while (offset < component.d.length) {
+        chunk = take(component.d.length - offset, 'd');
+        if (chunk === null) { throw new Error('The op traverses more elements than the document has'); }
-        # If its delete, append it. If its skip, drop it and decrease length. If its insert, check the strings match, drop it and decrease length.
-        if typeof(chunk) == 'number'
-          append {d:component.d[offset...(offset + chunk)]}
-          offset += chunk
-        else if chunk.i?
-          throw new Error("The deleted text doesn't match the inserted text") unless component.d[offset...(offset + chunk.i.length)] == chunk.i
-          offset += chunk.i.length
-          # The ops cancel each other out.
-        else
-          # Delete
-          append chunk
+        // If it's a delete, append it. If it's a skip, drop it and decrease length. If it's an insert, check the strings match, drop it and decrease length.
+        if (typeof(chunk) === 'number') {
+          append({d:component.d.slice(offset, (offset + chunk))});
+          offset += chunk;
+        } else if (chunk.i != null) {
+          if (component.d.slice(offset, (offset + chunk.i.length)) !== chunk.i) { throw new Error("The deleted text doesn't match the inserted text"); }
+          offset += chunk.i.length;
+          // The ops cancel each other out.
+        } else {
+          // Delete
+          append(chunk);
+        }
+      }
+    }
+  }
-  # Append extras from op1
-  while (component = take())
-    throw new Error "Trailing stuff in op1 #{i component}" unless component?.d?
-    append component
+  // Append extras from op1
+  while (component = take()) {
+    if ((component != null ? component.d : undefined) == null) { throw new Error(`Trailing stuff in op1 ${i(component)}`); }
+    append(component);
+  }
-  result
+  return result;
+};
-invertComponent = (c) ->
-  if typeof(c) == 'number'
-    c
-  else if c.i?
- {d:c.i} - else - {i:c.d} +const invertComponent = function(c) { + if (typeof(c) === 'number') { + return c; + } else if (c.i != null) { + return {d:c.i}; + } else { + return {i:c.d}; + } +}; -# Invert an op -exports.invert = (op) -> - result = [] - append = makeAppend result +// Invert an op +exports.invert = function(op) { + const result = []; + const append = makeAppend(result); - append(invertComponent component) for component in op + for (let component of Array.from(op)) { append(invertComponent(component)); } - result + return result; +}; -if window? - window.ot ||= {} - window.ot.types ||= {} - window.ot.types.text = exports +if (typeof window !== 'undefined' && window !== null) { + if (!window.ot) { window.ot = {}; } + if (!window.ot.types) { window.ot.types = {}; } + window.ot.types.text = exports; +} diff --git a/services/document-updater/app/coffee/sharejs/types/text-tp2-api.js b/services/document-updater/app/coffee/sharejs/types/text-tp2-api.js index d661b5ae37..e3f4f95ea6 100644 --- a/services/document-updater/app/coffee/sharejs/types/text-tp2-api.js +++ b/services/document-updater/app/coffee/sharejs/types/text-tp2-api.js @@ -1,89 +1,118 @@ -# Text document API for text-tp2 +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// Text document API for text-tp2 -if WEB? - type = exports.types['text-tp2'] -else - type = require './text-tp2' +let type; +if (typeof WEB !== 'undefined' && WEB !== null) { + type = exports.types['text-tp2']; +} else { + type = require('./text-tp2'); +} -{_takeDoc:takeDoc, _append:append} = type +const {_takeDoc:takeDoc, _append:append} = type; -appendSkipChars = (op, doc, pos, maxlength) -> - while (maxlength == undefined || maxlength > 0) and pos.index < doc.data.length - part = takeDoc doc, pos, maxlength, true - maxlength -= part.length if maxlength != undefined and typeof part is 'string' - append op, (part.length || part) +const appendSkipChars = (op, doc, pos, maxlength) => (() => { + const result = []; + while (((maxlength === undefined) || (maxlength > 0)) && (pos.index < doc.data.length)) { + const part = takeDoc(doc, pos, maxlength, true); + if ((maxlength !== undefined) && (typeof part === 'string')) { maxlength -= part.length; } + result.push(append(op, (part.length || part))); + } + return result; +})(); -type['api'] = - 'provides': {'text':true} +type['api'] = { + 'provides': {'text':true}, - # The number of characters in the string - 'getLength': -> @snapshot.charLength + // The number of characters in the string + 'getLength'() { return this.snapshot.charLength; }, - # Flatten a document into a string - 'getText': -> - strings = (elem for elem in @snapshot.data when typeof elem is 'string') - strings.join '' + // Flatten a document into a string + 'getText'() { + const strings = (Array.from(this.snapshot.data).filter((elem) => typeof elem === 'string')); + return strings.join(''); + }, - 'insert': (pos, text, callback) -> - pos = 0 if pos == undefined + 'insert'(pos, text, callback) { + if (pos === undefined) { pos = 0; } - op = [] - docPos = {index:0, offset:0} + const op = []; + const docPos = {index:0, offset:0}; - appendSkipChars op, @snapshot, docPos, pos - append op, {'i':text} - appendSkipChars op, @snapshot, docPos + 
appendSkipChars(op, this.snapshot, docPos, pos);
+    append(op, {'i':text});
+    appendSkipChars(op, this.snapshot, docPos);
-    @submitOp op, callback
-    op
+    this.submitOp(op, callback);
+    return op;
+  },
-  'del': (pos, length, callback) ->
-    op = []
-    docPos = {index:0, offset:0}
+  'del'(pos, length, callback) {
+    const op = [];
+    const docPos = {index:0, offset:0};
-    appendSkipChars op, @snapshot, docPos, pos
+    appendSkipChars(op, this.snapshot, docPos, pos);
-    while length > 0
-      part = takeDoc @snapshot, docPos, length, true
-      if typeof part is 'string'
-        append op, {'d':part.length}
-        length -= part.length
-      else
-        append op, part
+    while (length > 0) {
+      const part = takeDoc(this.snapshot, docPos, length, true);
+      if (typeof part === 'string') {
+        append(op, {'d':part.length});
+        length -= part.length;
+      } else {
+        append(op, part);
+      }
+    }
-    appendSkipChars op, @snapshot, docPos
+    appendSkipChars(op, this.snapshot, docPos);
-    @submitOp op, callback
-    op
+    this.submitOp(op, callback);
+    return op;
+  },
-  '_register': ->
-    # Interpret recieved ops + generate more detailed events for them
-    @on 'remoteop', (op, snapshot) ->
-      textPos = 0
-      docPos = {index:0, offset:0}
+  '_register'() {
+    // Interpret received ops + generate more detailed events for them
+    return this.on('remoteop', function(op, snapshot) {
+      let textPos = 0;
+      const docPos = {index:0, offset:0};
-      for component in op
-        if typeof component is 'number'
-          # Skip
-          remainder = component
-          while remainder > 0
-            part = takeDoc snapshot, docPos, remainder
-            if typeof part is 'string'
-              textPos += part.length
-            remainder -= part.length || part
-        else if component.i != undefined
-          # Insert
-          if typeof component.i is 'string'
-            @emit 'insert', textPos, component.i
-            textPos += component.i.length
-        else
-          # Delete
-          remainder = component.d
-          while remainder > 0
-            part = takeDoc snapshot, docPos, remainder
-            if typeof part is 'string'
-              @emit 'delete', textPos, part
-            remainder -= part.length || part
+      for (let component of Array.from(op)) {
+        var part, remainder;
+        if (typeof component === 'number') {
+          // Skip
+          remainder = component;
+          while (remainder > 0) {
+            part = takeDoc(snapshot, docPos, remainder);
+            if (typeof part === 'string') {
+              textPos += part.length;
+            }
+            remainder -= part.length || part;
+          }
+        } else if (component.i !== undefined) {
+          // Insert
+          if (typeof component.i === 'string') {
+            this.emit('insert', textPos, component.i);
+            textPos += component.i.length;
+          }
+        } else {
+          // Delete
+          remainder = component.d;
+          while (remainder > 0) {
+            part = takeDoc(snapshot, docPos, remainder);
+            if (typeof part === 'string') {
+              this.emit('delete', textPos, part);
+            }
+            remainder -= part.length || part;
+          }
+        }
+      }
-      return
+    });
+  }
+};
diff --git a/services/document-updater/app/coffee/sharejs/types/text-tp2.js b/services/document-updater/app/coffee/sharejs/types/text-tp2.js
index d19cbdcef4..ab123d6ff7 100644
--- a/services/document-updater/app/coffee/sharejs/types/text-tp2.js
+++ b/services/document-updater/app/coffee/sharejs/types/text-tp2.js
@@ -1,322 +1,398 @@
-# A TP2 implementation of text, following this spec:
-# http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README
-#
-# A document is made up of a string and a set of tombstones inserted throughout
-# the string. For example, 'some ', (2 tombstones), 'string'.
-#
-# This is encoded in a document as: {s:'some string', t:[5, -2, 6]}
-#
-# Ops are lists of components which iterate over the whole document.
-# Components are either: -# N: Skip N characters in the original document -# {i:'str'}: Insert 'str' at the current position in the document -# {i:N}: Insert N tombstones at the current position in the document -# {d:N}: Delete (tombstone) N characters at the current position in the document -# -# Eg: [3, {i:'hi'}, 5, {d:8}] -# -# Snapshots are lists with characters and tombstones. Characters are stored in strings -# and adjacent tombstones are flattened into numbers. -# -# Eg, the document: 'Hello .....world' ('.' denotes tombstoned (deleted) characters) -# would be represented by a document snapshot of ['Hello ', 5, 'world'] +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS103: Rewrite code to no longer use __guard__ + * DS205: Consider reworking code to avoid use of IIFEs + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A TP2 implementation of text, following this spec: +// http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README +// +// A document is made up of a string and a set of tombstones inserted throughout +// the string. For example, 'some ', (2 tombstones), 'string'. +// +// This is encoded in a document as: {s:'some string', t:[5, -2, 6]} +// +// Ops are lists of components which iterate over the whole document. +// Components are either: +// N: Skip N characters in the original document +// {i:'str'}: Insert 'str' at the current position in the document +// {i:N}: Insert N tombstones at the current position in the document +// {d:N}: Delete (tombstone) N characters at the current position in the document +// +// Eg: [3, {i:'hi'}, 5, {d:8}] +// +// Snapshots are lists with characters and tombstones. Characters are stored in strings +// and adjacent tombstones are flattened into numbers. +// +// Eg, the document: 'Hello .....world' ('.' 
denotes tombstoned (deleted) characters) +// would be represented by a document snapshot of ['Hello ', 5, 'world'] -type = - name: 'text-tp2' - tp2: true - create: -> {charLength:0, totalLength:0, positionCache:[], data:[]} - serialize: (doc) -> - throw new Error 'invalid doc snapshot' unless doc.data - doc.data - deserialize: (data) -> - doc = type.create() - doc.data = data +let append, appendDoc, takeDoc; +var type = { + name: 'text-tp2', + tp2: true, + create() { return {charLength:0, totalLength:0, positionCache:[], data:[]}; }, + serialize(doc) { + if (!doc.data) { throw new Error('invalid doc snapshot'); } + return doc.data; + }, + deserialize(data) { + const doc = type.create(); + doc.data = data; - for component in data - if typeof component is 'string' - doc.charLength += component.length - doc.totalLength += component.length - else - doc.totalLength += component + for (let component of Array.from(data)) { + if (typeof component === 'string') { + doc.charLength += component.length; + doc.totalLength += component.length; + } else { + doc.totalLength += component; + } + } - doc + return doc; + } +}; -checkOp = (op) -> - throw new Error('Op must be an array of components') unless Array.isArray(op) - last = null - for c in op - if typeof(c) == 'object' - if c.i != undefined - throw new Error('Inserts must insert a string or a +ive number') unless (typeof(c.i) == 'string' and c.i.length > 0) or (typeof(c.i) == 'number' and c.i > 0) - else if c.d != undefined - throw new Error('Deletes must be a +ive number') unless typeof(c.d) == 'number' and c.d > 0 - else - throw new Error('Operation component must define .i or .d') - else - throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number' - throw new Error('Skip components must be a positive number') unless c > 0 - throw new Error('Adjacent skip components should be combined') if typeof(last) == 'number' +const checkOp = function(op) { + if (!Array.isArray(op)) { throw new Error('Op must be an array of components'); } + let last = null; + return (() => { + const result = []; + for (let c of Array.from(op)) { + if (typeof(c) === 'object') { + if (c.i !== undefined) { + if (((typeof(c.i) !== 'string') || !(c.i.length > 0)) && ((typeof(c.i) !== 'number') || !(c.i > 0))) { throw new Error('Inserts must insert a string or a +ive number'); } + } else if (c.d !== undefined) { + if ((typeof(c.d) !== 'number') || !(c.d > 0)) { throw new Error('Deletes must be a +ive number'); } + } else { + throw new Error('Operation component must define .i or .d'); + } + } else { + if (typeof(c) !== 'number') { throw new Error('Op components must be objects or numbers'); } + if (!(c > 0)) { throw new Error('Skip components must be a positive number'); } + if (typeof(last) === 'number') { throw new Error('Adjacent skip components should be combined'); } + } - last = c + result.push(last = c); + } + return result; + })(); +}; -# Take the next part from the specified position in a document snapshot. -# position = {index, offset}. It will be updated. -type._takeDoc = takeDoc = (doc, position, maxlength, tombsIndivisible) -> - throw new Error 'Operation goes past the end of the document' if position.index >= doc.data.length +// Take the next part from the specified position in a document snapshot. +// position = {index, offset}. It will be updated. 
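+// A small worked example (illustrative, not from the original source):
+// with doc.data = ['Hello ', 5, 'world'] and position = {index:0, offset:0},
+// takeDoc(doc, position, 3) returns 'Hel' and advances position to
+// {index:0, offset:3}; calling takeDoc(doc, position) again returns 'lo '
+// and moves position on to the tombstone run at index 1.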
+type._takeDoc = (takeDoc = function(doc, position, maxlength, tombsIndivisible) { + if (position.index >= doc.data.length) { throw new Error('Operation goes past the end of the document'); } - part = doc.data[position.index] - # peel off data[0] - result = if typeof(part) == 'string' - if maxlength != undefined - part[position.offset...(position.offset + maxlength)] - else - part[position.offset...] - else - if maxlength == undefined or tombsIndivisible + const part = doc.data[position.index]; + // peel off data[0] + const result = typeof(part) === 'string' ? + maxlength !== undefined ? + part.slice(position.offset, (position.offset + maxlength)) + : + part.slice(position.offset) + : + (maxlength === undefined) || tombsIndivisible ? part - position.offset - else - Math.min(maxlength, part - position.offset) + : + Math.min(maxlength, part - position.offset); - resultLen = result.length || result + const resultLen = result.length || result; - if (part.length || part) - position.offset > resultLen - position.offset += resultLen - else - position.index++ - position.offset = 0 + if (((part.length || part) - position.offset) > resultLen) { + position.offset += resultLen; + } else { + position.index++; + position.offset = 0; + } - result + return result; +}); -# Append a part to the end of a document -type._appendDoc = appendDoc = (doc, p) -> - return if p == 0 or p == '' +// Append a part to the end of a document +type._appendDoc = (appendDoc = function(doc, p) { + if ((p === 0) || (p === '')) { return; } - if typeof p is 'string' - doc.charLength += p.length - doc.totalLength += p.length - else - doc.totalLength += p + if (typeof p === 'string') { + doc.charLength += p.length; + doc.totalLength += p.length; + } else { + doc.totalLength += p; + } - data = doc.data - if data.length == 0 - data.push p - else if typeof(data[data.length - 1]) == typeof(p) - data[data.length - 1] += p - else - data.push p - return + const { + data + } = doc; + if (data.length === 0) { + data.push(p); + } else if (typeof(data[data.length - 1]) === typeof(p)) { + data[data.length - 1] += p; + } else { + data.push(p); + } +}); -# Apply the op to the document. The document is not modified in the process. -type.apply = (doc, op) -> - unless doc.totalLength != undefined and doc.charLength != undefined and doc.data.length != undefined - throw new Error('Snapshot is invalid') +// Apply the op to the document. The document is not modified in the process. 
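+// Sketch of apply on a tiny snapshot (illustrative, not from the original
+// source): tombstoning the middle character of 'abc' leaves the deleted
+// character's length behind as a tombstone:
+//
+//   type.apply(type.deserialize(['abc']), [1, {d:1}, 1])
+//   // => data: ['a', 1, 'c'], charLength: 2, totalLength: 3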
+type.apply = function(doc, op) { + if ((doc.totalLength === undefined) || (doc.charLength === undefined) || (doc.data.length === undefined)) { + throw new Error('Snapshot is invalid'); + } - checkOp op + checkOp(op); - newDoc = type.create() - position = {index:0, offset:0} + const newDoc = type.create(); + const position = {index:0, offset:0}; - for component in op - if typeof(component) is 'number' - remainder = component - while remainder > 0 - part = takeDoc doc, position, remainder + for (let component of Array.from(op)) { + var part, remainder; + if (typeof(component) === 'number') { + remainder = component; + while (remainder > 0) { + part = takeDoc(doc, position, remainder); - appendDoc newDoc, part - remainder -= part.length || part + appendDoc(newDoc, part); + remainder -= part.length || part; + } - else if component.i != undefined - appendDoc newDoc, component.i - else if component.d != undefined - remainder = component.d - while remainder > 0 - part = takeDoc doc, position, remainder - remainder -= part.length || part - appendDoc newDoc, component.d + } else if (component.i !== undefined) { + appendDoc(newDoc, component.i); + } else if (component.d !== undefined) { + remainder = component.d; + while (remainder > 0) { + part = takeDoc(doc, position, remainder); + remainder -= part.length || part; + } + appendDoc(newDoc, component.d); + } + } - newDoc + return newDoc; +}; -# Append an op component to the end of the specified op. -# Exported for the randomOpGenerator. -type._append = append = (op, component) -> - if component == 0 || component.i == '' || component.i == 0 || component.d == 0 - return - else if op.length == 0 - op.push component - else - last = op[op.length - 1] - if typeof(component) == 'number' && typeof(last) == 'number' - op[op.length - 1] += component - else if component.i != undefined && last.i? && typeof(last.i) == typeof(component.i) - last.i += component.i - else if component.d != undefined && last.d? - last.d += component.d - else - op.push component +// Append an op component to the end of the specified op. +// Exported for the randomOpGenerator. +type._append = (append = function(op, component) { + if ((component === 0) || (component.i === '') || (component.i === 0) || (component.d === 0)) { + return; + } else if (op.length === 0) { + return op.push(component); + } else { + const last = op[op.length - 1]; + if ((typeof(component) === 'number') && (typeof(last) === 'number')) { + return op[op.length - 1] += component; + } else if ((component.i !== undefined) && (last.i != null) && (typeof(last.i) === typeof(component.i))) { + return last.i += component.i; + } else if ((component.d !== undefined) && (last.d != null)) { + return last.d += component.d; + } else { + return op.push(component); + } + } +}); -# Makes 2 functions for taking components from the start of an op, and for peeking -# at the next op that could be taken. -makeTake = (op) -> - # The index of the next component to take - index = 0 - # The offset into the component - offset = 0 +// Makes 2 functions for taking components from the start of an op, and for peeking +// at the next op that could be taken. +const makeTake = function(op) { + // The index of the next component to take + let index = 0; + // The offset into the component + let offset = 0; - # Take up to length maxlength from the op. If maxlength is not defined, there is no max. - # If insertsIndivisible is true, inserts (& insert tombstones) won't be separated. - # - # Returns null when op is fully consumed. 
- take = (maxlength, insertsIndivisible) -> - return null if index == op.length + // Take up to length maxlength from the op. If maxlength is not defined, there is no max. + // If insertsIndivisible is true, inserts (& insert tombstones) won't be separated. + // + // Returns null when op is fully consumed. + const take = function(maxlength, insertsIndivisible) { + let current; + if (index === op.length) { return null; } - e = op[index] - if typeof((current = e)) == 'number' or typeof((current = e.i)) == 'number' or (current = e.d) != undefined - if !maxlength? or current - offset <= maxlength or (insertsIndivisible and e.i != undefined) - # Return the rest of the current element. - c = current - offset - ++index; offset = 0 - else - offset += maxlength - c = maxlength - if e.i != undefined then {i:c} else if e.d != undefined then {d:c} else c - else - # Take from the inserted string - if !maxlength? or e.i.length - offset <= maxlength or insertsIndivisible - result = {i:e.i[offset..]} - ++index; offset = 0 - else - result = {i:e.i[offset...offset + maxlength]} - offset += maxlength - result + const e = op[index]; + if ((typeof((current = e)) === 'number') || (typeof((current = e.i)) === 'number') || ((current = e.d) !== undefined)) { + let c; + if ((maxlength == null) || ((current - offset) <= maxlength) || (insertsIndivisible && (e.i !== undefined))) { + // Return the rest of the current element. + c = current - offset; + ++index; offset = 0; + } else { + offset += maxlength; + c = maxlength; + } + if (e.i !== undefined) { return {i:c}; } else if (e.d !== undefined) { return {d:c}; } else { return c; } + } else { + // Take from the inserted string + let result; + if ((maxlength == null) || ((e.i.length - offset) <= maxlength) || insertsIndivisible) { + result = {i:e.i.slice(offset)}; + ++index; offset = 0; + } else { + result = {i:e.i.slice(offset, offset + maxlength)}; + offset += maxlength; + } + return result; + } + }; - peekType = -> op[index] + const peekType = () => op[index]; - [take, peekType] + return [take, peekType]; +}; -# Find and return the length of an op component -componentLength = (component) -> - if typeof(component) == 'number' - component - else if typeof(component.i) == 'string' - component.i.length - else - # This should work because c.d and c.i must be +ive. - component.d or component.i +// Find and return the length of an op component +const componentLength = function(component) { + if (typeof(component) === 'number') { + return component; + } else if (typeof(component.i) === 'string') { + return component.i.length; + } else { + // This should work because c.d and c.i must be +ive. + return component.d || component.i; + } +}; -# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate -# adjacent inserts and deletes. -type.normalize = (op) -> - newOp = [] - append newOp, component for component in op - newOp +// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate +// adjacent inserts and deletes. +type.normalize = function(op) { + const newOp = []; + for (let component of Array.from(op)) { append(newOp, component); } + return newOp; +}; -# This is a helper method to transform and prune. goForwards is true for transform, false for prune. -transformer = (op, otherOp, goForwards, side) -> - checkOp op - checkOp otherOp - newOp = [] +// This is a helper method to transform and prune. goForwards is true for transform, false for prune. 
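+// Rough intuition (an assumption drawn from the comments in this file, not
+// stated explicitly here): prune undoes transform, so for ops a and b,
+// type.prune(type.transform(a, b, side), b) should recover a.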
+const transformer = function(op, otherOp, goForwards, side) { + let component; + checkOp(op); + checkOp(otherOp); + const newOp = []; - [take, peek] = makeTake op + const [take, peek] = Array.from(makeTake(op)); - for component in otherOp - length = componentLength component + for (component of Array.from(otherOp)) { + var chunk; + let length = componentLength(component); - if component.i != undefined # Insert text or tombs - if goForwards # transform - insert skips over inserted parts - if side == 'left' - # The left insert should go first. - append newOp, take() while peek()?.i != undefined + if (component.i !== undefined) { // Insert text or tombs + if (goForwards) { // transform - insert skips over inserted parts + if (side === 'left') { + // The left insert should go first. + while (__guard__(peek(), x => x.i) !== undefined) { append(newOp, take()); } + } - # In any case, skip the inserted text. - append newOp, length + // In any case, skip the inserted text. + append(newOp, length); - else # Prune. Remove skips for inserts. - while length > 0 - chunk = take length, true + } else { // Prune. Remove skips for inserts. + while (length > 0) { + chunk = take(length, true); - throw new Error 'The transformed op is invalid' unless chunk != null - throw new Error 'The transformed op deletes locally inserted characters - it cannot be purged of the insert.' if chunk.d != undefined + if (chunk === null) { throw new Error('The transformed op is invalid'); } + if (chunk.d !== undefined) { throw new Error('The transformed op deletes locally inserted characters - it cannot be purged of the insert.'); } - if typeof chunk is 'number' - length -= chunk - else - append newOp, chunk + if (typeof chunk === 'number') { + length -= chunk; + } else { + append(newOp, chunk); + } + } + } - else # Skip or delete - while length > 0 - chunk = take length, true - throw new Error('The op traverses more elements than the document has') unless chunk != null + } else { // Skip or delete + while (length > 0) { + chunk = take(length, true); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - append newOp, chunk - length -= componentLength chunk unless chunk.i + append(newOp, chunk); + if (!chunk.i) { length -= componentLength(chunk); } + } + } + } - # Append extras from op1 - while (component = take()) - throw new Error "Remaining fragments in the op: #{component}" unless component.i != undefined - append newOp, component + // Append extras from op1 + while (component = take()) { + if (component.i === undefined) { throw new Error(`Remaining fragments in the op: ${component}`); } + append(newOp, component); + } - newOp + return newOp; +}; -# transform op1 by op2. Return transformed version of op1. -# op1 and op2 are unchanged by transform. -# side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op. -type.transform = (op, otherOp, side) -> - throw new Error "side (#{side}) should be 'left' or 'right'" unless side == 'left' or side == 'right' - transformer op, otherOp, true, side +// transform op1 by op2. Return transformed version of op1. +// op1 and op2 are unchanged by transform. +// side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op. +type.transform = function(op, otherOp, side) { + if ((side !== 'left') && (side !== 'right')) { throw new Error(`side (${side}) should be 'left' or 'right'`); } + return transformer(op, otherOp, true, side); +}; -# Prune is the inverse of transform. 
-type.prune = (op, otherOp) -> transformer op, otherOp, false +// Prune is the inverse of transform. +type.prune = (op, otherOp) => transformer(op, otherOp, false); -# Compose 2 ops into 1 op. -type.compose = (op1, op2) -> - return op2 if op1 == null or op1 == undefined +// Compose 2 ops into 1 op. +type.compose = function(op1, op2) { + let component; + if ((op1 === null) || (op1 === undefined)) { return op2; } - checkOp op1 - checkOp op2 + checkOp(op1); + checkOp(op2); - result = [] + const result = []; - [take, _] = makeTake op1 + const [take, _] = Array.from(makeTake(op1)); - for component in op2 + for (component of Array.from(op2)) { - if typeof(component) == 'number' # Skip - # Just copy from op1. - length = component - while length > 0 - chunk = take length - throw new Error('The op traverses more elements than the document has') unless chunk != null + var chunk, length; + if (typeof(component) === 'number') { // Skip + // Just copy from op1. + length = component; + while (length > 0) { + chunk = take(length); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - append result, chunk - length -= componentLength chunk + append(result, chunk); + length -= componentLength(chunk); + } - else if component.i != undefined # Insert - append result, {i:component.i} + } else if (component.i !== undefined) { // Insert + append(result, {i:component.i}); - else # Delete - length = component.d - while length > 0 - chunk = take length - throw new Error('The op traverses more elements than the document has') unless chunk != null + } else { // Delete + length = component.d; + while (length > 0) { + chunk = take(length); + if (chunk === null) { throw new Error('The op traverses more elements than the document has'); } - chunkLength = componentLength chunk - if chunk.i != undefined - append result, {i:chunkLength} - else - append result, {d:chunkLength} + const chunkLength = componentLength(chunk); + if (chunk.i !== undefined) { + append(result, {i:chunkLength}); + } else { + append(result, {d:chunkLength}); + } - length -= chunkLength + length -= chunkLength; + } + } + } - # Append extras from op1 - while (component = take()) - throw new Error "Remaining fragments in op1: #{component}" unless component.i != undefined - append result, component + // Append extras from op1 + while (component = take()) { + if (component.i === undefined) { throw new Error(`Remaining fragments in op1: ${component}`); } + append(result, component); + } - result + return result; +}; -if WEB? - exports.types['text-tp2'] = type -else - module.exports = type +if (typeof WEB !== 'undefined' && WEB !== null) { + exports.types['text-tp2'] = type; +} else { + module.exports = type; +} + +function __guard__(value, transform) { + return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined; +} \ No newline at end of file diff --git a/services/document-updater/app/coffee/sharejs/types/text.js b/services/document-updater/app/coffee/sharejs/types/text.js index 2a3b79997d..fed546d10f 100644 --- a/services/document-updater/app/coffee/sharejs/types/text.js +++ b/services/document-updater/app/coffee/sharejs/types/text.js @@ -1,263 +1,305 @@ -# A simple text implementation -# -# Operations are lists of components. -# Each component either inserts or deletes at a specified position in the document. 
-# -# Components are either: -# {i:'str', p:100}: Insert 'str' at position 100 in the document -# {d:'str', p:100}: Delete 'str' at position 100 in the document -# -# Components in an operation are executed sequentially, so the position of components -# assumes previous components have already executed. -# -# Eg: This op: -# [{i:'abc', p:0}] -# is equivalent to this op: -# [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}] +/* + * decaffeinate suggestions: + * DS101: Remove unnecessary use of Array.from + * DS102: Remove unnecessary code created because of implicit returns + * DS207: Consider shorter variations of null checks + * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md + */ +// A simple text implementation +// +// Operations are lists of components. +// Each component either inserts or deletes at a specified position in the document. +// +// Components are either: +// {i:'str', p:100}: Insert 'str' at position 100 in the document +// {d:'str', p:100}: Delete 'str' at position 100 in the document +// +// Components in an operation are executed sequentially, so the position of components +// assumes previous components have already executed. +// +// Eg: This op: +// [{i:'abc', p:0}] +// is equivalent to this op: +// [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}] -# NOTE: The global scope here is shared with other sharejs files when built with closure. -# Be careful what ends up in your namespace. +// NOTE: The global scope here is shared with other sharejs files when built with closure. +// Be careful what ends up in your namespace. -text = {} +let append, transformComponent; +const text = {}; -text.name = 'text' +text.name = 'text'; -text.create = -> '' +text.create = () => ''; -strInject = (s1, pos, s2) -> s1[...pos] + s2 + s1[pos..] +const strInject = (s1, pos, s2) => s1.slice(0, pos) + s2 + s1.slice(pos); -checkValidComponent = (c) -> - throw new Error 'component missing position field' if typeof c.p != 'number' +const checkValidComponent = function(c) { + if (typeof c.p !== 'number') { throw new Error('component missing position field'); } - i_type = typeof c.i - d_type = typeof c.d - c_type = typeof c.c - throw new Error 'component needs an i, d or c field' unless (i_type == 'string') ^ (d_type == 'string') ^ (c_type == 'string') + const i_type = typeof c.i; + const d_type = typeof c.d; + const c_type = typeof c.c; + if (!((i_type === 'string') ^ (d_type === 'string') ^ (c_type === 'string'))) { throw new Error('component needs an i, d or c field'); } - throw new Error 'position cannot be negative' unless c.p >= 0 + if (!(c.p >= 0)) { throw new Error('position cannot be negative'); } +}; -checkValidOp = (op) -> - checkValidComponent(c) for c in op - true +const checkValidOp = function(op) { + for (let c of Array.from(op)) { checkValidComponent(c); } + return true; +}; -text.apply = (snapshot, op) -> - checkValidOp op - for component in op - if component.i? - snapshot = strInject snapshot, component.p, component.i - else if component.d? - deleted = snapshot[component.p...(component.p + component.d.length)] - throw new Error "Delete component '#{component.d}' does not match deleted text '#{deleted}'" unless component.d == deleted - snapshot = snapshot[...component.p] + snapshot[(component.p + component.d.length)..] - else if component.c? 
- comment = snapshot[component.p...(component.p + component.c.length)] - throw new Error "Comment component '#{component.c}' does not match commented text '#{comment}'" unless component.c == comment - else - throw new Error "Unknown op type" - snapshot +text.apply = function(snapshot, op) { + checkValidOp(op); + for (let component of Array.from(op)) { + if (component.i != null) { + snapshot = strInject(snapshot, component.p, component.i); + } else if (component.d != null) { + const deleted = snapshot.slice(component.p, (component.p + component.d.length)); + if (component.d !== deleted) { throw new Error(`Delete component '${component.d}' does not match deleted text '${deleted}'`); } + snapshot = snapshot.slice(0, component.p) + snapshot.slice((component.p + component.d.length)); + } else if (component.c != null) { + const comment = snapshot.slice(component.p, (component.p + component.c.length)); + if (component.c !== comment) { throw new Error(`Comment component '${component.c}' does not match commented text '${comment}'`); } + } else { + throw new Error("Unknown op type"); + } + } + return snapshot; +}; -# Exported for use by the random op generator. -# -# For simplicity, this version of append does not compress adjacent inserts and deletes of -# the same text. It would be nice to change that at some stage. -text._append = append = (newOp, c) -> - return if c.i == '' or c.d == '' - if newOp.length == 0 - newOp.push c - else - last = newOp[newOp.length - 1] +// Exported for use by the random op generator. +// +// For simplicity, this version of append does not compress adjacent inserts and deletes of +// the same text. It would be nice to change that at some stage. +text._append = (append = function(newOp, c) { + if ((c.i === '') || (c.d === '')) { return; } + if (newOp.length === 0) { + return newOp.push(c); + } else { + const last = newOp[newOp.length - 1]; - # Compose the insert into the previous insert if possible - if last.i? && c.i? and last.p <= c.p <= (last.p + last.i.length) - newOp[newOp.length - 1] = {i:strInject(last.i, c.p - last.p, c.i), p:last.p} - else if last.d? && c.d? and c.p <= last.p <= (c.p + c.d.length) - newOp[newOp.length - 1] = {d:strInject(c.d, last.p - c.p, last.d), p:c.p} - else - newOp.push c + // Compose the insert into the previous insert if possible + if ((last.i != null) && (c.i != null) && (last.p <= c.p && c.p <= (last.p + last.i.length))) { + return newOp[newOp.length - 1] = {i:strInject(last.i, c.p - last.p, c.i), p:last.p}; + } else if ((last.d != null) && (c.d != null) && (c.p <= last.p && last.p <= (c.p + c.d.length))) { + return newOp[newOp.length - 1] = {d:strInject(c.d, last.p - c.p, last.d), p:c.p}; + } else { + return newOp.push(c); + } + } +}); -text.compose = (op1, op2) -> - checkValidOp op1 - checkValidOp op2 +text.compose = function(op1, op2) { + checkValidOp(op1); + checkValidOp(op2); - newOp = op1.slice() - append newOp, c for c in op2 + const newOp = op1.slice(); + for (let c of Array.from(op2)) { append(newOp, c); } - newOp + return newOp; +}; -# Attempt to compress the op components together 'as much as possible'. -# This implementation preserves order and preserves create/delete pairs. -text.compress = (op) -> text.compose [], op +// Attempt to compress the op components together 'as much as possible'. +// This implementation preserves order and preserves create/delete pairs. 
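+// For example (illustrative only): two adjacent single-character inserts
+// collapse into a single component:
+//
+//   text.compress([{i:'a', p:0}, {i:'b', p:1}])
+//   // => [{i:'ab', p:0}]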
+text.compress = op => text.compose([], op); -text.normalize = (op) -> - newOp = [] +text.normalize = function(op) { + const newOp = []; - # Normalize should allow ops which are a single (unwrapped) component: - # {i:'asdf', p:23}. - # There's no good way to test if something is an array: - # http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/ - # so this is probably the least bad solution. - op = [op] if op.i? or op.p? + // Normalize should allow ops which are a single (unwrapped) component: + // {i:'asdf', p:23}. + // There's no good way to test if something is an array: + // http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/ + // so this is probably the least bad solution. + if ((op.i != null) || (op.p != null)) { op = [op]; } - for c in op - c.p ?= 0 - append newOp, c + for (let c of Array.from(op)) { + if (c.p == null) { c.p = 0; } + append(newOp, c); + } - newOp + return newOp; +}; -# This helper method transforms a position by an op component. -# -# If c is an insert, insertAfter specifies whether the transform -# is pushed after the insert (true) or before it (false). -# -# insertAfter is optional for deletes. -transformPosition = (pos, c, insertAfter) -> - if c.i? - if c.p < pos || (c.p == pos && insertAfter) - pos + c.i.length - else - pos - else if c.d? - # I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length)) - # but I think its harder to read that way, and it compiles using ternary operators anyway - # so its no slower written like this. - if pos <= c.p - pos - else if pos <= c.p + c.d.length - c.p - else - pos - c.d.length - else if c.c? - pos - else - throw new Error("unknown op type") +// This helper method transforms a position by an op component. +// +// If c is an insert, insertAfter specifies whether the transform +// is pushed after the insert (true) or before it (false). +// +// insertAfter is optional for deletes. +const transformPosition = function(pos, c, insertAfter) { + if (c.i != null) { + if ((c.p < pos) || ((c.p === pos) && insertAfter)) { + return pos + c.i.length; + } else { + return pos; + } + } else if (c.d != null) { + // I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length)) + // but I think its harder to read that way, and it compiles using ternary operators anyway + // so its no slower written like this. + if (pos <= c.p) { + return pos; + } else if (pos <= (c.p + c.d.length)) { + return c.p; + } else { + return pos - c.d.length; + } + } else if (c.c != null) { + return pos; + } else { + throw new Error("unknown op type"); + } +}; -# Helper method to transform a cursor position as a result of an op. -# -# Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position -# is pushed after an insert (true) or before it (false). -text.transformCursor = (position, op, side) -> - insertAfter = side == 'right' - position = transformPosition position, c, insertAfter for c in op - position +// Helper method to transform a cursor position as a result of an op. +// +// Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position +// is pushed after an insert (true) or before it (false). 
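+// A quick sketch (not from the original source): a cursor at position 5
+// is pushed right by an insert before it and pulled left by a delete
+// before it:
+//
+//   text.transformCursor(5, [{i:'abc', p:2}], 'right')  // => 8
+//   text.transformCursor(5, [{d:'xy', p:1}], 'right')   // => 3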
+text.transformCursor = function(position, op, side) { + const insertAfter = side === 'right'; + for (let c of Array.from(op)) { position = transformPosition(position, c, insertAfter); } + return position; +}; -# Transform an op component by another op component. Asymmetric. -# The result will be appended to destination. -# -# exported for use in JSON type -text._tc = transformComponent = (dest, c, otherC, side) -> - checkValidOp [c] - checkValidOp [otherC] +// Transform an op component by another op component. Asymmetric. +// The result will be appended to destination. +// +// exported for use in JSON type +text._tc = (transformComponent = function(dest, c, otherC, side) { + let cIntersect, intersectEnd, intersectStart, newC, otherIntersect; + checkValidOp([c]); + checkValidOp([otherC]); - if c.i? - append dest, {i:c.i, p:transformPosition(c.p, otherC, side == 'right')} + if (c.i != null) { + append(dest, {i:c.i, p:transformPosition(c.p, otherC, side === 'right')}); - else if c.d? # Delete - if otherC.i? # delete vs insert - s = c.d - if c.p < otherC.p - append dest, {d:s[...otherC.p - c.p], p:c.p} - s = s[(otherC.p - c.p)..] - if s != '' - append dest, {d:s, p:c.p + otherC.i.length} + } else if (c.d != null) { // Delete + if (otherC.i != null) { // delete vs insert + let s = c.d; + if (c.p < otherC.p) { + append(dest, {d:s.slice(0, otherC.p - c.p), p:c.p}); + s = s.slice((otherC.p - c.p)); + } + if (s !== '') { + append(dest, {d:s, p:c.p + otherC.i.length}); + } - else if otherC.d? # Delete vs delete - if c.p >= otherC.p + otherC.d.length - append dest, {d:c.d, p:c.p - otherC.d.length} - else if c.p + c.d.length <= otherC.p - append dest, c - else - # They overlap somewhere. - newC = {d:'', p:c.p} - if c.p < otherC.p - newC.d = c.d[...(otherC.p - c.p)] - if c.p + c.d.length > otherC.p + otherC.d.length - newC.d += c.d[(otherC.p + otherC.d.length - c.p)..] + } else if (otherC.d != null) { // Delete vs delete + if (c.p >= (otherC.p + otherC.d.length)) { + append(dest, {d:c.d, p:c.p - otherC.d.length}); + } else if ((c.p + c.d.length) <= otherC.p) { + append(dest, c); + } else { + // They overlap somewhere. + newC = {d:'', p:c.p}; + if (c.p < otherC.p) { + newC.d = c.d.slice(0, (otherC.p - c.p)); + } + if ((c.p + c.d.length) > (otherC.p + otherC.d.length)) { + newC.d += c.d.slice(((otherC.p + otherC.d.length) - c.p)); + } - # This is entirely optional - just for a check that the deleted - # text in the two ops matches - intersectStart = Math.max c.p, otherC.p - intersectEnd = Math.min c.p + c.d.length, otherC.p + otherC.d.length - cIntersect = c.d[intersectStart - c.p...intersectEnd - c.p] - otherIntersect = otherC.d[intersectStart - otherC.p...intersectEnd - otherC.p] - throw new Error 'Delete ops delete different text in the same region of the document' unless cIntersect == otherIntersect + // This is entirely optional - just for a check that the deleted + // text in the two ops matches + intersectStart = Math.max(c.p, otherC.p); + intersectEnd = Math.min(c.p + c.d.length, otherC.p + otherC.d.length); + cIntersect = c.d.slice(intersectStart - c.p, intersectEnd - c.p); + otherIntersect = otherC.d.slice(intersectStart - otherC.p, intersectEnd - otherC.p); + if (cIntersect !== otherIntersect) { throw new Error('Delete ops delete different text in the same region of the document'); } - if newC.d != '' - # This could be rewritten similarly to insert v delete, above. 
-invertComponent = (c) ->
-  if c.i?
-    {d:c.i, p:c.p}
-  else
-    {i:c.d, p:c.p}
+const invertComponent = function(c) {
+  if (c.i != null) {
+    return {d:c.i, p:c.p};
+  } else {
+    return {i:c.d, p:c.p};
+  }
+};

-# No need to use append for invert, because the components won't be able to
-# cancel with one another.
-text.invert = (op) -> (invertComponent c for c in op.slice().reverse())
+// No need to use append for invert, because the components won't be able to
+// cancel with one another.
+text.invert = op => op.slice().reverse().map(invertComponent);

-if WEB?
-  exports.types ||= {}
+if (typeof WEB !== 'undefined' && WEB !== null) {
+  if (!exports.types) { exports.types = {}; }

-  # This is kind of awful - come up with a better way to hook this helper code up.
-  bootstrapTransform(text, transformComponent, checkValidOp, append)
+  // This is kind of awful - come up with a better way to hook this helper code up.
+  bootstrapTransform(text, transformComponent, checkValidOp, append);

-  # [] is used to prevent closure from renaming types.text
-  exports.types.text = text
-else
-  module.exports = text
+  // [] is used to prevent closure from renaming types.text
+  exports.types.text = text;
+} else {
+  module.exports = text;

-  # The text type really shouldn't need this - it should be possible to define
-  # an efficient transform function by making a sort of transform map and passing each
-  # op component through it.
-  require('./helpers').bootstrapTransform(text, transformComponent, checkValidOp, append)
+  // The text type really shouldn't need this - it should be possible to define
+  // an efficient transform function by making a sort of transform map and passing each
+  // op component through it.
+  require('./helpers').bootstrapTransform(text, transformComponent, checkValidOp, append);
+}
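As a quick sanity check of invert (illustrative, not part of the patch): it reverses the component order as well as each component, so applying an op and then its inverse round-trips the document.

const op = [{i: 'abc', p: 0}, {d: 'x', p: 10}];
text.invert(op);
// => [{i: 'x', p: 10}, {d: 'abc', p: 0}]
// For any snapshot s the op applies to:
//   text.apply(text.apply(s, op), text.invert(op)) === s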
diff --git a/services/document-updater/app/coffee/sharejs/types/web-prelude.js b/services/document-updater/app/coffee/sharejs/types/web-prelude.js
index 3c045532dc..b7252728e9 100644
--- a/services/document-updater/app/coffee/sharejs/types/web-prelude.js
+++ b/services/document-updater/app/coffee/sharejs/types/web-prelude.js
@@ -1,11 +1,11 @@
-# This is included at the top of each compiled type file for the web.
+// This is included at the top of each compiled type file for the web.

-`/**
+/**
 @const
 @type {boolean}
 */
-var WEB = true;
-`
+const WEB = true;

-exports = window['sharejs']
+
+const exports = window['sharejs'];
diff --git a/services/document-updater/app/coffee/sharejs/web-prelude.js b/services/document-updater/app/coffee/sharejs/web-prelude.js
index 3c045532dc..b7252728e9 100644
--- a/services/document-updater/app/coffee/sharejs/web-prelude.js
+++ b/services/document-updater/app/coffee/sharejs/web-prelude.js
@@ -1,11 +1,11 @@
-# This is included at the top of each compiled type file for the web.
+// This is included at the top of each compiled type file for the web.

-`/**
+/**
 @const
 @type {boolean}
 */
-var WEB = true;
-`
+const WEB = true;

-exports = window['sharejs']
+
+const exports = window['sharejs'];
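For context only (not part of the patch), a sketch of how this prelude appears to be used, going by the comment above: a browser build prepends it to each compiled type file, so the WEB branch at the bottom of text.js runs and registers the type on the page's sharejs object. This assumes the page defines window.sharejs before these scripts execute.

// Hypothetical concatenated browser file for the text type:
//
//   /** @const @type {boolean} */
//   const WEB = true;
//   const exports = window['sharejs'];
//   ...compiled text type from above; typeof WEB !== 'undefined' there, so it runs:
//   exports.types.text = text;
//
// Afterwards the type is reachable globally:
//   const text = window.sharejs.types.text;
//   text.apply('hello', [{i: ' world', p: 5}]);  // => 'hello world'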