Mirror of https://github.com/overleaf/overleaf.git (synced 2024-11-14 20:40:17 -05:00)

commit e1a7d4f24a: Initial open sourcing
104 changed files with 12838 additions and 0 deletions
46 services/document-updater/.gitignore (vendored, new normal file)
@@ -0,0 +1,46 @@
compileFolder

# Compiled source #
###################
*.com
*.class
*.dll
*.exe
*.o
*.so

# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip

# Logs and databases #
######################
*.log
*.sql
*.sqlite

# OS generated files #
######################
.DS_Store?
ehthumbs.db
Icon?
Thumbs.db

/node_modules/*

app.js
app/js/*

test/unit/js/*
test/acceptance/js/*

**.swp
111 services/document-updater/Gruntfile.coffee (new normal file)
@@ -0,0 +1,111 @@
module.exports = (grunt) ->
  grunt.loadNpmTasks 'grunt-contrib-coffee'
  grunt.loadNpmTasks 'grunt-contrib-clean'
  grunt.loadNpmTasks 'grunt-mocha-test'
  grunt.loadNpmTasks 'grunt-available-tasks'
  grunt.loadNpmTasks 'grunt-execute'
  grunt.loadNpmTasks 'grunt-bunyan'

  grunt.initConfig
    execute:
      app:
        src: "app.js"

    bunyan:
      strict: false

    coffee:
      app_dir:
        expand: true,
        flatten: false,
        cwd: 'app/coffee',
        src: ['**/*.coffee'],
        dest: 'app/js/',
        ext: '.js'

      app:
        src: 'app.coffee'
        dest: 'app.js'

      acceptance_tests:
        expand: true,
        flatten: false,
        cwd: 'test/acceptance/coffee',
        src: ['**/*.coffee'],
        dest: 'test/acceptance/js/',
        ext: '.js'

      unit_tests:
        expand: true,
        flatten: false,
        cwd: 'test/unit/coffee',
        src: ['**/*.coffee'],
        dest: 'test/unit/js/',
        ext: '.js'

    clean:
      app: ["app/js"]
      acceptance_tests: ["test/acceptance/js"]

    mochaTest:
      unit:
        src: ['test/unit/js/**/*.js']
        options:
          reporter: grunt.option('reporter') or 'spec'
          grep: grunt.option("grep")
      acceptance:
        src: ['test/acceptance/js/**/*.js']
        options:
          reporter: grunt.option('reporter') or 'spec'
          grep: grunt.option("grep")
          timeout: 10000

    availabletasks:
      tasks:
        options:
          filter: 'exclude',
          tasks: [
            'coffee'
            'clean'
            'mochaTest'
            'availabletasks'
            'execute'
            'bunyan'
          ]
          groups:
            "Compile tasks": [
              "compile:server"
              "compile:tests"
              "compile"
              "compile:unit_tests"
              "compile:acceptance_tests"
              "install"
            ]
            "Test tasks": [
              "test:unit"
              "test:acceptance"
            ]
            "Run tasks": [
              "run"
              "default"
            ]
            "Misc": [
              "help"
            ]

  grunt.registerTask 'help', 'Display this help list', 'availabletasks'

  grunt.registerTask 'compile:server', 'Compile the server side coffee script', ['clean:app', 'coffee:app', 'coffee:app_dir']
  grunt.registerTask 'compile:unit_tests', 'Compile the unit tests', ['coffee:unit_tests']
  grunt.registerTask 'compile:acceptance_tests', 'Compile the acceptance tests', ['clean:acceptance_tests', 'coffee:acceptance_tests']
  grunt.registerTask 'compile:tests', 'Compile all the tests', ['compile:acceptance_tests', 'compile:unit_tests']
  grunt.registerTask 'compile', 'Compiles everything needed to run document-updater-sharelatex', ['compile:server']

  grunt.registerTask 'install', "Compile everything when installing as an npm module", ['compile']

  grunt.registerTask 'test:unit', 'Run the unit tests (use --grep=<regex> for individual tests)', ['compile:unit_tests', 'mochaTest:unit']
  grunt.registerTask 'test:acceptance', 'Run the acceptance tests (use --grep=<regex> for individual tests)', ['compile:acceptance_tests', 'mochaTest:acceptance']

  grunt.registerTask 'run', "Compile and run the document-updater-sharelatex server", ['compile', 'bunyan', 'execute']
  grunt.registerTask 'default', 'run'
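The registered tasks compose out of the config blocks above: `compile` drives the clean and coffee targets, and the `test:*` tasks recompile before running mocha. As a sketch of how a further task could be layered on top, inside the same `module.exports = (grunt) ->` wrapper (the `ci` task is hypothetical, not part of this commit):

    # Hypothetical aggregate task built only from tasks registered above
    grunt.registerTask 'ci', 'Compile everything and run both test suites', [
      'compile'          # clean:app, coffee:app, coffee:app_dir
      'test:unit'        # compile:unit_tests, mochaTest:unit
      'test:acceptance'  # compile:acceptance_tests, mochaTest:acceptance
    ]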
68 services/document-updater/app.coffee (new normal file)
@@ -0,0 +1,68 @@
express = require('express')
http = require("http")
Settings = require('settings-sharelatex')
logger = require('logger-sharelatex')
logger.initialize("documentupdater")
RedisManager = require('./app/js/RedisManager.js')
UpdateManager = require('./app/js/UpdateManager.js')
Keys = require('./app/js/RedisKeyBuilder')
redis = require('redis')
rclient = redis.createClient(Settings.redis.port, Settings.redis.host)
rclient.auth(Settings.redis.password)
metrics = require('./app/js/Metrics')
Errors = require "./app/js/Errors"
HttpController = require "./app/js/HttpController"

app = express()

app.configure ->
  app.use(express.logger(':remote-addr - [:date] - :user-agent ":method :url" :status - :response-time ms'))
  app.use express.bodyParser()
  app.use app.router

app.configure 'development', ()->
  console.log "Development Environment"
  app.use express.errorHandler({ dumpExceptions: true, showStack: true })

app.configure 'production', ()->
  console.log "Production Environment"
  app.use express.logger()
  app.use express.errorHandler()

rclient.subscribe("pending-updates")
rclient.on "message", (channel, doc_key)->
  [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key)
  UpdateManager.processOutstandingUpdatesWithLock project_id, doc_id, (error) ->
    logger.error err: error, project_id: project_id, doc_id: doc_id, "error processing update" if error?

UpdateManager.resumeProcessing()

app.use (req, res, next)->
  metrics.inc "http-request"
  next()

app.get '/project/:project_id/doc/:doc_id', HttpController.getDoc
app.post '/project/:project_id/doc/:doc_id', HttpController.setDoc
app.post '/project/:project_id/doc/:doc_id/flush', HttpController.flushDocIfLoaded
app.delete '/project/:project_id/doc/:doc_id', HttpController.flushAndDeleteDoc
app.delete '/project/:project_id', HttpController.deleteProject
app.post '/project/:project_id/flush', HttpController.flushProject

app.get '/total', (req, res)->
  timer = new metrics.Timer("http.allDocList")
  RedisManager.getCountOfDocsInMemory (err, count)->
    timer.done()
    res.send {total:count}

app.get '/status', (req, res)->
  res.send('document updater is alive')

app.use (error, req, res, next) ->
  logger.error err: error, "request errored"
  if error instanceof Errors.NotFoundError
    res.send 404
  else
    res.send(500, "Oops, something went wrong")

port = Settings.internal?.documentupdater?.port or Settings.apis?.documentupdater?.port or 3003
app.listen port, "localhost", ->
  logger.log("documentupdater-sharelatex server listening on port #{port}")
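For orientation, a minimal sketch of calling the routes above from another service (the ids, and the use of the `request` module, are illustrative):

    request = require "request"
    project_id = "project-123"  # made-up ids
    doc_id = "doc-456"
    request.get "http://localhost:3003/project/#{project_id}/doc/#{doc_id}", (error, res, body) ->
      throw error if error?
      {id, lines, version, ops} = JSON.parse(body)  # shape sent by HttpController.getDoc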
181 services/document-updater/app/DocumentUpdater.js (new normal file)
@@ -0,0 +1,181 @@
(function(exports){
  var Ace = require('aceserverside-sharelatex');
  var Range = Ace.Range;

  // look at applyDeltas method
  exports.applyChange = function(aceDoc, change, callback) {
    var r = change.range;
    var range = new Range(r.start.row, r.start.column, r.end.row, r.end.column);
    if('insertText' == change.action){
      aceDoc.insert(change.range.start, change.text);
    }else if('insertLines' == change.action){
      aceDoc.insertLines(change.range.start.row, change.lines);
    }else if('removeText' == change.action){
      aceDoc.remove(range);
    }else if('removeLines' == change.action){
      aceDoc.removeLines(range.start.row, range.end.row-1);
    }

    if(typeof callback === 'function'){
      callback(null, aceDoc);
    }
  };

})(typeof exports === 'undefined'? this['documentUpdater']={}: exports);
31 services/document-updater/app/coffee/DiffCodec.coffee (new normal file)
@@ -0,0 +1,31 @@
diff_match_patch = require("../lib/diff_match_patch").diff_match_patch
dmp = new diff_match_patch()

module.exports = DiffCodec =
  ADDED: 1
  REMOVED: -1
  UNCHANGED: 0

  diffAsShareJsOp: (before, after, callback = (error, ops) ->) ->
    diffs = dmp.diff_main(before.join("\n"), after.join("\n"))
    dmp.diff_cleanupSemantic(diffs)

    ops = []
    position = 0
    for diff in diffs
      type = diff[0]
      content = diff[1]
      if type == @ADDED
        ops.push
          i: content
          p: position
        position += content.length
      else if type == @REMOVED
        ops.push
          d: content
          p: position
      else if type == @UNCHANGED
        position += content.length
      else
        throw new Error("Unknown type")
    callback null, ops
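A small usage sketch of the codec (document contents made up): diffing two sets of lines yields ShareJS-style `{i, p}` insert and `{d, p}` delete ops against the joined text.

    DiffCodec = require "./DiffCodec"
    before = ["hello world"]
    after  = ["hello brave new world"]
    DiffCodec.diffAsShareJsOp before, after, (error, ops) ->
      console.log ops  # [{ i: "brave new ", p: 6 }], an insert at character offset 6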
127 services/document-updater/app/coffee/DocOpsManager.coffee (new normal file)
@@ -0,0 +1,127 @@
RedisManager = require "./RedisManager"
mongojs = require("./mongojs")
db = mongojs.db
ObjectId = mongojs.ObjectId
logger = require "logger-sharelatex"
async = require "async"
Metrics = require("./Metrics")

module.exports = DocOpsManager =
  flushDocOpsToMongo: (project_id, doc_id, _callback = (error) ->) ->
    timer = new Metrics.Timer("docOpsManager.flushDocOpsToMongo")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    DocOpsManager.getDocVersionInMongo doc_id, (error, mongoVersion) ->
      return callback(error) if error?
      RedisManager.getDocVersion doc_id, (error, redisVersion) ->
        return callback(error) if error?
        if !mongoVersion? or !redisVersion? or mongoVersion > redisVersion
          logger.error doc_id: doc_id, redisVersion: redisVersion, mongoVersion: mongoVersion, "mongo version is ahead of redis"
          return callback(new Error("inconsistent versions"))

        RedisManager.getPreviousDocOps doc_id, mongoVersion, -1, (error, ops) ->
          return callback(error) if error?
          if ops.length != redisVersion - mongoVersion
            logger.error doc_id: doc_id, redisVersion: redisVersion, mongoVersion: mongoVersion, opsLength: ops.length, "version difference does not match ops length"
            return callback(new Error("inconsistent versions"))
          logger.log doc_id: doc_id, redisVersion: redisVersion, mongoVersion: mongoVersion, "flushing doc ops to mongo"
          DocOpsManager._appendDocOpsInMongo doc_id, ops, redisVersion, (error) ->
            return callback(error) if error?
            callback null

  getPreviousDocOps: (project_id, doc_id, start, end, _callback = (error, ops) ->) ->
    timer = new Metrics.Timer("docOpsManager.getPreviousDocOps")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    DocOpsManager._ensureOpsAreLoaded project_id, doc_id, start, (error) ->
      return callback(error) if error?
      RedisManager.getPreviousDocOps doc_id, start, end, (error, ops) ->
        return callback(error) if error?
        callback null, ops

  pushDocOp: (project_id, doc_id, op, callback = (error) ->) ->
    RedisManager.pushDocOp doc_id, op, callback

  _ensureOpsAreLoaded: (project_id, doc_id, backToVersion, callback = (error) ->) ->
    RedisManager.getDocVersion doc_id, (error, redisVersion) ->
      return callback(error) if error?
      RedisManager.getDocOpsLength doc_id, (error, opsLength) ->
        return callback(error) if error?
        oldestVersionInRedis = redisVersion - opsLength
        if oldestVersionInRedis > backToVersion
          # _getDocOpsFromMongo(<id>, 4, 6, ...) will return the ops in positions 4 and 5, but not 6.
          logger.log doc_id: doc_id, backToVersion: backToVersion, oldestVersionInRedis: oldestVersionInRedis, "loading old ops from mongo"
          DocOpsManager._getDocOpsFromMongo doc_id, backToVersion, oldestVersionInRedis, (error, ops) ->
            logger.log doc_id: doc_id, backToVersion: backToVersion, oldestVersionInRedis: oldestVersionInRedis, ops: ops, "loaded old ops from mongo"
            return callback(error) if error?
            RedisManager.prependDocOps doc_id, ops, (error) ->
              return callback(error) if error?
              callback null
        else
          logger.log doc_id: doc_id, backToVersion: backToVersion, oldestVersionInRedis: oldestVersionInRedis, "ops already in redis"
          callback()

  getDocVersionInMongo: (doc_id, callback = (error, version) ->) ->
    t = new Metrics.Timer("mongo-time")
    db.docOps.find {
      doc_id: ObjectId(doc_id)
    }, {
      version: 1
    }, (error, docs) ->
      t.done()
      return callback(error) if error?
      if docs.length < 1 or !docs[0].version?
        return callback null, 0
      else
        return callback null, docs[0].version

  APPEND_OPS_BATCH_SIZE: 100

  _appendDocOpsInMongo: (doc_id, docOps, newVersion, callback = (error) ->) ->
    currentVersion = newVersion - docOps.length
    batchSize = DocOpsManager.APPEND_OPS_BATCH_SIZE
    noOfBatches = Math.ceil(docOps.length / batchSize)
    if noOfBatches <= 0
      return callback()
    jobs = []
    for batchNo in [0..(noOfBatches-1)]
      do (batchNo) ->
        jobs.push (callback) ->
          batch = docOps.slice(batchNo * batchSize, (batchNo + 1) * batchSize)
          currentVersion += batch.length
          logger.log doc_id: doc_id, batchNo: batchNo, "appending doc op batch to Mongo"
          t = new Metrics.Timer("mongo-time")
          db.docOps.update {
            doc_id: ObjectId(doc_id)
          }, {
            $push: docOps: { $each: batch, $slice: -100 }
            $set: version: currentVersion
          }, {
            upsert: true
          }, (err)->
            t.done()
            callback(err)

    async.series jobs, (error) -> callback(error)

  _getDocOpsFromMongo: (doc_id, start, end, callback = (error, ops) ->) ->
    DocOpsManager.getDocVersionInMongo doc_id, (error, version) ->
      return callback(error) if error?
      offset = - (version - start) # Negative tells mongo to count from the end backwards
      limit = end - start
      t = new Metrics.Timer("mongo-time")
      db.docOps.find {
        doc_id: ObjectId(doc_id)
      }, {
        docOps: $slice: [offset, limit]
      }, (error, docs) ->
        t.done()
        if docs.length < 1 or !docs[0].docOps?
          return callback null, []
        else
          return callback null, docs[0].docOps
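To make the version arithmetic in `_ensureOpsAreLoaded` concrete, a worked example with made-up numbers:

    # redisVersion = 20 and opsLength = 5 give oldestVersionInRedis = 15:
    # redis only holds the ops that took the doc from v15 to v20.
    # A request for backToVersion = 12 therefore pulls ops 12..14 from mongo
    # (_getDocOpsFromMongo's end is exclusive) and prepends them to the redis list,
    # after which redis can serve the whole range 12..20 itself.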
127 services/document-updater/app/coffee/DocumentManager.coffee (new normal file)
@@ -0,0 +1,127 @@
RedisManager = require "./RedisManager"
PersistenceManager = require "./PersistenceManager"
DocOpsManager = require "./DocOpsManager"
DiffCodec = require "./DiffCodec"
logger = require "logger-sharelatex"
Metrics = require "./Metrics"

module.exports = DocumentManager =
  getDoc: (project_id, doc_id, _callback = (error, lines, version) ->) ->
    timer = new Metrics.Timer("docManager.getDoc")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    RedisManager.getDoc doc_id, (error, lines, version) ->
      return callback(error) if error?
      if !lines? or !version?
        logger.log project_id: project_id, doc_id: doc_id, "doc not in redis so getting from persistence API"
        PersistenceManager.getDoc project_id, doc_id, (error, lines) ->
          return callback(error) if error?
          DocOpsManager.getDocVersionInMongo doc_id, (error, version) ->
            return callback(error) if error?
            logger.log project_id: project_id, doc_id: doc_id, lines: lines, version: version, "got doc from persistence API"
            RedisManager.putDocInMemory project_id, doc_id, lines, version, (error) ->
              return callback(error) if error?
              callback null, lines, version
      else
        callback null, lines, version

  getDocAndRecentOps: (project_id, doc_id, fromVersion, _callback = (error, lines, version, recentOps) ->) ->
    timer = new Metrics.Timer("docManager.getDocAndRecentOps")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    DocumentManager.getDoc project_id, doc_id, (error, lines, version) ->
      return callback(error) if error?
      if fromVersion == -1
        callback null, lines, version, []
      else
        DocOpsManager.getPreviousDocOps project_id, doc_id, fromVersion, version, (error, ops) ->
          return callback(error) if error?
          callback null, lines, version, ops

  setDoc: (project_id, doc_id, newLines, _callback = (error) ->) ->
    timer = new Metrics.Timer("docManager.setDoc")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    if !newLines?
      return callback(new Error("No lines were provided to setDoc"))

    UpdateManager = require "./UpdateManager"
    DocumentManager.getDoc project_id, doc_id, (error, oldLines, version) ->
      return callback(error) if error?

      if oldLines? and oldLines.length > 0 and oldLines[0].text?
        logger.log doc_id: doc_id, project_id: project_id, oldLines: oldLines, newLines: newLines, "document is JSON so not updating"
        return callback(null)

      logger.log doc_id: doc_id, project_id: project_id, oldLines: oldLines, newLines: newLines, "setting a document via http"
      DiffCodec.diffAsShareJsOp oldLines, newLines, (error, op) ->
        return callback(error) if error?
        update =
          doc: doc_id
          op: op
          v: version
          meta:
            type: "external"
        UpdateManager.applyUpdates project_id, doc_id, [update], (error) ->
          return callback(error) if error?
          DocumentManager.flushDocIfLoaded project_id, doc_id, (error) ->
            return callback(error) if error?
            callback null

  flushDocIfLoaded: (project_id, doc_id, _callback = (error) ->) ->
    timer = new Metrics.Timer("docManager.flushDocIfLoaded")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    RedisManager.getDoc doc_id, (error, lines, version) ->
      return callback(error) if error?
      if !lines? or !version?
        logger.log project_id: project_id, doc_id: doc_id, "doc is not loaded so not flushing"
        callback null
      else
        logger.log project_id: project_id, doc_id: doc_id, "flushing doc"
        PersistenceManager.setDoc project_id, doc_id, lines, (error) ->
          return callback(error) if error?
          DocOpsManager.flushDocOpsToMongo project_id, doc_id, (error) ->
            return callback(error) if error?
            callback null

  flushAndDeleteDoc: (project_id, doc_id, _callback = (error) ->) ->
    timer = new Metrics.Timer("docManager.flushAndDeleteDoc")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    DocumentManager.flushDocIfLoaded project_id, doc_id, (error) ->
      return callback(error) if error?
      RedisManager.removeDocFromMemory project_id, doc_id, (error) ->
        return callback(error) if error?
        callback null

  getDocWithLock: (project_id, doc_id, callback = (error, lines, version) ->) ->
    UpdateManager = require "./UpdateManager"
    UpdateManager.lockUpdatesAndDo DocumentManager.getDoc, project_id, doc_id, callback

  getDocAndRecentOpsWithLock: (project_id, doc_id, fromVersion, callback = (error, lines, version) ->) ->
    UpdateManager = require "./UpdateManager"
    UpdateManager.lockUpdatesAndDo DocumentManager.getDocAndRecentOps, project_id, doc_id, fromVersion, callback

  setDocWithLock: (project_id, doc_id, lines, callback = (error) ->) ->
    UpdateManager = require "./UpdateManager"
    UpdateManager.lockUpdatesAndDo DocumentManager.setDoc, project_id, doc_id, lines, callback

  flushDocIfLoadedWithLock: (project_id, doc_id, callback = (error) ->) ->
    UpdateManager = require "./UpdateManager"
    UpdateManager.lockUpdatesAndDo DocumentManager.flushDocIfLoaded, project_id, doc_id, callback

  flushAndDeleteDocWithLock: (project_id, doc_id, callback = (error) ->) ->
    UpdateManager = require "./UpdateManager"
    UpdateManager.lockUpdatesAndDo DocumentManager.flushAndDeleteDoc, project_id, doc_id, callback
10 services/document-updater/app/coffee/Errors.coffee (new normal file)
@@ -0,0 +1,10 @@
NotFoundError = (message) ->
  error = new Error(message)
  error.name = "NotFoundError"
  error.__proto__ = NotFoundError.prototype
  return error
NotFoundError.prototype.__proto__ = Error.prototype

module.exports = Errors =
  NotFoundError: NotFoundError
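The prototype wiring above is what makes these errors distinguishable with `instanceof` while still behaving as plain `Error`s; a quick sketch:

    Errors = require "./Errors"
    error = new Errors.NotFoundError("document not found")
    console.log error instanceof Errors.NotFoundError  # true
    console.log error instanceof Error                 # true, via Error.prototype in the chain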
85 services/document-updater/app/coffee/HttpController.coffee (new normal file)
@@ -0,0 +1,85 @@
DocumentManager = require "./DocumentManager"
ProjectManager = require "./ProjectManager"
Errors = require "./Errors"
logger = require "logger-sharelatex"
Metrics = require "./Metrics"

module.exports = HttpController =
  getDoc: (req, res, next = (error) ->) ->
    doc_id = req.params.doc_id
    project_id = req.params.project_id
    logger.log project_id: project_id, doc_id: doc_id, "getting doc via http"
    timer = new Metrics.Timer("http.getDoc")

    if req.query?.fromVersion?
      fromVersion = parseInt(req.query.fromVersion, 10)
    else
      fromVersion = -1

    DocumentManager.getDocAndRecentOpsWithLock project_id, doc_id, fromVersion, (error, lines, version, ops) ->
      timer.done()
      return next(error) if error?
      logger.log project_id: project_id, doc_id: doc_id, "got doc via http"
      if !lines? or !version?
        return next(new Errors.NotFoundError("document not found"))
      res.send JSON.stringify
        id: doc_id
        lines: lines
        version: version
        ops: ops

  setDoc: (req, res, next = (error) ->) ->
    doc_id = req.params.doc_id
    project_id = req.params.project_id
    lines = req.body.lines
    logger.log project_id: project_id, doc_id: doc_id, lines: lines, "setting doc via http"
    timer = new Metrics.Timer("http.setDoc")
    DocumentManager.setDocWithLock project_id, doc_id, lines, (error) ->
      timer.done()
      return next(error) if error?
      logger.log project_id: project_id, doc_id: doc_id, "set doc via http"
      res.send 204 # No Content

  flushDocIfLoaded: (req, res, next = (error) ->) ->
    doc_id = req.params.doc_id
    project_id = req.params.project_id
    logger.log project_id: project_id, doc_id: doc_id, "flushing doc via http"
    timer = new Metrics.Timer("http.flushDoc")
    DocumentManager.flushDocIfLoadedWithLock project_id, doc_id, (error) ->
      timer.done()
      return next(error) if error?
      logger.log project_id: project_id, doc_id: doc_id, "flushed doc via http"
      res.send 204 # No Content

  flushAndDeleteDoc: (req, res, next = (error) ->) ->
    doc_id = req.params.doc_id
    project_id = req.params.project_id
    logger.log project_id: project_id, doc_id: doc_id, "deleting doc via http"
    timer = new Metrics.Timer("http.deleteDoc")
    DocumentManager.flushAndDeleteDocWithLock project_id, doc_id, (error) ->
      timer.done()
      return next(error) if error?
      logger.log project_id: project_id, doc_id: doc_id, "deleted doc via http"
      res.send 204 # No Content

  flushProject: (req, res, next = (error) ->) ->
    project_id = req.params.project_id
    logger.log project_id: project_id, "flushing project via http"
    timer = new Metrics.Timer("http.flushProject")
    ProjectManager.flushProjectWithLocks project_id, (error) ->
      timer.done()
      return next(error) if error?
      logger.log project_id: project_id, "flushed project via http"
      res.send 204 # No Content

  deleteProject: (req, res, next = (error) ->) ->
    project_id = req.params.project_id
    logger.log project_id: project_id, "deleting project via http"
    timer = new Metrics.Timer("http.deleteProject")
    ProjectManager.flushAndDeleteProjectWithLocks project_id, (error) ->
      timer.done()
      return next(error) if error?
      logger.log project_id: project_id, "deleted project via http"
      res.send 204 # No Content
55 services/document-updater/app/coffee/LockManager.coffee (new normal file)
@@ -0,0 +1,55 @@
metrics = require('./Metrics')
Settings = require('settings-sharelatex')
redis = require('redis')
redisConf = Settings.redis?.web or Settings.redis or {host: "localhost", port: 6379}
rclient = redis.createClient(redisConf.port, redisConf.host)
rclient.auth(redisConf.password)
keys = require('./RedisKeyBuilder')
logger = require "logger-sharelatex"

module.exports = LockManager =
  LOCK_TEST_INTERVAL: 50 # 50ms between each test of the lock
  MAX_LOCK_WAIT_TIME: 10000 # 10s maximum time to spend trying to get the lock

  tryLock : (doc_id, callback = (err, isFree)->)->
    tenSeconds = 10
    rclient.set keys.blockingKey(doc_id: doc_id), "locked", "EX", tenSeconds, "NX", (err, gotLock)->
      return callback(err) if err?
      if gotLock == "OK"
        metrics.inc "doc-not-blocking"
        callback err, true
      else
        metrics.inc "doc-blocking"
        logger.log doc_id: doc_id, redis_response: gotLock, "doc is locked"
        callback err, false

  getLock: (doc_id, callback = (error) ->) ->
    startTime = Date.now()
    do attempt = () ->
      if Date.now() - startTime > LockManager.MAX_LOCK_WAIT_TIME
        return callback(new Error("Timeout"))

      LockManager.tryLock doc_id, (error, gotLock) ->
        return callback(error) if error?
        if gotLock
          callback(null)
        else
          setTimeout attempt, LockManager.LOCK_TEST_INTERVAL

  checkLock: (doc_id, callback = (err, isFree)->)->
    multi = rclient.multi()
    multi.exists keys.blockingKey(doc_id:doc_id)
    multi.exec (err, replys)->
      return callback(err) if err?
      exists = parseInt replys[0]
      if exists == 1
        metrics.inc "doc-blocking"
        callback err, false
      else
        metrics.inc "doc-not-blocking"
        callback err, true

  releaseLock: (doc_id, callback)->
    rclient.del keys.blockingKey(doc_id:doc_id), callback
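The lock is a single redis key written with `SET ... EX 10 NX`, so acquisition is atomic and the lock self-expires if the process holding it dies. A usage sketch, with a hypothetical critical-section function:

    LockManager = require "./LockManager"
    doc_id = "doc-123"  # made up
    LockManager.getLock doc_id, (error) ->
      return console.error(error) if error?  # "Timeout" after MAX_LOCK_WAIT_TIME
      doSomeExclusiveWork doc_id, (workError) ->  # hypothetical: work that must not run concurrently
        LockManager.releaseLock doc_id, ->
          console.error(workError) if workError?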
23 services/document-updater/app/coffee/Metrics.coffee (new normal file)
@@ -0,0 +1,23 @@
StatsD = require('lynx')
statsd = new StatsD('localhost', 8125, {on_error:->})

buildKey = (key)-> "doc-updater.#{process.env.NODE_ENV}.#{key}"

module.exports =
  set : (key, value, sampleRate = 1)->
    statsd.set buildKey(key), value, sampleRate

  inc : (key, sampleRate = 1)->
    statsd.increment buildKey(key), sampleRate

  Timer : class
    constructor :(key, sampleRate = 1)->
      this.start = new Date()
      this.key = buildKey(key)
      this.sampleRate = sampleRate # stored so done() can pass it to statsd
    done:->
      timeSpan = new Date - this.start
      statsd.timing(this.key, timeSpan, this.sampleRate)

  gauge : (key, value, sampleRate = 1)->
    statsd.gauge key, value, sampleRate
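A usage sketch (the metric names are illustrative); counters and timers are both namespaced by `buildKey` under `doc-updater.<NODE_ENV>`:

    metrics = require "./Metrics"
    metrics.inc "http-request"                  # statsd counter doc-updater.<env>.http-request
    timer = new metrics.Timer("redis.get-doc")
    # ... timed work ...
    timer.done()                                # statsd timing of the elapsed milliseconds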
66 services/document-updater/app/coffee/PersistenceManager.coffee (new normal file)
@@ -0,0 +1,66 @@
request = require "request"
Settings = require "settings-sharelatex"
Errors = require "./Errors"
Metrics = require "./Metrics"

module.exports = PersistenceManager =
  getDoc: (project_id, doc_id, _callback = (error, lines) ->) ->
    timer = new Metrics.Timer("persistenceManager.getDoc")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    url = "#{Settings.apis.web.url}/project/#{project_id}/doc/#{doc_id}"
    request {
      url: url
      method: "GET"
      headers:
        "accept": "application/json"
      auth:
        user: Settings.apis.web.user
        pass: Settings.apis.web.pass
        sendImmediately: true
      jar: false
    }, (error, res, body) ->
      return callback(error) if error?
      if res.statusCode >= 200 and res.statusCode < 300
        try
          body = JSON.parse body
        catch e
          return callback(e)
        return callback null, body.lines
      else if res.statusCode == 404
        return callback(new Errors.NotFoundError("doc not found: #{url}"))
      else
        return callback(new Error("error accessing web API: #{url} #{res.statusCode}"))

  setDoc: (project_id, doc_id, lines, _callback = (error) ->) ->
    timer = new Metrics.Timer("persistenceManager.setDoc")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    url = "#{Settings.apis.web.url}/project/#{project_id}/doc/#{doc_id}"
    request {
      url: url
      method: "POST"
      body: JSON.stringify
        lines: lines
      headers:
        "content-type": "application/json"
      auth:
        user: Settings.apis.web.user
        pass: Settings.apis.web.pass
        sendImmediately: true
      jar: false
    }, (error, res, body) ->
      return callback(error) if error?
      if res.statusCode >= 200 and res.statusCode < 300
        return callback null
      else if res.statusCode == 404
        return callback(new Errors.NotFoundError("doc not found: #{url}"))
      else
        return callback(new Error("error accessing web API: #{url} #{res.statusCode}"))
60 services/document-updater/app/coffee/ProjectManager.coffee (new normal file)
@@ -0,0 +1,60 @@
RedisManager = require "./RedisManager"
DocumentManager = require "./DocumentManager"
async = require "async"
logger = require "logger-sharelatex"
Metrics = require "./Metrics"

module.exports = ProjectManager =
  flushProjectWithLocks: (project_id, _callback = (error) ->) ->
    timer = new Metrics.Timer("projectManager.flushProjectWithLocks")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    RedisManager.getDocIdsInProject project_id, (error, doc_ids) ->
      return callback(error) if error?
      jobs = []
      errors = []
      for doc_id in (doc_ids or [])
        do (doc_id) ->
          jobs.push (callback) ->
            DocumentManager.flushDocIfLoadedWithLock project_id, doc_id, (error) ->
              if error?
                logger.error err: error, project_id: project_id, doc_id: doc_id, "error flushing doc"
                errors.push(error)
              callback()

      logger.log project_id: project_id, doc_ids: doc_ids, "flushing docs"
      async.series jobs, () ->
        if errors.length > 0
          callback new Error("Errors flushing docs. See log for details")
        else
          callback(null)

  flushAndDeleteProjectWithLocks: (project_id, _callback = (error) ->) ->
    timer = new Metrics.Timer("projectManager.flushAndDeleteProjectWithLocks")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    RedisManager.getDocIdsInProject project_id, (error, doc_ids) ->
      return callback(error) if error?
      jobs = []
      errors = []
      for doc_id in (doc_ids or [])
        do (doc_id) ->
          jobs.push (callback) ->
            DocumentManager.flushAndDeleteDocWithLock project_id, doc_id, (error) ->
              if error?
                logger.error err: error, project_id: project_id, doc_id: doc_id, "error deleting doc"
                errors.push(error)
              callback()

      logger.log project_id: project_id, doc_ids: doc_ids, "deleting docs"
      async.series jobs, () ->
        if errors.length > 0
          callback new Error("Errors deleting docs. See log for details")
        else
          callback(null)
28 services/document-updater/app/coffee/RedisKeyBuilder.coffee (new normal file)
@@ -0,0 +1,28 @@
ALLDOCSKEY = "AllDocIds"
PROJECTKEY = "ProjectId"
BLOCKINGKEY = "Blocking"
CHANGEQUE = "ChangeQue"
DOCSINPROJECT = "DocsIn"
PENDINGUPDATESKEY = "PendingUpdates"
DOCLINES = "doclines"
DOCOPS = "DocOps"
DOCVERSION = "DocVersion"
DOCIDSWITHPENDINGUPDATES = "DocsWithPendingUpdates"

module.exports =
  allDocs : ALLDOCSKEY
  docLines : (op)-> DOCLINES+":"+op.doc_id
  docOps : (op)-> DOCOPS+":"+op.doc_id
  docVersion : (op)-> DOCVERSION+":"+op.doc_id
  projectKey : (op)-> PROJECTKEY+":"+op.doc_id
  blockingKey : (op)-> BLOCKINGKEY+":"+op.doc_id
  changeQue : (op)-> CHANGEQUE+":"+op.project_id
  docsInProject : (op)-> DOCSINPROJECT+":"+op.project_id
  pendingUpdates : (op)-> PENDINGUPDATESKEY+":"+op.doc_id
  docsWithPendingUpdates : DOCIDSWITHPENDINGUPDATES
  combineProjectIdAndDocId: (project_id, doc_id) -> "#{project_id}:#{doc_id}"
  splitProjectIdAndDocId: (project_and_doc_id) -> project_and_doc_id.split(":")
  now : (key)->
    d = new Date()
    d.getDate()+":"+(d.getMonth()+1)+":"+d.getFullYear()+":"+key
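For reference, the builders above expand to keys like these (the ids are made up):

    keys = require "./RedisKeyBuilder"
    keys.docLines(doc_id: "doc-123")                         # "doclines:doc-123"
    keys.docsInProject(project_id: "project-456")            # "DocsIn:project-456"
    keys.combineProjectIdAndDocId("project-456", "doc-123")  # "project-456:doc-123"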
184 services/document-updater/app/coffee/RedisManager.coffee (new normal file)
@@ -0,0 +1,184 @@
Settings = require('settings-sharelatex')
redis = require('redis')
redisConf = Settings.redis?.web or Settings.redis or {host: "localhost", port: 6379}
rclient = redis.createClient(redisConf.port, redisConf.host)
rclient.auth(redisConf.password)
async = require('async')
_ = require('underscore')
keys = require('./RedisKeyBuilder')
logger = require('logger-sharelatex')
metrics = require('./Metrics')

module.exports =
  putDocInMemory : (project_id, doc_id, docLines, version, callback)->
    timer = new metrics.Timer("redis.put-doc")
    logger.log project_id:project_id, doc_id:doc_id, docLines:docLines, version: version, "putting doc in redis"
    multi = rclient.multi()
    multi.set keys.docLines(doc_id:doc_id), JSON.stringify(docLines)
    multi.set keys.projectKey({doc_id:doc_id}), project_id
    multi.set keys.docVersion(doc_id:doc_id), version
    multi.del keys.docOps(doc_id:doc_id)
    multi.sadd keys.allDocs, doc_id
    multi.sadd keys.docsInProject(project_id:project_id), doc_id
    multi.exec (err, replys)->
      timer.done()
      callback(err)

  removeDocFromMemory : (project_id, doc_id, callback)->
    logger.log project_id:project_id, doc_id:doc_id, "removing doc from redis"
    multi = rclient.multi()
    multi.get keys.docLines(doc_id:doc_id)
    multi.del keys.docLines(doc_id:doc_id)
    multi.del keys.projectKey(doc_id:doc_id)
    multi.del keys.docVersion(doc_id:doc_id)
    multi.del keys.docOps(doc_id:doc_id)
    multi.srem keys.docsInProject(project_id:project_id), doc_id
    multi.srem keys.allDocs, doc_id
    multi.exec (err, replys)->
      if err?
        logger.err project_id:project_id, doc_id:doc_id, err:err, "error removing doc from redis"
        callback(err, null)
      else
        docLines = replys[0]
        logger.log project_id:project_id, doc_id:doc_id, docLines:docLines, "removed doc from redis"
        callback()

  getDoc : (doc_id, callback = (error, lines, version) ->)->
    timer = new metrics.Timer("redis.get-doc")
    multi = rclient.multi()
    linesKey = keys.docLines(doc_id:doc_id)
    multi.get linesKey
    multi.get keys.docVersion(doc_id:doc_id)
    multi.exec (error, result)->
      timer.done()
      return callback(error) if error?
      try
        docLines = JSON.parse result[0]
      catch e
        return callback(e)
      version = parseInt(result[1] or 0, 10)
      callback null, docLines, version

  getDocVersion: (doc_id, callback = (error, version) ->) ->
    rclient.get keys.docVersion(doc_id: doc_id), (error, version) ->
      return callback(error) if error?
      version = parseInt(version, 10)
      callback null, version

  getCountOfDocsInMemory : (callback)->
    rclient.smembers keys.allDocs, (err, members)->
      len = members.length
      callback null, len

  setDocument : (doc_id, docLines, version, callback = (error) ->)->
    multi = rclient.multi()
    multi.set keys.docLines(doc_id:doc_id), JSON.stringify(docLines)
    multi.set keys.docVersion(doc_id:doc_id), version
    multi.incr keys.now("docsets")
    multi.exec (error, replys) -> callback(error)

  getPendingUpdatesForDoc : (doc_id, callback)->
    multi = rclient.multi()
    multi.lrange keys.pendingUpdates(doc_id:doc_id), 0, -1
    multi.del keys.pendingUpdates(doc_id:doc_id)
    multi.exec (error, replys) ->
      return callback(error) if error?
      jsonUpdates = replys[0]
      updates = []
      for jsonUpdate in jsonUpdates
        try
          update = JSON.parse jsonUpdate
        catch e
          return callback e
        updates.push update
      callback error, updates

  getUpdatesLength: (doc_id, callback)->
    rclient.llen keys.pendingUpdates(doc_id:doc_id), callback

  getDocsWithPendingUpdates: (callback = (error, docs) ->) ->
    rclient.smembers keys.docsWithPendingUpdates, (error, doc_keys) ->
      return callback(error) if error?
      docs = doc_keys.map (doc_key) ->
        [project_id, doc_id] = keys.splitProjectIdAndDocId(doc_key)
        return {
          doc_id: doc_id
          project_id: project_id
        }
      callback null, docs

  clearDocFromPendingUpdatesSet: (project_id, doc_id, callback = (error) ->) ->
    doc_key = keys.combineProjectIdAndDocId(project_id, doc_id)
    rclient.srem keys.docsWithPendingUpdates, doc_key, callback

  getPreviousDocOps: (doc_id, start, end, callback = (error, jsonOps) ->) ->
    # TODO: parse the ops and return them as objects, not JSON
    rclient.llen keys.docOps(doc_id: doc_id), (error, length) ->
      return callback(error) if error?
      rclient.get keys.docVersion(doc_id: doc_id), (error, version) ->
        return callback(error) if error?
        version = parseInt(version, 10)
        first_version_in_redis = version - length

        if start < first_version_in_redis or end > version
          error = new Error("doc ops range is not loaded in redis")
          logger.error err: error, length: length, version: version, start: start, end: end, "inconsistent version or length"
          return callback(error)

        start = start - first_version_in_redis
        if end > -1
          end = end - first_version_in_redis

        if isNaN(start) or isNaN(end)
          error = new Error("inconsistent version or lengths")
          logger.error err: error, length: length, version: version, start: start, end: end, "inconsistent version or length"
          return callback(error)

        rclient.lrange keys.docOps(doc_id: doc_id), start, end, (error, jsonOps) ->
          return callback(error) if error?
          try
            ops = jsonOps.map (jsonOp) -> JSON.parse jsonOp
          catch e
            return callback(e)
          callback null, ops

  pushDocOp: (doc_id, op, callback = (error, new_version) ->) ->
    # TODO: take a raw op object and JSONify it here
    jsonOp = JSON.stringify op
    rclient.rpush keys.docOps(doc_id: doc_id), jsonOp, (error) ->
      return callback(error) if error?
      rclient.incr keys.docVersion(doc_id: doc_id), (error, version) ->
        return callback(error) if error?
        version = parseInt(version, 10)
        callback null, version

  prependDocOps: (doc_id, ops, callback = (error) ->) ->
    jsonOps = ops.map (op) -> JSON.stringify op
    rclient.lpush keys.docOps(doc_id: doc_id), jsonOps.reverse(), callback

  getDocOpsLength: (doc_id, callback = (error, length) ->) ->
    rclient.llen keys.docOps(doc_id: doc_id), callback

  getDocIdsInProject: (project_id, callback = (error, doc_ids) ->) ->
    rclient.smembers keys.docsInProject(project_id: project_id), callback


getDocumentsProjectId = (doc_id, callback)->
  rclient.get keys.projectKey({doc_id:doc_id}), (err, project_id)->
    callback err, {doc_id:doc_id, project_id:project_id}

getAllProjectDocsIds = (project_id, callback)->
  rclient.SMEMBERS keys.docsInProject(project_id:project_id), (err, doc_ids)->
    if callback?
      callback(err, doc_ids)

getDocumentsAndExpire = (doc_ids, callback)->
  multi = rclient.multi()
  oneDay = 86400
  doc_ids.forEach (doc_id)->
    # rclient.expire keys.docLines(doc_id:doc_id), oneDay, ->
  doc_ids.forEach (doc_id)->
    multi.get keys.docLines(doc_id:doc_id)
  multi.exec (err, docsLines)->
    callback err, docsLines
58 services/document-updater/app/coffee/ShareJsDB.coffee (new normal file)
@@ -0,0 +1,58 @@
Keys = require('./RedisKeyBuilder')
Settings = require('settings-sharelatex')
DocumentManager = require "./DocumentManager"
RedisManager = require "./RedisManager"
DocOpsManager = require "./DocOpsManager"
Errors = require "./Errors"

module.exports = ShareJsDB =
  getOps: (doc_key, start, end, callback) ->
    if start == end
      return callback null, []

    # In redis, lrange values are inclusive.
    if end?
      end--
    else
      end = -1

    [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key)
    DocOpsManager.getPreviousDocOps project_id, doc_id, start, end, (error, ops) ->
      return callback error if error?
      callback null, ops

  writeOp: (doc_key, opData, callback) ->
    [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key)
    DocOpsManager.pushDocOp project_id, doc_id, {op:opData.op, meta:opData.meta}, (error, version) ->
      return callback error if error?

      if version == opData.v + 1
        callback()
      else
        # The document has been corrupted by the change. For now, throw an exception.
        # Later, rebuild the snapshot.
        callback "Version mismatch in db.append. '#{doc_id}' is corrupted."

  getSnapshot: (doc_key, callback) ->
    [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key)
    DocumentManager.getDoc project_id, doc_id, (error, lines, version) ->
      return callback(error) if error?
      if !lines? or !version?
        return callback(new Errors.NotFoundError("document not found: #{doc_id}"))

      if lines.length > 0 and lines[0].text?
        type = "json"
        snapshot = lines: lines
      else
        type = "text"
        snapshot = lines.join("\n")
      callback null,
        snapshot: snapshot
        v: parseInt(version, 10)
        type: type

  # To be able to remove a doc from the ShareJS memory
  # we need to call Model::delete, which calls this
  # method on the database. However, we will handle removing
  # it from Redis ourselves
  delete: (docName, dbMeta, callback) -> callback()
68 services/document-updater/app/coffee/ShareJsUpdateManager.coffee (new normal file)
@@ -0,0 +1,68 @@
ShareJsModel = require "./sharejs/server/model"
ShareJsDB = require "./ShareJsDB"
async = require "async"
logger = require "logger-sharelatex"
Settings = require('settings-sharelatex')
Keys = require "./RedisKeyBuilder"
{EventEmitter} = require "events"
util = require "util"

redis = require('redis')
redisConf = Settings.redis?.web or Settings.redis or {host: "localhost", port: 6379}
rclient = redis.createClient(redisConf.port, redisConf.host)
rclient.auth(redisConf.password)

ShareJsModel:: = {}
util.inherits ShareJsModel, EventEmitter

module.exports = ShareJsUpdateManager =
  getNewShareJsModel: () -> new ShareJsModel(ShareJsDB)

  applyUpdates: (project_id, doc_id, updates, callback = (error, updatedDocLines) ->) ->
    logger.log project_id: project_id, doc_id: doc_id, updates: updates, "applying sharejs updates"
    jobs = []

    # We could use a global model for all docs, but we're hitting issues with the
    # internal state of ShareJS not being accessible for clearing caches, and
    # getting stuck due to queued callbacks (line 260 of sharejs/server/model.coffee)
    # This adds a small but hopefully acceptable overhead (~12ms per 1000 updates on
    # my 2009 MBP).
    model = @getNewShareJsModel()
    @_listenForOps(model)
    doc_key = Keys.combineProjectIdAndDocId(project_id, doc_id)
    for update in updates
      do (update) =>
        jobs.push (callback) =>
          model.applyOp doc_key, update, callback

    async.series jobs, (error) =>
      logger.log project_id: project_id, doc_id: doc_id, error: error, "applied updates"
      if error?
        @_sendError(project_id, doc_id, error)
        return callback(error)
      model.getSnapshot doc_key, (error, data) =>
        if error?
          @_sendError(project_id, doc_id, error)
          return callback(error)
        if typeof data.snapshot == "string"
          docLines = data.snapshot.split("\n")
        else
          docLines = data.snapshot.lines
        callback(null, docLines, data.v)

  _listenForOps: (model) ->
    model.on "applyOp", (doc_key, opData) ->
      [project_id, doc_id] = Keys.splitProjectIdAndDocId(doc_key)
      data = JSON.stringify
        project_id: project_id
        doc_id: doc_id
        op: opData
      rclient.publish "applied-ops", data

  _sendError: (project_id, doc_id, error) ->
    data = JSON.stringify
      project_id: project_id
      doc_id: doc_id
      error: error.message || error
    rclient.publish "applied-ops", data
79 services/document-updater/app/coffee/UpdateManager.coffee (new normal file)
@@ -0,0 +1,79 @@
LockManager = require "./LockManager"
RedisManager = require "./RedisManager"
ShareJsUpdateManager = require "./ShareJsUpdateManager"
Settings = require('settings-sharelatex')
async = require("async")
logger = require('logger-sharelatex')
Metrics = require "./Metrics"

module.exports = UpdateManager =
  resumeProcessing: (callback = (error) ->) ->
    RedisManager.getDocsWithPendingUpdates (error, docs) =>
      return callback(error) if error?
      jobs = for doc in (docs or [])
        do (doc) =>
          (callback) => @processOutstandingUpdatesWithLock doc.project_id, doc.doc_id, callback

      async.parallelLimit jobs, 5, callback

  processOutstandingUpdates: (project_id, doc_id, _callback = (error) ->) ->
    timer = new Metrics.Timer("updateManager.processOutstandingUpdates")
    callback = (args...) ->
      timer.done()
      _callback(args...)

    UpdateManager.fetchAndApplyUpdates project_id, doc_id, (error) =>
      return callback(error) if error?
      RedisManager.clearDocFromPendingUpdatesSet project_id, doc_id, (error) =>
        return callback(error) if error?
        callback()

  processOutstandingUpdatesWithLock: (project_id, doc_id, callback = (error) ->) ->
    LockManager.tryLock doc_id, (error, gotLock) =>
      return callback(error) if error?
      return callback() if !gotLock
      UpdateManager.processOutstandingUpdates project_id, doc_id, (error) ->
        return UpdateManager._handleErrorInsideLock(doc_id, error, callback) if error?
        LockManager.releaseLock doc_id, (error) =>
          return callback(error) if error?
          UpdateManager.continueProcessingUpdatesWithLock project_id, doc_id, callback

  continueProcessingUpdatesWithLock: (project_id, doc_id, callback = (error) ->) ->
    RedisManager.getUpdatesLength doc_id, (error, length) =>
      return callback(error) if error?
      if length > 0
        UpdateManager.processOutstandingUpdatesWithLock project_id, doc_id, callback
      else
        callback()

  fetchAndApplyUpdates: (project_id, doc_id, callback = (error) ->) ->
    RedisManager.getPendingUpdatesForDoc doc_id, (error, updates) =>
      return callback(error) if error?
      if updates.length == 0
        return callback()
      UpdateManager.applyUpdates project_id, doc_id, updates, callback

  applyUpdates: (project_id, doc_id, updates, callback = (error) ->) ->
    ShareJsUpdateManager.applyUpdates project_id, doc_id, updates, (error, updatedDocLines, version) ->
      return callback(error) if error?
      logger.log doc_id: doc_id, version: version, "updating doc via sharejs"
      RedisManager.setDocument doc_id, updatedDocLines, version, callback

  lockUpdatesAndDo: (method, project_id, doc_id, args..., callback) ->
    LockManager.getLock doc_id, (error) ->
      return callback(error) if error?
      UpdateManager.processOutstandingUpdates project_id, doc_id, (error) ->
        return UpdateManager._handleErrorInsideLock(doc_id, error, callback) if error?
        method project_id, doc_id, args..., (error, response_args...) ->
          return UpdateManager._handleErrorInsideLock(doc_id, error, callback) if error?
          LockManager.releaseLock doc_id, (error) ->
            return callback(error) if error?
            callback null, response_args...
            # We held the lock for a while so updates might have queued up
            UpdateManager.continueProcessingUpdatesWithLock project_id, doc_id

  _handleErrorInsideLock: (doc_id, original_error, callback = (error) ->) ->
    LockManager.releaseLock doc_id, (lock_error) ->
      callback(original_error)
7
services/document-updater/app/coffee/mongojs.coffee
Normal file
@@ -0,0 +1,7 @@
Settings = require "settings-sharelatex"
mongojs = require "mongojs"
db = mongojs.connect(Settings.mongo.url, ["docOps"])

module.exports =
  db: db
  ObjectId: mongojs.ObjectId
48
services/document-updater/app/coffee/sharejs/README.md
Normal file
@@ -0,0 +1,48 @@
This directory contains all the operational transform code. Each file defines a type.

Most of the types in here are for testing or demonstration. The only types which are sent to the webclient
are `text` and `json`.


# An OT type

All OT types have the following fields:

`name`: _(string)_ Name of the type. Should match the filename.
`create() -> snapshot`: Function which creates and returns a new document snapshot

`apply(snapshot, op) -> snapshot`: A function which creates a new document snapshot with the op applied
`transform(op1, op2, side) -> op1'`: OT transform function.

Given op1 and op2, convergence requires `apply(apply(s, op2), transform(op1, op2, 'left')) == apply(apply(s, op1), transform(op2, op1, 'right'))`.

Transform and apply must never modify their arguments.
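
To make the required fields concrete, here is a minimal sketch of a type that satisfies them (illustrative only, not one of the types in this directory): a last-writer-wins register whose ops are `{value}` objects and where the `'left'` op wins ties.

    # A trivial "register" type: the snapshot is whatever was last set.
    register =
      name: 'register'
      create: -> null
      apply: (snapshot, op) -> op.value
      transform: (op1, op2, side) ->
        # Return fresh objects so we never modify our arguments. Transforming
        # with side 'left' keeps your own value; with side 'right' the op is
        # rewritten to re-assert the other op's value, so both application
        # orders converge on the 'left' op's value.
        if side == 'left' then {value: op1.value} else {value: op2.value}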

Optional properties:

`tp2`: _(bool)_ True if the transform function supports TP2. This allows p2p architectures to work.
`compose(op1, op2) -> op`: Create and return a new op which has the same effect as op1 + op2.
`serialize(snapshot) -> JSON object`: Serialize a document to something we can JSON.stringify()
`deserialize(object) -> snapshot`: Deserialize a JSON object into the document's internal snapshot format
`prune(op1', op2, side) -> op1`: Inverse transform function. Only required for TP2 types.
`normalize(op) -> op`: Fix up an op to make it valid. Eg, remove skips of size zero.
`api`: _(object)_ Set of helper methods which will be mixed in to the client document object for manipulating documents. See below.


# Examples

`count` and `simple` are two trivial OT type definitions if you want to take a look. JSON defines
the ot-for-JSON type (see the wiki for documentation) and all the text types define different text
implementations. (I still have no idea which one I like the most, and they're fun to write!)
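
For a concrete feel for how `transform` keeps divergent sites consistent, here is a worked example using the `count` type from this directory (its ops are `[expectedSnapshot, increment]`, and its transform ignores the side argument):

    count = require './count'

    s = 1
    op1 = [1, 2]   # expects snapshot 1, adds 2
    op2 = [1, 3]   # expects snapshot 1, adds 3

    # Both application orders converge on 6:
    count.apply count.apply(s, op2), count.transform(op1, op2)   # => 6
    count.apply count.apply(s, op1), count.transform(op2, op1)   # => 6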

# API

Types can also define API functions. These methods are mixed into the client's Doc object when a document is created.
You can use them to help construct ops programmatically (so users don't need to understand how ops are structured).

For example, the three text types defined here (text, text-composable and text-tp2) all provide the text API, supplying
`.insert()`, `.del()`, `.getLength` and `.getText` methods.

See text-api.coffee for an example.
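
For instance, a client session using the text API might look like the following sketch (the exact argument order of `del` is assumed here to be position then length):

    # `doc` stands for an open client document of type `text`.
    doc.insert 0, "Hello world"
    doc.del 5, 6                # delete " world"
    console.log doc.getText()   # => "Hello"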
22
services/document-updater/app/coffee/sharejs/count.coffee
Normal file
@@ -0,0 +1,22 @@
# This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment]

exports.name = 'count'
exports.create = -> 1

exports.apply = (snapshot, op) ->
  [v, inc] = op
  throw new Error "Op #{v} != snapshot #{snapshot}" unless snapshot == v
  snapshot + inc

# transform op1 by op2. Return transformed version of op1.
exports.transform = (op1, op2) ->
  throw new Error "Op1 #{op1[0]} != op2 #{op2[0]}" unless op1[0] == op2[0]
  [op1[0] + op2[1], op1[1]]

exports.compose = (op1, op2) ->
  throw new Error "Op1 #{op1} + 1 != op2 #{op2}" unless op1[0] + op1[1] == op2[0]
  [op1[0], op1[1] + op2[1]]

exports.generateRandomOp = (doc) ->
  [[doc, 1], doc + 1]
65
services/document-updater/app/coffee/sharejs/helpers.coffee
Normal file
@@ -0,0 +1,65 @@
# These methods let you build a transform function from a transformComponent function
# for OT types like text and JSON in which operations are lists of components
# and transforming them requires N^2 work.

# Add transform and transformX functions for an OT type which has transformComponent defined.
# transformComponent(destination array, component, other component, side)
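# For example, json.coffee wires itself up with:
#   require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append)
# which installs transform and transformX on the json type.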
exports['_bt'] = bootstrapTransform = (type, transformComponent, checkValidOp, append) ->
  transformComponentX = (left, right, destLeft, destRight) ->
    transformComponent destLeft, left, right, 'left'
    transformComponent destRight, right, left, 'right'

  # Transforms rightOp by leftOp. Returns ['rightOp', clientOp']
  type.transformX = type['transformX'] = transformX = (leftOp, rightOp) ->
    checkValidOp leftOp
    checkValidOp rightOp

    newRightOp = []

    for rightComponent in rightOp
      # Generate newLeftOp by composing leftOp by rightComponent
      newLeftOp = []

      k = 0
      while k < leftOp.length
        nextC = []
        transformComponentX leftOp[k], rightComponent, newLeftOp, nextC
        k++

        if nextC.length == 1
          rightComponent = nextC[0]
        else if nextC.length == 0
          append newLeftOp, l for l in leftOp[k..]
          rightComponent = null
          break
        else
          # Recurse.
          [l_, r_] = transformX leftOp[k..], nextC
          append newLeftOp, l for l in l_
          append newRightOp, r for r in r_
          rightComponent = null
          break

      append newRightOp, rightComponent if rightComponent?
      leftOp = newLeftOp

    [leftOp, newRightOp]

  # Transforms op with specified type ('left' or 'right') by otherOp.
  type.transform = type['transform'] = (op, otherOp, type) ->
    throw new Error "type must be 'left' or 'right'" unless type == 'left' or type == 'right'

    return op if otherOp.length == 0

    # TODO: Benchmark with and without this line. I _think_ it'll make a big difference...?
    return transformComponent [], op[0], otherOp[0], type if op.length == 1 and otherOp.length == 1

    if type == 'left'
      [left, _] = transformX op, otherOp
      left
    else
      [_, right] = transformX otherOp, op
      right

if typeof WEB is 'undefined'
  exports.bootstrapTransform = bootstrapTransform
15
services/document-updater/app/coffee/sharejs/index.coffee
Normal file
@@ -0,0 +1,15 @@
register = (file) ->
  type = require file
  exports[type.name] = type
  try require "#{file}-api"

# Import all the built-in types.
register './simple'
register './count'

register './text'
register './text-composable'
register './text-tp2'

register './json'
180
services/document-updater/app/coffee/sharejs/json-api.coffee
Normal file
@@ -0,0 +1,180 @@
# API for JSON OT
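# As a small illustration (assuming `doc` is an open client document of type
# json with an object snapshot):
#   doc.at('title').set 'Hello', (error) ->      # submits an oi/od component
#   doc.at('tags').insert 0, 'draft', (error) -> # submits an li component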
json = require './json' if typeof WEB is 'undefined'

if WEB?
  extendDoc = exports.extendDoc
  exports.extendDoc = (name, fn) ->
    SubDoc::[name] = fn
    extendDoc name, fn

depath = (path) ->
  if path.length == 1 and path[0].constructor == Array
    path[0]
  else path

class SubDoc
  constructor: (@doc, @path) ->
  at: (path...) -> @doc.at @path.concat depath path
  get: -> @doc.getAt @path
  # for objects and lists
  set: (value, cb) -> @doc.setAt @path, value, cb
  # for strings and lists.
  insert: (pos, value, cb) -> @doc.insertAt @path, pos, value, cb
  # for strings
  del: (pos, length, cb) -> @doc.deleteTextAt @path, length, pos, cb
  # for objects and lists
  remove: (cb) -> @doc.removeAt @path, cb
  push: (value, cb) -> @insert @get().length, value, cb
  move: (from, to, cb) -> @doc.moveAt @path, from, to, cb
  add: (amount, cb) -> @doc.addAt @path, amount, cb
  on: (event, cb) -> @doc.addListener @path, event, cb
  removeListener: (l) -> @doc.removeListener l

  # text API compatibility
  getLength: -> @get().length
  getText: -> @get()

traverse = (snapshot, path) ->
  container = data:snapshot
  key = 'data'
  elem = container
  for p in path
    elem = elem[key]
    key = p
    throw new Error 'bad path' if typeof elem == 'undefined'
  {elem, key}

pathEquals = (p1, p2) ->
  return false if p1.length != p2.length
  for e,i in p1
    return false if e != p2[i]
  true

json.api =
  provides: {json:true}

  at: (path...) -> new SubDoc this, depath path

  get: -> @snapshot
  set: (value, cb) -> @setAt [], value, cb

  getAt: (path) ->
    {elem, key} = traverse @snapshot, path
    return elem[key]

  setAt: (path, value, cb) ->
    {elem, key} = traverse @snapshot, path
    op = {p:path}
    if elem.constructor == Array
      op.li = value
      op.ld = elem[key] if typeof elem[key] != 'undefined'
    else if typeof elem == 'object'
      op.oi = value
      op.od = elem[key] if typeof elem[key] != 'undefined'
    else throw new Error 'bad path'
    @submitOp [op], cb

  removeAt: (path, cb) ->
    {elem, key} = traverse @snapshot, path
    throw new Error 'no element at that path' unless typeof elem[key] != 'undefined'
    op = {p:path}
    if elem.constructor == Array
      op.ld = elem[key]
    else if typeof elem == 'object'
      op.od = elem[key]
    else throw new Error 'bad path'
    @submitOp [op], cb

  insertAt: (path, pos, value, cb) ->
    {elem, key} = traverse @snapshot, path
    op = {p:path.concat pos}
    if elem[key].constructor == Array
      op.li = value
    else if typeof elem[key] == 'string'
      op.si = value
    @submitOp [op], cb

  moveAt: (path, from, to, cb) ->
    op = [{p:path.concat(from), lm:to}]
    @submitOp op, cb

  addAt: (path, amount, cb) ->
    op = [{p:path, na:amount}]
    @submitOp op, cb

  deleteTextAt: (path, length, pos, cb) ->
    {elem, key} = traverse @snapshot, path
    op = [{p:path.concat(pos), sd:elem[key][pos...(pos + length)]}]
    @submitOp op, cb

  addListener: (path, event, cb) ->
    l = {path, event, cb}
    @_listeners.push l
    l
  removeListener: (l) ->
    i = @_listeners.indexOf l
    return false if i < 0
    @_listeners.splice i, 1
    return true
  _register: ->
    @_listeners = []
    @on 'change', (op) ->
      for c in op
        if c.na != undefined or c.si != undefined or c.sd != undefined
          # no change to structure
          continue
        to_remove = []
        for l, i in @_listeners
          # Transform a dummy op by the incoming op to work out what
          # should happen to the listener.
          dummy = {p:l.path, na:0}
          xformed = @type.transformComponent [], dummy, c, 'left'
          if xformed.length == 0
            # The op was transformed to noop, so we should delete the listener.
            to_remove.push i
          else if xformed.length == 1
            # The op remained, so grab its new path into the listener.
            l.path = xformed[0].p
          else
            throw new Error "Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components."
        to_remove.sort (a, b) -> b - a
        for i in to_remove
          @_listeners.splice i, 1
    @on 'remoteop', (op) ->
      for c in op
        match_path = if c.na == undefined then c.p[...c.p.length-1] else c.p
        for {path, event, cb} in @_listeners
          if pathEquals path, match_path
            switch event
              when 'insert'
                if c.li != undefined and c.ld == undefined
                  cb(c.p[c.p.length-1], c.li)
                else if c.oi != undefined and c.od == undefined
                  cb(c.p[c.p.length-1], c.oi)
                else if c.si != undefined
                  cb(c.p[c.p.length-1], c.si)
              when 'delete'
                if c.li == undefined and c.ld != undefined
                  cb(c.p[c.p.length-1], c.ld)
                else if c.oi == undefined and c.od != undefined
                  cb(c.p[c.p.length-1], c.od)
                else if c.sd != undefined
                  cb(c.p[c.p.length-1], c.sd)
              when 'replace'
                if c.li != undefined and c.ld != undefined
                  cb(c.p[c.p.length-1], c.ld, c.li)
                else if c.oi != undefined and c.od != undefined
                  cb(c.p[c.p.length-1], c.od, c.oi)
              when 'move'
                if c.lm != undefined
                  cb(c.p[c.p.length-1], c.lm)
              when 'add'
                if c.na != undefined
                  cb(c.na)
          else if (common = @type.commonPath match_path, path)?
            if event == 'child op'
              if match_path.length == path.length == common
                throw new Error "paths match length and have commonality, but aren't equal?"
              child_path = c.p[common+1..]
              cb(child_path, c)
441
services/document-updater/app/coffee/sharejs/json.coffee
Normal file
@@ -0,0 +1,441 @@
# This is the implementation of the JSON OT type.
#
# Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations
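# For orientation, an op is a list of components, each holding a path `p` plus
# one instruction, for example:
#   [{p:['title'], oi:'Hello'}]       # object insert
#   [{p:['tags', 0], li:'draft'}]     # list insert
#   [{p:['body', 5], si:' there'}]    # string insert at offset 5
#   [{p:['count'], na:2}]             # add 2 to a number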
if WEB?
  text = exports.types.text
else
  text = require './text'

json = {}

json.name = 'json'

json.create = -> null

json.invertComponent = (c) ->
  c_ = {p: c.p}
  c_.sd = c.si if c.si != undefined
  c_.si = c.sd if c.sd != undefined
  c_.od = c.oi if c.oi != undefined
  c_.oi = c.od if c.od != undefined
  c_.ld = c.li if c.li != undefined
  c_.li = c.ld if c.ld != undefined
  c_.na = -c.na if c.na != undefined
  if c.lm != undefined
    c_.lm = c.p[c.p.length-1]
    c_.p = c.p[0...c.p.length - 1].concat([c.lm])
  c_

json.invert = (op) -> json.invertComponent c for c in op.slice().reverse()

json.checkValidOp = (op) ->

isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]'
json.checkList = (elem) ->
  throw new Error 'Referenced element not a list' unless isArray(elem)

json.checkObj = (elem) ->
  throw new Error "Referenced element not an object (it was #{JSON.stringify elem})" unless elem.constructor is Object

json.apply = (snapshot, op) ->
  json.checkValidOp op
  op = clone op

  container = {data: clone snapshot}

  try
    for c, i in op
      parent = null
      parentkey = null
      elem = container
      key = 'data'

      for p in c.p
        parent = elem
        parentkey = key
        elem = elem[key]
        key = p

        throw new Error 'Path invalid' unless parent?

      if c.na != undefined
        # Number add
        throw new Error 'Referenced element not a number' unless typeof elem[key] is 'number'
        elem[key] += c.na

      else if c.si != undefined
        # String insert
        throw new Error "Referenced element not a string (it was #{JSON.stringify elem})" unless typeof elem is 'string'
        parent[parentkey] = elem[...key] + c.si + elem[key..]
      else if c.sd != undefined
        # String delete
        throw new Error 'Referenced element not a string' unless typeof elem is 'string'
        throw new Error 'Deleted string does not match' unless elem[key...key + c.sd.length] == c.sd
        parent[parentkey] = elem[...key] + elem[key + c.sd.length..]

      else if c.li != undefined && c.ld != undefined
        # List replace
        json.checkList elem

        # Should check the list element matches c.ld
        elem[key] = c.li
      else if c.li != undefined
        # List insert
        json.checkList elem

        elem.splice key, 0, c.li
      else if c.ld != undefined
        # List delete
        json.checkList elem

        # Should check the list element matches c.ld here too.
        elem.splice key, 1
      else if c.lm != undefined
        # List move
        json.checkList elem
        if c.lm != key
          e = elem[key]
          # Remove it...
          elem.splice key, 1
          # And insert it back.
          elem.splice c.lm, 0, e

      else if c.oi != undefined
        # Object insert / replace
        json.checkObj elem

        # Should check that elem[key] == c.od
        elem[key] = c.oi
      else if c.od != undefined
        # Object delete
        json.checkObj elem

        # Should check that elem[key] == c.od
        delete elem[key]
      else
        throw new Error 'invalid / missing instruction in op'
  catch error
    # TODO: Roll back all already applied changes. Write tests before implementing this code.
    throw error

  container.data

# Checks if two paths, p1 and p2 match.
json.pathMatches = (p1, p2, ignoreLast) ->
  return false unless p1.length == p2.length

  for p, i in p1
    return false if p != p2[i] and (!ignoreLast or i != p1.length - 1)

  true

json.append = (dest, c) ->
  c = clone c
  if dest.length != 0 and json.pathMatches c.p, (last = dest[dest.length - 1]).p
    if last.na != undefined and c.na != undefined
      dest[dest.length - 1] = { p: last.p, na: last.na + c.na }
    else if last.li != undefined and c.li == undefined and c.ld == last.li
      # insert immediately followed by delete becomes a noop.
      if last.ld != undefined
        # leave the delete part of the replace
        delete last.li
      else
        dest.pop()
    else if last.od != undefined and last.oi == undefined and
        c.oi != undefined and c.od == undefined
      last.oi = c.oi
    else if c.lm != undefined and c.p[c.p.length-1] == c.lm
      null # don't do anything
    else
      dest.push c
  else
    dest.push c

json.compose = (op1, op2) ->
  json.checkValidOp op1
  json.checkValidOp op2

  newOp = clone op1
  json.append newOp, c for c in op2

  newOp

json.normalize = (op) ->
  newOp = []

  op = [op] unless isArray op

  for c in op
    c.p ?= []
    json.append newOp, c

  newOp

# hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming
# we have browser support for JSON.
# http://jsperf.com/cloning-an-object/12
clone = (o) -> JSON.parse(JSON.stringify o)

json.commonPath = (p1, p2) ->
  p1 = p1.slice()
  p2 = p2.slice()
  p1.unshift('data')
  p2.unshift('data')
  p1 = p1[...p1.length-1]
  p2 = p2[...p2.length-1]
  return -1 if p2.length == 0
  i = 0
  while p1[i] == p2[i] && i < p1.length
    i++
    if i == p2.length
      return i-1
  return

# transform c so it applies to a document with otherC applied.
json.transformComponent = (dest, c, otherC, type) ->
  c = clone c
  c.p.push(0) if c.na != undefined
  otherC.p.push(0) if otherC.na != undefined

  common = json.commonPath c.p, otherC.p
  common2 = json.commonPath otherC.p, c.p

  cplength = c.p.length
  otherCplength = otherC.p.length

  c.p.pop() if c.na != undefined # hax
  otherC.p.pop() if otherC.na != undefined

  if otherC.na
    if common2? && otherCplength >= cplength && otherC.p[common2] == c.p[common2]
      if c.ld != undefined
        oc = clone otherC
        oc.p = oc.p[cplength..]
        c.ld = json.apply clone(c.ld), [oc]
      else if c.od != undefined
        oc = clone otherC
        oc.p = oc.p[cplength..]
        c.od = json.apply clone(c.od), [oc]
    json.append dest, c
    return dest

  if common2? && otherCplength > cplength && c.p[common2] == otherC.p[common2]
    # transform based on c
    if c.ld != undefined
      oc = clone otherC
      oc.p = oc.p[cplength..]
      c.ld = json.apply clone(c.ld), [oc]
    else if c.od != undefined
      oc = clone otherC
      oc.p = oc.p[cplength..]
      c.od = json.apply clone(c.od), [oc]

  if common?
    commonOperand = cplength == otherCplength
    # transform based on otherC
    if otherC.na != undefined
      # this case is handled above due to icky path hax
    else if otherC.si != undefined || otherC.sd != undefined
      # String op vs string op - pass through to text type
      if c.si != undefined || c.sd != undefined
        throw new Error("must be a string?") unless commonOperand

        # Convert an op component to a text op component
        convert = (component) ->
          newC = p:component.p[component.p.length - 1]
          if component.si
            newC.i = component.si
          else
            newC.d = component.sd
          newC

        tc1 = convert c
        tc2 = convert otherC

        res = []
        text._tc res, tc1, tc2, type
        for tc in res
          jc = { p: c.p[...common] }
          jc.p.push(tc.p)
          jc.si = tc.i if tc.i?
          jc.sd = tc.d if tc.d?
          json.append dest, jc
        return dest
    else if otherC.li != undefined && otherC.ld != undefined
      if otherC.p[common] == c.p[common]
        # noop
        if !commonOperand
          # we're below the deleted element, so -> noop
          return dest
        else if c.ld != undefined
          # we're trying to delete the same element, -> noop
          if c.li != undefined and type == 'left'
            # we're both replacing one element with another. only one can
            # survive!
            c.ld = clone otherC.li
          else
            return dest
    else if otherC.li != undefined
      if c.li != undefined and c.ld == undefined and commonOperand and c.p[common] == otherC.p[common]
        # in li vs. li, left wins.
        if type == 'right'
          c.p[common]++
      else if otherC.p[common] <= c.p[common]
        c.p[common]++

      if c.lm != undefined
        if commonOperand
          # otherC edits the same list we edit
          if otherC.p[common] <= c.lm
            c.lm++
          # changing c.from is handled above.
    else if otherC.ld != undefined
      if c.lm != undefined
        if commonOperand
          if otherC.p[common] == c.p[common]
            # they deleted the thing we're trying to move
            return dest
          # otherC edits the same list we edit
          p = otherC.p[common]
          from = c.p[common]
          to = c.lm
          if p < to || (p == to && from < to)
            c.lm--

      if otherC.p[common] < c.p[common]
        c.p[common]--
      else if otherC.p[common] == c.p[common]
        if otherCplength < cplength
          # we're below the deleted element, so -> noop
          return dest
        else if c.ld != undefined
          if c.li != undefined
            # we're replacing, they're deleting. we become an insert.
            delete c.ld
          else
            # we're trying to delete the same element, -> noop
            return dest
    else if otherC.lm != undefined
      if c.lm != undefined and cplength == otherCplength
        # lm vs lm, here we go!
        from = c.p[common]
        to = c.lm
        otherFrom = otherC.p[common]
        otherTo = otherC.lm
        if otherFrom != otherTo
          # if otherFrom == otherTo, we don't need to change our op.

          # where did my thing go?
          if from == otherFrom
            # they moved it! tie break.
            if type == 'left'
              c.p[common] = otherTo
              if from == to # ugh
                c.lm = otherTo
            else
              return dest
          else
            # they moved around it
            if from > otherFrom
              c.p[common]--
            if from > otherTo
              c.p[common]++
            else if from == otherTo
              if otherFrom > otherTo
                c.p[common]++
                if from == to # ugh, again
                  c.lm++

            # step 2: where am i going to put it?
            if to > otherFrom
              c.lm--
            else if to == otherFrom
              if to > from
                c.lm--
            if to > otherTo
              c.lm++
            else if to == otherTo
              # if we're both moving in the same direction, tie break
              if (otherTo > otherFrom and to > from) or
                  (otherTo < otherFrom and to < from)
                if type == 'right'
                  c.lm++
              else
                if to > from
                  c.lm++
                else if to == otherFrom
                  c.lm--
      else if c.li != undefined and c.ld == undefined and commonOperand
        # li
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if p > from
          c.p[common]--
        if p > to
          c.p[common]++
      else
        # ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath
        # the lm
        #
        # i.e. things care about where their item is after the move.
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if p == from
          c.p[common] = to
        else
          if p > from
            c.p[common]--
          if p > to
            c.p[common]++
          else if p == to
            if from > to
              c.p[common]++
    else if otherC.oi != undefined && otherC.od != undefined
      if c.p[common] == otherC.p[common]
        if c.oi != undefined and commonOperand
          # we inserted where someone else replaced
          if type == 'right'
            # left wins
            return dest
          else
            # we win, make our op replace what they inserted
            c.od = otherC.oi
        else
          # -> noop if the other component is deleting the same object (or any
          # parent)
          return dest
    else if otherC.oi != undefined
      if c.oi != undefined and c.p[common] == otherC.p[common]
        # left wins if we try to insert at the same place
        if type == 'left'
          json.append dest, {p:c.p, od:otherC.oi}
        else
          return dest
    else if otherC.od != undefined
      if c.p[common] == otherC.p[common]
        return dest if !commonOperand
        if c.oi != undefined
          delete c.od
        else
          return dest

  json.append dest, c
  return dest

if WEB?
  exports.types ||= {}

  # This is kind of awful - come up with a better way to hook this helper code up.
  exports._bt(json, json.transformComponent, json.checkValidOp, json.append)

  # [] is used to prevent closure from renaming types.text
  exports.types.json = json
else
  module.exports = json

  require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append)
603
services/document-updater/app/coffee/sharejs/model.coffee
Normal file
@@ -0,0 +1,603 @@
# The model of all the ops. Responsible for applying & transforming remote deltas
# and managing the storage layer.
#
# Actual storage is handled by the database wrappers in db/*, wrapped by DocCache

{EventEmitter} = require 'events'

queue = require './syncqueue'
types = require '../types'

isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]'

# This constructor creates a new Model object. There will be one model object
# per server context.
#
# The model object is responsible for a lot of things:
#
# - It manages the interactions with the database
# - It maintains (in memory) a set of all active documents
# - It calls out to the OT functions when necessary
#
# The model is an event emitter. It emits the following events:
#
# create(docName, data): A document has been created with the specified name & data
module.exports = Model = (db, options) ->
  # db can be null if the user doesn't want persistence.

  return new Model(db, options) if !(this instanceof Model)

  model = this

  options ?= {}

  # This is a cache of 'live' documents.
  #
  # The cache is a map from docName -> {
  #   ops:[{op, meta}]
  #   snapshot
  #   type
  #   v
  #   meta
  #   eventEmitter
  #   reapTimer
  #   committedVersion: v
  #   snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant
  #   dbMeta: database specific data
  #   opQueue: syncQueue for processing ops
  # }
  #
  # The ops list contains the document's last options.numCachedOps ops. (Or all
  # of them if we're using a memory store).
  #
  # Documents are stored in this set so long as the document has been accessed in
  # the last few seconds (options.reapTime) OR at least one client has the document
  # open. I don't know if I should keep open (but not being edited) documents live -
  # maybe if a client has a document open but the document isn't being edited, I should
  # flush it from the cache.
  #
  # In any case, the API to model is designed such that if we want to change that later
  # it should be pretty easy to do so without any external-to-the-model code changes.
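  # For example, a freshly loaded text document might look like this in the
  # cache (illustrative values only):
  #
  #   docs['doc-name'] =
  #     snapshot: "hello\nworld"
  #     type: types.text
  #     v: 4
  #     committedVersion: 3
  #     snapshotWriteLock: false
  #     ops: [...]   # up to options.numCachedOps recent {op, meta} entries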
docs = {}
|
||||||
|
|
||||||
|
# This is a map from docName -> [callback]. It is used when a document hasn't been
|
||||||
|
# cached and multiple getSnapshot() / getVersion() requests come in. All requests
|
||||||
|
# are added to the callback list and called when db.getSnapshot() returns.
|
||||||
|
#
|
||||||
|
# callback(error, snapshot data)
|
||||||
|
awaitingGetSnapshot = {}
|
||||||
|
|
||||||
|
# The time that documents which no clients have open will stay in the cache.
|
||||||
|
# Should be > 0.
|
||||||
|
options.reapTime ?= 3000
|
||||||
|
|
||||||
|
# The number of operations the cache holds before reusing the space
|
||||||
|
options.numCachedOps ?= 10
|
||||||
|
|
||||||
|
# This option forces documents to be reaped, even when there's no database backend.
|
||||||
|
# This is useful when you don't care about persistance and don't want to gradually
|
||||||
|
# fill memory.
|
||||||
|
#
|
||||||
|
# You might want to set reapTime to a day or something.
|
||||||
|
options.forceReaping ?= false
|
||||||
|
|
||||||
|
# Until I come up with a better strategy, we'll save a copy of the document snapshot
|
||||||
|
# to the database every ~20 submitted ops.
|
||||||
|
options.opsBeforeCommit ?= 20
|
||||||
|
|
||||||
|
# It takes some processing time to transform client ops. The server will punt ops back to the
|
||||||
|
# client to transform if they're too old.
|
||||||
|
options.maximumAge ?= 40
|
||||||
|
|
||||||
|
# **** Cache API methods
|
||||||
|
|
||||||
|
# Its important that all ops are applied in order. This helper method creates the op submission queue
|
||||||
|
# for a single document. This contains the logic for transforming & applying ops.
|
||||||
|
makeOpQueue = (docName, doc) -> queue (opData, callback) ->
|
||||||
|
return callback 'Version missing' unless opData.v >= 0
|
||||||
|
return callback 'Op at future version' if opData.v > doc.v
|
||||||
|
|
||||||
|
# Punt the transforming work back to the client if the op is too old.
|
||||||
|
return callback 'Op too old' if opData.v + options.maximumAge < doc.v
|
||||||
|
|
||||||
|
opData.meta ||= {}
|
||||||
|
opData.meta.ts = Date.now()
|
||||||
|
|
||||||
|
# We'll need to transform the op to the current version of the document. This
|
||||||
|
# calls the callback immediately if opVersion == doc.v.
|
||||||
|
getOps docName, opData.v, doc.v, (error, ops) ->
|
||||||
|
return callback error if error
|
||||||
|
|
||||||
|
unless doc.v - opData.v == ops.length
|
||||||
|
# This should never happen. It indicates that we didn't get all the ops we
|
||||||
|
# asked for. Its important that the submitted op is correctly transformed.
|
||||||
|
console.error "Could not get old ops in model for document #{docName}"
|
||||||
|
console.error "Expected ops #{opData.v} to #{doc.v} and got #{ops.length} ops"
|
||||||
|
return callback 'Internal error'
|
||||||
|
|
||||||
|
if ops.length > 0
|
||||||
|
try
|
||||||
|
# If there's enough ops, it might be worth spinning this out into a webworker thread.
|
||||||
|
for oldOp in ops
|
||||||
|
# Dup detection works by sending the id(s) the op has been submitted with previously.
|
||||||
|
# If the id matches, we reject it. The client can also detect the op has been submitted
|
||||||
|
# already if it sees its own previous id in the ops it sees when it does catchup.
|
||||||
|
if oldOp.meta.source and opData.dupIfSource and oldOp.meta.source in opData.dupIfSource
|
||||||
|
return callback 'Op already submitted'
|
||||||
|
|
||||||
|
opData.op = doc.type.transform opData.op, oldOp.op, 'left'
|
||||||
|
opData.v++
|
||||||
|
catch error
|
||||||
|
console.error error.stack
|
||||||
|
return callback error.message
|
||||||
|
|
||||||
|
try
|
||||||
|
snapshot = doc.type.apply doc.snapshot, opData.op
|
||||||
|
catch error
|
||||||
|
console.error error.stack
|
||||||
|
return callback error.message
|
||||||
|
|
||||||
|
# The op data should be at the current version, and the new document data should be at
|
||||||
|
# the next version.
|
||||||
|
#
|
||||||
|
# This should never happen in practice, but its a nice little check to make sure everything
|
||||||
|
# is hunky-dory.
|
||||||
|
unless opData.v == doc.v
|
||||||
|
# This should never happen.
|
||||||
|
console.error "Version mismatch detected in model. File a ticket - this is a bug."
|
||||||
|
console.error "Expecting #{opData.v} == #{doc.v}"
|
||||||
|
return callback 'Internal error'
|
||||||
|
|
||||||
|
#newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta}
|
||||||
|
writeOp = db?.writeOp or (docName, newOpData, callback) -> callback()
|
||||||
|
|
||||||
|
writeOp docName, opData, (error) ->
|
||||||
|
if error
|
||||||
|
# The user should probably know about this.
|
||||||
|
console.warn "Error writing ops to database: #{error}"
|
||||||
|
return callback error
|
||||||
|
|
||||||
|
options.stats?.writeOp?()
|
||||||
|
|
||||||
|
# This is needed when we emit the 'change' event, below.
|
||||||
|
oldSnapshot = doc.snapshot
|
||||||
|
|
||||||
|
# All the heavy lifting is now done. Finally, we'll update the cache with the new data
|
||||||
|
# and (maybe!) save a new document snapshot to the database.
|
||||||
|
|
||||||
|
doc.v = opData.v + 1
|
||||||
|
doc.snapshot = snapshot
|
||||||
|
|
||||||
|
doc.ops.push opData
|
||||||
|
doc.ops.shift() if db and doc.ops.length > options.numCachedOps
|
||||||
|
|
||||||
|
model.emit 'applyOp', docName, opData, snapshot, oldSnapshot
|
||||||
|
doc.eventEmitter.emit 'op', opData, snapshot, oldSnapshot
|
||||||
|
|
||||||
|
# The callback is called with the version of the document at which the op was applied.
|
||||||
|
# This is the op.v after transformation, and its doc.v - 1.
|
||||||
|
callback null, opData.v
|
||||||
|
|
||||||
|
# I need a decent strategy here for deciding whether or not to save the snapshot.
|
||||||
|
#
|
||||||
|
# The 'right' strategy looks something like "Store the snapshot whenever the snapshot
|
||||||
|
# is smaller than the accumulated op data". For now, I'll just store it every 20
|
||||||
|
# ops or something. (Configurable with doc.committedVersion)
|
||||||
|
if !doc.snapshotWriteLock and doc.committedVersion + options.opsBeforeCommit <= doc.v
|
||||||
|
tryWriteSnapshot docName, (error) ->
|
||||||
|
console.warn "Error writing snapshot #{error}. This is nonfatal" if error
|
||||||
|
|
||||||
|
# Add the data for the given docName to the cache. The named document shouldn't already
|
||||||
|
# exist in the doc set.
|
||||||
|
#
|
||||||
|
# Returns the new doc.
|
||||||
|
add = (docName, error, data, committedVersion, ops, dbMeta) ->
|
||||||
|
callbacks = awaitingGetSnapshot[docName]
|
||||||
|
delete awaitingGetSnapshot[docName]
|
||||||
|
|
||||||
|
if error
|
||||||
|
callback error for callback in callbacks if callbacks
|
||||||
|
else
|
||||||
|
doc = docs[docName] =
|
||||||
|
snapshot: data.snapshot
|
||||||
|
v: data.v
|
||||||
|
type: data.type
|
||||||
|
meta: data.meta
|
||||||
|
|
||||||
|
# Cache of ops
|
||||||
|
ops: ops or []
|
||||||
|
|
||||||
|
eventEmitter: new EventEmitter
|
||||||
|
|
||||||
|
# Timer before the document will be invalidated from the cache (if the document has no
|
||||||
|
# listeners)
|
||||||
|
reapTimer: null
|
||||||
|
|
||||||
|
# Version of the snapshot thats in the database
|
||||||
|
committedVersion: committedVersion ? data.v
|
||||||
|
snapshotWriteLock: false
|
||||||
|
dbMeta: dbMeta
|
||||||
|
|
||||||
|
doc.opQueue = makeOpQueue docName, doc
|
||||||
|
|
||||||
|
refreshReapingTimeout docName
|
||||||
|
model.emit 'add', docName, data
|
||||||
|
callback null, doc for callback in callbacks if callbacks
|
||||||
|
|
||||||
|
doc
|
||||||
|
|
||||||
|
# This is a little helper wrapper around db.getOps. It does two things:
|
||||||
|
#
|
||||||
|
# - If there's no database set, it returns an error to the callback
|
||||||
|
# - It adds version numbers to each op returned from the database
|
||||||
|
# (These can be inferred from context so the DB doesn't store them, but its useful to have them).
|
||||||
|
getOpsInternal = (docName, start, end, callback) ->
|
||||||
|
return callback? 'Document does not exist' unless db
|
||||||
|
|
||||||
|
db.getOps docName, start, end, (error, ops) ->
|
||||||
|
return callback? error if error
|
||||||
|
|
||||||
|
v = start
|
||||||
|
op.v = v++ for op in ops
|
||||||
|
|
||||||
|
callback? null, ops
|
||||||
|
|
||||||
|
# Load the named document into the cache. This function is re-entrant.
|
||||||
|
#
|
||||||
|
# The callback is called with (error, doc)
|
||||||
|
load = (docName, callback) ->
|
||||||
|
if docs[docName]
|
||||||
|
# The document is already loaded. Return immediately.
|
||||||
|
options.stats?.cacheHit? 'getSnapshot'
|
||||||
|
return callback null, docs[docName]
|
||||||
|
|
||||||
|
# We're a memory store. If we don't have it, nobody does.
|
||||||
|
return callback 'Document does not exist' unless db
|
||||||
|
|
||||||
|
callbacks = awaitingGetSnapshot[docName]
|
||||||
|
|
||||||
|
# The document is being loaded already. Add ourselves as a callback.
|
||||||
|
return callbacks.push callback if callbacks
|
||||||
|
|
||||||
|
options.stats?.cacheMiss? 'getSnapshot'
|
||||||
|
|
||||||
|
# The document isn't loaded and isn't being loaded. Load it.
|
||||||
|
awaitingGetSnapshot[docName] = [callback]
|
||||||
|
db.getSnapshot docName, (error, data, dbMeta) ->
|
||||||
|
return add docName, error if error
|
||||||
|
|
||||||
|
type = types[data.type]
|
||||||
|
unless type
|
||||||
|
console.warn "Type '#{data.type}' missing"
|
||||||
|
return callback "Type not found"
|
||||||
|
data.type = type
|
||||||
|
|
||||||
|
committedVersion = data.v
|
||||||
|
|
||||||
|
# The server can close without saving the most recent document snapshot.
|
||||||
|
# In this case, there are extra ops which need to be applied before
|
||||||
|
# returning the snapshot.
|
||||||
|
getOpsInternal docName, data.v, null, (error, ops) ->
|
||||||
|
return callback error if error
|
||||||
|
|
||||||
|
if ops.length > 0
|
||||||
|
console.log "Catchup #{docName} #{data.v} -> #{data.v + ops.length}"
|
||||||
|
|
||||||
|
try
|
||||||
|
for op in ops
|
||||||
|
data.snapshot = type.apply data.snapshot, op.op
|
||||||
|
data.v++
|
||||||
|
catch e
|
||||||
|
# This should never happen - it indicates that whats in the
|
||||||
|
# database is invalid.
|
||||||
|
console.error "Op data invalid for #{docName}: #{e.stack}"
|
||||||
|
return callback 'Op data invalid'
|
||||||
|
|
||||||
|
model.emit 'load', docName, data
|
||||||
|
add docName, error, data, committedVersion, ops, dbMeta
|
||||||
|
|
||||||
|
# This makes sure the cache contains a document. If the doc cache doesn't contain
|
||||||
|
# a document, it is loaded from the database and stored.
|
||||||
|
#
|
||||||
|
# Documents are stored so long as either:
|
||||||
|
# - They have been accessed within the past #{PERIOD}
|
||||||
|
# - At least one client has the document open
|
||||||
|
refreshReapingTimeout = (docName) ->
|
||||||
|
doc = docs[docName]
|
||||||
|
return unless doc
|
||||||
|
|
||||||
|
# I want to let the clients list be updated before this is called.
|
||||||
|
process.nextTick ->
|
||||||
|
# This is an awkward way to find out the number of clients on a document. If this
|
||||||
|
# causes performance issues, add a numClients field to the document.
|
||||||
|
#
|
||||||
|
# The first check is because its possible that between refreshReapingTimeout being called and this
|
||||||
|
# event being fired, someone called delete() on the document and hence the doc is something else now.
|
||||||
|
if doc == docs[docName] and
|
||||||
|
doc.eventEmitter.listeners('op').length == 0 and
|
||||||
|
(db or options.forceReaping) and
|
||||||
|
doc.opQueue.busy is false
|
||||||
|
|
||||||
|
clearTimeout doc.reapTimer
|
||||||
|
doc.reapTimer = reapTimer = setTimeout ->
|
||||||
|
tryWriteSnapshot docName, ->
|
||||||
|
# If the reaping timeout has been refreshed while we're writing the snapshot, or if we're
|
||||||
|
# in the middle of applying an operation, don't reap.
|
||||||
|
delete docs[docName] if docs[docName].reapTimer is reapTimer and doc.opQueue.busy is false
|
||||||
|
, options.reapTime
|
||||||
|
|
||||||
|
tryWriteSnapshot = (docName, callback) ->
|
||||||
|
return callback?() unless db
|
||||||
|
|
||||||
|
doc = docs[docName]
|
||||||
|
|
||||||
|
# The doc is closed
|
||||||
|
return callback?() unless doc
|
||||||
|
|
||||||
|
# The document is already saved.
|
||||||
|
return callback?() if doc.committedVersion is doc.v
|
||||||
|
|
||||||
|
return callback? 'Another snapshot write is in progress' if doc.snapshotWriteLock
|
||||||
|
|
||||||
|
doc.snapshotWriteLock = true
|
||||||
|
|
||||||
|
options.stats?.writeSnapshot?()
|
||||||
|
|
||||||
|
writeSnapshot = db?.writeSnapshot or (docName, docData, dbMeta, callback) -> callback()
|
||||||
|
|
||||||
|
data =
|
||||||
|
v: doc.v
|
||||||
|
meta: doc.meta
|
||||||
|
snapshot: doc.snapshot
|
||||||
|
# The database doesn't know about object types.
|
||||||
|
type: doc.type.name
|
||||||
|
|
||||||
|
# Commit snapshot.
|
||||||
|
writeSnapshot docName, data, doc.dbMeta, (error, dbMeta) ->
|
||||||
|
doc.snapshotWriteLock = false
|
||||||
|
|
||||||
|
# We have to use data.v here because the version in the doc could
|
||||||
|
# have been updated between the call to writeSnapshot() and now.
|
||||||
|
doc.committedVersion = data.v
|
||||||
|
doc.dbMeta = dbMeta
|
||||||
|
|
||||||
|
callback? error
|
||||||
|
|
||||||
|
# *** Model interface methods
|
||||||
|
|
||||||
|
# Create a new document.
|
||||||
|
#
|
||||||
|
# data should be {snapshot, type, [meta]}. The version of a new document is 0.
|
||||||
|
@create = (docName, type, meta, callback) ->
|
||||||
|
[meta, callback] = [{}, meta] if typeof meta is 'function'
|
||||||
|
|
||||||
|
return callback? 'Invalid document name' if docName.match /\//
|
||||||
|
return callback? 'Document already exists' if docs[docName]
|
||||||
|
|
||||||
|
type = types[type] if typeof type == 'string'
|
||||||
|
return callback? 'Type not found' unless type
|
||||||
|
|
||||||
|
data =
|
||||||
|
snapshot:type.create()
|
||||||
|
type:type.name
|
||||||
|
meta:meta or {}
|
||||||
|
v:0
|
||||||
|
|
||||||
|
done = (error, dbMeta) ->
|
||||||
|
# dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something.
|
||||||
|
return callback? error if error
|
||||||
|
|
||||||
|
# From here on we'll store the object version of the type name.
|
||||||
|
data.type = type
|
||||||
|
add docName, null, data, 0, [], dbMeta
|
||||||
|
model.emit 'create', docName, data
|
||||||
|
callback?()
|
||||||
|
|
||||||
|
if db
|
||||||
|
db.create docName, data, done
|
||||||
|
else
|
||||||
|
done()
|
||||||
|
|
||||||
|
# Perminantly deletes the specified document.
|
||||||
|
# If listeners are attached, they are removed.
|
||||||
|
#
|
||||||
|
# The callback is called with (error) if there was an error. If error is null / undefined, the
|
||||||
|
# document was deleted.
|
||||||
|
#
|
||||||
|
# WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the
|
||||||
|
# deletion. Subsequent op submissions will fail).
|
||||||
|
@delete = (docName, callback) ->
|
||||||
|
doc = docs[docName]
|
||||||
|
|
||||||
|
if doc
|
||||||
|
clearTimeout doc.reapTimer
|
||||||
|
delete docs[docName]
|
||||||
|
|
||||||
|
done = (error) ->
|
||||||
|
model.emit 'delete', docName unless error
|
||||||
|
callback? error
|
||||||
|
|
||||||
|
if db
|
||||||
|
db.delete docName, doc?.dbMeta, done
|
||||||
|
else
|
||||||
|
done (if !doc then 'Document does not exist')
|
||||||
|
|
||||||
|
# This gets all operations from [start...end]. (That is, its not inclusive.)
|
||||||
|
#
|
||||||
|
# end can be null. This means 'get me all ops from start'.
|
||||||
|
#
|
||||||
|
# Each op returned is in the form {op:o, meta:m, v:version}.
|
||||||
|
#
|
||||||
|
# Callback is called with (error, [ops])
|
||||||
|
#
|
||||||
|
# If the document does not exist, getOps doesn't necessarily return an error. This is because
|
||||||
|
# its awkward to figure out whether or not the document exists for things
|
||||||
|
# like the redis database backend. I guess its a bit gross having this inconsistant
|
||||||
|
# with the other DB calls, but its certainly convenient.
|
||||||
|
#
|
||||||
|
# Use getVersion() to determine if a document actually exists, if thats what you're
|
||||||
|
# after.
|
||||||
|
@getOps = getOps = (docName, start, end, callback) ->
|
||||||
|
# getOps will only use the op cache if its there. It won't fill the op cache in.
|
||||||
|
throw new Error 'start must be 0+' unless start >= 0
|
||||||
|
|
||||||
|
[end, callback] = [null, end] if typeof end is 'function'
|
||||||
|
|
||||||
|
ops = docs[docName]?.ops
|
||||||
|
|
||||||
|
if ops
|
||||||
|
version = docs[docName].v
|
||||||
|
|
||||||
|
# Ops contains an array of ops. The last op in the list is the last op applied
|
||||||
|
end ?= version
|
||||||
|
start = Math.min start, end
|
||||||
|
|
||||||
|
return callback null, [] if start == end
|
||||||
|
|
||||||
|
# Base is the version number of the oldest op we have cached
|
||||||
|
base = version - ops.length
|
||||||
|
|
||||||
|
# If the database is null, we'll trim to the ops we do have and hope thats enough.
|
||||||
|
if start >= base or db is null
|
||||||
|
refreshReapingTimeout docName
|
||||||
|
options.stats?.cacheHit 'getOps'
|
||||||
|
|
||||||
|
return callback null, ops[(start - base)...(end - base)]
|
||||||
|
|
||||||
|
options.stats?.cacheMiss 'getOps'
|
||||||
|
|
||||||
|
getOpsInternal docName, start, end, callback
|
||||||
|
|
||||||
|
# Gets the snapshot data for the specified document.
|
||||||
|
# getSnapshot(docName, callback)
|
||||||
|
# Callback is called with (error, {v: <version>, type: <type>, snapshot: <snapshot>, meta: <meta>})
|
||||||
|
@getSnapshot = (docName, callback) ->
|
||||||
|
load docName, (error, doc) ->
|
||||||
|
callback error, if doc then {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta}
|
||||||
|
|
||||||
|
# Gets the latest version # of the document.
|
||||||
|
# getVersion(docName, callback)
|
||||||
|
# callback is called with (error, version).
|
||||||
|
@getVersion = (docName, callback) ->
|
||||||
|
load docName, (error, doc) -> callback error, doc?.v
|
||||||
|
|
||||||
|
# Apply an op to the specified document.
|
||||||
|
# The callback is passed (error, applied version #)
|
||||||
|
  # opData = {op:op, v:v, meta:metadata}
  #
  # Ops are queued before being applied so that the following code applies op C before op B:
  # model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB
  # model.applyOp 'doc', OPC
  @applyOp = (docName, opData, callback) ->
    # All the logic for this is in makeOpQueue, above.
    load docName, (error, doc) ->
      return callback error if error

      process.nextTick -> doc.opQueue opData, (error, newVersion) ->
        refreshReapingTimeout docName
        callback? error, newVersion

  # TODO: store (some) metadata in DB
  # TODO: op and meta should be combinable in the op that gets sent
  @applyMetaOp = (docName, metaOpData, callback) ->
    {path, value} = metaOpData.meta

    return callback? "path should be an array" unless isArray path

    load docName, (error, doc) ->
      if error?
        callback? error
      else
        applied = false
        switch path[0]
          when 'shout'
            doc.eventEmitter.emit 'op', metaOpData
            applied = true

        model.emit 'applyMetaOp', docName, path, value if applied
        callback? null, doc.v

  # Listen to all ops from the specified version. If version is in the past, all
  # ops since that version are sent immediately to the listener.
  #
  # The callback is called once the listener is attached, but before any ops have been passed
  # to the listener.
  #
  # This will _not_ edit the document metadata.
  #
  # If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour
  # might change in a future version.
  #
  # version is the document version at which the document is opened. It can be left out if you want to open
  # the document at the most recent version.
  #
  # listener is called with (opData) each time an op is applied.
  #
  # callback(error, openedVersion)
  @listen = (docName, version, listener, callback) ->
    [version, listener, callback] = [null, version, listener] if typeof version is 'function'

    load docName, (error, doc) ->
      return callback? error if error

      clearTimeout doc.reapTimer

      if version?
        getOps docName, version, null, (error, data) ->
          return callback? error if error

          doc.eventEmitter.on 'op', listener
          callback? null, version
          for op in data
            listener op

            # The listener may well remove itself during the catchup phase. If this happens, break early.
            # This is done in a quite inefficient way. (O(n) where n = #listeners on doc)
            break unless listener in doc.eventEmitter.listeners 'op'

      else # Version is null / undefined. Just add the listener.
        doc.eventEmitter.on 'op', listener
        callback? null, doc.v

  # Remove a listener for a particular document.
  #
  # removeListener(docName, listener)
  #
  # This is synchronous.
  @removeListener = (docName, listener) ->
    # The document should already be loaded.
    doc = docs[docName]
    throw new Error 'removeListener called but document not loaded' unless doc

    doc.eventEmitter.removeListener 'op', listener
    refreshReapingTimeout docName

  # Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed -
  # sharejs will happily replay uncommitted ops when documents are re-opened anyway.
  @flush = (callback) ->
    return callback?() unless db

    pendingWrites = 0

    for docName, doc of docs
      if doc.committedVersion < doc.v
        pendingWrites++
        # I'm hoping writeSnapshot will always happen in another thread.
        tryWriteSnapshot docName, ->
          process.nextTick ->
            pendingWrites--
            callback?() if pendingWrites is 0

    # If nothing was queued, terminate immediately.
    callback?() if pendingWrites is 0

  # Close the database connection. This is needed so nodejs can shut down cleanly.
  @closeDb = ->
    db?.close?()
    db = null

  return

# Model inherits from EventEmitter.
Model:: = new EventEmitter
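
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): a minimal usage sketch of the
# Model API above, assuming an in-memory store (db = null) and the 'simple' OT
# type defined later in this commit. The document name 'doc' is illustrative.
#
#   model = new Model null, {reapTime: 5000}
#   model.create 'doc', 'simple', (error) ->
#     model.listen 'doc', 0, ((opData) -> console.log 'op applied:', opData), (error, v) ->
#       model.applyOp 'doc', {op: {position: 0, text: 'hi'}, v: 0}, (error, appliedVersion) ->
#         console.log error or "applied at version #{appliedVersion}"
# ---------------------------------------------------------------------------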

603
services/document-updater/app/coffee/sharejs/server/model.coffee
Normal file
@@ -0,0 +1,603 @@
# The model of all the ops. Responsible for applying & transforming remote deltas
# and managing the storage layer.
#
# Actual storage is handled by the database wrappers in db/*, wrapped by DocCache

{EventEmitter} = require 'events'

queue = require './syncqueue'
types = require '../types'

isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]'

# This constructor creates a new Model object. There will be one model object
# per server context.
#
# The model object is responsible for a lot of things:
#
# - It manages the interactions with the database
# - It maintains (in memory) a set of all active documents
# - It calls out to the OT functions when necessary
#
# The model is an event emitter. It emits the following events:
#
# create(docName, data): A document has been created with the specified name & data
module.exports = Model = (db, options) ->
  # db can be null if the user doesn't want persistence.

  return new Model(db, options) if !(this instanceof Model)

  model = this

  options ?= {}

  # This is a cache of 'live' documents.
  #
  # The cache is a map from docName -> {
  #   ops:[{op, meta}]
  #   snapshot
  #   type
  #   v
  #   meta
  #   eventEmitter
  #   reapTimer
  #   committedVersion: v
  #   snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant
  #   dbMeta: database specific data
  #   opQueue: syncQueue for processing ops
  # }
  #
  # The ops list contains the document's last options.numCachedOps ops. (Or all
  # of them if we're using a memory store).
  #
  # Documents are stored in this set so long as the document has been accessed in
  # the last few seconds (options.reapTime) OR at least one client has the document
  # open. I don't know if I should keep open (but not being edited) documents live -
  # maybe if a client has a document open but the document isn't being edited, I should
  # flush it from the cache.
  #
  # In any case, the API to model is designed such that if we want to change that later
  # it should be pretty easy to do so without any external-to-the-model code changes.
  docs = {}

  # This is a map from docName -> [callback]. It is used when a document hasn't been
  # cached and multiple getSnapshot() / getVersion() requests come in. All requests
  # are added to the callback list and called when db.getSnapshot() returns.
  #
  # callback(error, snapshot data)
  awaitingGetSnapshot = {}

  # The time that documents which no clients have open will stay in the cache.
  # Should be > 0.
  options.reapTime ?= 3000

  # The number of operations the cache holds before reusing the space
  options.numCachedOps ?= 10

  # This option forces documents to be reaped, even when there's no database backend.
  # This is useful when you don't care about persistence and don't want to gradually
  # fill memory.
  #
  # You might want to set reapTime to a day or something.
  options.forceReaping ?= false

  # Until I come up with a better strategy, we'll save a copy of the document snapshot
  # to the database every ~20 submitted ops.
  options.opsBeforeCommit ?= 20

  # It takes some processing time to transform client ops. The server will punt ops back to the
  # client to transform if they're too old.
  options.maximumAge ?= 40
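
  # -------------------------------------------------------------------------
  # Editor's note (not part of the original file): the options above can be
  # overridden at construction time; the values shown are illustrative only.
  #
  #   model = new Model db,
  #     reapTime: 10000        # keep idle docs cached for 10s
  #     numCachedOps: 50       # remember more ops for transforming old clients
  #     opsBeforeCommit: 100   # write snapshots less often
  # -------------------------------------------------------------------------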

  # **** Cache API methods

  # It's important that all ops are applied in order. This helper method creates the op submission queue
  # for a single document. This contains the logic for transforming & applying ops.
  makeOpQueue = (docName, doc) -> queue (opData, callback) ->
    return callback 'Version missing' unless opData.v >= 0
    return callback 'Op at future version' if opData.v > doc.v

    # Punt the transforming work back to the client if the op is too old.
    return callback 'Op too old' if opData.v + options.maximumAge < doc.v

    opData.meta ||= {}
    opData.meta.ts = Date.now()

    # We'll need to transform the op to the current version of the document. This
    # calls the callback immediately if opVersion == doc.v.
    getOps docName, opData.v, doc.v, (error, ops) ->
      return callback error if error

      unless doc.v - opData.v == ops.length
        # This should never happen. It indicates that we didn't get all the ops we
        # asked for. It's important that the submitted op is correctly transformed.
        console.error "Could not get old ops in model for document #{docName}"
        console.error "Expected ops #{opData.v} to #{doc.v} and got #{ops.length} ops"
        return callback 'Internal error'

      if ops.length > 0
        try
          # If there's enough ops, it might be worth spinning this out into a webworker thread.
          for oldOp in ops
            # Dup detection works by sending the id(s) the op has been submitted with previously.
            # If the id matches, we reject it. The client can also detect the op has been submitted
            # already if it sees its own previous id in the ops it sees when it does catchup.
            if oldOp.meta.source and opData.dupIfSource and oldOp.meta.source in opData.dupIfSource
              return callback 'Op already submitted'

            opData.op = doc.type.transform opData.op, oldOp.op, 'left'
            opData.v++
        catch error
          console.error error.stack
          return callback error.message

      try
        snapshot = doc.type.apply doc.snapshot, opData.op
      catch error
        console.error error.stack
        return callback error.message

      # The op data should be at the current version, and the new document data should be at
      # the next version.
      #
      # This should never happen in practice, but it's a nice little check to make sure everything
      # is hunky-dory.
      unless opData.v == doc.v
        # This should never happen.
        console.error "Version mismatch detected in model. File a ticket - this is a bug."
        console.error "Expecting #{opData.v} == #{doc.v}"
        return callback 'Internal error'

      #newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta}
      writeOp = db?.writeOp or (docName, newOpData, callback) -> callback()

      writeOp docName, opData, (error) ->
        if error
          # The user should probably know about this.
          console.warn "Error writing ops to database: #{error}"
          return callback error

        options.stats?.writeOp?()

        # This is needed when we emit the 'change' event, below.
        oldSnapshot = doc.snapshot

        # All the heavy lifting is now done. Finally, we'll update the cache with the new data
        # and (maybe!) save a new document snapshot to the database.

        doc.v = opData.v + 1
        doc.snapshot = snapshot

        doc.ops.push opData
        doc.ops.shift() if db and doc.ops.length > options.numCachedOps

        model.emit 'applyOp', docName, opData, snapshot, oldSnapshot
        doc.eventEmitter.emit 'op', opData, snapshot, oldSnapshot

        # The callback is called with the version of the document at which the op was applied.
        # This is the op.v after transformation, and it's doc.v - 1.
        callback null, opData.v

        # I need a decent strategy here for deciding whether or not to save the snapshot.
        #
        # The 'right' strategy looks something like "Store the snapshot whenever the snapshot
        # is smaller than the accumulated op data". For now, I'll just store it every 20
        # ops or something. (Configurable with doc.committedVersion)
        if !doc.snapshotWriteLock and doc.committedVersion + options.opsBeforeCommit <= doc.v
          tryWriteSnapshot docName, (error) ->
            console.warn "Error writing snapshot #{error}. This is nonfatal" if error
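
  # -------------------------------------------------------------------------
  # Editor's note (not part of the original file): to illustrate the catchup
  # transform above using the 'simple' type from this commit - an op submitted
  # at version 1 while the doc is already at version 2 is transformed against
  # the op it missed before being applied. The require path is an assumption.
  #
  #   simple = require './types/simple'
  #   oldOp = {position: 0, text: 'AB'}     # already applied, took v1 -> v2
  #   newOp = {position: 1, text: 'x'}      # submitted against v1
  #   simple.transform newOp, oldOp, 'left' # => {position: 3, text: 'x'}
  # -------------------------------------------------------------------------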
  # Add the data for the given docName to the cache. The named document shouldn't already
  # exist in the doc set.
  #
  # Returns the new doc.
  add = (docName, error, data, committedVersion, ops, dbMeta) ->
    callbacks = awaitingGetSnapshot[docName]
    delete awaitingGetSnapshot[docName]

    if error
      callback error for callback in callbacks if callbacks
    else
      doc = docs[docName] =
        snapshot: data.snapshot
        v: data.v
        type: data.type
        meta: data.meta

        # Cache of ops
        ops: ops or []

        eventEmitter: new EventEmitter

        # Timer before the document will be invalidated from the cache (if the document has no
        # listeners)
        reapTimer: null

        # Version of the snapshot that's in the database
        committedVersion: committedVersion ? data.v
        snapshotWriteLock: false
        dbMeta: dbMeta

      doc.opQueue = makeOpQueue docName, doc

      refreshReapingTimeout docName
      model.emit 'add', docName, data
      callback null, doc for callback in callbacks if callbacks

    doc

  # This is a little helper wrapper around db.getOps. It does two things:
  #
  # - If there's no database set, it returns an error to the callback
  # - It adds version numbers to each op returned from the database
  # (These can be inferred from context so the DB doesn't store them, but it's useful to have them).
  getOpsInternal = (docName, start, end, callback) ->
    return callback? 'Document does not exist' unless db

    db.getOps docName, start, end, (error, ops) ->
      return callback? error if error

      v = start
      op.v = v++ for op in ops

      callback? null, ops

  # Load the named document into the cache. This function is re-entrant.
  #
  # The callback is called with (error, doc)
  load = (docName, callback) ->
    if docs[docName]
      # The document is already loaded. Return immediately.
      options.stats?.cacheHit? 'getSnapshot'
      return callback null, docs[docName]

    # We're a memory store. If we don't have it, nobody does.
    return callback 'Document does not exist' unless db

    callbacks = awaitingGetSnapshot[docName]

    # The document is being loaded already. Add ourselves as a callback.
    return callbacks.push callback if callbacks

    options.stats?.cacheMiss? 'getSnapshot'

    # The document isn't loaded and isn't being loaded. Load it.
    awaitingGetSnapshot[docName] = [callback]
    db.getSnapshot docName, (error, data, dbMeta) ->
      return add docName, error if error

      type = types[data.type]
      unless type
        console.warn "Type '#{data.type}' missing"
        return callback "Type not found"
      data.type = type

      committedVersion = data.v

      # The server can close without saving the most recent document snapshot.
      # In this case, there are extra ops which need to be applied before
      # returning the snapshot.
      getOpsInternal docName, data.v, null, (error, ops) ->
        return callback error if error

        if ops.length > 0
          console.log "Catchup #{docName} #{data.v} -> #{data.v + ops.length}"

          try
            for op in ops
              data.snapshot = type.apply data.snapshot, op.op
              data.v++
          catch e
            # This should never happen - it indicates that what's in the
            # database is invalid.
            console.error "Op data invalid for #{docName}: #{e.stack}"
            return callback 'Op data invalid'

        model.emit 'load', docName, data
        add docName, error, data, committedVersion, ops, dbMeta

  # This makes sure the cache contains a document. If the doc cache doesn't contain
  # a document, it is loaded from the database and stored.
  #
  # Documents are stored so long as either:
  # - They have been accessed within the past #{PERIOD}
  # - At least one client has the document open
  refreshReapingTimeout = (docName) ->
    doc = docs[docName]
    return unless doc

    # I want to let the clients list be updated before this is called.
    process.nextTick ->
      # This is an awkward way to find out the number of clients on a document. If this
      # causes performance issues, add a numClients field to the document.
      #
      # The first check is because it's possible that between refreshReapingTimeout being called and this
      # event being fired, someone called delete() on the document and hence the doc is something else now.
      if doc == docs[docName] and
          doc.eventEmitter.listeners('op').length == 0 and
          (db or options.forceReaping) and
          doc.opQueue.busy is false

        clearTimeout doc.reapTimer
        doc.reapTimer = reapTimer = setTimeout ->
            tryWriteSnapshot docName, ->
              # If the reaping timeout has been refreshed while we're writing the snapshot, or if we're
              # in the middle of applying an operation, don't reap.
              delete docs[docName] if docs[docName].reapTimer is reapTimer and doc.opQueue.busy is false
          , options.reapTime

  tryWriteSnapshot = (docName, callback) ->
    return callback?() unless db

    doc = docs[docName]

    # The doc is closed
    return callback?() unless doc

    # The document is already saved.
    return callback?() if doc.committedVersion is doc.v

    return callback? 'Another snapshot write is in progress' if doc.snapshotWriteLock

    doc.snapshotWriteLock = true

    options.stats?.writeSnapshot?()

    writeSnapshot = db?.writeSnapshot or (docName, docData, dbMeta, callback) -> callback()

    data =
      v: doc.v
      meta: doc.meta
      snapshot: doc.snapshot
      # The database doesn't know about object types.
      type: doc.type.name

    # Commit snapshot.
    writeSnapshot docName, data, doc.dbMeta, (error, dbMeta) ->
      doc.snapshotWriteLock = false

      # We have to use data.v here because the version in the doc could
      # have been updated between the call to writeSnapshot() and now.
      doc.committedVersion = data.v
      doc.dbMeta = dbMeta

      callback? error

  # *** Model interface methods

  # Create a new document.
  #
  # data should be {snapshot, type, [meta]}. The version of a new document is 0.
  @create = (docName, type, meta, callback) ->
    [meta, callback] = [{}, meta] if typeof meta is 'function'

    return callback? 'Invalid document name' if docName.match /\//
    return callback? 'Document already exists' if docs[docName]

    type = types[type] if typeof type == 'string'
    return callback? 'Type not found' unless type

    data =
      snapshot:type.create()
      type:type.name
      meta:meta or {}
      v:0

    done = (error, dbMeta) ->
      # dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something.
      return callback? error if error

      # From here on we'll store the object version of the type name.
      data.type = type
      add docName, null, data, 0, [], dbMeta
      model.emit 'create', docName, data
      callback?()

    if db
      db.create docName, data, done
    else
      done()

  # Permanently deletes the specified document.
  # If listeners are attached, they are removed.
  #
  # The callback is called with (error) if there was an error. If error is null / undefined, the
  # document was deleted.
  #
  # WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the
  # deletion. Subsequent op submissions will fail).
  @delete = (docName, callback) ->
    doc = docs[docName]

    if doc
      clearTimeout doc.reapTimer
      delete docs[docName]

    done = (error) ->
      model.emit 'delete', docName unless error
      callback? error

    if db
      db.delete docName, doc?.dbMeta, done
    else
      done (if !doc then 'Document does not exist')

  # This gets all operations from [start...end]. (That is, it's not inclusive.)
  #
  # end can be null. This means 'get me all ops from start'.
  #
  # Each op returned is in the form {op:o, meta:m, v:version}.
  #
  # Callback is called with (error, [ops])
  #
  # If the document does not exist, getOps doesn't necessarily return an error. This is because
  # it's awkward to figure out whether or not the document exists for things
  # like the redis database backend. I guess it's a bit gross having this inconsistent
  # with the other DB calls, but it's certainly convenient.
  #
  # Use getVersion() to determine if a document actually exists, if that's what you're
  # after.
  @getOps = getOps = (docName, start, end, callback) ->
    # getOps will only use the op cache if it's there. It won't fill the op cache in.
    throw new Error 'start must be 0+' unless start >= 0

    [end, callback] = [null, end] if typeof end is 'function'

    ops = docs[docName]?.ops

    if ops
      version = docs[docName].v

      # Ops contains an array of ops. The last op in the list is the last op applied
      end ?= version
      start = Math.min start, end

      return callback null, [] if start == end

      # Base is the version number of the oldest op we have cached
      base = version - ops.length

      # If the database is null, we'll trim to the ops we do have and hope that's enough.
      if start >= base or db is null
        refreshReapingTimeout docName
        options.stats?.cacheHit 'getOps'

        return callback null, ops[(start - base)...(end - base)]

    options.stats?.cacheMiss 'getOps'

    getOpsInternal docName, start, end, callback
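
  # -------------------------------------------------------------------------
  # Editor's note (not part of the original file): getOps uses a half-open
  # range, so for a document at version 3 whose cached ops are opA, opB, opC:
  #
  #   model.getOps 'doc', 0, 3, (error, ops) ->   # ops = [opA, opB, opC]
  #   model.getOps 'doc', 1, 2, (error, ops) ->   # ops = [opB]
  #   model.getOps 'doc', 2, (error, ops) ->      # end omitted: ops = [opC]
  # -------------------------------------------------------------------------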
  # Gets the snapshot data for the specified document.
  # getSnapshot(docName, callback)
  # Callback is called with (error, {v: <version>, type: <type>, snapshot: <snapshot>, meta: <meta>})
  @getSnapshot = (docName, callback) ->
    load docName, (error, doc) ->
      callback error, if doc then {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta}

  # Gets the latest version # of the document.
  # getVersion(docName, callback)
  # callback is called with (error, version).
  @getVersion = (docName, callback) ->
    load docName, (error, doc) -> callback error, doc?.v

  # Apply an op to the specified document.
  # The callback is passed (error, applied version #)
  # opData = {op:op, v:v, meta:metadata}
  #
  # Ops are queued before being applied so that the following code applies op C before op B:
  # model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB
  # model.applyOp 'doc', OPC
  @applyOp = (docName, opData, callback) ->
    # All the logic for this is in makeOpQueue, above.
    load docName, (error, doc) ->
      return callback error if error

      process.nextTick -> doc.opQueue opData, (error, newVersion) ->
        refreshReapingTimeout docName
        callback? error, newVersion

  # TODO: store (some) metadata in DB
  # TODO: op and meta should be combinable in the op that gets sent
  @applyMetaOp = (docName, metaOpData, callback) ->
    {path, value} = metaOpData.meta

    return callback? "path should be an array" unless isArray path

    load docName, (error, doc) ->
      if error?
        callback? error
      else
        applied = false
        switch path[0]
          when 'shout'
            doc.eventEmitter.emit 'op', metaOpData
            applied = true

        model.emit 'applyMetaOp', docName, path, value if applied
        callback? null, doc.v

  # Listen to all ops from the specified version. If version is in the past, all
  # ops since that version are sent immediately to the listener.
  #
  # The callback is called once the listener is attached, but before any ops have been passed
  # to the listener.
  #
  # This will _not_ edit the document metadata.
  #
  # If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour
  # might change in a future version.
  #
  # version is the document version at which the document is opened. It can be left out if you want to open
  # the document at the most recent version.
  #
  # listener is called with (opData) each time an op is applied.
  #
  # callback(error, openedVersion)
  @listen = (docName, version, listener, callback) ->
    [version, listener, callback] = [null, version, listener] if typeof version is 'function'

    load docName, (error, doc) ->
      return callback? error if error

      clearTimeout doc.reapTimer

      if version?
        getOps docName, version, null, (error, data) ->
          return callback? error if error

          doc.eventEmitter.on 'op', listener
          callback? null, version
          for op in data
            listener op

            # The listener may well remove itself during the catchup phase. If this happens, break early.
            # This is done in a quite inefficient way. (O(n) where n = #listeners on doc)
            break unless listener in doc.eventEmitter.listeners 'op'

      else # Version is null / undefined. Just add the listener.
        doc.eventEmitter.on 'op', listener
        callback? null, doc.v

  # Remove a listener for a particular document.
  #
  # removeListener(docName, listener)
  #
  # This is synchronous.
  @removeListener = (docName, listener) ->
    # The document should already be loaded.
    doc = docs[docName]
    throw new Error 'removeListener called but document not loaded' unless doc

    doc.eventEmitter.removeListener 'op', listener
    refreshReapingTimeout docName

  # Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed -
  # sharejs will happily replay uncommitted ops when documents are re-opened anyway.
  @flush = (callback) ->
    return callback?() unless db

    pendingWrites = 0

    for docName, doc of docs
      if doc.committedVersion < doc.v
        pendingWrites++
        # I'm hoping writeSnapshot will always happen in another thread.
        tryWriteSnapshot docName, ->
          process.nextTick ->
            pendingWrites--
            callback?() if pendingWrites is 0

    # If nothing was queued, terminate immediately.
    callback?() if pendingWrites is 0

  # Close the database connection. This is needed so nodejs can shut down cleanly.
  @closeDb = ->
    db?.close?()
    db = null

  return

# Model inherits from EventEmitter.
Model:: = new EventEmitter

@@ -0,0 +1,42 @@
# A synchronous processing queue. The queue calls process on the arguments,
# ensuring that process() is only executing once at a time.
#
# process(data, callback) _MUST_ eventually call its callback.
#
# Example:
#
# queue = require 'syncqueue'
#
# fn = queue (data, callback) ->
#   asyncthing data, ->
#     callback(321)
#
# fn(1)
# fn(2)
# fn(3, (result) -> console.log(result))
#
#   ^--- async thing will only be running once at any time.

module.exports = (process) ->
  throw new Error('process is not a function') unless typeof process == 'function'
  queue = []

  enqueue = (data, callback) ->
    queue.push [data, callback]
    flush()

  enqueue.busy = false

  flush = ->
    return if enqueue.busy or queue.length == 0

    enqueue.busy = true
    [data, callback] = queue.shift()
    process data, (result...) -> # TODO: Make this not use varargs - varargs are really slow.
      enqueue.busy = false
      # This is called after busy = false so a user can check if enqueue.busy is set in the callback.
      callback.apply null, result if callback
      flush()

  enqueue
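
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): a concrete runnable variant
# of the example in the header comment above, using setTimeout as the async
# work. Jobs are processed strictly one at a time, in submission order. The
# require path is an assumption.
#
#   queue = require './syncqueue'
#   fn = queue (data, callback) ->
#     setTimeout (-> callback data * 2), 100
#   fn 1, (result) -> console.log result   # 2
#   fn 2, (result) -> console.log result   # 4 (starts after the first job ends)
# ---------------------------------------------------------------------------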

38
services/document-updater/app/coffee/sharejs/simple.coffee
Normal file
@@ -0,0 +1,38 @@
# This is a really simple OT type. It's not compiled with the web client, but it could be.
#
# It's mostly included for demonstration purposes and it's used in a lot of unit tests.
#
# This defines a really simple text OT type which only allows inserts. (No deletes).
#
# Ops look like:
#   {position:#, text:"asdf"}
#
# Document snapshots look like:
#   {str:string}

module.exports =
  # The name of the OT type. The type is stored in types[type.name]. The name can be
  # used in place of the actual type in all the API methods.
  name: 'simple'

  # Create a new document snapshot
  create: -> {str:""}

  # Apply the given op to the document snapshot. Returns the new snapshot.
  #
  # The original snapshot should not be modified.
  apply: (snapshot, op) ->
    throw new Error 'Invalid position' unless 0 <= op.position <= snapshot.str.length

    str = snapshot.str
    str = str.slice(0, op.position) + op.text + str.slice(op.position)
    {str}

  # transform op1 by op2. Return transformed version of op1.
  # sym describes the symmetry of the op. It's 'left' or 'right' depending on whether the
  # op being transformed comes from the client or the server.
  transform: (op1, op2, sym) ->
    pos = op1.position
    pos += op2.text.length if op2.position < pos or (op2.position == pos and sym is 'left')

    return {position:pos, text:op1.text}
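
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): how apply and transform above
# behave for two concurrent inserts at the same position. The 'left' op is
# shifted past the other insert; the 'right' op keeps its position.
#
#   simple = require './simple'
#   simple.apply {str: 'abc'}, {position: 1, text: 'X'}   # => {str: 'aXbc'}
#   simple.transform {position: 1, text: 'X'}, {position: 1, text: 'YY'}, 'left'
#   # => {position: 3, text: 'X'}
#   simple.transform {position: 1, text: 'X'}, {position: 1, text: 'YY'}, 'right'
#   # => {position: 1, text: 'X'}
# ---------------------------------------------------------------------------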

@@ -0,0 +1,42 @@
# A synchronous processing queue. The queue calls process on the arguments,
# ensuring that process() is only executing once at a time.
#
# process(data, callback) _MUST_ eventually call its callback.
#
# Example:
#
# queue = require 'syncqueue'
#
# fn = queue (data, callback) ->
#   asyncthing data, ->
#     callback(321)
#
# fn(1)
# fn(2)
# fn(3, (result) -> console.log(result))
#
#   ^--- async thing will only be running once at any time.

module.exports = (process) ->
  throw new Error('process is not a function') unless typeof process == 'function'
  queue = []

  enqueue = (data, callback) ->
    queue.push [data, callback]
    flush()

  enqueue.busy = false

  flush = ->
    return if enqueue.busy or queue.length == 0

    enqueue.busy = true
    [data, callback] = queue.shift()
    process data, (result...) -> # TODO: Make this not use varargs - varargs are really slow.
      enqueue.busy = false
      # This is called after busy = false so a user can check if enqueue.busy is set in the callback.
      callback.apply null, result if callback
      flush()

  enqueue

32
services/document-updater/app/coffee/sharejs/text-api.coffee
Normal file
@@ -0,0 +1,32 @@
# Text document API for text

text = require './text' if typeof WEB is 'undefined'

text.api =
  provides: {text:true}

  # The number of characters in the string
  getLength: -> @snapshot.length

  # Get the text contents of a document
  getText: -> @snapshot

  insert: (pos, text, callback) ->
    op = [{p:pos, i:text}]

    @submitOp op, callback
    op

  del: (pos, length, callback) ->
    op = [{p:pos, d:@snapshot[pos...(pos + length)]}]

    @submitOp op, callback
    op

  _register: ->
    @on 'remoteop', (op) ->
      for component in op
        if component.i != undefined
          @emit 'insert', component.p, component.i
        else
          @emit 'delete', component.p, component.d
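
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): the insert/del methods above
# build single-component ops for the plain 'text' type and submit them. Here
# `doc` stands for an open document object that mixes in this API, and the
# snapshot is assumed to start as 'hello world':
#
#   doc.insert 6, 'brave '   # submits [{p: 6, i: 'brave '}]
#   doc.del 0, 5             # submits [{p: 0, d: 'hello'}]
#   doc.getText()            # returns the current snapshot string
# ---------------------------------------------------------------------------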

@@ -0,0 +1,43 @@
# Text document API for text

if WEB?
  type = exports.types['text-composable']
else
  type = require './text-composable'

type.api =
  provides: {'text':true}

  # The number of characters in the string
  'getLength': -> @snapshot.length

  # Get the text contents of a document
  'getText': -> @snapshot

  'insert': (pos, text, callback) ->
    op = type.normalize [pos, 'i':text, (@snapshot.length - pos)]

    @submitOp op, callback
    op

  'del': (pos, length, callback) ->
    op = type.normalize [pos, 'd':@snapshot[pos...(pos + length)], (@snapshot.length - pos - length)]

    @submitOp op, callback
    op

  _register: ->
    @on 'remoteop', (op) ->
      pos = 0
      for component in op
        if typeof component is 'number'
          pos += component
        else if component.i != undefined
          @emit 'insert', pos, component.i
          pos += component.i.length
        else
          # delete
          @emit 'delete', pos, component.d
          # We don't increment pos, because the position
          # specified is after the delete has happened.

@@ -0,0 +1,261 @@
# An alternate composable implementation for text. This is much closer
# to the implementation used by Google Wave.
#
# Ops are lists of components which iterate over the whole document.
# Components are either:
#   A number N: Skip N characters in the original document
#   {i:'str'}: Insert 'str' at the current position in the document
#   {d:'str'}: Delete 'str', which appears at the current position in the document
#
# Eg: [3, {i:'hi'}, 5, {d:'internet'}]
#
# Snapshots are strings.

p = -> #require('util').debug
i = -> #require('util').inspect

exports = if WEB? then {} else module.exports

exports.name = 'text-composable'

exports.create = -> ''

# -------- Utility methods

checkOp = (op) ->
  throw new Error('Op must be an array of components') unless Array.isArray(op)
  last = null
  for c in op
    if typeof(c) == 'object'
      throw new Error("Invalid op component: #{i c}") unless (c.i? && c.i.length > 0) or (c.d? && c.d.length > 0)
    else
      throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number'
      throw new Error('Skip components must be a positive number') unless c > 0
      throw new Error('Adjacent skip components should be added') if typeof(last) == 'number'

    last = c

# Makes a function for appending components to a given op.
# Exported for the randomOpGenerator.
exports._makeAppend = makeAppend = (op) -> (component) ->
  if component == 0 || component.i == '' || component.d == ''
    return
  else if op.length == 0
    op.push component
  else if typeof(component) == 'number' && typeof(op[op.length - 1]) == 'number'
    op[op.length - 1] += component
  else if component.i? && op[op.length - 1].i?
    op[op.length - 1].i += component.i
  else if component.d? && op[op.length - 1].d?
    op[op.length - 1].d += component.d
  else
    op.push component

  # checkOp op

# Makes 2 functions for taking components from the start of an op, and for peeking
# at the next op that could be taken.
makeTake = (op) ->
  # The index of the next component to take
  idx = 0
  # The offset into the component
  offset = 0

  # Take up to length n from the front of op. If n is null, take the next
  # op component. If indivisableField == 'd', delete components won't be separated.
  # If indivisableField == 'i', insert components won't be separated.
  take = (n, indivisableField) ->
    return null if idx == op.length
    #assert.notStrictEqual op.length, i, 'The op is too short to traverse the document'

    if typeof(op[idx]) == 'number'
      if !n? or op[idx] - offset <= n
        c = op[idx] - offset
        ++idx; offset = 0
        c
      else
        offset += n
        n
    else
      # Take from the string
      field = if op[idx].i then 'i' else 'd'
      c = {}
      if !n? or op[idx][field].length - offset <= n or field == indivisableField
        c[field] = op[idx][field][offset..]
        ++idx; offset = 0
      else
        c[field] = op[idx][field][offset...(offset + n)]
        offset += n
      c

  peekType = () ->
    op[idx]

  [take, peekType]

# Find and return the length of an op component
componentLength = (component) ->
  if typeof(component) == 'number'
    component
  else if component.i?
    component.i.length
  else
    component.d.length

# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
# adjacent inserts and deletes.
exports.normalize = (op) ->
  newOp = []
  append = makeAppend newOp
  append component for component in op
  newOp

# Apply the op to the string. Returns the new string.
exports.apply = (str, op) ->
  p "Applying #{i op} to '#{str}'"
  throw new Error('Snapshot should be a string') unless typeof(str) == 'string'
  checkOp op

  pos = 0
  newDoc = []

  for component in op
    if typeof(component) == 'number'
      throw new Error('The op is too long for this document') if component > str.length
      newDoc.push str[...component]
      str = str[component..]
    else if component.i?
      newDoc.push component.i
    else
      throw new Error("The deleted text '#{component.d}' doesn't match the next characters in the document '#{str[...component.d.length]}'") unless component.d == str[...component.d.length]
      str = str[component.d.length..]

  throw new Error("The applied op doesn't traverse the entire document") unless '' == str

  newDoc.join ''
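
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): applying the example op from
# the header comment. The components must traverse the whole document, so the
# skips and the delete text have to match up exactly. The require path is an
# assumption.
#
#   text = require './text-composable'
#   text.apply 'abcdefghinternet', [3, {i:'hi'}, 5, {d:'internet'}]
#   # => 'abchidefgh'
# ---------------------------------------------------------------------------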
# transform op1 by op2. Return transformed version of op1.
# op1 and op2 are unchanged by transform.
exports.transform = (op, otherOp, side) ->
  throw new Error "side (#{side}) must be 'left' or 'right'" unless side == 'left' or side == 'right'

  checkOp op
  checkOp otherOp
  newOp = []

  append = makeAppend newOp
  [take, peek] = makeTake op

  for component in otherOp
    if typeof(component) == 'number' # Skip
      length = component
      while length > 0
        chunk = take(length, 'i')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append chunk
        length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.i?
    else if component.i? # Insert
      if side == 'left'
        # The left insert should go first.
        o = peek()
        append take() if o?.i

      # Otherwise, skip the inserted text.
      append(component.i.length)
    else # Delete.
      #assert.ok component.d
      length = component.d.length
      while length > 0
        chunk = take(length, 'i')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        if typeof(chunk) == 'number'
          length -= chunk
        else if chunk.i?
          append(chunk)
        else
          #assert.ok chunk.d
          # The delete is unnecessary now.
          length -= chunk.d.length

  # Append extras from op1
  while (component = take())
    throw new Error "Remaining fragments in the op: #{i component}" unless component?.i?
    append component

  newOp
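
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): a worked transform. Against
# the document 'abc', op1 inserts 'X' at position 1 and op2 deletes the 'c';
# transforming op1 by op2 shortens its trailing skip to fit the shorter doc:
#
#   op1 = [1, {i:'X'}, 2]                # 'abc' -> 'aXbc'
#   op2 = [2, {d:'c'}]                   # 'abc' -> 'ab'
#   exports.transform op1, op2, 'left'   # => [1, {i:'X'}, 1]
#   # applying the result to 'ab' gives 'aXb'
# ---------------------------------------------------------------------------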

# Compose 2 ops into 1 op.
exports.compose = (op1, op2) ->
  p "COMPOSE #{i op1} + #{i op2}"
  checkOp op1
  checkOp op2

  result = []

  append = makeAppend result
  [take, _] = makeTake op1

  for component in op2
    if typeof(component) == 'number' # Skip
      length = component
      while length > 0
        chunk = take(length, 'd')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append chunk
        length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.d?

    else if component.i? # Insert
      append {i:component.i}

    else # Delete
      offset = 0
      while offset < component.d.length
        chunk = take(component.d.length - offset, 'd')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        # If it's a delete, append it. If it's a skip, drop it and decrease length. If it's an insert, check the strings match, drop it and decrease length.
        if typeof(chunk) == 'number'
          append {d:component.d[offset...(offset + chunk)]}
          offset += chunk
        else if chunk.i?
          throw new Error("The deleted text doesn't match the inserted text") unless component.d[offset...(offset + chunk.i.length)] == chunk.i
          offset += chunk.i.length
          # The ops cancel each other out.
        else
          # Delete
          append chunk

  # Append extras from op1
  while (component = take())
    throw new Error "Trailing stuff in op1 #{i component}" unless component?.d?
    append component

  result


invertComponent = (c) ->
  if typeof(c) == 'number'
    c
  else if c.i?
    {d:c.i}
  else
    {i:c.d}

# Invert an op
exports.invert = (op) ->
  result = []
  append = makeAppend result

  append(invertComponent component) for component in op

  result

if window?
  window.ot ||= {}
  window.ot.types ||= {}
  window.ot.types.text = exports

@@ -0,0 +1,89 @@
# Text document API for text-tp2

if WEB?
  type = exports.types['text-tp2']
else
  type = require './text-tp2'

{_takeDoc:takeDoc, _append:append} = type

appendSkipChars = (op, doc, pos, maxlength) ->
  while (maxlength == undefined || maxlength > 0) and pos.index < doc.data.length
    part = takeDoc doc, pos, maxlength, true
    maxlength -= part.length if maxlength != undefined and typeof part is 'string'
    append op, (part.length || part)

type['api'] =
  'provides': {'text':true}

  # The number of characters in the string
  'getLength': -> @snapshot.charLength

  # Flatten a document into a string
  'getText': ->
    strings = (elem for elem in @snapshot.data when typeof elem is 'string')
    strings.join ''

  'insert': (pos, text, callback) ->
    pos = 0 if pos == undefined

    op = []
    docPos = {index:0, offset:0}

    appendSkipChars op, @snapshot, docPos, pos
    append op, {'i':text}
    appendSkipChars op, @snapshot, docPos

    @submitOp op, callback
    op

  'del': (pos, length, callback) ->
    op = []
    docPos = {index:0, offset:0}

    appendSkipChars op, @snapshot, docPos, pos

    while length > 0
      part = takeDoc @snapshot, docPos, length, true
      if typeof part is 'string'
        append op, {'d':part.length}
        length -= part.length
      else
        append op, part

    appendSkipChars op, @snapshot, docPos

    @submitOp op, callback
    op

  '_register': ->
    # Interpret received ops + generate more detailed events for them
    @on 'remoteop', (op, snapshot) ->
      textPos = 0
      docPos = {index:0, offset:0}

      for component in op
        if typeof component is 'number'
          # Skip
          remainder = component
          while remainder > 0
            part = takeDoc snapshot, docPos, remainder
            if typeof part is 'string'
              textPos += part.length
            remainder -= part.length || part
        else if component.i != undefined
          # Insert
          if typeof component.i is 'string'
            @emit 'insert', textPos, component.i
            textPos += component.i.length
        else
          # Delete
          remainder = component.d
          while remainder > 0
            part = takeDoc snapshot, docPos, remainder
            if typeof part is 'string'
              @emit 'delete', textPos, part
            remainder -= part.length || part

      return

322
services/document-updater/app/coffee/sharejs/text-tp2.coffee
Normal file
@@ -0,0 +1,322 @@
# A TP2 implementation of text, following this spec:
# http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README
#
# A document is made up of a string and a set of tombstones inserted throughout
# the string. For example, 'some ', (2 tombstones), 'string'.
#
# This is encoded in a document as: {s:'some string', t:[5, -2, 6]}
#
# Ops are lists of components which iterate over the whole document.
# Components are either:
#   N: Skip N characters in the original document
#   {i:'str'}: Insert 'str' at the current position in the document
#   {i:N}: Insert N tombstones at the current position in the document
#   {d:N}: Delete (tombstone) N characters at the current position in the document
#
# Eg: [3, {i:'hi'}, 5, {d:8}]
#
# Snapshots are lists with characters and tombstones. Characters are stored in strings
# and adjacent tombstones are flattened into numbers.
#
# Eg, the document: 'Hello .....world' ('.' denotes tombstoned (deleted) characters)
# would be represented by a document snapshot of ['Hello ', 5, 'world']
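
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): deserializing the snapshot
# from the example above (via `type`, defined just below) fills in the cached
# lengths - charLength counts only visible characters, totalLength includes
# tombstones:
#
#   doc = type.deserialize ['Hello ', 5, 'world']
#   doc.charLength    # => 11
#   doc.totalLength   # => 16
# ---------------------------------------------------------------------------
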
type =
  name: 'text-tp2'
  tp2: true
  create: -> {charLength:0, totalLength:0, positionCache:[], data:[]}
  serialize: (doc) ->
    throw new Error 'invalid doc snapshot' unless doc.data
    doc.data
  deserialize: (data) ->
    doc = type.create()
    doc.data = data

    for component in data
      if typeof component is 'string'
        doc.charLength += component.length
        doc.totalLength += component.length
      else
        doc.totalLength += component

    doc


checkOp = (op) ->
  throw new Error('Op must be an array of components') unless Array.isArray(op)
  last = null
  for c in op
    if typeof(c) == 'object'
      if c.i != undefined
        throw new Error('Inserts must insert a string or a +ive number') unless (typeof(c.i) == 'string' and c.i.length > 0) or (typeof(c.i) == 'number' and c.i > 0)
      else if c.d != undefined
        throw new Error('Deletes must be a +ive number') unless typeof(c.d) == 'number' and c.d > 0
      else
        throw new Error('Operation component must define .i or .d')
    else
      throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number'
      throw new Error('Skip components must be a positive number') unless c > 0
      throw new Error('Adjacent skip components should be combined') if typeof(last) == 'number'

    last = c

# Take the next part from the specified position in a document snapshot.
# position = {index, offset}. It will be updated.
type._takeDoc = takeDoc = (doc, position, maxlength, tombsIndivisible) ->
  throw new Error 'Operation goes past the end of the document' if position.index >= doc.data.length

  part = doc.data[position.index]
  # peel off data[0]
  result = if typeof(part) == 'string'
    if maxlength != undefined
      part[position.offset...(position.offset + maxlength)]
    else
      part[position.offset...]
  else
    if maxlength == undefined or tombsIndivisible
      part - position.offset
    else
      Math.min(maxlength, part - position.offset)

  resultLen = result.length || result

  if (part.length || part) - position.offset > resultLen
    position.offset += resultLen
  else
    position.index++
    position.offset = 0

  result

# Append a part to the end of a document
type._appendDoc = appendDoc = (doc, p) ->
  return if p == 0 or p == ''

  if typeof p is 'string'
    doc.charLength += p.length
    doc.totalLength += p.length
  else
    doc.totalLength += p

  data = doc.data
  if data.length == 0
    data.push p
  else if typeof(data[data.length - 1]) == typeof(p)
    data[data.length - 1] += p
  else
    data.push p
  return

# Apply the op to the document. The document is not modified in the process.
type.apply = (doc, op) ->
  unless doc.totalLength != undefined and doc.charLength != undefined and doc.data.length != undefined
    throw new Error('Snapshot is invalid')

  checkOp op

  newDoc = type.create()
  position = {index:0, offset:0}

  for component in op
    if typeof(component) is 'number'
      remainder = component
      while remainder > 0
        part = takeDoc doc, position, remainder

        appendDoc newDoc, part
        remainder -= part.length || part

    else if component.i != undefined
      appendDoc newDoc, component.i
    else if component.d != undefined
      remainder = component.d
      while remainder > 0
        part = takeDoc doc, position, remainder
        remainder -= part.length || part
      appendDoc newDoc, component.d

  newDoc

# Append an op component to the end of the specified op.
# Exported for the randomOpGenerator.
type._append = append = (op, component) ->
  if component == 0 || component.i == '' || component.i == 0 || component.d == 0
    return
  else if op.length == 0
    op.push component
  else
    last = op[op.length - 1]
    if typeof(component) == 'number' && typeof(last) == 'number'
      op[op.length - 1] += component
    else if component.i != undefined && last.i? && typeof(last.i) == typeof(component.i)
      last.i += component.i
    else if component.d != undefined && last.d?
      last.d += component.d
    else
      op.push component

# Makes 2 functions for taking components from the start of an op, and for peeking
# at the next op that could be taken.
makeTake = (op) ->
  # The index of the next component to take
  index = 0
  # The offset into the component
  offset = 0

  # Take up to length maxlength from the op. If maxlength is not defined, there is no max.
  # If insertsIndivisible is true, inserts (& insert tombstones) won't be separated.
  #
  # Returns null when op is fully consumed.
  take = (maxlength, insertsIndivisible) ->
    return null if index == op.length

    e = op[index]
    if typeof((current = e)) == 'number' or typeof((current = e.i)) == 'number' or (current = e.d) != undefined
      if !maxlength? or current - offset <= maxlength or (insertsIndivisible and e.i != undefined)
        # Return the rest of the current element.
        c = current - offset
        ++index; offset = 0
      else
        offset += maxlength
        c = maxlength
      if e.i != undefined then {i:c} else if e.d != undefined then {d:c} else c
    else
      # Take from the inserted string
      if !maxlength? or e.i.length - offset <= maxlength or insertsIndivisible
        result = {i:e.i[offset..]}
        ++index; offset = 0
      else
        result = {i:e.i[offset...offset + maxlength]}
        offset += maxlength
      result

  peekType = -> op[index]

  [take, peekType]

# Find and return the length of an op component
componentLength = (component) ->
  if typeof(component) == 'number'
    component
  else if typeof(component.i) == 'string'
    component.i.length
  else
    # This should work because c.d and c.i must be +ive.
    component.d or component.i

# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
# adjacent inserts and deletes.
type.normalize = (op) ->
  newOp = []
  append newOp, component for component in op
  newOp
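
# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): normalize above drops empty
# components and merges adjacent ones of the same kind via _append:
#
#   type.normalize [2, 3, {i:'ab'}, {i:'cd'}, {d:0}]
#   # => [5, {i:'abcd'}]
# ---------------------------------------------------------------------------
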
# This is a helper method to transform and prune. goForwards is true for transform, false for prune.
transformer = (op, otherOp, goForwards, side) ->
  checkOp op
  checkOp otherOp
  newOp = []

  [take, peek] = makeTake op

  for component in otherOp
    length = componentLength component

    if component.i != undefined # Insert text or tombs
      if goForwards # transform - insert skips over inserted parts
        if side == 'left'
          # The left insert should go first.
          append newOp, take() while peek()?.i != undefined

        # In any case, skip the inserted text.
        append newOp, length

      else # Prune. Remove skips for inserts.
        while length > 0
          chunk = take length, true

          throw new Error 'The transformed op is invalid' unless chunk != null
          throw new Error 'The transformed op deletes locally inserted characters - it cannot be purged of the insert.' if chunk.d != undefined

          if typeof chunk is 'number'
            length -= chunk
          else
            append newOp, chunk

    else # Skip or delete
      while length > 0
        chunk = take length, true
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append newOp, chunk
        length -= componentLength chunk unless chunk.i

  # Append extras from op1
  while (component = take())
    throw new Error "Remaining fragments in the op: #{component}" unless component.i != undefined
    append newOp, component

  newOp

# transform op1 by op2. Return transformed version of op1.
# op1 and op2 are unchanged by transform.
# side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op.
type.transform = (op, otherOp, side) ->
  throw new Error "side (#{side}) should be 'left' or 'right'" unless side == 'left' or side == 'right'
  transformer op, otherOp, true, side

# Prune is the inverse of transform.
type.prune = (op, otherOp) -> transformer op, otherOp, false

# Compose 2 ops into 1 op.
type.compose = (op1, op2) ->
  return op2 if op1 == null or op1 == undefined

  checkOp op1
  checkOp op2

  result = []

  [take, _] = makeTake op1

  for component in op2

    if typeof(component) == 'number' # Skip
      # Just copy from op1.
      length = component
      while length > 0
        chunk = take length
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append result, chunk
        length -= componentLength chunk

    else if component.i != undefined # Insert
      append result, {i:component.i}

    else # Delete
      length = component.d
      while length > 0
        chunk = take length
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        chunkLength = componentLength chunk
        if chunk.i != undefined
          append result, {i:chunkLength}
        else
          append result, {d:chunkLength}

        length -= chunkLength
||||||
|
|
||||||
|
# Append extras from op1
|
||||||
|
while (component = take())
|
||||||
|
throw new Error "Remaining fragments in op1: #{component}" unless component.i != undefined
|
||||||
|
append result, component
|
||||||
|
|
||||||
|
result
|
||||||
|
|
||||||
|
if WEB?
|
||||||
|
exports.types['text-tp2'] = type
|
||||||
|
else
|
||||||
|
module.exports = type
|
||||||
|
|
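# A minimal compose sketch (illustrative values, assumed valid under checkOp):
# deleting in TP2 turns characters into tombstones instead of removing them.
#
#   op1 = [{i:'abc'}]       # insert 'abc'
#   op2 = [1, {d:1}, 1]     # keep 'a', delete 'b', keep 'c'
#   type.compose op1, op2   # => [{i:'a'}, {i:1}, {i:'c'}]
#
# The {i:1} component is a length-1 insert tombstone: 'b' leaves the visible
# text but keeps its position, which is what keeps transform and prune exact.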
209
services/document-updater/app/coffee/sharejs/text.coffee
Normal file

@ -0,0 +1,209 @@
# A simple text implementation
#
# Operations are lists of components.
# Each component either inserts or deletes at a specified position in the document.
#
# Components are either:
#   {i:'str', p:100}: Insert 'str' at position 100 in the document
#   {d:'str', p:100}: Delete 'str' at position 100 in the document
#
# Components in an operation are executed sequentially, so the position of components
# assumes previous components have already executed.
#
# Eg: This op:
#   [{i:'abc', p:0}]
# is equivalent to this op:
#   [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}]

# NOTE: The global scope here is shared with other sharejs files when built with closure.
# Be careful what ends up in your namespace.

text = {}

text.name = 'text'

text.create = -> ''

strInject = (s1, pos, s2) -> s1[...pos] + s2 + s1[pos..]

checkValidComponent = (c) ->
  throw new Error 'component missing position field' if typeof c.p != 'number'

  i_type = typeof c.i
  d_type = typeof c.d
  throw new Error 'component needs an i or d field' unless (i_type == 'string') ^ (d_type == 'string')

  throw new Error 'position cannot be negative' unless c.p >= 0

checkValidOp = (op) ->
  checkValidComponent(c) for c in op
  true

text.apply = (snapshot, op) ->
  checkValidOp op
  for component in op
    if component.i?
      snapshot = strInject snapshot, component.p, component.i
    else
      deleted = snapshot[component.p...(component.p + component.d.length)]
      throw new Error "Delete component '#{component.d}' does not match deleted text '#{deleted}'" unless component.d == deleted
      snapshot = snapshot[...component.p] + snapshot[(component.p + component.d.length)..]

  snapshot

# Exported for use by the random op generator.
#
# For simplicity, this version of append does not compress adjacent inserts and deletes of
# the same text. It would be nice to change that at some stage.
text._append = append = (newOp, c) ->
  return if c.i == '' or c.d == ''
  if newOp.length == 0
    newOp.push c
  else
    last = newOp[newOp.length - 1]

    # Compose the insert into the previous insert if possible
    if last.i? && c.i? and last.p <= c.p <= (last.p + last.i.length)
      newOp[newOp.length - 1] = {i:strInject(last.i, c.p - last.p, c.i), p:last.p}
    else if last.d? && c.d? and c.p <= last.p <= (c.p + c.d.length)
      newOp[newOp.length - 1] = {d:strInject(c.d, last.p - c.p, last.d), p:c.p}
    else
      newOp.push c

text.compose = (op1, op2) ->
  checkValidOp op1
  checkValidOp op2

  newOp = op1.slice()
  append newOp, c for c in op2

  newOp

# Attempt to compress the op components together 'as much as possible'.
# This implementation preserves order and preserves create/delete pairs.
text.compress = (op) -> text.compose [], op

text.normalize = (op) ->
  newOp = []

  # Normalize should allow ops which are a single (unwrapped) component:
  # {i:'asdf', p:23}.
  # There's no good way to test if something is an array:
  # http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/
  # so this is probably the least bad solution.
  op = [op] if op.i? or op.p?

  for c in op
    c.p ?= 0
    append newOp, c

  newOp

# This helper method transforms a position by an op component.
#
# If c is an insert, insertAfter specifies whether the transform
# is pushed after the insert (true) or before it (false).
#
# insertAfter is optional for deletes.
transformPosition = (pos, c, insertAfter) ->
  if c.i?
    if c.p < pos || (c.p == pos && insertAfter)
      pos + c.i.length
    else
      pos
  else
    # I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length))
    # but I think it's harder to read that way, and it compiles using ternary operators anyway
    # so it's no slower written like this.
    if pos <= c.p
      pos
    else if pos <= c.p + c.d.length
      c.p
    else
      pos - c.d.length

# Helper method to transform a cursor position as a result of an op.
#
# Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position
# is pushed after an insert (true) or before it (false).
text.transformCursor = (position, op, side) ->
  insertAfter = side == 'right'
  position = transformPosition position, c, insertAfter for c in op
  position
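# A quick cursor sketch (illustrative): with op = [{i:'ab', p:1}],
#   text.transformCursor 1, op, 'left'    # => 1 (cursor stays before the insert)
#   text.transformCursor 1, op, 'right'   # => 3 (cursor is pushed after it)
# and with op = [{d:'abc', p:1}],
#   text.transformCursor 5, op, 'left'    # => 2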
# Transform an op component by another op component. Asymmetric.
# The result will be appended to destination.
#
# exported for use in JSON type
text._tc = transformComponent = (dest, c, otherC, side) ->
  checkValidOp [c]
  checkValidOp [otherC]

  if c.i?
    append dest, {i:c.i, p:transformPosition(c.p, otherC, side == 'right')}

  else # Delete
    if otherC.i? # delete vs insert
      s = c.d
      if c.p < otherC.p
        append dest, {d:s[...otherC.p - c.p], p:c.p}
        s = s[(otherC.p - c.p)..]
      if s != ''
        append dest, {d:s, p:c.p + otherC.i.length}

    else # Delete vs delete
      if c.p >= otherC.p + otherC.d.length
        append dest, {d:c.d, p:c.p - otherC.d.length}
      else if c.p + c.d.length <= otherC.p
        append dest, c
      else
        # They overlap somewhere.
        newC = {d:'', p:c.p}
        if c.p < otherC.p
          newC.d = c.d[...(otherC.p - c.p)]
        if c.p + c.d.length > otherC.p + otherC.d.length
          newC.d += c.d[(otherC.p + otherC.d.length - c.p)..]

        # This is entirely optional - just for a check that the deleted
        # text in the two ops matches
        intersectStart = Math.max c.p, otherC.p
        intersectEnd = Math.min c.p + c.d.length, otherC.p + otherC.d.length
        cIntersect = c.d[intersectStart - c.p...intersectEnd - c.p]
        otherIntersect = otherC.d[intersectStart - otherC.p...intersectEnd - otherC.p]
        throw new Error 'Delete ops delete different text in the same region of the document' unless cIntersect == otherIntersect

        if newC.d != ''
          # This could be rewritten similarly to insert v delete, above.
          newC.p = transformPosition newC.p, otherC
          append dest, newC

  dest

invertComponent = (c) ->
  if c.i?
    {d:c.i, p:c.p}
  else
    {i:c.d, p:c.p}

# No need to use append for invert, because the components won't be able to
# cancel with one another.
text.invert = (op) -> (invertComponent c for c in op.slice().reverse())

if WEB?
  exports.types ||= {}

  # This is kind of awful - come up with a better way to hook this helper code up.
  bootstrapTransform(text, transformComponent, checkValidOp, append)

  # [] is used to prevent closure from renaming types.text
  exports.types.text = text
else
  module.exports = text

  # The text type really shouldn't need this - it should be possible to define
  # an efficient transform function by making a sort of transform map and passing each
  # op component through it.
  require('./helpers').bootstrapTransform(text, transformComponent, checkValidOp, append)
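# A minimal usage sketch for the text type (illustrative values):
#
#   text.apply 'abc', [{i:'X', p:1}]     # => 'aXbc'
#   text.apply 'aXbc', [{d:'X', p:1}]    # => 'abc'
#
# Transforming concurrent inserts at the same position shows the left/right
# tie break (transform is added by bootstrapTransform above):
#
#   text.transform [{i:'X', p:1}], [{i:'Y', p:1}], 'left'    # => [{i:'X', p:1}]
#   text.transform [{i:'X', p:1}], [{i:'Y', p:1}], 'right'   # => [{i:'X', p:2}]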
@ -0,0 +1,22 @@
# This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment]

exports.name = 'count'
exports.create = -> 1

exports.apply = (snapshot, op) ->
  [v, inc] = op
  throw new Error "Op #{v} != snapshot #{snapshot}" unless snapshot == v
  snapshot + inc

# transform op1 by op2. Return transformed version of op1.
exports.transform = (op1, op2) ->
  throw new Error "Op1 #{op1[0]} != op2 #{op2[0]}" unless op1[0] == op2[0]
  [op1[0] + op2[1], op1[1]]

exports.compose = (op1, op2) ->
  throw new Error "Op1 #{op1} + 1 != op2 #{op2}" unless op1[0] + op1[1] == op2[0]
  [op1[0], op1[1] + op2[1]]

exports.generateRandomOp = (doc) ->
  [[doc, 1], doc + 1]
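# A minimal sketch of the count type (illustrative; `count` is this module):
#
#   count.apply 1, [1, 2]            # => 3
#   count.transform [3, 5], [3, 2]   # => [5, 5]
#   count.compose [1, 2], [3, 4]     # => [1, 6]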
@ -0,0 +1,65 @@
# These methods let you build a transform function from a transformComponent function
# for OT types like text and JSON in which operations are lists of components
# and transforming them requires N^2 work.

# Add transform and transformX functions for an OT type which has transformComponent defined.
# transformComponent(destination array, component, other component, side)
exports['_bt'] = bootstrapTransform = (type, transformComponent, checkValidOp, append) ->
  transformComponentX = (left, right, destLeft, destRight) ->
    transformComponent destLeft, left, right, 'left'
    transformComponent destRight, right, left, 'right'

  # Transforms rightOp by leftOp. Returns [leftOp', rightOp'].
  type.transformX = type['transformX'] = transformX = (leftOp, rightOp) ->
    checkValidOp leftOp
    checkValidOp rightOp

    newRightOp = []

    for rightComponent in rightOp
      # Generate newLeftOp by composing leftOp by rightComponent
      newLeftOp = []

      k = 0
      while k < leftOp.length
        nextC = []
        transformComponentX leftOp[k], rightComponent, newLeftOp, nextC
        k++

        if nextC.length == 1
          rightComponent = nextC[0]
        else if nextC.length == 0
          append newLeftOp, l for l in leftOp[k..]
          rightComponent = null
          break
        else
          # Recurse.
          [l_, r_] = transformX leftOp[k..], nextC
          append newLeftOp, l for l in l_
          append newRightOp, r for r in r_
          rightComponent = null
          break

      append newRightOp, rightComponent if rightComponent?
      leftOp = newLeftOp

    [leftOp, newRightOp]

  # Transforms op with specified type ('left' or 'right') by otherOp.
  type.transform = type['transform'] = (op, otherOp, type) ->
    throw new Error "type must be 'left' or 'right'" unless type == 'left' or type == 'right'

    return op if otherOp.length == 0

    # TODO: Benchmark with and without this line. I _think_ it'll make a big difference...?
    return transformComponent [], op[0], otherOp[0], type if op.length == 1 and otherOp.length == 1

    if type == 'left'
      [left, _] = transformX op, otherOp
      left
    else
      [_, right] = transformX otherOp, op
      right

if typeof WEB is 'undefined'
  exports.bootstrapTransform = bootstrapTransform
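# A transformX sketch using the text type (illustrative; any bootstrapped type
# gets this method):
#
#   [left, right] = text.transformX [{i:'a', p:0}], [{i:'b', p:0}]
#   left    # => [{i:'a', p:0}]   (the left op is unchanged)
#   right   # => [{i:'b', p:1}]   (the right insert is shifted past it)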
@ -0,0 +1,15 @@
register = (file) ->
  type = require file
  exports[type.name] = type
  try require "#{file}-api"

# Import all the built-in types.
register './simple'
register './count'

register './text'
register './text-composable'
register './text-tp2'

register './json'
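# A registry sketch (the require path is illustrative - it depends on where
# this index file lives):
#
#   types = require './types'
#   types.text.name       # => 'text'
#   types.count.create()  # => 1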
@ -0,0 +1,180 @@
# API for JSON OT

json = require './json' if typeof WEB is 'undefined'

if WEB?
  extendDoc = exports.extendDoc
  exports.extendDoc = (name, fn) ->
    SubDoc::[name] = fn
    extendDoc name, fn

depath = (path) ->
  if path.length == 1 and path[0].constructor == Array
    path[0]
  else path

class SubDoc
  constructor: (@doc, @path) ->
  at: (path...) -> @doc.at @path.concat depath path
  get: -> @doc.getAt @path
  # for objects and lists
  set: (value, cb) -> @doc.setAt @path, value, cb
  # for strings and lists.
  insert: (pos, value, cb) -> @doc.insertAt @path, pos, value, cb
  # for strings
  del: (pos, length, cb) -> @doc.deleteTextAt @path, length, pos, cb
  # for objects and lists
  remove: (cb) -> @doc.removeAt @path, cb
  push: (value, cb) -> @insert @get().length, value, cb
  move: (from, to, cb) -> @doc.moveAt @path, from, to, cb
  add: (amount, cb) -> @doc.addAt @path, amount, cb
  on: (event, cb) -> @doc.addListener @path, event, cb
  removeListener: (l) -> @doc.removeListener l

  # text API compatibility
  getLength: -> @get().length
  getText: -> @get()

traverse = (snapshot, path) ->
  container = data:snapshot
  key = 'data'
  elem = container
  for p in path
    elem = elem[key]
    key = p
    throw new Error 'bad path' if typeof elem == 'undefined'
  {elem, key}

pathEquals = (p1, p2) ->
  return false if p1.length != p2.length
  for e,i in p1
    return false if e != p2[i]
  true

json.api =
  provides: {json:true}

  at: (path...) -> new SubDoc this, depath path

  get: -> @snapshot
  set: (value, cb) -> @setAt [], value, cb

  getAt: (path) ->
    {elem, key} = traverse @snapshot, path
    return elem[key]

  setAt: (path, value, cb) ->
    {elem, key} = traverse @snapshot, path
    op = {p:path}
    if elem.constructor == Array
      op.li = value
      op.ld = elem[key] if typeof elem[key] != 'undefined'
    else if typeof elem == 'object'
      op.oi = value
      op.od = elem[key] if typeof elem[key] != 'undefined'
    else throw new Error 'bad path'
    @submitOp [op], cb

  removeAt: (path, cb) ->
    {elem, key} = traverse @snapshot, path
    throw new Error 'no element at that path' unless typeof elem[key] != 'undefined'
    op = {p:path}
    if elem.constructor == Array
      op.ld = elem[key]
    else if typeof elem == 'object'
      op.od = elem[key]
    else throw new Error 'bad path'
    @submitOp [op], cb

  insertAt: (path, pos, value, cb) ->
    {elem, key} = traverse @snapshot, path
    op = {p:path.concat pos}
    if elem[key].constructor == Array
      op.li = value
    else if typeof elem[key] == 'string'
      op.si = value
    @submitOp [op], cb

  moveAt: (path, from, to, cb) ->
    op = [{p:path.concat(from), lm:to}]
    @submitOp op, cb

  addAt: (path, amount, cb) ->
    op = [{p:path, na:amount}]
    @submitOp op, cb

  deleteTextAt: (path, length, pos, cb) ->
    {elem, key} = traverse @snapshot, path
    op = [{p:path.concat(pos), sd:elem[key][pos...(pos + length)]}]
    @submitOp op, cb

  addListener: (path, event, cb) ->
    l = {path, event, cb}
    @_listeners.push l
    l
  removeListener: (l) ->
    i = @_listeners.indexOf l
    return false if i < 0
    @_listeners.splice i, 1
    return true
  _register: ->
    @_listeners = []
    @on 'change', (op) ->
      for c in op
        if c.na != undefined or c.si != undefined or c.sd != undefined
          # no change to structure
          continue
        to_remove = []
        for l, i in @_listeners
          # Transform a dummy op by the incoming op to work out what
          # should happen to the listener.
          dummy = {p:l.path, na:0}
          xformed = @type.transformComponent [], dummy, c, 'left'
          if xformed.length == 0
            # The op was transformed to noop, so we should delete the listener.
            to_remove.push i
          else if xformed.length == 1
            # The op remained, so grab its new path into the listener.
            l.path = xformed[0].p
          else
            throw new Error "Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components."
        to_remove.sort (a, b) -> b - a
        for i in to_remove
          @_listeners.splice i, 1
    @on 'remoteop', (op) ->
      for c in op
        match_path = if c.na == undefined then c.p[...c.p.length-1] else c.p
        for {path, event, cb} in @_listeners
          if pathEquals path, match_path
            switch event
              when 'insert'
                if c.li != undefined and c.ld == undefined
                  cb(c.p[c.p.length-1], c.li)
                else if c.oi != undefined and c.od == undefined
                  cb(c.p[c.p.length-1], c.oi)
                else if c.si != undefined
                  cb(c.p[c.p.length-1], c.si)
              when 'delete'
                if c.li == undefined and c.ld != undefined
                  cb(c.p[c.p.length-1], c.ld)
                else if c.oi == undefined and c.od != undefined
                  cb(c.p[c.p.length-1], c.od)
                else if c.sd != undefined
                  cb(c.p[c.p.length-1], c.sd)
              when 'replace'
                if c.li != undefined and c.ld != undefined
                  cb(c.p[c.p.length-1], c.ld, c.li)
                else if c.oi != undefined and c.od != undefined
                  cb(c.p[c.p.length-1], c.od, c.oi)
              when 'move'
                if c.lm != undefined
                  cb(c.p[c.p.length-1], c.lm)
              when 'add'
                if c.na != undefined
                  cb(c.na)
          else if (common = @type.commonPath match_path, path)?
            if event == 'child op'
              if match_path.length == path.length == common
                throw new Error "paths match length and have commonality, but aren't equal?"
              child_path = c.p[common+1..]
              cb(child_path, c)
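# A SubDoc sketch (illustrative; assumes `doc` is an open document whose OT
# type is json, so this api is mixed in):
#
#   doc.at('users').push {name: 'jane'}, (error) ->
#     doc.at('users', 0, 'name').get()   # => 'jane'
#   doc.at('title').insert 0, 'Draft: ', (error) ->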
441
services/document-updater/app/coffee/sharejs/types/json.coffee
Normal file

@ -0,0 +1,441 @@
# This is the implementation of the JSON OT type.
#
# Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations

if WEB?
  text = exports.types.text
else
  text = require './text'

json = {}

json.name = 'json'

json.create = -> null

json.invertComponent = (c) ->
  c_ = {p: c.p}
  c_.sd = c.si if c.si != undefined
  c_.si = c.sd if c.sd != undefined
  c_.od = c.oi if c.oi != undefined
  c_.oi = c.od if c.od != undefined
  c_.ld = c.li if c.li != undefined
  c_.li = c.ld if c.ld != undefined
  c_.na = -c.na if c.na != undefined
  if c.lm != undefined
    c_.lm = c.p[c.p.length-1]
    c_.p = c.p[0...c.p.length - 1].concat([c.lm])
  c_

json.invert = (op) -> json.invertComponent c for c in op.slice().reverse()

json.checkValidOp = (op) ->

isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]'
json.checkList = (elem) ->
  throw new Error 'Referenced element not a list' unless isArray(elem)

json.checkObj = (elem) ->
  throw new Error "Referenced element not an object (it was #{JSON.stringify elem})" unless elem.constructor is Object

json.apply = (snapshot, op) ->
  json.checkValidOp op
  op = clone op

  container = {data: clone snapshot}

  try
    for c, i in op
      parent = null
      parentkey = null
      elem = container
      key = 'data'

      for p in c.p
        parent = elem
        parentkey = key
        elem = elem[key]
        key = p

      throw new Error 'Path invalid' unless parent?

      if c.na != undefined
        # Number add
        throw new Error 'Referenced element not a number' unless typeof elem[key] is 'number'
        elem[key] += c.na

      else if c.si != undefined
        # String insert
        throw new Error "Referenced element not a string (it was #{JSON.stringify elem})" unless typeof elem is 'string'
        parent[parentkey] = elem[...key] + c.si + elem[key..]
      else if c.sd != undefined
        # String delete
        throw new Error 'Referenced element not a string' unless typeof elem is 'string'
        throw new Error 'Deleted string does not match' unless elem[key...key + c.sd.length] == c.sd
        parent[parentkey] = elem[...key] + elem[key + c.sd.length..]

      else if c.li != undefined && c.ld != undefined
        # List replace
        json.checkList elem

        # Should check the list element matches c.ld
        elem[key] = c.li
      else if c.li != undefined
        # List insert
        json.checkList elem

        elem.splice key, 0, c.li
      else if c.ld != undefined
        # List delete
        json.checkList elem

        # Should check the list element matches c.ld here too.
        elem.splice key, 1
      else if c.lm != undefined
        # List move
        json.checkList elem
        if c.lm != key
          e = elem[key]
          # Remove it...
          elem.splice key, 1
          # And insert it back.
          elem.splice c.lm, 0, e

      else if c.oi != undefined
        # Object insert / replace
        json.checkObj elem

        # Should check that elem[key] == c.od
        elem[key] = c.oi
      else if c.od != undefined
        # Object delete
        json.checkObj elem

        # Should check that elem[key] == c.od
        delete elem[key]
      else
        throw new Error 'invalid / missing instruction in op'
  catch error
    # TODO: Roll back all already applied changes. Write tests before implementing this code.
    throw error

  container.data

# Checks if two paths, p1 and p2 match.
json.pathMatches = (p1, p2, ignoreLast) ->
  return false unless p1.length == p2.length

  for p, i in p1
    return false if p != p2[i] and (!ignoreLast or i != p1.length - 1)

  true

json.append = (dest, c) ->
  c = clone c
  if dest.length != 0 and json.pathMatches c.p, (last = dest[dest.length - 1]).p
    if last.na != undefined and c.na != undefined
      dest[dest.length - 1] = { p: last.p, na: last.na + c.na }
    else if last.li != undefined and c.li == undefined and c.ld == last.li
      # insert immediately followed by delete becomes a noop.
      if last.ld != undefined
        # leave the delete part of the replace
        delete last.li
      else
        dest.pop()
    else if last.od != undefined and last.oi == undefined and
        c.oi != undefined and c.od == undefined
      last.oi = c.oi
    else if c.lm != undefined and c.p[c.p.length-1] == c.lm
      null # don't do anything
    else
      dest.push c
  else
    dest.push c
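# An append sketch (illustrative): adjacent components against the same path
# are merged or cancelled rather than accumulated.
#
#   dest = [{p:['score'], na:1}]
#   json.append dest, {p:['score'], na:2}
#   dest   # => [{p:['score'], na:3}]
#   json.append dest, {p:[0], li:'x'}
#   json.append dest, {p:[0], ld:'x'}   # insert then delete of the same value
#   dest   # => [{p:['score'], na:3}]   (the li/ld pair cancelled)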
json.compose = (op1, op2) ->
  json.checkValidOp op1
  json.checkValidOp op2

  newOp = clone op1
  json.append newOp, c for c in op2

  newOp

json.normalize = (op) ->
  newOp = []

  op = [op] unless isArray op

  for c in op
    c.p ?= []
    json.append newOp, c

  newOp

# hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming
# we have browser support for JSON.
# http://jsperf.com/cloning-an-object/12
clone = (o) -> JSON.parse(JSON.stringify o)

json.commonPath = (p1, p2) ->
  p1 = p1.slice()
  p2 = p2.slice()
  p1.unshift('data')
  p2.unshift('data')
  p1 = p1[...p1.length-1]
  p2 = p2[...p2.length-1]
  return -1 if p2.length == 0
  i = 0
  while p1[i] == p2[i] && i < p1.length
    i++
    if i == p2.length
      return i-1
  return

# transform c so it applies to a document with otherC applied.
json.transformComponent = (dest, c, otherC, type) ->
  c = clone c
  c.p.push(0) if c.na != undefined
  otherC.p.push(0) if otherC.na != undefined

  common = json.commonPath c.p, otherC.p
  common2 = json.commonPath otherC.p, c.p

  cplength = c.p.length
  otherCplength = otherC.p.length

  c.p.pop() if c.na != undefined # hax
  otherC.p.pop() if otherC.na != undefined

  if otherC.na
    if common2? && otherCplength >= cplength && otherC.p[common2] == c.p[common2]
      if c.ld != undefined
        oc = clone otherC
        oc.p = oc.p[cplength..]
        c.ld = json.apply clone(c.ld), [oc]
      else if c.od != undefined
        oc = clone otherC
        oc.p = oc.p[cplength..]
        c.od = json.apply clone(c.od), [oc]
    json.append dest, c
    return dest

  if common2? && otherCplength > cplength && c.p[common2] == otherC.p[common2]
    # transform based on c
    if c.ld != undefined
      oc = clone otherC
      oc.p = oc.p[cplength..]
      c.ld = json.apply clone(c.ld), [oc]
    else if c.od != undefined
      oc = clone otherC
      oc.p = oc.p[cplength..]
      c.od = json.apply clone(c.od), [oc]

  if common?
    commonOperand = cplength == otherCplength
    # transform based on otherC
    if otherC.na != undefined
      # this case is handled above due to icky path hax
    else if otherC.si != undefined || otherC.sd != undefined
      # String op vs string op - pass through to text type
      if c.si != undefined || c.sd != undefined
        throw new Error("must be a string?") unless commonOperand

        # Convert an op component to a text op component
        convert = (component) ->
          newC = p:component.p[component.p.length - 1]
          if component.si
            newC.i = component.si
          else
            newC.d = component.sd
          newC

        tc1 = convert c
        tc2 = convert otherC

        res = []
        text._tc res, tc1, tc2, type
        for tc in res
          jc = { p: c.p[...common] }
          jc.p.push(tc.p)
          jc.si = tc.i if tc.i?
          jc.sd = tc.d if tc.d?
          json.append dest, jc
        return dest
    else if otherC.li != undefined && otherC.ld != undefined
      if otherC.p[common] == c.p[common]
        # noop
        if !commonOperand
          # we're below the deleted element, so -> noop
          return dest
        else if c.ld != undefined
          # we're trying to delete the same element, -> noop
          if c.li != undefined and type == 'left'
            # we're both replacing one element with another. only one can
            # survive!
            c.ld = clone otherC.li
          else
            return dest
    else if otherC.li != undefined
      if c.li != undefined and c.ld == undefined and commonOperand and c.p[common] == otherC.p[common]
        # in li vs. li, left wins.
        if type == 'right'
          c.p[common]++
      else if otherC.p[common] <= c.p[common]
        c.p[common]++

      if c.lm != undefined
        if commonOperand
          # otherC edits the same list we edit
          if otherC.p[common] <= c.lm
            c.lm++
          # changing c.from is handled above.
    else if otherC.ld != undefined
      if c.lm != undefined
        if commonOperand
          if otherC.p[common] == c.p[common]
            # they deleted the thing we're trying to move
            return dest
          # otherC edits the same list we edit
          p = otherC.p[common]
          from = c.p[common]
          to = c.lm
          if p < to || (p == to && from < to)
            c.lm--

      if otherC.p[common] < c.p[common]
        c.p[common]--
      else if otherC.p[common] == c.p[common]
        if otherCplength < cplength
          # we're below the deleted element, so -> noop
          return dest
        else if c.ld != undefined
          if c.li != undefined
            # we're replacing, they're deleting. we become an insert.
            delete c.ld
          else
            # we're trying to delete the same element, -> noop
            return dest
    else if otherC.lm != undefined
      if c.lm != undefined and cplength == otherCplength
        # lm vs lm, here we go!
        from = c.p[common]
        to = c.lm
        otherFrom = otherC.p[common]
        otherTo = otherC.lm
        if otherFrom != otherTo
          # if otherFrom == otherTo, we don't need to change our op.

          # where did my thing go?
          if from == otherFrom
            # they moved it! tie break.
            if type == 'left'
              c.p[common] = otherTo
              if from == to # ugh
                c.lm = otherTo
            else
              return dest
          else
            # they moved around it
            if from > otherFrom
              c.p[common]--
            if from > otherTo
              c.p[common]++
            else if from == otherTo
              if otherFrom > otherTo
                c.p[common]++
                if from == to # ugh, again
                  c.lm++

            # step 2: where am i going to put it?
            if to > otherFrom
              c.lm--
            else if to == otherFrom
              if to > from
                c.lm--
            if to > otherTo
              c.lm++
            else if to == otherTo
              # if we're both moving in the same direction, tie break
              if (otherTo > otherFrom and to > from) or
                  (otherTo < otherFrom and to < from)
                if type == 'right'
                  c.lm++
              else
                if to > from
                  c.lm++
                else if to == otherFrom
                  c.lm--
      else if c.li != undefined and c.ld == undefined and commonOperand
        # li
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if p > from
          c.p[common]--
        if p > to
          c.p[common]++
      else
        # ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath
        # the lm
        #
        # i.e. things care about where their item is after the move.
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if p == from
          c.p[common] = to
        else
          if p > from
            c.p[common]--
          if p > to
            c.p[common]++
          else if p == to
            if from > to
              c.p[common]++
    else if otherC.oi != undefined && otherC.od != undefined
      if c.p[common] == otherC.p[common]
        if c.oi != undefined and commonOperand
          # we inserted where someone else replaced
          if type == 'right'
            # left wins
            return dest
          else
            # we win, make our op replace what they inserted
            c.od = otherC.oi
        else
          # -> noop if the other component is deleting the same object (or any
          # parent)
          return dest
    else if otherC.oi != undefined
      if c.oi != undefined and c.p[common] == otherC.p[common]
        # left wins if we try to insert at the same place
        if type == 'left'
          json.append dest, {p:c.p, od:otherC.oi}
        else
          return dest
    else if otherC.od != undefined
      if c.p[common] == otherC.p[common]
        return dest if !commonOperand
        if c.oi != undefined
          delete c.od
        else
          return dest

  json.append dest, c
  return dest

if WEB?
  exports.types ||= {}

  # This is kind of awful - come up with a better way to hook this helper code up.
  exports._bt(json, json.transformComponent, json.checkValidOp, json.append)

  # [] is used to prevent closure from renaming types.text
  exports.types.json = json
else
  module.exports = json

  require('./helpers').bootstrapTransform(json, json.transformComponent, json.checkValidOp, json.append)
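# A minimal apply/invert sketch for the json type (illustrative values):
#
#   json.apply [1, 2], [{p:[1], li:99}]      # => [1, 99, 2]
#   json.apply {a: 1}, [{p:['a'], na:41}]    # => {a: 42}
#   json.invert [{p:[1], li:99}]             # => [{p:[1], ld:99}]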
603
services/document-updater/app/coffee/sharejs/types/model.coffee
Normal file

@ -0,0 +1,603 @@
# The model of all the ops. Responsible for applying & transforming remote deltas
# and managing the storage layer.
#
# Actual storage is handled by the database wrappers in db/*, wrapped by DocCache

{EventEmitter} = require 'events'

queue = require './syncqueue'
types = require '../types'

isArray = (o) -> Object.prototype.toString.call(o) == '[object Array]'

# This constructor creates a new Model object. There will be one model object
# per server context.
#
# The model object is responsible for a lot of things:
#
# - It manages the interactions with the database
# - It maintains (in memory) a set of all active documents
# - It calls out to the OT functions when necessary
#
# The model is an event emitter. It emits the following events:
#
# create(docName, data): A document has been created with the specified name & data
module.exports = Model = (db, options) ->
  # db can be null if the user doesn't want persistence.

  return new Model(db, options) if !(this instanceof Model)

  model = this

  options ?= {}

  # This is a cache of 'live' documents.
  #
  # The cache is a map from docName -> {
  #   ops: [{op, meta}]
  #   snapshot
  #   type
  #   v
  #   meta
  #   eventEmitter
  #   reapTimer
  #   committedVersion: v
  #   snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant
  #   dbMeta: database specific data
  #   opQueue: syncQueue for processing ops
  # }
  #
  # The ops list contains the document's last options.numCachedOps ops. (Or all
  # of them if we're using a memory store).
  #
  # Documents are stored in this set so long as the document has been accessed in
  # the last few seconds (options.reapTime) OR at least one client has the document
  # open. I don't know if I should keep open (but not being edited) documents live -
  # maybe if a client has a document open but the document isn't being edited, I should
  # flush it from the cache.
  #
  # In any case, the API to model is designed such that if we want to change that later
  # it should be pretty easy to do so without any external-to-the-model code changes.
  docs = {}

  # This is a map from docName -> [callback]. It is used when a document hasn't been
  # cached and multiple getSnapshot() / getVersion() requests come in. All requests
  # are added to the callback list and called when db.getSnapshot() returns.
  #
  # callback(error, snapshot data)
  awaitingGetSnapshot = {}

  # The time that documents which no clients have open will stay in the cache.
  # Should be > 0.
  options.reapTime ?= 3000

  # The number of operations the cache holds before reusing the space
  options.numCachedOps ?= 10

  # This option forces documents to be reaped, even when there's no database backend.
  # This is useful when you don't care about persistence and don't want to gradually
  # fill memory.
  #
  # You might want to set reapTime to a day or something.
  options.forceReaping ?= false

  # Until I come up with a better strategy, we'll save a copy of the document snapshot
  # to the database every ~20 submitted ops.
  options.opsBeforeCommit ?= 20

  # It takes some processing time to transform client ops. The server will punt ops back to the
  # client to transform if they're too old.
  options.maximumAge ?= 40

  # **** Cache API methods

  # It's important that all ops are applied in order. This helper method creates the op submission queue
  # for a single document. This contains the logic for transforming & applying ops.
  makeOpQueue = (docName, doc) -> queue (opData, callback) ->
    return callback 'Version missing' unless opData.v >= 0
    return callback 'Op at future version' if opData.v > doc.v

    # Punt the transforming work back to the client if the op is too old.
    return callback 'Op too old' if opData.v + options.maximumAge < doc.v

    opData.meta ||= {}
    opData.meta.ts = Date.now()

    # We'll need to transform the op to the current version of the document. This
    # calls the callback immediately if opVersion == doc.v.
    getOps docName, opData.v, doc.v, (error, ops) ->
      return callback error if error

      unless doc.v - opData.v == ops.length
        # This should never happen. It indicates that we didn't get all the ops we
        # asked for. It's important that the submitted op is correctly transformed.
        console.error "Could not get old ops in model for document #{docName}"
        console.error "Expected ops #{opData.v} to #{doc.v} and got #{ops.length} ops"
        return callback 'Internal error'

      if ops.length > 0
        try
          # If there's enough ops, it might be worth spinning this out into a webworker thread.
          for oldOp in ops
            # Dup detection works by sending the id(s) the op has been submitted with previously.
            # If the id matches, we reject it. The client can also detect the op has been submitted
            # already if it sees its own previous id in the ops it sees when it does catchup.
            if oldOp.meta.source and opData.dupIfSource and oldOp.meta.source in opData.dupIfSource
              return callback 'Op already submitted'

            opData.op = doc.type.transform opData.op, oldOp.op, 'left'
            opData.v++
        catch error
          console.error error.stack
          return callback error.message

      try
        snapshot = doc.type.apply doc.snapshot, opData.op
      catch error
        console.error error.stack
        return callback error.message

      # The op data should be at the current version, and the new document data should be at
      # the next version.
      #
      # This should never happen in practice, but it's a nice little check to make sure everything
      # is hunky-dory.
      unless opData.v == doc.v
        # This should never happen.
        console.error "Version mismatch detected in model. File a ticket - this is a bug."
        console.error "Expecting #{opData.v} == #{doc.v}"
        return callback 'Internal error'

      #newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta}
      writeOp = db?.writeOp or (docName, newOpData, callback) -> callback()

      writeOp docName, opData, (error) ->
        if error
          # The user should probably know about this.
          console.warn "Error writing ops to database: #{error}"
          return callback error

        options.stats?.writeOp?()

        # This is needed when we emit the 'change' event, below.
        oldSnapshot = doc.snapshot

        # All the heavy lifting is now done. Finally, we'll update the cache with the new data
        # and (maybe!) save a new document snapshot to the database.

        doc.v = opData.v + 1
        doc.snapshot = snapshot

        doc.ops.push opData
        doc.ops.shift() if db and doc.ops.length > options.numCachedOps

        model.emit 'applyOp', docName, opData, snapshot, oldSnapshot
        doc.eventEmitter.emit 'op', opData, snapshot, oldSnapshot

        # The callback is called with the version of the document at which the op was applied.
        # This is the op.v after transformation, and it's doc.v - 1.
        callback null, opData.v

        # I need a decent strategy here for deciding whether or not to save the snapshot.
        #
        # The 'right' strategy looks something like "Store the snapshot whenever the snapshot
        # is smaller than the accumulated op data". For now, I'll just store it every 20
        # ops or something. (Configurable with doc.committedVersion)
        if !doc.snapshotWriteLock and doc.committedVersion + options.opsBeforeCommit <= doc.v
          tryWriteSnapshot docName, (error) ->
            console.warn "Error writing snapshot #{error}. This is nonfatal" if error
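  # An enqueue sketch (illustrative; assumes the syncqueue helper returns a
  # function that accepts (data, callback) pairs, as used via doc.opQueue):
  #
  #   doc.opQueue {v: 3, op: [{i:'x', p:0}], meta: {}}, (error, appliedVersion) ->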
||||||
|
# Add the data for the given docName to the cache. The named document shouldn't already
|
||||||
|
# exist in the doc set.
|
||||||
|
#
|
||||||
|
# Returns the new doc.
|
||||||
|
add = (docName, error, data, committedVersion, ops, dbMeta) ->
|
||||||
|
callbacks = awaitingGetSnapshot[docName]
|
||||||
|
delete awaitingGetSnapshot[docName]
|
||||||
|
|
||||||
|
if error
|
||||||
|
callback error for callback in callbacks if callbacks
|
||||||
|
else
|
||||||
|
doc = docs[docName] =
|
||||||
|
snapshot: data.snapshot
|
||||||
|
v: data.v
|
||||||
|
type: data.type
|
||||||
|
meta: data.meta
|
||||||
|
|
||||||
|
# Cache of ops
|
||||||
|
ops: ops or []
|
||||||
|
|
||||||
|
eventEmitter: new EventEmitter
|
||||||
|
|
||||||
|
# Timer before the document will be invalidated from the cache (if the document has no
|
||||||
|
# listeners)
|
||||||
|
reapTimer: null
|
||||||
|
|
||||||
|
# Version of the snapshot thats in the database
|
||||||
|
committedVersion: committedVersion ? data.v
|
||||||
|
snapshotWriteLock: false
|
||||||
|
dbMeta: dbMeta
|
||||||
|
|
||||||
|
doc.opQueue = makeOpQueue docName, doc
|
||||||
|
|
||||||
|
refreshReapingTimeout docName
|
||||||
|
model.emit 'add', docName, data
|
||||||
|
callback null, doc for callback in callbacks if callbacks
|
||||||
|
|
||||||
|
doc
|
||||||
|
|
||||||
|
# This is a little helper wrapper around db.getOps. It does two things:
|
||||||
|
#
|
||||||
|
# - If there's no database set, it returns an error to the callback
|
||||||
|
# - It adds version numbers to each op returned from the database
|
||||||
|
# (These can be inferred from context so the DB doesn't store them, but its useful to have them).
|
||||||
|
getOpsInternal = (docName, start, end, callback) ->
|
||||||
|
return callback? 'Document does not exist' unless db
|
||||||
|
|
||||||
|
db.getOps docName, start, end, (error, ops) ->
|
||||||
|
return callback? error if error
|
||||||
|
|
||||||
|
v = start
|
||||||
|
op.v = v++ for op in ops
|
||||||
|
|
||||||
|
callback? null, ops
|
||||||
|
|
||||||
|
# Load the named document into the cache. This function is re-entrant.
|
||||||
|
#
|
||||||
|
# The callback is called with (error, doc)
|
||||||
|
load = (docName, callback) ->
|
||||||
|
if docs[docName]
|
||||||
|
# The document is already loaded. Return immediately.
|
||||||
|
options.stats?.cacheHit? 'getSnapshot'
|
||||||
|
return callback null, docs[docName]
|
||||||
|
|
||||||
|
# We're a memory store. If we don't have it, nobody does.
|
||||||
|
return callback 'Document does not exist' unless db
|
||||||
|
|
||||||
|
callbacks = awaitingGetSnapshot[docName]
|
||||||
|
|
||||||
|
# The document is being loaded already. Add ourselves as a callback.
|
||||||
|
return callbacks.push callback if callbacks
|
||||||
|
|
||||||
|
options.stats?.cacheMiss? 'getSnapshot'
|
||||||
|
|
||||||
|
# The document isn't loaded and isn't being loaded. Load it.
|
||||||
|
awaitingGetSnapshot[docName] = [callback]
|
||||||
|
db.getSnapshot docName, (error, data, dbMeta) ->
|
||||||
|
return add docName, error if error
|
||||||
|
|
||||||
|
type = types[data.type]
|
||||||
|
unless type
|
||||||
|
console.warn "Type '#{data.type}' missing"
|
||||||
|
return callback "Type not found"
|
||||||
|
data.type = type
|
||||||
|
|
||||||
|
committedVersion = data.v
|
||||||
|
|
||||||
|
# The server can close without saving the most recent document snapshot.
|
||||||
|
# In this case, there are extra ops which need to be applied before
|
||||||
|
# returning the snapshot.
|
||||||
|
getOpsInternal docName, data.v, null, (error, ops) ->
|
||||||
|
return callback error if error
|
||||||
|
|
||||||
|
if ops.length > 0
|
||||||
|
console.log "Catchup #{docName} #{data.v} -> #{data.v + ops.length}"
|
||||||
|
|
||||||
|
try
|
||||||
|
for op in ops
|
||||||
|
data.snapshot = type.apply data.snapshot, op.op
|
||||||
|
data.v++
|
||||||
|
catch e
|
||||||
|
# This should never happen - it indicates that whats in the
|
||||||
|
# database is invalid.
|
||||||
|
console.error "Op data invalid for #{docName}: #{e.stack}"
|
||||||
|
return callback 'Op data invalid'
|
||||||
|
|
||||||
|
model.emit 'load', docName, data
|
||||||
|
add docName, error, data, committedVersion, ops, dbMeta
|
||||||
|
|
||||||
|
# This makes sure the cache contains a document. If the doc cache doesn't contain
|
||||||
|
# a document, it is loaded from the database and stored.
|
||||||
|
#
|
||||||
|
# Documents are stored so long as either:
|
||||||
|
# - They have been accessed within the past #{PERIOD}
|
||||||
|
# - At least one client has the document open
|
||||||
|
refreshReapingTimeout = (docName) ->
|
||||||
|
doc = docs[docName]
|
||||||
|
return unless doc
|
||||||
|
|
||||||
|
# I want to let the clients list be updated before this is called.
|
||||||
|
process.nextTick ->
|
||||||
|
# This is an awkward way to find out the number of clients on a document. If this
|
||||||
|
# causes performance issues, add a numClients field to the document.
|
||||||
|
#
|
||||||
|
# The first check is because its possible that between refreshReapingTimeout being called and this
|
||||||
|
# event being fired, someone called delete() on the document and hence the doc is something else now.
|
||||||
|
if doc == docs[docName] and
|
||||||
|
doc.eventEmitter.listeners('op').length == 0 and
|
||||||
|
(db or options.forceReaping) and
|
||||||
|
doc.opQueue.busy is false
|
||||||
|
|
||||||
|
clearTimeout doc.reapTimer
|
||||||
|
doc.reapTimer = reapTimer = setTimeout ->
|
||||||
|
tryWriteSnapshot docName, ->
|
||||||
|
# If the reaping timeout has been refreshed while we're writing the snapshot, or if we're
|
||||||
|
# in the middle of applying an operation, don't reap.
|
||||||
|
delete docs[docName] if docs[docName].reapTimer is reapTimer and doc.opQueue.busy is false
|
||||||
|
, options.reapTime
|
||||||
|
|
||||||
|
tryWriteSnapshot = (docName, callback) ->
|
||||||
|
return callback?() unless db
|
||||||
|
|
||||||
|
doc = docs[docName]
|
||||||
|
|
||||||
|
# The doc is closed
|
||||||
|
return callback?() unless doc
|
||||||
|
|
||||||
|
# The document is already saved.
|
||||||
|
return callback?() if doc.committedVersion is doc.v
|
||||||
|
|
||||||
|
return callback? 'Another snapshot write is in progress' if doc.snapshotWriteLock
|
||||||
|
|
||||||
|
doc.snapshotWriteLock = true
|
||||||
|
|
||||||
|
options.stats?.writeSnapshot?()
|
||||||
|
|
||||||
|
writeSnapshot = db?.writeSnapshot or (docName, docData, dbMeta, callback) -> callback()
|
||||||
|
|
||||||
|
data =
|
||||||
|
v: doc.v
|
||||||
|
meta: doc.meta
|
||||||
|
snapshot: doc.snapshot
|
||||||
|
# The database doesn't know about object types.
|
||||||
|
type: doc.type.name
|
||||||
|
|
||||||
|
# Commit snapshot.
|
||||||
|
writeSnapshot docName, data, doc.dbMeta, (error, dbMeta) ->
|
||||||
|
doc.snapshotWriteLock = false
|
||||||
|
|
||||||
|
# We have to use data.v here because the version in the doc could
|
||||||
|
# have been updated between the call to writeSnapshot() and now.
|
||||||
|
doc.committedVersion = data.v
|
||||||
|
doc.dbMeta = dbMeta
|
||||||
|
|
||||||
|
callback? error
|
||||||
|
|
||||||
|
# *** Model interface methods
|
||||||
|
|
||||||
|
# Create a new document.
|
||||||
|
#
|
||||||
|
# data should be {snapshot, type, [meta]}. The version of a new document is 0.
|
||||||
|
@create = (docName, type, meta, callback) ->
|
||||||
|
[meta, callback] = [{}, meta] if typeof meta is 'function'
|
||||||
|
|
||||||
|
return callback? 'Invalid document name' if docName.match /\//
|
||||||
|
return callback? 'Document already exists' if docs[docName]
|
||||||
|
|
||||||
|
type = types[type] if typeof type == 'string'
|
||||||
|
return callback? 'Type not found' unless type
|
||||||
|
|
||||||
|
data =
|
||||||
|
snapshot:type.create()
|
||||||
|
type:type.name
|
||||||
|
meta:meta or {}
|
||||||
|
v:0
|
||||||
|
|
||||||
|
done = (error, dbMeta) ->
|
||||||
|
# dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something.
|
||||||
|
return callback? error if error
|
||||||
|
|
||||||
|
# From here on we'll store the object version of the type name.
|
||||||
|
data.type = type
|
||||||
|
add docName, null, data, 0, [], dbMeta
|
||||||
|
model.emit 'create', docName, data
|
||||||
|
callback?()
|
||||||
|
|
||||||
|
if db
|
||||||
|
db.create docName, data, done
|
||||||
|
else
|
||||||
|
done()

  # Permanently deletes the specified document.
  # If listeners are attached, they are removed.
  #
  # The callback is called with (error) if there was an error. If error is null / undefined, the
  # document was deleted.
  #
  # WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the
  # deletion. Subsequent op submissions will fail).
  @delete = (docName, callback) ->
    doc = docs[docName]

    if doc
      clearTimeout doc.reapTimer
      delete docs[docName]

    done = (error) ->
      model.emit 'delete', docName unless error
      callback? error

    if db
      db.delete docName, doc?.dbMeta, done
    else
      done (if !doc then 'Document does not exist')

  # This gets all operations from [start...end). (That is, the range is not inclusive of end.)
  #
  # end can be null. This means 'get me all ops from start'.
  #
  # Each op returned is in the form {op:o, meta:m, v:version}.
  #
  # Callback is called with (error, [ops])
  #
  # If the document does not exist, getOps doesn't necessarily return an error. This is because
  # it's awkward to figure out whether or not the document exists for things
  # like the redis database backend. I guess it's a bit gross having this inconsistent
  # with the other DB calls, but it's certainly convenient.
  #
  # Use getVersion() to determine if a document actually exists, if that's what you're
  # after.
  @getOps = getOps = (docName, start, end, callback) ->
    # getOps will only use the op cache if it's there. It won't fill the op cache in.
    throw new Error 'start must be 0+' unless start >= 0

    [end, callback] = [null, end] if typeof end is 'function'

    ops = docs[docName]?.ops

    if ops
      version = docs[docName].v

      # Ops contains an array of ops. The last op in the list is the last op applied
      end ?= version
      start = Math.min start, end

      return callback null, [] if start == end

      # Base is the version number of the oldest op we have cached
      base = version - ops.length

      # If the database is null, we'll trim to the ops we do have and hope that's enough.
      if start >= base or db is null
        refreshReapingTimeout docName
        options.stats?.cacheHit 'getOps'

        return callback null, ops[(start - base)...(end - base)]

    options.stats?.cacheMiss 'getOps'

    getOpsInternal docName, start, end, callback
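
  # A hedged example of the half-open range (not in the original source): for a
  # document at version 3 this returns the ops that were applied at versions 1
  # and 2, but not any op at version 3.
  #
  #   model.getOps 'doc', 1, 3, (error, ops) ->
  #     console.log (op.v for op in ops)   # => [1, 2]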

  # Gets the snapshot data for the specified document.
  # getSnapshot(docName, callback)
  # Callback is called with (error, {v: <version>, type: <type>, snapshot: <snapshot>, meta: <meta>})
  @getSnapshot = (docName, callback) ->
    load docName, (error, doc) ->
      callback error, if doc then {v:doc.v, type:doc.type, snapshot:doc.snapshot, meta:doc.meta}

  # Gets the latest version # of the document.
  # getVersion(docName, callback)
  # callback is called with (error, version).
  @getVersion = (docName, callback) ->
    load docName, (error, doc) -> callback error, doc?.v

  # Apply an op to the specified document.
  # The callback is passed (error, applied version #)
  # opData = {op:op, v:v, meta:metadata}
  #
  # Ops are queued before being applied so that the following code applies op C before op B:
  # model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB
  # model.applyOp 'doc', OPC
  @applyOp = (docName, opData, callback) ->
    # All the logic for this is in makeOpQueue, above.
    load docName, (error, doc) ->
      return callback error if error

      process.nextTick -> doc.opQueue opData, (error, newVersion) ->
        refreshReapingTimeout docName
        callback? error, newVersion

  # TODO: store (some) metadata in DB
  # TODO: op and meta should be combinable in the op that gets sent
  @applyMetaOp = (docName, metaOpData, callback) ->
    {path, value} = metaOpData.meta

    return callback? "path should be an array" unless isArray path

    load docName, (error, doc) ->
      if error?
        callback? error
      else
        applied = false
        switch path[0]
          when 'shout'
            doc.eventEmitter.emit 'op', metaOpData
            applied = true

        model.emit 'applyMetaOp', docName, path, value if applied
        callback? null, doc.v

  # Listen to all ops from the specified version. If version is in the past, all
  # ops since that version are sent immediately to the listener.
  #
  # The callback is called once the listener is attached, but before any ops have been passed
  # to the listener.
  #
  # This will _not_ edit the document metadata.
  #
  # If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour
  # might change in a future version.
  #
  # version is the document version at which the document is opened. It can be left out if you want to open
  # the document at the most recent version.
  #
  # listener is called with (opData) each time an op is applied.
  #
  # callback(error, openedVersion)
  @listen = (docName, version, listener, callback) ->
    [version, listener, callback] = [null, version, listener] if typeof version is 'function'

    load docName, (error, doc) ->
      return callback? error if error

      clearTimeout doc.reapTimer

      if version?
        getOps docName, version, null, (error, data) ->
          return callback? error if error

          doc.eventEmitter.on 'op', listener
          callback? null, version
          for op in data
            listener op

            # The listener may well remove itself during the catchup phase. If this happens, break early.
            # This is done in a quite inefficient way. (O(n) where n = #listeners on doc)
            break unless listener in doc.eventEmitter.listeners 'op'

      else # Version is null / undefined. Just add the listener.
        doc.eventEmitter.on 'op', listener
        callback? null, doc.v
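
  # A hedged usage sketch (not in the original source): listening from version 0
  # replays every available op through the listener before live ops start flowing.
  #
  #   model.listen 'doc', 0, ((opData) -> console.log 'op at version', opData.v),
  #     (error, openedVersion) -> console.log 'caught up from version', openedVersion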

  # Remove a listener for a particular document.
  #
  # removeListener(docName, listener)
  #
  # This is synchronous.
  @removeListener = (docName, listener) ->
    # The document should already be loaded.
    doc = docs[docName]
    throw new Error 'removeListener called but document not loaded' unless doc

    doc.eventEmitter.removeListener 'op', listener
    refreshReapingTimeout docName

  # Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed -
  # sharejs will happily replay uncommitted ops when documents are re-opened anyway.
  @flush = (callback) ->
    return callback?() unless db

    pendingWrites = 0

    for docName, doc of docs
      if doc.committedVersion < doc.v
        pendingWrites++
        # I'm hoping writeSnapshot will always happen in another thread.
        tryWriteSnapshot docName, ->
          process.nextTick ->
            pendingWrites--
            callback?() if pendingWrites is 0

    # If nothing was queued, terminate immediately.
    callback?() if pendingWrites is 0

  # Close the database connection. This is needed so nodejs can shut down cleanly.
  @closeDb = ->
    db?.close?()
    db = null

  return

# Model inherits from EventEmitter.
Model:: = new EventEmitter

@ -0,0 +1,38 @@
# This is a really simple OT type. It's not compiled with the web client, but it could be.
#
# It's mostly included for demonstration purposes and it's used in a lot of unit tests.
#
# This defines a really simple text OT type which only allows inserts. (No deletes).
#
# Ops look like:
#   {position:#, text:"asdf"}
#
# Document snapshots look like:
#   {str:string}

module.exports =
  # The name of the OT type. The type is stored in types[type.name]. The name can be
  # used in place of the actual type in all the API methods.
  name: 'simple'

  # Create a new document snapshot
  create: -> {str:""}

  # Apply the given op to the document snapshot. Returns the new snapshot.
  #
  # The original snapshot should not be modified.
  apply: (snapshot, op) ->
    throw new Error 'Invalid position' unless 0 <= op.position <= snapshot.str.length

    str = snapshot.str
    str = str.slice(0, op.position) + op.text + str.slice(op.position)
    {str}

  # transform op1 by op2. Return transformed version of op1.
  # sym describes the symmetry of the op. It's 'left' or 'right' depending on whether the
  # op being transformed comes from the client or the server.
  transform: (op1, op2, sym) ->
    pos = op1.position
    pos += op2.text.length if op2.position < pos or (op2.position == pos and sym is 'left')

    return {position:pos, text:op1.text}
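
# A hedged convergence example (not part of the original file): two concurrent
# inserts at the same position converge when each is transformed against the
# other with opposite sym values.
#
#   simple = require './simple'
#   a = {position:0, text:"x"}
#   b = {position:0, text:"y"}
#   # One peer applies a, then b transformed against a:
#   s1 = simple.apply simple.apply({str:""}, a), simple.transform(b, a, 'right')
#   # The other applies b, then a transformed against b:
#   s2 = simple.apply simple.apply({str:""}, b), simple.transform(a, b, 'left')
#   # Both snapshots end up as {str:"yx"} - the tie is broken by sym.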

@ -0,0 +1,42 @@
# A synchronous processing queue. The queue calls process on the arguments,
# ensuring that process() is only executing once at a time.
#
# process(data, callback) _MUST_ eventually call its callback.
#
# Example:
#
# queue = require 'syncqueue'
#
# fn = queue (data, callback) ->
#     asyncthing data, ->
#         callback(321)
#
# fn(1)
# fn(2)
# fn(3, (result) -> console.log(result))
#
#   ^--- async thing will only be running once at any time.

module.exports = (process) ->
  throw new Error('process is not a function') unless typeof process == 'function'
  queue = []

  enqueue = (data, callback) ->
    queue.push [data, callback]
    flush()

  enqueue.busy = false

  flush = ->
    return if enqueue.busy or queue.length == 0

    enqueue.busy = true
    [data, callback] = queue.shift()
    process data, (result...) -> # TODO: Make this not use varargs - varargs are really slow.
      enqueue.busy = false
      # This is called after busy = false so a user can check if enqueue.busy is set in the callback.
      callback.apply null, result if callback
      flush()

  enqueue

@ -0,0 +1,32 @@
# Text document API for text

text = require './text' if typeof WEB is 'undefined'

text.api =
  provides: {text:true}

  # The number of characters in the string
  getLength: -> @snapshot.length

  # Get the text contents of a document
  getText: -> @snapshot

  insert: (pos, text, callback) ->
    op = [{p:pos, i:text}]

    @submitOp op, callback
    op

  del: (pos, length, callback) ->
    op = [{p:pos, d:@snapshot[pos...(pos + length)]}]

    @submitOp op, callback
    op

  _register: ->
    @on 'remoteop', (op) ->
      for component in op
        if component.i != undefined
          @emit 'insert', component.p, component.i
        else
          @emit 'delete', component.p, component.d
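
# A hedged usage sketch (not in the original file; `doc` is assumed to be an
# open sharejs document whose type provides this text API):
#
#   doc.insert 5, " world"    # submits [{p:5, i:" world"}]
#   doc.del 0, 5              # submits [{p:0, d:doc.getText()[0...5]}]
#   doc.on 'insert', (pos, text) -> console.log "remote insert at #{pos}"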

@ -0,0 +1,43 @@
# Text document API for text-composable

if WEB?
  type = exports.types['text-composable']
else
  type = require './text-composable'

type.api =
  provides: {'text':true}

  # The number of characters in the string
  'getLength': -> @snapshot.length

  # Get the text contents of a document
  'getText': -> @snapshot

  'insert': (pos, text, callback) ->
    op = type.normalize [pos, 'i':text, (@snapshot.length - pos)]

    @submitOp op, callback
    op

  'del': (pos, length, callback) ->
    op = type.normalize [pos, 'd':@snapshot[pos...(pos + length)], (@snapshot.length - pos - length)]

    @submitOp op, callback
    op

  _register: ->
    @on 'remoteop', (op) ->
      pos = 0
      for component in op
        if typeof component is 'number'
          pos += component
        else if component.i != undefined
          @emit 'insert', pos, component.i
          pos += component.i.length
        else
          # delete
          @emit 'delete', pos, component.d
          # We don't increment pos, because the position
          # specified is after the delete has happened.

@ -0,0 +1,261 @@
# An alternate composable implementation for text. This is much closer
# to the implementation used by google wave.
#
# Ops are lists of components which iterate over the whole document.
# Components are either:
#   A number N: Skip N characters in the original document
#   {i:'str'}:  Insert 'str' at the current position in the document
#   {d:'str'}:  Delete 'str', which appears at the current position in the document
#
# Eg: [3, {i:'hi'}, 5, {d:'internet'}]
#
# Snapshots are strings.
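#
# A hedged walk-through of the example op above (not in the original source):
# on a 16-character snapshot it skips 3 characters, inserts 'hi', skips 5 more
# and deletes the trailing 'internet':
#
#   exports.apply 'abcdefghinternet', [3, {i:'hi'}, 5, {d:'internet'}]
#   # => 'abchidefgh'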

p = -> #require('util').debug
i = -> #require('util').inspect

exports = if WEB? then {} else module.exports

exports.name = 'text-composable'

exports.create = -> ''

# -------- Utility methods

checkOp = (op) ->
  throw new Error('Op must be an array of components') unless Array.isArray(op)
  last = null
  for c in op
    if typeof(c) == 'object'
      throw new Error("Invalid op component: #{i c}") unless (c.i? && c.i.length > 0) or (c.d? && c.d.length > 0)
    else
      throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number'
      throw new Error('Skip components must be a positive number') unless c > 0
      throw new Error('Adjacent skip components should be added') if typeof(last) == 'number'

    last = c

# Makes a function for appending components to a given op.
# Exported for the randomOpGenerator.
exports._makeAppend = makeAppend = (op) -> (component) ->
  if component == 0 || component.i == '' || component.d == ''
    return
  else if op.length == 0
    op.push component
  else if typeof(component) == 'number' && typeof(op[op.length - 1]) == 'number'
    op[op.length - 1] += component
  else if component.i? && op[op.length - 1].i?
    op[op.length - 1].i += component.i
  else if component.d? && op[op.length - 1].d?
    op[op.length - 1].d += component.d
  else
    op.push component

  # checkOp op

# Makes 2 functions for taking components from the start of an op, and for peeking
# at the next op that could be taken.
makeTake = (op) ->
  # The index of the next component to take
  idx = 0
  # The offset into the component
  offset = 0

  # Take up to length n from the front of op. If n is null, take the next
  # op component. If indivisableField == 'd', delete components won't be separated.
  # If indivisableField == 'i', insert components won't be separated.
  take = (n, indivisableField) ->
    return null if idx == op.length
    #assert.notStrictEqual op.length, i, 'The op is too short to traverse the document'

    if typeof(op[idx]) == 'number'
      if !n? or op[idx] - offset <= n
        c = op[idx] - offset
        ++idx; offset = 0
        c
      else
        offset += n
        n
    else
      # Take from the string
      field = if op[idx].i then 'i' else 'd'
      c = {}
      if !n? or op[idx][field].length - offset <= n or field == indivisableField
        c[field] = op[idx][field][offset..]
        ++idx; offset = 0
      else
        c[field] = op[idx][field][offset...(offset + n)]
        offset += n
      c

  peekType = () ->
    op[idx]

  [take, peekType]

# Find and return the length of an op component
componentLength = (component) ->
  if typeof(component) == 'number'
    component
  else if component.i?
    component.i.length
  else
    component.d.length

# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
# adjacent inserts and deletes.
exports.normalize = (op) ->
  newOp = []
  append = makeAppend newOp
  append component for component in op
  newOp

# Apply the op to the string. Returns the new string.
exports.apply = (str, op) ->
  p "Applying #{i op} to '#{str}'"
  throw new Error('Snapshot should be a string') unless typeof(str) == 'string'
  checkOp op

  pos = 0
  newDoc = []

  for component in op
    if typeof(component) == 'number'
      throw new Error('The op is too long for this document') if component > str.length
      newDoc.push str[...component]
      str = str[component..]
    else if component.i?
      newDoc.push component.i
    else
      throw new Error("The deleted text '#{component.d}' doesn't match the next characters in the document '#{str[...component.d.length]}'") unless component.d == str[...component.d.length]
      str = str[component.d.length..]

  throw new Error("The applied op doesn't traverse the entire document") unless '' == str

  newDoc.join ''

# transform op1 by op2. Return transformed version of op1.
# op1 and op2 are unchanged by transform.
exports.transform = (op, otherOp, side) ->
  throw new Error "side (#{side}) must be 'left' or 'right'" unless side == 'left' or side == 'right'

  checkOp op
  checkOp otherOp
  newOp = []

  append = makeAppend newOp
  [take, peek] = makeTake op

  for component in otherOp
    if typeof(component) == 'number' # Skip
      length = component
      while length > 0
        chunk = take(length, 'i')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append chunk
        length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.i?
    else if component.i? # Insert
      if side == 'left'
        # The left insert should go first.
        o = peek()
        append take() if o?.i

      # Otherwise, skip the inserted text.
      append(component.i.length)
    else # Delete.
      #assert.ok component.d
      length = component.d.length
      while length > 0
        chunk = take(length, 'i')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        if typeof(chunk) == 'number'
          length -= chunk
        else if chunk.i?
          append(chunk)
        else
          #assert.ok chunk.d
          # The delete is unnecessary now.
          length -= chunk.d.length

  # Append extras from op1
  while (component = take())
    throw new Error "Remaining fragments in the op: #{i component}" unless component?.i?
    append component

  newOp
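
# A hedged transform example (not in the original source). Two concurrent ops
# against the snapshot 'abcde':
#
#   opA = [2, {i:'X'}, 3]   # insert 'X' after 'ab'
#   opB = [4, {d:'e'}]      # delete the trailing 'e'
#   exports.apply exports.apply('abcde', opA), exports.transform(opB, opA, 'right')
#   # => 'abXcd', the same result as applying opB first and then
#   #    exports.transform(opA, opB, 'left').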

# Compose 2 ops into 1 op.
exports.compose = (op1, op2) ->
  p "COMPOSE #{i op1} + #{i op2}"
  checkOp op1
  checkOp op2

  result = []

  append = makeAppend result
  [take, _] = makeTake op1

  for component in op2
    if typeof(component) == 'number' # Skip
      length = component
      while length > 0
        chunk = take(length, 'd')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append chunk
        length -= componentLength chunk unless typeof(chunk) == 'object' && chunk.d?

    else if component.i? # Insert
      append {i:component.i}

    else # Delete
      offset = 0
      while offset < component.d.length
        chunk = take(component.d.length - offset, 'd')
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        # If it's a delete, append it. If it's a skip, drop it and decrease length. If it's an insert, check the strings match, drop it and decrease length.
        if typeof(chunk) == 'number'
          append {d:component.d[offset...(offset + chunk)]}
          offset += chunk
        else if chunk.i?
          throw new Error("The deleted text doesn't match the inserted text") unless component.d[offset...(offset + chunk.i.length)] == chunk.i
          offset += chunk.i.length
          # The ops cancel each other out.
        else
          # Delete
          append chunk

  # Append extras from op1
  while (component = take())
    throw new Error "Trailing stuff in op1 #{i component}" unless component?.d?
    append component

  result
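
# A hedged compose example (not in the original source): an insert followed by
# a partial delete of that insert collapses into a single op.
#
#   op1 = [2, {i:'hi'}, 3]
#   op2 = [2, {d:'h'}, 4]
#   exports.compose op1, op2   # => [2, {i:'i'}, 3]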

invertComponent = (c) ->
  if typeof(c) == 'number'
    c
  else if c.i?
    {d:c.i}
  else
    {i:c.d}

# Invert an op
exports.invert = (op) ->
  result = []
  append = makeAppend result

  append(invertComponent component) for component in op

  result

if window?
  window.ot ||= {}
  window.ot.types ||= {}
  window.ot.types.text = exports

@ -0,0 +1,89 @@
# Text document API for text-tp2

if WEB?
  type = exports.types['text-tp2']
else
  type = require './text-tp2'

{_takeDoc:takeDoc, _append:append} = type

appendSkipChars = (op, doc, pos, maxlength) ->
  while (maxlength == undefined || maxlength > 0) and pos.index < doc.data.length
    part = takeDoc doc, pos, maxlength, true
    maxlength -= part.length if maxlength != undefined and typeof part is 'string'
    append op, (part.length || part)

type['api'] =
  'provides': {'text':true}

  # The number of characters in the string
  'getLength': -> @snapshot.charLength

  # Flatten a document into a string
  'getText': ->
    strings = (elem for elem in @snapshot.data when typeof elem is 'string')
    strings.join ''

  'insert': (pos, text, callback) ->
    pos = 0 if pos == undefined

    op = []
    docPos = {index:0, offset:0}

    appendSkipChars op, @snapshot, docPos, pos
    append op, {'i':text}
    appendSkipChars op, @snapshot, docPos

    @submitOp op, callback
    op

  'del': (pos, length, callback) ->
    op = []
    docPos = {index:0, offset:0}

    appendSkipChars op, @snapshot, docPos, pos

    while length > 0
      part = takeDoc @snapshot, docPos, length, true
      if typeof part is 'string'
        append op, {'d':part.length}
        length -= part.length
      else
        append op, part

    appendSkipChars op, @snapshot, docPos

    @submitOp op, callback
    op

  '_register': ->
    # Interpret received ops + generate more detailed events for them
    @on 'remoteop', (op, snapshot) ->
      textPos = 0
      docPos = {index:0, offset:0}

      for component in op
        if typeof component is 'number'
          # Skip
          remainder = component
          while remainder > 0
            part = takeDoc snapshot, docPos, remainder
            if typeof part is 'string'
              textPos += part.length
            remainder -= part.length || part
        else if component.i != undefined
          # Insert
          if typeof component.i is 'string'
            @emit 'insert', textPos, component.i
            textPos += component.i.length
        else
          # Delete
          remainder = component.d
          while remainder > 0
            part = takeDoc snapshot, docPos, remainder
            if typeof part is 'string'
              @emit 'delete', textPos, part
            remainder -= part.length || part

      return

@ -0,0 +1,322 @@
# A TP2 implementation of text, following this spec:
# http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README
#
# A document is made up of a string and a set of tombstones inserted throughout
# the string. For example, 'some ', (2 tombstones), 'string'.
#
# This is encoded in a document as: {s:'some string', t:[5, -2, 6]}
#
# Ops are lists of components which iterate over the whole document.
# Components are either:
#   N:         Skip N characters in the original document
#   {i:'str'}: Insert 'str' at the current position in the document
#   {i:N}:     Insert N tombstones at the current position in the document
#   {d:N}:     Delete (tombstone) N characters at the current position in the document
#
# Eg: [3, {i:'hi'}, 5, {d:8}]
#
# Snapshots are lists with characters and tombstones. Characters are stored in strings
# and adjacent tombstones are flattened into numbers.
#
# Eg, the document: 'Hello .....world' ('.' denotes tombstoned (deleted) characters)
# would be represented by a document snapshot of ['Hello ', 5, 'world']
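#
# A hedged concrete sketch (not in the original source): deserializing the
# example snapshot above and reading its two lengths.
#
#   doc = type.deserialize ['Hello ', 5, 'world']
#   doc.charLength    # => 11  (visible characters only)
#   doc.totalLength   # => 16  (characters plus tombstones)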

type =
  name: 'text-tp2'
  tp2: true
  create: -> {charLength:0, totalLength:0, positionCache:[], data:[]}
  serialize: (doc) ->
    throw new Error 'invalid doc snapshot' unless doc.data
    doc.data
  deserialize: (data) ->
    doc = type.create()
    doc.data = data

    for component in data
      if typeof component is 'string'
        doc.charLength += component.length
        doc.totalLength += component.length
      else
        doc.totalLength += component

    doc


checkOp = (op) ->
  throw new Error('Op must be an array of components') unless Array.isArray(op)
  last = null
  for c in op
    if typeof(c) == 'object'
      if c.i != undefined
        throw new Error('Inserts must insert a string or a +ive number') unless (typeof(c.i) == 'string' and c.i.length > 0) or (typeof(c.i) == 'number' and c.i > 0)
      else if c.d != undefined
        throw new Error('Deletes must be a +ive number') unless typeof(c.d) == 'number' and c.d > 0
      else
        throw new Error('Operation component must define .i or .d')
    else
      throw new Error('Op components must be objects or numbers') unless typeof(c) == 'number'
      throw new Error('Skip components must be a positive number') unless c > 0
      throw new Error('Adjacent skip components should be combined') if typeof(last) == 'number'

    last = c

# Take the next part from the specified position in a document snapshot.
# position = {index, offset}. It will be updated.
type._takeDoc = takeDoc = (doc, position, maxlength, tombsIndivisible) ->
  throw new Error 'Operation goes past the end of the document' if position.index >= doc.data.length

  part = doc.data[position.index]
  # peel off data[0]
  result = if typeof(part) == 'string'
      if maxlength != undefined
        part[position.offset...(position.offset + maxlength)]
      else
        part[position.offset...]
    else
      if maxlength == undefined or tombsIndivisible
        part - position.offset
      else
        Math.min(maxlength, part - position.offset)

  resultLen = result.length || result

  if (part.length || part) - position.offset > resultLen
    position.offset += resultLen
  else
    position.index++
    position.offset = 0

  result

# Append a part to the end of a document
type._appendDoc = appendDoc = (doc, p) ->
  return if p == 0 or p == ''

  if typeof p is 'string'
    doc.charLength += p.length
    doc.totalLength += p.length
  else
    doc.totalLength += p

  data = doc.data
  if data.length == 0
    data.push p
  else if typeof(data[data.length - 1]) == typeof(p)
    data[data.length - 1] += p
  else
    data.push p
  return

# Apply the op to the document. The document is not modified in the process.
type.apply = (doc, op) ->
  unless doc.totalLength != undefined and doc.charLength != undefined and doc.data.length != undefined
    throw new Error('Snapshot is invalid')

  checkOp op

  newDoc = type.create()
  position = {index:0, offset:0}

  for component in op
    if typeof(component) is 'number'
      remainder = component
      while remainder > 0
        part = takeDoc doc, position, remainder

        appendDoc newDoc, part
        remainder -= part.length || part

    else if component.i != undefined
      appendDoc newDoc, component.i
    else if component.d != undefined
      remainder = component.d
      while remainder > 0
        part = takeDoc doc, position, remainder
        remainder -= part.length || part
      appendDoc newDoc, component.d

  newDoc
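
# A hedged apply example (not in the original source), using the header's
# sample snapshot. Note that skips traverse tombstones as well as characters,
# so position 11 = 6 visible chars + 5 tombstones. Deleting 'wo' turns two
# characters into tombstones, which merge with the existing five:
#
#   doc = type.deserialize ['Hello ', 5, 'world']
#   type.apply doc, [11, {d:2}, 3]   # data becomes ['Hello ', 7, 'rld']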

# Append an op component to the end of the specified op.
# Exported for the randomOpGenerator.
type._append = append = (op, component) ->
  if component == 0 || component.i == '' || component.i == 0 || component.d == 0
    return
  else if op.length == 0
    op.push component
  else
    last = op[op.length - 1]
    if typeof(component) == 'number' && typeof(last) == 'number'
      op[op.length - 1] += component
    else if component.i != undefined && last.i? && typeof(last.i) == typeof(component.i)
      last.i += component.i
    else if component.d != undefined && last.d?
      last.d += component.d
    else
      op.push component

# Makes 2 functions for taking components from the start of an op, and for peeking
# at the next op that could be taken.
makeTake = (op) ->
  # The index of the next component to take
  index = 0
  # The offset into the component
  offset = 0

  # Take up to length maxlength from the op. If maxlength is not defined, there is no max.
  # If insertsIndivisible is true, inserts (& insert tombstones) won't be separated.
  #
  # Returns null when op is fully consumed.
  take = (maxlength, insertsIndivisible) ->
    return null if index == op.length

    e = op[index]
    if typeof((current = e)) == 'number' or typeof((current = e.i)) == 'number' or (current = e.d) != undefined
      if !maxlength? or current - offset <= maxlength or (insertsIndivisible and e.i != undefined)
        # Return the rest of the current element.
        c = current - offset
        ++index; offset = 0
      else
        offset += maxlength
        c = maxlength
      if e.i != undefined then {i:c} else if e.d != undefined then {d:c} else c
    else
      # Take from the inserted string
      if !maxlength? or e.i.length - offset <= maxlength or insertsIndivisible
        result = {i:e.i[offset..]}
        ++index; offset = 0
      else
        result = {i:e.i[offset...offset + maxlength]}
        offset += maxlength
      result

  peekType = -> op[index]

  [take, peekType]

# Find and return the length of an op component
componentLength = (component) ->
  if typeof(component) == 'number'
    component
  else if typeof(component.i) == 'string'
    component.i.length
  else
    # This should work because c.d and c.i must be +ive.
    component.d or component.i

# Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
# adjacent inserts and deletes.
type.normalize = (op) ->
  newOp = []
  append newOp, component for component in op
  newOp

# This is a helper method to transform and prune. goForwards is true for transform, false for prune.
transformer = (op, otherOp, goForwards, side) ->
  checkOp op
  checkOp otherOp
  newOp = []

  [take, peek] = makeTake op

  for component in otherOp
    length = componentLength component

    if component.i != undefined # Insert text or tombs
      if goForwards # transform - insert skips over inserted parts
        if side == 'left'
          # The left insert should go first.
          append newOp, take() while peek()?.i != undefined

        # In any case, skip the inserted text.
        append newOp, length

      else # Prune. Remove skips for inserts.
        while length > 0
          chunk = take length, true

          throw new Error 'The transformed op is invalid' unless chunk != null
          throw new Error 'The transformed op deletes locally inserted characters - it cannot be purged of the insert.' if chunk.d != undefined

          if typeof chunk is 'number'
            length -= chunk
          else
            append newOp, chunk

    else # Skip or delete
      while length > 0
        chunk = take length, true
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append newOp, chunk
        length -= componentLength chunk unless chunk.i

  # Append extras from op1
  while (component = take())
    throw new Error "Remaining fragments in the op: #{component}" unless component.i != undefined
    append newOp, component

  newOp

# transform op1 by op2. Return transformed version of op1.
# op1 and op2 are unchanged by transform.
# side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op.
type.transform = (op, otherOp, side) ->
  throw new Error "side (#{side}) should be 'left' or 'right'" unless side == 'left' or side == 'right'
  transformer op, otherOp, true, side

# Prune is the inverse of transform.
type.prune = (op, otherOp) -> transformer op, otherOp, false

# Compose 2 ops into 1 op.
type.compose = (op1, op2) ->
  return op2 if op1 == null or op1 == undefined

  checkOp op1
  checkOp op2

  result = []

  [take, _] = makeTake op1

  for component in op2

    if typeof(component) == 'number' # Skip
      # Just copy from op1.
      length = component
      while length > 0
        chunk = take length
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        append result, chunk
        length -= componentLength chunk

    else if component.i != undefined # Insert
      append result, {i:component.i}

    else # Delete
      length = component.d
      while length > 0
        chunk = take length
        throw new Error('The op traverses more elements than the document has') unless chunk != null

        chunkLength = componentLength chunk
        if chunk.i != undefined
          append result, {i:chunkLength}
        else
          append result, {d:chunkLength}

        length -= chunkLength

  # Append extras from op1
  while (component = take())
    throw new Error "Remaining fragments in op1: #{component}" unless component.i != undefined
    append result, component

  result

if WEB?
  exports.types['text-tp2'] = type
else
  module.exports = type

209
services/document-updater/app/coffee/sharejs/types/text.coffee
Normal file
@ -0,0 +1,209 @@
# A simple text implementation
#
# Operations are lists of components.
# Each component either inserts or deletes at a specified position in the document.
#
# Components are either:
#   {i:'str', p:100}: Insert 'str' at position 100 in the document
#   {d:'str', p:100}: Delete 'str' at position 100 in the document
#
# Components in an operation are executed sequentially, so the position of components
# assumes previous components have already executed.
#
# Eg: This op:
#   [{i:'abc', p:0}]
# is equivalent to this op:
#   [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}]

# NOTE: The global scope here is shared with other sharejs files when built with closure.
# Be careful what ends up in your namespace.

text = {}

text.name = 'text'

text.create = -> ''

strInject = (s1, pos, s2) -> s1[...pos] + s2 + s1[pos..]

checkValidComponent = (c) ->
  throw new Error 'component missing position field' if typeof c.p != 'number'

  i_type = typeof c.i
  d_type = typeof c.d
  throw new Error 'component needs an i or d field' unless (i_type == 'string') ^ (d_type == 'string')

  throw new Error 'position cannot be negative' unless c.p >= 0

checkValidOp = (op) ->
  checkValidComponent(c) for c in op
  true

text.apply = (snapshot, op) ->
  checkValidOp op
  for component in op
    if component.i?
      snapshot = strInject snapshot, component.p, component.i
    else
      deleted = snapshot[component.p...(component.p + component.d.length)]
      throw new Error "Delete component '#{component.d}' does not match deleted text '#{deleted}'" unless component.d == deleted
      snapshot = snapshot[...component.p] + snapshot[(component.p + component.d.length)..]

  snapshot
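
# A hedged apply example (not in the original source). Positions are evaluated
# after earlier components in the same op have run:
#
#   text.apply 'abcdef', [{i:'X', p:2}, {d:'de', p:4}]
#   # => 'abXcf'  ('X' lands after 'ab'; the delete position accounts for it)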

# Exported for use by the random op generator.
#
# For simplicity, this version of append does not compress adjacent inserts and deletes of
# the same text. It would be nice to change that at some stage.
text._append = append = (newOp, c) ->
  return if c.i == '' or c.d == ''
  if newOp.length == 0
    newOp.push c
  else
    last = newOp[newOp.length - 1]

    # Compose the insert into the previous insert if possible
    if last.i? && c.i? and last.p <= c.p <= (last.p + last.i.length)
      newOp[newOp.length - 1] = {i:strInject(last.i, c.p - last.p, c.i), p:last.p}
    else if last.d? && c.d? and c.p <= last.p <= (c.p + c.d.length)
      newOp[newOp.length - 1] = {d:strInject(c.d, last.p - c.p, last.d), p:c.p}
    else
      newOp.push c

text.compose = (op1, op2) ->
  checkValidOp op1
  checkValidOp op2

  newOp = op1.slice()
  append newOp, c for c in op2

  newOp

# Attempt to compress the op components together 'as much as possible'.
# This implementation preserves order and preserves create/delete pairs.
text.compress = (op) -> text.compose [], op

text.normalize = (op) ->
  newOp = []

  # Normalize should allow ops which are a single (unwrapped) component:
  # {i:'asdf', p:23}.
  # There's no good way to test if something is an array:
  # http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/
  # so this is probably the least bad solution.
  op = [op] if op.i? or op.p?

  for c in op
    c.p ?= 0
    append newOp, c

  newOp

# This helper method transforms a position by an op component.
#
# If c is an insert, insertAfter specifies whether the transform
# is pushed after the insert (true) or before it (false).
#
# insertAfter is optional for deletes.
transformPosition = (pos, c, insertAfter) ->
  if c.i?
    if c.p < pos || (c.p == pos && insertAfter)
      pos + c.i.length
    else
      pos
  else
    # I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length))
    # but I think it's harder to read that way, and it compiles using ternary operators anyway
    # so it's no slower written like this.
    if pos <= c.p
      pos
    else if pos <= c.p + c.d.length
      c.p
    else
      pos - c.d.length

# Helper method to transform a cursor position as a result of an op.
#
# Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position
# is pushed after an insert (true) or before it (false).
text.transformCursor = (position, op, side) ->
  insertAfter = side == 'right'
  position = transformPosition position, c, insertAfter for c in op
  position
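
# A hedged cursor example (not in the original source): an insert of 3
# characters at position 2 pushes a cursor sitting at 2 only when side is
# 'right' (i.e. the cursor is to be placed after the insert):
#
#   text.transformCursor 2, [{i:'abc', p:2}], 'right'   # => 5
#   text.transformCursor 2, [{i:'abc', p:2}], 'left'    # => 2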

# Transform an op component by another op component. Asymmetric.
# The result will be appended to destination.
#
# exported for use in JSON type
text._tc = transformComponent = (dest, c, otherC, side) ->
  checkValidOp [c]
  checkValidOp [otherC]

  if c.i?
    append dest, {i:c.i, p:transformPosition(c.p, otherC, side == 'right')}

  else # Delete
    if otherC.i? # delete vs insert
      s = c.d
      if c.p < otherC.p
        append dest, {d:s[...otherC.p - c.p], p:c.p}
        s = s[(otherC.p - c.p)..]
      if s != ''
        append dest, {d:s, p:c.p + otherC.i.length}

    else # Delete vs delete
      if c.p >= otherC.p + otherC.d.length
        append dest, {d:c.d, p:c.p - otherC.d.length}
      else if c.p + c.d.length <= otherC.p
        append dest, c
      else
        # They overlap somewhere.
        newC = {d:'', p:c.p}
        if c.p < otherC.p
          newC.d = c.d[...(otherC.p - c.p)]
        if c.p + c.d.length > otherC.p + otherC.d.length
          newC.d += c.d[(otherC.p + otherC.d.length - c.p)..]

        # This is entirely optional - just for a check that the deleted
        # text in the two ops matches
        intersectStart = Math.max c.p, otherC.p
        intersectEnd = Math.min c.p + c.d.length, otherC.p + otherC.d.length
        cIntersect = c.d[intersectStart - c.p...intersectEnd - c.p]
        otherIntersect = otherC.d[intersectStart - otherC.p...intersectEnd - otherC.p]
        throw new Error 'Delete ops delete different text in the same region of the document' unless cIntersect == otherIntersect

        if newC.d != ''
          # This could be rewritten similarly to insert v delete, above.
          newC.p = transformPosition newC.p, otherC
          append dest, newC

  dest

invertComponent = (c) ->
  if c.i?
    {d:c.i, p:c.p}
  else
    {i:c.d, p:c.p}

# No need to use append for invert, because the components won't be able to
# cancel with one another.
text.invert = (op) -> (invertComponent c for c in op.slice().reverse())


if WEB?
  exports.types ||= {}

  # This is kind of awful - come up with a better way to hook this helper code up.
  bootstrapTransform(text, transformComponent, checkValidOp, append)

  # [] is used to prevent closure from renaming types.text
  exports.types.text = text
else
  module.exports = text

  # The text type really shouldn't need this - it should be possible to define
  # an efficient transform function by making a sort of transform map and passing each
  # op component through it.
  require('./helpers').bootstrapTransform(text, transformComponent, checkValidOp, append)

@ -0,0 +1,11 @@
# This is included at the top of each compiled type file for the web.

`/**
  @const
  @type {boolean}
*/
var WEB = true;
`

exports = window['sharejs']

@ -0,0 +1,11 @@
# This is included at the top of each compiled type file for the web.

`/**
  @const
  @type {boolean}
*/
var WEB = true;
`

exports = window['sharejs']
2193
services/document-updater/app/lib/diff_match_patch.js
Normal file
File diff suppressed because it is too large
23
services/document-updater/config/settings.development.coffee
Executable file
@ -0,0 +1,23 @@
Path = require('path')
http = require('http')
http.globalAgent.maxSockets = 300

module.exports =
  internal:
    documentupdater:
      port: 3003

  apis:
    web:
      url: "http://localhost:3000"
      user: "sharelatex"
      pass: "password"

  redis:
    web:
      port: "6379"
      host: "localhost"
      password: ""

  mongo:
    url: 'mongodb://127.0.0.1/sharelatex'
30
services/document-updater/package.json
Normal file
@ -0,0 +1,30 @@
{
  "name": "document-updater-sharelatex",
  "version": "0.0.1",
  "dependencies": {
    "express": "3.3.4",
    "underscore": "1.2.2",
    "redis": "0.7.2",
    "chai": "",
    "request": "2.25.0",
    "sandboxed-module": "~0.2.0",
    "chai-spies": "",
    "async": "",
    "lynx": "0.0.11",
    "coffee-script": "1.4.0",
    "settings-sharelatex": "git+ssh://git@bitbucket.org:sharelatex/settings-sharelatex.git#master",
    "logger-sharelatex": "git+ssh://git@bitbucket.org:sharelatex/logger-sharelatex.git#bunyan",
    "sinon": "~1.5.2",
    "mongojs": "0.9.11"
  },
  "devDependencies": {
    "grunt-execute": "~0.1.5",
    "grunt-contrib-clean": "~0.5.0",
    "grunt-mocha-test": "~0.9.0",
    "grunt": "~0.4.2",
    "grunt-available-tasks": "~0.4.1",
    "grunt-contrib-coffee": "~0.10.0",
    "bunyan": "~0.22.1",
    "grunt-bunyan": "~0.5.0"
  }
}
|
|
@ -0,0 +1,215 @@
|
||||||
|
sinon = require "sinon"
|
||||||
|
chai = require("chai")
|
||||||
|
chai.should()
|
||||||
|
async = require "async"
|
||||||
|
mongojs = require "../../../app/js/mongojs"
|
||||||
|
db = mongojs.db
|
||||||
|
ObjectId = mongojs.ObjectId
|
||||||
|
|
||||||
|
MockWebApi = require "./helpers/MockWebApi"
|
||||||
|
DocUpdaterClient = require "./helpers/DocUpdaterClient"
|
||||||
|
|
||||||
|
describe "Applying updates to a doc", ->
|
||||||
|
before ->
|
||||||
|
@lines = ["one", "two", "three"]
|
||||||
|
@update =
|
||||||
|
doc: @doc_id
|
||||||
|
op: [{
|
||||||
|
i: "one and a half\n"
|
||||||
|
p: 4
|
||||||
|
}]
|
||||||
|
v: 0
|
||||||
|
@result = ["one", "one and a half", "two", "three"]
|
||||||
|
|
||||||
|
describe "when the document is not loaded", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
sinon.spy MockWebApi, "getDocument"
|
||||||
|
DocUpdaterClient.sendUpdate @project_id, @doc_id, @update, (error) ->
|
||||||
|
throw error if error?
|
||||||
|
setTimeout done, 200
|
||||||
|
|
||||||
|
after ->
|
||||||
|
MockWebApi.getDocument.restore()
|
||||||
|
|
||||||
|
it "should load the document from the web API", ->
|
||||||
|
MockWebApi.getDocument
|
||||||
|
.calledWith(@project_id, @doc_id)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should update the doc", (done) ->
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
doc.lines.should.deep.equal @result
|
||||||
|
done()
|
||||||
|
|
||||||
|
describe "when the document is loaded", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
DocUpdaterClient.preloadDoc @project_id, @doc_id, (error) =>
|
||||||
|
throw error if error?
|
||||||
|
sinon.spy MockWebApi, "getDocument"
|
||||||
|
DocUpdaterClient.sendUpdate @project_id, @doc_id, @update, (error) ->
|
||||||
|
throw error if error?
|
||||||
|
setTimeout done, 200
|
||||||
|
|
||||||
|
after ->
|
||||||
|
MockWebApi.getDocument.restore()
|
||||||
|
|
||||||
|
it "should not need to call the web api", ->
|
||||||
|
MockWebApi.getDocument.called.should.equal false
|
||||||
|
|
||||||
|
it "should update the doc", (done) ->
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
doc.lines.should.deep.equal @result
|
||||||
|
done()
|
||||||
|
|
||||||
|
describe "when the document has been deleted", ->
|
||||||
|
describe "when the ops come in a single linear order", ->
|
||||||
|
before ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
@lines = ["", "", ""]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
|
||||||
|
@updates = [
|
||||||
|
{ doc_id: @doc_id, v: 0, op: [i: "h", p: 0 ] }
|
||||||
|
{ doc_id: @doc_id, v: 1, op: [i: "e", p: 1 ] }
|
||||||
|
{ doc_id: @doc_id, v: 2, op: [i: "l", p: 2 ] }
|
||||||
|
{ doc_id: @doc_id, v: 3, op: [i: "l", p: 3 ] }
|
||||||
|
{ doc_id: @doc_id, v: 4, op: [i: "o", p: 4 ] }
|
||||||
|
{ doc_id: @doc_id, v: 5, op: [i: " ", p: 5 ] }
|
||||||
|
{ doc_id: @doc_id, v: 6, op: [i: "w", p: 6 ] }
|
||||||
|
{ doc_id: @doc_id, v: 7, op: [i: "o", p: 7 ] }
|
||||||
|
{ doc_id: @doc_id, v: 8, op: [i: "r", p: 8 ] }
|
||||||
|
{ doc_id: @doc_id, v: 9, op: [i: "l", p: 9 ] }
|
||||||
|
{ doc_id: @doc_id, v: 10, op: [i: "d", p: 10] }
|
||||||
|
]
|
||||||
|
@result = ["hello world", "", ""]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
|
||||||
|
it "should be able to continue applying updates when the project has been deleted", (done) ->
|
||||||
|
actions = []
|
||||||
|
for update in @updates.slice(0,6)
|
||||||
|
do (update) =>
|
||||||
|
actions.push (callback) => DocUpdaterClient.sendUpdate @project_id, @doc_id, update, callback
|
||||||
|
actions.push (callback) => DocUpdaterClient.deleteDoc @project_id, @doc_id, callback
|
||||||
|
for update in @updates.slice(6)
|
||||||
|
do (update) =>
|
||||||
|
actions.push (callback) => DocUpdaterClient.sendUpdate @project_id, @doc_id, update, callback
|
||||||
|
|
||||||
|
async.series actions, (error) =>
|
||||||
|
throw error if error?
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
doc.lines.should.deep.equal @result
|
||||||
|
done()
|
||||||
|
|
||||||
|
describe "when older ops come in after the delete", ->
|
||||||
|
before ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
@lines = ["", "", ""]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
|
||||||
|
@updates = [
|
||||||
|
{ doc_id: @doc_id, v: 0, op: [i: "h", p: 0 ] }
|
||||||
|
{ doc_id: @doc_id, v: 1, op: [i: "e", p: 1 ] }
|
||||||
|
{ doc_id: @doc_id, v: 2, op: [i: "l", p: 2 ] }
|
||||||
|
{ doc_id: @doc_id, v: 3, op: [i: "l", p: 3 ] }
|
||||||
|
{ doc_id: @doc_id, v: 4, op: [i: "o", p: 4 ] }
|
||||||
|
{ doc_id: @doc_id, v: 0, op: [i: "world", p: 1 ] }
|
||||||
|
]
|
||||||
|
@result = ["hello", "world", ""]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
|
||||||
|
it "should be able to continue applying updates when the project has been deleted", (done) ->
|
||||||
|
actions = []
|
||||||
|
for update in @updates.slice(0,5)
|
||||||
|
do (update) =>
|
||||||
|
actions.push (callback) => DocUpdaterClient.sendUpdate @project_id, @doc_id, update, callback
|
||||||
|
actions.push (callback) => DocUpdaterClient.deleteDoc @project_id, @doc_id, callback
|
||||||
|
for update in @updates.slice(5)
|
||||||
|
do (update) =>
|
||||||
|
actions.push (callback) => DocUpdaterClient.sendUpdate @project_id, @doc_id, update, callback
|
||||||
|
|
||||||
|
async.series actions, (error) =>
|
||||||
|
throw error if error?
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
doc.lines.should.deep.equal @result
|
||||||
|
done()
|
||||||
|
|
||||||
|
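		# Ops in mongo live in a capped docOps array, so an old incoming op can
		# force earlier ops to be reloaded from mongo once the redis copy has
		# been deleted. The $push with "$slice: -2" below artificially trims the
		# array back to simulate that state.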
describe "when the mongo array has been trimmed", ->
|
||||||
|
before ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
@lines = ["", "", ""]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
|
||||||
|
@updates = [
|
||||||
|
{ doc_id: @doc_id, v: 0, op: [i: "h", p: 0 ] }
|
||||||
|
{ doc_id: @doc_id, v: 1, op: [i: "e", p: 1 ] }
|
||||||
|
{ doc_id: @doc_id, v: 2, op: [i: "l", p: 2 ] }
|
||||||
|
{ doc_id: @doc_id, v: 3, op: [i: "l", p: 3 ] }
|
||||||
|
{ doc_id: @doc_id, v: 4, op: [i: "o", p: 4 ] }
|
||||||
|
{ doc_id: @doc_id, v: 3, op: [i: "world", p: 4 ] }
|
||||||
|
]
|
||||||
|
@result = ["hello", "world", ""]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
|
||||||
|
it "should be able to reload the required ops from the trimmed mongo array", (done) ->
|
||||||
|
actions = []
|
||||||
|
# Apply first set of ops
|
||||||
|
for update in @updates.slice(0,5)
|
||||||
|
do (update) =>
|
||||||
|
actions.push (callback) => DocUpdaterClient.sendUpdate @project_id, @doc_id, update, callback
|
||||||
|
# Delete doc from redis and trim ops back to version 3
|
||||||
|
actions.push (callback) => DocUpdaterClient.deleteDoc @project_id, @doc_id, callback
|
||||||
|
actions.push (callback) =>
|
||||||
|
db.docOps.update({doc_id: ObjectId(@doc_id)}, {$push: docOps: { $each: [], $slice: -2 }}, callback)
|
||||||
|
# Apply older update back from version 3
|
||||||
|
for update in @updates.slice(5)
|
||||||
|
do (update) =>
|
||||||
|
actions.push (callback) => DocUpdaterClient.sendUpdate @project_id, @doc_id, update, callback
|
||||||
|
# Flush ops to mongo
|
||||||
|
actions.push (callback) => DocUpdaterClient.flushDoc @project_id, @doc_id, callback
|
||||||
|
|
||||||
|
async.series actions, (error) =>
|
||||||
|
throw error if error?
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
db.docOps.find {doc_id: ObjectId(@doc_id)}, (error, docOps) =>
|
||||||
|
# Check mongo array has been trimmed
|
||||||
|
docOps = docOps[0]
|
||||||
|
docOps.docOps.length.should.equal 3
|
||||||
|
# Check ops have all be applied properly
|
||||||
|
doc.lines.should.deep.equal @result
|
||||||
|
done()
|
||||||
|
|
||||||
|
describe "with a broken update", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
DocUpdaterClient.sendUpdate @project_id, @doc_id, @undefined, (error) ->
|
||||||
|
throw error if error?
|
||||||
|
setTimeout done, 200
|
||||||
|
|
||||||
|
it "should not update the doc", (done) ->
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
doc.lines.should.deep.equal @lines
|
||||||
|
done()
|
||||||
|
|
|
@@ -0,0 +1,89 @@
sinon = require "sinon"
chai = require("chai")
chai.should()

MockWebApi = require "./helpers/MockWebApi"
DocUpdaterClient = require "./helpers/DocUpdaterClient"

describe "Deleting a document", ->
	before ->
		@lines = ["one", "two", "three"]
		@update =
			doc: @doc_id
			op: [{
				i: "one and a half\n"
				p: 4
			}]
			v: 0
		@result = ["one", "one and a half", "two", "three"]

	describe "when the updated doc exists in the doc updater", ->
		before (done) ->
			[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
			MockWebApi.insertDoc @project_id, @doc_id, {
				lines: @lines
			}
			sinon.spy MockWebApi, "setDocumentLines"
			sinon.spy MockWebApi, "getDocument"
			DocUpdaterClient.preloadDoc @project_id, @doc_id, (error) =>
				throw error if error?
				DocUpdaterClient.sendUpdate @project_id, @doc_id, @update, (error) =>
					throw error if error?
					setTimeout () =>
						DocUpdaterClient.deleteDoc @project_id, @doc_id, (error, res, body) =>
							@statusCode = res.statusCode
							done()
					, 200

		after ->
			MockWebApi.setDocumentLines.restore()
			MockWebApi.getDocument.restore()

		it "should return a 204 status code", ->
			@statusCode.should.equal 204

		it "should send the updated document to the web api", ->
			MockWebApi.setDocumentLines
				.calledWith(@project_id, @doc_id, @result)
				.should.equal true

		it "should need to reload the doc if read again", (done) ->
			getDocumentCallCount = MockWebApi.getDocument.callCount
			DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
				MockWebApi.getDocument.callCount.should.equal getDocumentCallCount + 1
				MockWebApi.getDocument
					.calledWith(@project_id, @doc_id)
					.should.equal true
				done()

	describe "when the doc is not in the doc updater", ->
		before (done) ->
			[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
			MockWebApi.insertDoc @project_id, @doc_id, {
				lines: @lines
			}
			sinon.spy MockWebApi, "setDocumentLines"
			sinon.spy MockWebApi, "getDocument"
			DocUpdaterClient.deleteDoc @project_id, @doc_id, (error, res, body) =>
				@statusCode = res.statusCode
				done()

		after ->
			MockWebApi.setDocumentLines.restore()
			MockWebApi.getDocument.restore()

		it "should return a 204 status code", ->
			@statusCode.should.equal 204

		it "should not need to send the updated document to the web api", ->
			MockWebApi.setDocumentLines.called.should.equal false

		it "should need to reload the doc if read again", (done) ->
			getDocumentCallCount = MockWebApi.getDocument.callCount
			DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
				MockWebApi.getDocument.callCount.should.equal getDocumentCallCount + 1
				MockWebApi.getDocument
					.calledWith(@project_id, @doc_id)
					.should.equal true
				done()
@@ -0,0 +1,81 @@
sinon = require "sinon"
chai = require("chai")
chai.should()
async = require "async"

MockWebApi = require "./helpers/MockWebApi"
DocUpdaterClient = require "./helpers/DocUpdaterClient"

describe "Deleting a project", ->
	before ->
		@project_id = DocUpdaterClient.randomId()
		@docs = [{
			id: doc_id0 = DocUpdaterClient.randomId()
			lines: ["one", "two", "three"]
			update:
				doc: doc_id0
				op: [{
					i: "one and a half\n"
					p: 4
				}]
				v: 0
			updatedLines: ["one", "one and a half", "two", "three"]
		}, {
			id: doc_id1 = DocUpdaterClient.randomId()
			lines: ["four", "five", "six"]
			update:
				doc: doc_id1
				op: [{
					i: "four and a half\n"
					p: 5
				}]
				v: 0
			updatedLines: ["four", "four and a half", "five", "six"]
		}]
		for doc in @docs
			MockWebApi.insertDoc @project_id, doc.id, {
				lines: doc.lines
			}

	describe "with documents which have been updated", ->
		before (done) ->
			sinon.spy MockWebApi, "setDocumentLines"
			async.series @docs.map((doc) =>
				(callback) =>
					DocUpdaterClient.preloadDoc @project_id, doc.id, (error) =>
						return callback(error) if error?
						DocUpdaterClient.sendUpdate @project_id, doc.id, doc.update, (error) =>
							callback(error)
			), (error) =>
				throw error if error?
				setTimeout () =>
					DocUpdaterClient.deleteProject @project_id, (error, res, body) =>
						@statusCode = res.statusCode
						done()
				, 200

		after ->
			MockWebApi.setDocumentLines.restore()

		it "should return a 204 status code", ->
			@statusCode.should.equal 204

		it "should send each document to the web api", ->
			for doc in @docs
				MockWebApi.setDocumentLines
					.calledWith(@project_id, doc.id, doc.updatedLines)
					.should.equal true

		it "should need to reload the docs if read again", (done) ->
			sinon.spy MockWebApi, "getDocument"
			async.series @docs.map((doc) =>
				(callback) =>
					MockWebApi.getDocument.calledWith(@project_id, doc.id).should.equal false
					DocUpdaterClient.getDoc @project_id, doc.id, (error, res, returnedDoc) =>
						MockWebApi.getDocument.calledWith(@project_id, doc.id).should.equal true
						callback()
			), () ->
				MockWebApi.getDocument.restore()
				done()
@@ -0,0 +1,76 @@
sinon = require "sinon"
chai = require("chai")
chai.should()
async = require "async"

MockWebApi = require "./helpers/MockWebApi"
DocUpdaterClient = require "./helpers/DocUpdaterClient"

describe "Flushing a project", ->
	before ->
		@project_id = DocUpdaterClient.randomId()
		@docs = [{
			id: doc_id0 = DocUpdaterClient.randomId()
			lines: ["one", "two", "three"]
			update:
				doc: doc_id0
				op: [{
					i: "one and a half\n"
					p: 4
				}]
				v: 0
			updatedLines: ["one", "one and a half", "two", "three"]
		}, {
			id: doc_id1 = DocUpdaterClient.randomId()
			lines: ["four", "five", "six"]
			update:
				doc: doc_id1
				op: [{
					i: "four and a half\n"
					p: 5
				}]
				v: 0
			updatedLines: ["four", "four and a half", "five", "six"]
		}]
		for doc in @docs
			MockWebApi.insertDoc @project_id, doc.id, {
				lines: doc.lines
			}

	describe "with documents which have been updated", ->
		before (done) ->
			sinon.spy MockWebApi, "setDocumentLines"
			async.series @docs.map((doc) =>
				(callback) =>
					DocUpdaterClient.preloadDoc @project_id, doc.id, (error) =>
						return callback(error) if error?
						DocUpdaterClient.sendUpdate @project_id, doc.id, doc.update, (error) =>
							callback(error)
			), (error) =>
				throw error if error?
				setTimeout () =>
					DocUpdaterClient.flushProject @project_id, (error, res, body) =>
						@statusCode = res.statusCode
						done()
				, 200

		after ->
			MockWebApi.setDocumentLines.restore()

		it "should return a 204 status code", ->
			@statusCode.should.equal 204

		it "should send each document to the web api", ->
			for doc in @docs
				MockWebApi.setDocumentLines
					.calledWith(@project_id, doc.id, doc.updatedLines)
					.should.equal true

		it "should update the lines in the doc updater", (done) ->
			async.series @docs.map((doc) =>
				(callback) =>
					DocUpdaterClient.getDoc @project_id, doc.id, (error, res, returnedDoc) =>
						returnedDoc.lines.should.deep.equal doc.updatedLines
						callback()
			), done
@@ -0,0 +1,97 @@
sinon = require "sinon"
chai = require("chai")
chai.should()
async = require "async"

MockWebApi = require "./helpers/MockWebApi"
DocUpdaterClient = require "./helpers/DocUpdaterClient"
mongojs = require "../../../app/js/mongojs"
db = mongojs.db
ObjectId = mongojs.ObjectId

describe "Flushing a doc to Mongo", ->
	before ->
		@lines = ["one", "two", "three"]
		@update =
			doc: @doc_id
			op: [{
				i: "one and a half\n"
				p: 4
			}]
			v: 0
		@result = ["one", "one and a half", "two", "three"]
		MockWebApi.insertDoc @project_id, @doc_id, {
			lines: @lines
		}

	describe "when the updated doc exists in the doc updater", ->
		before (done) ->
			[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
			MockWebApi.insertDoc @project_id, @doc_id, {
				lines: @lines
			}
			sinon.spy MockWebApi, "setDocumentLines"

			DocUpdaterClient.sendUpdates @project_id, @doc_id, [@update], (error) =>
				throw error if error?
				setTimeout () =>
					DocUpdaterClient.flushDoc @project_id, @doc_id, done
				, 200

		after ->
			MockWebApi.setDocumentLines.restore()

		it "should flush the updated document to the web api", ->
			MockWebApi.setDocumentLines
				.calledWith(@project_id, @doc_id, @result)
				.should.equal true

		it "should flush the doc ops to Mongo", (done) ->
			db.docOps.find doc_id: ObjectId(@doc_id), (error, docs) =>
				doc = docs[0]
				doc.docOps[0].op.should.deep.equal @update.op
				done()
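	# Mongo only keeps a window of recent ops per doc (the flush uses $slice to
	# cap the docOps array at the last 100 entries), so after 1000 updates only
	# the last 100 ops should survive, in order.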
describe "when the doc has a large number of ops to be flushed", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
@updates = []
|
||||||
|
for v in [0..999]
|
||||||
|
@updates.push
|
||||||
|
doc_id: @doc_id,
|
||||||
|
op: [i: v.toString(), p: 0]
|
||||||
|
v: v
|
||||||
|
|
||||||
|
DocUpdaterClient.sendUpdates @project_id, @doc_id, @updates, (error) =>
|
||||||
|
throw error if error?
|
||||||
|
setTimeout () =>
|
||||||
|
DocUpdaterClient.flushDoc @project_id, @doc_id, done
|
||||||
|
, 200
|
||||||
|
|
||||||
|
it "should flush the doc ops to Mongo in order", (done) ->
|
||||||
|
db.docOps.find doc_id: ObjectId(@doc_id), (error, docs) =>
|
||||||
|
doc = docs[0]
|
||||||
|
updates = @updates.slice(-100)
|
||||||
|
for update, i in doc.docOps
|
||||||
|
update.op.should.deep.equal updates[i].op
|
||||||
|
done()
|
||||||
|
|
||||||
|
describe "when the doc does not exist in the doc updater", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines
|
||||||
|
}
|
||||||
|
sinon.spy MockWebApi, "setDocumentLines"
|
||||||
|
DocUpdaterClient.flushDoc @project_id, @doc_id, done
|
||||||
|
|
||||||
|
after ->
|
||||||
|
MockWebApi.setDocumentLines.restore()
|
||||||
|
|
||||||
|
it "should not flush the doc to the web api", ->
|
||||||
|
MockWebApi.setDocumentLines.called.should.equal false
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,107 @@
sinon = require "sinon"
chai = require("chai")
chai.should()

MockWebApi = require "./helpers/MockWebApi"
DocUpdaterClient = require "./helpers/DocUpdaterClient"

describe "Getting a document", ->
	describe "when the document is not loaded", ->
		before (done) ->
			[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
			MockWebApi.insertDoc @project_id, @doc_id, {
				lines: @lines = ["one", "two", "three"]
			}
			sinon.spy MockWebApi, "getDocument"
			DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, @returnedDoc) => done()

		after ->
			MockWebApi.getDocument.restore()

		it "should load the document from the web API", ->
			MockWebApi.getDocument
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should return the document lines", ->
			@returnedDoc.lines.should.deep.equal @lines

		it "should return the document at version 0", ->
			@returnedDoc.version.should.equal 0

	describe "when the document is already loaded", ->
		before (done) ->
			[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
			MockWebApi.insertDoc @project_id, @doc_id, {
				lines: @lines = ["one", "two", "three"]
			}

			DocUpdaterClient.preloadDoc @project_id, @doc_id, (error) =>
				throw error if error?
				sinon.spy MockWebApi, "getDocument"
				DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, @returnedDoc) => done()

		after ->
			MockWebApi.getDocument.restore()

		it "should not load the document from the web API", ->
			MockWebApi.getDocument.called.should.equal false

		it "should return the document lines", ->
			@returnedDoc.lines.should.deep.equal @lines
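	# getDocAndRecentOps asks for the doc plus all ops from a given version
	# onwards (fromVersion=90 below), so with 100 updates applied it should
	# come back with exactly the last 10 ops.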
describe "when the request asks for some recent ops", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
MockWebApi.insertDoc @project_id, @doc_id, {
|
||||||
|
lines: @lines = ["one", "two", "three"]
|
||||||
|
}
|
||||||
|
|
||||||
|
@updates = for v in [0..99]
|
||||||
|
doc_id: @doc_id,
|
||||||
|
op: [i: v.toString(), p: 0]
|
||||||
|
v: v
|
||||||
|
|
||||||
|
DocUpdaterClient.sendUpdates @project_id, @doc_id, @updates, (error) =>
|
||||||
|
throw error if error?
|
||||||
|
sinon.spy MockWebApi, "getDocument"
|
||||||
|
DocUpdaterClient.getDocAndRecentOps @project_id, @doc_id, 90, (error, res, @returnedDoc) => done()
|
||||||
|
|
||||||
|
after ->
|
||||||
|
MockWebApi.getDocument.restore()
|
||||||
|
|
||||||
|
it "should return the recent ops", ->
|
||||||
|
@returnedDoc.ops.length.should.equal 10
|
||||||
|
for update, i in @updates.slice(90, -1)
|
||||||
|
@returnedDoc.ops[i].op.should.deep.equal update.op
|
||||||
|
|
||||||
|
|
||||||
|
describe "when the document does not exist", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
@statusCode = res.statusCode
|
||||||
|
done()
|
||||||
|
|
||||||
|
it "should return 404", ->
|
||||||
|
@statusCode.should.equal 404
|
||||||
|
|
||||||
|
describe "when the web api returns an error", ->
|
||||||
|
before (done) ->
|
||||||
|
[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
|
||||||
|
sinon.stub MockWebApi, "getDocument", (project_id, doc_id, callback = (error, doc) ->) ->
|
||||||
|
callback new Error("oops")
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
@statusCode = res.statusCode
|
||||||
|
done()
|
||||||
|
|
||||||
|
after ->
|
||||||
|
MockWebApi.getDocument.restore()
|
||||||
|
|
||||||
|
it "should return 500", ->
|
||||||
|
@statusCode.should.equal 500
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,58 @@
sinon = require "sinon"
chai = require("chai")
chai.should()

MockWebApi = require "./helpers/MockWebApi"
DocUpdaterClient = require "./helpers/DocUpdaterClient"

describe "Setting a document", ->
	before ->
		[@project_id, @doc_id] = [DocUpdaterClient.randomId(), DocUpdaterClient.randomId()]
		@lines = ["one", "two", "three"]
		@update =
			doc: @doc_id
			op: [{
				i: "one and a half\n"
				p: 4
			}]
			v: 0
		@result = ["one", "one and a half", "two", "three"]
		@newLines = ["these", "are", "the", "new", "lines"]
		MockWebApi.insertDoc @project_id, @doc_id, {
			lines: @lines
		}

	describe "when the updated doc exists in the doc updater", ->
		before (done) ->
			sinon.spy MockWebApi, "setDocumentLines"
			DocUpdaterClient.preloadDoc @project_id, @doc_id, (error) =>
				throw error if error?
				DocUpdaterClient.sendUpdate @project_id, @doc_id, @update, (error) =>
					throw error if error?
					setTimeout () =>
						DocUpdaterClient.setDocLines @project_id, @doc_id, @newLines, (error, res, body) =>
							@statusCode = res.statusCode
							done()
					, 200

		after ->
			MockWebApi.setDocumentLines.restore()

		it "should return a 204 status code", ->
			@statusCode.should.equal 204

		it "should send the updated document to the web api", ->
			MockWebApi.setDocumentLines
				.calledWith(@project_id, @doc_id, @newLines)
				.should.equal true

		it "should update the lines in the doc updater", (done) ->
			DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
				doc.lines.should.deep.equal @newLines
				done()
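		# One real-time update (v0 -> v1) plus the setDoc diff, which is applied
		# as a single external update (v1 -> v2), should leave the doc at version 2.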
it "should bump the version in the doc updater", (done) ->
|
||||||
|
DocUpdaterClient.getDoc @project_id, @doc_id, (error, res, doc) =>
|
||||||
|
doc.version.should.equal 2
|
||||||
|
done()
|
||||||
|
|
|
@@ -0,0 +1,66 @@
rclient = require("redis").createClient()
request = require("request").defaults(jar: false)
async = require "async"

module.exports = DocUpdaterClient =
	randomId: () ->
		return require("../../../../app/js/mongojs").ObjectId().toString()
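	# Queue an update the way it arrives in production (presumably): push it
	# onto the doc's "PendingUpdates" list, record the doc in the
	# "DocsWithPendingUpdates" set, then publish on the "pending-updates"
	# channel to wake the document updater.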
	sendUpdate: (project_id, doc_id, update, callback = (error) ->) ->
		rclient.rpush "PendingUpdates:#{doc_id}", JSON.stringify(update), (error) ->
			return callback(error) if error?
			doc_key = "#{project_id}:#{doc_id}"
			rclient.sadd "DocsWithPendingUpdates", doc_key, (error) ->
				return callback(error) if error?
				rclient.publish "pending-updates", doc_key, callback

	sendUpdates: (project_id, doc_id, updates, callback = (error) ->) ->
		DocUpdaterClient.preloadDoc project_id, doc_id, (error) ->
			return callback(error) if error?
			jobs = []
			for update in updates
				do (update) ->
					jobs.push (callback) ->
						DocUpdaterClient.sendUpdate project_id, doc_id, update, callback
			async.series jobs, callback

	getDoc: (project_id, doc_id, callback = (error, res, body) ->) ->
		request.get "http://localhost:3003/project/#{project_id}/doc/#{doc_id}", (error, res, body) ->
			if body? and res.statusCode >= 200 and res.statusCode < 300
				body = JSON.parse(body)
			callback error, res, body

	getDocAndRecentOps: (project_id, doc_id, fromVersion, callback = (error, res, body) ->) ->
		request.get "http://localhost:3003/project/#{project_id}/doc/#{doc_id}?fromVersion=#{fromVersion}", (error, res, body) ->
			if body? and res.statusCode >= 200 and res.statusCode < 300
				body = JSON.parse(body)
			callback error, res, body

	preloadDoc: (project_id, doc_id, callback = (error) ->) ->
		DocUpdaterClient.getDoc project_id, doc_id, callback

	flushDoc: (project_id, doc_id, callback = (error) ->) ->
		request.post "http://localhost:3003/project/#{project_id}/doc/#{doc_id}/flush", (error, res, body) ->
			callback error, res, body

	setDocLines: (project_id, doc_id, lines, callback = (error) ->) ->
		request.post {
			url: "http://localhost:3003/project/#{project_id}/doc/#{doc_id}"
			json:
				lines: lines
		}, (error, res, body) ->
			callback error, res, body

	deleteDoc: (project_id, doc_id, callback = (error) ->) ->
		request.del "http://localhost:3003/project/#{project_id}/doc/#{doc_id}", (error, res, body) ->
			callback error, res, body

	flushProject: (project_id, callback = () ->) ->
		request.post "http://localhost:3003/project/#{project_id}/flush", callback

	deleteProject: (project_id, callback = () ->) ->
		request.del "http://localhost:3003/project/#{project_id}", callback
@@ -0,0 +1,40 @@
express = require("express")
app = express()

module.exports = MockWebApi =
	docs: {}

	clearDocs: () -> @docs = {}

	insertDoc: (project_id, doc_id, doc) ->
		@docs["#{project_id}:#{doc_id}"] = doc

	setDocumentLines: (project_id, doc_id, lines, callback = (error) ->) ->
		@docs["#{project_id}:#{doc_id}"] ||= {}
		@docs["#{project_id}:#{doc_id}"].lines = lines
		callback null

	getDocument: (project_id, doc_id, callback = (error, doc) ->) ->
		callback null, @docs["#{project_id}:#{doc_id}"]
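	# Serve the in-memory doc store over the same GET/POST routes as the real
	# web API, on port 3000 (the doc updater under test is addressed on port
	# 3003 by DocUpdaterClient).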
	run: () ->
		app.get "/project/:project_id/doc/:doc_id", (req, res, next) =>
			@getDocument req.params.project_id, req.params.doc_id, (error, doc) ->
				if error?
					res.send 500
				else if doc?
					res.send JSON.stringify doc
				else
					res.send 404

		app.post "/project/:project_id/doc/:doc_id", express.bodyParser(), (req, res, next) =>
			@setDocumentLines req.params.project_id, req.params.doc_id, req.body.lines, (error) ->
				if error?
					res.send 500
				else
					res.send 204

		app.listen(3000)

MockWebApi.run()
@@ -0,0 +1,58 @@
require('coffee-script')
assert = require('assert')
path = require('path')
modulePath = path.join __dirname, '../../../app/js/RedisManager.js'
keys = require(path.join __dirname, '../../../app/js/RedisKeyBuilder.js')
project_id = 1234
doc_id = 5678
loadModule = require('./module-loader').loadModule

describe 'putting a doc into memory', () ->
	lines = ["this is one line", "and another line"]
	version = 42

	potentialSets = {}
	potentialSets[keys.docLines(doc_id:doc_id)] = lines
	potentialSets[keys.projectKey(doc_id:doc_id)] = project_id
	potentialSets[keys.docVersion(doc_id:doc_id)] = version

	potentialSAdds = {}
	potentialSAdds[keys.allDocs] = doc_id
	potentialSAdds[keys.docsInProject(project_id:project_id)] = doc_id

	potentialDels = {}
	potentialDels[keys.docOps(doc_id:doc_id)] = true
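	# The mock redis client below ticks each set/sadd/del off against the
	# expected values above; after putDocInMemory runs, all three maps should
	# be empty, i.e. every expected write (and no other) was made.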
	mocks =
		"logger-sharelatex": log: ->
		redis:
			createClient: () ->
				auth: ->
				multi: () ->
					set: (key, value) ->
						result = potentialSets[key]
						delete potentialSets[key]
						if key == keys.docLines(doc_id:doc_id)
							value = JSON.parse(value)
						assert.deepEqual result, value
					incr: () ->
					sadd: (key, value) ->
						result = potentialSAdds[key]
						delete potentialSAdds[key]
						assert.equal result, value
					del: (key) ->
						result = potentialDels[key]
						delete potentialDels[key]
						assert.equal result, true
					exec: (callback) ->
						callback()

	redisManager = loadModule(modulePath, mocks).module.exports

	it 'should put all the data into memory', (done) ->
		redisManager.putDocInMemory project_id, doc_id, lines, version, () ->
			assert.deepEqual potentialSets, {}
			assert.deepEqual potentialSAdds, {}
			assert.deepEqual potentialDels, {}
			done()
@@ -0,0 +1,27 @@
assert = require('chai').assert
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../app/js/RedisManager.js"
SandboxedModule = require('sandboxed-module')

doc_id = "1234"

describe 'RedisManager - getUpdatesLength', ->
	beforeEach ->
		@llenStub = sinon.stub()
		@redisManager = SandboxedModule.require modulePath, requires:
			redis:
				createClient: =>
					auth: ->
					llen: @llenStub

	it "should return the number of updates waiting in the queue", (done) ->
		@llenStub.callsArgWith(1, null, 3)
		@redisManager.getUpdatesLength doc_id, (err, len) =>
			@llenStub.calledWith("PendingUpdates:#{doc_id}").should.equal true
			len.should.equal 3
			done()
@@ -0,0 +1,56 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
expect = chai.expect
modulePath = "../../../../app/js/DiffCodec.js"
SandboxedModule = require('sandboxed-module')

describe "DiffCodec", ->
	beforeEach ->
		@callback = sinon.stub()
		@DiffCodec = SandboxedModule.require modulePath
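	# diffAsShareJsOp diffs two sets of lines and expresses the result as
	# ShareJS text ops: {i, p} for inserts and {d, p} for deletes, with later
	# positions shifted by the ops that come before them.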
describe "diffAsShareJsOps", ->
|
||||||
|
it "should insert new text correctly", (done) ->
|
||||||
|
@before = ["hello world"]
|
||||||
|
@after = ["hello beautiful world"]
|
||||||
|
@DiffCodec.diffAsShareJsOp @before, @after, (error, ops) ->
|
||||||
|
expect(ops).to.deep.equal [
|
||||||
|
i: "beautiful "
|
||||||
|
p: 6
|
||||||
|
]
|
||||||
|
done()
|
||||||
|
|
||||||
|
it "should shift later inserts by previous inserts", (done) ->
|
||||||
|
@before = ["the boy played with the ball"]
|
||||||
|
@after = ["the tall boy played with the red ball"]
|
||||||
|
@DiffCodec.diffAsShareJsOp @before, @after, (error, ops) ->
|
||||||
|
expect(ops).to.deep.equal [
|
||||||
|
{ i: "tall ", p: 4 }
|
||||||
|
{ i: "red ", p: 29 }
|
||||||
|
]
|
||||||
|
done()
|
||||||
|
|
||||||
|
it "should delete text correctly", (done) ->
|
||||||
|
@before = ["hello beautiful world"]
|
||||||
|
@after = ["hello world"]
|
||||||
|
@DiffCodec.diffAsShareJsOp @before, @after, (error, ops) ->
|
||||||
|
expect(ops).to.deep.equal [
|
||||||
|
d: "beautiful "
|
||||||
|
p: 6
|
||||||
|
]
|
||||||
|
done()
|
||||||
|
|
||||||
|
|
||||||
|
it "should shift later deletes by the first deletes", (done) ->
|
||||||
|
@before = ["the tall boy played with the red ball"]
|
||||||
|
@after = ["the boy played with the ball"]
|
||||||
|
@DiffCodec.diffAsShareJsOp @before, @after, (error, ops) ->
|
||||||
|
expect(ops).to.deep.equal [
|
||||||
|
{ d: "tall ", p: 4 }
|
||||||
|
{ d: "red ", p: 24 }
|
||||||
|
]
|
||||||
|
done()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,309 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/DocOpsManager.js"
SandboxedModule = require('sandboxed-module')
ObjectId = require("../../../../app/js/mongojs").ObjectId

describe "DocOpsManager", ->
	beforeEach ->
		@doc_id = ObjectId().toString()
		@project_id = "project-id"
		@callback = sinon.stub()
		@DocOpsManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./mongojs":
				db: @db = { docOps: {} }
				ObjectId: ObjectId
			"logger-sharelatex": @logger = { log: sinon.stub(), error: sinon.stub() }
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
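	# flushDocOpsToMongo reconciles the two stores: the ops fetched from redis
	# since the mongo version must account exactly for the version difference,
	# otherwise nothing is written and an error is reported.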
describe "flushDocOpsToMongo", ->
|
||||||
|
describe "when versions are consistent", ->
|
||||||
|
beforeEach ->
|
||||||
|
@mongo_version = 40
|
||||||
|
@redis_version = 42
|
||||||
|
@ops = [ "mock-op-1", "mock-op-2" ]
|
||||||
|
@DocOpsManager.getDocVersionInMongo = sinon.stub().callsArgWith(1, null, @mongo_version)
|
||||||
|
@RedisManager.getDocVersion = sinon.stub().callsArgWith(1, null, @redis_version)
|
||||||
|
@RedisManager.getPreviousDocOps = sinon.stub().callsArgWith(3, null, @ops)
|
||||||
|
@DocOpsManager._appendDocOpsInMongo = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager.flushDocOpsToMongo @project_id, @doc_id, @callback
|
||||||
|
|
||||||
|
it "should get the version from Mongo", ->
|
||||||
|
@DocOpsManager.getDocVersionInMongo
|
||||||
|
.calledWith(@doc_id)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should get the version from REdis", ->
|
||||||
|
@RedisManager.getDocVersion
|
||||||
|
.calledWith(@doc_id)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should get all doc ops since the version in Mongo", ->
|
||||||
|
@RedisManager.getPreviousDocOps
|
||||||
|
.calledWith(@doc_id, @mongo_version, -1)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should update Mongo with the new ops", ->
|
||||||
|
@DocOpsManager._appendDocOpsInMongo
|
||||||
|
.calledWith(@doc_id, @ops, @redis_version)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should call the callback", ->
|
||||||
|
@callback.called.should.equal true
|
||||||
|
|
||||||
|
it "should time the execution", ->
|
||||||
|
@Metrics.Timer::done.called.should.equal true
|
||||||
|
|
||||||
|
describe "when the number of ops does not match the difference in versions", ->
|
||||||
|
beforeEach ->
|
||||||
|
@mongo_version = 40
|
||||||
|
@redis_version = 45
|
||||||
|
@ops = [ "mock-op-1", "mock-op-2" ]
|
||||||
|
@DocOpsManager.getDocVersionInMongo = sinon.stub().callsArgWith(1, null, @mongo_version)
|
||||||
|
@RedisManager.getDocVersion = sinon.stub().callsArgWith(1, null, @redis_version)
|
||||||
|
@RedisManager.getPreviousDocOps = sinon.stub().callsArgWith(3, null, @ops)
|
||||||
|
@DocOpsManager._appendDocOpsInMongo = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager.flushDocOpsToMongo @project_id, @doc_id, @callback
|
||||||
|
|
||||||
|
it "should call the callback with an error", ->
|
||||||
|
@callback.calledWith(new Error("inconsistet versions")).should.equal true
|
||||||
|
|
||||||
|
it "should log an error", ->
|
||||||
|
@logger.error
|
||||||
|
.calledWith(doc_id: @doc_id, mongoVersion: @mongo_version, redisVersion: @redis_version, opsLength: @ops.length, "version difference does not match ops length")
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should not modify mongo", ->
|
||||||
|
@DocOpsManager._appendDocOpsInMongo.called.should.equal false
|
||||||
|
|
||||||
|
it "should time the execution", ->
|
||||||
|
@Metrics.Timer::done.called.should.equal true
|
||||||
|
|
||||||
|
describe "when redis version is behind mongo version", ->
|
||||||
|
beforeEach ->
|
||||||
|
@mongo_version = 40
|
||||||
|
@redis_version = 30
|
||||||
|
@DocOpsManager.getDocVersionInMongo = sinon.stub().callsArgWith(1, null, @mongo_version)
|
||||||
|
@RedisManager.getDocVersion = sinon.stub().callsArgWith(1, null, @redis_version)
|
||||||
|
@RedisManager.getPreviousDocOps = sinon.stub().callsArgWith(3, null, @ops)
|
||||||
|
@DocOpsManager._appendDocOpsInMongo = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager.flushDocOpsToMongo @project_id, @doc_id, @callback
|
||||||
|
|
||||||
|
it "should call the callback with an error", ->
|
||||||
|
@callback.calledWith(new Error("inconsistet versions")).should.equal true
|
||||||
|
|
||||||
|
it "should log an error", ->
|
||||||
|
@logger.error
|
||||||
|
.calledWith(doc_id: @doc_id, mongoVersion: @mongo_version, redisVersion: @redis_version, "mongo version is ahead of redis")
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should not modify mongo", ->
|
||||||
|
@DocOpsManager._appendDocOpsInMongo.called.should.equal false
|
||||||
|
|
||||||
|
it "should time the execution", ->
|
||||||
|
@Metrics.Timer::done.called.should.equal true
|
||||||
|
|
||||||
|
describe "getPreviousDocOps", ->
|
||||||
|
beforeEach ->
|
||||||
|
@ops = [ "mock-op-1", "mock-op-2" ]
|
||||||
|
@start = 30
|
||||||
|
@end = 32
|
||||||
|
@RedisManager.getPreviousDocOps = sinon.stub().callsArgWith(3, null, @ops)
|
||||||
|
@DocOpsManager._ensureOpsAreLoaded = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager.getPreviousDocOps @project_id, @doc_id, @start, @end, @callback
|
||||||
|
|
||||||
|
it "should ensure the ops are loaded back far enough", ->
|
||||||
|
@DocOpsManager._ensureOpsAreLoaded
|
||||||
|
.calledWith(@project_id, @doc_id, @start)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should get the previous doc ops", ->
|
||||||
|
@RedisManager.getPreviousDocOps
|
||||||
|
.calledWith(@doc_id, @start, @end)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should call the callback with the ops", ->
|
||||||
|
@callback.calledWith(null, @ops).should.equal true
|
||||||
|
|
||||||
|
it "should time the execution", ->
|
||||||
|
@Metrics.Timer::done.called.should.equal true
|
||||||
|
|
||||||
|
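	# _ensureOpsAreLoaded works out how far back redis goes (its version minus
	# the length of its ops list) and, if the requested version is older than
	# that, fetches the missing ops from mongo and prepends them to redis.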
describe "_ensureOpsAreLoaded", ->
|
||||||
|
describe "when the ops are not loaded", ->
|
||||||
|
beforeEach ->
|
||||||
|
@redisVersion = 42
|
||||||
|
@redisOpsLength = 10
|
||||||
|
@backToVersion = 30
|
||||||
|
@ops = [ "mock-op-1", "mock-op-2" ]
|
||||||
|
@RedisManager.getDocVersion = sinon.stub().callsArgWith(1, null, @redisVersion)
|
||||||
|
@RedisManager.getDocOpsLength = sinon.stub().callsArgWith(1, null, @redisOpsLength)
|
||||||
|
@DocOpsManager._getDocOpsFromMongo = sinon.stub().callsArgWith(3, null, @ops)
|
||||||
|
@RedisManager.prependDocOps = sinon.stub().callsArgWith(2, null)
|
||||||
|
@DocOpsManager._ensureOpsAreLoaded @project_id, @doc_id, @backToVersion, @callback
|
||||||
|
|
||||||
|
it "should get the doc version from redis", ->
|
||||||
|
@RedisManager.getDocVersion
|
||||||
|
.calledWith(@doc_id)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should get the doc ops length in redis", ->
|
||||||
|
@RedisManager.getDocOpsLength
|
||||||
|
.calledWith(@doc_id)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should get the doc ops that need loading from Mongo", ->
|
||||||
|
@DocOpsManager._getDocOpsFromMongo
|
||||||
|
.calledWith(@doc_id, @backToVersion, @redisVersion - @redisOpsLength)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should prepend the retrieved ops to redis", ->
|
||||||
|
@RedisManager.prependDocOps
|
||||||
|
.calledWith(@doc_id, @ops)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should call the callback", ->
|
||||||
|
@callback.called.should.equal true
|
||||||
|
|
||||||
|
describe "when the ops are loaded", ->
|
||||||
|
beforeEach ->
|
||||||
|
@redisVersion = 42
|
||||||
|
@redisOpsLength = 10
|
||||||
|
@backToVersion = 35
|
||||||
|
@RedisManager.getDocVersion = sinon.stub().callsArgWith(1, null, @redisVersion)
|
||||||
|
@RedisManager.getDocOpsLength = sinon.stub().callsArgWith(1, null, @redisOpsLength)
|
||||||
|
@DocOpsManager._getDocOpsFromMongo = sinon.stub().callsArgWith(3, null, @ops)
|
||||||
|
@RedisManager.prependDocOps = sinon.stub().callsArgWith(2, null)
|
||||||
|
@DocOpsManager._ensureOpsAreLoaded @project_id, @doc_id, @backToVersion, @callback
|
||||||
|
|
||||||
|
it "should not need to get the docs from Mongo or put any into redis", ->
|
||||||
|
@DocOpsManager._getDocOpsFromMongo.called.should.equal false
|
||||||
|
@RedisManager.prependDocOps.called.should.equal false
|
||||||
|
|
||||||
|
it "should call the callback", ->
|
||||||
|
@callback.called.should.equal true
|
||||||
|
|
||||||
|
describe "getDocVersionInMongo", ->
|
||||||
|
describe "when the doc exists", ->
|
||||||
|
beforeEach ->
|
||||||
|
@doc =
|
||||||
|
version: @version = 42
|
||||||
|
@db.docOps.find = sinon.stub().callsArgWith(2, null, [@doc])
|
||||||
|
@DocOpsManager.getDocVersionInMongo @doc_id, @callback
|
||||||
|
|
||||||
|
it "should look for the doc in the database", ->
|
||||||
|
@db.docOps.find
|
||||||
|
.calledWith({ doc_id: ObjectId(@doc_id) }, {version: 1})
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should call the callback with the version", ->
|
||||||
|
@callback.calledWith(null, @version).should.equal true
|
||||||
|
|
||||||
|
describe "when the doc doesn't exist", ->
|
||||||
|
beforeEach ->
|
||||||
|
@db.docOps.find = sinon.stub().callsArgWith(2, null, [])
|
||||||
|
@DocOpsManager.getDocVersionInMongo @doc_id, @callback
|
||||||
|
|
||||||
|
it "should call the callback with 0", ->
|
||||||
|
@callback.calledWith(null, 0).should.equal true
|
||||||
|
|
||||||
|
describe "_appendDocOpsInMongo", ->
|
||||||
|
describe "with a small set of updates", ->
|
||||||
|
beforeEach (done) ->
|
||||||
|
@ops = [ "mock-op-1", "mock-op-2" ]
|
||||||
|
@version = 42
|
||||||
|
@db.docOps.update = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager._appendDocOpsInMongo @doc_id, @ops, @version, (error) =>
|
||||||
|
@callback(error)
|
||||||
|
done()
|
||||||
|
|
||||||
|
it "should update the database", ->
|
||||||
|
@db.docOps.update
|
||||||
|
.calledWith({
|
||||||
|
doc_id: ObjectId(@doc_id)
|
||||||
|
}, {
|
||||||
|
$push: docOps: { $each: @ops, $slice: -100 }
|
||||||
|
$set: version: @version
|
||||||
|
}, {
|
||||||
|
upsert: true
|
||||||
|
})
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should call the callbak", ->
|
||||||
|
@callback.called.should.equal true
|
||||||
|
|
||||||
|
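		# Large op sets are written in batches of APPEND_OPS_BATCH_SIZE (forced
		# to 2 here), with the stored version advanced at each batch so the
		# docOps array and version fields never get out of step.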
describe "with a large set of updates", ->
|
||||||
|
beforeEach (done) ->
|
||||||
|
@ops = [ "mock-op-1", "mock-op-2", "mock-op-3", "mock-op-4", "mock-op-5" ]
|
||||||
|
@version = 42
|
||||||
|
@DocOpsManager.APPEND_OPS_BATCH_SIZE = 2
|
||||||
|
@db.docOps.update = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager._appendDocOpsInMongo @doc_id, @ops, @version, (error) =>
|
||||||
|
@callback(error)
|
||||||
|
done()
|
||||||
|
|
||||||
|
it "should update the database in batches", ->
|
||||||
|
@db.docOps.update
|
||||||
|
.calledWith({ doc_id: ObjectId(@doc_id) }, {
|
||||||
|
$push: docOps: { $each: @ops.slice(0,2), $slice: -100 }
|
||||||
|
$set: version: @version - 3
|
||||||
|
}, { upsert: true })
|
||||||
|
.should.equal true
|
||||||
|
@db.docOps.update
|
||||||
|
.calledWith({ doc_id: ObjectId(@doc_id) }, {
|
||||||
|
$push: docOps: { $each: @ops.slice(2,4), $slice: -100 }
|
||||||
|
$set: version: @version - 1
|
||||||
|
}, { upsert: true })
|
||||||
|
.should.equal true
|
||||||
|
@db.docOps.update
|
||||||
|
.calledWith({ doc_id: ObjectId(@doc_id) }, {
|
||||||
|
$push: docOps: { $each: @ops.slice(4,5), $slice: -100 }
|
||||||
|
$set: version: @version
|
||||||
|
}, { upsert: true })
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should call the callbak", ->
|
||||||
|
@callback.called.should.equal true
|
||||||
|
|
||||||
|
describe "with no updates", ->
|
||||||
|
beforeEach (done) ->
|
||||||
|
@ops = []
|
||||||
|
@version = 42
|
||||||
|
@db.docOps.update = sinon.stub().callsArg(3)
|
||||||
|
@DocOpsManager._appendDocOpsInMongo @doc_id, @ops, @version, (error) =>
|
||||||
|
@callback(error)
|
||||||
|
done()
|
||||||
|
|
||||||
|
it "should not try to update the database", ->
|
||||||
|
@db.docOps.update.called.should.equal false
|
||||||
|
|
||||||
|
describe "_getDocsOpsFromMongo", ->
|
||||||
|
beforeEach ->
|
||||||
|
@version = 42
|
||||||
|
@start = 32
|
||||||
|
@limit = 5
|
||||||
|
@doc =
|
||||||
|
docOps: ["mock-ops"]
|
||||||
|
@DocOpsManager.getDocVersionInMongo = sinon.stub().callsArgWith(1, null, @version)
|
||||||
|
@db.docOps.find = sinon.stub().callsArgWith(2, null, [@doc])
|
||||||
|
@DocOpsManager._getDocOpsFromMongo @doc_id, @start, @start + @limit, @callback
|
||||||
|
|
||||||
|
it "should get the current version", ->
|
||||||
|
@DocOpsManager.getDocVersionInMongo
|
||||||
|
.calledWith(@doc_id)
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should get the doc ops", ->
|
||||||
|
@db.docOps.find
|
||||||
|
.calledWith({ doc_id: ObjectId(@doc_id) }, {
|
||||||
|
docOps: $slice: [-(@version - @start), @limit]
|
||||||
|
})
|
||||||
|
.should.equal true
|
||||||
|
|
||||||
|
it "should return the ops", ->
|
||||||
|
@callback.calledWith(null, @doc.docOps).should.equal true
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,41 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/DocumentManager.js"
SandboxedModule = require('sandboxed-module')

describe "DocumentManager - flushAndDeleteDoc", ->
	beforeEach ->
		@DocumentManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./PersistenceManager": @PersistenceManager = {}
			"logger-sharelatex": @logger = {log: sinon.stub()}
			"./DocOpsManager": {}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@callback = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@RedisManager.removeDocFromMemory = sinon.stub().callsArg(2)
			@DocumentManager.flushDocIfLoaded = sinon.stub().callsArgWith(2)
			@DocumentManager.flushAndDeleteDoc @project_id, @doc_id, @callback

		it "should flush the doc", ->
			@DocumentManager.flushDocIfLoaded
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should remove the doc from redis", ->
			@RedisManager.removeDocFromMemory
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should call the callback without error", ->
			@callback.calledWith(null).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,73 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/DocumentManager.js"
SandboxedModule = require('sandboxed-module')

describe "DocumentManager - flushDocIfLoaded", ->
	beforeEach ->
		@DocumentManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./PersistenceManager": @PersistenceManager = {}
			"./DocOpsManager": @DocOpsManager = {}
			"logger-sharelatex": @logger = {log: sinon.stub()}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@version = 42
		@callback = sinon.stub()

	describe "when the doc is in Redis", ->
		beforeEach ->
			@RedisManager.getDoc = sinon.stub().callsArgWith(1, null, @lines, @version)
			@PersistenceManager.setDoc = sinon.stub().callsArgWith(3)
			@DocOpsManager.flushDocOpsToMongo = sinon.stub().callsArgWith(2)
			@DocumentManager.flushDocIfLoaded @project_id, @doc_id, @callback

		it "should get the doc from redis", ->
			@RedisManager.getDoc
				.calledWith(@doc_id)
				.should.equal true

		it "should write the doc lines to the persistence layer", ->
			@PersistenceManager.setDoc
				.calledWith(@project_id, @doc_id, @lines)
				.should.equal true

		it "should write the doc ops to mongo", ->
			@DocOpsManager.flushDocOpsToMongo
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should call the callback without error", ->
			@callback.calledWith(null).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the document is not in Redis", ->
		beforeEach ->
			@RedisManager.getDoc = sinon.stub().callsArgWith(1, null, null, null)
			@PersistenceManager.setDoc = sinon.stub().callsArgWith(3)
			@DocOpsManager.flushDocOpsToMongo = sinon.stub().callsArgWith(2)
			@DocumentManager.flushDocIfLoaded @project_id, @doc_id, @callback

		it "should get the doc from redis", ->
			@RedisManager.getDoc
				.calledWith(@doc_id)
				.should.equal true

		it "should not write anything to the persistence layer", ->
			@PersistenceManager.setDoc.called.should.equal false
			@DocOpsManager.flushDocOpsToMongo.called.should.equal false

		it "should call the callback without error", ->
			@callback.calledWith(null).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,67 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/DocumentManager.js"
SandboxedModule = require('sandboxed-module')

describe "DocumentManager - getDocAndRecentOps", ->
	beforeEach ->
		@DocumentManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./PersistenceManager": @PersistenceManager = {}
			"./DocOpsManager": @DocOpsManager = {}
			"logger-sharelatex": @logger = {log: sinon.stub()}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@version = 42
		@fromVersion = 40
		@ops = ["mock-op-1", "mock-op-2"]
		@callback = sinon.stub()

	describe "with a previous version specified", ->
		beforeEach ->
			@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @lines, @version)
			@DocOpsManager.getPreviousDocOps = sinon.stub().callsArgWith(4, null, @ops)
			@DocumentManager.getDocAndRecentOps @project_id, @doc_id, @fromVersion, @callback

		it "should get the doc", ->
			@DocumentManager.getDoc
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should get the doc ops", ->
			@DocOpsManager.getPreviousDocOps
				.calledWith(@project_id, @doc_id, @fromVersion, @version)
				.should.equal true

		it "should call the callback with the doc info", ->
			@callback.calledWith(null, @lines, @version, @ops).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "with no previous version specified", ->
		beforeEach ->
			@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @lines, @version)
			@DocOpsManager.getPreviousDocOps = sinon.stub().callsArgWith(4, null, @ops)
			@DocumentManager.getDocAndRecentOps @project_id, @doc_id, -1, @callback

		it "should get the doc", ->
			@DocumentManager.getDoc
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should not need to get the doc ops", ->
			@DocOpsManager.getPreviousDocOps.called.should.equal false

		it "should call the callback with the doc info", ->
			@callback.calledWith(null, @lines, @version, []).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,75 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/DocumentManager.js"
SandboxedModule = require('sandboxed-module')

describe "DocumentManager - getDoc", ->
	beforeEach ->
		@DocumentManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./PersistenceManager": @PersistenceManager = {}
			"./DocOpsManager": @DocOpsManager = {}
			"logger-sharelatex": @logger = {log: sinon.stub()}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@version = 42
		@callback = sinon.stub()

	describe "when the doc exists in Redis", ->
		beforeEach ->
			@RedisManager.getDoc = sinon.stub().callsArgWith(1, null, @lines, @version)
			@DocumentManager.getDoc @project_id, @doc_id, @callback

		it "should get the doc from Redis", ->
			@RedisManager.getDoc
				.calledWith(@doc_id)
				.should.equal true

		it "should call the callback with the doc info", ->
			@callback.calledWith(null, @lines, @version).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the doc does not exist in Redis", ->
		beforeEach ->
			@RedisManager.getDoc = sinon.stub().callsArgWith(1, null, null, null)
			@PersistenceManager.getDoc = sinon.stub().callsArgWith(2, null, @lines)
			@DocOpsManager.getDocVersionInMongo = sinon.stub().callsArgWith(1, null, @version)
			@RedisManager.putDocInMemory = sinon.stub().callsArg(4)
			@DocumentManager.getDoc @project_id, @doc_id, @callback

		it "should try to get the doc from Redis", ->
			@RedisManager.getDoc
				.calledWith(@doc_id)
				.should.equal true

		it "should get the doc version from Mongo", ->
			@DocOpsManager.getDocVersionInMongo
				.calledWith(@doc_id)
				.should.equal true

		it "should get the doc from the PersistenceManager", ->
			@PersistenceManager.getDoc
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should set the doc in Redis", ->
			@RedisManager.putDocInMemory
				.calledWith(@project_id, @doc_id, @lines, @version)
				.should.equal true

		it "should call the callback with the doc info", ->
			@callback.calledWith(null, @lines, @version).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,105 @@
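# Unit tests for DocumentManager.setDoc: new lines are diffed against the current doc and applied as an external ShareJS op, then flushed.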
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/DocumentManager.js"
SandboxedModule = require('sandboxed-module')

describe "DocumentManager - setDoc", ->
	beforeEach ->
		@DocumentManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./PersistenceManager": @PersistenceManager = {}
			"./DiffCodec": @DiffCodec = {}
			"./DocOpsManager": {}
			"./UpdateManager": @UpdateManager = {}
			"logger-sharelatex": @logger = {log: sinon.stub()}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@version = 42
		@ops = ["mock-ops"]
		@callback = sinon.stub()

	describe "with plain text lines", ->
		beforeEach ->
			@beforeLines = ["before", "lines"]
			@afterLines = ["after", "lines"]

		describe "successfully", ->
			beforeEach ->
				@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @beforeLines, @version)
				@DiffCodec.diffAsShareJsOp = sinon.stub().callsArgWith(2, null, @ops)
				@UpdateManager.applyUpdates = sinon.stub().callsArgWith(3, null)
				@DocumentManager.flushDocIfLoaded = sinon.stub().callsArg(2)
				@DocumentManager.setDoc @project_id, @doc_id, @afterLines, @callback

			it "should get the current doc lines", ->
				@DocumentManager.getDoc
					.calledWith(@project_id, @doc_id)
					.should.equal true

			it "should return a diff of the old and new lines", ->
				@DiffCodec.diffAsShareJsOp
					.calledWith(@beforeLines, @afterLines)
					.should.equal true

			it "should apply the diff as a ShareJS op", ->
				@UpdateManager.applyUpdates
					.calledWith(@project_id, @doc_id, [doc: @doc_id, v: @version, op: @ops, meta: { type: "external" }])
					.should.equal true

			it "should flush the doc to Mongo", ->
				@DocumentManager.flushDocIfLoaded
					.calledWith(@project_id, @doc_id)
					.should.equal true

			it "should call the callback", ->
				@callback.calledWith(null).should.equal true

			it "should time the execution", ->
				@Metrics.Timer::done.called.should.equal true

	describe "with json lines", ->
		beforeEach ->
			@beforeLines = [{text: "before"}, {text: "lines"}]
			@afterLines = ["after", "lines"]

		describe "successfully", ->
			beforeEach ->
				@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @beforeLines, @version)
				@DiffCodec.diffAsShareJsOp = sinon.stub().callsArgWith(2, null, @ops)
				@UpdateManager.applyUpdates = sinon.stub().callsArgWith(3, null)
				@DocumentManager.flushDocIfLoaded = sinon.stub().callsArg(2)
				@DocumentManager.setDoc @project_id, @doc_id, @afterLines, @callback

			it "should get the current doc lines", ->
				@DocumentManager.getDoc
					.calledWith(@project_id, @doc_id)
					.should.equal true

			it "should not try to get a diff", ->
				@DiffCodec.diffAsShareJsOp.called.should.equal false

			it "should call the callback", ->
				@callback.calledWith(null).should.equal true

	describe "without new lines", ->
		beforeEach ->
			@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @beforeLines, @version)
			@DocumentManager.setDoc @project_id, @doc_id, null, @callback

		it "should return the callback with an error", ->
			@callback.calledWith(new Error("No lines were passed to setDoc"))

		it "should not try to get the doc lines", ->
			@DocumentManager.getDoc.called.should.equal false
41
services/document-updater/test/unit/coffee/GettingDoc.coffee
Normal file
@@ -0,0 +1,41 @@
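# Unit tests for RedisManager.getDoc: doc lines and version are read in a single multi/exec from the doclines: and DocVersion: keys.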
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../app/js/RedisManager.js"
SandboxedModule = require('sandboxed-module')

describe 'RedisManager - getDoc', ->
	beforeEach ->
		@rclient = {}
		@rclient.auth = () ->
		@rclient.multi = () => @rclient

		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis": @redis =
				createClient: () => @rclient

		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@jsonlines = JSON.stringify @lines
		@version = 42
		@callback = sinon.stub()

		@rclient.get = sinon.stub()
		@rclient.exec = sinon.stub().callsArgWith(0, null, [@jsonlines, @version])

		@RedisManager.getDoc @doc_id, @callback

	it "should get the lines from redis", ->
		@rclient.get
			.calledWith("doclines:#{@doc_id}")
			.should.equal true

	it "should get the version from redis", ->
		@rclient.get
			.calledWith("DocVersion:#{@doc_id}")
			.should.equal true

	it 'should return the document', ->
		@callback
			.calledWith(null, @lines, @version)
			.should.equal true
@@ -0,0 +1,42 @@
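# Unit tests for RedisManager.getPendingUpdatesForDoc against an in-memory redis stand-in: reads and clears the pendingUpdates list for one doc only.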
assert = require('assert')
should = require('chai').should()
path = require('path')
modulePath = path.join __dirname, '../../../app/js/RedisManager.js'
_ = require('underscore')
loadModule = require('./module-loader').loadModule
keys = require(path.join __dirname, '../../../app/js/RedisKeyBuilder.js')

describe 'getting entire list of pending updates', ()->

	doc_id = 123
	redisMemory = {}
	correctUpdates = [{"update1"}, {"update2"}, {"update3"}]
	jsonCorrectUpdates = _.map correctUpdates, (d)-> JSON.stringify d
	redisMemory[keys.pendingUpdates(doc_id:doc_id)] = jsonCorrectUpdates
	redisMemory[keys.pendingUpdates(doc_id:"notThis")] = JSON.stringify([{"updatex"}, {"updatez"}])

	redisReturn = []

	mocks =
		redis:
			createClient: ()->
				auth:->
				multi: ()->
					lrange:(key, start, end)->
						key.should.equal(keys.pendingUpdates(doc_id:doc_id))
						start.should.equal(0)
						end.should.equal(-1)
						redisReturn.push(redisMemory[key])
					del : (key)->
						key.should.equal(keys.pendingUpdates(doc_id:doc_id))
						redisReturn.push(1)
					exec: (callback)->
						callback(null, redisReturn)

	redisManager = loadModule(modulePath, mocks).module.exports

	it 'should have 3 elements in array', (done)->
		redisManager.getPendingUpdatesForDoc doc_id, (err, listOfUpdates)->
			listOfUpdates.length.should.equal(3)
			done()
@@ -0,0 +1,47 @@
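# Unit tests for RedisManager.getCountOfDocsInMemory, backed by an in-memory stand-in for redis.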
require('coffee-script')
assert = require('assert')
should = require('chai').should()
path = require('path')
modulePath = path.join __dirname, '../../../app/js/RedisManager.js'
keys = require(path.join __dirname, '../../../app/js/RedisKeyBuilder.js')
loadModule = require('./module-loader').loadModule

describe 'getting count of docs from memory', ()->

	project_id = "12345"
	doc_id1 = "docid1"
	doc_id2 = "docid2"
	doc_id3 = "docid3"

	redisMemory = {}
	redisManager = undefined

	beforeEach (done)->
		mocks =
			"logger-sharelatex": log:->
			redis:
				createClient : ()->
					auth:->
					smembers:(key, callback)->
						callback(null, redisMemory[key])
					multi: ()->
						set:(key, value)->
							redisMemory[key] = value
						sadd:(key, value)->
							if !redisMemory[key]?
								redisMemory[key] = []
							redisMemory[key].push value
						del:()->
						exec:(callback)->
							callback()

		redisManager = loadModule(modulePath, mocks).module.exports
		redisManager.putDocInMemory project_id, doc_id1, 0, ["line"], ->
			redisManager.putDocInMemory project_id, doc_id2, 0, ["ledf"], ->
				redisManager.putDocInMemory project_id, doc_id3, 0, ["ledf"], ->
					done()

	it 'should return total', (done)->
		redisManager.getCountOfDocsInMemory (err, count)->
			assert.equal count, 3
			done()
@@ -0,0 +1,63 @@
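# Unit tests for HttpController.deleteProject: flushes and deletes every doc in the project, responding 204 No Content on success.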
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/HttpController.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors.js"

describe "HttpController - deleteProject", ->
	beforeEach ->
		@HttpController = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./ProjectManager": @ProjectManager = {}
			"logger-sharelatex" : @logger = { log: sinon.stub() }
			"./Metrics": @Metrics = {}

		@Metrics.Timer = class Timer
			done: sinon.stub()

		@project_id = "project-id-123"
		@res =
			send: sinon.stub()
		@req =
			params:
				project_id: @project_id
		@next = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@ProjectManager.flushAndDeleteProjectWithLocks = sinon.stub().callsArgWith(1)
			@HttpController.deleteProject(@req, @res, @next)

		it "should delete the project", ->
			@ProjectManager.flushAndDeleteProjectWithLocks
				.calledWith(@project_id)
				.should.equal true

		it "should return a successful No Content response", ->
			@res.send
				.calledWith(204)
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(project_id: @project_id, "deleting project via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when an error occurs", ->
		beforeEach ->
			@ProjectManager.flushAndDeleteProjectWithLocks = sinon.stub().callsArgWith(1, new Error("oops"))
			@HttpController.deleteProject(@req, @res, @next)

		it "should call next with the error", ->
			@next
				.calledWith(new Error("oops"))
				.should.equal true
@@ -0,0 +1,64 @@
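# Unit tests for HttpController.flushAndDeleteDoc: flushes and deletes a single doc, responding 204 No Content on success.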
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/HttpController.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors.js"

describe "HttpController - flushAndDeleteDoc", ->
	beforeEach ->
		@HttpController = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./ProjectManager": {}
			"logger-sharelatex" : @logger = { log: sinon.stub() }
			"./Metrics": @Metrics = {}

		@Metrics.Timer = class Timer
			done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@res =
			send: sinon.stub()
		@req =
			params:
				project_id: @project_id
				doc_id: @doc_id
		@next = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@DocumentManager.flushAndDeleteDocWithLock = sinon.stub().callsArgWith(2)
			@HttpController.flushAndDeleteDoc(@req, @res, @next)

		it "should flush and delete the doc", ->
			@DocumentManager.flushAndDeleteDocWithLock
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should return a successful No Content response", ->
			@res.send
				.calledWith(204)
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(doc_id: @doc_id, project_id: @project_id, "deleting doc via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when an error occurs", ->
		beforeEach ->
			@DocumentManager.flushAndDeleteDocWithLock = sinon.stub().callsArgWith(2, new Error("oops"))
			@HttpController.flushAndDeleteDoc(@req, @res, @next)

		it "should call next with the error", ->
			@next
				.calledWith(new Error("oops"))
				.should.equal true
@@ -0,0 +1,65 @@
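# Unit tests for HttpController.flushDocIfLoaded: flushes a doc to persistence if it is loaded, responding 204 No Content on success.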
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/HttpController.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors.js"

describe "HttpController - flushDocIfLoaded", ->
	beforeEach ->
		@HttpController = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./ProjectManager": {}
			"logger-sharelatex" : @logger = { log: sinon.stub() }
			"./Metrics": @Metrics = {}

		@Metrics.Timer = class Timer
			done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@version = 42
		@res =
			send: sinon.stub()
		@req =
			params:
				project_id: @project_id
				doc_id: @doc_id
		@next = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@DocumentManager.flushDocIfLoadedWithLock = sinon.stub().callsArgWith(2)
			@HttpController.flushDocIfLoaded(@req, @res, @next)

		it "should flush the doc", ->
			@DocumentManager.flushDocIfLoadedWithLock
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should return a successful No Content response", ->
			@res.send
				.calledWith(204)
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(doc_id: @doc_id, project_id: @project_id, "flushing doc via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when an error occurs", ->
		beforeEach ->
			@DocumentManager.flushDocIfLoadedWithLock = sinon.stub().callsArgWith(2, new Error("oops"))
			@HttpController.flushDocIfLoaded(@req, @res, @next)

		it "should call next with the error", ->
			@next
				.calledWith(new Error("oops"))
				.should.equal true
@@ -0,0 +1,62 @@
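# Unit tests for HttpController.flushProject: flushes every doc in the project, responding 204 No Content on success.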
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/HttpController.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors.js"

describe "HttpController - flushProject", ->
	beforeEach ->
		@HttpController = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./ProjectManager": @ProjectManager = {}
			"logger-sharelatex" : @logger = { log: sinon.stub() }
			"./Metrics": @Metrics = {}

		@Metrics.Timer = class Timer
			done: sinon.stub()

		@project_id = "project-id-123"
		@res =
			send: sinon.stub()
		@req =
			params:
				project_id: @project_id
		@next = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@ProjectManager.flushProjectWithLocks = sinon.stub().callsArgWith(1)
			@HttpController.flushProject(@req, @res, @next)

		it "should flush the project", ->
			@ProjectManager.flushProjectWithLocks
				.calledWith(@project_id)
				.should.equal true

		it "should return a successful No Content response", ->
			@res.send
				.calledWith(204)
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(project_id: @project_id, "flushing project via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when an error occurs", ->
		beforeEach ->
			@ProjectManager.flushProjectWithLocks = sinon.stub().callsArgWith(1, new Error("oops"))
			@HttpController.flushProject(@req, @res, @next)

		it "should call next with the error", ->
			@next
				.calledWith(new Error("oops"))
				.should.equal true
@@ -0,0 +1,110 @@
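# Unit tests for HttpController.getDoc: returns the doc as JSON, optionally with ops since the fromVersion query parameter, and 404s missing docs.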
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/HttpController.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors.js"

describe "HttpController - getDoc", ->
	beforeEach ->
		@HttpController = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./ProjectManager": {}
			"logger-sharelatex" : @logger = { log: sinon.stub() }
			"./Metrics": @Metrics = {}

		@Metrics.Timer = class Timer
			done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@ops = ["mock-op-1", "mock-op-2"]
		@version = 42
		@fromVersion = 42
		@res =
			send: sinon.stub()
		@req =
			params:
				project_id: @project_id
				doc_id: @doc_id
		@next = sinon.stub()

	describe "when the document exists and no recent ops are requested", ->
		beforeEach ->
			@DocumentManager.getDocAndRecentOpsWithLock = sinon.stub().callsArgWith(3, null, @lines, @version, [])
			@HttpController.getDoc(@req, @res, @next)

		it "should get the doc", ->
			@DocumentManager.getDocAndRecentOpsWithLock
				.calledWith(@project_id, @doc_id, -1)
				.should.equal true

		it "should return the doc as JSON", ->
			@res.send
				.calledWith(JSON.stringify({
					id: @doc_id
					lines: @lines
					version: @version
					ops: []
				}))
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(doc_id: @doc_id, project_id: @project_id, "getting doc via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when recent ops are requested", ->
		beforeEach ->
			@DocumentManager.getDocAndRecentOpsWithLock = sinon.stub().callsArgWith(3, null, @lines, @version, @ops)
			@req.query = fromVersion: "#{@fromVersion}"
			@HttpController.getDoc(@req, @res, @next)

		it "should get the doc", ->
			@DocumentManager.getDocAndRecentOpsWithLock
				.calledWith(@project_id, @doc_id, @fromVersion)
				.should.equal true

		it "should return the doc as JSON", ->
			@res.send
				.calledWith(JSON.stringify({
					id: @doc_id
					lines: @lines
					version: @version
					ops: @ops
				}))
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(doc_id: @doc_id, project_id: @project_id, "getting doc via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the document does not exist", ->
		beforeEach ->
			@DocumentManager.getDocAndRecentOpsWithLock = sinon.stub().callsArgWith(3, null, null, null)
			@HttpController.getDoc(@req, @res, @next)

		it "should call next with NotFoundError", ->
			@next
				.calledWith(new Errors.NotFoundError("not found"))
				.should.equal true

	describe "when an error occurs", ->
		beforeEach ->
			@DocumentManager.getDocAndRecentOpsWithLock = sinon.stub().callsArgWith(3, new Error("oops"), null, null)
			@HttpController.getDoc(@req, @res, @next)

		it "should call next with the error", ->
			@next
				.calledWith(new Error("oops"))
				.should.equal true
@@ -0,0 +1,67 @@
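# Unit tests for HttpController.setDoc: replaces the doc lines from the request body, responding 204 No Content on success.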
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/HttpController.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors.js"

describe "HttpController - setDoc", ->
	beforeEach ->
		@HttpController = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./ProjectManager": {}
			"logger-sharelatex" : @logger = { log: sinon.stub() }
			"./Metrics": @Metrics = {}

		@Metrics.Timer = class Timer
			done: sinon.stub()

		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@res =
			send: sinon.stub()
		@req =
			params:
				project_id: @project_id
				doc_id: @doc_id
			body:
				lines: @lines
		@next = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@DocumentManager.setDocWithLock = sinon.stub().callsArgWith(3)
			@HttpController.setDoc(@req, @res, @next)

		it "should set the doc", ->
			@DocumentManager.setDocWithLock
				.calledWith(@project_id, @doc_id)
				.should.equal true

		it "should return a successful No Content response", ->
			@res.send
				.calledWith(204)
				.should.equal true

		it "should log the request", ->
			@logger.log
				.calledWith(doc_id: @doc_id, project_id: @project_id, lines: @lines, "setting doc via http")
				.should.equal true

		it "should time the request", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when an error occurs", ->
		beforeEach ->
			@DocumentManager.setDocWithLock = sinon.stub().callsArgWith(3, new Error("oops"))
			@HttpController.setDoc(@req, @res, @next)

		it "should call next with the error", ->
			@next
				.calledWith(new Error("oops"))
				.should.equal true
@@ -0,0 +1,50 @@
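# Unit tests for LockManager.checkLock: inspects the Blocking:<doc_id> key without setting or expiring the lock.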
require('coffee-script')
sinon = require('sinon')
assert = require('assert')
should = require('chai').should() # enables the .should assertions used below
path = require('path')
modulePath = path.join __dirname, '../../../../app/js/LockManager.js'
keys = require(path.join __dirname, '../../../../app/js/RedisKeyBuilder.js')
project_id = 1234
doc_id = 5678
blockingKey = "Blocking:#{doc_id}"
loadModule = require('../module-loader').loadModule

describe 'LockManager - checking the lock', ()->

	existsStub = sinon.stub()
	setStub = sinon.stub()
	expireStub = sinon.stub()
	execStub = sinon.stub()

	mocks =
		"logger-sharelatex": log:->

		redis:
			createClient : ()->
				auth:->
				multi: ->
					exists: existsStub
					expire: expireStub
					set: setStub
					exec: execStub
	LockManager = loadModule(modulePath, mocks).module.exports

	it 'should check if the lock exists, but not set or expire it', (done)->
		execStub.callsArgWith(0, null, ["1"])
		LockManager.checkLock doc_id, (err, docIsLocked)->
			existsStub.calledWith(blockingKey).should.equal true
			setStub.called.should.equal false
			expireStub.called.should.equal false
			done()

	it 'should return true if the key does not exist', (done)->
		execStub.callsArgWith(0, null, "0")
		LockManager.checkLock doc_id, (err, free)->
			free.should.equal true
			done()

	it 'should return false if the key does exist', (done)->
		execStub.callsArgWith(0, null, "1")
		LockManager.checkLock doc_id, (err, free)->
			free.should.equal false
			done()
@@ -0,0 +1,28 @@
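# Unit tests for LockManager.releaseLock: deletes the Blocking:<doc_id> key.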
require('coffee-script')
sinon = require('sinon')
assert = require('assert')
should = require('chai').should() # enables the .should assertions used below
path = require('path')
modulePath = path.join __dirname, '../../../../app/js/LockManager.js'
keys = require(path.join __dirname, '../../../../app/js/RedisKeyBuilder.js')
project_id = 1234
doc_id = 5678
loadModule = require('../module-loader').loadModule

describe 'LockManager - releasing the lock', ()->

	deleteStub = sinon.stub().callsArgWith(1)
	mocks =
		"logger-sharelatex": log:->

		redis:
			createClient : ()->
				auth:->
				del:deleteStub

	LockManager = loadModule(modulePath, mocks).module.exports

	it 'should delete the blocking key from redis', (done)->
		LockManager.releaseLock doc_id, ->
			deleteStub.calledWith("Blocking:#{doc_id}").should.equal true
			done()
@@ -0,0 +1,69 @@
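# Unit tests for LockManager.getLock: retries tryLock until the lock is free, and errors with a timeout after MAX_LOCK_WAIT_TIME.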
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/LockManager.js"
SandboxedModule = require('sandboxed-module')

describe 'LockManager - getting the lock', ->
	beforeEach ->
		@LockManager = SandboxedModule.require modulePath, requires:
			"logger-sharelatex": log:->
			redis:
				createClient : () =>
					auth:->
		@callback = sinon.stub()
		@doc_id = "doc-id-123"

	describe "when the lock is not set", ->
		beforeEach (done) ->
			@LockManager.tryLock = sinon.stub().callsArgWith(1, null, true)
			@LockManager.getLock @doc_id, (args...) =>
				@callback(args...)
				done()

		it "should try to get the lock", ->
			@LockManager.tryLock
				.calledWith(@doc_id)
				.should.equal true

		it "should only need to try once", ->
			@LockManager.tryLock.callCount.should.equal 1

		it "should return the callback", ->
			@callback.calledWith(null).should.equal true

	describe "when the lock is initially set", ->
		beforeEach (done) ->
			startTime = Date.now()
			@LockManager.LOCK_TEST_INTERVAL = 5
			@LockManager.tryLock = (doc_id, callback = (error, isFree) ->) ->
				if Date.now() - startTime < 20
					callback null, false
				else
					callback null, true
			sinon.spy @LockManager, "tryLock"

			@LockManager.getLock @doc_id, (args...) =>
				@callback(args...)
				done()

		it "should call tryLock multiple times until free", ->
			(@LockManager.tryLock.callCount > 1).should.equal true

		it "should return the callback", ->
			@callback.calledWith(null).should.equal true

	describe "when the lock times out", ->
		beforeEach (done) ->
			time = Date.now()
			@LockManager.MAX_LOCK_WAIT_TIME = 5
			@LockManager.tryLock = sinon.stub().callsArgWith(1, null, false)
			@LockManager.getLock @doc_id, (args...) =>
				@callback(args...)
				done()

		it "should return the callback with an error", ->
			@callback.calledWith(new Error("timeout")).should.equal true
@@ -0,0 +1,37 @@
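# Unit tests for LockManager.tryLock: takes the lock with SET ... EX 10 NX and reports whether it was acquired.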
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/LockManager.js"
SandboxedModule = require('sandboxed-module')

describe 'LockManager - trying the lock', ->
	beforeEach ->
		@LockManager = SandboxedModule.require modulePath, requires:
			"logger-sharelatex": log:->
			redis:
				createClient : () =>
					auth:->
					set: @set = sinon.stub()
		@callback = sinon.stub()
		@doc_id = "doc-id-123"

	describe "when the lock is not set", ->
		beforeEach ->
			@set.callsArgWith(5, null, "OK")
			@LockManager.tryLock @doc_id, @callback

		it "should set the lock key with an expiry if it is not set", ->
			@set.calledWith("Blocking:#{@doc_id}", "locked", "EX", 10, "NX")
				.should.equal true

		it "should return the callback with true", ->
			@callback.calledWith(null, true).should.equal true

	describe "when the lock is already set", ->
		beforeEach ->
			@set.callsArgWith(5, null, null)
			@LockManager.tryLock @doc_id, @callback

		it "should return the callback with false", ->
			@callback.calledWith(null, false).should.equal true
@@ -0,0 +1,85 @@
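# Unit tests for PersistenceManager.getDoc: fetches doc lines from the web API, mapping 404 to NotFoundError and other failures to errors.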
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/PersistenceManager.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors"

describe "PersistenceManager.getDoc", ->
	beforeEach ->
		@PersistenceManager = SandboxedModule.require modulePath, requires:
			"request": @request = sinon.stub()
			"settings-sharelatex": @Settings = {}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@callback = sinon.stub()
		@Settings.apis =
			web:
				url: @url = "www.example.com"
				user: @user = "sharelatex"
				pass: @pass = "password"

	describe "with a successful response from the web api", ->
		beforeEach ->
			@request.callsArgWith(1, null, {statusCode: 200}, JSON.stringify(lines: @lines))
			@PersistenceManager.getDoc(@project_id, @doc_id, @callback)

		it "should call the web api", ->
			@request
				.calledWith({
					url: "#{@url}/project/#{@project_id}/doc/#{@doc_id}"
					method: "GET"
					headers:
						"accept": "application/json"
					auth:
						user: @user
						pass: @pass
						sendImmediately: true
					jar: false
				})
				.should.equal true

		it "should call the callback with the doc lines", ->
			@callback.calledWith(null, @lines).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when request returns an error", ->
		beforeEach ->
			@request.callsArgWith(1, @error = new Error("oops"), null, null)
			@PersistenceManager.getDoc(@project_id, @doc_id, @callback)

		it "should return the error", ->
			@callback.calledWith(@error).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the request returns 404", ->
		beforeEach ->
			@request.callsArgWith(1, null, {statusCode: 404}, "")
			@PersistenceManager.getDoc(@project_id, @doc_id, @callback)

		it "should return a NotFoundError", ->
			@callback.calledWith(new Errors.NotFoundError("not found")).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the request returns an error status code", ->
		beforeEach ->
			@request.callsArgWith(1, null, {statusCode: 500}, "")
			@PersistenceManager.getDoc(@project_id, @doc_id, @callback)

		it "should return an error", ->
			@callback.calledWith(new Error("web api error")).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,86 @@
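# Unit tests for PersistenceManager.setDoc: POSTs doc lines to the web API, mapping 404 to NotFoundError and other failures to errors.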
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/PersistenceManager.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors"

describe "PersistenceManager.setDoc", ->
	beforeEach ->
		@PersistenceManager = SandboxedModule.require modulePath, requires:
			"request": @request = sinon.stub()
			"settings-sharelatex": @Settings = {}
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@lines = ["one", "two", "three"]
		@callback = sinon.stub()
		@Settings.apis =
			web:
				url: @url = "www.example.com"
				user: @user = "sharelatex"
				pass: @pass = "password"

	describe "with a successful response from the web api", ->
		beforeEach ->
			@request.callsArgWith(1, null, {statusCode: 200}, JSON.stringify(lines: @lines))
			@PersistenceManager.setDoc(@project_id, @doc_id, @lines, @callback)

		it "should call the web api", ->
			@request
				.calledWith({
					url: "#{@url}/project/#{@project_id}/doc/#{@doc_id}"
					body: JSON.stringify
						lines: @lines
					method: "POST"
					headers:
						"content-type": "application/json"
					auth:
						user: @user
						pass: @pass
						sendImmediately: true
					jar: false
				})
				.should.equal true

		it "should call the callback without error", ->
			@callback.calledWith(null).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when request returns an error", ->
		beforeEach ->
			@request.callsArgWith(1, @error = new Error("oops"), null, null)
			@PersistenceManager.setDoc(@project_id, @doc_id, @lines, @callback)

		it "should return the error", ->
			@callback.calledWith(@error).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the request returns 404", ->
		beforeEach ->
			@request.callsArgWith(1, null, {statusCode: 404}, "")
			@PersistenceManager.setDoc(@project_id, @doc_id, @lines, @callback)

		it "should return a NotFoundError", ->
			@callback.calledWith(new Errors.NotFoundError("not found")).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when the request returns an error status code", ->
		beforeEach ->
			@request.callsArgWith(1, null, {statusCode: 500}, "")
			@PersistenceManager.setDoc(@project_id, @doc_id, @lines, @callback)

		it "should return an error", ->
			@callback.calledWith(new Error("web api error")).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,75 @@
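# Unit tests for ProjectManager.flushAndDeleteProjectWithLocks: deletes every doc in the project, continuing past (but reporting) per-doc errors.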
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/ProjectManager.js"
SandboxedModule = require('sandboxed-module')

describe "ProjectManager - flushAndDeleteProject", ->
	beforeEach ->
		@ProjectManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./DocumentManager": @DocumentManager = {}
			"logger-sharelatex": @logger = { log: sinon.stub(), error: sinon.stub() }
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
		@project_id = "project-id-123"
		@callback = sinon.stub()

	describe "successfully", ->
		beforeEach (done) ->
			@doc_ids = ["doc-id-1", "doc-id-2", "doc-id-3"]
			@RedisManager.getDocIdsInProject = sinon.stub().callsArgWith(1, null, @doc_ids)
			@DocumentManager.flushAndDeleteDocWithLock = sinon.stub().callsArg(2)
			@ProjectManager.flushAndDeleteProjectWithLocks @project_id, (error) =>
				@callback(error)
				done()

		it "should get the doc ids in the project", ->
			@RedisManager.getDocIdsInProject
				.calledWith(@project_id)
				.should.equal true

		it "should delete each doc in the project", ->
			for doc_id in @doc_ids
				@DocumentManager.flushAndDeleteDocWithLock
					.calledWith(@project_id, doc_id)
					.should.equal true

		it "should call the callback without error", ->
			@callback.calledWith(null).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when a doc errors", ->
		beforeEach (done) ->
			@doc_ids = ["doc-id-1", "doc-id-2", "doc-id-3"]
			@RedisManager.getDocIdsInProject = sinon.stub().callsArgWith(1, null, @doc_ids)
			@DocumentManager.flushAndDeleteDocWithLock = sinon.spy (project_id, doc_id, callback = (error) ->) =>
				if doc_id == "doc-id-1"
					callback(@error = new Error("oops, something went wrong"))
				else
					callback()
			@ProjectManager.flushAndDeleteProjectWithLocks @project_id, (error) =>
				@callback(error)
				done()

		it "should still flush each doc in the project", ->
			for doc_id in @doc_ids
				@DocumentManager.flushAndDeleteDocWithLock
					.calledWith(@project_id, doc_id)
					.should.equal true

		it "should record the error", ->
			@logger.error
				.calledWith(err: @error, project_id: @project_id, doc_id: "doc-id-1", "error deleting doc")
				.should.equal true

		it "should call the callback with an error", ->
			@callback.calledWith(new Error()).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,75 @@
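# Unit tests for ProjectManager.flushProjectWithLocks: flushes every doc in the project, continuing past (but reporting) per-doc errors.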
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/ProjectManager.js"
SandboxedModule = require('sandboxed-module')

describe "ProjectManager - flushProject", ->
	beforeEach ->
		@ProjectManager = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./DocumentManager": @DocumentManager = {}
			"logger-sharelatex": @logger = { log: sinon.stub(), error: sinon.stub() }
			"./Metrics": @Metrics =
				Timer: class Timer
					done: sinon.stub()
		@project_id = "project-id-123"
		@callback = sinon.stub()

	describe "successfully", ->
		beforeEach (done) ->
			@doc_ids = ["doc-id-1", "doc-id-2", "doc-id-3"]
			@RedisManager.getDocIdsInProject = sinon.stub().callsArgWith(1, null, @doc_ids)
			@DocumentManager.flushDocIfLoadedWithLock = sinon.stub().callsArg(2)
			@ProjectManager.flushProjectWithLocks @project_id, (error) =>
				@callback(error)
				done()

		it "should get the doc ids in the project", ->
			@RedisManager.getDocIdsInProject
				.calledWith(@project_id)
				.should.equal true

		it "should flush each doc in the project", ->
			for doc_id in @doc_ids
				@DocumentManager.flushDocIfLoadedWithLock
					.calledWith(@project_id, doc_id)
					.should.equal true

		it "should call the callback without error", ->
			@callback.calledWith(null).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true

	describe "when a doc errors", ->
		beforeEach (done) ->
			@doc_ids = ["doc-id-1", "doc-id-2", "doc-id-3"]
			@RedisManager.getDocIdsInProject = sinon.stub().callsArgWith(1, null, @doc_ids)
			@DocumentManager.flushDocIfLoadedWithLock = sinon.spy (project_id, doc_id, callback = (error) ->) =>
				if doc_id == "doc-id-1"
					callback(@error = new Error("oops, something went wrong"))
				else
					callback()
			@ProjectManager.flushProjectWithLocks @project_id, (error) =>
				@callback(error)
				done()

		it "should still flush each doc in the project", ->
			for doc_id in @doc_ids
				@DocumentManager.flushDocIfLoadedWithLock
					.calledWith(@project_id, doc_id)
					.should.equal true

		it "should record the error", ->
			@logger.error
				.calledWith(err: @error, project_id: @project_id, doc_id: "doc-id-1", "error flushing doc")
				.should.equal true

		it "should call the callback with an error", ->
			@callback.calledWith(new Error()).should.equal true

		it "should time the execution", ->
			@Metrics.Timer::done.called.should.equal true
@@ -0,0 +1,27 @@
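# Unit tests for RedisManager.clearDocFromPendingUpdatesSet: removes the doc from the DocsWithPendingUpdates set.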
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/RedisManager"
SandboxedModule = require('sandboxed-module')

describe "RedisManager.clearDocFromPendingUpdatesSet", ->
	beforeEach ->
		@project_id = "project-id"
		@doc_id = "document-id"
		@callback = sinon.stub()
		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis" : createClient: () =>
				@rclient = auth:->

		@rclient.srem = sinon.stub().callsArg(2)
		@RedisManager.clearDocFromPendingUpdatesSet(@project_id, @doc_id, @callback)

	it "should remove the doc from the set of docs with pending updates", ->
		@rclient.srem
			.calledWith("DocsWithPendingUpdates", "#{@project_id}:#{@doc_id}")
			.should.equal true

	it "should return the callback", ->
		@callback.called.should.equal true
@@ -0,0 +1,33 @@
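# Unit tests for RedisManager.getDocsWithPendingUpdates: reads the DocsWithPendingUpdates set and splits each member into project_id and doc_id.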
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/RedisManager"
SandboxedModule = require('sandboxed-module')

describe "RedisManager.getDocsWithPendingUpdates", ->
	beforeEach ->
		@callback = sinon.stub()
		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis" : createClient: () =>
				@rclient = auth:->

		@docs = [{
			doc_id: "doc-id-1"
			project_id: "project-id-1"
		}, {
			doc_id: "doc-id-2"
			project_id: "project-id-2"
		}]
		@doc_keys = @docs.map (doc) -> "#{doc.project_id}:#{doc.doc_id}"

		@rclient.smembers = sinon.stub().callsArgWith(1, null, @doc_keys)
		@RedisManager.getDocsWithPendingUpdates(@callback)

	it "should get the docs with pending updates", ->
		@rclient.smembers
			.calledWith("DocsWithPendingUpdates")
			.should.equal true

	it "should return the docs with pending updates", ->
		@callback.calledWith(null, @docs).should.equal true
@@ -0,0 +1,56 @@
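# Unit tests for RedisManager.getPendingUpdatesForDoc: reads and deletes the PendingUpdates:<doc_id> list, erroring on unparsable JSON.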
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/RedisManager.js"
SandboxedModule = require('sandboxed-module')

describe "RedisManager.getPendingUpdatesForDoc", ->
	beforeEach ->
		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis": createClient: () =>
				@rclient =
					auth: () ->
					multi: () => @rclient
			"logger-sharelatex": @logger = {log: sinon.stub()}
		@project_id = "project-id-123"
		@doc_id = "doc-id-123"
		@callback = sinon.stub()
		@rclient.lrange = sinon.stub()
		@rclient.del = sinon.stub()

	describe "successfully", ->
		beforeEach ->
			@updates = [
				{ op: [{ i: "foo", p: 4 }] }
				{ op: [{ i: "foo", p: 4 }] }
			]
			@jsonUpdates = @updates.map (update) -> JSON.stringify update
			@rclient.exec = sinon.stub().callsArgWith(0, null, [@jsonUpdates])
			@RedisManager.getPendingUpdatesForDoc @doc_id, @callback

		it "should get the pending updates", ->
			@rclient.lrange
				.calledWith("PendingUpdates:#{@doc_id}", 0, -1)
				.should.equal true

		it "should delete the pending updates", ->
			@rclient.del
				.calledWith("PendingUpdates:#{@doc_id}")
				.should.equal true

		it "should call the callback with the updates", ->
			@callback.calledWith(null, @updates).should.equal true

	describe "when the JSON doesn't parse", ->
		beforeEach ->
			@jsonUpdates = [
				JSON.stringify { op: [{ i: "foo", p: 4 }] }
				"broken json"
			]
			@rclient.exec = sinon.stub().callsArgWith(0, null, [@jsonUpdates])
			@RedisManager.getPendingUpdatesForDoc @doc_id, @callback

		it "should return an error to the callback", ->
			@callback.calledWith(new Error("JSON parse error")).should.equal true
@@ -0,0 +1,99 @@
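# Unit tests for RedisManager.getPreviousDocOps: maps version numbers onto offsets into the DocOps:<doc_id> list, erroring if the range is not in Redis.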
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/RedisManager"
SandboxedModule = require('sandboxed-module')

describe "RedisManager.getPreviousDocOps", ->
	beforeEach ->
		@callback = sinon.stub()
		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis" : createClient: () =>
				@rclient =
					auth: ->
					multi: => @rclient
			"logger-sharelatex": @logger = { error: sinon.stub(), log: sinon.stub() }
		@doc_id = "doc-id-123"

	describe "with a start and an end value", ->
		beforeEach ->
			@first_version_in_redis = 30
			@version = 70
			@length = @version - @first_version_in_redis
			@start = 50
			@end = 60
			@ops = [
				{ "mock": "op-1" },
				{ "mock": "op-2" }
			]
			@jsonOps = @ops.map (op) -> JSON.stringify op
			@rclient.llen = sinon.stub().callsArgWith(1, null, @length)
			@rclient.get = sinon.stub().callsArgWith(1, null, @version.toString())
			@rclient.lrange = sinon.stub().callsArgWith(3, null, @jsonOps)
			@RedisManager.getPreviousDocOps(@doc_id, @start, @end, @callback)

		it "should get the length of the existing doc ops", ->
			@rclient.llen
				.calledWith("DocOps:#{@doc_id}")
				.should.equal true

		it "should get the current version of the doc", ->
			@rclient.get
				.calledWith("DocVersion:#{@doc_id}")
				.should.equal true

		it "should get the appropriate doc ops", ->
			@rclient.lrange
				.calledWith("DocOps:#{@doc_id}", @start - @first_version_in_redis, @end - @first_version_in_redis)
				.should.equal true

		it "should return the docs with the doc ops deserialized", ->
			@callback.calledWith(null, @ops).should.equal true

	describe "with an end value of -1", ->
		beforeEach ->
			@first_version_in_redis = 30
			@version = 70
			@length = @version - @first_version_in_redis
			@start = 50
			@end = -1
			@ops = [
				{ "mock": "op-1" },
				{ "mock": "op-2" }
			]
			@jsonOps = @ops.map (op) -> JSON.stringify op
			@rclient.llen = sinon.stub().callsArgWith(1, null, @length)
			@rclient.get = sinon.stub().callsArgWith(1, null, @version.toString())
			@rclient.lrange = sinon.stub().callsArgWith(3, null, @jsonOps)
			@RedisManager.getPreviousDocOps(@doc_id, @start, @end, @callback)

		it "should get the appropriate doc ops to the end of list", ->
			@rclient.lrange
				.calledWith("DocOps:#{@doc_id}", @start - @first_version_in_redis, -1)
				.should.equal true

		it "should return the docs with the doc ops deserialized", ->
			@callback.calledWith(null, @ops).should.equal true

	describe "when the requested range is not in Redis", ->
		beforeEach ->
			@first_version_in_redis = 30
			@version = 70
			@length = @version - @first_version_in_redis
			@start = 20
			@end = -1
			@ops = [
				{ "mock": "op-1" },
				{ "mock": "op-2" }
			]
			@jsonOps = @ops.map (op) -> JSON.stringify op
			@rclient.llen = sinon.stub().callsArgWith(1, null, @length)
			@rclient.get = sinon.stub().callsArgWith(1, null, @version.toString())
			@rclient.lrange = sinon.stub().callsArgWith(3, null, @jsonOps)
			@RedisManager.getPreviousDocOps(@doc_id, @start, @end, @callback)

		it "should return an error", ->
			@callback.calledWith(new Error("range is not loaded in redis")).should.equal true

		it "should log out the problem", ->
			@logger.error.called.should.equal true
@@ -0,0 +1,32 @@
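# Unit tests for RedisManager.prependDocOps: LPUSHes the ops, reversed and JSON-encoded, onto DocOps:<doc_id>.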
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/RedisManager"
SandboxedModule = require('sandboxed-module')

describe "RedisManager.prependDocOps", ->
	beforeEach ->
		@doc_id = "document-id"
		@callback = sinon.stub()
		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis" : createClient: () =>
				@rclient = auth:->

		@rclient.lpush = sinon.stub().callsArg(2)
		@ops = [
			{ "mock" : "op-1" },
			{ "mock" : "op-2" }
		]
		@reversedJsonOps = @ops.map((op) -> JSON.stringify op).reverse()
		@RedisManager.prependDocOps(@doc_id, @ops, @callback)

	it "should push the reversed JSONed ops", ->
		@rclient.lpush
			.calledWith("DocOps:#{@doc_id}", @reversedJsonOps)
			.should.equal true

	it "should return the callback", ->
		@callback.called.should.equal true
@@ -0,0 +1,37 @@
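# Unit tests for RedisManager.pushDocOp: RPUSHes the JSON-encoded op onto DocOps:<doc_id> and increments DocVersion:<doc_id>.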
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/RedisManager"
SandboxedModule = require('sandboxed-module')

describe "RedisManager.pushDocOp", ->
	beforeEach ->
		@callback = sinon.stub()
		@RedisManager = SandboxedModule.require modulePath, requires:
			"redis" : createClient: () =>
				@rclient =
					auth: ->
					multi: => @rclient
		@doc_id = "doc-id-123"

		@version = 70
		@op = { "mock": "op-1" }
		@jsonOp = JSON.stringify @op
		@rclient.rpush = sinon.stub().callsArgWith(2, null)
		@rclient.incr = sinon.stub().callsArgWith(1, null, @version.toString())
		@RedisManager.pushDocOp(@doc_id, @op, @callback)

	it "should push the op into redis", ->
		@rclient.rpush
			.calledWith("DocOps:#{@doc_id}", @jsonOp)
			.should.equal true

	it "should increment the version number", ->
		@rclient.incr
			.calledWith("DocVersion:#{@doc_id}")
			.should.equal true

	it "should call the callback with the new version", ->
		@callback.calledWith(null, @version).should.equal true
@@ -0,0 +1,73 @@
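# Unit tests for RedisManager.removeDocFromMemory, checking no trace of the doc remains in the in-memory redis stand-in.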
require('coffee-script')
|
||||||
|
_ = require("underscore")
|
||||||
|
assert = require('assert')
|
||||||
|
sinon = require('sinon')
|
||||||
|
path = require('path')
|
||||||
|
modulePath = path.join __dirname, '../../../app/js/RedisManager.js'
|
||||||
|
keys = require(path.join __dirname, '../../../app/js/RedisKeyBuilder.js')
|
||||||
|
loadModule = require('./module-loader').loadModule
|
||||||
|
|
||||||
|
describe 'removing single doc from memory', ()->
|
||||||
|
|
||||||
|
project_id = "12345"
|
||||||
|
doc_id1 = "docid1"
|
||||||
|
doc_id2 = "docid2"
|
||||||
|
doc_id3 = "docid3"
|
||||||
|
|
||||||
|
redisMemory = undefined
|
||||||
|
redisManager = undefined
|
||||||
|
self = @
|
||||||
|
beforeEach (done)->
|
||||||
|
redisMemory = {}
|
||||||
|
|
||||||
|
mocks =
|
||||||
|
"logger-sharelatex":
|
||||||
|
error:->
|
||||||
|
log:->
|
||||||
|
redis:
|
||||||
|
createClient : ->
|
||||||
|
auth:->
|
||||||
|
multi: ->
|
||||||
|
get:->
|
||||||
|
set:(key, value)->
|
||||||
|
redisMemory[key] = value
|
||||||
|
sadd:(key, value)->
|
||||||
|
if !redisMemory[key]?
|
||||||
|
redisMemory[key] = []
|
||||||
|
redisMemory[key].push value
|
||||||
|
del : (key)->
|
||||||
|
delete redisMemory[key]
|
||||||
|
srem : (key, member)->
|
||||||
|
index = redisMemory[key].indexOf(member)
|
||||||
|
redisMemory[key].splice(index, 1)
|
||||||
|
exec:(callback)->
|
||||||
|
callback(null, [])
|
||||||
|
|
||||||
|
redisManager = loadModule(modulePath, mocks).module.exports
|
||||||
|
redisManager.putDocInMemory project_id, doc_id1, 0, ["line"], ->
|
||||||
|
redisManager.putDocInMemory project_id, doc_id2, 0, ["ledf"], ->
|
||||||
|
redisManager.putDocInMemory project_id, doc_id3, 0, ["ledf"], ->
|
||||||
|
done()
|
||||||
|
|
||||||
|
it 'should remove doc lines from memory', (done)->
|
||||||
|
keyExists = false
|
||||||
|
redisManager.removeDocFromMemory project_id, doc_id1, ()->
|
||||||
|
assert.equal redisMemory[keys.docLines(doc_id:doc_id1)], undefined
|
||||||
|
keys = _.keys(redisMemory)
|
||||||
|
containsKey(keys, doc_id1)
|
||||||
|
keys.forEach (sets)->
|
||||||
|
containsKey sets, doc_id1
|
||||||
|
_.each redisMemory, (value)->
|
||||||
|
if value.indexOf(doc_id1) != -1
|
||||||
|
assert.equal false, "#{doc_id1} found in value #{value}"
|
||||||
|
done()
|
||||||
|
|
||||||
|
|
||||||
|
containsKey = (haystack, key)->
|
||||||
|
if haystack.forEach?
|
||||||
|
haystack.forEach (area)->
|
||||||
|
if area.indexOf(key) != -1
|
||||||
|
assert.equal false, "#{key} found in haystack in #{area}"
|
||||||
|
|
||||||
|
|
||||||
|
|
|
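
The fake Redis client above (set/sadd to populate, del/srem inside a multi to tear down) outlines what removeDocFromMemory presumably does: delete the per-doc keys and detach the doc id from its project's set in one transaction. A sketch under that assumption; keys.docLines appears in the test itself, while docVersion and docsInProject are hypothetical helper names:

# Hypothetical sketch inferred from the multi/del/srem fake above.
removeDocFromMemory = (rclient, keys, project_id, doc_id, callback) ->
	multi = rclient.multi()
	multi.del keys.docLines(doc_id: doc_id)
	multi.del keys.docVersion(doc_id: doc_id)                       # assumed helper
	multi.srem keys.docsInProject(project_id: project_id), doc_id  # assumed helper
	multi.exec (error, replies) ->
		callback(error)
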
@ -0,0 +1,54 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/ShareJsDB.js"
SandboxedModule = require('sandboxed-module')

describe "ShareJsDB.getOps", ->
	beforeEach ->
		@doc_id = "document-id"
		@project_id = "project-id"
		@doc_key = "#{@project_id}:#{@doc_id}"
		@callback = sinon.stub()
		@ops = [{p: 20, t: "foo"}]
		@redis_ops = (JSON.stringify(op) for op in @ops)
		@ShareJsDB = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./DocOpsManager": @DocOpsManager = {}
			"./DocumentManager": {}

	describe "with start == end", ->
		beforeEach ->
			@start = @end = 42
			@ShareJsDB.getOps @doc_key, @start, @end, @callback

		it "should return an empty array", ->
			@callback.calledWith(null, []).should.equal true

	describe "with a non-empty range", ->
		beforeEach ->
			@start = 35
			@end = 42
			@DocOpsManager.getPreviousDocOps = sinon.stub().callsArgWith(4, null, @ops)
			@ShareJsDB.getOps @doc_key, @start, @end, @callback

		it "should get the range from redis", ->
			@DocOpsManager.getPreviousDocOps
				.calledWith(@project_id, @doc_id, @start, @end - 1)
				.should.equal true

		it "should return the ops", ->
			@callback.calledWith(null, @ops).should.equal true

	describe "with no specified end", ->
		beforeEach ->
			@start = 35
			@end = null
			@DocOpsManager.getPreviousDocOps = sinon.stub().callsArgWith(4, null, @ops)
			@ShareJsDB.getOps @doc_key, @start, @end, @callback

		it "should get until the end of the list", ->
			@DocOpsManager.getPreviousDocOps
				.calledWith(@project_id, @doc_id, @start, -1)
				.should.equal true
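
Taken together, the three cases above pin down the range translation in getOps: ShareJS asks with an exclusive end (or null for "to the latest"), while DocOpsManager.getPreviousDocOps takes an inclusive end, hence the end - 1 and -1 in the expectations. A sketch of that adapter (assumed, not the committed code):

# Hypothetical sketch inferred from the expectations above.
getOps = (DocOpsManager, doc_key, start, end, callback) ->
	# An empty half-open range [start, start) needs no Redis round trip
	return callback(null, []) if start == end
	# Translate the exclusive/null end into the inclusive end the manager expects
	end = if end? then end - 1 else -1
	[project_id, doc_id] = doc_key.split(":")
	DocOpsManager.getPreviousDocOps project_id, doc_id, start, end, callback
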
@ -0,0 +1,85 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
expect = chai.expect
modulePath = "../../../../app/js/ShareJsDB.js"
SandboxedModule = require('sandboxed-module')
Errors = require "../../../../app/js/Errors"

describe "ShareJsDB.getSnapshot", ->
	beforeEach ->
		@doc_id = "document-id"
		@project_id = "project-id"
		@doc_key = "#{@project_id}:#{@doc_id}"
		@callback = sinon.stub()
		@ShareJsDB = SandboxedModule.require modulePath, requires:
			"./DocumentManager": @DocumentManager = {}
			"./RedisManager": {}
			"./DocOpsManager": {}

		@version = 42

	describe "with a text document", ->
		beforeEach ->
			@lines = ["one", "two", "three"]

		describe "successfully", ->
			beforeEach ->
				@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @lines, @version)
				@ShareJsDB.getSnapshot @doc_key, @callback

			it "should get the doc", ->
				@DocumentManager.getDoc
					.calledWith(@project_id, @doc_id)
					.should.equal true

			it "should return the doc lines", ->
				@callback.args[0][1].snapshot.should.equal @lines.join("\n")

			it "should return the doc version", ->
				@callback.args[0][1].v.should.equal @version

			it "should return the type as text", ->
				@callback.args[0][1].type.should.equal "text"

		describe "when the doclines do not exist", ->
			beforeEach ->
				@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, null, null)
				@ShareJsDB.getSnapshot @doc_key, @callback

			it "should return the callback with a NotFoundError", ->
				@callback.calledWith(new Errors.NotFoundError("not found")).should.equal true

		describe "when getDoc returns an error", ->
			beforeEach ->
				@DocumentManager.getDoc = sinon.stub().callsArgWith(2, @error = new Error("oops"), null, null)
				@ShareJsDB.getSnapshot @doc_key, @callback

			it "should return the callback with an error", ->
				@callback.calledWith(@error).should.equal true

	describe "with a JSON document", ->
		beforeEach ->
			@lines = [{text: "one"}, {text: "two"}, {text: "three"}]

		describe "successfully", ->
			beforeEach ->
				@DocumentManager.getDoc = sinon.stub().callsArgWith(2, null, @lines, @version)
				@ShareJsDB.getSnapshot @doc_key, @callback

			it "should get the doc", ->
				@DocumentManager.getDoc
					.calledWith(@project_id, @doc_id)
					.should.equal true

			it "should return the doc lines", ->
				expect(@callback.args[0][1].snapshot).to.deep.equal lines: @lines

			it "should return the doc version", ->
				@callback.args[0][1].v.should.equal @version

			it "should return the type as json", ->
				@callback.args[0][1].type.should.equal "json"
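
The two branches above suggest getSnapshot dispatches on the shape of the doc lines: an array of strings becomes a joined "text" snapshot, anything else is wrapped as a "json" snapshot, and missing lines or version become a NotFoundError. A sketch along those lines (hypothetical; only the outputs are fixed by the tests):

# Hypothetical sketch inferred from the text/json expectations above.
getSnapshot = (DocumentManager, Errors, doc_key, callback) ->
	[project_id, doc_id] = doc_key.split(":")
	DocumentManager.getDoc project_id, doc_id, (error, lines, version) ->
		return callback(error) if error?
		if !lines? or !version?
			return callback(new Errors.NotFoundError("not found"))
		if lines.every((line) -> typeof line is "string")
			# Plain text documents are snapshotted as one joined string
			callback null, snapshot: lines.join("\n"), v: version, type: "text"
		else
			# Structured documents are passed through as a JSON doc
			callback null, snapshot: {lines: lines}, v: version, type: "json"
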
@ -0,0 +1,53 @@
sinon = require('sinon')
chai = require('chai')
should = chai.should()
modulePath = "../../../../app/js/ShareJsDB.js"
SandboxedModule = require('sandboxed-module')

describe "ShareJsDB.writeOp", ->
	beforeEach ->
		@project_id = "project-id"
		@doc_id = "document-id"
		@doc_key = "#{@project_id}:#{@doc_id}"
		@callback = sinon.stub()
		@opData =
			op: {p: 20, t: "foo"}
			meta: {source: "bar"}
		@ShareJsDB = SandboxedModule.require modulePath, requires:
			"./RedisManager": @RedisManager = {}
			"./DocOpsManager": @DocOpsManager = {}
			"./DocumentManager": {}

	describe "writing an op", ->
		beforeEach ->
			@version = 42
			@opData.v = @version
			@DocOpsManager.pushDocOp = sinon.stub().callsArgWith(3, null, @version + 1)
			@ShareJsDB.writeOp @doc_key, @opData, @callback

		it "should write the op to redis", ->
			op =
				op: @opData.op
				meta: @opData.meta
			@DocOpsManager.pushDocOp
				.calledWith(@project_id, @doc_id, op)
				.should.equal true

		it "should call the callback without an error", ->
			@callback.called.should.equal true
			(@callback.args[0][0]?).should.equal false

	describe "writing an op at the wrong version", ->
		beforeEach ->
			@version = 42
			@mismatch = 5
			@opData.v = @version
			@DocOpsManager.pushDocOp = sinon.stub().callsArgWith(3, null, @version + @mismatch)
			@ShareJsDB.writeOp @doc_key, @opData, @callback

		it "should call the callback with an error", ->
			@callback.calledWith(sinon.match.string).should.equal true
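
The mismatch case above shows writeOp treating pushDocOp's returned version as a consistency check: the op carries the version it was generated at (opData.v), and the push must land at exactly opData.v + 1. A sketch of that check (the error string itself is an assumption; the test only requires a string):

# Hypothetical sketch inferred from the version-mismatch expectation above.
writeOp = (DocOpsManager, doc_key, opData, callback) ->
	[project_id, doc_id] = doc_key.split(":")
	DocOpsManager.pushDocOp project_id, doc_id, {op: opData.op, meta: opData.meta}, (error, version) ->
		return callback(error) if error?
		if version == opData.v + 1
			callback()
		else
			# Hand ShareJS a string error, as the test's sinon.match.string expects
			callback "Version mismatch on '#{doc_id}': expected #{opData.v + 1}, got #{version}"
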