Get acceptance tests running

This commit is contained in:
James Allen 2014-02-26 12:11:45 +00:00
parent 45fe6978af
commit d27872c9bd
9 changed files with 91 additions and 19 deletions

View file

@@ -6,11 +6,13 @@ HttpController = require "./app/js/HttpController"
express = require "express"
app = express()
app.use express.logger()
app.post "/doc/:doc_id/flush", HttpController.flushUpdatesWithLock
app.use (error, req, res, next) ->
logger.error err: error, "an internal error occured"
req.send 500
res.send 500
port = Settings.internal?.history?.port or 3014
host = Settings.internal?.history?.host or "localhost"

View file

@@ -12,6 +12,7 @@ module.exports = HistoryManager =
MongoManager.popLastCompressedUpdate doc_id, (error, lastCompressedUpdate) ->
return callback(error) if error?
logger.log doc_id: doc_id, "popped last update"
# Ensure that raw updates start where lastCompressedUpdate left off
if lastCompressedUpdate?
@@ -30,25 +31,31 @@ module.exports = HistoryManager =
REDIS_READ_BATCH_SIZE: 100
processUncompressedUpdates: (doc_id, callback = (error) ->) ->
logger.log "processUncompressedUpdates"
RedisManager.getOldestRawUpdates doc_id, HistoryManager.REDIS_READ_BATCH_SIZE, (error, rawUpdates) ->
return callback(error) if error?
length = rawUpdates.length
logger.log doc_id: doc_id, length: length, "got raw updates from redis"
HistoryManager.compressAndSaveRawUpdates doc_id, rawUpdates, (error) ->
return callback(error) if error?
logger.log doc_id: doc_id, "compressed and saved doc updates"
RedisManager.deleteOldestRawUpdates doc_id, HistoryManager.REDIS_READ_BATCH_SIZE, (error) ->
return callback(error) if error?
if length == HistoryManager.REDIS_READ_BATCH_SIZE
# There might be more updates
logger.log doc_id: doc_id, "continuing processing updates"
setTimeout () ->
HistoryManager.processUncompressedUpdates doc_id, callback
, 0
else
logger.log doc_id: doc_id, "all raw updates processed"
callback()
processUncompressedUpdatesWithLock: (doc_id, callback = (error) ->) ->
LockManager.runWithLock(
"HistoryLock:#{doc_id}",
HistoryManager.processUncompressedUpdates,
(releaseLock) ->
HistoryManager.processUncompressedUpdates doc_id, releaseLock
callback
)

View file

@@ -7,4 +7,5 @@ module.exports = HttpController =
logger.log doc_id: doc_id, "compressing doc history"
HistoryManager.processUncompressedUpdatesWithLock doc_id, (error) ->
return next(error) if error?
logger.log "done http request"
res.send 204

View file

@@ -26,6 +26,7 @@ module.exports = UpdateCompressor =
start_ts: update.meta.start_ts or update.meta.ts
end_ts: update.meta.end_ts or update.meta.ts
user_id: update.meta.user_id
v: update.v
return normalizedUpdates
compressRawUpdates: (lastPreviousUpdate, rawUpdates) ->
@@ -56,12 +57,14 @@ module.exports = UpdateCompressor =
user_id: firstUpdate.meta.user_id or null
start_ts: firstUpdate.meta.start_ts or firstUpdate.meta.ts
end_ts: firstUpdate.meta.end_ts or firstUpdate.meta.ts
v: firstUpdate.v
secondUpdate =
op: secondUpdate.op
meta:
user_id: secondUpdate.meta.user_id or null
start_ts: secondUpdate.meta.start_ts or secondUpdate.meta.ts
end_ts: secondUpdate.meta.end_ts or secondUpdate.meta.ts
v: secondUpdate.v
if firstUpdate.meta.user_id != secondUpdate.meta.user_id
return [firstUpdate, secondUpdate]
@@ -81,6 +84,7 @@ module.exports = UpdateCompressor =
op:
p: firstOp.p
i: strInject(firstOp.i, secondOp.p - firstOp.p, secondOp.i)
v: secondUpdate.v
]
# Two deletes
else if firstOp.d? and secondOp.d? and secondOp.p <= firstOp.p <= (secondOp.p + secondOp.d.length)
@@ -92,6 +96,7 @@ module.exports = UpdateCompressor =
op:
p: secondOp.p
d: strInject(secondOp.d, firstOp.p - secondOp.p, firstOp.d)
v: secondUpdate.v
]
# An insert and then a delete
else if firstOp.i? and secondOp.d? and firstOp.p <= secondOp.p <= (firstOp.p + firstOp.i.length)
@@ -99,7 +104,6 @@ module.exports = UpdateCompressor =
insertedText = firstOp.i.slice(offset, offset + secondOp.d.length)
if insertedText == secondOp.d
insert = strRemove(firstOp.i, offset, secondOp.d.length)
return [] if insert == ""
return [
meta:
start_ts: firstUpdate.meta.start_ts
@@ -108,6 +112,7 @@ module.exports = UpdateCompressor =
op:
p: firstOp.p
i: insert
v: secondUpdate.v
]
else
# This shouldn't be possible!

View file

@@ -1,6 +1,6 @@
Settings = require "settings-sharelatex"
mongojs = require "mongojs"
db = mongojs.connect(Settings.mongo.url, ["docHistory", "docOps"])
db = mongojs.connect(Settings.mongo.url, ["docHistory"])
module.exports =
db: db
ObjectId: mongojs.ObjectId

View file

@@ -10,6 +10,7 @@
"mongojs": "~0.9.11",
"settings": "git+ssh://git@bitbucket.org:sharelatex/settings-sharelatex.git#master",
"logger": "git+ssh://git@bitbucket.org:sharelatex/logger-sharelatex.git#bunyan",
"request": "~2.33.0"
"request": "~2.33.0",
"redis": "~0.10.1"
}
}

View file

@@ -1,11 +1,13 @@
sinon = require "sinon"
chai = require("chai")
chai.should()
expect = chai.expect
mongojs = require "../../../app/js/mongojs"
db = mongojs.db
ObjectId = mongojs.ObjectId
Settings = require "settings-sharelatex"
request = require "request"
rclient = require("redis").createClient() # Only works locally for now
describe "Appending doc ops to the history", ->
describe "when the history does not exist yet", ->
@@ -15,27 +17,41 @@ describe "Appending doc ops to the history", ->
updates = [{
op: [{ i: "f", p: 3 }]
meta: { ts: Date.now(), user_id: @user_id }
v: 3
}, {
op: [{ i: "o", p: 4 }]
meta: { ts: Date.now(), user_id: @user_id }
v: 4
}, {
op: [{ i: "o", p: 5 }]
meta: { ts: Date.now(), user_id: @user_id }
v: 5
}]
@version = 3
rclient.rpush "UncompressedHistoryOps:#{@doc_id}", (JSON.stringify(u) for u in updates)...
request.post {
url: "http://localhost:#{Settings.port}/doc/#{@doc_id}/history"
json:
version: @version
docOps: updates
url: "http://localhost:#{Settings.port}/doc/#{@doc_id}/flush"
}, (@error, @response, @body) =>
db.docHistory
.find(doc_id: ObjectId(@doc_id))
.sort("meta.end_ts": -1)
.toArray (error, updates) =>
@update = updates[0]
done()
it "should return a successful response", ->
@response.statusCode.should.equal 204
it "should insert the compressed op into mongo", ->
expect(@update.op).to.deep.equal {
p: 3, i: "foo"
}
it "should insert the correct version number into mongo", ->
expect(@update.v).to.equal 5
###
describe "when the history has already been started", ->
beforeEach (done) ->
@doc_id = ObjectId().toString()
@@ -112,4 +128,4 @@ describe "Appending doc ops to the history", ->
it "should return a successful response", ->
@response.statusCode.should.equal 204
###

View file

@@ -176,15 +176,14 @@ describe "HistoryManager", ->
describe "processCompressedUpdatesWithLock", ->
beforeEach ->
@HistoryManager.processUncompressedUpdates = sinon.stub()
@HistoryManager.processUncompressedUpdates = sinon.stub().callsArg(2)
@LockManager.runWithLock = sinon.stub().callsArg(2)
@HistoryManager.processUncompressedUpdatesWithLock @doc_id, @callback
it "should run processUncompressedUpdates with the lock", ->
@LockManager.runWithLock
.calledWith(
"HistoryLock:#{@doc_id}",
@HistoryManager.processUncompressedUpdates
"HistoryLock:#{@doc_id}"
)
.should.equal true

View file

@@ -18,19 +18,24 @@ describe "UpdateCompressor", ->
expect(@UpdateCompressor.convertRawUpdatesToCompressedFormat [{
op: [ @op1 = { p: 0, i: "Foo" }, @op2 = { p: 6, i: "bar"} ]
meta: { ts: @ts1, user_id: @user_id }
v: 42
}, {
op: [ @op3 = { p: 10, i: "baz" } ]
meta: { ts: @ts2, user_id: @other_user_id }
v: 43
}])
.to.deep.equal [{
op: @op1,
meta: { start_ts: @ts1, end_ts: @ts1, user_id: @user_id }
meta: { start_ts: @ts1, end_ts: @ts1, user_id: @user_id },
v: 42
}, {
op: @op2,
meta: { start_ts: @ts1, end_ts: @ts1, user_id: @user_id }
meta: { start_ts: @ts1, end_ts: @ts1, user_id: @user_id },
v: 42
}, {
op: @op3,
meta: { start_ts: @ts2, end_ts: @ts2, user_id: @other_user_id }
meta: { start_ts: @ts2, end_ts: @ts2, user_id: @other_user_id },
v: 43
}]
describe "compress", ->
@@ -39,42 +44,52 @@ describe "UpdateCompressor", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 6, i: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, i: "foobar" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
}]
it "should insert one insert inside the other", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 5, i: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, i: "fobaro" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
}]
it "should not append separated inserts", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 9, i: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, i: "foo" }
meta: start_ts: @ts1, end_ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 9, i: "bar" }
meta: start_ts: @ts2, end_ts: @ts2, user_id: @user_id
v: 43
}]
describe "delete - delete", ->
@@ -82,42 +97,52 @@ describe "UpdateCompressor", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, d: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 3, d: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, d: "foobar" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
}]
it "should insert one delete inside the other", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, d: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 1, d: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 1, d: "bafoor" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
}]
it "should not append separated deletes", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, d: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 9, d: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, d: "foo" }
meta: start_ts: @ts1, end_ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 9, d: "bar" }
meta: start_ts: @ts2, end_ts: @ts2, user_id: @user_id
v: 43
}]
describe "insert - delete", ->
@@ -125,52 +150,68 @@ describe "UpdateCompressor", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 5, d: "o" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, i: "fo" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
}]
it "should remove part of an insert from the middle", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "fobaro" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 5, d: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, i: "foo" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
}]
it "should cancel out two opposite updates", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 3, d: "foo" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal []
.to.deep.equal [
op: { p: 3, i: "" }
meta: start_ts: @ts1, end_ts: @ts2, user_id: @user_id
v: 43
]
it "should not combine separated updates", ->
expect(@UpdateCompressor.compressUpdates [{
op: { p: 3, i: "foo" }
meta: ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 9, d: "bar" }
meta: ts: @ts2, user_id: @user_id
v: 43
}])
.to.deep.equal [{
op: { p: 3, i: "foo" }
meta: start_ts: @ts1, end_ts: @ts1, user_id: @user_id
v: 42
}, {
op: { p: 9, d: "bar" }
meta: start_ts: @ts2, end_ts: @ts2, user_id: @user_id
v: 43
}]