avoid compressing updates if the result would be too big

Brian Gough 2016-01-26 12:23:21 +00:00
parent ed0aaa189d
commit b7a4c72f9c
2 changed files with 70 additions and 2 deletions
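
The compressor merges adjacent insert/delete ops from the same user into a single update. This commit adds a MAX_UPDATE_SIZE cap (two megabytes) so that two ops are only concatenated when their combined text length stays under the limit; oversized pairs are left as separate updates. A minimal sketch of the guard, using the names from the diff below ("withinSizeLimit" is a hypothetical helper, not part of the actual module):

    # Sketch only: the real checks live inline inside _concatTwoUpdates below.
    MAX_UPDATE_SIZE = 2 * 1024 * 1024   # two megabytes

    withinSizeLimit = (firstOp, secondOp) ->
      # an op carries either an insert (i) or a delete (d); use whichever is present
      firstSize = firstOp.i?.length or firstOp.d?.length
      secondSize = secondOp.i?.length or secondOp.d?.length
      firstSize + secondSize < MAX_UPDATE_SIZE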


@@ -80,6 +80,7 @@ module.exports = UpdateCompressor =
     return compressedUpdates
   MAX_TIME_BETWEEN_UPDATES: oneMinute = 60 * 1000
+  MAX_UPDATE_SIZE: twoMegabytes = 2 * 1024 * 1024
   _concatTwoUpdates: (firstUpdate, secondUpdate) ->
     firstUpdate =
@@ -105,8 +106,12 @@ module.exports = UpdateCompressor =
     firstOp = firstUpdate.op
     secondOp = secondUpdate.op
+    firstSize = firstOp.i?.length or firstOp.d?.length
+    secondSize = secondOp.i?.length or secondOp.d?.length
     # Two inserts
-    if firstOp.i? and secondOp.i? and firstOp.p <= secondOp.p <= (firstOp.p + firstOp.i.length)
+    if firstOp.i? and secondOp.i? and firstOp.p <= secondOp.p <= (firstOp.p + firstOp.i.length) and firstSize + secondSize < UpdateCompressor.MAX_UPDATE_SIZE
       return [
         meta:
           start_ts: firstUpdate.meta.start_ts
@@ -118,7 +123,7 @@ module.exports = UpdateCompressor =
           v: secondUpdate.v
       ]
     # Two deletes
-    else if firstOp.d? and secondOp.d? and secondOp.p <= firstOp.p <= (secondOp.p + secondOp.d.length)
+    else if firstOp.d? and secondOp.d? and secondOp.p <= firstOp.p <= (secondOp.p + secondOp.d.length) and firstSize + secondSize < UpdateCompressor.MAX_UPDATE_SIZE
       return [
         meta:
           start_ts: firstUpdate.meta.start_ts
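
For context, and as an assumption about the surrounding _concatTwoUpdates code that this diff does not show: when the size check passes, the two inserts collapse into a single op containing both pieces of text, which is why the new condition bounds their combined length. Roughly:

    # Simplified sketch (assumption, not part of this diff): the real code
    # splices secondOp.i into firstOp.i at offset secondOp.p - firstOp.p.
    mergedOp =
      p: firstOp.p
      i: firstOp.i + secondOp.i   # combined text, so its length is firstSize + secondSize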


@@ -10,6 +10,8 @@ describe "UpdateCompressor", ->
     @UpdateCompressor = SandboxedModule.require modulePath
     @user_id = "user-id-1"
     @other_user_id = "user-id-2"
+    @bigstring = ("a" for [0 .. 2*1024*1024]).join("")
+    @mediumstring = ("a" for [0 .. 1024*1024]).join("")
     @ts1 = Date.now()
     @ts2 = Date.now() + 1000
@@ -141,6 +143,67 @@ describe "UpdateCompressor", ->
           v: 43
         }]
 
+      it "should not append inserts that are too big (second op)", ->
+        expect(@UpdateCompressor.compressUpdates [{
+          op: { p: 3, i: "foo" }
+          meta: ts: @ts1, user_id: @user_id
+          v: 42
+        }, {
+          op: { p: 6, i: @bigstring }
+          meta: ts: @ts2, user_id: @user_id
+          v: 43
+        }])
+        .to.deep.equal [{
+          op: { p: 3, i: "foo" }
+          meta: start_ts: @ts1, end_ts: @ts1, user_id: @user_id
+          v: 42
+        }, {
+          op: { p: 6, i: @bigstring }
+          meta: start_ts: @ts2, end_ts: @ts2, user_id: @user_id
+          v: 43
+        }]
+
+      it "should not append inserts that are too big (first op)", ->
+        expect(@UpdateCompressor.compressUpdates [{
+          op: { p: 3, i: @bigstring }
+          meta: ts: @ts1, user_id: @user_id
+          v: 42
+        }, {
+          op: { p: 3 + @bigstring.length, i: "bar" }
+          meta: ts: @ts2, user_id: @user_id
+          v: 43
+        }])
+        .to.deep.equal [{
+          op: { p: 3, i: @bigstring }
+          meta: start_ts: @ts1, end_ts: @ts1, user_id: @user_id
+          v: 42
+        }, {
+          op: { p: 3 + @bigstring.length, i: "bar" }
+          meta: start_ts: @ts2, end_ts: @ts2, user_id: @user_id
+          v: 43
+        }]
+
+      it "should not append inserts that are too big (first and second op)", ->
+        expect(@UpdateCompressor.compressUpdates [{
+          op: { p: 3, i: @mediumstring }
+          meta: ts: @ts1, user_id: @user_id
+          v: 42
+        }, {
+          op: { p: 3 + @mediumstring.length, i: @mediumstring }
+          meta: ts: @ts2, user_id: @user_id
+          v: 43
+        }])
+        .to.deep.equal [{
+          op: { p: 3, i: @mediumstring }
+          meta: start_ts: @ts1, end_ts: @ts1, user_id: @user_id
+          v: 42
+        }, {
+          op: { p: 3 + @mediumstring.length, i: @mediumstring }
+          meta: start_ts: @ts2, end_ts: @ts2, user_id: @user_id
+          v: 43
+        }]
+
   describe "delete - delete", ->
     it "should append one delete to the other", ->
       expect(@UpdateCompressor.compressUpdates [{