Merge pull request #39 from sharelatex/bg-fix-message-ids

fix message id parsing
Brian Gough 2019-03-22 11:54:03 +00:00 committed by GitHub
commit 61ec1d195b
3 changed files with 58 additions and 32 deletions


@@ -14,14 +14,15 @@ module.exports = EventLogger =
   checkEventOrder: (channel, message_id, message) ->
     return if typeof(message_id) isnt 'string'
-    [key, count] = message_id.split("-", 2)
-    count = parseInt(count, 10)
+    return if !(result = message_id.match(/^(.*)-(\d+)$/))
+    key = result[1]
+    count = parseInt(result[2], 0)
     if !(count >= 0)# ignore checks if counter is not present
       return
     # store the last count in a hash for each host
     previous = EventLogger._storeEventCount(key, count)
     if !previous? || count == (previous + 1)
-      metrics.inc "event.#{channel}.valid", 0.001
+      metrics.inc "event.#{channel}.valid", 0.001 # downsample high rate docupdater events
       return # order is ok
     if (count == previous)
       metrics.inc "event.#{channel}.duplicate"
@@ -30,7 +31,7 @@ module.exports = EventLogger =
     else
       metrics.inc "event.#{channel}.out-of-order"
       # logger.error {key:key, previous: previous, count:count, message:message}, "events out of order"
-    return # out of order
+    return "out-of-order"
 
   _storeEventCount: (key, count) ->
     previous = EVENT_LOG_COUNTER[key]
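
For context on the parsing change above: the old split("-", 2) breaks as soon as the key itself contains a hyphen, which is exactly the shape of the host-prefixed ids used in the updated tests below (e.g. "random-hostname:abc-1"). A minimal standalone CoffeeScript sketch, not part of the commit, comparing the two approaches:

    message_id = "random-hostname:abc-1"

    # Old parsing: split("-", 2) stops at the first hyphen, so the key is
    # truncated to "random" and the counter string is "hostname:abc" (NaN).
    [key, count] = message_id.split("-", 2)
    console.log key, parseInt(count, 10)     # random NaN

    # New parsing: a greedy match keeps everything up to the last hyphen as
    # the key and requires a trailing numeric counter.
    if result = message_id.match(/^(.*)-(\d+)$/)
      key = result[1]                        # "random-hostname:abc"
      count = parseInt(result[2], 10)        # 1
      console.log key, count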


@@ -22,6 +22,7 @@ describe "DocumentUpdaterController", ->
         @rclient = {}
       "./SafeJsonParse": @SafeJsonParse =
         parse: (data, cb) => cb null, JSON.parse(data)
+      "./EventLogger": @EventLogger = {checkEventOrder: sinon.stub()}
 
   describe "listenForUpdatesFromDocumentUpdater", ->
     beforeEach ->
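
The single added line above injects a stubbed EventLogger into the controller's sandboxed requires, so the DocumentUpdaterController tests no longer depend on the real ordering logic. A hypothetical follow-on assertion (not part of this diff; the test name, channel and id values are illustrative assumptions) showing how such a sinon stub is typically verified:

    it "should check the event order of incoming messages", ->
      # channel and message id values here are purely illustrative
      @EventLogger.checkEventOrder
        .calledWith("applied-ops", "random-hostname:abc-1")
        .should.equal true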


@@ -11,9 +11,11 @@ describe 'EventLogger', ->
     tk.freeze(new Date(@start))
     @EventLogger = SandboxedModule.require modulePath, requires:
       "logger-sharelatex": @logger = {error: sinon.stub()}
-    @id_1 = "abc-1"
+      "metrics-sharelatex": @metrics = {inc: sinon.stub()}
+    @channel = "applied-ops"
+    @id_1 = "random-hostname:abc-1"
     @message_1 = "message-1"
-    @id_2 = "abc-2"
+    @id_2 = "random-hostname:abc-2"
     @message_2 = "message-2"
 
   afterEach ->
@@ -21,32 +23,54 @@ describe 'EventLogger', ->
   describe 'checkEventOrder', ->
 
-    it 'should accept events in order', ->
-      @EventLogger.checkEventOrder(@id_1, @message_1)
-      status = @EventLogger.checkEventOrder(@id_2, @message_2)
-      expect(status).to.be.undefined
+    describe 'when the events are in order', ->
+      beforeEach ->
+        @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+        @status = @EventLogger.checkEventOrder(@channel, @id_2, @message_2)
 
-    it 'should return "duplicate" for the same event', ->
-      @EventLogger.checkEventOrder(@id_1, @message_1)
-      status = @EventLogger.checkEventOrder(@id_1, @message_1)
-      expect(status).to.equal "duplicate"
+      it 'should accept events in order', ->
+        expect(@status).to.be.undefined
 
-    it 'should log an error for out of order events', ->
-      @EventLogger.checkEventOrder(@id_1, @message_1)
-      @EventLogger.checkEventOrder(@id_2, @message_2)
-      status = @EventLogger.checkEventOrder(@id_1, @message_1)
-      expect(status).to.be.undefined
+      it 'should increment the valid event metric', ->
+        @metrics.inc.calledWith("event.#{@channel}.valid", 1)
+          .should.equal.true
 
-    it 'should flush old entries', ->
-      @EventLogger.MAX_EVENTS_BEFORE_CLEAN = 10
-      @EventLogger.checkEventOrder(@id_1, @message_1)
-      for i in [1..8]
-        status = @EventLogger.checkEventOrder(@id_1, @message_1)
-        expect(status).to.equal "duplicate"
-      # the next event should flush the old entries aboce
-      @EventLogger.MAX_STALE_TIME_IN_MS=1000
-      tk.freeze(new Date(@start + 5 * 1000))
-      # because we flushed the entries this should not be a duplicate
-      @EventLogger.checkEventOrder('other-1', @message_2)
-      status = @EventLogger.checkEventOrder(@id_1, @message_1)
-      expect(status).to.be.undefined
+    describe 'when there is a duplicate events', ->
+      beforeEach ->
+        @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+        @status = @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+
+      it 'should return "duplicate" for the same event', ->
+        expect(@status).to.equal "duplicate"
+
+      it 'should increment the duplicate event metric', ->
+        @metrics.inc.calledWith("event.#{@channel}.duplicate", 1)
+          .should.equal.true
+
+    describe 'when there are out of order events', ->
+      beforeEach ->
+        @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+        @EventLogger.checkEventOrder(@channel, @id_2, @message_2)
+        @status = @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+
+      it 'should return "out-of-order" for the event', ->
+        expect(@status).to.equal "out-of-order"
+
+      it 'should increment the out-of-order event metric', ->
+        @metrics.inc.calledWith("event.#{@channel}.out-of-order", 1)
+          .should.equal.true
+
+    describe 'after MAX_STALE_TIME_IN_MS', ->
+      it 'should flush old entries', ->
+        @EventLogger.MAX_EVENTS_BEFORE_CLEAN = 10
+        @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+        for i in [1..8]
+          status = @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+          expect(status).to.equal "duplicate"
+        # the next event should flush the old entries aboce
+        @EventLogger.MAX_STALE_TIME_IN_MS=1000
+        tk.freeze(new Date(@start + 5 * 1000))
+        # because we flushed the entries this should not be a duplicate
+        @EventLogger.checkEventOrder(@channel, 'other-1', @message_2)
+        status = @EventLogger.checkEventOrder(@channel, @id_1, @message_1)
+        expect(status).to.be.undefined
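
Taken together, the reworked tests pin down the return-value contract of checkEventOrder: undefined for in-order events, "duplicate" for a repeated id, and "out-of-order" when the counter goes backwards. A small usage sketch (not from the PR; the require path is an assumption about the repo layout):

    EventLogger = require "../../../app/js/EventLogger"   # assumed path

    channel = "applied-ops"
    EventLogger.checkEventOrder(channel, "random-hostname:abc-1", "message-1")

    # the next counter value is in order, so the check returns undefined
    console.log EventLogger.checkEventOrder(channel, "random-hostname:abc-2", "message-2")

    # repeating the last id returns "duplicate"
    console.log EventLogger.checkEventOrder(channel, "random-hostname:abc-2", "message-2")

    # going backwards returns "out-of-order"
    console.log EventLogger.checkEventOrder(channel, "random-hostname:abc-1", "message-1")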