# overleaf/services/real-time/app.coffee

Metrics = require("metrics-sharelatex")
Settings = require "settings-sharelatex"
Metrics.initialize(Settings.appName or "real-time")
async = require("async")
_ = require "underscore"
logger = require "logger-sharelatex"
logger.initialize("real-time")
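# Watch the Node event loop for lag; results are reported through the metrics
# module, with the logger passed in for output.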
Metrics.event_loop.monitor(logger)
express = require("express")
session = require("express-session")
redis = require("redis-sharelatex")
if Settings.sentry?.dsn?
	logger.initializeErrorReporting(Settings.sentry.dsn)
sessionRedisClient = redis.createClient(Settings.redis.websessions)
RedisStore = require('connect-redis')(session)
SessionSockets = require('session.socket.io')
CookieParser = require("cookie-parser")
DrainManager = require("./app/js/DrainManager")
HealthCheckManager = require("./app/js/HealthCheckManager")
# Set up socket.io server
app = express()
Metrics.injectMetricsRoute(app)
server = require('http').createServer(app)
io = require('socket.io').listen(server)
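# The express app and socket.io listen on the same underlying HTTP server,
# so plain HTTP routes and websocket connections share one port.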
# Bind to sessions
sessionStore = new RedisStore(client: sessionRedisClient)
cookieParser = CookieParser(Settings.security.sessionSecret)
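# session.socket.io bridges express sessions to socket.io: on each handshake
# it parses the session cookie and loads the session from Redis, so websocket
# handlers see the same session as the web app.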
sessionSockets = new SessionSockets(io, sessionStore, cookieParser, Settings.cookieName)
io.configure ->
	io.enable('browser client minification')
	io.enable('browser client etag')

	# Fix for Safari 5 error of "Error during WebSocket handshake: location mismatch"
	# See http://answers.dotcloud.com/question/578/problem-with-websocket-over-ssl-in-safari-with
	io.set('match origin protocol', true)

	# gzip uses a Node 0.8.x method of calling the gzip program which
	# doesn't work with 0.6.x
	#io.enable('browser client gzip')

	io.set('transports', ['websocket', 'flashsocket', 'htmlfile', 'xhr-polling', 'jsonp-polling'])
	io.set('log level', 1)
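# Transports are tried in the order configured above, so clients fall back
# from native websockets through htmlfile to long polling when a proxy or
# old browser blocks the upgrade.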
app.get "/", (req, res, next) ->
	res.send "real-time-sharelatex is alive"

app.get "/status", (req, res, next) ->
	res.send "real-time-sharelatex is alive"
app.get "/debug/events", (req, res, next) ->
	Settings.debugEvents = parseInt(req.query?.count, 10) || 20
	logger.log {count: Settings.debugEvents}, "starting debug mode"
	res.send "debug mode will log next #{Settings.debugEvents} events"

rclient = require("redis-sharelatex").createClient(Settings.redis.realtime)
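# This Redis client is only used by the /health_check/redis endpoint below.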
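# Returns 500 if the Redis ping fails, or if HealthCheckManager reports
# failing pubsub checks (fed by the keepalive publishes at the bottom of
# this file).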
app.get "/health_check/redis", (req, res, next) ->
	rclient.healthCheck (error) ->
		if error?
			logger.err {err: error}, "failed redis health check"
			res.sendStatus 500
		else if HealthCheckManager.isFailing()
			status = HealthCheckManager.status()
			logger.err {pubSubErrors: status}, "failed pubsub health check"
			res.sendStatus 500
		else
			res.sendStatus 200
Metrics.injectMetricsRoute(app)
Router = require "./app/js/Router"
Router.configure(app, io, sessionSockets)
WebsocketLoadBalancer = require "./app/js/WebsocketLoadBalancer"
WebsocketLoadBalancer.listenForEditorEvents(io)
DocumentUpdaterController = require "./app/js/DocumentUpdaterController"
DocumentUpdaterController.listenForUpdatesFromDocumentUpdater(io)
port = Settings.internal.realTime.port
host = Settings.internal.realTime.host
server.listen port, host, (error) ->
	throw error if error?
	logger.info "realtime starting up, listening on #{host}:#{port}"
# Stop huge stack traces in logs from all the socket.io parsing steps.
Error.stackTraceLimit = 10
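# Graceful shutdown: exit once every client has disconnected, re-checking
# every 10 seconds.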
shutdownCleanly = (signal) ->
	connectedClients = io.sockets.clients()?.length
	if connectedClients == 0
		logger.log("no clients connected, exiting")
		process.exit()
	else
		logger.log {connectedClients}, "clients still connected, not shutting down yet"
		setTimeout () ->
			shutdownCleanly(signal)
		, 10000
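# Force drain: after Settings.forceDrainMsDelay ms, stop waiting for clients
# to leave on their own and have DrainManager start disconnecting them (the
# second argument to startDrain is the drain rate).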
forceDrain = ->
	logger.log {delay_ms: Settings.forceDrainMsDelay}, "starting force drain after timeout"
	setTimeout () ->
		logger.log "starting drain"
		DrainManager.startDrain(io, 4)
	, Settings.forceDrainMsDelay
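# Signal handling is only installed when forceDrainMsDelay is configured:
# the first signal starts a graceful shutdown plus the force-drain timer,
# and repeated signals are ignored while that is in progress.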
shutDownInProgress = false
if Settings.forceDrainMsDelay?
	Settings.forceDrainMsDelay = parseInt(Settings.forceDrainMsDelay, 10)
	logger.log forceDrainMsDelay: Settings.forceDrainMsDelay, "forceDrainMsDelay enabled"
	for signal in ['SIGINT', 'SIGHUP', 'SIGQUIT', 'SIGUSR1', 'SIGUSR2', 'SIGTERM', 'SIGABRT']
		process.on signal, ->
			if shutDownInProgress
				logger.log signal: signal, "shutdown already in progress, ignoring signal"
				return
			else
				shutDownInProgress = true
				logger.log signal: signal, "received interrupt, cleaning up"
				shutdownCleanly(signal)
				forceDrain()
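# Keepalive traffic: every 20 seconds publish a health_check payload on the
# channels this service subscribes to ("applied-ops" and "editor-events"),
# both to keep idle Redis pubsub connections open and to give
# HealthCheckManager a round trip to verify.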
if Settings.continualPubsubTraffic
	console.log "continualPubsubTraffic enabled"
	redisClients = [redis.createClient(Settings.redis.documentupdater), redis.createClient(Settings.redis.pubsub)]

	publishJob = (channel, callback) ->
		checker = new HealthCheckManager(channel)
		logger.debug {channel: channel}, "sending pub to keep connection alive"
		json = JSON.stringify({health_check: true, key: checker.id, date: new Date().toString()})
		jobs = _.map redisClients, (checkRclient) ->
			return (cb) ->
				checkRclient.publish channel, json, (err) ->
					if err?
						logger.err {err, channel}, "error publishing pubsub traffic to redis"
					# cb must always fire so async.series can move on to the next client
					return cb(err)
		async.series jobs, callback

	runPubSubTraffic = ->
		async.map ["applied-ops", "editor-events"], publishJob, (err) ->
			setTimeout(runPubSubTraffic, 1000 * 20)

	runPubSubTraffic()