Merge pull request #58 from overleaf/spd-cleanup-and-metrics

Add metrics for S3 ingress and egress, and improve acceptance tests

Commit 26c37c7ce2. 7 changed files with 174 additions and 44 deletions.
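The core of the change is byte counting in two places in the persistor: uploads (egress) remember the byte count reported by knox's progress events and record it when the putFile callback fires, while downloads (ingress) count each chunk arriving on the response stream. Below is a minimal sketch of that pattern, with a stand-in metrics object in place of metrics-sharelatex (which, per the diff, exposes count(name, value)); the emitters are plain EventEmitters used for illustration, not the real knox or aws-sdk objects.

    EventEmitter = require('events').EventEmitter

    # Stand-in for metrics-sharelatex, which exposes count(name, value).
    metrics =
      counters: {}
      count: (name, value) ->
        @counters[name] = (@counters[name] or 0) + value

    # Egress: remember the byte count from upload progress events and record the
    # total when the upload finishes, as sendFile does in the knox putFile callback.
    upload = new EventEmitter()
    uploaded = 0
    upload.on 'progress', (progress) -> uploaded = progress.written
    upload.on 'done', -> metrics.count 's3.egress', uploaded

    # Ingress: count each chunk arriving on the download stream, as sendStream
    # does on the unbuffered response stream.
    download = new EventEmitter()
    download.on 'data', (data) -> metrics.count 's3.ingress', data.byteLength

    # Simulate some traffic to show the counters moving.
    upload.emit 'progress', { written: 1024 }
    upload.emit 'done'
    download.emit 'data', Buffer.from('hello world')
    console.log metrics.counters   # { 's3.egress': 1024, 's3.ingress': 11 }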
File 1 of 7: the S3 persistor module (CoffeeScript)

@@ -9,6 +9,7 @@ https.globalAgent.maxSockets = 300
 settings = require("settings-sharelatex")
 request = require("request")
 logger = require("logger-sharelatex")
+metrics = require("metrics-sharelatex")
 fs = require("fs")
 knox = require("knox")
 path = require("path")
@@ -16,10 +17,15 @@ LocalFileWriter = require("./LocalFileWriter")
 Errors = require("./Errors")
 _ = require("underscore")
 awsS3 = require "aws-sdk/clients/s3"
+URL = require('url')

 thirtySeconds = 30 * 1000

 buildDefaultOptions = (bucketName, method, key)->
+  if settings.filestore.s3.endpoint
+    endpoint = "#{settings.filestore.s3.endpoint}/#{bucketName}"
+  else
+    endpoint = "https://#{bucketName}.s3.amazonaws.com"
   return {
     aws:
       key: settings.filestore.s3.key
@@ -27,33 +33,51 @@ buildDefaultOptions = (bucketName, method, key)->
       bucket: bucketName
     method: method
     timeout: thirtySeconds
-    uri:"https://#{bucketName}.s3.amazonaws.com/#{key}"
+    uri:"#{endpoint}/#{key}"
   }

-defaultS3Client = new awsS3({
-  credentials:
-    accessKeyId: settings.filestore.s3.key,
-    secretAccessKey: settings.filestore.s3.secret
-})
+getS3Options = (credentials) ->
+  options =
+    credentials:
+      accessKeyId: credentials.auth_key
+      secretAccessKey: credentials.auth_secret
+
+  if settings.filestore.s3.endpoint
+    endpoint = URL.parse(settings.filestore.s3.endpoint)
+    options.endpoint = settings.filestore.s3.endpoint
+    options.sslEnabled = endpoint.protocol == 'https'
+
+  return options
+
+defaultS3Client = new awsS3(getS3Options({
+  auth_key: settings.filestore.s3.key,
+  auth_secret: settings.filestore.s3.secret
+}))

 getS3Client = (credentials) ->
   if credentials?
-    return new awsS3({
-      credentials:
-        accessKeyId: credentials.auth_key
-        secretAccessKey: credentials.auth_secret
-    })
+    return new awsS3(getS3Options(credentials))
   else
     return defaultS3Client

+getKnoxClient = (bucketName) =>
+  options =
+    key: settings.filestore.s3.key
+    secret: settings.filestore.s3.secret
+    bucket: bucketName
+  if settings.filestore.s3.endpoint
+    endpoint = URL.parse(settings.filestore.s3.endpoint)
+    options.endpoint = endpoint.hostname
+    options.port = endpoint.port
+  return knox.createClient(options)
+
 module.exports =

   sendFile: (bucketName, key, fsPath, callback)->
-    s3Client = knox.createClient
-      key: settings.filestore.s3.key
-      secret: settings.filestore.s3.secret
-      bucket: bucketName
+    s3Client = getKnoxClient(bucketName)
+    uploaded = 0
+
     putEventEmiter = s3Client.putFile fsPath, key, (err, res)->
+      metrics.count 's3.egress', uploaded
       if err?
         logger.err err:err, bucketName:bucketName, key:key, fsPath:fsPath,"something went wrong uploading file to s3"
         return callback(err)
@@ -68,6 +92,8 @@ module.exports =
     putEventEmiter.on "error", (err)->
       logger.err err:err, bucketName:bucketName, key:key, fsPath:fsPath, "error emmited on put of file"
       callback err
+    putEventEmiter.on "progress", (progress)->
+      uploaded = progress.written

   sendStream: (bucketName, key, readStream, callback)->
     logger.log bucketName:bucketName, key:key, "sending file to s3"
@@ -95,9 +121,9 @@ module.exports =
      }
      if opts.start? and opts.end?
        s3Params['Range'] = "bytes=#{opts.start}-#{opts.end}"
-     request = s3.getObject(s3Params)
+     s3Request = s3.getObject(s3Params)

-     request.on 'httpHeaders', (statusCode, headers, response, statusMessage) =>
+     s3Request.on 'httpHeaders', (statusCode, headers, response, statusMessage) =>
       if statusCode in [403, 404]
         # S3 returns a 403 instead of a 404 when the user doesn't have
         # permission to list the bucket contents.
@@ -107,13 +133,16 @@ module.exports =
         logger.log({bucketName: bucketName, key: key }, "error getting file from s3: #{statusCode}")
         return callback(new Error("Got non-200 response from S3: #{statusCode} #{statusMessage}"), null)
       stream = response.httpResponse.createUnbufferedStream()
+      stream.on 'data', (data) ->
+        metrics.count 's3.ingress', data.byteLength
+
       callback(null, stream)

-     request.on 'error', (err) =>
+     s3Request.on 'error', (err) =>
       logger.err({ err: err, bucketName: bucketName, key: key }, "error getting file stream from s3")
       callback(err)

-     request.send()
+     s3Request.send()

   getFileSize: (bucketName, key, callback) ->
     logger.log({ bucketName: bucketName, key: key }, "getting file size from S3")
@@ -171,10 +200,7 @@ module.exports =
       _callback = () ->

     logger.log key: key, bucketName: bucketName, "deleting directory"
-    s3Client = knox.createClient
-      key: settings.filestore.s3.key
-      secret: settings.filestore.s3.secret
-      bucket: bucketName
+    s3Client = getKnoxClient(bucketName)
     s3Client.list prefix:key, (err, data)->
       if err?
         logger.err err:err, bucketName:bucketName, key:key, "something went wrong listing prefix in aws"
@@ -200,10 +226,7 @@ module.exports =

   directorySize:(bucketName, key, callback)->
     logger.log bucketName:bucketName, key:key, "get project size in s3"
-    s3Client = knox.createClient
-      key: settings.filestore.s3.key
-      secret: settings.filestore.s3.secret
-      bucket: bucketName
+    s3Client = getKnoxClient(bucketName)
     s3Client.list prefix:key, (err, data)->
       if err?
         logger.err err:err, bucketName:bucketName, key:key, "something went wrong listing prefix in aws"
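One practical effect of the new getS3Options and getKnoxClient helpers is that a custom endpoint, such as the fake S3 wired up in the compose files further down, gets split differently for the two client libraries: aws-sdk takes the full endpoint plus an sslEnabled flag, while knox wants hostname and port separately. A hypothetical sketch with an illustrative endpoint and fake credentials (note that url.parse reports the protocol with a trailing colon, e.g. 'http:', so the comparison against 'https' as written evaluates to false):

    # Hypothetical endpoint and credentials; shows how the two helpers split a
    # custom endpoint for their respective client libraries.
    URL = require('url')

    endpointSetting = 'http://fakes3:9090'
    parsed = URL.parse(endpointSetting)

    # aws-sdk options, as getS3Options assembles them
    awsOptions =
      credentials:
        accessKeyId: 'fake'
        secretAccessKey: 'fake'
      endpoint: endpointSetting
      sslEnabled: parsed.protocol == 'https'   # parsed.protocol is 'http:', so false

    # knox options, as getKnoxClient assembles them
    knoxOptions =
      key: 'fake'
      secret: 'fake'
      bucket: 'fake_user_files'
      endpoint: parsed.hostname   # 'fakes3'
      port: parsed.port           # '9090'

    console.log awsOptions, knoxOptions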
File 2 of 7: default settings (CoffeeScript)

@@ -16,6 +16,7 @@ settings =
   s3:
     key: process.env['AWS_KEY']
     secret: process.env['AWS_SECRET']
+    endpoint: process.env['AWS_S3_ENDPOINT']
   stores:
     user_files: process.env['AWS_S3_USER_FILES_BUCKET_NAME']
     template_files: process.env['AWS_S3_TEMPLATE_FILES_BUCKET_NAME']
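The new endpoint setting is optional; the persistor only takes the custom-endpoint code paths when it is present. A tiny sketch of that guard, assuming the same settings shape as above:

    # Sketch: the endpoint is optional; when it is unset the persistor keeps
    # using the public S3 URLs. The settings shape mirrors the file above.
    settings =
      filestore:
        s3:
          key: process.env['AWS_KEY']
          secret: process.env['AWS_SECRET']
          endpoint: process.env['AWS_S3_ENDPOINT']   # e.g. http://fakes3:9090 in the test compose files

    if settings.filestore.s3.endpoint
      console.log "routing S3 traffic to #{settings.filestore.s3.endpoint}"
    else
      console.log "using the default AWS S3 endpoints"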
File 3 of 7: docker-compose config (YAML)

@@ -3,7 +3,7 @@
 # https://github.com/sharelatex/sharelatex-dev-environment
 # Version: 1.1.24

-version: "2"
+version: "2.1"

 services:
   test_unit:
@@ -25,9 +25,20 @@ services:
       ENABLE_CONVERSIONS: "true"
       MOCHA_GREP: ${MOCHA_GREP}
       NODE_ENV: test
+      USE_PROM_METRICS: "true"
+      AWS_KEY: fake
+      AWS_SECRET: fake
+      AWS_S3_USER_FILES_BUCKET_NAME: fake_user_files
+      AWS_S3_TEMPLATE_FILES_BUCKET_NAME: fake_template_files
+      AWS_S3_PUBLIC_FILES_BUCKET_NAME: fake_public_files
+      AWS_S3_ENDPOINT: http://fakes3:9090
     depends_on:
-      - mongo
-      - redis
+      mongo:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+      fakes3:
+        condition: service_healthy
     user: node
     command: npm run test:acceptance:_run

@@ -46,3 +57,11 @@ services:
   mongo:
     image: mongo:3.4

+  fakes3:
+    image: adobe/s3mock
+    environment:
+      - initialBuckets=fake_user_files,fake_template_files,fake_public_files
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9090"]
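The bump from compose format "2" to "2.1" is presumably needed because the long-form depends_on with condition: service_healthy is only available from schema 2.1 onward. The fakes3 healthcheck itself is just curl against the mock's HTTP port; a rough CoffeeScript equivalent of that probe, with the host and port as wired up above:

    # Rough equivalent of the container healthcheck: curl -f fails on HTTP error
    # statuses, so treat anything below 400 as healthy. Inside the compose
    # network the host would be "fakes3" rather than localhost.
    request = require('request')

    checkFakeS3 = (cb) ->
      request.get 'http://localhost:9090', (err, response) ->
        cb(not err and response.statusCode < 400)

    checkFakeS3 (healthy) ->
      msg = if healthy then 'fakes3 is healthy' else 'fakes3 is not ready yet'
      console.log msg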
File 4 of 7: second docker-compose config (YAML)

@@ -3,7 +3,7 @@
 # https://github.com/sharelatex/sharelatex-dev-environment
 # Version: 1.1.24

-version: "2"
+version: "2.1"

 services:
   test_unit:
@@ -31,10 +31,21 @@ services:
       ENABLE_CONVERSIONS: "true"
       LOG_LEVEL: ERROR
       NODE_ENV: test
+      USE_PROM_METRICS: "true"
+      AWS_KEY: fake
+      AWS_SECRET: fake
+      AWS_S3_USER_FILES_BUCKET_NAME: fake_user_files
+      AWS_S3_TEMPLATE_FILES_BUCKET_NAME: fake_template_files
+      AWS_S3_PUBLIC_FILES_BUCKET_NAME: fake_public_files
+      AWS_S3_ENDPOINT: http://fakes3:9090
     user: node
     depends_on:
-      - mongo
-      - redis
+      mongo:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+      fakes3:
+        condition: service_healthy
     command: npm run test:acceptance

@@ -53,4 +64,11 @@ services:
   mongo:
     image: mongo:3.4

+  fakes3:
+    image: adobe/s3mock
+    environment:
+      - initialBuckets=fake_user_files,fake_template_files,fake_public_files
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9090"]
File 5 of 7: acceptance-test helper, FilestoreApp (CoffeeScript)

@@ -2,6 +2,9 @@ app = require('../../../app')
 require("logger-sharelatex").logger.level("info")
 logger = require("logger-sharelatex")
 Settings = require("settings-sharelatex")
+request = require('request')
+
+S3_TRIES = 30

 module.exports =
   running: false
@@ -22,3 +25,21 @@ module.exports =
       for callback in @callbacks
         callback()

+  waitForS3: (callback, tries) ->
+    return callback() unless Settings.filestore.s3?.endpoint
+    tries = 1 unless tries
+
+    request.get "#{Settings.filestore.s3.endpoint}/", (err, response) =>
+      console.log(err, response?.statusCode, tries)
+      if !err && [200, 404].includes(response?.statusCode)
+        return callback()
+
+      if tries == S3_TRIES
+        return callback('timed out waiting for S3')
+
+      setTimeout(
+        () =>
+          @waitForS3 callback, tries + 1
+        1000
+      )
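waitForS3 polls the configured endpoint once a second, up to S3_TRIES attempts, and treats 200 or 404 as "up" (presumably because a bare GET on the mock's root may 404 even when the service is ready). A hypothetical standalone use of the helper, outside the mocha hooks that call it in the test file below:

    # Hypothetical standalone use; the relative path assumes the
    # acceptance-test directory, as in the test file below.
    FilestoreApp = require "./FilestoreApp"

    FilestoreApp.waitForS3 (err) ->
      if err
        console.error "gave up waiting for the fake S3:", err
        process.exit(1)
      else
        console.log "fake S3 is reachable"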
File 6 of 7: Filestore acceptance tests (CoffeeScript)

@@ -9,9 +9,16 @@ fs = require("fs")
 request = require("request")
 settings = require("settings-sharelatex")
 FilestoreApp = require "./FilestoreApp"
+async = require('async')
+
+getMetric = (filestoreUrl, metric, cb) ->
+  request.get "#{filestoreUrl}/metrics", (err, res) ->
+    expect(res.statusCode).to.equal 200
+    metricRegex = new RegExp("^#{metric}{[^}]+} ([0-9]+)$", "m")
+    cb(parseInt(metricRegex.exec(res.body)?[1] || '0'))

 describe "Filestore", ->

   before (done)->
     @localFileReadPath = "/tmp/filestore_acceptence_tests_file_read.txt"
     @localFileWritePath = "/tmp/filestore_acceptence_tests_file_write.txt"
@@ -22,15 +29,26 @@ describe "Filestore", ->
       "there are 3 lines in all"
     ].join("\n")

-    fs.writeFile(@localFileReadPath, @constantFileContent, done)
     @filestoreUrl = "http://localhost:#{settings.internal.filestore.port}"
+    fs.writeFile @localFileReadPath, @constantFileContent, (err) ->
+      return done(err) if err
+      FilestoreApp.waitForS3(done)

   beforeEach (done)->
     FilestoreApp.ensureRunning =>
-      fs.unlink @localFileWritePath, ->
-        done()
+      async.parallel [
+        (cb) =>
+          fs.unlink @localFileWritePath, () ->
+            cb()
+        (cb) =>
+          getMetric @filestoreUrl, 's3_egress', (metric) =>
+            @previousEgress = metric
+            cb()
+        (cb) =>
+          getMetric @filestoreUrl, 's3_ingress', (metric) =>
+            @previousIngress = metric
+            cb()
+      ], done

   it "should send a 200 for status endpoint", (done)->
     request "#{@filestoreUrl}/status", (err, response, body)->
@@ -59,6 +77,11 @@ describe "Filestore", ->
        response.statusCode.should.equal 404
        done()

+    it 'should record an egress metric for the upload', (done) ->
+      getMetric @filestoreUrl, 's3_egress', (metric) =>
+        expect(metric - @previousEgress).to.equal @constantFileContent.length
+        done()
+
    it "should return the file size on a HEAD request", (done) ->
      expectedLength = Buffer.byteLength(@constantFileContent)
      request.head @fileUrl, (err, res) =>
@@ -72,7 +95,14 @@ describe "Filestore", ->
        body.should.equal @constantFileContent
        done()

-    it "should be able to get back the first 8 bytes of the file", (done) ->
+    it "should record an ingress metric when downloading the file", (done)->
+      @timeout(1000 * 10)
+      request.get @fileUrl, () =>
+        getMetric @filestoreUrl, 's3_ingress', (metric) =>
+          expect(metric - @previousIngress).to.equal @constantFileContent.length
+          done()
+
+    it "should be able to get back the first 9 bytes of the file", (done) ->
      @timeout(1000 * 10)
      options =
        uri: @fileUrl
@@ -82,6 +112,17 @@ describe "Filestore", ->
        body.should.equal 'hello wor'
        done()

+    it "should record an ingress metric for a partial download", (done)->
+      @timeout(1000 * 10)
+      options =
+        uri: @fileUrl
+        headers:
+          'Range': 'bytes=0-8'
+      request.get options, ()=>
+        getMetric @filestoreUrl, 's3_ingress', (metric) =>
+          expect(metric - @previousIngress).to.equal 9
+          done()
+
    it "should be able to get back bytes 4 through 10 of the file", (done) ->
      @timeout(1000 * 10)
      options =
@@ -128,11 +169,17 @@ describe "Filestore", ->
      @file_id = Math.random()
      @fileUrl = "#{@filestoreUrl}/project/acceptence_tests/file/#{@file_id}"
      @localFileReadPath = __dirname + '/../../fixtures/test.pdf'
+      fs.stat @localFileReadPath, (err, stat) =>
+        @localFileSize = stat.size
+        writeStream = request.post(@fileUrl)

-      writeStream = request.post(@fileUrl)
+        writeStream.on "end", done
+        fs.createReadStream(@localFileReadPath).pipe writeStream

-      writeStream.on "end", done
-      fs.createReadStream(@localFileReadPath).pipe writeStream
+    it 'should record an egress metric for the upload', (done) ->
+      getMetric @filestoreUrl, 's3_egress', (metric) =>
+        expect(metric - @previousEgress).to.equal @localFileSize
+        done()

    it "should be able get the file back", (done)->
      @timeout(1000 * 10)
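getMetric scrapes the service's /metrics endpoint and pulls a single counter out of the Prometheus text format with a regular expression. A small sketch of that parsing step against an illustrative exposition body (the label set is made up; only the metric names s3_egress and s3_ingress come from the tests):

    # Illustrative Prometheus exposition text; the labels are hypothetical.
    body = '''
    s3_egress{app="filestore"} 1042
    s3_ingress{app="filestore"} 2084
    '''

    metric = 's3_egress'
    # Same pattern the test helper builds: metric name, one label block, integer value.
    metricRegex = new RegExp("^#{metric}{[^}]+} ([0-9]+)$", "m")
    value = parseInt(metricRegex.exec(body)?[1] || '0')
    console.log value   # 1042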
File 7 of 7: S3PersistorManager unit tests (CoffeeScript)

@@ -62,6 +62,7 @@ describe "S3PersistorManagerTests", ->
     describe "success", ->
       beforeEach () ->
         @expectedStream = { expectedStream: true }
+        @expectedStream.on = sinon.stub()
         @s3Request.send.callsFake () =>
           @s3EventHandlers.httpHeaders(200, {}, @s3Response, "OK")
         @s3Response.httpResponse.createUnbufferedStream.returns(@expectedStream)
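The one-line unit-test change follows from the new stream.on 'data' handler in sendStream: the fake object returned by the stubbed createUnbufferedStream now needs an on method, otherwise the code under test would throw. A sketch of the idea with sinon; the variable names mirror the test, and the assertion at the end is only a suggestion, not something the test currently does:

    sinon = require('sinon')

    # The fake object that the stubbed createUnbufferedStream returns in the test.
    expectedStream = { expectedStream: true }
    expectedStream.on = sinon.stub()

    # sendStream now attaches an ingress counter to the stream; with the stub in
    # place this call is a harmless no-op instead of a TypeError.
    expectedStream.on 'data', (chunk) -> return

    # A test could additionally assert that the handler was attached:
    console.log expectedStream.on.calledWith('data')   # true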