# Settings for the filestore service.
Path = require "path"
|
|
|
|
|
2019-12-17 04:57:51 -05:00
|
|
|
# environment variables renamed for consistency
|
|
|
|
# use AWS_ACCESS_KEY_ID-style going forward
|
|
|
|
if process.env['AWS_KEY'] && !process.env['AWS_ACCESS_KEY_ID']
|
|
|
|
process.env['AWS_ACCESS_KEY_ID'] = process.env['AWS_KEY']
|
|
|
|
if process.env['AWS_SECRET'] && !process.env['AWS_SECRET_ACCESS_KEY']
|
|
|
|
process.env['AWS_SECRET_ACCESS_KEY'] = process.env['AWS_SECRET']
|
|
|
|
|
2020-02-17 09:04:42 -05:00
|
|
|
# pre-backend setting, fall back to old behaviour
|
|
|
|
unless process.env['BACKEND']?
|
|
|
|
if process.env['AWS_ACCESS_KEY_ID']? or process.env['S3_BUCKET_CREDENTIALS']?
|
|
|
|
process.env['BACKEND'] = "s3"
|
|
|
|
process.env['USER_FILES_BUCKET_NAME'] = process.env['AWS_S3_USER_FILES_BUCKET_NAME']
|
|
|
|
process.env['TEMPLATE_FILES_BUCKET_NAME'] = process.env['AWS_S3_TEMPLATE_FILES_BUCKET_NAME']
|
|
|
|
process.env['PUBLIC_FILES_BUCKET_NAME'] = process.env['AWS_S3_PUBLIC_FILES_BUCKET_NAME']
|
|
|
|
else
|
|
|
|
process.env['BACKEND'] = "fs"
|
|
|
|
process.env['USER_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../user_files")
|
|
|
|
process.env['TEMPLATE_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../template_files")
|
|
|
|
process.env['PUBLIC_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../public_files")
|
|
|
|
|
2018-03-23 11:26:38 -04:00
|
|
|
settings =
|
2014-02-14 11:39:05 -05:00
|
|
|
internal:
|
|
|
|
filestore:
|
|
|
|
port: 3009
|
2018-03-23 12:14:30 -04:00
|
|
|
host: process.env['LISTEN_ADDRESS'] or "localhost"
|
2014-03-04 10:01:13 -05:00
|
|
|
|
|
|
|
filestore:
|
2014-04-02 12:45:41 -04:00
|
|
|
# Which backend persistor to use.
|
|
|
|
# Choices are
|
2014-03-04 10:01:13 -05:00
|
|
|
# s3 - Amazon S3
|
|
|
|
# fs - local filesystem
|
2020-02-12 06:02:05 -05:00
|
|
|
# gcs - Google Cloud Storage
|
2020-02-17 09:04:42 -05:00
|
|
|
backend: process.env['BACKEND']
|
|
|
|
|
2020-03-05 12:23:47 -05:00
|
|
|
gcs:
|
2020-03-13 12:14:06 -04:00
|
|
|
endpoint:
|
|
|
|
if process.env['GCS_API_ENDPOINT']
|
|
|
|
apiEndpoint: process.env['GCS_API_ENDPOINT']
|
|
|
|
apiScheme: process.env['GCS_API_SCHEME']
|
|
|
|
projectId: process.env['GCS_PROJECT_ID']
|
|
|
|
unlockBeforeDelete: process.env['GCS_UNLOCK_BEFORE_DELETE'] == "true" # unlock an event-based hold before deleting. default false
|
|
|
|
deletedBucketSuffix: process.env['GCS_DELETED_BUCKET_SUFFIX'] # if present, copy file to another bucket on delete. default null
|
2020-03-16 11:54:05 -04:00
|
|
|
deleteConcurrency: parseInt(process.env['GCS_DELETE_CONCURRENCY']) || 50
|
2020-07-08 05:14:02 -04:00
|
|
|
signedUrlExpiryInMs: parseInt(process.env['LINK_EXPIRY_TIMEOUT'] || 60000)
|
2020-03-05 12:23:47 -05:00
|
|
|
|
2020-02-17 09:04:42 -05:00
|
|
|
s3:
|
|
|
|
if process.env['AWS_ACCESS_KEY_ID']? or process.env['S3_BUCKET_CREDENTIALS']?
|
2019-12-16 12:09:38 -05:00
|
|
|
key: process.env['AWS_ACCESS_KEY_ID']
|
|
|
|
secret: process.env['AWS_SECRET_ACCESS_KEY']
|
2019-12-05 08:55:08 -05:00
|
|
|
endpoint: process.env['AWS_S3_ENDPOINT']
|
2020-01-10 04:51:49 -05:00
|
|
|
pathStyle: process.env['AWS_S3_PATH_STYLE']
|
2020-02-18 05:24:29 -05:00
|
|
|
partSize: process.env['AWS_S3_PARTSIZE'] or (100 * 1024 * 1024)
|
2020-07-08 05:14:02 -04:00
|
|
|
bucketCreds: JSON.parse process.env['S3_BUCKET_CREDENTIALS'] if process.env['S3_BUCKET_CREDENTIALS']?
|
2020-02-17 09:04:42 -05:00
|
|
|
|
2020-02-12 06:02:05 -05:00
|
|
|
# GCS should be configured by the service account on the kubernetes pod. See GOOGLE_APPLICATION_CREDENTIALS,
|
|
|
|
# which will be picked up automatically.
|
|
|
|
|
2020-02-17 09:04:42 -05:00
|
|
|
stores:
|
|
|
|
user_files: process.env['USER_FILES_BUCKET_NAME']
|
|
|
|
template_files: process.env['TEMPLATE_FILES_BUCKET_NAME']
|
|
|
|
public_files: process.env['PUBLIC_FILES_BUCKET_NAME']
|
|
|
|
|
|
|
|
fallback:
|
|
|
|
if process.env['FALLBACK_BACKEND']?
|
|
|
|
backend: process.env['FALLBACK_BACKEND']
|
|
|
|
# mapping of bucket names on the fallback, to bucket names on the primary.
|
|
|
|
# e.g. { myS3UserFilesBucketName: 'myGoogleUserFilesBucketName' }
|
|
|
|
buckets: JSON.parse(process.env['FALLBACK_BUCKET_MAPPING'] || '{}')
|
|
|
|
copyOnMiss: process.env['COPY_ON_MISS'] == 'true'
|
2018-03-23 11:26:38 -04:00
|
|
|
|
2020-04-09 12:11:19 -04:00
|
|
|
allowRedirects: if process.env['ALLOW_REDIRECTS'] == 'true' then true else false
|
|
|
|
|
2014-05-17 16:01:48 -04:00
|
|
|
path:
|
|
|
|
uploadFolder: Path.resolve(__dirname + "/../uploads")
|
2018-07-06 05:28:02 -04:00
|
|
|
|
2016-05-09 06:37:35 -04:00
|
|
|
commands:
|
|
|
|
# Any commands to wrap the convert utility in, for example ["nice"], or ["firejail", "--profile=/etc/firejail/convert.profile"]
|
|
|
|
convertCommandPrefix: []
|
2014-05-17 16:01:48 -04:00
|
|
|
|
2019-02-05 06:19:02 -05:00
|
|
|
enableConversions: if process.env['ENABLE_CONVERSIONS'] == 'true' then true else false
|
2018-03-23 11:26:38 -04:00
|
|
|
|
2019-03-05 12:20:16 -05:00
|
|
|
sentry:
|
|
|
|
dsn: process.env.SENTRY_DSN
|
2020-03-05 12:23:47 -05:00
|
|
|
|
2018-03-23 11:26:38 -04:00
|
|
|
# Filestore health check
|
|
|
|
# ----------------------
|
|
|
|
# Project and file details to check in persistor when calling /health_check
|
|
|
|
if process.env['HEALTH_CHECK_PROJECT_ID']? and process.env['HEALTH_CHECK_FILE_ID']?
|
|
|
|
settings.health_check =
|
|
|
|
project_id: process.env['HEALTH_CHECK_PROJECT_ID']
|
|
|
|
file_id: process.env['HEALTH_CHECK_FILE_ID']
|
|
|
|
|
|
|
|
module.exports = settings
|