overleaf/services/project-history/app/js/LocalFileWriter.js

/* eslint-disable
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
import fs from 'node:fs'
import { pipeline } from 'node:stream'
import { randomUUID } from 'node:crypto'
import path from 'node:path'
import _ from 'lodash'
import logger from '@overleaf/logger'
import metrics from '@overleaf/metrics'
import Settings from '@overleaf/settings'
import OError from '@overleaf/o-error'
import * as LargeFileManager from './LargeFileManager.js'

//
// This method takes an input stream and buffers its contents in a temporary
// file on disk, then hands the file's path to `consumeOutStream` so the data
// can be re-read from disk at the consumer's own pace.
//
// This is useful if we're piping one network stream to another. If the stream
// we're piping to can't consume data as quickly as the one we're consuming
// from, large quantities of data may be held in memory. Instead, the read
// stream can be passed to this method: the data will then be held on disk
// rather than in memory, and the temporary file is cleaned up once it has
// been consumed.
//
export function bufferOnDisk(
  inStream,
  url,
  fileId,
  consumeOutStream,
  callback
) {
  const timer = new metrics.Timer('LocalFileWriter.writeStream')
  const fsPath = path.join(
    Settings.path.uploadFolder,
    randomUUID() + `-${fileId}`
  )

  // Runs at most once: deletes the temp file, then forwards the stream error
  // (or, failing that, any cleanup error) and the result to the callback.
  const cleanup = _.once((streamError, res) => {
    return deleteFile(fsPath, function (cleanupError) {
      if (streamError) {
        OError.tag(streamError, 'error deleting temporary file', {
          fsPath,
          url,
        })
      }
      if (cleanupError) {
        OError.tag(cleanupError)
      }
      if (streamError && cleanupError) {
        // logging the cleanup error in case only the stream error is sent to the callback
        logger.error(cleanupError)
      }
      return callback(streamError || cleanupError, res)
    })
  })

  logger.debug({ fsPath, url }, 'writing file locally')
  const writeStream = fs.createWriteStream(fsPath)
  pipeline(inStream, writeStream, err => {
    if (err) {
      OError.tag(err, 'problem writing file locally', {
        fsPath,
        url,
      })
      return cleanup(err)
    }
    timer.done()
    // in future check inStream.response.headers for hash value here
    logger.debug({ fsPath, url }, 'stream closed after writing file locally')
    const fileSize = writeStream.bytesWritten
    return LargeFileManager.replaceWithStubIfNeeded(
      fsPath,
      fileId,
      fileSize,
      function (err, newFsPath) {
        if (err != null) {
          OError.tag(err, 'problem in large file manager', {
            newFsPath,
            fsPath,
            fileId,
            fileSize,
          })
          return cleanup(err)
        }
        return consumeOutStream(newFsPath, cleanup)
      }
    )
  })
}
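
// Illustrative usage sketch (not part of this module): `fetchStream` and
// `uploadFromDisk` are hypothetical stand-ins for a fast network read stream
// and a slower consumer.
//
//   bufferOnDisk(fetchStream, url, fileId, (fsPath, done) => {
//     // all data is on disk at fsPath now; re-read it at the consumer's pace
//     uploadFromDisk(fsPath, done)
//   }, err => {
//     if (err) logger.error({ err, url }, 'failed to buffer stream on disk')
//   })
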
export function deleteFile(fsPath, callback) {
  if (fsPath == null || fsPath === '') {
    return callback()
  }
  logger.debug({ fsPath }, 'removing local temp file')
  return fs.unlink(fsPath, function (err) {
    // ignore ENOENT: the file may never have been created in the first place
    if (err != null && err.code !== 'ENOENT') {
      return callback(OError.tag(err))
    } else {
      return callback()
    }
  })
}
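
// Illustrative: deleteFile tolerates files that were never created, since
// ENOENT errors are swallowed. A hypothetical call on a missing path:
//
//   deleteFile('/tmp/uploads/already-gone', err => {
//     // err is undefined even though the file did not exist
//   })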