import OError from '@overleaf/o-error'

const PDF_JS_CHUNK_SIZE = 128 * 1024
const MAX_SUB_REQUEST_COUNT = 4
const MAX_SUB_REQUEST_BYTES = 4 * PDF_JS_CHUNK_SIZE
const SAMPLE_NGINX_BOUNDARY = '00000000000000000001'
const HEADER_OVERHEAD_PER_MULTI_PART_CHUNK = composeMultipartHeader({
  boundary: SAMPLE_NGINX_BOUNDARY,
  // Assume an upper bound of O(9GB) for the pdf size.
  start: 9 * 1024 * 1024 * 1024,
  end: 9 * 1024 * 1024 * 1024,
  size: 9 * 1024 * 1024 * 1024,
}).length
const MULTI_PART_THRESHOLD = 4
const INCREMENTAL_CACHE_SIZE = 1000
// Download large chunks once the shard bandwidth exceeds 50% of their size.
const CHUNK_USAGE_THRESHOLD_PREFETCH_LARGE = 0.5
// Preferred caching once we downloaded a chunk (in multiple shards) in full.
const CHUNK_USAGE_THRESHOLD_TRIGGER_PREFERRED = 1
const CHUNK_USAGE_THRESHOLD_CACHED = 42
// 42 * 0.7^11 < 1, aka we keep stale entries around for 11 compiles.
const CHUNK_USAGE_STALE_DECAY_RATE = 0.7
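
// Illustrative note (added for clarity, not in the original source): with the
// sample nginx boundary and the ~9GB offsets assumed above,
// HEADER_OVERHEAD_PER_MULTI_PART_CHUNK works out to a bit over a hundred
// bytes. estimateSizeOfMultipartResponse() below charges this worst-case
// overhead once per chunk when sizing a multipart range response.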

/**
 * @param {Object} file
 */
function backfillEdgeBounds(file) {
  const encoder = new TextEncoder()
  for (const chunk of file.ranges) {
    if (chunk.objectId) {
      chunk.objectId = encoder.encode(chunk.objectId)
      chunk.start -= chunk.objectId.byteLength
      chunk.size = chunk.end - chunk.start
    }
  }
}
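
// Illustrative example (hypothetical values, added for clarity): for a range
// entry { objectId: '12 0 obj\n', start: 100, end: 220 }, backfillEdgeBounds()
// encodes the object id into a 9-byte Uint8Array and moves `start` back to 91
// (size 129), so the hash-addressed blob can later be prefixed with its
// object id during reassembly (see backFillObjectContext() below).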

/**
 * @param {Map} usageScore
 * @param {Map} cachedUrls
 */
function trimState({ usageScore, cachedUrls }) {
  for (const hash of usageScore.keys()) {
    if (usageScore.size < INCREMENTAL_CACHE_SIZE) {
      break
    }
    const score = usageScore.get(hash)
    if (score >= CHUNK_USAGE_THRESHOLD_TRIGGER_PREFERRED) {
      // Keep entries that are worth caching around for longer.
      usageScore.set(hash, score * CHUNK_USAGE_STALE_DECAY_RATE)
      continue
    }
    cachedUrls.delete(hash)
    usageScore.delete(hash)
  }
}

/**
 * @param {Object} file
 * @param {Map} usageScore
 * @param {Map} cachedUrls
 */
function preprocessFileOnce({ file, usageScore, cachedUrls }) {
  if (file.preprocessed) return
  file.preprocessed = true

  file.createdAt = new Date(file.createdAt)
  file.prefetched = file.prefetched || []
  trimState({ usageScore, cachedUrls })
  backfillEdgeBounds(file)
}

/**
 * @param {Array} chunks
 */
function estimateSizeOfMultipartResponse(chunks) {
  /*
  --boundary
  HEADER
  BLOB
  --boundary
  HEADER
  BLOB
  --boundary--
   */
  return (
    chunks.reduce(
      (totalBytes, chunk) =>
        totalBytes +
        HEADER_OVERHEAD_PER_MULTI_PART_CHUNK +
        (chunk.end - chunk.start),
      0
    ) + ('\r\n' + SAMPLE_NGINX_BOUNDARY + '--').length
  )
}
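
// Worked example (illustrative, not from the original source): for two chunks
// of 1000 and 2000 bytes the estimate is
//   2 * HEADER_OVERHEAD_PER_MULTI_PART_CHUNK + 1000 + 2000
//     + ('\r\n' + SAMPLE_NGINX_BOUNDARY + '--').length
// i.e. the payload plus one worst-case part header per chunk plus the closing
// boundary line. The estimate deliberately errs high: real byte offsets are
// shorter than the assumed ~9GB ones.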

/**
 *
 * @param {Object} metrics
 * @param {number} size
 * @param {number} cachedCount
 * @param {number} cachedBytes
 * @param {number} fetchedCount
 * @param {number} fetchedBytes
 */
function trackDownloadStats(
  metrics,
  { size, cachedCount, cachedBytes, fetchedCount, fetchedBytes }
) {
  metrics.cachedCount += cachedCount
  metrics.cachedBytes += cachedBytes
  metrics.fetchedCount += fetchedCount
  metrics.fetchedBytes += fetchedBytes
  metrics.requestedCount++
  metrics.requestedBytes += size
}

/**
 * @param {Object} metrics
 * @param {boolean} sizeDiffers
 * @param {boolean} mismatch
 * @param {boolean} success
 */
function trackChunkVerify(metrics, { sizeDiffers, mismatch, success }) {
  if (sizeDiffers) {
    metrics.chunkVerifySizeDiffers |= 0
    metrics.chunkVerifySizeDiffers += 1
  }
  if (mismatch) {
    metrics.chunkVerifyMismatch |= 0
    metrics.chunkVerifyMismatch += 1
  }
  if (success) {
    metrics.chunkVerifySuccess |= 0
    metrics.chunkVerifySuccess += 1
  }
}

/**
 * @param chunk
 * @param {ArrayBuffer} arrayBuffer
 * @return {Uint8Array}
 */
function backFillObjectContext(chunk, arrayBuffer) {
  if (!chunk.objectId) {
    // This is a dynamic chunk
    return new Uint8Array(arrayBuffer)
  }
  const { size, objectId } = chunk
  const fullBuffer = new Uint8Array(size)
  const sourceBuffer = new Uint8Array(arrayBuffer)
  try {
    fullBuffer.set(objectId, 0)
    fullBuffer.set(sourceBuffer, objectId.byteLength)
  } catch (err) {
    throw OError.tag(err, 'broken back-filling of object-id', {
      objectIdByteLength: objectId.byteLength,
      fullBufferByteLength: fullBuffer.byteLength,
      arrayBufferByteLength: arrayBuffer.byteLength,
      sourceBufferByteLength: sourceBuffer.byteLength,
    })
  }
  return fullBuffer
}

/**
 * @param {Array} chunks
 * @param {number} start
 * @param {number} end
 * @returns {Array}
 */
function getMatchingChunks(chunks, start, end) {
  const matchingChunks = []
  for (const chunk of chunks) {
    if (chunk.end <= start) {
      // no overlap:
      //                 | REQUESTED_RANGE |
      //  | CHUNK |
      continue
    }
    if (chunk.start >= end) {
      // no overlap:
      //  | REQUESTED_RANGE |
      //                        | CHUNK |
      break
    }
    matchingChunks.push(chunk)
  }
  return matchingChunks
}
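
// Illustrative example (hypothetical values): with `chunks` sorted by start,
//   getMatchingChunks(
//     [{ start: 0, end: 100 }, { start: 100, end: 300 }, { start: 400, end: 500 }],
//     150,
//     450
//   )
// returns the 100-300 and 400-500 chunks. The early `break` relies on
// `file.ranges` being sorted by `start`.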

/**
 * @param {Object} a
 * @param {Object} b
 */
function sortBySizeDESC(a, b) {
  return a.size > b.size ? -1 : 1
}

/**
 * @param {Object} a
 * @param {Object} b
 */
function sortByStartASC(a, b) {
  return a.start > b.start ? 1 : -1
}

/**
 * @param {Object} chunk
 */
function usageAboveThreshold(chunk) {
  // We fetched enough shards of this chunk. Cache it in full now.
  return chunk.totalUsage > CHUNK_USAGE_THRESHOLD_TRIGGER_PREFERRED
}

/**
 * @param {Array} potentialChunks
 * @param {Map} usageScore
 * @param {Map} cachedUrls
 * @param {Object} metrics
 * @param {number} start
 * @param {number} end
 * @param {boolean} prefetchLargeEnabled
 */
function cutRequestAmplification({
  potentialChunks,
  usageScore,
  cachedUrls,
  metrics,
  start,
  end,
  prefetchLargeEnabled,
}) {
  // NOTE: Map keys are stored in insertion order.
  // We re-insert keys on cache hit and turn 'usageScore' into a cheap LRU.

  const chunks = []
  const skipAlreadyAdded = chunk => !chunks.includes(chunk)
  let tooManyRequests = false
  let tooMuchBandwidth = false
  let newChunks = 0
  let newCacheBandwidth = 0
  for (const chunk of potentialChunks) {
    const newUsage =
      (Math.min(end, chunk.end) - Math.max(start, chunk.start)) / chunk.size
    const totalUsage = (usageScore.get(chunk.hash) || 0) + newUsage
    usageScore.delete(chunk.hash)
    usageScore.set(chunk.hash, totalUsage)
    chunk.totalUsage = totalUsage
  }

  // Always download already cached entries
  for (const chunk of potentialChunks) {
    if (chunk.totalUsage >= CHUNK_USAGE_THRESHOLD_CACHED) {
      chunks.push(chunk)
    }
  }

  // Prefer large blobs over small ones.
  potentialChunks.sort(sortBySizeDESC)

  // Prefer chunks with high (previous) usage over brand-new chunks.
  const firstComeFirstCache = () => true
  for (const trigger of [usageAboveThreshold, firstComeFirstCache]) {
    for (const chunk of potentialChunks.filter(skipAlreadyAdded)) {
      if (newCacheBandwidth + chunk.size > MAX_SUB_REQUEST_BYTES) {
        // We would breach the bandwidth amplification limit.
        tooMuchBandwidth = true
        continue
      }
      if (newChunks + 1 > MAX_SUB_REQUEST_COUNT) {
        // We would breach the request rate amplification limit.
        tooManyRequests = true
        continue
      }
      if (trigger(chunk)) {
        newCacheBandwidth += chunk.size
        newChunks += 1
        chunks.push(chunk)
      }
    }
  }
  const largeChunk = potentialChunks.filter(skipAlreadyAdded)[0]
  if (largeChunk?.size >= PDF_JS_CHUNK_SIZE) {
    // This is a large chunk that exceeds the bandwidth amplification limit.
    if (largeChunk.start <= start && largeChunk.end >= end) {
      // This is a large chunk spanning the entire range. pdf.js will only
      // request these in case it needs the underlying stream, so it is OK to
      // download as much data as the stream is large in one go.
      chunks.push(largeChunk)
    } else if (
      prefetchLargeEnabled &&
      largeChunk.totalUsage > CHUNK_USAGE_THRESHOLD_PREFETCH_LARGE
    ) {
      // pdf.js actually wants the smaller (dynamic) chunk in the range that
      // happens to sit right next to this large chunk.
      // pdf.js has requested a lot of the large chunk via shards by now, and it
      // is time to download it in full to stop "wasting" more bandwidth and
      // more importantly cut down latency as we can prefetch the small chunk.
      chunks.push(largeChunk)
    }
  }
  if (tooManyRequests) {
    metrics.tooManyRequestsCount++
  }
  if (tooMuchBandwidth) {
    metrics.tooMuchBandwidthCount++
  }

  chunks.sort(sortByStartASC)
  return chunks
}
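
// Summary note (added for clarity): besides chunks that are expected to
// already sit in the browser cache, a single pdf.js range request may fan out
// into at most MAX_SUB_REQUEST_COUNT new chunk downloads totalling at most
// MAX_SUB_REQUEST_BYTES, with one exception: a single large chunk that spans
// the entire requested range or crossed the prefetch-large usage threshold.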

/**
 * @param {Array} chunks
 * @param {number} start
 * @param {number} end
 * @returns {Array}
 */
function getInterleavingDynamicChunks(chunks, start, end) {
  const dynamicChunks = []
  for (const chunk of chunks) {
    if (start < chunk.start) {
      dynamicChunks.push({ start, end: chunk.start })
    }
    start = chunk.end
  }

  if (start < end) {
    dynamicChunks.push({ start, end })
  }
  return dynamicChunks
}
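
// Illustrative example (hypothetical values): for cached chunks covering
// 100-200 and 300-400 inside a requested range 0-500,
//   getInterleavingDynamicChunks(chunks, 0, 500)
// yields the gaps [{ start: 0, end: 100 }, { start: 200, end: 300 },
// { start: 400, end: 500 }] -- the byte ranges that still have to be fetched
// from the compile output itself.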

/**
 *
 * @param {Response} response
 */
function getServerTime(response) {
  const raw = response.headers.get('Date')
  if (!raw) return new Date()
  return new Date(raw)
}

/**
 *
 * @param {Response} response
 */
function getResponseSize(response) {
  const raw = response.headers.get('Content-Length')
  if (!raw) return 0
  return parseInt(raw, 10)
}

/**
 *
 * @param {Response} response
 * @param chunk
 */
function getMultipartBoundary(response, chunk) {
  if (!Array.isArray(chunk)) return ''

  const raw = response.headers.get('Content-Type')
  if (raw.includes('multipart/byteranges')) {
    const idx = raw.indexOf('boundary=')
    if (idx !== -1) return raw.slice(idx + 'boundary='.length)
  }

  throw new OError('missing boundary on multipart request', {
    headers: Object.fromEntries(response.headers.entries()),
    chunk,
  })
}

/**
 * @param {string} boundary
 * @param {number} start
 * @param {number} end
 * @param {number} size
 * @return {string}
 */
function composeMultipartHeader({ boundary, start, end, size }) {
  return `\r\n--${boundary}\r\nContent-Type: application/pdf\r\nContent-Range: bytes ${start}-${
    end - 1
  }/${size}\r\n\r\n`
}
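
// Illustrative example (hypothetical values): for boundary 'abc', start 100,
// end 200 and size 1000 the composed header is
//   '\r\n--abc\r\nContent-Type: application/pdf\r\nContent-Range: bytes 100-199/1000\r\n\r\n'
// matching what nginx emits between the parts of a multipart/byteranges
// response; resolveMultiPartResponses() below verifies exactly this byte
// sequence before trusting each part.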

/**
 * @param {Object} file
 * @param {Array} chunks
 * @param {Uint8Array} data
 * @param {string} boundary
 * @param {Object} metrics
 */
function resolveMultiPartResponses({ file, chunks, data, boundary, metrics }) {
  const responses = []
  let offsetStart = 0
  const encoder = new TextEncoder()
  for (const chunk of chunks) {
    const header = composeMultipartHeader({
      boundary,
      start: chunk.start,
      end: chunk.end,
      size: file.size,
    })
    const headerSize = header.length

    // Verify header content. A proxy might have tampered with it.
    const headerRaw = encoder.encode(header)
    if (
      !data
        .subarray(offsetStart, offsetStart + headerSize)
        .every((v, idx) => v === headerRaw[idx])
    ) {
      metrics.headerVerifyFailure |= 0
      metrics.headerVerifyFailure++
      throw new OError('multipart response header does not match', {
        actual: new TextDecoder().decode(
          data.subarray(offsetStart, offsetStart + headerSize)
        ),
        expected: header,
      })
    }

    offsetStart += headerSize
    const chunkSize = chunk.end - chunk.start
    responses.push({
      chunk,
      data: data.subarray(offsetStart, offsetStart + chunkSize),
    })
    offsetStart += chunkSize
  }
  return responses
}

/**
 *
 * @param {Response} response
 * @param {number} estimatedSize
 * @param {RequestInit} init
 */
function checkChunkResponse(response, estimatedSize, init) {
  if (!(response.status === 206 || response.status === 200)) {
    throw new OError('non successful response status: ' + response.status, {
      responseHeaders: Object.fromEntries(response.headers.entries()),
      requestHeader: init.headers,
    })
  }
  const responseSize = getResponseSize(response)
  if (!responseSize) {
    throw new OError('content-length response header missing', {
      responseHeaders: Object.fromEntries(response.headers.entries()),
      requestHeader: init.headers,
    })
  }
  if (responseSize > estimatedSize) {
    throw new OError('response size exceeds estimate', {
      estimatedSize,
      responseSize,
      responseHeaders: Object.fromEntries(response.headers.entries()),
      requestHeader: init.headers,
    })
  }
}

/**
 *
 * @param {string} url
 * @param {number} start
 * @param {number} end
 * @param {AbortSignal} abortSignal
 */
export async function fallbackRequest({ url, start, end, abortSignal }) {
  try {
    const init = {
      cache: 'no-store',
      headers: { Range: `bytes=${start}-${end - 1}` },
      signal: abortSignal,
    }
    const response = await fetch(url, init)
    checkChunkResponse(response, end - start, init)
    return await response.arrayBuffer()
  } catch (e) {
    throw OError.tag(e, 'fallback request failed', { url, start, end })
  }
}

/**
 *
 * @param {string} url
 * @param {number} start
 * @param {number} end
 * @param {Object} metrics
 * @param {Uint8Array} actual
 * @param {AbortSignal} abortSignal
 */
async function verifyRange({ url, start, end, metrics, actual, abortSignal }) {
  let expectedRaw
  try {
    expectedRaw = await fallbackRequest({ url, start, end, abortSignal })
  } catch (error) {
    throw OError.tag(error, 'cannot verify range', { url, start, end })
  }
  const expected = new Uint8Array(expectedRaw)
  const stats = {}
  if (actual.byteLength !== expected.byteLength) {
    stats.sizeDiffers = true
  } else if (!expected.every((v, idx) => v === actual[idx])) {
    stats.mismatch = true
  } else {
    stats.success = true
  }
  trackChunkVerify(metrics, stats)
  return expected
}

/**
 * @param {Array} chunks
 * @param {Array} prefetched
 * @param {number} start
 * @param {number} end
 */
function skipPrefetched(chunks, prefetched, start, end) {
  return chunks.filter(chunk => {
    return !prefetched.find(
      c =>
        c.start <= Math.max(chunk.start, start) &&
        c.end >= Math.min(chunk.end, end)
    )
  })
}
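
// Illustrative example (hypothetical values): with prefetched =
// [{ start: 0, end: 300 }] and a requested range of 100-400, a candidate
// chunk { start: 100, end: 200 } is dropped because the prefetched buffer
// already covers it, while a chunk { start: 250, end: 350 } is kept since its
// tail lies beyond the prefetched data.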

/**
 * @param {Object|Object[]} chunk
 * @param {string} url
 * @param {RequestInit} init
 * @param {Map<string, string>} cachedUrls
 * @param {Object} metrics
 * @param {boolean} cachedUrlLookupEnabled
 */
async function fetchChunk({
  chunk,
  url,
  init,
  cachedUrls,
  metrics,
  cachedUrlLookupEnabled,
}) {
  const estimatedSize = Array.isArray(chunk)
    ? estimateSizeOfMultipartResponse(chunk)
    : chunk.end - chunk.start

  const oldUrl = cachedUrls.get(chunk.hash)
  if (cachedUrlLookupEnabled && chunk.hash && oldUrl && oldUrl !== url) {
    // When the clsi server id changes, the content id changes too and as a
    // result all the browser cache keys (aka urls) get invalidated.
    // We memorize the previous browser cache keys in `cachedUrls`.
    try {
      const response = await fetch(oldUrl, init)
      if (response.status === 200) {
        checkChunkResponse(response, estimatedSize, init)
        metrics.oldUrlHitCount += 1
        return response
      }
      if (response.status === 404) {
        // The old browser cache entry is gone and the old file is gone too.
        metrics.oldUrlMissCount += 1
      }
      // Fallback to the latest url.
    } catch (e) {
      // Fallback to the latest url.
    }
  }
  const response = await fetch(url, init)
  checkChunkResponse(response, estimatedSize, init)
  if (chunk.hash) cachedUrls.set(chunk.hash, url)
  return response
}

/**
 * @param {Object} file
 * @param {number} start
 * @param {number} end
 * @param {Array} dynamicChunks
 * @param {boolean} prefetchXRefTable
 * @param {number} startXRefTableRange
 */
function addPrefetchingChunks({
  file,
  start,
  end,
  dynamicChunks,
  prefetchXRefTable,
  startXRefTableRange,
}) {
  // Prefetch in case this is the first range, or we are fetching dynamic
  // chunks anyway (so we can ride-share the round trip).
  // Rendering cannot start without downloading the xref table, so it's OK to
  // "delay" the first range.
  if (!(start === 0 || dynamicChunks.length > 0)) {
    return
  }

  let extraChunks = []
  if (prefetchXRefTable) {
    // Prefetch the dynamic chunks around the xref table.
    extraChunks = skipPrefetched(
      getInterleavingDynamicChunks(
        getMatchingChunks(file.ranges, startXRefTableRange, file.size),
        startXRefTableRange,
        file.size
      ),
      file.prefetched,
      startXRefTableRange,
      file.size
    )
  }
  // Stop at the xref table range if present -- we may prefetch it early ^^^.
  const prefetchEnd = startXRefTableRange || file.size
  extraChunks = extraChunks.concat(
    skipPrefetched(
      getInterleavingDynamicChunks(
        getMatchingChunks(file.ranges, end, prefetchEnd),
        end,
        prefetchEnd
      ),
      file.prefetched,
      end,
      prefetchEnd
    )
  )

  let sum = estimateSizeOfMultipartResponse(dynamicChunks)
  for (const chunk of extraChunks) {
    const downloadSize =
      chunk.end - chunk.start + HEADER_OVERHEAD_PER_MULTI_PART_CHUNK
    if (sum + downloadSize > PDF_JS_CHUNK_SIZE) {
      // In prefetching this chunk we would exceed the bandwidth limit.
      // Try to prefetch another (smaller) chunk.
      continue
    }
    const sibling = dynamicChunks.find(
      sibling => sibling.end === chunk.start || sibling.start === chunk.end
    )
    if (sibling) {
      sum += downloadSize
      // Just expand the existing dynamic chunk.
      sibling.start = Math.min(sibling.start, chunk.start)
      sibling.end = Math.max(sibling.end, chunk.end)
      continue
    }
    if (dynamicChunks.length > MULTI_PART_THRESHOLD) {
      // We are already performing a multipart request. Add another part.
    } else if (dynamicChunks.length < MULTI_PART_THRESHOLD) {
      // We are not yet performing a multipart request. Add another request.
    } else {
      // In prefetching this chunk we would switch to a multipart request.
      // Try to prefetch another (smaller) chunk.
      continue
    }
    sum += downloadSize
    dynamicChunks.push(chunk)
  }
  dynamicChunks.sort(sortByStartASC)

  // Ensure that no chunks are overlapping.
  let lastEnd = 0
  for (const [idx, chunk] of dynamicChunks.entries()) {
    if (chunk.start < lastEnd) {
      throw new OError('detected overlapping dynamic chunks', {
        chunk,
        lastChunk: dynamicChunks[idx - 1],
      })
    }
    lastEnd = chunk.end
  }
}
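
// Budget note (added for clarity): prefetching only ever grows the dynamic
// part of a request up to PDF_JS_CHUNK_SIZE, including the estimated
// multipart header overhead, and it never switches a request from a handful
// of parallel range requests into a single multipart request on its own --
// extra ranges are merged into an adjacent dynamic chunk where possible,
// added as additional parts or requests while the budget allows, and skipped
// otherwise.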

class Timer {
  constructor() {
    this.max = 0
    this.total = 0
    this.lastStart = 0
  }

  startBlockingCompute() {
    this.lastStart = performance.now()
  }

  finishBlockingCompute() {
    if (this.lastStart === 0) return
    const last = performance.now() - this.lastStart
    if (last > this.max) {
      this.max = last
    }
    this.total += last
    this.lastStart = 0
  }

  reportInto(metrics) {
    const max = Math.ceil(this.max)
    const total = Math.ceil(this.total)
    if (max > metrics.latencyComputeMax) {
      metrics.latencyComputeMax = max
    }
    metrics.latencyComputeTotal += total
  }
}

/**
 *
 * @param {string} url
 * @param {number} start
 * @param {number} end
 * @param {Object} file
 * @param {Object} metrics
 * @param {Map} usageScore
 * @param {Map} cachedUrls
 * @param {boolean} verifyChunks
 * @param {boolean} prefetchingEnabled
 * @param {boolean} prefetchLargeEnabled
 * @param {boolean} cachedUrlLookupEnabled
 * @param {AbortSignal} abortSignal
 */
export async function fetchRange({
  url,
  start,
  end,
  file,
  metrics,
  usageScore,
  cachedUrls,
  verifyChunks,
  prefetchingEnabled,
  prefetchLargeEnabled,
  cachedUrlLookupEnabled,
  abortSignal,
}) {
  const timer = new Timer()
  timer.startBlockingCompute()
  preprocessFileOnce({ file, usageScore, cachedUrls })
  const startXRefTableRange =
    Math.floor(file.startXRefTable / PDF_JS_CHUNK_SIZE) * PDF_JS_CHUNK_SIZE
  const prefetchXRefTable =
    prefetchingEnabled && startXRefTableRange > 0 && start === 0
  const prefetched = getMatchingChunks(file.prefetched, start, end)

  // Check that handling the range request won't trigger excessive sub-requests,
  // (to avoid unwanted latency compared to the original request).
  const chunks = cutRequestAmplification({
    potentialChunks: skipPrefetched(
      getMatchingChunks(file.ranges, start, end),
      prefetched,
      start,
      end
    ),
    usageScore,
    cachedUrls,
    metrics,
    start,
    end,
    prefetchLargeEnabled,
  })
  const dynamicChunks = skipPrefetched(
    getInterleavingDynamicChunks(chunks, start, end),
    prefetched,
    start,
    end
  )
  const size = end - start

  if (
    chunks.length === 0 &&
    prefetched.length === 0 &&
    dynamicChunks.length === 1 &&
    !prefetchXRefTable
  ) {
    // fall back to the original range request when no chunks are cached.
    // Exception: The first range should fetch the xref table as well.
    timer.finishBlockingCompute()
    timer.reportInto(metrics)
    trackDownloadStats(metrics, {
      size,
      cachedCount: 0,
      cachedBytes: 0,
      fetchedCount: 1,
      fetchedBytes: size,
    })
    return fallbackRequest({ url, start, end, abortSignal })
  }

  if (prefetchingEnabled) {
    addPrefetchingChunks({
      file,
      start,
      end,
      dynamicChunks,
      prefetchXRefTable,
      startXRefTableRange,
    })
  }

  const byteRanges = dynamicChunks
    .map(chunk => `${chunk.start}-${chunk.end - 1}`)
    .join(',')
  const coalescedDynamicChunks = []
  switch (true) {
    case dynamicChunks.length === 0:
      break
    case dynamicChunks.length === 1:
      coalescedDynamicChunks.push({
        chunk: dynamicChunks[0],
        url,
        init: {
          cache: 'no-store',
          headers: { Range: `bytes=${byteRanges}` },
        },
      })
      break
    case dynamicChunks.length <= MULTI_PART_THRESHOLD:
      // There will always be an OPTIONS request for multi-ranges requests.
      // It is faster to request few ranges in parallel instead of waiting for
      // the OPTIONS request to round trip.
      dynamicChunks.forEach(chunk => {
        coalescedDynamicChunks.push({
          chunk,
          url,
          init: {
            cache: 'no-store',
            headers: { Range: `bytes=${chunk.start}-${chunk.end - 1}` },
          },
        })
      })
      break
    default:
      coalescedDynamicChunks.push({
        chunk: dynamicChunks,
        url,
        init: {
          cache: 'no-store',
          headers: { Range: `bytes=${byteRanges}` },
        },
      })
  }

  const params = new URL(url).searchParams
  // drop params that are not needed
  params.delete('enable_pdf_caching')
  params.delete('verify_chunks')
  const query = params.toString()
  // The schema of `url` is https://domain/project/:id/user/:id/build/... for
  // authenticated and https://domain/project/:id/build/... for
  // unauthenticated users. Cut it before /build/.
  // The path may have an optional /zone/b prefix too.
  const perUserPrefix = url.slice(0, url.indexOf('/build/'))
  const requests = chunks
    .map(chunk => ({
      chunk,
      url: `${perUserPrefix}/content/${file.contentId}/${chunk.hash}?${query}`,
      init: {},
    }))
    .concat(coalescedDynamicChunks)
  let cachedCount = 0
  let cachedBytes = 0
  let fetchedCount = 0
  let fetchedBytes = 0
  const reassembledBlob = new Uint8Array(size)

  // Pause while performing network IO
  timer.finishBlockingCompute()

  const rawResponses = await Promise.all(
    requests.map(async ({ chunk, url, init }) => {
      try {
        const response = await fetchChunk({
          chunk,
          url,
          init: { ...init, signal: abortSignal },
          cachedUrls,
          metrics,
          cachedUrlLookupEnabled,
        })
        timer.startBlockingCompute()
        const boundary = getMultipartBoundary(response, chunk)
        const blobFetchDate = getServerTime(response)
        const blobSize = getResponseSize(response)
        if (blobFetchDate && blobSize) {
          // Example: 2MB PDF, 1MB image, 128KB PDF.js chunk.
          //         | pdf.js chunk |
          //     | A BIG IMAGE BLOB |
          // | THE FULL PDF |
          if (chunk.hash && blobFetchDate < file.createdAt) {
            const usedChunkSection =
              Math.min(end, chunk.end) - Math.max(start, chunk.start)
            cachedCount++
            cachedBytes += usedChunkSection
            // Roll the position of the hash in the Map.
            usageScore.delete(chunk.hash)
            usageScore.set(chunk.hash, CHUNK_USAGE_THRESHOLD_CACHED)
          } else {
            // Blobs are fetched in bulk, record the full size.
            fetchedCount++
            fetchedBytes += blobSize
          }
        }
        timer.finishBlockingCompute()
        const buf = await response.arrayBuffer()
        timer.startBlockingCompute()
        const data = backFillObjectContext(chunk, buf)
        if (!Array.isArray(chunk)) {
          return [{ chunk, data }]
        }
        return resolveMultiPartResponses({
          file,
          chunks: chunk,
          data,
          boundary,
          metrics,
        })
      } catch (err) {
        throw OError.tag(err, 'cannot fetch chunk', { chunk, url, init })
      } finally {
        timer.finishBlockingCompute()
      }
    })
  )

  timer.startBlockingCompute()

  rawResponses
    .flat() // flatten after splitting multipart responses
    .concat(prefetched.map(chunk => ({ chunk, data: chunk.buffer })))
    .forEach(({ chunk, data }) => {
      if (!chunk.hash && chunk.end > end) {
        // This is a (partially) prefetched chunk.
        chunk.buffer = data
        file.prefetched.push(chunk)
        if (chunk.start > end) return // This is a fully prefetched chunk.
      }
      // overlap:
      //       | REQUESTED_RANGE |
      //   | CHUNK |
      const offsetStart = Math.max(start - chunk.start, 0)
      // overlap:
      //   | REQUESTED_RANGE |
      //             | CHUNK |
      const offsetEnd = Math.max(chunk.end - end, 0)
      const oldDataLength = data.length
      if (offsetStart > 0 || offsetEnd > 0) {
        // compute index positions for slice to handle case where offsetEnd=0
        const chunkSize = chunk.end - chunk.start
        data = data.subarray(offsetStart, chunkSize - offsetEnd)
      }
      const newDataLength = data.length
      const insertPosition = Math.max(chunk.start - start, 0)
      try {
        reassembledBlob.set(data, insertPosition)
      } catch (err) {
        const reassembledBlobLength = reassembledBlob.length
        const trimmedChunk = {
          start: chunk.start,
          end: chunk.end,
          hash: chunk.hash,
          objectId: new TextDecoder().decode(chunk.objectId),
        }
        throw OError.tag(err, 'broken reassembly', {
          start,
          end,
          chunk: trimmedChunk,
          oldDataLength,
          newDataLength,
          offsetStart,
          offsetEnd,
          insertPosition,
          reassembledBlobLength,
        })
      }
    })

  timer.finishBlockingCompute()
  timer.reportInto(metrics)
  trackDownloadStats(metrics, {
    size,
    cachedCount,
    cachedBytes,
    fetchedCount,
    fetchedBytes,
  })

  if (verifyChunks) {
    return await verifyRange({
      url,
      start,
      end,
      metrics,
      actual: reassembledBlob,
      abortSignal,
    })
  }
  return reassembledBlob
}
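
// Usage sketch (illustrative only; the call-site names and the exact shape of
// `file` and `metrics` are assumptions, not part of this module): the pdf.js
// transport layer is expected to call fetchRange() once per missing byte
// range, roughly like
//
//   const abortController = new AbortController()
//   const buf = await fetchRange({
//     url: pdfDownloadUrl,
//     start: 0,
//     end: 128 * 1024,
//     file: rangesMetadata, // { ranges, size, createdAt, startXRefTable, contentId, ... }
//     metrics,
//     usageScore: new Map(),
//     cachedUrls: new Map(),
//     verifyChunks: false,
//     prefetchingEnabled: true,
//     prefetchLargeEnabled: true,
//     cachedUrlLookupEnabled: true,
//     abortSignal: abortController.signal,
//   })
//
// falling back to a plain Range request (fallbackRequest) when nothing useful
// is cached yet.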