"use strict";
|
|
Object.defineProperty(exports, "__esModule", {
|
|
value: true
|
|
});
|
|
0 && (module.exports = {
|
|
isAbortError: null,
|
|
pipeToNodeResponse: null
|
|
});
|
|
function _export(target, all) {
|
|
for(var name in all)Object.defineProperty(target, name, {
|
|
enumerable: true,
|
|
get: all[name]
|
|
});
|
|
}
|
|
_export(exports, {
|
|
isAbortError: function() {
|
|
return isAbortError;
|
|
},
|
|
pipeToNodeResponse: function() {
|
|
return pipeToNodeResponse;
|
|
}
|
|
});
|
|
const _nextrequest = require("./web/spec-extension/adapters/next-request");
|
|
const _detachedpromise = require("../lib/detached-promise");
|
|
const _tracer = require("./lib/trace/tracer");
|
|
const _constants = require("./lib/trace/constants");
|
|
const _clientcomponentrendererlogger = require("./client-component-renderer-logger");
|
|
function isAbortError(e) {
|
|
return (e == null ? void 0 : e.name) === 'AbortError' || (e == null ? void 0 : e.name) === _nextrequest.ResponseAbortedName;
|
|
}
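// Usage sketch (illustrative only, not part of this module): callers typically
// use `isAbortError` to tell a client disconnect apart from a real failure. The
// `render` function below is hypothetical.
//
//   try {
//     await render(req, res);
//   } catch (err) {
//     if (isAbortError(err)) return; // the client went away; nothing to report
//     throw err;
//   }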
function createWriterFromResponse(res, waitUntilForEnd) {
    let started = false;
    // Create a promise that will resolve once the response has drained. See
    // https://nodejs.org/api/stream.html#stream_event_drain
    let drained = new _detachedpromise.DetachedPromise();
    function onDrain() {
        drained.resolve();
    }
    res.on('drain', onDrain);
    // If the close event fires, the response is done, so we shouldn't block
    // and wait for the drain event.
    res.once('close', ()=>{
        res.off('drain', onDrain);
        drained.resolve();
    });
    // Create a promise that will resolve once the response has finished. See
    // https://nodejs.org/api/http.html#event-finish_1
    const finished = new _detachedpromise.DetachedPromise();
    res.once('finish', ()=>{
        finished.resolve();
    });
    // Create a writable stream that will write to the response.
    return new WritableStream({
        write: async (chunk)=>{
            // You'd think we'd want to use `start` instead of placing this in `write`
            // but this ensures that we don't actually flush the headers until we've
            // started writing chunks.
            if (!started) {
                started = true;
                if ('performance' in globalThis && process.env.NEXT_OTEL_PERFORMANCE_PREFIX) {
                    const metrics = (0, _clientcomponentrendererlogger.getClientComponentLoaderMetrics)();
                    if (metrics) {
                        performance.measure(`${process.env.NEXT_OTEL_PERFORMANCE_PREFIX}:next-client-component-loading`, {
                            start: metrics.clientComponentLoadStart,
                            end: metrics.clientComponentLoadStart + metrics.clientComponentLoadTimes
                        });
                    }
                }
                res.flushHeaders();
                (0, _tracer.getTracer)().trace(_constants.NextNodeServerSpan.startResponse, {
                    spanName: 'start response'
                }, ()=>undefined);
            }
            try {
                const ok = res.write(chunk);
                // Added by the `compression` middleware, this is a function that will
                // flush the partially-compressed response to the client.
                if ('flush' in res && typeof res.flush === 'function') {
                    res.flush();
                }
                // If the write returns false, it means there's some backpressure, so
                // wait until it's streamed before continuing.
                if (!ok) {
                    await drained.promise;
                    // Reset the drained promise so that we can wait for the next drain event.
                    drained = new _detachedpromise.DetachedPromise();
                }
            } catch (err) {
                res.end();
                throw Object.defineProperty(new Error('failed to write chunk to response', {
                    cause: err
                }), "__NEXT_ERROR_CODE", {
                    value: "E321",
                    enumerable: false,
                    configurable: true
                });
            }
        },
        abort: (err)=>{
            if (res.writableFinished) return;
            res.destroy(err);
        },
        close: async ()=>{
            // if a waitUntil promise was passed, wait for it to resolve before
            // ending the response.
            if (waitUntilForEnd) {
                await waitUntilForEnd;
            }
            if (res.writableFinished) return;
            res.end();
            return finished.promise;
        }
    });
}
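// Backpressure sketch (illustrative only, an assumption rather than Next.js
// code): the same write()/'drain' handshake implemented above, reduced to its
// core for any Node.js stream.Writable.
//
//   async function writeWithBackpressure(writable, chunk) {
//     // write() returns false once the internal buffer exceeds the
//     // highWaterMark; wait for 'drain' before sending more data.
//     if (!writable.write(chunk)) {
//       await new Promise((resolve)=>writable.once('drain', resolve));
//     }
//   }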
async function pipeToNodeResponse(readable, res, waitUntilForEnd) {
    try {
        // If the response has already errored, then just return now.
        const { errored, destroyed } = res;
        if (errored || destroyed) return;
        // Create a new AbortController so that we can abort the readable if the
        // client disconnects.
        const controller = (0, _nextrequest.createAbortController)(res);
        const writer = createWriterFromResponse(res, waitUntilForEnd);
        await readable.pipeTo(writer, {
            signal: controller.signal
        });
    } catch (err) {
        // If this isn't related to an abort error, re-throw it.
        if (isAbortError(err)) return;
        throw Object.defineProperty(new Error('failed to pipe response', {
            cause: err
        }), "__NEXT_ERROR_CODE", {
            value: "E180",
            enumerable: false,
            configurable: true
        });
    }
}
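// Usage sketch (illustrative only; the plain http server below is an assumption,
// not Next.js code): pipe a web ReadableStream into a Node.js ServerResponse.
// Requires a Node.js version with the global ReadableStream (18+).
//
//   const http = require('http');
//   http.createServer((req, res)=>{
//     const readable = new ReadableStream({
//       start(controller) {
//         controller.enqueue(new TextEncoder().encode('hello'));
//         controller.close();
//       }
//     });
//     res.statusCode = 200;
//     pipeToNodeResponse(readable, res).catch((err)=>{
//       // Abort errors from client disconnects are already swallowed inside
//       // pipeToNodeResponse, so anything that reaches here is a real failure.
//       res.destroy(err);
//     });
//   }).listen(3000);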
//# sourceMappingURL=pipe-readable.js.map