perf: optimize undici connection settings and tarball buffering (#11151)

- Enable Happy Eyeballs (`autoSelectFamily`) for faster dual-stack (IPv4/IPv6) connection establishment
- Increase keep-alive timeouts (30s idle, 10min max) to reduce connection churn during install
- Set optimized global dispatcher so requests without custom options still benefit
- Pre-allocate a `SharedArrayBuffer` for tarball downloads when `Content-Length` is known, avoiding an intermediate chunk array and a second copy pass
This commit is contained in:
Zoltan Kochan
2026-03-31 00:33:42 +02:00
committed by GitHub
parent 14bb19ba6b
commit 6b3d87a4ca
3 changed files with 74 additions and 25 deletions

View File

@@ -0,0 +1,7 @@
---
"@pnpm/network.fetch": minor
"@pnpm/fetching.tarball-fetcher": minor
"pnpm": minor
---
Improved HTTP performance with Happy Eyeballs (dual-stack IPv4/IPv6), better keep-alive settings, and an optimized global dispatcher. Tarball downloads with a known size now pre-allocate memory to avoid double-copy overhead.

View File

@@ -134,9 +134,8 @@ export function createDownloader (
}
const contentLength = res.headers.has('content-length') && res.headers.get('content-length')
const size = typeof contentLength === 'string'
? parseInt(contentLength, 10)
: null
const parsedLength = typeof contentLength === 'string' ? parseInt(contentLength, 10) : NaN
const size = Number.isFinite(parsedLength) && parsedLength >= 0 ? parsedLength : null
if (opts.onStart != null) {
opts.onStart(size, currentAttempt)
}
@@ -146,18 +145,44 @@ export function createDownloader (
: undefined
const startTime = Date.now()
let downloaded = 0
const chunks: Uint8Array[] = []
for await (const chunk of res.body!) {
chunks.push(chunk as Uint8Array)
downloaded += (chunk as Uint8Array).byteLength
onProgress?.(downloaded)
}
if (size !== null && size !== downloaded) {
throw new BadTarballError({
expectedSize: size,
receivedSize: downloaded,
tarballUrl: url,
})
if (size !== null) {
// Known size: pre-allocate and copy directly (avoids intermediate array + second copy pass)
data = Buffer.from(new SharedArrayBuffer(size))
for await (const chunk of res.body!) {
const c = chunk as Uint8Array
const nextDownloaded = downloaded + c.byteLength
if (nextDownloaded > size) {
throw new BadTarballError({
expectedSize: size,
receivedSize: nextDownloaded,
tarballUrl: url,
})
}
data.set(c, downloaded)
downloaded = nextDownloaded
onProgress?.(downloaded)
}
if (size !== downloaded) {
throw new BadTarballError({
expectedSize: size,
receivedSize: downloaded,
tarballUrl: url,
})
}
} else {
const chunks: Uint8Array[] = []
for await (const chunk of res.body!) {
const c = chunk as Uint8Array
chunks.push(c)
downloaded += c.byteLength
onProgress?.(downloaded)
}
data = Buffer.from(new SharedArrayBuffer(downloaded))
let offset = 0
for (const chunk of chunks) {
data.set(chunk, offset)
offset += chunk.byteLength
}
}
const elapsedSec = (Date.now() - startTime) / 1000
const avgKiBps = Math.floor((downloaded / elapsedSec) / 1024)
@@ -165,13 +190,6 @@ export function createDownloader (
const sizeKb = Math.floor(downloaded / 1024)
globalWarn(`Tarball download average speed ${avgKiBps} KiB/s (size ${sizeKb} KiB) is below ${fetchMinSpeedKiBps} KiB/s: ${url} (GET)`)
}
data = Buffer.from(new SharedArrayBuffer(downloaded))
let offset: number = 0
for (const chunk of chunks) {
data.set(chunk, offset)
offset += chunk.byteLength
}
} catch (err: unknown) {
const error = util.types.isNativeError(err) ? err : new Error(String(err), { cause: err })
Object.assign(error, {

View File

@@ -7,9 +7,27 @@ import { PnpmError } from '@pnpm/error'
import type { SslConfig } from '@pnpm/types'
import { LRUCache } from 'lru-cache'
import { SocksClient } from 'socks'
import { Agent, type Dispatcher, ProxyAgent } from 'undici'
import { Agent, type Dispatcher, ProxyAgent, setGlobalDispatcher } from 'undici'
const DEFAULT_MAX_SOCKETS = 50
const KEEP_ALIVE_TIMEOUT = 30_000 // 30 seconds
const KEEP_ALIVE_MAX_TIMEOUT = 600_000 // 10 minutes
// Set an optimized global dispatcher so that requests without custom options
// (no proxy, no custom certs) still benefit from better keep-alive and Happy Eyeballs.
//
// Note: we intentionally do NOT enable HTTP/2 (allowH2) or HTTP/1.1 pipelining here.
// With HTTP/2, undici multiplexes many streams over 1-2 TCP connections sharing a single
// congestion window. In benchmarks this was slower than opening ~50 independent HTTP/1.1
// connections that each get their own congestion window and can saturate bandwidth in parallel.
setGlobalDispatcher(new Agent({
connections: DEFAULT_MAX_SOCKETS,
keepAliveTimeout: KEEP_ALIVE_TIMEOUT,
keepAliveMaxTimeout: KEEP_ALIVE_MAX_TIMEOUT,
connect: {
autoSelectFamily: true,
},
}))
const DISPATCHER_CACHE = new LRUCache<string, Dispatcher>({
max: 50,
@@ -152,6 +170,8 @@ function createHttpProxyDispatcher (
? `Basic ${Buffer.from(`${decodeURIComponent(proxyUrl.username)}:${decodeURIComponent(proxyUrl.password)}`).toString('base64')}`
: undefined,
connections: opts.maxSockets ?? DEFAULT_MAX_SOCKETS,
keepAliveTimeout: KEEP_ALIVE_TIMEOUT,
keepAliveMaxTimeout: KEEP_ALIVE_MAX_TIMEOUT,
requestTls: isHttps
? {
ca: tlsConfig.ca,
@@ -181,6 +201,8 @@ function createSocksDispatcher (
return new Agent({
connections: opts.maxSockets ?? DEFAULT_MAX_SOCKETS,
keepAliveTimeout: KEEP_ALIVE_TIMEOUT,
keepAliveMaxTimeout: KEEP_ALIVE_MAX_TIMEOUT,
connect: async (connectOpts, callback) => {
try {
const { socket } = await SocksClient.createConnection({
@@ -251,10 +273,11 @@ function getNonProxyDispatcher (parsedUri: URL, opts: DispatcherOptions): Dispat
const agent = new Agent({
connections: opts.maxSockets ?? DEFAULT_MAX_SOCKETS,
connectTimeout,
keepAliveTimeout: 4000,
keepAliveMaxTimeout: 15000,
keepAliveTimeout: KEEP_ALIVE_TIMEOUT,
keepAliveMaxTimeout: KEEP_ALIVE_MAX_TIMEOUT,
connect: isHttps
? {
autoSelectFamily: true,
ca,
cert,
key: certKey,
@@ -262,6 +285,7 @@ function getNonProxyDispatcher (parsedUri: URL, opts: DispatcherOptions): Dispat
localAddress: opts.localAddress,
}
: {
autoSelectFamily: true,
localAddress: opts.localAddress,
},
})