diff --git a/.buildkite/ci.mjs b/.buildkite/ci.mjs old mode 100644 new mode 100755 index c9c005198790be..3de0aa57b61a6b --- a/.buildkite/ci.mjs +++ b/.buildkite/ci.mjs @@ -8,10 +8,13 @@ import { writeFileSync } from "node:fs"; import { join } from "node:path"; import { + getBootstrapVersion, + getBuildNumber, getCanaryRevision, getChangedFiles, getCommit, getCommitMessage, + getEnv, getLastSuccessfulBuild, getMainBranch, getTargetBranch, @@ -21,103 +24,162 @@ import { isMergeQueue, printEnvironment, spawnSafe, - startGroup, + toYaml, + uploadArtifact, } from "../scripts/utils.mjs"; -function toYaml(obj, indent = 0) { - const spaces = " ".repeat(indent); - let result = ""; - - for (const [key, value] of Object.entries(obj)) { - if (value === undefined) { - continue; - } - - if (value === null) { - result += `${spaces}${key}: null\n`; - continue; - } +/** + * @typedef PipelineOptions + * @property {string} [buildId] + * @property {boolean} [buildImages] + * @property {boolean} [publishImages] + * @property {boolean} [skipTests] + */ - if (Array.isArray(value)) { - result += `${spaces}${key}:\n`; - value.forEach(item => { - if (typeof item === "object" && item !== null) { - result += `${spaces}- \n${toYaml(item, indent + 2) - .split("\n") - .map(line => `${spaces} ${line}`) - .join("\n")}\n`; - } else { - result += `${spaces}- ${item}\n`; - } - }); - continue; - } +/** + * @param {PipelineOptions} options + */ +function getPipeline(options) { + const { buildId, buildImages, publishImages, skipTests } = options; - if (typeof value === "object") { - result += `${spaces}${key}:\n${toYaml(value, indent + 2)}`; - continue; - } + /** + * Helpers + */ - if ( - typeof value === "string" && - (value.includes(":") || value.includes("#") || value.includes("'") || value.includes('"') || value.includes("\n")) - ) { - result += `${spaces}${key}: "${value.replace(/"/g, '\\"')}"\n`; - continue; + /** + * @param {string} text + * @returns {string} + * @link https://github.com/buildkite/emojis#emoji-reference + */ + const getEmoji = string => { + if (string === "amazonlinux") { + return ":aws:"; } + return `:${string}:`; + }; - result += `${spaces}${key}: ${value}\n`; - } - - return result; -} - -function getPipeline(buildId) { /** - * Helpers + * @typedef {"linux" | "darwin" | "windows"} Os + * @typedef {"aarch64" | "x64"} Arch + * @typedef {"musl"} Abi */ - const getKey = platform => { - const { os, arch, abi, baseline } = platform; + /** + * @typedef Target + * @property {Os} os + * @property {Arch} arch + * @property {Abi} [abi] + * @property {boolean} [baseline] + */ + /** + * @param {Target} target + * @returns {string} + */ + const getTargetKey = target => { + const { os, arch, abi, baseline } = target; + let key = `${os}-${arch}`; if (abi) { - if (baseline) { - return `${os}-${arch}-${abi}-baseline`; - } - return `${os}-${arch}-${abi}`; + key += `-${abi}`; } if (baseline) { - return `${os}-${arch}-baseline`; + key += "-baseline"; } - - return `${os}-${arch}`; + return key; }; - const getLabel = platform => { - const { os, arch, abi, baseline, release } = platform; - let label = release ? 
`:${os}: ${release} ${arch}` : `:${os}: ${arch}`; + /** + * @param {Target} target + * @returns {string} + */ + const getTargetLabel = target => { + const { os, arch, abi, baseline } = target; + let label = `${getEmoji(os)} ${arch}`; if (abi) { label += `-${abi}`; } if (baseline) { - label += `-baseline`; + label += "-baseline"; } return label; }; - // https://buildkite.com/docs/pipelines/command-step#retry-attributes - const getRetry = (limit = 3) => { + /** + * @typedef Platform + * @property {Os} os + * @property {Arch} arch + * @property {Abi} [abi] + * @property {boolean} [baseline] + * @property {string} [distro] + * @property {string} release + */ + + /** + * @param {Platform} platform + * @returns {string} + */ + const getPlatformKey = platform => { + const { os, arch, abi, baseline, distro, release } = platform; + const target = getTargetKey({ os, arch, abi, baseline }); + if (distro) { + return `${target}-${distro}-${release.replace(/\./g, "")}`; + } + return `${target}-${release.replace(/\./g, "")}`; + }; + + /** + * @param {Platform} platform + * @returns {string} + */ + const getPlatformLabel = platform => { + const { os, arch, baseline, distro, release } = platform; + let label = `${getEmoji(distro || os)} ${release} ${arch}`; + if (baseline) { + label += "-baseline"; + } + return label; + }; + + /** + * @param {Platform} platform + * @returns {string} + */ + const getImageKey = platform => { + const { os, arch, distro, release } = platform; + if (distro) { + return `${os}-${arch}-${distro}-${release.replace(/\./g, "")}`; + } + return `${os}-${arch}-${release.replace(/\./g, "")}`; + }; + + /** + * @param {Platform} platform + * @returns {string} + */ + const getImageLabel = platform => { + const { os, arch, distro, release } = platform; + return `${getEmoji(distro || os)} ${release} ${arch}`; + }; + + /** + * @param {number} [limit] + * @link https://buildkite.com/docs/pipelines/command-step#retry-attributes + */ + const getRetry = (limit = 0) => { return { automatic: [ - { exit_status: 1, limit: 1 }, - { exit_status: -1, limit }, - { exit_status: 255, limit }, - { signal_reason: "agent_stop", limit }, + { exit_status: 1, limit }, + { exit_status: -1, limit: 3 }, + { exit_status: 255, limit: 3 }, + { signal_reason: "agent_stop", limit: 3 }, ], }; }; - // https://buildkite.com/docs/pipelines/managing-priorities + /** + * @returns {number} + * @link https://buildkite.com/docs/pipelines/managing-priorities + */ const getPriority = () => { if (isFork()) { return -1; @@ -132,156 +194,333 @@ function getPipeline(buildId) { }; /** - * Steps + * @param {Target} target + * @returns {Record} */ + const getBuildEnv = target => { + const { baseline, abi } = target; + return { + ENABLE_BASELINE: baseline ? "ON" : "OFF", + ABI: abi === "musl" ? 
"musl" : undefined, + }; + }; - const getBuildVendorStep = platform => { - const { os, arch, abi, baseline } = platform; + /** + * @param {Target} target + * @returns {string} + */ + const getBuildToolchain = target => { + const { os, arch, abi, baseline } = target; + let key = `${os}-${arch}`; + if (abi) { + key += `-${abi}`; + } + if (baseline) { + key += "-baseline"; + } + return key; + }; + + /** + * Agents + */ + + /** + * @typedef {Record} Agent + */ + + /** + * @param {Platform} platform + * @returns {boolean} + */ + const isUsingNewAgent = platform => { + const { os } = platform; + if (os === "linux") { + return true; + } + return false; + }; + /** + * @param {"v1" | "v2"} version + * @param {Platform} platform + * @param {string} [instanceType] + * @returns {Agent} + */ + const getEmphemeralAgent = (version, platform, instanceType) => { + const { os, arch, abi, distro, release } = platform; + if (version === "v1") { + return { + robobun: true, + os, + arch, + distro, + release, + }; + } + let image; + if (distro) { + image = `${os}-${arch}-${distro}-${release}`; + } else { + image = `${os}-${arch}-${release}`; + } + if (buildImages && !publishImages) { + image += `-build-${getBuildNumber()}`; + } else { + image += `-v${getBootstrapVersion()}`; + } return { - key: `${getKey(platform)}-build-vendor`, - label: `build-vendor`, - agents: { + robobun: true, + robobun2: true, + os, + arch, + abi, + distro, + release, + "image-name": image, + "instance-type": instanceType, + }; + }; + + /** + * @param {Target} target + * @returns {Agent} + */ + const getBuildAgent = target => { + const { os, arch, abi } = target; + if (isUsingNewAgent(target)) { + const instanceType = arch === "aarch64" ? "c8g.8xlarge" : "c7i.8xlarge"; + return getEmphemeralAgent("v2", target, instanceType); + } + return { + queue: `build-${os}`, + os, + arch, + abi, + }; + }; + + /** + * @param {Target} target + * @returns {Agent} + */ + const getZigAgent = target => { + const { abi, arch } = target; + // if (abi === "musl") { + // const instanceType = arch === "aarch64" ? "c8g.large" : "c7i.large"; + // return getEmphemeralAgent("v2", target, instanceType); + // } + return { + queue: "build-zig", + }; + }; + + /** + * @param {Platform} platform + * @returns {Agent} + */ + const getTestAgent = platform => { + const { os, arch, release } = platform; + if (isUsingNewAgent(platform)) { + const instanceType = arch === "aarch64" ? "t4g.large" : "t3.large"; + return getEmphemeralAgent("v2", platform, instanceType); + } + if (os === "darwin") { + return { os, arch, - abi, - queue: abi ? 
`build-${os}-${abi}` : `build-${os}`, + release, + queue: "test-darwin", + }; + } + return getEmphemeralAgent("v1", platform); + }; + + /** + * Steps + */ + + /** + * @typedef Step + * @property {string} key + * @property {string} [label] + * @property {Record} [agents] + * @property {Record} [env] + * @property {string} command + * @property {string[]} [depends_on] + * @property {Record} [retry] + * @property {boolean} [cancel_on_build_failing] + * @property {boolean} [soft_fail] + * @property {number} [parallelism] + * @property {number} [concurrency] + * @property {string} [concurrency_group] + * @property {number} [priority] + * @property {number} [timeout_in_minutes] + * @link https://buildkite.com/docs/pipelines/command-step + */ + + /** + * @param {Platform} platform + * @param {string} [step] + * @returns {string[]} + */ + const getDependsOn = (platform, step) => { + if (imagePlatforms.has(getImageKey(platform))) { + const key = `${getImageKey(platform)}-build-image`; + if (key !== step) { + return [key]; + } + } + return []; + }; + + /** + * @param {Platform} platform + * @returns {Step} + */ + const getBuildImageStep = platform => { + const { os, arch, distro, release } = platform; + const action = publishImages ? "publish-image" : "create-image"; + return { + key: `${getImageKey(platform)}-build-image`, + label: `${getImageLabel(platform)} - build-image`, + agents: { + queue: "build-image", }, - retry: getRetry(), - cancel_on_build_failing: isMergeQueue(), env: { - ENABLE_BASELINE: baseline ? "ON" : "OFF", + DEBUG: "1", }, + retry: getRetry(), + command: `node ./scripts/machine.mjs ${action} --ci --cloud=aws --os=${os} --arch=${arch} --distro=${distro} --distro-version=${release}`, + }; + }; + + /** + * @param {Platform} platform + * @returns {Step} + */ + const getBuildVendorStep = platform => { + return { + key: `${getTargetKey(platform)}-build-vendor`, + label: `${getTargetLabel(platform)} - build-vendor`, + depends_on: getDependsOn(platform), + agents: getBuildAgent(platform), + retry: getRetry(), + cancel_on_build_failing: isMergeQueue(), + env: getBuildEnv(platform), command: "bun run build:ci --target dependencies", }; }; + /** + * @param {Platform} platform + * @returns {Step} + */ const getBuildCppStep = platform => { - const { os, arch, abi, baseline } = platform; - return { - key: `${getKey(platform)}-build-cpp`, - label: `build-cpp`, - agents: { - os, - arch, - abi, - queue: abi ? `build-${os}-${abi}` : `build-${os}`, - }, + key: `${getTargetKey(platform)}-build-cpp`, + label: `${getTargetLabel(platform)} - build-cpp`, + depends_on: getDependsOn(platform), + agents: getBuildAgent(platform), retry: getRetry(), cancel_on_build_failing: isMergeQueue(), env: { BUN_CPP_ONLY: "ON", - ENABLE_BASELINE: baseline ? 
"ON" : "OFF", + ...getBuildEnv(platform), }, command: "bun run build:ci --target bun", }; }; + /** + * @param {Platform} platform + * @returns {Step} + */ const getBuildZigStep = platform => { - const { os, arch, abi, baseline } = platform; - const toolchain = getKey(platform); - + const toolchain = getBuildToolchain(platform); return { - key: `${getKey(platform)}-build-zig`, - label: `build-zig`, - agents: { - queue: "build-zig", - }, - retry: getRetry(), + key: `${getTargetKey(platform)}-build-zig`, + label: `${getTargetLabel(platform)} - build-zig`, + depends_on: getDependsOn(platform), + agents: getZigAgent(platform), + retry: getRetry(1), // FIXME: Sometimes zig build hangs, so we need to retry once cancel_on_build_failing: isMergeQueue(), - env: { - ENABLE_BASELINE: baseline ? "ON" : "OFF", - }, + env: getBuildEnv(platform), command: `bun run build:ci --target bun-zig --toolchain ${toolchain}`, }; }; + /** + * @param {Platform} platform + * @returns {Step} + */ const getBuildBunStep = platform => { - const { os, arch, abi, baseline } = platform; - return { - key: `${getKey(platform)}-build-bun`, - label: `build-bun`, + key: `${getTargetKey(platform)}-build-bun`, + label: `${getTargetLabel(platform)} - build-bun`, depends_on: [ - `${getKey(platform)}-build-vendor`, - `${getKey(platform)}-build-cpp`, - `${getKey(platform)}-build-zig`, + `${getTargetKey(platform)}-build-vendor`, + `${getTargetKey(platform)}-build-cpp`, + `${getTargetKey(platform)}-build-zig`, ], - agents: { - os, - arch, - abi, - queue: `build-${os}`, - }, + agents: getBuildAgent(platform), retry: getRetry(), cancel_on_build_failing: isMergeQueue(), env: { BUN_LINK_ONLY: "ON", - ENABLE_BASELINE: baseline ? "ON" : "OFF", + ...getBuildEnv(platform), }, command: "bun run build:ci --target bun", }; }; + /** + * @param {Platform} platform + * @returns {Step} + */ const getTestBunStep = platform => { - const { os, arch, abi, distro, release } = platform; - - let name; - if (os === "darwin" || os === "windows") { - name = getLabel({ ...platform, release }); - } else { - name = getLabel({ ...platform, os: distro, release }); - } - - let agents; - if (os === "darwin") { - agents = { os, arch, abi, queue: `test-darwin` }; - } else if (os === "windows") { - agents = { os, arch, abi, robobun: true }; - } else { - agents = { os, arch, abi, distro, release, robobun: true }; - } - + const { os } = platform; let command; if (os === "windows") { - command = `node .\\scripts\\runner.node.mjs --step ${getKey(platform)}-build-bun`; + command = `node .\\scripts\\runner.node.mjs --step ${getTargetKey(platform)}-build-bun`; } else { - command = `./scripts/runner.node.mjs --step ${getKey(platform)}-build-bun`; + command = `./scripts/runner.node.mjs --step ${getTargetKey(platform)}-build-bun`; } - let parallelism; if (os === "darwin") { parallelism = 2; } else { parallelism = 10; } - - let depends; let env; + let depends = []; if (buildId) { env = { BUILDKITE_ARTIFACT_BUILD_ID: buildId, }; } else { - depends = [`${getKey(platform)}-build-bun`]; + depends = [`${getTargetKey(platform)}-build-bun`]; } - let retry; if (os !== "windows") { // When the runner fails on Windows, Buildkite only detects an exit code of 1. // Because of this, we don't know if the run was fatal, or soft-failed. 
- retry = getRetry(); + retry = getRetry(1); + } + let soft_fail; + if (isMainBranch()) { + soft_fail = true; + } else { + soft_fail = [{ exit_status: 2 }]; } - return { - key: `${getKey(platform)}-${distro}-${release.replace(/\./g, "")}-test-bun`, - label: `${name} - test-bun`, - depends_on: depends, - agents, + key: `${getPlatformKey(platform)}-test-bun`, + label: `${getPlatformLabel(platform)} - test-bun`, + depends_on: [...depends, ...getDependsOn(platform)], + agents: getTestAgent(platform), retry, cancel_on_build_failing: isMergeQueue(), - soft_fail: isMainBranch(), + soft_fail, parallelism, command, env, @@ -292,66 +531,136 @@ function getPipeline(buildId) { * Config */ + /** + * @type {Platform[]} + */ const buildPlatforms = [ - { os: "darwin", arch: "aarch64" }, - { os: "darwin", arch: "x64" }, - { os: "linux", arch: "aarch64" }, - // { os: "linux", arch: "aarch64", abi: "musl" }, // TODO: - { os: "linux", arch: "x64" }, - { os: "linux", arch: "x64", baseline: true }, - // { os: "linux", arch: "x64", abi: "musl" }, // TODO: - { os: "windows", arch: "x64" }, - { os: "windows", arch: "x64", baseline: true }, + { os: "darwin", arch: "aarch64", release: "14" }, + { os: "darwin", arch: "x64", release: "14" }, + { os: "linux", arch: "aarch64", distro: "debian", release: "11" }, + { os: "linux", arch: "x64", distro: "debian", release: "11" }, + { os: "linux", arch: "x64", baseline: true, distro: "debian", release: "11" }, + { os: "linux", arch: "aarch64", abi: "musl", distro: "alpine", release: "3.20" }, + { os: "linux", arch: "x64", abi: "musl", distro: "alpine", release: "3.20" }, + { os: "linux", arch: "x64", abi: "musl", baseline: true, distro: "alpine", release: "3.20" }, + { os: "windows", arch: "x64", release: "2019" }, + { os: "windows", arch: "x64", baseline: true, release: "2019" }, ]; + /** + * @type {Platform[]} + */ const testPlatforms = [ - { os: "darwin", arch: "aarch64", distro: "sonoma", release: "14" }, - { os: "darwin", arch: "aarch64", distro: "ventura", release: "13" }, - { os: "darwin", arch: "x64", distro: "sonoma", release: "14" }, - { os: "darwin", arch: "x64", distro: "ventura", release: "13" }, + { os: "darwin", arch: "aarch64", release: "14" }, + { os: "darwin", arch: "aarch64", release: "13" }, + { os: "darwin", arch: "x64", release: "14" }, + { os: "darwin", arch: "x64", release: "13" }, { os: "linux", arch: "aarch64", distro: "debian", release: "12" }, + { os: "linux", arch: "aarch64", distro: "debian", release: "11" }, + { os: "linux", arch: "aarch64", distro: "debian", release: "10" }, + { os: "linux", arch: "x64", distro: "debian", release: "12" }, + { os: "linux", arch: "x64", distro: "debian", release: "11" }, + { os: "linux", arch: "x64", distro: "debian", release: "10" }, + { os: "linux", arch: "x64", baseline: true, distro: "debian", release: "12" }, + { os: "linux", arch: "x64", baseline: true, distro: "debian", release: "11" }, + { os: "linux", arch: "x64", baseline: true, distro: "debian", release: "10" }, + // { os: "linux", arch: "aarch64", distro: "ubuntu", release: "24.04" }, { os: "linux", arch: "aarch64", distro: "ubuntu", release: "22.04" }, { os: "linux", arch: "aarch64", distro: "ubuntu", release: "20.04" }, - // { os: "linux", arch: "aarch64", abi: "musl", distro: "alpine", release: "edge" }, // TODO: - { os: "linux", arch: "x64", distro: "debian", release: "12" }, + // { os: "linux", arch: "x64", distro: "ubuntu", release: "24.04" }, { os: "linux", arch: "x64", distro: "ubuntu", release: "22.04" }, { os: "linux", arch: "x64", distro: 
"ubuntu", release: "20.04" }, - { os: "linux", arch: "x64", distro: "debian", release: "12", baseline: true }, - { os: "linux", arch: "x64", distro: "ubuntu", release: "22.04", baseline: true }, - { os: "linux", arch: "x64", distro: "ubuntu", release: "20.04", baseline: true }, - // { os: "linux", arch: "x64", abi: "musl", distro: "alpine", release: "edge" }, // TODO: - { os: "windows", arch: "x64", distro: "server", release: "2019" }, - { os: "windows", arch: "x64", distro: "server", release: "2019", baseline: true }, + // { os: "linux", arch: "x64", baseline: true, distro: "ubuntu", release: "24.04" }, + { os: "linux", arch: "x64", baseline: true, distro: "ubuntu", release: "22.04" }, + { os: "linux", arch: "x64", baseline: true, distro: "ubuntu", release: "20.04" }, + { os: "linux", arch: "aarch64", distro: "amazonlinux", release: "2023" }, + // { os: "linux", arch: "aarch64", distro: "amazonlinux", release: "2" }, + { os: "linux", arch: "x64", distro: "amazonlinux", release: "2023" }, + // { os: "linux", arch: "x64", distro: "amazonlinux", release: "2" }, + { os: "linux", arch: "x64", baseline: true, distro: "amazonlinux", release: "2023" }, + // { os: "linux", arch: "x64", baseline: true, distro: "amazonlinux", release: "2" }, + { os: "linux", arch: "aarch64", abi: "musl", distro: "alpine", release: "3.20" }, + // { os: "linux", arch: "aarch64", abi: "musl", distro: "alpine", release: "3.17" }, + { os: "linux", arch: "x64", abi: "musl", distro: "alpine", release: "3.20" }, + // { os: "linux", arch: "x64", abi: "musl", distro: "alpine", release: "3.17" }, + { os: "linux", arch: "x64", abi: "musl", baseline: true, distro: "alpine", release: "3.20" }, + // { os: "linux", arch: "x64", abi: "musl", baseline: true, distro: "alpine", release: "3.17" }, + { os: "windows", arch: "x64", release: "2019" }, + { os: "windows", arch: "x64", baseline: true, release: "2019" }, ]; + const imagePlatforms = new Map( + [...buildPlatforms, ...testPlatforms] + .filter(platform => buildImages && isUsingNewAgent(platform)) + .map(platform => [getImageKey(platform), platform]), + ); + + /** + * @type {Step[]} + */ + const steps = []; + + if (imagePlatforms.size) { + steps.push({ + group: ":docker:", + steps: [...imagePlatforms.values()].map(platform => getBuildImageStep(platform)), + }); + } + + for (const platform of buildPlatforms) { + const { os, arch, abi, baseline } = platform; + + /** @type {Step[]} */ + const platformSteps = []; + + if (buildImages || !buildId) { + platformSteps.push( + getBuildVendorStep(platform), + getBuildCppStep(platform), + getBuildZigStep(platform), + getBuildBunStep(platform), + ); + } + + if (!skipTests) { + platformSteps.push( + ...testPlatforms + .filter( + testPlatform => + testPlatform.os === os && + testPlatform.arch === arch && + testPlatform.abi === abi && + testPlatform.baseline === baseline, + ) + .map(testPlatform => getTestBunStep(testPlatform)), + ); + } + + if (!platformSteps.length) { + continue; + } + + steps.push({ + key: getTargetKey(platform), + group: getTargetLabel(platform), + steps: platformSteps, + }); + } + + if (isMainBranch() && !isFork()) { + steps.push({ + label: ":github:", + agents: { + queue: "test-darwin", + }, + depends_on: buildPlatforms.map(platform => `${getTargetKey(platform)}-build-bun`), + command: ".buildkite/scripts/upload-release.sh", + }); + } + return { priority: getPriority(), - steps: [ - ...buildPlatforms.map(platform => { - const { os, arch, baseline } = platform; - - let steps = [ - ...testPlatforms - .filter(platform => 
platform.os === os && platform.arch === arch && baseline === platform.baseline) - .map(platform => getTestBunStep(platform)), - ]; - - if (!buildId) { - steps.unshift( - getBuildVendorStep(platform), - getBuildCppStep(platform), - getBuildZigStep(platform), - getBuildBunStep(platform), - ); - } - - return { - key: getKey(platform), - group: getLabel(platform), - steps, - }; - }), - ], + steps, }; } @@ -369,26 +678,51 @@ async function main() { console.log(" - No build found"); } - console.log("Checking changed files..."); - const baseRef = getCommit(); - console.log(" - Base Ref:", baseRef); - const headRef = lastBuild?.commit_id || getTargetBranch() || getMainBranch(); - console.log(" - Head Ref:", headRef); - - const changedFiles = await getChangedFiles(undefined, baseRef, headRef); - if (changedFiles) { - if (changedFiles.length) { - changedFiles.forEach(filename => console.log(` - ${filename}`)); - } else { - console.log(" - No changed files"); + let changedFiles; + // FIXME: Fix various bugs when calculating changed files + // false -> !isFork() && !isMainBranch() + if (false) { + console.log("Checking changed files..."); + const baseRef = lastBuild?.commit_id || getTargetBranch() || getMainBranch(); + console.log(" - Base Ref:", baseRef); + const headRef = getCommit(); + console.log(" - Head Ref:", headRef); + + changedFiles = await getChangedFiles(undefined, baseRef, headRef); + if (changedFiles) { + if (changedFiles.length) { + changedFiles.forEach(filename => console.log(` - ${filename}`)); + } else { + console.log(" - No changed files"); + } } } const isDocumentationFile = filename => /^(\.vscode|\.github|bench|docs|examples)|\.(md)$/i.test(filename); const isTestFile = filename => /^test/i.test(filename) || /runner\.node\.mjs$/i.test(filename); - console.log("Checking if CI should be skipped..."); + console.log("Checking if CI should be forced..."); + let forceBuild; + let ciFileChanged; { + const message = getCommitMessage(); + const match = /\[(force ci|ci force|ci force build)\]/i.exec(message); + if (match) { + const [, reason] = match; + console.log(" - Yes, because commit message contains:", reason); + forceBuild = true; + } + for (const coref of [".buildkite/ci.mjs", "scripts/utils.mjs", "scripts/bootstrap.sh", "scripts/machine.mjs"]) { + if (changedFiles && changedFiles.includes(coref)) { + console.log(" - Yes, because the list of changed files contains:", coref); + forceBuild = true; + ciFileChanged = true; + } + } + } + + console.log("Checking if CI should be skipped..."); + if (!forceBuild) { const message = getCommitMessage(); const match = /\[(skip ci|no ci|ci skip|ci no)\]/i.exec(message); if (match) { @@ -396,15 +730,49 @@ async function main() { console.log(" - Yes, because commit message contains:", reason); return; } + if (changedFiles && changedFiles.every(filename => isDocumentationFile(filename))) { + console.log(" - Yes, because all changed files are documentation"); + return; + } } - if (changedFiles && changedFiles.every(filename => isDocumentationFile(filename))) { - console.log(" - Yes, because all changed files are documentation"); - return; + + console.log("Checking if CI should re-build images..."); + let buildImages; + { + const message = getCommitMessage(); + const match = /\[(build images?|images? 
build)\]/i.exec(message); + if (match) { + const [, reason] = match; + console.log(" - Yes, because commit message contains:", reason); + buildImages = true; + } + if (ciFileChanged) { + console.log(" - Yes, because a core CI file changed"); + buildImages = true; + } + } + + console.log("Checking if CI should publish images..."); + let publishImages; + { + const message = getCommitMessage(); + const match = /\[(publish images?|images? publish)\]/i.exec(message); + if (match) { + const [, reason] = match; + console.log(" - Yes, because commit message contains:", reason); + publishImages = true; + buildImages = true; + } + if (ciFileChanged && isMainBranch()) { + console.log(" - Yes, because a core CI file changed and this is main branch"); + publishImages = true; + buildImages = true; + } } console.log("Checking if build should be skipped..."); let skipBuild; - { + if (!forceBuild) { const message = getCommitMessage(); const match = /\[(only tests?|tests? only|skip build|no build|build skip|build no)\]/i.exec(message); if (match) { @@ -412,15 +780,33 @@ async function main() { console.log(" - Yes, because commit message contains:", reason); skipBuild = true; } + if (changedFiles && changedFiles.every(filename => isTestFile(filename) || isDocumentationFile(filename))) { + console.log(" - Yes, because all changed files are tests or documentation"); + skipBuild = true; + } } - if (changedFiles && changedFiles.every(filename => isTestFile(filename) || isDocumentationFile(filename))) { - console.log(" - Yes, because all changed files are tests or documentation"); - skipBuild = true; + + console.log("Checking if tests should be skipped..."); + let skipTests; + { + const message = getCommitMessage(); + const match = /\[(skip tests?|tests? skip|no tests?|tests? no)\]/i.exec(message); + if (match) { + console.log(" - Yes, because commit message contains:", match[1]); + skipTests = true; + } + if (isMainBranch()) { + console.log(" - Yes, because we're on main branch"); + skipTests = true; + } } console.log("Checking if build is a named release..."); let buildRelease; - { + if (/^(1|true|on|yes)$/i.test(getEnv("RELEASE", false))) { + console.log(" - Yes, because RELEASE environment variable is set"); + buildRelease = true; + } else { const message = getCommitMessage(); const match = /\[(release|release build|build release)\]/i.exec(message); if (match) { @@ -431,7 +817,13 @@ async function main() { } console.log("Generating pipeline..."); - const pipeline = getPipeline(lastBuild && skipBuild ? lastBuild.id : undefined); + const pipeline = getPipeline({ + buildId: lastBuild && skipBuild && !forceBuild ? lastBuild.id : undefined, + buildImages, + publishImages, + skipTests, + }); + const content = toYaml(pipeline); const contentPath = join(process.cwd(), ".buildkite", "ci.yml"); writeFileSync(contentPath, content); @@ -439,14 +831,17 @@ async function main() { console.log("Generated pipeline:"); console.log(" - Path:", contentPath); console.log(" - Size:", (content.length / 1024).toFixed(), "KB"); + if (isBuildkite) { + await uploadArtifact(contentPath); + } if (isBuildkite) { console.log("Setting canary revision..."); const canaryRevision = buildRelease ? 
0 : await getCanaryRevision(); - await spawnSafe(["buildkite-agent", "meta-data", "set", "canary", `${canaryRevision}`]); + await spawnSafe(["buildkite-agent", "meta-data", "set", "canary", `${canaryRevision}`], { stdio: "inherit" }); console.log("Uploading pipeline..."); - await spawnSafe(["buildkite-agent", "pipeline", "upload", contentPath]); + await spawnSafe(["buildkite-agent", "pipeline", "upload", contentPath], { stdio: "inherit" }); } } diff --git a/.buildkite/scripts/upload-release.sh b/.buildkite/scripts/upload-release.sh index 83d70d5730dca3..b684dfb4a3d958 100755 --- a/.buildkite/scripts/upload-release.sh +++ b/.buildkite/scripts/upload-release.sh @@ -164,7 +164,9 @@ function upload_s3_file() { function send_bench_webhook() { if [ -z "$BENCHMARK_URL" ]; then - return 1 + echo "error: \$BENCHMARK_URL is not set" + # exit 1 # TODO: this isn't live yet + return fi local tag="$1" @@ -200,6 +202,12 @@ function create_release() { bun-linux-x64-profile.zip bun-linux-x64-baseline.zip bun-linux-x64-baseline-profile.zip + bun-linux-aarch64-musl.zip + bun-linux-aarch64-musl-profile.zip + bun-linux-x64-musl.zip + bun-linux-x64-musl-profile.zip + bun-linux-x64-musl-baseline.zip + bun-linux-x64-musl-baseline-profile.zip bun-windows-x64.zip bun-windows-x64-profile.zip bun-windows-x64-baseline.zip diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9feff2712177b1..0bf6e5cf591e00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,7 +11,7 @@ Bun currently requires `glibc >=2.32` in development which means if you're on Ub Using your system's package manager, install Bun's dependencies: -{% codetabs %} +{% codetabs group="os" %} ```bash#macOS (Homebrew) $ brew install automake ccache cmake coreutils gnu-sed go icu4c libiconv libtool ninja pkg-config rust ruby @@ -60,7 +60,7 @@ $ brew install bun Bun requires LLVM 16 (`clang` is part of LLVM). This version requirement is to match WebKit (precompiled), as mismatching versions will cause memory allocation failures at runtime. In most cases, you can install LLVM through your system package manager: -{% codetabs %} +{% codetabs group="os" %} ```bash#macOS (Homebrew) $ brew install llvm@18 @@ -97,7 +97,7 @@ $ which clang-16 If not, run this to manually add it: -{% codetabs %} +{% codetabs group="os" %} ```bash#macOS (Homebrew) # use fish_add_path if you're using fish @@ -285,7 +285,7 @@ If you see this error when compiling, run: $ xcode-select --install ``` -## Cannot find `libatomic.a` +### Cannot find `libatomic.a` Bun defaults to linking `libatomic` statically, as not all systems have it. If you are building on a distro that does not have a static libatomic available, you can run the following command to enable dynamic linking: @@ -295,7 +295,7 @@ $ bun run build -DUSE_STATIC_LIBATOMIC=OFF The built version of Bun may not work on other systems if compiled this way. 
-## ccache conflicts with building TinyCC on macOS +### ccache conflicts with building TinyCC on macOS If you run into issues with `ccache` when building TinyCC, try reinstalling ccache @@ -303,3 +303,9 @@ If you run into issues with `ccache` when building TinyCC, try reinstalling ccac brew uninstall ccache brew install ccache ``` + +## Using bun-debug + +- Disable logging: `BUN_DEBUG_QUIET_LOGS=1 bun-debug ...` (to disable all debug logging) +- Enable logging for a specific zig scope: `BUN_DEBUG_EventLoop=1 bun-debug ...` (to allow `std.log.scoped(.EventLoop)`) +- Bun transpiles every file it runs, to see the actual executed source in a debug build find it in `/tmp/bun-debug-src/...path/to/file`, for example the transpiled version of `/home/bun/index.ts` would be in `/tmp/bun-debug-src/home/bun/index.ts` diff --git a/LATEST b/LATEST index 9b51125a6c52d3..474ad5be60e054 100644 --- a/LATEST +++ b/LATEST @@ -1 +1 @@ -1.1.34 \ No newline at end of file +1.1.36 \ No newline at end of file diff --git a/ci/linux/Dockerfile b/ci/linux/Dockerfile new file mode 100644 index 00000000000000..3b46e73f6ccdbd --- /dev/null +++ b/ci/linux/Dockerfile @@ -0,0 +1,18 @@ +ARG IMAGE=debian:11 +FROM $IMAGE +COPY ./scripts/bootstrap.sh /tmp/bootstrap.sh +ENV CI=true +RUN sh /tmp/bootstrap.sh && rm -rf /tmp/* +WORKDIR /workspace/bun +COPY bunfig.toml bunfig.toml +COPY package.json package.json +COPY CMakeLists.txt CMakeLists.txt +COPY cmake/ cmake/ +COPY scripts/ scripts/ +COPY patches/ patches/ +COPY *.zig ./ +COPY src/ src/ +COPY packages/ packages/ +COPY test/ test/ +RUN bun i +RUN bun run build:ci diff --git a/ci/linux/scripts/set-hostname.sh b/ci/linux/scripts/set-hostname.sh new file mode 100644 index 00000000000000..e529f74ce01976 --- /dev/null +++ b/ci/linux/scripts/set-hostname.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +# This script sets the hostname of the current machine. + +execute() { + echo "$ $@" >&2 + if ! "$@"; then + echo "Command failed: $@" >&2 + exit 1 + fi +} + +main() { + if [ "$#" -ne 1 ]; then + echo "Usage: $0 " >&2 + exit 1 + fi + + if [ -f "$(which hostnamectl)" ]; then + execute hostnamectl set-hostname "$1" + else + echo "Error: hostnamectl is not installed." >&2 + exit 1 + fi +} + +main "$@" diff --git a/ci/linux/scripts/start-tailscale.sh b/ci/linux/scripts/start-tailscale.sh new file mode 100644 index 00000000000000..3b519bfdf59339 --- /dev/null +++ b/ci/linux/scripts/start-tailscale.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script starts tailscale on the current machine. + +execute() { + echo "$ $@" >&2 + if ! 
"$@"; then + echo "Command failed: $@" >&2 + exit 1 + fi +} + +main() { + if [ "$#" -ne 1 ]; then + echo "Usage: $0 " >&2 + exit 1 + fi + + execute tailscale up --reset --ssh --accept-risk=lose-ssh --auth-key="$1" +} + +main "$@" diff --git a/ci/package.json b/ci/package.json index ffb1297dcdd3a9..28bd56c959fcd2 100644 --- a/ci/package.json +++ b/ci/package.json @@ -2,7 +2,7 @@ "private": true, "scripts": { "bootstrap": "brew install gh jq cirruslabs/cli/tart cirruslabs/cli/sshpass hashicorp/tap/packer && packer init darwin", - "login": "gh auth token | tart login ghcr.io --username $(gh api user --jq .login) --password-stdin", + "login": "token=$(gh auth token); username=$(gh api user --jq .login); echo \"Login as $username...\"; echo \"$token\" | tart login ghcr.io --username \"$username\" --password-stdin; echo \"$token\" | docker login ghcr.io --username \"$username\" --password-stdin", "fetch:image-name": "echo ghcr.io/oven-sh/bun-vm", "fetch:darwin-version": "echo 1", "fetch:macos-version": "sw_vers -productVersion | cut -d. -f1", diff --git a/cmake/CompilerFlags.cmake b/cmake/CompilerFlags.cmake index bf8cf576abf7e9..31d738134a0af1 100644 --- a/cmake/CompilerFlags.cmake +++ b/cmake/CompilerFlags.cmake @@ -265,7 +265,7 @@ if(ENABLE_LTO) endif() # --- Remapping --- -if(UNIX) +if(UNIX AND CI) register_compiler_flags( DESCRIPTION "Remap source files" -ffile-prefix-map=${CWD}=. diff --git a/cmake/Globals.cmake b/cmake/Globals.cmake index 9760101274181a..106e1285ea586d 100644 --- a/cmake/Globals.cmake +++ b/cmake/Globals.cmake @@ -105,14 +105,6 @@ else() unsupported(CMAKE_HOST_SYSTEM_NAME) endif() -if(EXISTS "/lib/ld-musl-aarch64.so.1") - set(IS_MUSL ON) -elseif(EXISTS "/lib/ld-musl-x86_64.so.1") - set(IS_MUSL ON) -else() - set(IS_MUSL OFF) -endif() - if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64|ARM64|aarch64|AARCH64") set(HOST_OS "aarch64") elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64|X86_64|x64|X64|amd64|AMD64") @@ -144,6 +136,16 @@ else() set(WARNING WARNING) endif() +if(LINUX) + if(EXISTS "/etc/alpine-release") + set(DEFAULT_ABI "musl") + else() + set(DEFAULT_ABI "gnu") + endif() + + optionx(ABI "musl|gnu" "The ABI to use (e.g. musl, gnu)" DEFAULT ${DEFAULT_ABI}) +endif() + # TODO: This causes flaky zig builds in CI, so temporarily disable it. 
# if(CI) # set(DEFAULT_VENDOR_PATH ${CACHE_PATH}/vendor) diff --git a/cmake/targets/BuildBun.cmake b/cmake/targets/BuildBun.cmake index 9976d8dae339af..c27d820afee4ba 100644 --- a/cmake/targets/BuildBun.cmake +++ b/cmake/targets/BuildBun.cmake @@ -484,14 +484,12 @@ set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o) if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm|ARM|arm64|ARM64|aarch64|AARCH64") - set(IS_ARM64 ON) if(APPLE) set(ZIG_CPU "apple_m1") else() set(ZIG_CPU "native") endif() elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|X86_64|x64|X64|amd64|AMD64") - set(IS_X86_64 ON) if(ENABLE_BASELINE) set(ZIG_CPU "nehalem") else() @@ -528,6 +526,7 @@ register_command( -Dcanary=${CANARY_REVISION} -Dcodegen_path=${CODEGEN_PATH} -Dcodegen_embed=$,true,false> + --prominent-compile-errors ${ZIG_FLAGS_BUN} ARTIFACTS ${BUN_ZIG_OUTPUT} @@ -760,8 +759,8 @@ if(NOT WIN32) ) if(DEBUG) # TODO: this shouldn't be necessary long term - if (NOT IS_MUSL) - set(ABI_PUBLIC_FLAGS + if (NOT ABI STREQUAL "musl") + target_compile_options(${bun} PUBLIC -fsanitize=null -fsanitize-recover=all -fsanitize=bounds @@ -772,14 +771,9 @@ if(NOT WIN32) -fsanitize=returns-nonnull-attribute -fsanitize=unreachable ) - set(ABI_PRIVATE_FLAGS + target_link_libraries(${bun} PRIVATE -fsanitize=null ) - else() - set(ABI_PUBLIC_FLAGS - ) - set(ABI_PRIVATE_FLAGS - ) endif() target_compile_options(${bun} PUBLIC @@ -797,10 +791,6 @@ if(NOT WIN32) -Wno-unused-function -Wno-nullability-completeness -Werror - ${ABI_PUBLIC_FLAGS} - ) - target_link_libraries(${bun} PRIVATE - ${ABI_PRIVATE_FLAGS} ) else() # Leave -Werror=unused off in release builds so we avoid errors from being used in ASSERT @@ -845,65 +835,48 @@ if(WIN32) /delayload:IPHLPAPI.dll ) endif() -elseif(APPLE) +endif() + +if(APPLE) target_link_options(${bun} PUBLIC -dead_strip -dead_strip_dylibs + -Wl,-ld_new + -Wl,-no_compact_unwind -Wl,-stack_size,0x1200000 -fno-keep-static-consts + -Wl,-map,${bun}.linker-map ) -else() - # Try to use lld-16 if available, otherwise fallback to lld - # Cache it so we don't have to re-run CMake to pick it up - if((NOT DEFINED LLD_NAME) AND (NOT CI OR BUN_LINK_ONLY)) - find_program(LLD_EXECUTABLE_NAME lld-${LLVM_VERSION_MAJOR}) - - if(NOT LLD_EXECUTABLE_NAME) - if(CI) - # Ensure we don't use a differing version of lld in CI vs clang - message(FATAL_ERROR "lld-${LLVM_VERSION_MAJOR} not found. Please make sure you have LLVM ${LLVM_VERSION_MAJOR}.x installed and set to lld-${LLVM_VERSION_MAJOR}") - endif() - - # To make it easier for contributors, allow differing versions of lld vs clang/cmake - find_program(LLD_EXECUTABLE_NAME lld) - endif() +endif() - if(NOT LLD_EXECUTABLE_NAME) - message(FATAL_ERROR "LLD not found. 
Please make sure you have LLVM ${LLVM_VERSION_MAJOR}.x installed and lld is available in your PATH as lld-${LLVM_VERSION_MAJOR}") +if(LINUX) + if(NOT ABI STREQUAL "musl") + if(ARCH STREQUAL "aarch64") + target_link_options(${bun} PUBLIC + -Wl,--wrap=fcntl64 + -Wl,--wrap=statx + ) + endif() + + if(ARCH STREQUAL "x64") + target_link_options(${bun} PUBLIC + -Wl,--wrap=fcntl + -Wl,--wrap=fcntl64 + -Wl,--wrap=fstat + -Wl,--wrap=fstat64 + -Wl,--wrap=fstatat + -Wl,--wrap=fstatat64 + -Wl,--wrap=lstat + -Wl,--wrap=lstat64 + -Wl,--wrap=mknod + -Wl,--wrap=mknodat + -Wl,--wrap=stat + -Wl,--wrap=stat64 + -Wl,--wrap=statx + ) endif() - # normalize to basename so it can be used with -fuse-ld - get_filename_component(LLD_NAME ${LLD_EXECUTABLE_NAME} NAME CACHE) - message(STATUS "Using linker: ${LLD_NAME} (${LLD_EXECUTABLE_NAME})") - elseif(NOT DEFINED LLD_NAME) - set(LLD_NAME lld-${LLVM_VERSION_MAJOR}) - endif() - - if (IS_ARM64) - set(ARCH_WRAP_FLAGS - -Wl,--wrap=fcntl64 - -Wl,--wrap=statx - ) - elseif(IS_X86_64) - set(ARCH_WRAP_FLAGS - -Wl,--wrap=fcntl - -Wl,--wrap=fcntl64 - -Wl,--wrap=fstat - -Wl,--wrap=fstat64 - -Wl,--wrap=fstatat - -Wl,--wrap=fstatat64 - -Wl,--wrap=lstat - -Wl,--wrap=lstat64 - -Wl,--wrap=mknod - -Wl,--wrap=mknodat - -Wl,--wrap=stat - -Wl,--wrap=stat64 - -Wl,--wrap=statx - ) - endif() - - if (NOT IS_MUSL) - set(ABI_WRAP_FLAGS + target_link_options(${bun} PUBLIC -Wl,--wrap=cosf -Wl,--wrap=exp -Wl,--wrap=expf @@ -920,26 +893,37 @@ else() -Wl,--wrap=sinf -Wl,--wrap=tanf ) + endif() + + if(NOT ABI STREQUAL "musl") + target_link_options(${bun} PUBLIC + -static-libstdc++ + -static-libgcc + ) else() - set(ABI_WRAP_FLAGS + target_link_options(${bun} PUBLIC + -lstdc++ + -lgcc ) endif() target_link_options(${bun} PUBLIC - -fuse-ld=${LLD_NAME} + --ld-path=${LLD_PROGRAM} -fno-pic - -static-libstdc++ - -static-libgcc -Wl,-no-pie -Wl,-icf=safe -Wl,--as-needed -Wl,--gc-sections -Wl,-z,stack-size=12800000 - ${ARCH_WRAP_FLAGS} - ${ABI_WRAP_FLAGS} -Wl,--compress-debug-sections=zlib -Wl,-z,lazy -Wl,-z,norelro + -Wl,-z,combreloc + -Wl,--no-eh-frame-hdr + -Wl,--sort-section=name + -Wl,--hash-style=gnu + -Wl,--build-id=sha1 # Better for debugging than default + -Wl,-Map=${bun}.linker-map ) endif() @@ -1079,6 +1063,18 @@ endif() # --- Packaging --- if(NOT BUN_CPP_ONLY) + set(CMAKE_STRIP_FLAGS "") + if(APPLE) + # We do not build with exceptions enabled. These are generated by lolhtml + # and other dependencies. We build lolhtml with abort on panic, so it + # shouldn't be including these in the first place. + set(CMAKE_STRIP_FLAGS --remove-section=__TEXT,__eh_frame --remove-section=__TEXT,__unwind_info --remove-section=__TEXT,__gcc_except_tab) + elseif(LINUX AND NOT ABI STREQUAL "musl") + # When you use llvm-strip to do this, it doesn't delete it from the binary and instead keeps it as [LOAD #2 [R]] + # So, we must use GNU strip to do this. 
+ set(CMAKE_STRIP_FLAGS -R .eh_frame -R .gcc_except_table) + endif() + if(bunStrip) register_command( TARGET @@ -1090,6 +1086,7 @@ if(NOT BUN_CPP_ONLY) COMMAND ${CMAKE_STRIP} ${bunExe} + ${CMAKE_STRIP_FLAGS} --strip-all --strip-debug --discard-all @@ -1165,10 +1162,12 @@ if(NOT BUN_CPP_ONLY) endif() if(CI) + set(bunTriplet bun-${OS}-${ARCH}) + if(ABI STREQUAL "musl") + set(bunTriplet ${bunTriplet}-musl) + endif() if(ENABLE_BASELINE) - set(bunTriplet bun-${OS}-${ARCH}-baseline) - else() - set(bunTriplet bun-${OS}-${ARCH}) + set(bunTriplet ${bunTriplet}-baseline) endif() string(REPLACE bun ${bunTriplet} bunPath ${bun}) set(bunFiles ${bunExe} features.json) @@ -1177,6 +1176,12 @@ if(NOT BUN_CPP_ONLY) elseif(APPLE) list(APPEND bunFiles ${bun}.dSYM) endif() + + if(APPLE OR LINUX) + list(APPEND bunFiles ${bun}.linker-map) + endif() + + register_command( TARGET ${bun} diff --git a/cmake/targets/BuildLolHtml.cmake b/cmake/targets/BuildLolHtml.cmake index 9a02362723960e..aeac571321c24d 100644 --- a/cmake/targets/BuildLolHtml.cmake +++ b/cmake/targets/BuildLolHtml.cmake @@ -26,6 +26,13 @@ if(RELEASE) list(APPEND LOLHTML_BUILD_ARGS --release) endif() +# Windows requires unwind tables, apparently. +if (NOT WIN32) + # The encoded escape sequences are intentional. They're how you delimit multiple arguments in a single environment variable. + # Also add rust optimization flag for smaller binary size, but not huge speed penalty. + set(RUSTFLAGS "-Cpanic=abort-Cdebuginfo=0-Cforce-unwind-tables=no-Copt-level=s") +endif() + register_command( TARGET lolhtml @@ -37,6 +44,11 @@ register_command( ${LOLHTML_BUILD_ARGS} ARTIFACTS ${LOLHTML_LIBRARY} + ENVIRONMENT + CARGO_TERM_COLOR=always + CARGO_TERM_VERBOSE=true + CARGO_TERM_DIAGNOSTIC=true + CARGO_ENCODED_RUSTFLAGS=${RUSTFLAGS} ) target_link_libraries(${bun} PRIVATE ${LOLHTML_LIBRARY}) diff --git a/cmake/toolchains/linux-aarch64-musl.cmake b/cmake/toolchains/linux-aarch64-musl.cmake new file mode 100644 index 00000000000000..e4a33f709e88cd --- /dev/null +++ b/cmake/toolchains/linux-aarch64-musl.cmake @@ -0,0 +1,6 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR aarch64) +set(ABI musl) + +set(CMAKE_C_COMPILER_WORKS ON) +set(CMAKE_CXX_COMPILER_WORKS ON) \ No newline at end of file diff --git a/cmake/toolchains/linux-aarch64.cmake b/cmake/toolchains/linux-aarch64.cmake index bc23a063020bea..657594dae8c513 100644 --- a/cmake/toolchains/linux-aarch64.cmake +++ b/cmake/toolchains/linux-aarch64.cmake @@ -1,5 +1,6 @@ set(CMAKE_SYSTEM_NAME Linux) set(CMAKE_SYSTEM_PROCESSOR aarch64) +set(ABI gnu) set(CMAKE_C_COMPILER_WORKS ON) set(CMAKE_CXX_COMPILER_WORKS ON) \ No newline at end of file diff --git a/cmake/toolchains/linux-x64-baseline.cmake b/cmake/toolchains/linux-x64-baseline.cmake index f521cfcc4afc14..73d6bc61e4946e 100644 --- a/cmake/toolchains/linux-x64-baseline.cmake +++ b/cmake/toolchains/linux-x64-baseline.cmake @@ -1,6 +1,7 @@ set(CMAKE_SYSTEM_NAME Linux) set(CMAKE_SYSTEM_PROCESSOR x64) set(ENABLE_BASELINE ON) +set(ABI gnu) set(CMAKE_C_COMPILER_WORKS ON) set(CMAKE_CXX_COMPILER_WORKS ON) \ No newline at end of file diff --git a/cmake/toolchains/linux-x64-musl-baseline.cmake b/cmake/toolchains/linux-x64-musl-baseline.cmake new file mode 100644 index 00000000000000..ea28a1757ac8d0 --- /dev/null +++ b/cmake/toolchains/linux-x64-musl-baseline.cmake @@ -0,0 +1,7 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR x64) +set(ENABLE_BASELINE ON) +set(ABI musl) + +set(CMAKE_C_COMPILER_WORKS ON) +set(CMAKE_CXX_COMPILER_WORKS ON) \ No newline at end of 
file diff --git a/cmake/toolchains/linux-x64-musl.cmake b/cmake/toolchains/linux-x64-musl.cmake new file mode 100644 index 00000000000000..db4998bba9d510 --- /dev/null +++ b/cmake/toolchains/linux-x64-musl.cmake @@ -0,0 +1,6 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR x64) +set(ABI musl) + +set(CMAKE_C_COMPILER_WORKS ON) +set(CMAKE_CXX_COMPILER_WORKS ON) diff --git a/cmake/toolchains/linux-x64.cmake b/cmake/toolchains/linux-x64.cmake index 66bc7a592fd0dd..4104a1c5df7396 100644 --- a/cmake/toolchains/linux-x64.cmake +++ b/cmake/toolchains/linux-x64.cmake @@ -1,5 +1,6 @@ set(CMAKE_SYSTEM_NAME Linux) set(CMAKE_SYSTEM_PROCESSOR x64) +set(ABI gnu) set(CMAKE_C_COMPILER_WORKS ON) set(CMAKE_CXX_COMPILER_WORKS ON) diff --git a/cmake/tools/SetupGit.cmake b/cmake/tools/SetupGit.cmake index 8e0f87c3120ac5..769735b7b0946c 100644 --- a/cmake/tools/SetupGit.cmake +++ b/cmake/tools/SetupGit.cmake @@ -29,7 +29,7 @@ execute_process( ) if(NOT GIT_DIFF_RESULT EQUAL 0) - message(${WARNING} "Command failed: ${GIT_DIFF_COMMAND} ${GIT_DIFF_ERROR}") + message(WARNING "Command failed: ${GIT_DIFF_COMMAND} ${GIT_DIFF_ERROR}") return() endif() diff --git a/cmake/tools/SetupLLVM.cmake b/cmake/tools/SetupLLVM.cmake index a7046d996f4791..5e5fd3a9532a71 100644 --- a/cmake/tools/SetupLLVM.cmake +++ b/cmake/tools/SetupLLVM.cmake @@ -4,7 +4,7 @@ if(NOT ENABLE_LLVM) return() endif() -if(CMAKE_HOST_WIN32 OR CMAKE_HOST_APPLE OR IS_MUSL) +if(CMAKE_HOST_WIN32 OR CMAKE_HOST_APPLE OR ABI STREQUAL "musl") set(DEFAULT_LLVM_VERSION "18.1.8") else() set(DEFAULT_LLVM_VERSION "16.0.6") @@ -52,6 +52,7 @@ if(UNIX) /usr/lib/llvm-${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}/bin /usr/lib/llvm-${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}/bin /usr/lib/llvm-${LLVM_VERSION_MAJOR}/bin + /usr/lib/llvm${LLVM_VERSION_MAJOR}/bin ) endif() endif() @@ -108,8 +109,23 @@ else() find_llvm_command(CMAKE_CXX_COMPILER clang++) find_llvm_command(CMAKE_LINKER llvm-link) find_llvm_command(CMAKE_AR llvm-ar) - find_llvm_command(CMAKE_STRIP llvm-strip) + if (LINUX) + # On Linux, strip ends up being more useful for us. 
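# (Unlike llvm-strip, GNU strip can actually remove sections such as .eh_frame rather than leaving them behind as a LOAD segment; see CMAKE_STRIP_FLAGS in BuildBun.cmake above.)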
+ find_command( + VARIABLE + CMAKE_STRIP + COMMAND + strip + REQUIRED + ON + ) + else() + find_llvm_command(CMAKE_STRIP llvm-strip) + endif() find_llvm_command(CMAKE_RANLIB llvm-ranlib) + if(LINUX) + find_llvm_command(LLD_PROGRAM ld.lld) + endif() if(APPLE) find_llvm_command(CMAKE_DSYMUTIL dsymutil) endif() diff --git a/cmake/tools/SetupWebKit.cmake b/cmake/tools/SetupWebKit.cmake index 677c8c0ad392b3..2cdea17edc99b7 100644 --- a/cmake/tools/SetupWebKit.cmake +++ b/cmake/tools/SetupWebKit.cmake @@ -2,7 +2,7 @@ option(WEBKIT_VERSION "The version of WebKit to use") option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading") if(NOT WEBKIT_VERSION) - set(WEBKIT_VERSION 73b551e25d97e463e8e2c86cb819b8639fcbda06) + set(WEBKIT_VERSION 3bc4abf2d5875baf500b4687ef869987f6d19e00) endif() if(WEBKIT_LOCAL) @@ -63,7 +63,7 @@ else() message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}") endif() -if(IS_MUSL) +if(ABI STREQUAL "musl") set(WEBKIT_SUFFIX "-musl") endif() diff --git a/cmake/tools/SetupZig.cmake b/cmake/tools/SetupZig.cmake index d34c4b53ff22aa..e5a5e574ef99aa 100644 --- a/cmake/tools/SetupZig.cmake +++ b/cmake/tools/SetupZig.cmake @@ -11,7 +11,7 @@ if(APPLE) elseif(WIN32) set(DEFAULT_ZIG_TARGET ${DEFAULT_ZIG_ARCH}-windows-msvc) elseif(LINUX) - if(IS_MUSL) + if(ABI STREQUAL "musl") set(DEFAULT_ZIG_TARGET ${DEFAULT_ZIG_ARCH}-linux-musl) else() set(DEFAULT_ZIG_TARGET ${DEFAULT_ZIG_ARCH}-linux-gnu) diff --git a/dockerhub/alpine/Dockerfile b/dockerhub/alpine/Dockerfile index e2bbba7aa42a3d..0ef8ce5f6ed86f 100644 --- a/dockerhub/alpine/Dockerfile +++ b/dockerhub/alpine/Dockerfile @@ -1,30 +1,13 @@ -FROM alpine:3.18 AS build +FROM alpine:3.20 AS build # https://github.com/oven-sh/bun/releases ARG BUN_VERSION=latest -# TODO: Instead of downloading glibc from a third-party source, we should -# build it from source. This is a temporary solution. 
-# See: https://github.com/sgerrand/alpine-pkg-glibc - -# https://github.com/sgerrand/alpine-pkg-glibc/releases -# https://github.com/sgerrand/alpine-pkg-glibc/issues/176 -ARG GLIBC_VERSION=2.34-r0 - -# https://github.com/oven-sh/bun/issues/5545#issuecomment-1722461083 -ARG GLIBC_VERSION_AARCH64=2.26-r1 - -RUN apk --no-cache add \ - ca-certificates \ - curl \ - dirmngr \ - gpg \ - gpg-agent \ - unzip \ +RUN apk --no-cache add ca-certificates curl dirmngr gpg gpg-agent unzip \ && arch="$(apk --print-arch)" \ && case "${arch##*-}" in \ - x86_64) build="x64-baseline";; \ - aarch64) build="aarch64";; \ + x86_64) build="x64-musl-baseline";; \ + aarch64) build="aarch64-musl";; \ *) echo "error: unsupported architecture: $arch"; exit 1 ;; \ esac \ && version="$BUN_VERSION" \ @@ -59,37 +42,9 @@ RUN apk --no-cache add \ && unzip "bun-linux-$build.zip" \ && mv "bun-linux-$build/bun" /usr/local/bin/bun \ && rm -f "bun-linux-$build.zip" SHASUMS256.txt.asc SHASUMS256.txt \ - && chmod +x /usr/local/bin/bun \ - && cd /tmp \ - && case "${arch##*-}" in \ - x86_64) curl "https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-${GLIBC_VERSION}.apk" \ - -fsSLO \ - --compressed \ - --retry 5 \ - || (echo "error: failed to download: glibc v${GLIBC_VERSION}" && exit 1) \ - && mv "glibc-${GLIBC_VERSION}.apk" glibc.apk \ - && curl "https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-bin-${GLIBC_VERSION}.apk" \ - -fsSLO \ - --compressed \ - --retry 5 \ - || (echo "error: failed to download: glibc-bin v${GLIBC_VERSION}" && exit 1) \ - && mv "glibc-bin-${GLIBC_VERSION}.apk" glibc-bin.apk ;; \ - aarch64) curl "https://raw.githubusercontent.com/squishyu/alpine-pkg-glibc-aarch64-bin/master/glibc-${GLIBC_VERSION_AARCH64}.apk" \ - -fsSLO \ - --compressed \ - --retry 5 \ - || (echo "error: failed to download: glibc v${GLIBC_VERSION_AARCH64}" && exit 1) \ - && mv "glibc-${GLIBC_VERSION_AARCH64}.apk" glibc.apk \ - && curl "https://raw.githubusercontent.com/squishyu/alpine-pkg-glibc-aarch64-bin/master/glibc-bin-${GLIBC_VERSION_AARCH64}.apk" \ - -fsSLO \ - --compressed \ - --retry 5 \ - || (echo "error: failed to download: glibc-bin v${GLIBC_VERSION_AARCH64}" && exit 1) \ - && mv "glibc-bin-${GLIBC_VERSION_AARCH64}.apk" glibc-bin.apk ;; \ - *) echo "error: unsupported architecture '$arch'"; exit 1 ;; \ - esac + && chmod +x /usr/local/bin/bun -FROM alpine:3.18 +FROM alpine:3.20 # Disable the runtime transpiler cache by default inside Docker containers. # On ephemeral containers, the cache is not useful @@ -107,10 +62,8 @@ COPY docker-entrypoint.sh /usr/local/bin/ RUN --mount=type=bind,from=build,source=/tmp,target=/tmp \ addgroup -g 1000 bun \ && adduser -u 1000 -G bun -s /bin/sh -D bun \ - && apk --no-cache --force-overwrite --allow-untrusted add \ - /tmp/glibc.apk \ - /tmp/glibc-bin.apk \ && ln -s /usr/local/bin/bun /usr/local/bin/bunx \ + && apk add libgcc libstdc++ \ && which bun \ && which bunx \ && bun --version diff --git a/docs/cli/test.md b/docs/cli/test.md index 3a9ec639a5d930..8ff796f5b02d05 100644 --- a/docs/cli/test.md +++ b/docs/cli/test.md @@ -55,6 +55,49 @@ $ bun test ./test/specific-file.test.ts The test runner runs all tests in a single process. It loads all `--preload` scripts (see [Lifecycle](https://bun.sh/docs/test/lifecycle) for details), then runs all tests. If a test fails, the test runner will exit with a non-zero exit code. +## CI/CD integration + +`bun test` supports a variety of CI/CD integrations. 
+ +### GitHub Actions + +`bun test` automatically detects if it's running inside GitHub Actions and will emit GitHub Actions annotations to the console directly. + +No configuration is needed, other than installing `bun` in the workflow and running `bun test`. + +#### How to install `bun` in a GitHub Actions workflow + +To use `bun test` in a GitHub Actions workflow, add the following step: + +```yaml +jobs: + build: + name: build-app + runs-on: ubuntu-latest + steps: + - name: Install bun + uses: oven-sh/setup-bun + - name: Install dependencies # (assuming your project has dependencies) + run: bun install # You can use npm/yarn/pnpm instead if you prefer + - name: Run tests + run: bun test +``` + +From there, you'll get GitHub Actions annotations. + +### JUnit XML reports (GitLab, etc.) + +To use `bun test` with a JUnit XML reporter, you can use the `--reporter=junit` in combination with `--reporter-outfile`. + +```sh +$ bun test --reporter=junit --reporter-outfile=./bun.xml +``` + +This will continue to output to stdout/stderr as usual, and also write a JUnit +XML report to the given path at the very end of the test run. + +JUnit XML is a popular format for reporting test results in CI/CD pipelines. + ## Timeouts Use the `--timeout` flag to specify a _per-test_ timeout in milliseconds. If a test times out, it will be marked as failed. The default value is `5000`. diff --git a/docs/dev/bundev.md b/docs/dev/bundev.md deleted file mode 100644 index baccf7658ab27f..00000000000000 --- a/docs/dev/bundev.md +++ /dev/null @@ -1,11 +0,0 @@ -- pages -- auto-bundle dependencies -- pages is function that returns a list of pages? -- plugins for svelte and vue -- custom loaders -- HMR -- server endpoints - -```ts -Bun.serve({}); -``` diff --git a/docs/dev/cra.md b/docs/dev/cra.md deleted file mode 100644 index 8eb86871503342..00000000000000 --- a/docs/dev/cra.md +++ /dev/null @@ -1,31 +0,0 @@ -To create a new React app: - -```bash -$ bun create react ./app -$ cd app -$ bun dev # start dev server -``` - -To use an existing React app: - -```bash -$ bun add -d react-refresh # install React Fast Refresh -$ bun bun ./src/index.js # generate a bundle for your entry point(s) -$ bun dev # start the dev server -``` - -From there, Bun relies on the filesystem for mapping dev server paths to source files. All URL paths are relative to the project root (where `package.json` is located). - -Here are examples of routing source code file paths: - -| Dev Server URL | File Path (relative to cwd) | -| -------------------------- | --------------------------- | -| /src/components/Button.tsx | src/components/Button.tsx | -| /src/index.tsx | src/index.tsx | -| /pages/index.js | pages/index.js | - -You do not need to include file extensions in `import` paths. CommonJS-style import paths without the file extension work. - -You can override the public directory by passing `--public-dir="path-to-folder"`. - -If no directory is specified and `./public/` doesn’t exist, Bun will try `./static/`. If `./static/` does not exist, but won’t serve from a public directory. If you pass `--public-dir=./` Bun will serve from the current directory, but it will check the current directory last instead of first. diff --git a/docs/dev/css.md b/docs/dev/css.md deleted file mode 100644 index 53ebc6c0666f77..00000000000000 --- a/docs/dev/css.md +++ /dev/null @@ -1,77 +0,0 @@ -## With `bun dev` - -When importing CSS in JavaScript-like loaders, CSS is treated special. 
- -By default, Bun will transform a statement like this: - -```js -import "../styles/global.css"; -``` - -### When `platform` is `browser` - -```js -globalThis.document?.dispatchEvent( - new CustomEvent("onimportcss", { - detail: "http://localhost:3000/styles/globals.css", - }), -); -``` - -An event handler for turning that into a `` is automatically registered when HMR is enabled. That event handler can be turned off either in a framework’s `package.json` or by setting `globalThis["Bun_disableCSSImports"] = true;` in client-side code. Additionally, you can get a list of every .css file imported this way via `globalThis["__BUN"].allImportedStyles`. - -### When `platform` is `bun` - -```js -//@import url("http://localhost:3000/styles/globals.css"); -``` - -Additionally, Bun exposes an API for SSR/SSG that returns a flat list of URLs to css files imported. That function is `Bun.getImportedStyles()`. - -```ts -// This specifically is for "framework" in package.json when loaded via `bun dev` -// This API needs to be changed somewhat to work more generally with Bun.js -// Initially, you could only use Bun.js through `bun dev` -// and this API was created at that time -addEventListener("fetch", async (event: FetchEvent) => { - let route = Bun.match(event); - const App = await import("pages/_app"); - - // This returns all .css files that were imported in the line above. - // It’s recursive, so any file that imports a CSS file will be included. - const appStylesheets = bun.getImportedStyles(); - - // ...rest of code -}); -``` - -This is useful for preventing flash of unstyled content. - -## With `bun bun` - -Bun bundles `.css` files imported via `@import` into a single file. It doesn’t auto-prefix or minify CSS today. Multiple `.css` files imported in one JavaScript file will _not_ be bundled into one file. You’ll have to import those from a `.css` file. - -This input: - -```css -@import url("./hi.css"); -@import url("./hello.css"); -@import url("./yo.css"); -``` - -Becomes: - -```css -/* hi.css */ -/* ...contents of hi.css */ -/* hello.css */ -/* ...contents of hello.css */ -/* yo.css */ -/* ...contents of yo.css */ -``` - -## CSS runtime - -To support hot CSS reloading, Bun inserts `@supports` annotations into CSS that tag which files a stylesheet is composed of. Browsers ignore this, so it doesn’t impact styles. - -By default, Bun’s runtime code automatically listens to `onimportcss` and will insert the `event.detail` into a `` if there is no existing `link` tag with that stylesheet. That’s how Bun’s equivalent of `style-loader` works. diff --git a/docs/dev/discord.md b/docs/dev/discord.md deleted file mode 100644 index d3e9c5a2b7ab18..00000000000000 --- a/docs/dev/discord.md +++ /dev/null @@ -1,26 +0,0 @@ -## Creating a Discord bot with Bun - -Discord bots perform actions in response to _application commands_. There are 3 types of commands accessible in different interfaces: the chat input, a message's context menu (top-right menu or right-clicking in a message), and a user's context menu (right-clicking on a user). - -To get started you can use the interactions template: - -```bash -bun create discord-interactions my-interactions-bot -cd my-interactions-bot -``` - -If you don't have a Discord bot/application yet, you can create one [here (https://discord.com/developers/applications/me)](https://discord.com/developers/applications/me). 
-
-Invite bot to your server by visiting `https://discord.com/api/oauth2/authorize?client_id=&scope=bot%20applications.commands`
-
-Afterwards you will need to get your bot's token, public key, and application id from the application page and put them into `.env.example` file
-
-Then you can run the http server that will handle your interactions:
-
-```bash
-$ bun install
-$ mv .env.example .env
-$ bun run.js # listening on port 1337
-```
-
-Discord does not accept an insecure HTTP server, so you will need to provide an SSL certificate or put the interactions server behind a secure reverse proxy. For development, you can use ngrok/cloudflare tunnel to expose local ports as secure URL.
diff --git a/docs/guides/http/cluster.md b/docs/guides/http/cluster.md
index 7d5e8b992d0c07..c434337d79f702 100644
--- a/docs/guides/http/cluster.md
+++ b/docs/guides/http/cluster.md
@@ -63,4 +63,4 @@ process.on("exit", kill);
 
 ---
 
-At the time of writing, Bun hasn't implemented the `node:cluster` module yet, but this is a faster, simple, and limited alternative. We will also implement `node:cluster` in the future.
+Bun has also implemented the `node:cluster` module, but this is a faster, simpler, and more limited alternative.
diff --git a/docs/guides/test/todo-tests.md b/docs/guides/test/todo-tests.md
index 577843206c6bad..da9bd9e70c7897 100644
--- a/docs/guides/test/todo-tests.md
+++ b/docs/guides/test/todo-tests.md
@@ -44,10 +44,17 @@ test.todo("unimplemented feature", () => {
 
 ---
 
-If an implementation is provided, it will be executed and _expected to fail_ by test runner! If a todo test passes, the `bun test` run will return a non-zero exit code to signal the failure.
+If an implementation is provided, it will not be run unless the `--todo` flag is passed. If the `--todo` flag is passed, the test will be executed and _expected to fail_ by the test runner! If a todo test passes, the `bun test` run will return a non-zero exit code to signal the failure.
 
 ```sh
-$ bun test
+$ bun test --todo
+my.test.ts:
+✗ unimplemented feature
+  ^ this test is marked as todo but passes. Remove `.todo` or check that test is correct.
+
+ 0 pass
+ 1 fail
+ 1 expect() calls
 $ echo $?
 1 # this is the exit code of the previous command
 ```
diff --git a/docs/project/building-windows.md b/docs/project/building-windows.md
index 041207c9ff25cd..8831bd5c448433 100644
--- a/docs/project/building-windows.md
+++ b/docs/project/building-windows.md
@@ -73,15 +73,10 @@ After Visual Studio, you need the following:
 **Note** – The Zig compiler is automatically downloaded, installed, and updated by the building process.
 {% /callout %}
 
-[WinGet](https://learn.microsoft.com/windows/package-manager/winget) or [Scoop](https://scoop.sh) can be used to install these remaining tools easily:
+[Scoop](https://scoop.sh) can be used to install these remaining tools easily.
 
 {% codetabs group="a" %}
 
-```ps1#WinGet
-## Select "Add LLVM to the system PATH for all users" in the LLVM installer
-> winget install -i LLVM.LLVM -v 18.1.8 && winget install GoLang.Go Rustlang.Rustup NASM.NASM StrawberryPerl.StrawberryPerl RubyInstallerTeam.Ruby.3.2 OpenJS.NodeJS.LTS Ccache.Ccache
-```
-
 ```ps1#Scoop
 > irm https://get.scoop.sh | iex
 > scoop install nodejs-lts go rust nasm ruby perl ccache
@@ -91,20 +86,16 @@ After Visual Studio, you need the following:
 
 {% /codetabs %}
 
-If you intend on building WebKit locally (optional), you should install these packages:
-
-{% codetabs group="a" %}
+{% callout %}
+Please do not use WinGet or other package managers for these, as you will likely install Strawberry Perl instead of a more minimal installation of Perl. Strawberry Perl includes many other utilities that get installed into `$Env:PATH` that will conflict with MSVC and break the build.
+{% /callout %}
 
-```ps1#WinGet
-> winget install ezwinports.make Cygwin.Cygwin Python.Python.3.12
-```
+If you intend on building WebKit locally (optional), you should install these packages:
 
 ```ps1#Scoop
 > scoop install make cygwin python
 ```
 
-{% /codetabs %}
-
 From here on out, it is **expected you use a PowerShell Terminal with `.\scripts\vs-shell.ps1` sourced**. This script is available in the Bun repository and can be loaded by executing it:
 
 ```ps1
diff --git a/docs/rfcs/README.md b/docs/rfcs/README.md
deleted file mode 100644
index 65ef33ead849a5..00000000000000
--- a/docs/rfcs/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# RFCs
-
-| Number | Name | Issue |
-| ------ | ---- | ----- |
diff --git a/docs/test/writing.md b/docs/test/writing.md
index ab7e99c8832dce..ef1ab57de00b8a 100644
--- a/docs/test/writing.md
+++ b/docs/test/writing.md
@@ -97,7 +97,7 @@ test.skip("wat", () => {
 
 ## `test.todo`
 
-Mark a test as a todo with `test.todo`. These tests _will_ be run, and the test runner will expect them to fail. If they pass, you will be prompted to mark it as a regular test.
+Mark a test as a todo with `test.todo`. These tests will not be run.
 
 ```ts
 import { expect, test } from "bun:test";
@@ -107,12 +107,22 @@ test.todo("fix this", () => {
 });
 ```
 
-To exclusively run tests marked as _todo_, use `bun test --todo`.
+To run todo tests and find any which are passing, use `bun test --todo`.
 
 ```sh
 $ bun test --todo
+my.test.ts:
+✗ unimplemented feature
+  ^ this test is marked as todo but passes. Remove `.todo` or check that test is correct.
+
+ 0 pass
+ 1 fail
+ 1 expect() calls
 ```
 
+With this flag, failing todo tests will not cause an error, but todo tests which pass will be marked as failing so you can remove the todo mark or
+fix the test.
+
 ## `test.only`
 
 To run a particular test or suite of tests use `test.only()` or `describe.only()`. Once declared, running `bun test --only` will only execute tests/suites that have been marked with `.only()`. Running `bun test` without the `--only` option with `test.only()` declared will result in all tests in the given suite being executed _up to_ the test with `.only()`. `describe.only()` functions the same in both execution scenarios.
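As a minimal sketch of the `.only()` semantics described above (the test names are illustrative):

```ts
import { test } from "bun:test";

test("regular test", () => {
  // With `bun test --only`, this test is skipped.
});

test.only("focused test", () => {
  // With `bun test --only`, only tests marked with `.only()` run.
});
```

Running plain `bun test` on this file still executes tests up to the `.only()` test, per the note above.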
diff --git a/package.json b/package.json index c8f3afe1543d05..51e442673031d4 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "private": true, "name": "bun", - "version": "1.1.35", + "version": "1.1.37", "workspaces": [ "./packages/bun-types" ], diff --git a/packages/bun-types/test.d.ts b/packages/bun-types/test.d.ts index dd76ecc98a0c12..6ef3a6040380f1 100644 --- a/packages/bun-types/test.d.ts +++ b/packages/bun-types/test.d.ts @@ -387,9 +387,9 @@ declare module "bun:test" { /** * Marks this test as to be written or to be fixed. * - * When a test function is passed, it will be marked as `todo` in the test results - * as long the test does not pass. When the test passes, the test will be marked as - * `fail` in the results; you will have to remove the `.todo` or check that your test + * These tests will not be executed unless the `--todo` flag is passed. With the flag, + * if the test passes, the test will be marked as `fail` in the results; you will have to + * remove the `.todo` or check that your test * is implemented correctly. * * @param label the label for the test diff --git a/packages/bun-usockets/src/crypto/openssl.c b/packages/bun-usockets/src/crypto/openssl.c index 5880fa35cc8b0e..4c4c2a76d57d6c 100644 --- a/packages/bun-usockets/src/crypto/openssl.c +++ b/packages/bun-usockets/src/crypto/openssl.c @@ -201,7 +201,7 @@ struct loop_ssl_data * us_internal_set_loop_ssl_data(struct us_internal_ssl_sock struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s, int is_client, char *ip, - int ip_length) { + int ip_length, const char* sni) { struct us_internal_ssl_socket_context_t *context = (struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s); @@ -231,6 +231,10 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s, if (is_client) { SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit); SSL_set_connect_state(s->ssl); + + if (sni) { + SSL_set_tlsext_host_name(s->ssl, sni); + } } else { SSL_set_accept_state(s->ssl); // we do not allow renegotiation on the server side (should be the default for BoringSSL, but we set to make openssl compatible) @@ -1603,6 +1607,10 @@ struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_connect_unix( socket_ext_size); } +static void ssl_on_open_without_sni(struct us_internal_ssl_socket_t *s, int is_client, char *ip, int ip_length) { + ssl_on_open(s, is_client, ip, ip_length, NULL); +} + void us_internal_ssl_socket_context_on_open( struct us_internal_ssl_socket_context_t *context, struct us_internal_ssl_socket_t *(*on_open)( @@ -1611,7 +1619,7 @@ void us_internal_ssl_socket_context_on_open( us_socket_context_on_open( 0, &context->sc, (struct us_socket_t * (*)(struct us_socket_t *, int, char *, int)) - ssl_on_open); + ssl_on_open_without_sni); context->on_open = on_open; } @@ -2005,7 +2013,30 @@ us_internal_ssl_socket_open(struct us_internal_ssl_socket_t *s, int is_client, return s; // start SSL open - return ssl_on_open(s, is_client, ip, ip_length); + return ssl_on_open(s, is_client, ip, ip_length, NULL); +} + +struct us_socket_t *us_socket_upgrade_to_tls(us_socket_r s, us_socket_context_r new_context, const char *sni) { + // Resize to tls + ext size + void** prev_ext_ptr = (void**)us_socket_ext(0, s); + void* prev_ext = *prev_ext_ptr; + struct us_internal_ssl_socket_t *socket = + (struct us_internal_ssl_socket_t *)us_socket_context_adopt_socket( + 0, new_context, s, + (sizeof(struct us_internal_ssl_socket_t) - sizeof(struct us_socket_t)) + sizeof(void*)); + 
socket->ssl = NULL; + socket->ssl_write_wants_read = 0; + socket->ssl_read_wants_write = 0; + socket->fatal_error = 0; + socket->handshake_state = HANDSHAKE_PENDING; + + void** new_ext_ptr = (void**)us_socket_ext(1, (struct us_socket_t *)socket); + *new_ext_ptr = prev_ext; + + ssl_on_open(socket, 1, NULL, 0, sni); + + + return (struct us_socket_t *)socket; } struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls( diff --git a/packages/bun-usockets/src/crypto/root_certs.cpp b/packages/bun-usockets/src/crypto/root_certs.cpp index 003685714cfb3d..f675a0ab16e80e 100644 --- a/packages/bun-usockets/src/crypto/root_certs.cpp +++ b/packages/bun-usockets/src/crypto/root_certs.cpp @@ -7,12 +7,6 @@ #include #include static const int root_certs_size = sizeof(root_certs) / sizeof(root_certs[0]); -static X509 *root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])] = { - NULL}; -static X509 *root_extra_cert_instances = {NULL}; - -static std::atomic_flag root_cert_instances_lock = ATOMIC_FLAG_INIT; -static std::atomic_bool root_cert_instances_initialized = 0; // This callback is used to avoid the default passphrase callback in OpenSSL // which will typically prompt for the passphrase. The prompting is designed @@ -78,7 +72,9 @@ us_ssl_ctx_get_X509_without_callback_from_file(const char *filename) { return NULL; } -static void us_internal_init_root_certs() { +static void us_internal_init_root_certs(X509 *root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])], X509 *&root_extra_cert_instances) { + static std::atomic_flag root_cert_instances_lock = ATOMIC_FLAG_INIT; + static std::atomic_bool root_cert_instances_initialized = 0; if (std::atomic_load(&root_cert_instances_initialized) == 1) return; @@ -123,7 +119,11 @@ extern "C" X509_STORE *us_get_default_ca_store() { return NULL; } - us_internal_init_root_certs(); + static X509 *root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])] = { + NULL}; + static X509 *root_extra_cert_instances = NULL; + + us_internal_init_root_certs(root_cert_instances, root_extra_cert_instances); // load all root_cert_instances on the default ca store for (size_t i = 0; i < root_certs_size; i++) { diff --git a/packages/bun-usockets/src/libusockets.h b/packages/bun-usockets/src/libusockets.h index d2719af2c90e7c..c32768fc2d4fdf 100644 --- a/packages/bun-usockets/src/libusockets.h +++ b/packages/bun-usockets/src/libusockets.h @@ -190,7 +190,7 @@ struct us_socket_context_options_t { }; struct us_bun_verify_error_t { - long error; + int error; const char* code; const char* reason; }; @@ -338,6 +338,8 @@ struct us_loop_t *us_socket_context_loop(int ssl, us_socket_context_r context) n * Used mainly for "socket upgrades" such as when transitioning from HTTP to WebSocket. */ struct us_socket_t *us_socket_context_adopt_socket(int ssl, us_socket_context_r context, us_socket_r s, int ext_size); +struct us_socket_t *us_socket_upgrade_to_tls(us_socket_r s, us_socket_context_r new_context, const char *sni); + /* Create a child socket context which acts much like its own socket context with its own callbacks yet still relies on the * parent socket context for some shared resources. Child socket contexts should be used together with socket adoptions and nothing else. 
 */
 struct us_socket_context_t *us_create_child_socket_context(int ssl, us_socket_context_r context, int context_ext_size);
diff --git a/packages/bun-uws/src/HttpParser.h b/packages/bun-uws/src/HttpParser.h
index 457b6655724110..3eb88e353e4b0f 100644
--- a/packages/bun-uws/src/HttpParser.h
+++ b/packages/bun-uws/src/HttpParser.h
@@ -613,7 +613,9 @@ namespace uWS
          * ought to be handled as an error. */
         std::string_view transferEncodingString = req->getHeader("transfer-encoding");
         std::string_view contentLengthString = req->getHeader("content-length");
-        if (transferEncodingString.length() && contentLengthString.length()) {
+        auto transferEncodingStringLen = transferEncodingString.length();
+        auto contentLengthStringLen = contentLengthString.length();
+        if (transferEncodingStringLen && contentLengthStringLen) {
             /* Returning fullptr is the same as calling the errorHandler */
             /* We could be smart and set an error in the context along with this, to indicate what
              * http error response we might want to return */
@@ -623,6 +625,15 @@ namespace uWS
         /* Parse query */
         const char *querySeparatorPtr = (const char *) memchr(req->headers->value.data(), '?', req->headers->value.length());
         req->querySeparator = (unsigned int) ((querySeparatorPtr ? querySeparatorPtr : req->headers->value.data() + req->headers->value.length()) - req->headers->value.data());
+
+        // let's check if the content length is valid before calling requestHandler
+        if(contentLengthStringLen) {
+            remainingStreamingBytes = toUnsignedInteger(contentLengthString);
+            if (remainingStreamingBytes == UINT64_MAX) {
+                /* Parser error */
+                return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
+            }
+        }
 
         /* If returned socket is not what we put in we need
          * to break here as we either have upgraded to
@@ -642,7 +653,7 @@ namespace uWS
             /* RFC 9112 6.3
              * If a message is received with both a Transfer-Encoding and a Content-Length header field,
              * the Transfer-Encoding overrides the Content-Length. */
-            if (transferEncodingString.length()) {
+            if (transferEncodingStringLen) {
 
                 /* If a proxy sent us the transfer-encoding header that 100% means it must be chunked or else the proxy is
                  * not RFC 9112 compliant. Therefore it is always better to assume this is the case, since that entirely eliminates
@@ -665,6 +676,7 @@ namespace uWS
                         dataHandler(user, chunk, chunk.length() == 0);
                     }
                     if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
+                        // TODO: what happens if we already responded?
                         return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
                     }
                     unsigned int consumed = (length - (unsigned int) dataToConsume.length());
@@ -672,13 +684,8 @@ namespace uWS
                 length = (unsigned int) dataToConsume.length();
                 consumedTotal += consumed;
             }
-        } else if (contentLengthString.length()) {
-            remainingStreamingBytes = toUnsignedInteger(contentLengthString);
-            if (remainingStreamingBytes == UINT64_MAX) {
-                /* Parser error */
-                return {HTTP_ERROR_400_BAD_REQUEST, FULLPTR};
-            }
-
+        } else if (contentLengthStringLen) {
+
             if (!CONSUME_MINIMALLY) {
                 unsigned int emittable = (unsigned int) std::min(remainingStreamingBytes, length);
                 dataHandler(user, std::string_view(data, emittable), emittable == remainingStreamingBytes);
diff --git a/scripts/agent.mjs b/scripts/agent.mjs
new file mode 100755
index 00000000000000..84af695374742b
--- /dev/null
+++ b/scripts/agent.mjs
@@ -0,0 +1,246 @@
+#!/usr/bin/env node
+
+// An agent that starts buildkite-agent and runs other services.
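+// Usage (mirroring the positional arguments handled in main() below):
+//   node agent.mjs install   # register buildkite-agent as a system service
+//   node agent.mjs start     # start the agent and begin accepting jobs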
+ +import { join } from "node:path"; +import { realpathSync } from "node:fs"; +import { + isWindows, + getOs, + getArch, + getKernel, + getAbi, + getAbiVersion, + getDistro, + getDistroVersion, + getHostname, + getCloud, + getCloudMetadataTag, + which, + getEnv, + writeFile, + spawnSafe, +} from "./utils.mjs"; +import { parseArgs } from "node:util"; + +/** + * @param {"install" | "start"} action + */ +async function doBuildkiteAgent(action) { + const username = "buildkite-agent"; + const command = which("buildkite-agent", { required: true }); + + let homePath, cachePath, logsPath, agentLogPath, pidPath; + if (isWindows) { + homePath = "C:\\buildkite-agent"; + cachePath = join(homePath, "cache"); + logsPath = join(homePath, "logs"); + agentLogPath = join(logsPath, "buildkite-agent.log"); + } else { + homePath = "/var/lib/buildkite-agent"; + cachePath = "/var/cache/buildkite-agent"; + logsPath = "/var/log/buildkite-agent"; + agentLogPath = join(logsPath, "buildkite-agent.log"); + pidPath = join(logsPath, "buildkite-agent.pid"); + } + + async function install() { + const command = process.execPath; + const args = [realpathSync(process.argv[1]), "start"]; + + if (isWindows) { + const serviceCommand = [ + "New-Service", + "-Name", + "buildkite-agent", + "-StartupType", + "Automatic", + "-BinaryPathName", + `${escape(command)} ${escape(args.map(escape).join(" "))}`, + ]; + await spawnSafe(["powershell", "-Command", serviceCommand.join(" ")], { stdio: "inherit" }); + } + + if (isOpenRc()) { + const servicePath = "/etc/init.d/buildkite-agent"; + const service = `#!/sbin/openrc-run + name="buildkite-agent" + description="Buildkite Agent" + command=${escape(command)} + command_args=${escape(args.map(escape).join(" "))} + command_user=${escape(username)} + + pidfile=${escape(pidPath)} + start_stop_daemon_args=" \ + --background \ + --make-pidfile \ + --stdout ${escape(agentLogPath)} \ + --stderr ${escape(agentLogPath)}" + + depend() { + need net + use dns logger + } + `; + writeFile(servicePath, service, { mode: 0o755 }); + await spawnSafe(["rc-update", "add", "buildkite-agent", "default"], { stdio: "inherit", privileged: true }); + } + + if (isSystemd()) { + const servicePath = "/etc/systemd/system/buildkite-agent.service"; + const service = ` + [Unit] + Description=Buildkite Agent + After=syslog.target + After=network-online.target + + [Service] + Type=simple + User=${username} + ExecStart=${escape(command)} ${args.map(escape).join(" ")} + RestartSec=5 + Restart=on-failure + KillMode=process + + [Journal] + Storage=persistent + StateDirectory=${escape(agentLogPath)} + + [Install] + WantedBy=multi-user.target + `; + writeFile(servicePath, service); + await spawnSafe(["systemctl", "daemon-reload"], { stdio: "inherit", privileged: true }); + await spawnSafe(["systemctl", "enable", "buildkite-agent"], { stdio: "inherit", privileged: true }); + } + } + + async function start() { + const cloud = await getCloud(); + + let token = getEnv("BUILDKITE_AGENT_TOKEN", false); + if (!token && cloud) { + token = await getCloudMetadataTag("buildkite:token"); + } + + let shell; + if (isWindows) { + const pwsh = which(["pwsh", "powershell"], { required: true }); + shell = `${pwsh} -Command`; + } else { + const sh = which(["bash", "sh"], { required: true }); + shell = `${sh} -c`; + } + + const flags = ["enable-job-log-tmpfile", "no-feature-reporting"]; + const options = { + "name": getHostname(), + "token": token || "xxx", + "shell": shell, + "job-log-path": logsPath, + "build-path": join(homePath, "builds"), + 
"hooks-path": join(homePath, "hooks"), + "plugins-path": join(homePath, "plugins"), + "experiment": "normalised-upload-paths,resolve-commit-after-checkout,agent-api", + }; + + let ephemeral; + if (cloud) { + const jobId = await getCloudMetadataTag("buildkite:job-uuid"); + if (jobId) { + options["acquire-job"] = jobId; + flags.push("disconnect-after-job"); + ephemeral = true; + } + } + + if (ephemeral) { + options["git-clone-flags"] = "-v --depth=1"; + options["git-fetch-flags"] = "-v --prune --depth=1"; + } else { + options["git-mirrors-path"] = join(cachePath, "git"); + } + + const tags = { + "os": getOs(), + "arch": getArch(), + "kernel": getKernel(), + "abi": getAbi(), + "abi-version": getAbiVersion(), + "distro": getDistro(), + "distro-version": getDistroVersion(), + "cloud": cloud, + }; + + if (cloud) { + const requiredTags = ["robobun", "robobun2"]; + for (const tag of requiredTags) { + const value = await getCloudMetadataTag(tag); + if (typeof value === "string") { + tags[tag] = value; + } + } + } + + options["tags"] = Object.entries(tags) + .filter(([, value]) => value) + .map(([key, value]) => `${key}=${value}`) + .join(","); + + await spawnSafe( + [ + command, + "start", + ...flags.map(flag => `--${flag}`), + ...Object.entries(options).map(([key, value]) => `--${key}=${value}`), + ], + { + stdio: "inherit", + }, + ); + } + + if (action === "install") { + await install(); + } else if (action === "start") { + await start(); + } +} + +/** + * @returns {boolean} + */ +function isSystemd() { + return !!which("systemctl"); +} + +/** + * @returns {boolean} + */ +function isOpenRc() { + return !!which("rc-service"); +} + +function escape(string) { + return JSON.stringify(string); +} + +async function main() { + const { positionals: args } = parseArgs({ + allowPositionals: true, + }); + + if (!args.length || args.includes("install")) { + console.log("Installing agent..."); + await doBuildkiteAgent("install"); + console.log("Agent installed."); + } + + if (args.includes("start")) { + console.log("Starting agent..."); + await doBuildkiteAgent("start"); + console.log("Agent started."); + } +} + +await main(); diff --git a/scripts/bootstrap.ps1 b/scripts/bootstrap.ps1 new file mode 100755 index 00000000000000..eda27d917aa8d4 --- /dev/null +++ b/scripts/bootstrap.ps1 @@ -0,0 +1,339 @@ +# Version: 4 +# A powershell script that installs the dependencies needed to build and test Bun. +# This should work on Windows 10 or newer. + +# If this script does not work on your machine, please open an issue: +# https://github.com/oven-sh/bun/issues + +# If you need to make a change to this script, such as upgrading a dependency, +# increment the version comment to indicate that a new image should be built. +# Otherwise, the existing image will be retroactively updated. + +param ( + [Parameter(Mandatory = $false)] + [switch]$CI = $false, + [Parameter(Mandatory = $false)] + [switch]$Optimize = $CI +) + +function Execute-Command { + $command = $args -join ' ' + Write-Output "$ $command" + + & $args[0] $args[1..$args.Length] + + if ((-not $?) 
-or ($LASTEXITCODE -ne 0 -and $null -ne $LASTEXITCODE)) { + throw "Command failed: $command" + } +} + +function Which { + param ([switch]$Required = $false) + + foreach ($command in $args) { + $result = Get-Command $command -ErrorAction SilentlyContinue + if ($result -and $result.Path) { + return $result.Path + } + } + + if ($Required) { + $commands = $args -join ', ' + throw "Command not found: $commands" + } +} + +function Install-Chocolatey { + if (Which choco) { + return + } + + Write-Output "Installing Chocolatey..." + [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072 + iex -Command ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + Refresh-Path +} + +function Refresh-Path { + $paths = @( + [System.Environment]::GetEnvironmentVariable("Path", "Machine"), + [System.Environment]::GetEnvironmentVariable("Path", "User"), + [System.Environment]::GetEnvironmentVariable("Path", "Process") + ) + $uniquePaths = $paths | + Where-Object { $_ } | + ForEach-Object { $_.Split(';', [StringSplitOptions]::RemoveEmptyEntries) } | + Where-Object { $_ -and (Test-Path $_) } | + Select-Object -Unique + $env:Path = ($uniquePaths -join ';').TrimEnd(';') + + if ($env:ChocolateyInstall) { + Import-Module $env:ChocolateyInstall\helpers\chocolateyProfile.psm1 -ErrorAction SilentlyContinue + } +} + +function Add-To-Path { + $absolutePath = Resolve-Path $args[0] + $currentPath = [Environment]::GetEnvironmentVariable("Path", "Machine") + if ($currentPath -like "*$absolutePath*") { + return + } + + $newPath = $currentPath.TrimEnd(";") + ";" + $absolutePath + if ($newPath.Length -ge 2048) { + Write-Warning "PATH is too long, removing duplicate and old entries..." + + $paths = $currentPath.Split(';', [StringSplitOptions]::RemoveEmptyEntries) | + Where-Object { $_ -and (Test-Path $_) } | + Select-Object -Unique + + $paths += $absolutePath + $newPath = $paths -join ';' + while ($newPath.Length -ge 2048 -and $paths.Count -gt 1) { + $paths = $paths[1..$paths.Count] + $newPath = $paths -join ';' + } + } + + Write-Output "Adding $absolutePath to PATH..." + [Environment]::SetEnvironmentVariable("Path", $newPath, "Machine") + Refresh-Path +} + +function Install-Package { + param ( + [Parameter(Mandatory = $true, Position = 0)] + [string]$Name, + [Parameter(Mandatory = $false)] + [string]$Command = $Name, + [Parameter(Mandatory = $false)] + [string]$Version, + [Parameter(Mandatory = $false)] + [switch]$Force = $false, + [Parameter(Mandatory = $false)] + [string[]]$ExtraArgs = @() + ) + + if (-not $Force ` + -and (Which $Command) ` + -and (-not $Version -or (& $Command --version) -like "*$Version*")) { + return + } + + Write-Output "Installing $Name..." 
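+    # Choco runs unattended here: the flags below suppress prompts, and --force
+    # reinstalls when the detected version does not match a requested pin.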
+ $flags = @( + "--yes", + "--accept-license", + "--no-progress", + "--force" + ) + if ($Version) { + $flags += "--version=$Version" + } + + Execute-Command choco install $Name @flags @ExtraArgs + Refresh-Path +} + +function Install-Packages { + foreach ($package in $args) { + Install-Package -Name $package + } +} + +function Install-Common-Software { + Install-Chocolatey + Install-Pwsh + Install-Git + Install-Packages curl 7zip + Install-NodeJs + Install-Bun + Install-Cygwin + if ($CI) { + Install-Tailscale + Install-Buildkite + } +} + +function Install-Pwsh { + Install-Package powershell-core -Command pwsh + + if ($CI) { + $shellPath = (Which pwsh -Required) + New-ItemProperty ` + -Path "HKLM:\\SOFTWARE\\OpenSSH" ` + -Name DefaultShell ` + -Value $shellPath ` + -PropertyType String ` + -Force + } +} + +function Install-Git { + Install-Packages git + + if ($CI) { + Execute-Command git config --system --add safe.directory "*" + Execute-Command git config --system core.autocrlf false + Execute-Command git config --system core.eol lf + Execute-Command git config --system core.longpaths true + } +} + +function Install-NodeJs { + Install-Package nodejs -Command node -Version "22.9.0" +} + +function Install-Bun { + Install-Package bun -Version "1.1.30" +} + +function Install-Cygwin { + Install-Package cygwin + Add-To-Path "C:\tools\cygwin\bin" +} + +function Install-Tailscale { + Install-Package tailscale +} + +function Install-Buildkite { + if (Which buildkite-agent) { + return + } + + Write-Output "Installing Buildkite agent..." + $env:buildkiteAgentToken = "xxx" + iex ((New-Object System.Net.WebClient).DownloadString("https://raw.githubusercontent.com/buildkite/agent/main/install.ps1")) + Refresh-Path +} + +function Install-Build-Essentials { + # Install-Visual-Studio + Install-Packages ` + cmake ` + make ` + ninja ` + ccache ` + python ` + golang ` + nasm ` + ruby ` + mingw + Install-Rust + Install-Llvm +} + +function Install-Visual-Studio { + $components = @( + "Microsoft.VisualStudio.Workload.NativeDesktop", + "Microsoft.VisualStudio.Component.Windows10SDK.18362", + "Microsoft.VisualStudio.Component.Windows11SDK.22000", + "Microsoft.VisualStudio.Component.Windows11Sdk.WindowsPerformanceToolkit", + "Microsoft.VisualStudio.Component.VC.ASAN", # C++ AddressSanitizer + "Microsoft.VisualStudio.Component.VC.ATL", # C++ ATL for latest v143 build tools (x86 & x64) + "Microsoft.VisualStudio.Component.VC.DiagnosticTools", # C++ Diagnostic Tools + "Microsoft.VisualStudio.Component.VC.CLI.Support", # C++/CLI support for v143 build tools (Latest) + "Microsoft.VisualStudio.Component.VC.CoreIde", # C++ core features + "Microsoft.VisualStudio.Component.VC.Redist.14.Latest" # C++ 2022 Redistributable Update + ) + + $arch = (Get-WmiObject Win32_Processor).Architecture + if ($arch -eq 9) { + $components += @( + "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", # MSVC v143 build tools (x86 & x64) + "Microsoft.VisualStudio.Component.VC.Modules.x86.x64" # MSVC v143 C++ Modules for latest v143 build tools (x86 & x64) + ) + } elseif ($arch -eq 5) { + $components += @( + "Microsoft.VisualStudio.Component.VC.Tools.ARM64", # MSVC v143 build tools (ARM64) + "Microsoft.VisualStudio.Component.UWP.VC.ARM64" # C++ Universal Windows Platform support for v143 build tools (ARM64/ARM64EC) + ) + } + + $packageParameters = $components | ForEach-Object { "--add $_" } + Install-Package visualstudio2022community ` + -ExtraArgs "--package-parameters '--add Microsoft.VisualStudio.Workload.NativeDesktop --includeRecommended 
--includeOptional'" +} + +function Install-Rust { + if (Which rustc) { + return + } + + Write-Output "Installing Rust..." + $rustupInit = "$env:TEMP\rustup-init.exe" + (New-Object System.Net.WebClient).DownloadFile("https://win.rustup.rs/", $rustupInit) + Execute-Command $rustupInit -y + Add-To-Path "$env:USERPROFILE\.cargo\bin" +} + +function Install-Llvm { + Install-Package llvm ` + -Command clang-cl ` + -Version "18.1.8" + Add-To-Path "C:\Program Files\LLVM\bin" +} + +function Optimize-System { + Disable-Windows-Defender + Disable-Windows-Threat-Protection + Disable-Windows-Services + Disable-Power-Management + Uninstall-Windows-Defender +} + +function Disable-Windows-Defender { + Write-Output "Disabling Windows Defender..." + Set-MpPreference -DisableRealtimeMonitoring $true + Add-MpPreference -ExclusionPath "C:\", "D:\" +} + +function Disable-Windows-Threat-Protection { + $itemPath = "HKLM:\SOFTWARE\Policies\Microsoft\Windows Advanced Threat Protection" + if (Test-Path $itemPath) { + Write-Output "Disabling Windows Threat Protection..." + Set-ItemProperty -Path $itemPath -Name "ForceDefenderPassiveMode" -Value 1 -Type DWORD + } +} + +function Uninstall-Windows-Defender { + Write-Output "Uninstalling Windows Defender..." + Uninstall-WindowsFeature -Name Windows-Defender +} + +function Disable-Windows-Services { + $services = @( + "WSearch", # Windows Search + "wuauserv", # Windows Update + "DiagTrack", # Connected User Experiences and Telemetry + "dmwappushservice", # WAP Push Message Routing Service + "PcaSvc", # Program Compatibility Assistant + "SysMain" # Superfetch + ) + + foreach ($service in $services) { + Stop-Service $service -Force + Set-Service $service -StartupType Disabled + } +} + +function Disable-Power-Management { + Write-Output "Disabling power management features..." + powercfg /setactive 8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c # High performance + powercfg /change monitor-timeout-ac 0 + powercfg /change monitor-timeout-dc 0 + powercfg /change standby-timeout-ac 0 + powercfg /change standby-timeout-dc 0 + powercfg /change hibernate-timeout-ac 0 + powercfg /change hibernate-timeout-dc 0 +} + +Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass -Force +if ($Optimize) { + Optimize-System +} + +Install-Common-Software +Install-Build-Essentials + diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh index e09ef4fb6ceabc..c5f59ca1163d5d 100755 --- a/scripts/bootstrap.sh +++ b/scripts/bootstrap.sh @@ -1,4 +1,5 @@ #!/bin/sh +# Version: 5 # A script that installs the dependencies needed to build and test Bun. # This should work on macOS and Linux with a POSIX shell. @@ -7,11 +8,10 @@ # https://github.com/oven-sh/bun/issues # If you need to make a change to this script, such as upgrading a dependency, -# increment the version number to indicate that a new image should be built. +# increment the version comment to indicate that a new image should be built. # Otherwise, the existing image will be retroactively updated. -v="3" + pid=$$ -script="$(realpath "$0")" print() { echo "$@" @@ -24,28 +24,41 @@ error() { } execute() { - print "$ $@" >&2 - if ! "$@"; then - error "Command failed: $@" - fi + print "$ $@" >&2 + if ! 
"$@"; then + error "Command failed: $@" + fi } execute_sudo() { - if [ "$sudo" = "1" ]; then + if [ "$sudo" = "1" ] || [ -z "$can_sudo" ]; then execute "$@" else - execute sudo "$@" + execute sudo -n "$@" fi } -execute_non_root() { - if [ "$sudo" = "1" ]; then - execute sudo -u "$user" "$@" +execute_as_user() { + if [ "$sudo" = "1" ] || [ "$can_sudo" = "1" ]; then + if [ -f "$(which sudo)" ]; then + execute sudo -n -u "$user" /bin/sh -c "$*" + elif [ -f "$(which doas)" ]; then + execute doas -u "$user" /bin/sh -c "$*" + elif [ -f "$(which su)" ]; then + execute su -s /bin/sh "$user" -c "$*" + else + execute /bin/sh -c "$*" + fi else - execute "$@" + execute /bin/sh -c "$*" fi } +grant_to_user() { + path="$1" + execute_sudo chown -R "$user:$group" "$path" +} + which() { command -v "$1" } @@ -73,12 +86,16 @@ fetch() { } download_file() { - url="$1" - filename="${2:-$(basename "$url")}" - path="$(mktemp -d)/$filename" + url="$1" + filename="${2:-$(basename "$url")}" + tmp="$(execute mktemp -d)" + execute chmod 755 "$tmp" - fetch "$url" > "$path" - print "$path" + path="$tmp/$filename" + fetch "$url" > "$path" + execute chmod 644 "$path" + + print "$path" } compare_version() { @@ -96,13 +113,13 @@ append_to_file() { content="$2" if ! [ -f "$file" ]; then - execute mkdir -p "$(dirname "$file")" - execute touch "$file" + execute_as_user mkdir -p "$(dirname "$file")" + execute_as_user touch "$file" fi echo "$content" | while read -r line; do if ! grep -q "$line" "$file"; then - echo "$line" >> "$file" + echo "$line" >>"$file" fi done } @@ -111,7 +128,7 @@ append_to_profile() { content="$1" profiles=".profile .zprofile .bash_profile .bashrc .zshrc" for profile in $profiles; do - file="$HOME/$profile" + file="$home/$profile" if [ "$ci" = "1" ] || [ -f "$file" ]; then append_to_file "$file" "$content" fi @@ -124,172 +141,265 @@ append_to_path() { error "Could not find directory: \"$path\"" fi - append_to_profile "export PATH=\"\$PATH\":$path" - export PATH="$PATH:$path" + append_to_profile "export PATH=\"$path:\$PATH\"" + export PATH="$path:$PATH" } -check_system() { +link_to_bin() { + path="$1" + if ! [ -d "$path" ]; then + error "Could not find directory: \"$path\"" + fi + + for file in "$path"/*; do + if [ -f "$file" ]; then + grant_to_user "$file" + execute_sudo ln -sf "$file" "/usr/bin/$(basename "$file")" + fi + done +} + +check_features() { + print "Checking features..." + + case "$CI" in + true | 1) + ci=1 + print "CI: enabled" + ;; + esac + + case "$@" in + *--ci*) + ci=1 + print "CI: enabled" + ;; + esac +} + +check_operating_system() { + print "Checking operating system..." uname="$(require uname)" - os="$($uname -s)" + os="$("$uname" -s)" case "$os" in Linux*) os="linux" ;; Darwin*) os="darwin" ;; *) error "Unsupported operating system: $os" ;; esac + print "Operating System: $os" - arch="$($uname -m)" + arch="$("$uname" -m)" case "$arch" in x86_64 | x64 | amd64) arch="x64" ;; aarch64 | arm64) arch="aarch64" ;; *) error "Unsupported architecture: $arch" ;; esac + print "Architecture: $arch" - kernel="$(uname -r)" + kernel="$("$uname" -r)" + print "Kernel: $kernel" - if [ "$os" = "darwin" ]; then + case "$os" in + linux) + if [ -f "/etc/alpine-release" ]; then + distro="alpine" + abi="musl" + alpine="$(cat /etc/alpine-release)" + if [ "$alpine" ~ "_" ]; then + release="$(echo "$alpine" | cut -d_ -f1)-edge" + else + release="$alpine" + fi + elif [ -f "/etc/os-release" ]; then + . 
/etc/os-release + if [ -n "$ID" ]; then + distro="$ID" + fi + if [ -n "$VERSION_ID" ]; then + release="$VERSION_ID" + fi + fi + ;; + darwin) sw_vers="$(which sw_vers)" if [ -f "$sw_vers" ]; then - distro="$($sw_vers -productName)" - release="$($sw_vers -productVersion)" + distro="$("$sw_vers" -productName)" + release="$("$sw_vers" -productVersion)" fi - - if [ "$arch" = "x64" ]; then + case "$arch" in + x64) sysctl="$(which sysctl)" - if [ -f "$sysctl" ] && [ "$($sysctl -n sysctl.proc_translated 2>/dev/null)" = "1" ]; then + if [ -f "$sysctl" ] && [ "$("$sysctl" -n sysctl.proc_translated 2>/dev/null)" = "1" ]; then arch="aarch64" rosetta="1" + print "Rosetta: enabled" fi - fi + ;; + esac + ;; + esac + + if [ -n "$distro" ]; then + print "Distribution: $distro $release" fi - if [ "$os" = "linux" ] && [ -f /etc/os-release ]; then - . /etc/os-release - if [ -n "$ID" ]; then - distro="$ID" + case "$os" in + linux) + ldd="$(which ldd)" + if [ -f "$ldd" ]; then + ldd_version="$($ldd --version 2>&1)" + abi_version="$(echo "$ldd_version" | grep -o -E '[0-9]+\.[0-9]+(\.[0-9]+)?' | head -n 1)" + case "$ldd_version" in + *musl*) + abi="musl" + ;; + *GNU* | *GLIBC*) + abi="gnu" + ;; + esac fi - if [ -n "$VERSION_ID" ]; then - release="$VERSION_ID" - if [ "$distro" = "alpine" ]; then - if [ "$(echo $release | grep -c '_')" = "1" ]; then - release="edge" - fi - fi + if [ -n "$abi" ]; then + print "ABI: $abi $abi_version" fi + ;; + esac +} + +check_inside_docker() { + if ! [ "$os" = "linux" ]; then + return fi + print "Checking if inside Docker..." - if [ "$os" = "linux" ]; then - rpm="$(which rpm)" - if [ -f "$rpm" ]; then - glibc="$($rpm -q glibc --queryformat '%{VERSION}\n')" - else - ldd="$(which ldd)" - awk="$(which awk)" - if [ -f "$ldd" ] && [ -f "$awk" ]; then - glibc="$($ldd --version | $awk 'NR==1{print $NF}')" - fi + if [ -f "/.dockerenv" ]; then + docker=1 + else + if [ -f "/proc/1/cgroup" ]; then + case "$(cat /proc/1/cgroup)" in + */docker/*) + docker=1 + ;; + esac fi - fi - if [ "$os" = "darwin" ]; then - brew="$(which brew)" - pm="brew" + if [ -f "/proc/self/mountinfo" ]; then + case "$(cat /proc/self/mountinfo)" in + */docker/*) + docker=1 + ;; + esac + fi fi - if [ "$os" = "linux" ]; then - apt="$(which apt-get)" - if [ -f "$apt" ]; then - pm="apt" + if [ "$docker" = "1" ]; then + print "Docker: enabled" + fi +} - else - dnf="$(which dnf)" - if [ -f "$dnf" ]; then - pm="dnf" +check_package_manager() { + print "Checking package manager..." - else - yum="$(which yum)" - if [ -f "$yum" ]; then - pm="yum" - - else - apk="$(which apk)" - if [ -f "$apk" ]; then - pm="apk" - fi - fi - fi + case "$os" in + darwin) + if ! [ -f "$(which brew)" ]; then + install_brew fi - - if [ -z "$pm" ]; then + pm="brew" + ;; + linux) + if [ -f "$(which apt-get)" ]; then + pm="apt" + elif [ -f "$(which dnf)" ]; then + pm="dnf" + elif [ -f "$(which yum)" ]; then + pm="yum" + elif [ -f "$(which apk)" ]; then + pm="apk" + else error "No package manager found. (apt, dnf, yum, apk)" fi - fi + ;; + esac + print "Package manager: $pm" + + print "Updating package manager..." + case "$pm" in + apt) + export DEBIAN_FRONTEND=noninteractive + package_manager update -y + ;; + apk) + package_manager update + ;; + esac +} + +check_user() { + print "Checking user..." if [ -n "$SUDO_USER" ]; then user="$SUDO_USER" else - whoami="$(which whoami)" - if [ -f "$whoami" ]; then - user="$($whoami)" - else - error "Could not determine the current user, set \$USER." 
- fi + id="$(require id)" + user="$("$id" -un)" + group="$("$id" -gn)" fi - - id="$(which id)" - if [ -f "$id" ] && [ "$($id -u)" = "0" ]; then - sudo=1 + if [ -z "$user" ]; then + error "Could not determine user" fi + print "User: $user" + print "Group: $group" - if [ "$CI" = "true" ]; then - ci=1 + home="$(execute_as_user echo '~')" + if [ -z "$home" ] || [ "$home" = "~" ]; then + error "Could not determine home directory for user: $user" fi + print "Home: $home" - print "System information:" - if [ -n "$distro" ]; then - print "| Distro: $distro $release" - fi - print "| Operating system: $os" - print "| Architecture: $arch" - if [ -n "$rosetta" ]; then - print "| Rosetta: true" - fi - if [ -n "$glibc" ]; then - print "| Glibc: $glibc" - fi - print "| Package manager: $pm" - print "| User: $user" - if [ -n "$sudo" ]; then - print "| Sudo: true" - fi - if [ -n "$ci" ]; then - print "| CI: true" + id="$(which id)" + if [ -f "$id" ] && [ "$($id -u)" = "0" ]; then + sudo=1 + print "Sudo: enabled" + elif [ -f "$(which sudo)" ] && [ "$(sudo -n echo 1 2>/dev/null)" = "1" ]; then + can_sudo=1 + print "Sudo: can be used" fi } package_manager() { case "$pm" in - apt) DEBIAN_FRONTEND=noninteractive \ - execute "$apt" "$@" ;; - dnf) execute dnf "$@" ;; - yum) execute "$yum" "$@" ;; + apt) + while ! sudo -n apt-get update -y; do + sleep 1 + done + execute_sudo apt-get "$@" + ;; + dnf) + case "$distro" in + rhel) + execute_sudo dnf \ + --disableplugin=subscription-manager \ + "$@" + ;; + *) + execute_sudo dnf "$@" + ;; + esac + ;; + yum) + execute_sudo yum "$@" + ;; + apk) + execute_sudo apk "$@" + ;; brew) - if ! [ -f "$brew" ]; then - install_brew - fi - execute_non_root "$brew" "$@" - ;; - apk) execute "$apk" "$@" ;; - *) error "Unsupported package manager: $pm" ;; - esac -} - -update_packages() { - case "$pm" in - apt | apk) - package_manager update - ;; + execute_as_user brew "$@" + ;; + *) + error "Unsupported package manager: $pm" + ;; esac } @@ -310,20 +420,38 @@ check_package() { install_packages() { case "$pm" in apt) - package_manager install --yes --no-install-recommends "$@" + package_manager install \ + --yes \ + --no-install-recommends \ + "$@" ;; dnf) - package_manager install --assumeyes --nodocs --noautoremove --allowerasing "$@" + package_manager install \ + --assumeyes \ + --nodocs \ + --noautoremove \ + --allowerasing \ + "$@" ;; yum) package_manager install -y "$@" ;; brew) - package_manager install --force --formula "$@" - package_manager link --force --overwrite "$@" + package_manager install \ + --force \ + --formula \ + "$@" + package_manager link \ + --force \ + --overwrite \ + "$@" ;; apk) - package_manager add "$@" + package_manager add \ + --no-cache \ + --no-interactive \ + --no-progress \ + "$@" ;; *) error "Unsupported package manager: $pm" @@ -331,24 +459,12 @@ install_packages() { esac } -get_version() { - command="$1" - path="$(which "$command")" - - if [ -f "$path" ]; then - case "$command" in - go | zig) "$path" version ;; - *) "$path" --version ;; - esac - else - print "not found" - fi -} - install_brew() { - bash="$(require bash)" - script=$(download_file "https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh") - NONINTERACTIVE=1 execute_non_root "$bash" "$script" + print "Installing Homebrew..." 
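+  # The install script is fetched once and run non-interactively as the
+  # unprivileged user, since Homebrew's installer refuses to run as root.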
+ + bash="$(require bash)" + script=$(download_file "https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh") + NONINTERACTIVE=1 execute_as_user "$bash" "$script" case "$arch" in x64) @@ -370,78 +486,166 @@ install_brew() { install_common_software() { case "$pm" in - apt) install_packages \ - apt-transport-https \ - software-properties-common - ;; - dnf) install_packages \ - dnf-plugins-core \ - tar - ;; + apt) + install_packages \ + apt-transport-https \ + software-properties-common + ;; + dnf) + install_packages \ + dnf-plugins-core + ;; + esac + + case "$distro" in + amzn) + install_packages \ + tar + ;; + rhel) + rhel_version="$(execute rpm -E %rhel)" + install_packages \ + "https://dl.fedoraproject.org/pub/epel/epel-release-latest-$rhel_version.noarch.rpm" + ;; + centos) + install_packages \ + epel-release + ;; esac + crb="$(which crb)" + if [ -f "$crb" ]; then + execute "$crb" enable + fi + install_packages \ bash \ ca-certificates \ curl \ - jq \ htop \ gnupg \ git \ unzip \ - wget \ - zip + wget install_rosetta install_nodejs install_bun + install_tailscale + install_buildkite } -install_nodejs() { - version="${1:-"22"}" - - if ! [ "$(compare_version "$glibc" "2.27")" = "1" ]; then - version="16" +nodejs_version_exact() { + # https://unofficial-builds.nodejs.org/download/release/ + if ! [ "$abi" = "musl" ] && [ -n "$abi_version" ] && ! [ "$(compare_version "$abi_version" "2.27")" = "1" ]; then + print "16.9.1" + else + print "22.9.0" fi +} +nodejs_version() { + echo "$(nodejs_version_exact)" | cut -d. -f1 +} + +install_nodejs() { case "$pm" in dnf | yum) - bash="$(require bash)" - script=$(download_file "https://rpm.nodesource.com/setup_$version.x") - execute "$bash" "$script" + bash="$(require bash)" + script=$(download_file "https://rpm.nodesource.com/setup_$(nodejs_version).x") + execute_sudo "$bash" "$script" ;; apt) - bash="$(require bash)" - script=$(download_file "https://deb.nodesource.com/setup_$version.x") - execute "$bash" "$script" + bash="$(require bash)" + script="$(download_file "https://deb.nodesource.com/setup_$(nodejs_version).x")" + execute_sudo "$bash" "$script" ;; esac - install_packages nodejs + case "$pm" in + apk) + install_packages nodejs npm + ;; + *) + install_packages nodejs + ;; + esac + + # Some distros do not install the node headers by default. 
+ # These are needed for certain FFI tests, such as: `cc.test.ts` + case "$distro" in + alpine | amzn) + install_nodejs_headers + ;; + esac +} + +install_nodejs_headers() { + headers_tar="$(download_file "https://nodejs.org/download/release/v$(nodejs_version_exact)/node-v$(nodejs_version_exact)-headers.tar.gz")" + headers_dir="$(dirname "$headers_tar")" + execute tar -xzf "$headers_tar" -C "$headers_dir" + headers_include="$headers_dir/node-v$(nodejs_version_exact)/include" + execute_sudo cp -R "$headers_include/" "/usr" } install_bun() { - if [ "$os" = "linux" ] && [ "$distro" = "alpine" ] && [ "$arch" = "aarch64" ]; then - mkdir -p "$HOME/.bun/bin" - wget -O "$HOME/.bun/bin/bun" https://pub-61e0d0e2da4146a099e4545a59a9f0f7.r2.dev/bun-musl-arm64 - chmod +x "$HOME/.bun/bin/bun" - append_to_path "$HOME/.bun/bin" + case "$os-$abi" in + linux-musl) + case "$arch" in + x64) + exe="$(download_file https://pub-61e0d0e2da4146a099e4545a59a9f0f7.r2.dev/bun-musl-x64)" + ;; + aarch64) + exe="$(download_file https://pub-61e0d0e2da4146a099e4545a59a9f0f7.r2.dev/bun-musl-arm64)" + ;; + esac + execute chmod +x "$exe" + execute mkdir -p "$home/.bun/bin" + execute mv "$exe" "$home/.bun/bin/bun" + execute ln -fs "$home/.bun/bin/bun" "$home/.bun/bin/bunx" + link_to_bin "$home/.bun/bin" return - fi - bash="$(require bash)" - script=$(download_file "https://bun.sh/install") + ;; + esac + + bash="$(require bash)" + script=$(download_file "https://bun.sh/install") - version="${1:-"latest"}" + version="${1:-"latest"}" case "$version" in latest) - execute "$bash" "$script" + execute_as_user "$bash" "$script" ;; *) - execute "$bash" "$script" -s "$version" + execute_as_user "$bash" "$script" -s "$version" ;; esac - append_to_path "$HOME/.bun/bin" + link_to_bin "$home/.bun/bin" +} + +install_cmake() { + case "$os-$pm" in + darwin-* | linux-apk) + install_packages cmake + ;; + linux-*) + sh="$(require sh)" + cmake_version="3.30.5" + case "$arch" in + x64) + url="https://github.com/Kitware/CMake/releases/download/v$cmake_version/cmake-$cmake_version-linux-x86_64.sh" + ;; + aarch64) + url="https://github.com/Kitware/CMake/releases/download/v$cmake_version/cmake-$cmake_version-linux-aarch64.sh" + ;; + esac + script=$(download_file "$url") + execute_sudo "$sh" "$script" \ + --skip-license \ + --prefix=/usr + ;; + esac } install_rosetta() { @@ -459,27 +663,56 @@ install_rosetta() { install_build_essentials() { case "$pm" in apt) - install_packages build-essential ninja-build xz-utils pkg-config golang - ;; + install_packages \ + build-essential \ + ninja-build \ + xz-utils \ + pkg-config \ + golang + ;; dnf | yum) - install_packages ninja-build gcc-c++ xz pkg-config golang - ;; + install_packages \ + gcc-c++ \ + xz \ + pkg-config \ + golang + case "$distro" in + rhel) ;; + *) + install_packages ninja-build + ;; + esac + ;; brew) - install_packages ninja pkg-config golang - ;; + install_packages \ + ninja \ + pkg-config \ + golang + ;; apk) - install_packages musl-dev ninja xz - ;; + install_packages \ + build-base \ + linux-headers \ + ninja \ + go \ + xz + ;; + esac + + case "$distro-$pm" in + amzn-dnf) + package_manager groupinstall -y "Development Tools" + ;; esac install_packages \ make \ - cmake \ python3 \ libtool \ ruby \ perl + install_cmake install_llvm install_ccache install_rust @@ -487,185 +720,189 @@ install_build_essentials() { } llvm_version_exact() { - if [ "$os" = "linux" ] && [ "$distro" = "alpine" ]; then + case "$os-$abi" in + darwin-* | windows-* | linux-musl) print "18.1.8" - return - fi - case "$os" 
in - linux) - print "16.0.6" - ;; - darwin | windows) - print "18.1.8" - ;; - esac + ;; + linux-*) + print "16.0.6" + ;; + esac } llvm_version() { - echo "$(llvm_version_exact)" | cut -d. -f1 + echo "$(llvm_version_exact)" | cut -d. -f1 } install_llvm() { case "$pm" in apt) - bash="$(require bash)" - script=$(download_file "https://apt.llvm.org/llvm.sh") - execute "$bash" "$script" "$(llvm_version)" all + bash="$(require bash)" + script="$(download_file "https://apt.llvm.org/llvm.sh")" + case "$distro-$release" in + ubuntu-24*) + execute_sudo "$bash" "$script" "$(llvm_version)" all -njammy + ;; + *) + execute_sudo "$bash" "$script" "$(llvm_version)" all + ;; + esac + ;; + brew) + install_packages "llvm@$(llvm_version)" + ;; + apk) + install_packages \ + "llvm$(llvm_version)" \ + "clang$(llvm_version)" \ + "scudo-malloc" \ + --repository "http://dl-cdn.alpinelinux.org/alpine/edge/main" + install_packages \ + "lld$(llvm_version)" \ + --repository "http://dl-cdn.alpinelinux.org/alpine/edge/community" ;; - brew) - install_packages "llvm@$(llvm_version)" - ;; - apk) - install_packages "llvm$(llvm_version)-dev" "clang$(llvm_version)-dev" "lld$(llvm_version)-dev" - append_to_path "/usr/lib/llvm$(llvm_version)/bin" - ;; esac } install_ccache() { - case "$pm" in - apt | brew) - install_packages ccache - ;; - esac + case "$pm" in + apt | apk | brew) + install_packages ccache + ;; + esac } install_rust() { - if [ "$os" = "linux" ] && [ "$distro" = "alpine" ]; then - install_packages rust cargo - mkdir -p "$HOME/.cargo/bin" - append_to_path "$HOME/.cargo/bin" - return - fi - sh="$(require sh)" - script=$(download_file "https://sh.rustup.rs") - execute "$sh" "$script" -y - append_to_path "$HOME/.cargo/bin" + case "$pm" in + apk) + install_packages \ + rust \ + cargo + ;; + *) + sh="$(require sh)" + script=$(download_file "https://sh.rustup.rs") + execute_as_user "$sh" "$script" -y + ;; + esac + + # FIXME: This causes cargo to fail to build: + # > error: rustup could not choose a version of cargo to run, + # > because one wasn't specified explicitly, and no default is configured. + # link_to_bin "$home/.cargo/bin" } install_docker() { case "$pm" in brew) - if ! [ -d "/Applications/Docker.app" ]; then - package_manager install docker --cask - fi - ;; - apk) - install_packages docker + if ! [ -d "/Applications/Docker.app" ]; then + package_manager install docker --cask + fi ;; *) - case "$distro-$release" in - amzn-2 | amzn-1) - execute amazon-linux-extras install docker - ;; - amzn-*) - install_packages docker - ;; - *) - sh="$(require sh)" - script=$(download_file "https://get.docker.com") - execute "$sh" "$script" - ;; - esac - ;; - esac - - systemctl="$(which systemctl)" - if [ -f "$systemctl" ]; then - execute "$systemctl" enable docker - fi -} - -install_ci_dependencies() { - if ! 
[ "$ci" = "1" ]; then - return + case "$distro-$release" in + amzn-2 | amzn-1) + execute amazon-linux-extras install docker + ;; + amzn-* | alpine-*) + install_packages docker + ;; + *) + sh="$(require sh)" + script=$(download_file "https://get.docker.com") + execute "$sh" "$script" + ;; + esac + ;; + esac + + systemctl="$(which systemctl)" + if [ -f "$systemctl" ]; then + execute_sudo "$systemctl" enable docker fi - install_tailscale - install_buildkite + getent="$(which getent)" + if [ -n "$("$getent" group docker)" ]; then + usermod="$(which usermod)" + if [ -f "$usermod" ]; then + execute_sudo "$usermod" -aG docker "$user" + fi + fi } install_tailscale() { + if [ "$docker" = "1" ]; then + return + fi + case "$os" in linux) - sh="$(require sh)" - script=$(download_file "https://tailscale.com/install.sh") - execute "$sh" "$script" + sh="$(require sh)" + script=$(download_file "https://tailscale.com/install.sh") + execute "$sh" "$script" ;; darwin) install_packages go - execute_non_root go install tailscale.com/cmd/tailscale{,d}@latest - append_to_path "$HOME/go/bin" + execute_as_user go install tailscale.com/cmd/tailscale{,d}@latest + append_to_path "$home/go/bin" ;; esac } -install_buildkite() { - home_dir="/var/lib/buildkite-agent" - config_dir="/etc/buildkite-agent" - config_file="$config_dir/buildkite-agent.cfg" - - if ! [ -d "$home_dir" ]; then - execute_sudo mkdir -p "$home_dir" +create_buildkite_user() { + if ! [ "$ci" = "1" ] || ! [ "$os" = "linux" ]; then + return fi - if ! [ -d "$config_dir" ]; then - execute_sudo mkdir -p "$config_dir" - fi + print "Creating Buildkite user..." + user="buildkite-agent" + group="$user" + home="/var/lib/buildkite-agent" - case "$os" in - linux) - getent="$(require getent)" - if [ -z "$("$getent" passwd buildkite-agent)" ]; then - useradd="$(require useradd)" - execute "$useradd" buildkite-agent \ - --system \ - --no-create-home \ - --home-dir "$home_dir" - fi - - if [ -n "$("$getent" group docker)" ]; then - usermod="$(require usermod)" - execute "$usermod" -aG docker buildkite-agent - fi - - execute chown -R buildkite-agent:buildkite-agent "$home_dir" - execute chown -R buildkite-agent:buildkite-agent "$config_dir" - ;; - darwin) - execute_sudo chown -R "$user:admin" "$home_dir" - execute_sudo chown -R "$user:admin" "$config_dir" + case "$distro" in + amzn) + install_packages \ + shadow-utils \ + util-linux ;; esac - if ! 
[ -f "$config_file" ]; then - cat <"$config_file" -# This is generated by scripts/bootstrap.sh -# https://buildkite.com/docs/agent/v3/configuration + if [ -z "$(getent passwd "$user")" ]; then + execute_sudo useradd "$user" \ + --system \ + --no-create-home \ + --home-dir "$home" + fi -name="%hostname-%random" -tags="v=$v,os=$os,arch=$arch,distro=$distro,release=$release,kernel=$kernel,glibc=$glibc" + if [ -n "$(getent group docker)" ]; then + execute_sudo usermod -aG docker "$user" + fi -build-path="$home_dir/builds" -git-mirrors-path="$home_dir/git" -job-log-path="$home_dir/logs" -plugins-path="$config_dir/plugins" -hooks-path="$config_dir/hooks" + paths="$home /var/cache/buildkite-agent /var/log/buildkite-agent /var/run/buildkite-agent /var/run/buildkite-agent/buildkite-agent.sock" + for path in $paths; do + execute_sudo mkdir -p "$path" + execute_sudo chown -R "$user:$group" "$path" + done + + files="/var/run/buildkite-agent/buildkite-agent.pid" + for file in $files; do + execute_sudo touch "$file" + execute_sudo chown "$user:$group" "$file" + done +} -no-ssh-keyscan=true -cancel-grace-period=3600000 # 1 hour -enable-job-log-tmpfile=true -experiment="normalised-upload-paths,resolve-commit-after-checkout,agent-api" -EOF +install_buildkite() { + if ! [ "$ci" = "1" ]; then + return fi bash="$(require bash)" - script=$(download_file "https://raw.githubusercontent.com/buildkite/agent/main/install.sh") - execute "$bash" "$script" + script="$(download_file "https://raw.githubusercontent.com/buildkite/agent/main/install.sh")" + tmp_dir="$(execute dirname "$script")" + HOME="$tmp_dir" execute "$bash" "$script" - out_dir="$HOME/.buildkite-agent" - execute_sudo mv -f "$out_dir/bin/buildkite-agent" "/usr/local/bin/buildkite-agent" - execute rm -rf "$out_dir" + out_dir="$tmp_dir/.buildkite-agent" + execute_sudo mv -f "$out_dir/bin/buildkite-agent" "/usr/bin/buildkite-agent" } install_chrome_dependencies() { @@ -738,19 +975,26 @@ install_chrome_dependencies() { xorg-x11-fonts-Type1 \ xorg-x11-utils ;; - apk) - echo # TODO: + esac + + case "$distro" in + amzn) + install_packages \ + mesa-libgbm ;; esac } main() { - check_system - update_packages - install_common_software - install_build_essentials - install_chrome_dependencies - install_ci_dependencies -} - -main + check_features "$@" + check_operating_system + check_inside_docker + check_user + check_package_manager + create_buildkite_user + install_common_software + install_build_essentials + install_chrome_dependencies +} + +main "$@" diff --git a/scripts/build.mjs b/scripts/build.mjs old mode 100644 new mode 100755 diff --git a/scripts/features.mjs b/scripts/features.mjs old mode 100644 new mode 100755 diff --git a/scripts/machine.mjs b/scripts/machine.mjs new file mode 100755 index 00000000000000..1048d6abff98f3 --- /dev/null +++ b/scripts/machine.mjs @@ -0,0 +1,1305 @@ +#!/usr/bin/env node + +import { inspect, parseArgs } from "node:util"; +import { + $, + getBootstrapVersion, + getBuildNumber, + getSecret, + isCI, + parseArch, + parseOs, + readFile, + spawn, + spawnSafe, + spawnSyncSafe, + startGroup, + tmpdir, + waitForPort, + which, + escapePowershell, +} from "./utils.mjs"; +import { join, relative, resolve } from "node:path"; +import { homedir } from "node:os"; +import { existsSync, mkdirSync, mkdtempSync, readdirSync } from "node:fs"; +import { fileURLToPath } from "node:url"; + +const docker = { + getPlatform(platform) { + const { os, arch } = platform; + + if (os === "linux" || os === "windows") { + if (arch === "aarch64") { + return 
+const docker = {
+  getPlatform(platform) {
+    const { os, arch } = platform;
+
+    if (os === "linux" || os === "windows") {
+      if (arch === "aarch64") {
+        return `${os}/arm64`;
+      } else if (arch === "x64") {
+        return `${os}/amd64`;
+      }
+    }
+
+    throw new Error(`Unsupported platform: ${inspect(platform)}`);
+  },
+
+  async createMachine(platform) {
+    const { id } = await docker.getImage(platform);
+    const platformString = docker.getPlatform(platform);
+
+    const command = ["sleep", "1d"];
+    const { stdout } = await spawnSafe(["docker", "run", "--rm", "--platform", platformString, "-d", id, ...command]);
+    const containerId = stdout.trim();
+
+    // These helpers must not shadow the imported spawn/spawnSafe,
+    // otherwise each call would recurse into itself instead of running docker.
+    const dockerSpawn = async command => {
+      return spawn(["docker", "exec", containerId, ...command]);
+    };
+
+    const dockerSpawnSafe = async command => {
+      return spawnSafe(["docker", "exec", containerId, ...command]);
+    };
+
+    const attach = async () => {
+      const { exitCode, error } = await spawn(["docker", "exec", "-it", containerId, "bash"], {
+        stdio: "inherit",
+      });
+
+      if (exitCode === 0 || exitCode === 130) {
+        return;
+      }
+
+      throw error;
+    };
+
+    const kill = async () => {
+      await spawnSafe(["docker", "kill", containerId]);
+    };
+
+    return {
+      spawn: dockerSpawn,
+      spawnSafe: dockerSpawnSafe,
+      attach,
+      close: kill,
+      [Symbol.asyncDispose]: kill,
+    };
+  },
+
+  async getImage(platform) {
+    const os = platform["os"];
+    const distro = platform["distro"];
+    const release = platform["release"] || "latest";
+
+    let url;
+    if (os === "linux") {
+      if (distro === "debian") {
+        url = `docker.io/library/debian:${release}`;
+      } else if (distro === "ubuntu") {
+        url = `docker.io/library/ubuntu:${release}`;
+      } else if (distro === "amazonlinux") {
+        url = `public.ecr.aws/amazonlinux/amazonlinux:${release}`;
+      } else if (distro === "alpine") {
+        url = `docker.io/library/alpine:${release}`;
+      }
+    }
+
+    if (url) {
+      await spawnSafe(["docker", "pull", "--platform", docker.getPlatform(platform), url]);
+      const { stdout } = await spawnSafe(["docker", "image", "inspect", url, "--format", "json"]);
+      const [{ Id }] = JSON.parse(stdout);
+      return {
+        id: Id,
+        name: url,
+        username: "root",
+      };
+    }
+
+    throw new Error(`Unsupported platform: ${inspect(platform)}`);
+  },
+};
+
+export const aws = {
+  get name() {
+    return "aws";
+  },
+
+  /**
+   * @param {string[]} args
+   * @returns {Promise}
+   */
+  async spawn(args) {
+    const aws = which("aws");
+    if (!aws) {
+      throw new Error("AWS CLI is not installed, please install it");
+    }
+
+    let env;
+    if (isCI) {
+      env = {
+        AWS_ACCESS_KEY_ID: getSecret("EC2_ACCESS_KEY_ID", { required: true }),
+        AWS_SECRET_ACCESS_KEY: getSecret("EC2_SECRET_ACCESS_KEY", { required: true }),
+        AWS_REGION: getSecret("EC2_REGION", { required: false }) || "us-east-1",
+      };
+    }
+
+    const { error, stdout } = await spawn($`${aws} ${args} --output json`, { env });
+    if (error) {
+      if (/max attempts exceeded/i.test(inspect(error))) {
+        return this.spawn(args);
+      }
+      throw error;
+    }
+
+    try {
+      return JSON.parse(stdout);
+    } catch {
+      return;
+    }
+  },
+
+  /**
+   * @param {Record} [options]
+   * @returns {string[]}
+   */
+  getFilters(options = {}) {
+    return Object.entries(options)
+      .filter(([_, value]) => typeof value !== "undefined")
+      .map(([key, value]) => `Name=${key},Values=${value}`);
+  },
+
+  /**
+   * @param {Record} [options]
+   * @returns {string[]}
+   */
+  getFlags(options = {}) {
+    return Object.entries(options)
+      .filter(([_, value]) => typeof value !== "undefined")
+      .map(([key, value]) => `--${key}=${value}`);
+  },
+
+  /**
+   * @typedef AwsInstance
+   * @property {string} InstanceId
+   * @property {string} ImageId
+   * @property {string} InstanceType
+   * @property {string} [PublicIpAddress]
+   * @property {string} [PlatformDetails]
+   * 
@property {string} [Architecture] + * @property {object} [Placement] + * @property {string} [Placement.AvailabilityZone] + * @property {string} LaunchTime + */ + + /** + * @param {Record} [options] + * @returns {Promise} + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html + */ + async describeInstances(options) { + const filters = aws.getFilters(options); + const { Reservations } = await aws.spawn($`ec2 describe-instances --filters ${filters}`); + return Reservations.flatMap(({ Instances }) => Instances).sort((a, b) => (a.LaunchTime < b.LaunchTime ? 1 : -1)); + }, + + /** + * @param {Record} [options] + * @returns {Promise} + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/run-instances.html + */ + async runInstances(options) { + const flags = aws.getFlags(options); + const { Instances } = await aws.spawn($`ec2 run-instances ${flags}`); + return Instances.sort((a, b) => (a.LaunchTime < b.LaunchTime ? 1 : -1)); + }, + + /** + * @param {...string} instanceIds + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/stop-instances.html + */ + async stopInstances(...instanceIds) { + await aws.spawn($`ec2 stop-instances --no-hibernate --force --instance-ids ${instanceIds}`); + }, + + /** + * @param {...string} instanceIds + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/terminate-instances.html + */ + async terminateInstances(...instanceIds) { + await aws.spawn($`ec2 terminate-instances --instance-ids ${instanceIds}`); + }, + + /** + * @param {"instance-running" | "instance-stopped" | "instance-terminated"} action + * @param {...string} instanceIds + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/wait.html + */ + async waitInstances(action, ...instanceIds) { + await aws.spawn($`ec2 wait ${action} --instance-ids ${instanceIds}`); + }, + + /** + * @typedef AwsImage + * @property {string} ImageId + * @property {string} Name + * @property {string} State + * @property {string} CreationDate + */ + + /** + * @param {Record} [options] + * @returns {Promise} + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html + */ + async describeImages(options = {}) { + const { ["owner-alias"]: owners, ...filterOptions } = options; + const filters = aws.getFilters(filterOptions); + if (owners) { + filters.push(`--owners=${owners}`); + } + const { Images } = await aws.spawn($`ec2 describe-images --filters ${filters}`); + return Images.sort((a, b) => (a.CreationDate < b.CreationDate ? 
1 : -1)); + }, + + /** + * @param {Record} [options] + * @returns {Promise} + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-image.html + */ + async createImage(options) { + const flags = aws.getFlags(options); + try { + const { ImageId } = await aws.spawn($`ec2 create-image ${flags}`); + return ImageId; + } catch (error) { + const match = /already in use by AMI (ami-[a-z0-9]+)/i.exec(inspect(error)); + if (!match) { + throw error; + } + const [, existingImageId] = match; + await aws.spawn($`ec2 deregister-image --image-id ${existingImageId}`); + const { ImageId } = await aws.spawn($`ec2 create-image ${flags}`); + return ImageId; + } + }, + + /** + * @param {Record} options + * @returns {Promise} + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/copy-image.html + */ + async copyImage(options) { + const flags = aws.getFlags(options); + const { ImageId } = await aws.spawn($`ec2 copy-image ${flags}`); + return ImageId; + }, + + /** + * @param {"image-available"} action + * @param {...string} imageIds + * @link https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/wait/image-available.html + */ + async waitImage(action, ...imageIds) { + await aws.spawn($`ec2 wait ${action} --image-ids ${imageIds}`); + }, + + /** + * @param {AwsImage | string} imageOrImageId + * @returns {Promise} + */ + async getAvailableImage(imageOrImageId) { + let imageId = imageOrImageId; + if (typeof imageOrImageId === "object") { + const { ImageId, State } = imageOrImageId; + if (State === "available") { + return imageOrImageId; + } + imageId = ImageId; + } + + await aws.waitImage("image-available", imageId); + const [availableImage] = await aws.describeImages({ + "state": "available", + "image-id": imageId, + }); + + if (!availableImage) { + throw new Error(`Failed to find available image: ${imageId}`); + } + + return availableImage; + }, + + /** + * @param {MachineOptions} options + * @returns {Promise} + */ + async getBaseImage(options) { + const { os, arch, distro, distroVersion } = options; + + let name, owner; + if (os === "linux") { + if (!distro || distro === "debian") { + owner = "amazon"; + name = `debian-${distroVersion || "*"}-${arch === "aarch64" ? "arm64" : "amd64"}-*`; + } else if (distro === "ubuntu") { + owner = "099720109477"; + name = `ubuntu/images/hvm-ssd*/ubuntu-*-${distroVersion || "*"}-${arch === "aarch64" ? "arm64" : "amd64"}-server-*`; + } else if (distro === "amazonlinux") { + owner = "amazon"; + if (distroVersion === "1") { + // EOL + } else if (distroVersion === "2") { + name = `amzn2-ami-hvm-*-${arch === "aarch64" ? "arm64" : "x86_64"}-gp2`; + } else { + name = `al${distroVersion || "*"}-ami-*-${arch === "aarch64" ? "arm64" : "x86_64"}`; + } + } else if (distro === "alpine") { + owner = "538276064493"; + name = `alpine-${distroVersion || "*"}.*-${arch === "aarch64" ? "aarch64" : "x86_64"}-uefi-cloudinit-*`; + } else if (distro === "centos") { + owner = "aws-marketplace"; + name = `CentOS-Stream-ec2-${distroVersion || "*"}-*.${arch === "aarch64" ? 
"aarch64" : "x86_64"}-*`; + } + } else if (os === "windows") { + if (!distro || distro === "server") { + owner = "amazon"; + name = `Windows_Server-${distroVersion || "*"}-English-Full-Base-*`; + } + } + + if (!name) { + throw new Error(`Unsupported platform: ${inspect(options)}`); + } + + const baseImages = await aws.describeImages({ + "state": "available", + "owner-alias": owner, + "name": name, + }); + + if (!baseImages.length) { + throw new Error(`No base image found: ${inspect(options)}`); + } + + const [baseImage] = baseImages; + return aws.getAvailableImage(baseImage); + }, + + /** + * @param {MachineOptions} options + * @returns {Promise} + */ + async createMachine(options) { + const { os, arch, imageId, instanceType, tags } = options; + + /** @type {AwsImage} */ + let image; + if (imageId) { + image = await aws.getAvailableImage(imageId); + } else { + image = await aws.getBaseImage(options); + } + + const { ImageId, Name, RootDeviceName, BlockDeviceMappings } = image; + const blockDeviceMappings = BlockDeviceMappings.map(device => { + const { DeviceName } = device; + if (DeviceName === RootDeviceName) { + return { + ...device, + Ebs: { + VolumeSize: getDiskSize(options), + }, + }; + } + return device; + }); + + const username = getUsername(Name); + + let userData = getUserData({ ...options, username }); + if (os === "windows") { + userData = `${userData}-ExecutionPolicy Unrestricted -NoProfile -NonInteractivefalse`; + } + + let tagSpecification = []; + if (tags) { + tagSpecification = ["instance", "volume"].map(resourceType => { + return { + ResourceType: resourceType, + Tags: Object.entries(tags).map(([Key, Value]) => ({ Key, Value: String(Value) })), + }; + }); + } + + const [instance] = await aws.runInstances({ + ["image-id"]: ImageId, + ["instance-type"]: instanceType || (arch === "aarch64" ? 
"t4g.large" : "t3.large"), + ["user-data"]: userData, + ["block-device-mappings"]: JSON.stringify(blockDeviceMappings), + ["metadata-options"]: JSON.stringify({ + "HttpTokens": "optional", + "HttpEndpoint": "enabled", + "HttpProtocolIpv6": "enabled", + "InstanceMetadataTags": "enabled", + }), + ["tag-specifications"]: JSON.stringify(tagSpecification), + ["key-name"]: "ashcon-bun", + }); + + return aws.toMachine(instance, { ...options, username }); + }, + + /** + * @param {AwsInstance} instance + * @param {MachineOptions} [options] + * @returns {Machine} + */ + toMachine(instance, options = {}) { + let { InstanceId, ImageId, InstanceType, Placement, PublicIpAddress } = instance; + + const connect = async () => { + if (!PublicIpAddress) { + await aws.waitInstances("instance-running", InstanceId); + const [{ PublicIpAddress: IpAddress }] = await aws.describeInstances({ + ["instance-id"]: InstanceId, + }); + PublicIpAddress = IpAddress; + } + + const { username, sshKeys } = options; + const identityPaths = sshKeys + ?.filter(({ privatePath }) => existsSync(privatePath)) + ?.map(({ privatePath }) => privatePath); + + return { hostname: PublicIpAddress, username, identityPaths }; + }; + + const spawn = async (command, options) => { + const connectOptions = await connect(); + return spawnSsh({ ...connectOptions, command }, options); + }; + + const spawnSafe = async (command, options) => { + const connectOptions = await connect(); + return spawnSshSafe({ ...connectOptions, command }, options); + }; + + const attach = async () => { + const connectOptions = await connect(); + await spawnSshSafe({ ...connectOptions }); + }; + + const upload = async (source, destination) => { + const connectOptions = await connect(); + await spawnScp({ ...connectOptions, source, destination }); + }; + + const snapshot = async name => { + await aws.stopInstances(InstanceId); + await aws.waitInstances("instance-stopped", InstanceId); + const imageId = await aws.createImage({ + ["instance-id"]: InstanceId, + ["name"]: name || `${InstanceId}-snapshot-${Date.now()}`, + }); + await aws.waitImage("image-available", imageId); + return imageId; + }; + + const terminate = async () => { + await aws.terminateInstances(InstanceId); + }; + + return { + cloud: "aws", + id: InstanceId, + imageId: ImageId, + instanceType: InstanceType, + region: Placement?.AvailabilityZone, + get publicIp() { + return PublicIpAddress; + }, + spawn, + spawnSafe, + upload, + attach, + snapshot, + close: terminate, + [Symbol.asyncDispose]: terminate, + }; + }, +}; + +const google = { + async createMachine(platform) { + const image = await google.getImage(platform); + const { id: imageId, username } = image; + + const authorizedKeys = await getAuthorizedKeys(); + const sshKeys = authorizedKeys?.map(key => `${username}:${key}`).join("\n") ?? 
""; + + const { os, ["instance-type"]: type } = platform; + const instanceType = type || "e2-standard-4"; + + let metadata = `ssh-keys=${sshKeys}`; + if (os === "windows") { + metadata += `,sysprep-specialize-script-cmd=googet -noconfirm=true install google-compute-engine-ssh,enable-windows-ssh=TRUE`; + } + + const [{ id, networkInterfaces }] = await google.createInstances({ + ["zone"]: "us-central1-a", + ["image"]: imageId, + ["machine-type"]: instanceType, + ["boot-disk-auto-delete"]: true, + // ["boot-disk-size"]: "10GB", + // ["boot-disk-type"]: "pd-standard", + ["metadata"]: metadata, + }); + + const publicIp = () => { + for (const { accessConfigs } of networkInterfaces) { + for (const { natIP } of accessConfigs) { + return natIP; + } + } + throw new Error(`Failed to find public IP for instance: ${id}`); + }; + + const spawn = command => { + const hostname = publicIp(); + return spawnSsh({ hostname, username, command }); + }; + + const spawnSafe = command => { + const hostname = publicIp(); + return spawnSshSafe({ hostname, username, command }); + }; + + const attach = async () => { + const hostname = publicIp(); + await spawnSshSafe({ hostname, username }); + }; + + const terminate = async () => { + await google.deleteInstance(id); + }; + + return { + spawn, + spawnSafe, + attach, + close: terminate, + [Symbol.asyncDispose]: terminate, + }; + }, + + async getImage(platform) { + const { os, arch, distro, release } = platform; + const architecture = arch === "aarch64" ? "ARM64" : "X86_64"; + + let name; + let username; + if (os === "linux") { + if (distro === "debian") { + name = `debian-${release}-*`; + username = "admin"; + } else if (distro === "ubuntu") { + name = `ubuntu-${release.replace(/\./g, "")}-*`; + username = "ubuntu"; + } + } else if (os === "windows" && arch === "x64") { + if (distro === "server") { + name = `windows-server-${release}-dc-core-*`; + username = "administrator"; + } + } + + if (name && username) { + const images = await google.listImages({ name, architecture }); + if (images.length) { + const [image] = images; + const { name, selfLink } = image; + return { + id: selfLink, + name, + username, + }; + } + } + + throw new Error(`Unsupported platform: ${inspect(platform)}`); + }, + + async listImages(options = {}) { + const filter = Object.entries(options) + .map(([key, value]) => [value.includes("*") ? `${key}~${value}` : `${key}=${value}`]) + .join(" AND "); + const filters = filter ? ["--filter", filter] : []; + const { stdout } = await spawnSafe(["gcloud", "compute", "images", "list", ...filters, "--format", "json"]); + const images = JSON.parse(stdout); + return images.sort((a, b) => (a.creationTimestamp < b.creationTimestamp ? 1 : -1)); + }, + + async listInstances(options = {}) { + const filter = Object.entries(options) + .map(([key, value]) => [value.includes("*") ? `${key}~${value}` : `${key}=${value}`]) + .join(" AND "); + const filters = filter ? ["--filter", filter] : []; + const { stdout } = await spawnSafe(["gcloud", "compute", "instances", "list", ...filters, "--format", "json"]); + const instances = JSON.parse(stdout); + return instances.sort((a, b) => (a.creationTimestamp < b.creationTimestamp ? 1 : -1)); + }, + + async createInstances(options = {}) { + const flags = Object.entries(options).flatMap(([key, value]) => + typeof value === "boolean" ? 
`--${key}` : `--${key}=${value}`, + ); + const randomId = "i-" + Math.random().toString(36).substring(2, 15); + const { stdout } = await spawnSafe([ + "gcloud", + "compute", + "instances", + "create", + randomId, + ...flags, + "--format", + "json", + ]); + const instances = JSON.parse(stdout); + return instances.sort((a, b) => (a.creationTimestamp < b.creationTimestamp ? 1 : -1)); + }, + + async deleteInstance(instanceId) { + await spawnSafe(["gcloud", "compute", "instances", "delete", instanceId, "--zone", "us-central1-a", "--quiet"]); + }, +}; + +/** + * @typedef CloudInit + * @property {string} [distro] + * @property {SshKey[]} [sshKeys] + * @property {string} [username] + * @property {string} [password] + */ + +function getUserData(cloudInit) { + const { os } = cloudInit; + if (os === "windows") { + return getWindowsStartupScript(cloudInit); + } + return getCloudInit(cloudInit); +} + +/** + * @param {CloudInit} cloudInit + * @returns {string} + */ +function getCloudInit(cloudInit) { + const username = cloudInit["username"] || "root"; + const password = cloudInit["password"] || crypto.randomUUID(); + const authorizedKeys = JSON.stringify(cloudInit["sshKeys"]?.map(({ publicKey }) => publicKey) || []); + + let sftpPath = "/usr/lib/openssh/sftp-server"; + switch (cloudInit["distro"]) { + case "alpine": + sftpPath = "/usr/lib/ssh/sftp-server"; + break; + case "amazonlinux": + case "rhel": + case "centos": + sftpPath = "/usr/libexec/openssh/sftp-server"; + break; + } + + // https://cloudinit.readthedocs.io/en/stable/ + return `#cloud-config + + package_update: true + packages: + - curl + - ca-certificates + - openssh-server + + write_files: + - path: /etc/ssh/sshd_config + content: | + PermitRootLogin yes + PasswordAuthentication yes + Subsystem sftp ${sftpPath} + + chpasswd: + expire: false + list: | + root:${password} + ${username}:${password} + + disable_root: false + + ssh_pwauth: true + ssh_authorized_keys: ${authorizedKeys} + `; +} + +/** + * @param {CloudInit} cloudInit + * @returns {string} + */ +function getWindowsStartupScript(cloudInit) { + const { sshKeys } = cloudInit; + const authorizedKeys = sshKeys.filter(({ publicKey }) => publicKey).map(({ publicKey }) => publicKey); + + return ` + $ErrorActionPreference = "Stop" + Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass -Force + + function Install-Ssh { + $sshService = Get-WindowsCapability -Online | Where-Object Name -like 'OpenSSH.Server*' + if ($sshService.State -ne "Installed") { + Write-Output "Installing OpenSSH server..." + Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 + } + + $pwshPath = Get-Command pwsh -ErrorAction SilentlyContinue | Select-Object -ExpandProperty Path + if (-not $pwshPath) { + $pwshPath = Get-Command powershell -ErrorAction SilentlyContinue | Select-Object -ExpandProperty Path + } + + if (-not (Get-Service -Name sshd -ErrorAction SilentlyContinue)) { + Write-Output "Enabling OpenSSH server..." + Set-Service -Name sshd -StartupType Automatic + Start-Service sshd + } + + if ($pwshPath) { + Write-Output "Setting default shell to $pwshPath..." + New-ItemProperty -Path "HKLM:\\SOFTWARE\\OpenSSH" -Name DefaultShell -Value $pwshPath -PropertyType String -Force + } + + $firewallRule = Get-NetFirewallRule -Name "OpenSSH-Server-In-TCP" -ErrorAction SilentlyContinue + if (-not $firewallRule) { + Write-Output "Configuring firewall..." 
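+        # Open inbound TCP 22 so the SSH checks in this file (waitForPort,
+        # spawnSsh) can reach the host once the rule is active.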
+ New-NetFirewallRule -Name 'OpenSSH-Server-In-TCP' -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 + } + + $sshPath = "C:\\ProgramData\\ssh" + if (-not (Test-Path $sshPath)) { + Write-Output "Creating SSH directory..." + New-Item -Path $sshPath -ItemType Directory + } + + $authorizedKeysPath = Join-Path $sshPath "administrators_authorized_keys" + $authorizedKeys = @(${authorizedKeys.map(key => `"${escapePowershell(key)}"`).join("\n")}) + if (-not (Test-Path $authorizedKeysPath) -or (Get-Content $authorizedKeysPath) -ne $authorizedKeys) { + Write-Output "Adding SSH keys..." + Set-Content -Path $authorizedKeysPath -Value $authorizedKeys + } + + $sshdConfigPath = Join-Path $sshPath "sshd_config" + $sshdConfig = @" + PasswordAuthentication no + PubkeyAuthentication yes + AuthorizedKeysFile $authorizedKeysPath + Subsystem sftp sftp-server.exe +"@ + if (-not (Test-Path $sshdConfigPath) -or (Get-Content $sshdConfigPath) -ne $sshdConfig) { + Write-Output "Writing SSH configuration..." + Set-Content -Path $sshdConfigPath -Value $sshdConfig + } + + Write-Output "Restarting SSH server..." + Restart-Service sshd + } + + Install-Ssh + `; +} + +/** + * @param {string} distro + * @returns {string} + */ +function getUsername(distro) { + if (/windows/i.test(distro)) { + return "administrator"; + } + + if (/alpine|centos/i.test(distro)) { + return "root"; + } + + if (/debian/i.test(distro)) { + return "admin"; + } + + if (/ubuntu/i.test(distro)) { + return "ubuntu"; + } + + if (/amazon|amzn|al\d+|rhel/i.test(distro)) { + return "ec2-user"; + } + + throw new Error(`Unsupported distro: ${distro}`); +} + +/** + * @param {MachineOptions} options + * @returns {number} + */ +function getDiskSize(options) { + const { os, diskSizeGb } = options; + + if (diskSizeGb) { + return diskSizeGb; + } + + return os === "windows" ? 
50 : 30; +} + +/** + * @typedef SshKey + * @property {string} privatePath + * @property {string} publicPath + * @property {string} publicKey + */ + +/** + * @returns {SshKey} + */ +function createSshKey() { + const sshPath = join(homedir(), ".ssh"); + if (!existsSync(sshPath)) { + mkdirSync(sshPath, { recursive: true }); + } + + const name = `id_rsa_${crypto.randomUUID()}`; + const privatePath = join(sshPath, name); + const publicPath = join(sshPath, `${name}.pub`); + spawnSyncSafe(["ssh-keygen", "-t", "rsa", "-b", "4096", "-f", privatePath, "-N", ""], { stdio: "inherit" }); + + if (!existsSync(privatePath) || !existsSync(publicPath)) { + throw new Error(`Failed to generate SSH key: ${privatePath} / ${publicPath}`); + } + + const sshAgent = which("ssh-agent"); + const sshAdd = which("ssh-add"); + if (sshAgent && sshAdd) { + spawnSyncSafe(["sh", "-c", `eval $(${sshAgent} -s) && ${sshAdd} ${privatePath}`], { stdio: "inherit" }); + } + + return { + privatePath, + publicPath, + get publicKey() { + return readFile(publicPath, { cache: true }); + }, + }; +} + +/** + * @returns {SshKey[]} + */ +function getSshKeys() { + const homePath = homedir(); + const sshPath = join(homePath, ".ssh"); + + /** @type {SshKey[]} */ + const sshKeys = []; + if (existsSync(sshPath)) { + const sshFiles = readdirSync(sshPath, { withFileTypes: true }); + const publicPaths = sshFiles + .filter(entry => entry.isFile() && entry.name.endsWith(".pub")) + .map(({ name }) => join(sshPath, name)); + + sshKeys.push( + ...publicPaths.map(publicPath => ({ + publicPath, + privatePath: publicPath.replace(/\.pub$/, ""), + get publicKey() { + return readFile(publicPath, { cache: true }).trim(); + }, + })), + ); + } + + if (!sshKeys.length) { + sshKeys.push(createSshKey()); + } + + return sshKeys; +} + +/** + * @typedef SshOptions + * @property {string} hostname + * @property {number} [port] + * @property {string} [username] + * @property {string[]} [command] + * @property {string[]} [identityPaths] + * @property {number} [retries] + */ + +/** + * @param {SshOptions} options + * @param {object} [spawnOptions] + * @returns {Promise} + */ +async function spawnSsh(options, spawnOptions = {}) { + const { hostname, port, username, identityPaths, command } = options; + await waitForPort({ hostname, port: port || 22 }); + + const ssh = ["ssh", hostname, "-o", "StrictHostKeyChecking=no", "-o", "BatchMode=yes"]; + if (port) { + ssh.push("-p", port); + } + if (username) { + ssh.push("-l", username); + } + if (identityPaths) { + ssh.push(...identityPaths.flatMap(path => ["-i", path])); + } + const stdio = command ? "pipe" : "inherit"; + if (command) { + ssh.push(...command); + } + + return spawn(ssh, { stdio, ...spawnOptions }); +} + +/** + * @param {SshOptions} options + * @param {object} [spawnOptions] + * @returns {Promise} + */ +async function spawnSshSafe(options, spawnOptions = {}) { + const { hostname, port, username, identityPaths, command } = options; + await waitForPort({ hostname, port: port || 22 }); + + const ssh = ["ssh", hostname, "-o", "StrictHostKeyChecking=no", "-o", "BatchMode=yes"]; + if (port) { + ssh.push("-p", port); + } + if (username) { + ssh.push("-l", username); + } + if (identityPaths) { + ssh.push(...identityPaths.flatMap(path => ["-i", path])); + } + const stdio = command ? 
"pipe" : "inherit"; + if (command) { + ssh.push(...command); + } + + return spawnSafe(ssh, { stdio, ...spawnOptions }); +} + +/** + * @typedef ScpOptions + * @property {string} hostname + * @property {string} source + * @property {string} destination + * @property {string[]} [identityPaths] + * @property {string} [port] + * @property {string} [username] + * @property {number} [retries] + */ + +/** + * @param {ScpOptions} options + * @returns {Promise} + */ +async function spawnScp(options) { + const { hostname, port, username, identityPaths, source, destination, retries = 10 } = options; + await waitForPort({ hostname, port: port || 22 }); + + const command = ["scp", "-o", "StrictHostKeyChecking=no", "-o", "BatchMode=yes"]; + if (port) { + command.push("-P", port); + } + if (identityPaths) { + command.push(...identityPaths.flatMap(path => ["-i", path])); + } + command.push(resolve(source)); + if (username) { + command.push(`${username}@${hostname}:${destination}`); + } else { + command.push(`${hostname}:${destination}`); + } + + let cause; + for (let i = 0; i < retries; i++) { + const result = await spawn(command, { stdio: "inherit" }); + const { exitCode, stderr } = result; + if (exitCode === 0) { + return; + } + + cause = stderr.trim() || undefined; + if (/(bad configuration option)|(no such file or directory)/i.test(stderr)) { + break; + } + await new Promise(resolve => setTimeout(resolve, Math.pow(2, i) * 1000)); + } + + throw new Error(`SCP failed: ${source} -> ${username}@${hostname}:${destination}`, { cause }); +} + +/** + * @typedef Cloud + * @property {string} name + * @property {(options: MachineOptions) => Promise} createMachine + */ + +/** + * @param {string} name + * @returns {Cloud} + */ +function getCloud(name) { + switch (name) { + case "aws": + return aws; + } + throw new Error(`Unsupported cloud: ${name}`); +} + +/** + * @typedef Machine + * @property {string} cloud + * @property {string} [name] + * @property {string} id + * @property {string} imageId + * @property {string} instanceType + * @property {string} region + * @property {string} [publicIp] + * @property {(command: string[]) => Promise} spawn + * @property {(command: string[]) => Promise} spawnSafe + * @property {(source: string, destination: string) => Promise} upload + * @property {() => Promise} attach + * @property {() => Promise} snapshot + * @property {() => Promise} close + */ + +/** + * @typedef {"linux" | "darwin" | "windows"} Os + * @typedef {"aarch64" | "x64"} Arch + */ + +/** + * @typedef MachineOptions + * @property {Cloud} cloud + * @property {Os} os + * @property {Arch} arch + * @property {string} distro + * @property {string} [distroVersion] + * @property {string} [imageId] + * @property {string} [imageName] + * @property {number} [cpuCount] + * @property {number} [memoryGb] + * @property {number} [diskSizeGb] + * @property {boolean} [persistent] + * @property {boolean} [detached] + * @property {Record} [tags] + * @property {boolean} [bootstrap] + * @property {boolean} [ci] + * @property {SshKey[]} [sshKeys] + */ + +async function main() { + const { positionals } = parseArgs({ + allowPositionals: true, + strict: false, + }); + + const [command] = positionals; + if (!/^(ssh|create-image|publish-image)$/.test(command)) { + const scriptPath = relative(process.cwd(), fileURLToPath(import.meta.url)); + throw new Error(`Usage: ./${scriptPath} [ssh|create-image|publish-image] [options]`); + } + + const { values: args } = parseArgs({ + allowPositionals: true, + options: { + "cloud": { type: "string", 
default: "aws" }, + "os": { type: "string", default: "linux" }, + "arch": { type: "string", default: "x64" }, + "distro": { type: "string" }, + "distro-version": { type: "string" }, + "instance-type": { type: "string" }, + "image-id": { type: "string" }, + "image-name": { type: "string" }, + "cpu-count": { type: "string" }, + "memory-gb": { type: "string" }, + "disk-size-gb": { type: "string" }, + "persistent": { type: "boolean" }, + "detached": { type: "boolean" }, + "tag": { type: "string", multiple: true }, + "ci": { type: "boolean" }, + "no-bootstrap": { type: "boolean" }, + "buildkite-token": { type: "string" }, + "tailscale-authkey": { type: "string" }, + }, + }); + + /** @type {MachineOptions} */ + const options = { + cloud: getCloud(args["cloud"]), + os: parseOs(args["os"]), + arch: parseArch(args["arch"]), + distro: args["distro"], + distroVersion: args["distro-version"], + instanceType: args["instance-type"], + imageId: args["image-id"], + imageName: args["image-name"], + tags: { + "robobun": "true", + "robobun2": "true", + "buildkite:token": args["buildkite-token"], + "tailscale:authkey": args["tailscale-authkey"], + ...Object.fromEntries(args["tag"]?.map(tag => tag.split("=")) ?? []), + }, + cpuCount: parseInt(args["cpu-count"]) || undefined, + memoryGb: parseInt(args["memory-gb"]) || undefined, + diskSizeGb: parseInt(args["disk-size-gb"]) || undefined, + persistent: !!args["persistent"], + detached: !!args["detached"], + bootstrap: args["no-bootstrap"] !== true, + ci: !!args["ci"], + sshKeys: getSshKeys(), + }; + + const { cloud, detached, bootstrap, ci, os, arch, distro, distroVersion } = options; + const name = `${os}-${arch}-${distro}-${distroVersion}`; + + let bootstrapPath, agentPath; + if (bootstrap) { + bootstrapPath = resolve(import.meta.dirname, os === "windows" ? "bootstrap.ps1" : "bootstrap.sh"); + if (!existsSync(bootstrapPath)) { + throw new Error(`Script not found: ${bootstrapPath}`); + } + if (ci) { + const npx = which("bunx") || which("npx"); + if (!npx) { + throw new Error("Executable not found: bunx or npx"); + } + const entryPath = resolve(import.meta.dirname, "agent.mjs"); + const tmpPath = mkdtempSync(join(tmpdir(), "agent-")); + agentPath = join(tmpPath, "agent.mjs"); + await spawnSafe($`${npx} esbuild ${entryPath} --bundle --platform=node --format=esm --outfile=${agentPath}`); + } + } + + /** @type {Machine} */ + const machine = await startGroup("Creating machine...", async () => { + console.log("Creating machine:", JSON.parse(JSON.stringify(options))); + const result = await cloud.createMachine(options); + console.log("Created machine:", result); + return result; + }); + + if (!detached) { + let closing; + for (const event of ["beforeExit", "SIGINT", "SIGTERM"]) { + process.on(event, () => { + if (!closing) { + closing = true; + machine.close().finally(() => { + if (event !== "beforeExit") { + process.exit(1); + } + }); + } + }); + } + } + + try { + await startGroup("Connecting...", async () => { + const command = os === "windows" ? ["cmd", "/c", "ver"] : ["uname", "-a"]; + await machine.spawnSafe(command, { stdio: "inherit" }); + }); + + if (bootstrapPath) { + if (os === "windows") { + const remotePath = "C:\\Windows\\Temp\\bootstrap.ps1"; + const args = ci ? ["-CI"] : []; + await startGroup("Running bootstrap...", async () => { + await machine.upload(bootstrapPath, remotePath); + await machine.spawnSafe(["powershell", remotePath, ...args], { stdio: "inherit" }); + }); + } else { + const remotePath = "/tmp/bootstrap.sh"; + const args = ci ? 
["--ci"] : []; + await startGroup("Running bootstrap...", async () => { + await machine.upload(bootstrapPath, remotePath); + await machine.spawnSafe(["sh", remotePath, ...args], { stdio: "inherit" }); + }); + } + } + + if (agentPath) { + if (os === "windows") { + // TODO + // const remotePath = "C:\\Windows\\Temp\\agent.mjs"; + // await startGroup("Installing agent...", async () => { + // await machine.upload(agentPath, remotePath); + // await machine.spawnSafe(["node", remotePath, "install"], { stdio: "inherit" }); + // }); + } else { + const tmpPath = "/tmp/agent.mjs"; + const remotePath = "/var/lib/buildkite-agent/agent.mjs"; + await startGroup("Installing agent...", async () => { + await machine.upload(agentPath, tmpPath); + const command = []; + { + const { exitCode } = await machine.spawn(["sudo", "echo", "1"], { stdio: "ignore" }); + if (exitCode === 0) { + command.unshift("sudo"); + } + } + await machine.spawnSafe([...command, "cp", tmpPath, remotePath]); + { + const { stdout } = await machine.spawn(["node", "-v"]); + const version = parseInt(stdout.trim().replace(/^v/, "")); + if (isNaN(version) || version < 20) { + command.push("bun"); + } else { + command.push("node"); + } + } + await machine.spawnSafe([...command, remotePath, "install"], { stdio: "inherit" }); + }); + } + } + + if (command === "create-image" || command === "publish-image") { + let suffix; + if (command === "publish-image") { + suffix = `v${getBootstrapVersion()}`; + } else if (isCI) { + suffix = `build-${getBuildNumber()}`; + } else { + suffix = `draft-${Date.now()}`; + } + const label = `${name}-${suffix}`; + await startGroup("Creating image...", async () => { + console.log("Creating image:", label); + const result = await machine.snapshot(label); + console.log("Created image:", result); + }); + } + + if (command === "ssh") { + await machine.attach(); + } + } catch (error) { + if (isCI) { + throw error; + } + console.error(error); + try { + await machine.attach(); + } catch (error) { + console.error(error); + } + } finally { + if (!detached) { + await machine.close(); + } + } +} + +await main(); diff --git a/scripts/runner.node.mjs b/scripts/runner.node.mjs index 898b596a50f313..792c825ac13460 100755 --- a/scripts/runner.node.mjs +++ b/scripts/runner.node.mjs @@ -39,7 +39,7 @@ import { } from "./utils.mjs"; import { userInfo } from "node:os"; -const cwd = dirname(import.meta.dirname); +const cwd = import.meta.dirname ? dirname(import.meta.dirname) : process.cwd(); const testsPath = join(cwd, "test"); const spawnTimeout = 5_000; @@ -232,7 +232,7 @@ async function runTests() { if (testRunner === "bun") { await runTest(title, () => spawnBunTest(execPath, testPath, { cwd: vendorPath })); } else { - const testRunnerPath = join(import.meta.dirname, "..", "test", "runners", `${testRunner}.ts`); + const testRunnerPath = join(cwd, "test", "runners", `${testRunner}.ts`); if (!existsSync(testRunnerPath)) { throw new Error(`Unsupported test runner: ${testRunner}`); } @@ -632,7 +632,7 @@ function parseTestStdout(stdout, testPath) { const removeStart = lines.length - skipCount; const removeCount = skipCount - 2; const omitLine = `${getAnsi("gray")}... 
omitted ${removeCount} tests ...${getAnsi("reset")}`;
-      lines = lines.toSpliced(removeStart, removeCount, omitLine);
+      lines.splice(removeStart, removeCount, omitLine);
     }
     skipCount = 0;
   }
@@ -1133,6 +1133,13 @@ function addPath(...paths) {
   return paths.join(":");
 }
 
+/**
+ * @returns {string | undefined}
+ */
+function getTestLabel() {
+  return getBuildLabel()?.replace(" - test-bun", "");
+}
+
 /**
  * @param {TestResult | TestResult[]} result
  * @param {boolean} concise
@@ -1140,7 +1147,7 @@ function addPath(...paths) {
  */
 function formatTestToMarkdown(result, concise) {
   const results = Array.isArray(result) ? result : [result];
-  const buildLabel = getBuildLabel();
+  const buildLabel = getTestLabel();
   const buildUrl = getBuildUrl();
   const platform = buildUrl ? `<a href="${buildUrl}">${buildLabel}</a>` : buildLabel;
@@ -1273,7 +1280,7 @@ function reportAnnotationToBuildKite({ label, content, style = "error", priority
     const cause = error ?? signal ?? `code ${status}`;
     throw new Error(`Failed to create annotation: ${label}`, { cause });
   }
-  const buildLabel = getBuildLabel();
+  const buildLabel = getTestLabel();
   const buildUrl = getBuildUrl();
   const platform = buildUrl ? `<a href="${buildUrl}">${buildLabel}</a>` : buildLabel;
   let errorMessage = `
${label} - annotation error on ${platform}`;
diff --git a/scripts/utils.mjs b/scripts/utils.mjs
old mode 100644
new mode 100755
index 185bebf7d7aa48..17a27da7a74fa9
--- a/scripts/utils.mjs
+++ b/scripts/utils.mjs
@@ -3,9 +3,18 @@
 import { spawn as nodeSpawn, spawnSync as nodeSpawnSync } from "node:child_process";
 import { createHash } from "node:crypto";
-import { appendFileSync, existsSync, mkdtempSync, readdirSync, readFileSync, writeFileSync } from "node:fs";
-import { writeFile } from "node:fs/promises";
-import { hostname, tmpdir as nodeTmpdir, userInfo } from "node:os";
+import {
+  appendFileSync,
+  chmodSync,
+  existsSync,
+  mkdirSync,
+  mkdtempSync,
+  readdirSync,
+  readFileSync,
+  writeFileSync,
+} from "node:fs";
+import { connect } from "node:net";
+import { hostname, tmpdir as nodeTmpdir, userInfo, release } from "node:os";
 import { dirname, join, relative, resolve } from "node:path";
 import { normalize as normalizeWindows } from "node:path/win32";
@@ -53,8 +62,9 @@ export function getSecret(name, options = { required: true, redact: true }) {
     command.push("--skip-redaction");
   }
 
-  const { error, stdout: secret } = spawnSync(command);
-  if (error || !secret.trim()) {
+  const { error, stdout } = spawnSync(command);
+  const secret = stdout.trim();
+  if (error || !secret) {
     const orgId = getEnv("BUILDKITE_ORGANIZATION_SLUG", false);
     const clusterId = getEnv("BUILDKITE_CLUSTER_ID", false);
@@ -106,8 +116,8 @@ export function setEnv(name, value) {
  * @property {string} [cwd]
  * @property {number} [timeout]
  * @property {Record} [env]
- * @property {string} [stdout]
- * @property {string} [stderr]
+ * @property {string} [stdin]
+ * @property {boolean} [privileged]
  */
 
 /**
@@ -119,20 +129,93 @@ export function setEnv(name, value) {
  * @property {Error} [error]
  */
 
+/**
+ * @param {TemplateStringsArray} strings
+ * @param {...any} values
+ * @returns {string[]}
+ */
+export function $(strings, ...values) {
+  const result = [];
+  for (let i = 0; i < strings.length; i++) {
+    result.push(...strings[i].trim().split(/\s+/).filter(Boolean));
+    if (i < values.length) {
+      const value = values[i];
+      if (Array.isArray(value)) {
+        result.push(...value);
+      } else if (typeof value === "string") {
+        if (result.at(-1)?.endsWith("=")) {
+          result[result.length - 1] += value;
+        } else {
+          result.push(value);
+        }
+      }
+    }
+  }
+  return result;
+}
+
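+// Usage sketch (illustrative): the tag splits literal parts on whitespace,
+// spreads interpolated arrays, and merges a string into a trailing "=" token:
+//   $`ec2 wait image-available --image-ids ${["ami-123"]}`
+//     -> ["ec2", "wait", "image-available", "--image-ids", "ami-123"]
+//   $`--name=${value}`
+//     -> ["--name=" + value]
+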
+/** @type {string[] | undefined} */
+let privilegedCommand;
+
+/**
+ * @param {string[]} command
+ * @param {SpawnOptions} options
+ */
+function parseCommand(command, options) {
+  if (options?.privileged) {
+    return [...getPrivilegedCommand(), ...command];
+  }
+  return command;
+}
+
+/**
+ * @returns {string[]}
+ */
+function getPrivilegedCommand() {
+  if (typeof privilegedCommand !== "undefined") {
+    return privilegedCommand;
+  }
+
+  if (isWindows) {
+    return (privilegedCommand = []);
+  }
+
+  const sudo = ["sudo", "-n"];
+  const { error: sudoError } = spawnSync([...sudo, "true"]);
+  if (!sudoError) {
+    return (privilegedCommand = sudo);
+  }
+
+  const su = ["su", "-s", "sh", "root", "-c"];
+  const { error: suError } = spawnSync([...su, "true"]);
+  if (!suError) {
+    return (privilegedCommand = su);
+  }
+
+  const doas = ["doas", "-u", "root"];
+  const { error: doasError } = spawnSync([...doas, "true"]);
+  if (!doasError) {
+    return (privilegedCommand = doas);
+  }
+
+  return (privilegedCommand = []);
+}
+
 /**
  * @param {string[]} command
  * @param {SpawnOptions} options
  * @returns {Promise}
  */
 export async function spawn(command, options = {}) {
-  debugLog("$", 
...command); + const [cmd, ...args] = parseCommand(command, options); + debugLog("$", cmd, ...args); - const [cmd, ...args] = command; + const stdin = options["stdin"]; const spawnOptions = { cwd: options["cwd"] ?? process.cwd(), timeout: options["timeout"] ?? undefined, env: options["env"] ?? undefined, - stdio: ["ignore", "pipe", "pipe"], + stdio: [stdin ? "pipe" : "ignore", "pipe", "pipe"], ...options, }; @@ -145,6 +228,16 @@ export async function spawn(command, options = {}) { const result = new Promise((resolve, reject) => { const subprocess = nodeSpawn(cmd, args, spawnOptions); + if (typeof stdin !== "undefined") { + subprocess.stdin?.on("error", error => { + if (error.code !== "EPIPE") { + reject(error); + } + }); + subprocess.stdin?.write(stdin); + subprocess.stdin?.end(); + } + subprocess.stdout?.on("data", chunk => { stdout += chunk; }); @@ -215,9 +308,9 @@ export async function spawnSafe(command, options) { * @returns {SpawnResult} */ export function spawnSync(command, options = {}) { - debugLog("$", ...command); + const [cmd, ...args] = parseCommand(command, options); + debugLog("$", cmd, ...args); - const [cmd, ...args] = command; const spawnOptions = { cwd: options["cwd"] ?? process.cwd(), timeout: options["timeout"] ?? undefined, @@ -245,8 +338,8 @@ export function spawnSync(command, options = {}) { } else { exitCode = status ?? 1; signalCode = signal || undefined; - stdout = stdoutBuffer.toString(); - stderr = stderrBuffer.toString(); + stdout = stdoutBuffer?.toString(); + stderr = stderrBuffer?.toString(); } if (exitCode !== 0 && isWindows) { @@ -258,7 +351,7 @@ export function spawnSync(command, options = {}) { if (error || signalCode || exitCode !== 0) { const description = command.map(arg => (arg.includes(" ") ? `"${arg.replace(/"/g, '\\"')}"` : arg)).join(" "); - const cause = error || stderr.trim() || stdout.trim() || undefined; + const cause = error || stderr?.trim() || stdout?.trim() || undefined; if (signalCode) { error = new Error(`Command killed with ${signalCode}: ${description}`, { cause }); @@ -376,6 +469,20 @@ export function getRepository(cwd) { } } +/** + * @param {string} [cwd] + * @returns {string | undefined} + */ +export function getRepositoryOwner(cwd) { + const repository = getRepository(cwd); + if (repository) { + const [owner] = repository.split("/"); + if (owner) { + return owner; + } + } +} + /** * @param {string} [cwd] * @returns {string | undefined} @@ -490,7 +597,7 @@ export function isMainBranch(cwd) { */ export function isPullRequest() { if (isBuildkite) { - return getEnv("BUILDKITE_PULL_REQUEST", false) === "true"; + return !isNaN(parseInt(getEnv("BUILDKITE_PULL_REQUEST", false))); } if (isGithubAction) { @@ -656,7 +763,7 @@ export async function curl(url, options = {}) { try { if (filename && ok) { const buffer = await response.arrayBuffer(); - await writeFile(filename, new Uint8Array(buffer)); + writeFile(filename, new Uint8Array(buffer)); } else if (arrayBuffer && ok) { body = await response.arrayBuffer(); } else if (json && ok) { @@ -721,7 +828,7 @@ export function readFile(filename, options = {}) { } const relativePath = relative(process.cwd(), absolutePath); - debugLog("cat", relativePath); + debugLog("$", "cat", relativePath); let content; try { @@ -738,6 +845,53 @@ export function readFile(filename, options = {}) { return content; } +/** + * @param {string} filename + * @param {string | Buffer} content + * @param {object} [options] + * @param {number} [options.mode] + */ +export function writeFile(filename, content, options = {}) { + 
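+  // Unlike a bare writeFileSync, this creates missing parent directories
+  // first, then applies options.mode (e.g. 0o755) via chmod after writing.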
const parent = dirname(filename); + if (!existsSync(parent)) { + mkdirSync(parent, { recursive: true }); + } + + writeFileSync(filename, content); + + if (options["mode"]) { + chmodSync(filename, options["mode"]); + } +} + +/** + * @param {string | string[]} command + * @param {object} [options] + * @param {boolean} [options.required] + * @returns {string | undefined} + */ +export function which(command, options = {}) { + const commands = Array.isArray(command) ? command : [command]; + const executables = isWindows ? commands.flatMap(name => [name, `${name}.exe`, `${name}.cmd`]) : commands; + + const path = getEnv("PATH", false) || ""; + const binPaths = path.split(isWindows ? ";" : ":"); + + for (const binPath of binPaths) { + for (const executable of executables) { + const executablePath = join(binPath, executable); + if (existsSync(executablePath)) { + return executablePath; + } + } + } + + if (options["required"]) { + const description = commands.join(" or "); + throw new Error(`Command not found: ${description}`); + } +} + /** * @param {string} [cwd] * @param {string} [base] @@ -746,10 +900,10 @@ export function readFile(filename, options = {}) { */ export async function getChangedFiles(cwd, base, head) { const repository = getRepository(cwd); - base ||= getCommit(cwd); - head ||= `${base}^1`; + head ||= getCommit(cwd); + base ||= `${head}^1`; - const url = `https://api.github.com/repos/${repository}/compare/${head}...${base}`; + const url = `https://api.github.com/repos/${repository}/compare/${base}...${head}`; const { error, body } = await curl(url, { json: true }); if (error) { @@ -826,7 +980,7 @@ export function getBuildUrl() { */ export function getBuildLabel() { if (isBuildkite) { - const label = getEnv("BUILDKITE_GROUP_LABEL", false) || getEnv("BUILDKITE_LABEL", false); + const label = getEnv("BUILDKITE_LABEL", false) || getEnv("BUILDKITE_GROUP_LABEL", false); if (label) { return label; } @@ -840,6 +994,22 @@ export function getBuildLabel() { } } +/** + * @returns {number} + */ +export function getBootstrapVersion() { + if (isWindows) { + return 0; // TODO + } + const scriptPath = join(import.meta.dirname, "bootstrap.sh"); + const scriptContent = readFile(scriptPath, { cache: true }); + const match = /# Version: (\d+)/.exec(scriptContent); + if (match) { + return parseInt(match[1]); + } + return 0; +} + /** * @typedef {object} BuildArtifact * @property {string} [job] @@ -1013,6 +1183,17 @@ export async function getLastSuccessfulBuild() { } } +/** + * @param {string} filename + * @param {string} [cwd] + */ +export async function uploadArtifact(filename, cwd) { + if (isBuildkite) { + const relativePath = relative(cwd ?? 
process.cwd(), filename); + await spawnSafe(["buildkite-agent", "artifact", "upload", relativePath], { cwd, stdio: "inherit" }); + } +} + /** * @param {string} string * @returns {string} @@ -1021,6 +1202,17 @@ export function stripAnsi(string) { return string.replace(/\u001b\[\d+m/g, ""); } +/** + * @param {string} string + * @returns {string} + */ +export function escapeYaml(string) { + if (/[:"{}[\],&*#?|\-<>=!%@`]/.test(string)) { + return `"${string.replace(/"/g, '\\"')}"`; + } + return string; +} + /** * @param {string} string * @returns {string} @@ -1059,6 +1251,14 @@ export function escapeCodeBlock(string) { return string.replace(/`/g, "\\`"); } +/** + * @param {string} string + * @returns {string} + */ +export function escapePowershell(string) { + return string.replace(/'/g, "''").replace(/`/g, "``"); +} + /** * @returns {string} */ @@ -1090,14 +1290,6 @@ export function tmpdir() { return nodeTmpdir(); } -/** - * @param {string} string - * @returns {string} - */ -function escapePowershell(string) { - return string.replace(/'/g, "''").replace(/`/g, "``"); -} - /** * @param {string} filename * @param {string} [output] @@ -1159,24 +1351,79 @@ export function getArch() { return parseArch(process.arch); } +/** + * @returns {string} + */ +export function getKernel() { + const kernel = release(); + const match = /(\d+)\.(\d+)(?:\.(\d+))?/.exec(kernel); + + if (match) { + const [, major, minor, patch] = match; + if (patch) { + return `${major}.${minor}.${patch}`; + } + return `${major}.${minor}`; + } + + return kernel; +} + /** * @returns {"musl" | "gnu" | undefined} */ export function getAbi() { - if (isLinux) { - const arch = getArch() === "x64" ? "x86_64" : "aarch64"; - const muslLibPath = `/lib/ld-musl-${arch}.so.1`; - if (existsSync(muslLibPath)) { + if (!isLinux) { + return; + } + + if (existsSync("/etc/alpine-release")) { + return "musl"; + } + + const arch = getArch() === "x64" ? 
"x86_64" : "aarch64"; + const muslLibPath = `/lib/ld-musl-${arch}.so.1`; + if (existsSync(muslLibPath)) { + return "musl"; + } + + const gnuLibPath = `/lib/ld-linux-${arch}.so.2`; + if (existsSync(gnuLibPath)) { + return "gnu"; + } + + const { error, stdout } = spawnSync(["ldd", "--version"]); + if (!error) { + if (/musl/i.test(stdout)) { return "musl"; } - - const gnuLibPath = `/lib/ld-linux-${arch}.so.2`; - if (existsSync(gnuLibPath)) { + if (/gnu|glibc/i.test(stdout)) { return "gnu"; } } } +/** + * @returns {string | undefined} + */ +export function getAbiVersion() { + if (!isLinux) { + return; + } + + const { error, stdout } = spawnSync(["ldd", "--version"]); + if (!error) { + const match = /(\d+)\.(\d+)(?:\.(\d+))?/.exec(stdout); + if (match) { + const [, major, minor, patch] = match; + if (patch) { + return `${major}.${minor}.${patch}`; + } + return `${major}.${minor}`; + } + } +} + /** * @typedef {object} Target * @property {"darwin" | "linux" | "windows"} os @@ -1346,17 +1593,24 @@ export async function downloadTarget(target, release) { } /** - * @returns {string | undefined} + * @returns {string} */ -export function getTailscaleIp() { - let tailscale = "tailscale"; +export function getTailscale() { if (isMacOS) { const tailscaleApp = "/Applications/Tailscale.app/Contents/MacOS/tailscale"; if (existsSync(tailscaleApp)) { - tailscale = tailscaleApp; + return tailscaleApp; } } + return "tailscale"; +} + +/** + * @returns {string | undefined} + */ +export function getTailscaleIp() { + const tailscale = getTailscale(); const { error, stdout } = spawnSync([tailscale, "ip", "--1"]); if (!error) { return stdout.trim(); @@ -1405,7 +1659,31 @@ export function getUsername() { } /** - * @returns {string} + * @typedef {object} User + * @property {string} username + * @property {number} uid + * @property {number} gid + */ + +/** + * @param {string} username + * @returns {Promise} + */ +export async function getUser(username) { + if (isWindows) { + throw new Error("TODO: Windows"); + } + + const [uid, gid] = await Promise.all([ + spawnSafe(["id", "-u", username]).then(({ stdout }) => parseInt(stdout.trim())), + spawnSafe(["id", "-g", username]).then(({ stdout }) => parseInt(stdout.trim())), + ]); + + return { username, uid, gid }; +} + +/** + * @returns {string | undefined} */ export function getDistro() { if (isMacOS) { @@ -1413,10 +1691,15 @@ export function getDistro() { } if (isLinux) { + const alpinePath = "/etc/alpine-release"; + if (existsSync(alpinePath)) { + return "alpine"; + } + const releasePath = "/etc/os-release"; if (existsSync(releasePath)) { const releaseFile = readFile(releasePath, { cache: true }); - const match = releaseFile.match(/ID=\"(.*)\"/); + const match = releaseFile.match(/^ID=\"?(.*)\"?/m); if (match) { return match[1]; } @@ -1424,10 +1707,8 @@ export function getDistro() { const { error, stdout } = spawnSync(["lsb_release", "-is"]); if (!error) { - return stdout.trim(); + return stdout.trim().toLowerCase(); } - - return "Linux"; } if (isWindows) { @@ -1435,17 +1716,13 @@ export function getDistro() { if (!error) { return stdout.trim(); } - - return "Windows"; } - - return `${process.platform} ${process.arch}`; } /** * @returns {string | undefined} */ -export function getDistroRelease() { +export function getDistroVersion() { if (isMacOS) { const { error, stdout } = spawnSync(["sw_vers", "-productVersion"]); if (!error) { @@ -1454,10 +1731,20 @@ export function getDistroRelease() { } if (isLinux) { + const alpinePath = "/etc/alpine-release"; + if 
+/**
+ * @param {string | Record} name
+ * @param {Cloud} [cloud]
+ * @returns {Promise}
+ */
+export async function getCloudMetadata(name, cloud) {
+  cloud ??= await getCloud();
+  if (!cloud) {
+    return;
+  }
+
+  if (typeof name === "object") {
+    name = name[cloud];
+  }
+
+  let url;
+  let headers;
+  if (cloud === "aws") {
+    url = new URL(name, "http://169.254.169.254/latest/meta-data/");
+  } else if (cloud === "google") {
+    url = new URL(name, "http://metadata.google.internal/computeMetadata/v1/instance/");
+    headers = { "Metadata-Flavor": "Google" };
+  } else {
+    throw new Error(`Unsupported cloud: ${inspect(cloud)}`);
+  }
+
+  const { 
error, body } = await curl(url, { headers, retries: 0 }); + if (error) { + return; + } + + return body.trim(); +} + +/** + * @param {string} tag + * @param {Cloud} [cloud] + * @returns {Promise} + */ +export function getCloudMetadataTag(tag, cloud) { + const metadata = { + "aws": `tags/instance/${tag}`, + }; + + return getCloudMetadata(metadata, cloud); +} + +/** + * @param {string} name + * @returns {Promise} + */ +export async function getBuildMetadata(name) { + if (isBuildkite) { + const { error, stdout } = await spawn(["buildkite-agent", "meta-data", "get", name]); + if (!error) { + const value = stdout.trim(); + if (value) { + return value; + } + } + } +} + +/** + * @typedef ConnectOptions + * @property {string} hostname + * @property {number} port + * @property {number} [retries] + */ + +/** + * @param {ConnectOptions} options + * @returns {Promise} + */ +export async function waitForPort(options) { + const { hostname, port, retries = 10 } = options; + + let cause; + for (let i = 0; i < retries; i++) { + if (cause) { + await new Promise(resolve => setTimeout(resolve, Math.pow(2, i) * 1000)); + } + + const connected = new Promise((resolve, reject) => { + const socket = connect({ host: hostname, port }); + socket.on("connect", () => { + socket.destroy(); + resolve(); + }); + socket.on("error", error => { + socket.destroy(); + reject(error); + }); + }); + + try { + return await connected; + } catch (error) { + cause = error; + } + } + + return cause; +} /** * @returns {Promise} */ @@ -1522,6 +2034,52 @@ export function getGithubUrl() { return new URL(getEnv("GITHUB_SERVER_URL", false) || "https://github.com"); } +/** + * @param {object} obj + * @param {number} indent + * @returns {string} + */ +export function toYaml(obj, indent = 0) { + const spaces = " ".repeat(indent); + let result = ""; + for (const [key, value] of Object.entries(obj)) { + if (value === undefined) { + continue; + } + if (value === null) { + result += `${spaces}${key}: null\n`; + continue; + } + if (Array.isArray(value)) { + result += `${spaces}${key}:\n`; + value.forEach(item => { + if (typeof item === "object" && item !== null) { + result += `${spaces}- \n${toYaml(item, indent + 2) + .split("\n") + .map(line => `${spaces} ${line}`) + .join("\n")}\n`; + } else { + result += `${spaces}- ${item}\n`; + } + }); + continue; + } + if (typeof value === "object") { + result += `${spaces}${key}:\n${toYaml(value, indent + 2)}`; + continue; + } + if ( + typeof value === "string" && + (value.includes(":") || value.includes("#") || value.includes("'") || value.includes('"') || value.includes("\n")) + ) { + result += `${spaces}${key}: "${value.replace(/"/g, '\\"')}"\n`; + continue; + } + result += `${spaces}${key}: ${value}\n`; + } + return result; +} + /** * @param {string} title * @param {function} [fn] @@ -1561,11 +2119,13 @@ export function printEnvironment() { startGroup("Machine", () => { console.log("Operating System:", getOs()); console.log("Architecture:", getArch()); + console.log("Kernel:", getKernel()); if (isLinux) { console.log("ABI:", getAbi()); + console.log("ABI Version:", getAbiVersion()); } console.log("Distro:", getDistro()); - console.log("Release:", getDistroRelease()); + console.log("Distro Version:", getDistroVersion()); console.log("Hostname:", getHostname()); if (isCI) { console.log("Tailscale IP:", getTailscaleIp()); diff --git a/src/ArenaAllocator.zig b/src/ArenaAllocator.zig deleted file mode 100644 index 4c62038cabc63a..00000000000000 --- a/src/ArenaAllocator.zig +++ /dev/null @@ -1,248 +0,0 @@ 
-const std = @import("std"); -const bun = @import("root").bun; -const assert = bun.assert; -const mem = std.mem; -const Allocator = std.mem.Allocator; - -/// This allocator takes an existing allocator, wraps it, and provides an interface -/// where you can allocate without freeing, and then free it all together. -pub const ArenaAllocator = struct { - child_allocator: Allocator, - state: State, - - /// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator - /// as a memory-saving optimization. - pub const State = struct { - buffer_list: std.SinglyLinkedList(usize) = .{}, - end_index: usize = 0, - - pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator { - return .{ - .child_allocator = child_allocator, - .state = self, - }; - } - }; - - pub fn allocator(self: *ArenaAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .free = free, - }, - }; - } - - const BufNode = std.SinglyLinkedList(usize).Node; - - pub fn init(child_allocator: Allocator) ArenaAllocator { - return (State{}).promote(child_allocator); - } - - pub fn deinit(self: ArenaAllocator) void { - // NOTE: When changing this, make sure `reset()` is adjusted accordingly! - - var it = self.state.buffer_list.first; - while (it) |node| { - // this has to occur before the free because the free frees node - const next_it = node.next; - const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); - const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; - self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); - it = next_it; - } - } - - pub const ResetMode = union(enum) { - /// Releases all allocated memory in the arena. - free_all, - /// This will pre-heat the arena for future allocations by allocating a - /// large enough buffer for all previously done allocations. - /// Preheating will speed up the allocation process by invoking the backing allocator - /// less often than before. If `reset()` is used in a loop, this means that after the - /// biggest operation, no memory allocations are performed anymore. - retain_capacity, - /// This is the same as `retain_capacity`, but the memory will be shrunk to - /// this value if it exceeds the limit. - retain_with_limit: usize, - }; - /// Queries the current memory use of this arena. - /// This will **not** include the storage required for internal keeping. - pub fn queryCapacity(self: ArenaAllocator) usize { - var size: usize = 0; - var it = self.state.buffer_list.first; - while (it) |node| : (it = node.next) { - // Compute the actually allocated size excluding the - // linked list node. - size += node.data - @sizeOf(BufNode); - } - return size; - } - /// Resets the arena allocator and frees all allocated memory. - /// - /// `mode` defines how the currently allocated memory is handled. - /// See the variant documentation for `ResetMode` for the effects of each mode. - /// - /// The function will return whether the reset operation was successful or not. - /// If the reallocation failed `false` is returned. The arena will still be fully - /// functional in that case, all memory is released. Future allocations just might - /// be slower. - /// - /// NOTE: If `mode` is `free_mode`, the function will always return `true`. 
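// NOTE: a usage sketch for the reset() semantics documented above (illustrative
// only, not part of this patch), assuming the same API as std.heap.ArenaAllocator:
//
//     // Reuse one arena across passes, keeping at most 1 MiB of capacity.
//     if (!arena.reset(.{ .retain_with_limit = 1 << 20 })) {
//         // Failure only means preheating failed; the arena stays fully
//         // usable, future allocations may just be slower.
//         std.log.warn("arena preheat failed", .{});
//     }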
- pub fn reset(self: *ArenaAllocator, mode: ResetMode) bool { - // Some words on the implementation: - // The reset function can be implemented with two basic approaches: - // - Counting how much bytes were allocated since the last reset, and storing that - // information in State. This will make reset fast and alloc only a teeny tiny bit - // slower. - // - Counting how much bytes were allocated by iterating the chunk linked list. This - // will make reset slower, but alloc() keeps the same speed when reset() as if reset() - // would not exist. - // - // The second variant was chosen for implementation, as with more and more calls to reset(), - // the function will get faster and faster. At one point, the complexity of the function - // will drop to amortized O(1), as we're only ever having a single chunk that will not be - // reallocated, and we're not even touching the backing allocator anymore. - // - // Thus, only the first hand full of calls to reset() will actually need to iterate the linked - // list, all future calls are just taking the first node, and only resetting the `end_index` - // value. - const requested_capacity = switch (mode) { - .retain_capacity => self.queryCapacity(), - .retain_with_limit => |limit| @min(limit, self.queryCapacity()), - .free_all => 0, - }; - if (requested_capacity == 0) { - // just reset when we don't have anything to reallocate - self.deinit(); - self.state = State{}; - return true; - } - const total_size = requested_capacity + @sizeOf(BufNode); - const align_bits = std.math.log2_int(usize, @alignOf(BufNode)); - // Free all nodes except for the last one - var it = self.state.buffer_list.first; - const maybe_first_node = while (it) |node| { - // this has to occur before the free because the free frees node - const next_it = node.next; - if (next_it == null) - break node; - const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data]; - self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress()); - it = next_it; - } else null; - assert(maybe_first_node == null or maybe_first_node.?.next == null); - // reset the state before we try resizing the buffers, so we definitely have reset the arena to 0. - self.state.end_index = 0; - if (maybe_first_node) |first_node| { - self.state.buffer_list.first = first_node; - // perfect, no need to invoke the child_allocator - if (first_node.data == total_size) - return true; - const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data]; - if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) { - // successful resize - first_node.data = total_size; - } else { - // manual realloc - const new_ptr = self.child_allocator.rawAlloc(total_size, align_bits, @returnAddress()) orelse { - // we failed to preheat the arena properly, signal this to the user. 
- return false; - }; - self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress()); - const node: *BufNode = @ptrCast(@alignCast(new_ptr)); - node.* = .{ .data = total_size }; - self.state.buffer_list.first = node; - } - } - return true; - } - - fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) ?*BufNode { - const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16); - const big_enough_len = prev_len + actual_min_size; - const len = big_enough_len + big_enough_len / 2; - const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); - const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse - return null; - const buf_node: *BufNode = @ptrCast(@alignCast(ptr)); - buf_node.* = .{ .data = len }; - self.state.buffer_list.prepend(buf_node); - self.state.end_index = 0; - return buf_node; - } - - fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 { - const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); - _ = ra; - - const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)); - var cur_node = if (self.state.buffer_list.first) |first_node| - first_node - else - (self.createNode(0, n + ptr_align) orelse return null); - while (true) { - const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data]; - const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; - const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(usize, addr, ptr_align); - const adjusted_index = self.state.end_index + (adjusted_addr - addr); - const new_end_index = adjusted_index + n; - - if (new_end_index <= cur_buf.len) { - const result = cur_buf[adjusted_index..new_end_index]; - self.state.end_index = new_end_index; - return result.ptr; - } - - const bigger_buf_size = @sizeOf(BufNode) + new_end_index; - const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode)); - if (self.child_allocator.rawResize(cur_alloc_buf, log2_align, bigger_buf_size, @returnAddress())) { - cur_node.data = bigger_buf_size; - } else { - // Allocate a new node if that's not possible - cur_node = self.createNode(cur_buf.len, n + ptr_align) orelse return null; - } - } - } - - fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { - const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); - _ = log2_buf_align; - _ = ret_addr; - - const cur_node = self.state.buffer_list.first orelse return false; - const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; - if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) { - // It's not the most recent allocation, so it cannot be expanded, - // but it's fine if they want to make it smaller. 
- return new_len <= buf.len; - } - - if (buf.len >= new_len) { - self.state.end_index -= buf.len - new_len; - return true; - } else if (cur_buf.len - self.state.end_index >= new_len - buf.len) { - self.state.end_index += new_len - buf.len; - return true; - } else { - return false; - } - } - - fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { - _ = log2_buf_align; - _ = ret_addr; - - const self: *ArenaAllocator = @ptrCast(@alignCast(ctx)); - - const cur_node = self.state.buffer_list.first orelse return; - const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data]; - - if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) { - self.state.end_index -= buf.len; - } - } -}; diff --git a/src/Global.zig b/src/Global.zig index 94b0bc70c3bdb4..b219cf74671067 100644 --- a/src/Global.zig +++ b/src/Global.zig @@ -118,6 +118,10 @@ pub fn exit(code: u32) noreturn { switch (Environment.os) { .mac => std.c.exit(@bitCast(code)), + .windows => { + Bun__onExit(); + std.os.windows.kernel32.ExitProcess(code); + }, else => bun.C.quick_exit(@bitCast(code)), } } diff --git a/src/StandaloneModuleGraph.zig b/src/StandaloneModuleGraph.zig index a58989280f8b97..e3daa4da1713db 100644 --- a/src/StandaloneModuleGraph.zig +++ b/src/StandaloneModuleGraph.zig @@ -1072,7 +1072,7 @@ pub const StandaloneModuleGraph = struct { if (item.data != .e_string) return error.InvalidSourceMap; - const decoded = try item.data.e_string.stringDecodedUTF8(arena); + const decoded = try item.data.e_string.stringCloned(arena); const offset = string_payload.items.len; try string_payload.appendSlice(decoded); @@ -1089,7 +1089,7 @@ pub const StandaloneModuleGraph = struct { if (item.data != .e_string) return error.InvalidSourceMap; - const utf8 = try item.data.e_string.stringDecodedUTF8(arena); + const utf8 = try item.data.e_string.stringCloned(arena); defer arena.free(utf8); const offset = string_payload.items.len; diff --git a/src/allocators.zig b/src/allocators.zig index 727146960f8f55..5b3fd6cc824314 100644 --- a/src/allocators.zig +++ b/src/allocators.zig @@ -177,6 +177,11 @@ pub fn OverflowList(comptime ValueType: type, comptime count: comptime_int) type }; } +/// "Formerly-BSSList" +/// It's not actually BSS anymore. +/// +/// We do keep a pointer to it globally, but because the data is not zero-initialized, it ends up taking space in the object file. +/// We don't want to spend 1-2 MB on these structs. 
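// NOTE: a sketch of the motivation stated above (illustrative, not part of this
// patch; names are invented). A global held by value with `undefined` contents
// is not zero-filled, so it cannot live in .bss and instead takes space in the
// object file, while a global pointer costs one word and defers the buffer to
// the heap at init():
//
//     var by_value: [1 << 20]u8 = undefined; // ~1 MiB baked into the binary
//     var by_pointer: *[1 << 20]u8 = undefined; // pointer-sized; heap-allocated later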
pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type { const count = _count * 2; const max_index = count - 1; @@ -205,7 +210,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type { backing_buf: [count]ValueType = undefined, used: u32 = 0, - pub var instance: Self = undefined; + pub var instance: *Self = undefined; pub var loaded = false; pub inline fn blockIndex(index: u31) usize { @@ -214,7 +219,8 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type { pub fn init(allocator: std.mem.Allocator) *Self { if (!loaded) { - instance = Self{ + instance = bun.default_allocator.create(Self) catch bun.outOfMemory(); + instance.* = Self{ .allocator = allocator, .tail = OverflowBlock{}, }; @@ -222,7 +228,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type { loaded = true; } - return &instance; + return instance; } pub fn isOverflowing() bool { @@ -289,7 +295,7 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type slice_buf: [count][]const u8 = undefined, slice_buf_used: u16 = 0, mutex: Mutex = .{}, - pub var instance: Self = undefined; + pub var instance: *Self = undefined; var loaded: bool = false; // only need the mutex on append @@ -299,14 +305,15 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type pub fn init(allocator: std.mem.Allocator) *Self { if (!loaded) { - instance = Self{ + instance = bun.default_allocator.create(Self) catch bun.outOfMemory(); + instance.* = Self{ .allocator = allocator, .backing_buf_used = 0, }; loaded = true; } - return &instance; + return instance; } pub inline fn isOverflowing() bool { @@ -469,20 +476,21 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_ backing_buf: [count]ValueType = undefined, backing_buf_used: u16 = 0, - pub var instance: Self = undefined; + pub var instance: *Self = undefined; var loaded: bool = false; pub fn init(allocator: std.mem.Allocator) *Self { if (!loaded) { - instance = Self{ + instance = bun.default_allocator.create(Self) catch bun.outOfMemory(); + instance.* = Self{ .index = IndexMap{}, .allocator = allocator, }; loaded = true; } - return &instance; + return instance; } pub fn isOverflowing() bool { @@ -621,18 +629,19 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_ key_list_overflow: OverflowList([]u8, count / 4) = OverflowList([]u8, count / 4){}, const Self = @This(); - pub var instance: Self = undefined; + pub var instance: *Self = undefined; pub var instance_loaded = false; pub fn init(allocator: std.mem.Allocator) *Self { if (!instance_loaded) { - instance = Self{ + instance = bun.default_allocator.create(Self) catch bun.outOfMemory(); + instance.* = Self{ .map = BSSMapType.init(allocator), }; instance_loaded = true; } - return &instance; + return instance; } pub fn isOverflowing() bool { diff --git a/src/api/schema.zig b/src/api/schema.zig index fa851862807022..002f43223f1e2e 100644 --- a/src/api/schema.zig +++ b/src/api/schema.zig @@ -2816,7 +2816,7 @@ pub const Api = struct { fn expectString(this: *Parser, expr: js_ast.Expr) !void { switch (expr.data) { - .e_string, .e_utf8_string => {}, + .e_string => {}, else => { this.log.addErrorFmt(this.source, expr.loc, this.allocator, "expected string but received {}", .{ @as(js_ast.Expr.Tag, expr.data), diff --git a/src/bake/BakeGlobalObject.cpp b/src/bake/BakeGlobalObject.cpp index 14b8809fc8b34f..44ee0e1854ae02 100644 --- a/src/bake/BakeGlobalObject.cpp +++ 
b/src/bake/BakeGlobalObject.cpp
@@ -2,38 +2,50 @@
 #include "JSNextTickQueue.h"
 #include "JavaScriptCore/GlobalObjectMethodTable.h"
 #include "JavaScriptCore/JSInternalPromise.h"
-#include "ProcessIdentifier.h"
 #include "headers-handwritten.h"
+#include "JavaScriptCore/JSModuleLoader.h"
+#include "JavaScriptCore/Completion.h"
 
-namespace Bake {
+extern "C" BunString BakeProdResolve(JSC::JSGlobalObject*, BunString a, BunString b);
 
-extern "C" void BakeInitProcessIdentifier()
-{
-    // assert is on main thread
-    WebCore::Process::identifier();
-}
+namespace Bake {
 
 JSC::JSInternalPromise*
-bakeModuleLoaderImportModule(JSC::JSGlobalObject* jsGlobalObject,
-    JSC::JSModuleLoader*, JSC::JSString* moduleNameValue,
+bakeModuleLoaderImportModule(JSC::JSGlobalObject* global,
+    JSC::JSModuleLoader* moduleLoader, JSC::JSString* moduleNameValue,
     JSC::JSValue parameters, const JSC::SourceOrigin& sourceOrigin)
 {
-    // TODO: forward this to the runtime?
-    JSC::VM& vm = jsGlobalObject->vm();
-    WTF::String keyString = moduleNameValue->getString(jsGlobalObject);
-    auto err = JSC::createTypeError(
-        jsGlobalObject,
-        WTF::makeString(
-            "Dynamic import to '"_s, keyString,
-            "' should have been replaced with a hook into the module runtime"_s));
-    auto* promise = JSC::JSInternalPromise::create(
-        vm, jsGlobalObject->internalPromiseStructure());
-    promise->reject(jsGlobalObject, err);
-    return promise;
-}
+    WTF::String keyString = moduleNameValue->getString(global);
+    if (keyString.startsWith("bake:/"_s)) {
+        JSC::VM& vm = global->vm();
+        return JSC::importModule(global, JSC::Identifier::fromString(vm, keyString),
+            JSC::jsUndefined(), parameters, JSC::jsUndefined());
+    }
 
-extern "C" BunString BakeProdResolve(JSC::JSGlobalObject*, BunString a, BunString b);
+    if (!sourceOrigin.isNull() && sourceOrigin.string().startsWith("bake:/"_s)) {
+        JSC::VM& vm = global->vm();
+        auto scope = DECLARE_THROW_SCOPE(vm);
+
+        WTF::String refererString = sourceOrigin.string();
+        WTF::String keyString = moduleNameValue->getString(global);
+
+        if (!keyString) {
+            auto promise = JSC::JSInternalPromise::create(vm, global->internalPromiseStructure());
+            promise->reject(global, JSC::createError(global, "import() requires a string"_s));
+            return promise;
+        }
+
+        BunString result = BakeProdResolve(global, Bun::toString(refererString), Bun::toString(keyString));
+        RETURN_IF_EXCEPTION(scope, nullptr);
+
+        return JSC::importModule(global, JSC::Identifier::fromString(vm, result.toWTFString()),
+            JSC::jsUndefined(), parameters, JSC::jsUndefined());
+    }
+
+    // Use Zig::GlobalObject's function
+    return jsCast<Zig::GlobalObject*>(global)->moduleLoaderImportModule(global, moduleLoader, moduleNameValue, parameters, sourceOrigin);
+}
 
 JSC::Identifier bakeModuleLoaderResolve(JSC::JSGlobalObject* jsGlobal,
     JSC::JSModuleLoader* loader, JSC::JSValue key,
@@ -43,19 +55,21 @@ JSC::Identifier bakeModuleLoaderResolve(JSC::JSGlobalObject* jsGlobal,
     JSC::VM& vm = global->vm();
     auto scope = DECLARE_THROW_SCOPE(vm);
 
-    if (global->isProduction()) {
-        WTF::String keyString = key.toWTFString(global);
-        RETURN_IF_EXCEPTION(scope, vm.propertyNames->emptyIdentifier);
+    ASSERT(referrer.isString());
+    WTF::String refererString = jsCast<JSC::JSString*>(referrer)->getString(global);
 
-        ASSERT(referrer.isString());
-        auto refererString = jsCast<JSC::JSString*>(referrer)->value(global);
+    WTF::String keyString = key.toWTFString(global);
+    RETURN_IF_EXCEPTION(scope, vm.propertyNames->emptyIdentifier);
 
+    if (refererString.startsWith("bake:/"_s) || (refererString == "."_s && keyString.startsWith("bake:/"_s))) {
         BunString result = BakeProdResolve(global, Bun::toString(referrer.getString(global)), Bun::toString(keyString));
+        RETURN_IF_EXCEPTION(scope, vm.propertyNames->emptyIdentifier);
+
         return JSC::Identifier::fromString(vm, result.toWTFString(BunString::ZeroCopy));
-    } else {
-        JSC::throwTypeError(global, scope, "External imports are not allowed in Bun Bake's dev server. This is a bug in Bun's bundler."_s);
-        return vm.propertyNames->emptyIdentifier;
     }
+
+    // Use Zig::GlobalObject's function
+    return Zig::GlobalObject::moduleLoaderResolve(jsGlobal, loader, key, referrer, origin);
 }
 
 #define INHERIT_HOOK_METHOD(name) \
@@ -100,12 +114,12 @@ void GlobalObject::finishCreation(JSC::VM& vm)
     ASSERT(inherits(info()));
 }
 
+struct BunVirtualMachine;
 extern "C" BunVirtualMachine* Bun__getVM();
 
 // A lot of this function is taken from 'Zig__GlobalObject__create'
 // TODO: remove this entire method
-extern "C" GlobalObject* BakeCreateDevGlobal(DevServer* owner,
-    void* console)
+extern "C" GlobalObject* BakeCreateProdGlobal(void* console)
 {
     JSC::VM& vm = JSC::VM::create(JSC::HeapType::Large).leakRef();
     vm.heap.acquireAccess();
@@ -119,7 +133,6 @@ extern "C" GlobalObject* BakeCreateDevGlobal(DevServer* owner,
     if (!global)
         BUN_PANIC("Failed to create BakeGlobalObject");
 
-    global->m_devServer = owner;
     global->m_bunVM = bunVM;
 
     JSC::gcProtect(global);
@@ -142,25 +155,4 @@ extern "C" GlobalObject* BakeCreateDevGlobal(DevServer* owner,
     return global;
 }
 
-extern "C" GlobalObject* BakeCreateProdGlobal(JSC::VM* vm, void* console)
-{
-    JSC::JSLockHolder locker(vm);
-    BunVirtualMachine* bunVM = Bun__getVM();
-
-    JSC::Structure* structure = GlobalObject::createStructure(*vm);
-    GlobalObject* global = GlobalObject::create(*vm, structure, &GlobalObject::s_globalObjectMethodTable);
-    if (!global)
-        BUN_PANIC("Failed to create BakeGlobalObject");
-
-    global->m_devServer = nullptr;
-    global->m_bunVM = bunVM;
-
-    JSC::gcProtect(global);
-
-    global->setConsole(console);
-    global->setStackTraceLimit(10); // Node.js defaults to 10
-
-    return global;
-}
-
 }; // namespace Bake
diff --git a/src/bake/BakeGlobalObject.h b/src/bake/BakeGlobalObject.h
index 406a509ec1588c..af2b3490f96b4d 100644
--- a/src/bake/BakeGlobalObject.h
+++ b/src/bake/BakeGlobalObject.h
@@ -4,17 +4,10 @@
 
 namespace Bake {
 
-struct DevServer; // DevServer.zig
-struct Route; // DevServer.zig
-struct BunVirtualMachine;
-
 class GlobalObject : public Zig::GlobalObject {
 public:
     using Base = Zig::GlobalObject;
 
-    /// Null if in production
-    DevServer* m_devServer;
-
     template<typename, JSC::SubspaceAccess mode>
     static JSC::GCClient::IsoSubspace* subspaceFor(JSC::VM& vm)
     {
         if constexpr (mode == JSC::SubspaceAccess::Concurrently)
@@ -31,16 +24,10 @@ class GlobalObject : public Zig::GlobalObject {
     static const JSC::GlobalObjectMethodTable s_globalObjectMethodTable;
     static GlobalObject* create(JSC::VM& vm, JSC::Structure* structure, const JSC::GlobalObjectMethodTable* methodTable);
 
-    ALWAYS_INLINE bool isProduction() const { return !m_devServer; }
-
     void finishCreation(JSC::VM& vm);
 
     GlobalObject(JSC::VM& vm, JSC::Structure* structure, const JSC::GlobalObjectMethodTable* methodTable)
         : Zig::GlobalObject(vm, structure, methodTable)
     {
     }
 };
 
-// Zig API
-extern "C" void KitInitProcessIdentifier();
-extern "C" GlobalObject* KitCreateDevGlobal(DevServer* owner, void* console);
-
 }; // namespace Kit
diff --git a/src/bake/BakeProduction.cpp b/src/bake/BakeProduction.cpp
index 726a8ea258538d..887dbb565e8b68
--- a/src/bake/BakeProduction.cpp
+++ b/src/bake/BakeProduction.cpp
@@ -6,25 +6,33 @@
 
 namespace Bake {
 
-extern "C" JSC::JSPromise* BakeRenderRoutesForProd(
+extern "C" JSC::JSPromise* BakeRenderRoutesForProdStatic(
     JSC::JSGlobalObject* global,
-    BunString outbase,
-    JSC::JSValue renderStaticCallback,
+    BunString outBase,
+    JSC::JSValue allServerFiles,
+    JSC::JSValue renderStatic,
     JSC::JSValue clientEntryUrl,
+    JSC::JSValue pattern,
     JSC::JSValue files,
-    JSC::JSValue patterns,
+    JSC::JSValue typeAndFlags,
+    JSC::JSValue sourceRouteFiles,
+    JSC::JSValue paramInformation,
     JSC::JSValue styles)
 {
     JSC::VM& vm = global->vm();
-    JSC::JSFunction* cb = JSC::JSFunction::create(vm, global, WebCore::bakeRenderRoutesForProdCodeGenerator(vm), global);
+    JSC::JSFunction* cb = JSC::JSFunction::create(vm, global, WebCore::bakeRenderRoutesForProdStaticCodeGenerator(vm), global);
     JSC::CallData callData = JSC::getCallData(cb);
 
     JSC::MarkedArgumentBuffer args;
-    args.append(JSC::jsString(vm, outbase.toWTFString()));
-    args.append(renderStaticCallback);
+    args.append(JSC::jsString(vm, outBase.toWTFString()));
+    args.append(allServerFiles);
+    args.append(renderStatic);
     args.append(clientEntryUrl);
+    args.append(pattern);
     args.append(files);
-    args.append(patterns);
+    args.append(typeAndFlags);
+    args.append(sourceRouteFiles);
+    args.append(paramInformation);
     args.append(styles);
 
     NakedPtr<JSC::Exception> returnedException = nullptr;
diff --git a/src/bake/BakeSourceProvider.cpp b/src/bake/BakeSourceProvider.cpp
index b821d136703192..cf7ef839ab8284
--- a/src/bake/BakeSourceProvider.cpp
+++ b/src/bake/BakeSourceProvider.cpp
@@ -8,33 +8,40 @@
 #include "JavaScriptCore/JSLock.h"
 #include "JavaScriptCore/JSMap.h"
 #include "JavaScriptCore/JSModuleLoader.h"
+#include "JavaScriptCore/JSModuleRecord.h"
 #include "JavaScriptCore/JSString.h"
 #include "JavaScriptCore/JSModuleNamespaceObject.h"
+#include "ImportMetaObject.h"
 
 namespace Bake {
 
-extern "C" LoadServerCodeResult BakeLoadInitialServerCode(GlobalObject* global, BunString source) {
+extern "C" JSC::EncodedJSValue BakeLoadInitialServerCode(GlobalObject* global, BunString source, bool separateSSRGraph) {
     JSC::VM& vm = global->vm();
     auto scope = DECLARE_THROW_SCOPE(vm);
 
-    String string = "bake://server.js"_s;
-    JSC::JSString* key = JSC::jsString(vm, string);
+    String string = "bake://server-runtime.js"_s;
     JSC::SourceOrigin origin = JSC::SourceOrigin(WTF::URL(string));
     JSC::SourceCode sourceCode = JSC::SourceCode(DevSourceProvider::create(
         source.toWTFString(),
         origin,
         WTFMove(string),
         WTF::TextPosition(),
-        JSC::SourceProviderSourceType::Module
+        JSC::SourceProviderSourceType::Program
     ));
 
-    global->moduleLoader()->provideFetch(global, key, sourceCode);
-    RETURN_IF_EXCEPTION(scope, {});
-
-    JSC::JSInternalPromise* internalPromise = global->moduleLoader()->loadAndEvaluateModule(global, key, JSC::jsUndefined(), JSC::jsUndefined());
-    RETURN_IF_EXCEPTION(scope, {});
+    JSC::JSValue fnValue = vm.interpreter.executeProgram(sourceCode, global, global);
+    RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode({}));
+
+    RELEASE_ASSERT(fnValue);
+
+    JSC::JSFunction* fn = jsCast<JSC::JSFunction*>(fnValue);
+    JSC::CallData callData = JSC::getCallData(fn);
+
+    JSC::MarkedArgumentBuffer args;
+    args.append(JSC::jsBoolean(separateSSRGraph)); // separateSSRGraph
+    args.append(Zig::ImportMetaObject::create(global, "bake://server-runtime.js"_s)); // importMeta
 
-    return { internalPromise, key };
+    return JSC::JSValue::encode(JSC::call(global, fn, callData, JSC::jsUndefined(), args));
 }
 
 extern "C" JSC::JSInternalPromise* BakeLoadModuleByKey(GlobalObject* global, JSC::JSString* key) {
diff --git a/src/bake/BakeSourceProvider.h b/src/bake/BakeSourceProvider.h
index 191dd927f72c9d..2d821fc40116d0 100644
--- a/src/bake/BakeSourceProvider.h
+++ b/src/bake/BakeSourceProvider.h
@@ -6,11 +6,6 @@
 
 namespace Bake {
 
-struct LoadServerCodeResult {
-    JSC::JSInternalPromise* promise;
-    JSC::JSString* key;
-};
-
 class DevSourceProvider final : public JSC::StringSourceProvider {
 public:
     static Ref<DevSourceProvider> create(
diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig
index c07599f638dc0d..fe5e658d45d414 100644
--- a/src/bake/DevServer.zig
+++ b/src/bake/DevServer.zig
@@ -1,7 +1,11 @@
-//! Instance of the development server. Controls an event loop, web server,
-//! bundling state, filesystem watcher, and JavaScript VM instance.
+//! Instance of the development server. Attaches to an instance of `Bun.serve`,
+//! controlling bundler, routing, and hot module reloading.
 //!
-//! All work is cached in-memory.
+//! Reprocessing files that did not change is banned; by having perfect
+//! incremental tracking over the project, editing a file's contents (aside
+//! from adjusting imports) must always rebundle only that one file.
+//!
+//! All work is held in-memory, using manually managed data-oriented design.
 //!
 //! TODO: Currently does not have a `deinit()`, as it was assumed to be alive for
 //! the remainder of this process' lifespan. Later, it will be required to fully
@@ -11,14 +15,12 @@
 pub const debug = bun.Output.Scoped(.Bake, false);
 pub const igLog = bun.Output.scoped(.IncrementalGraph, false);
 
 pub const Options = struct {
-    allocator: ?Allocator = null, // defaults to a named heap
-    cwd: []u8,
-    routes: []Route,
+    root: []const u8,
     framework: bake.Framework,
-    listen_config: uws.AppListenConfig = .{ .port = 3000 },
     dump_sources: ?[]const u8 = if (Environment.isDebug) ".bake-debug" else null,
+    dump_state_on_crash: bool = bun.FeatureFlags.bake_debugging_features,
     verbose_watcher: bool = false,
-    // TODO: make it required to inherit a js VM
+    vm: *VirtualMachine,
 };
 
 // The fields `client_graph`, `server_graph`, and `directory_watchers` all
@@ -29,28 +31,48 @@ pub const Options = struct {
 /// Used for all server-wide allocations. In debug, this shows up in
 /// a separate named heap. Thread-safe.
 allocator: Allocator,
-/// Project root directory. For the HMR runtime, its
-/// module IDs are strings relative to this.
-cwd: []const u8,
+/// Absolute path to project root directory. For the HMR
+/// runtime, its module IDs are strings relative to this.
+root: []const u8,
 /// Hex string generated by hashing the framework config and bun revision.
 /// Embedded in client bundles and sent when the HMR socket is opened;
 /// when the values mismatch, the page is forcibly reloaded.
 configuration_hash_key: [16]u8,
-
-// UWS App
-app: *App,
-routes: []Route,
-address: struct {
-    port: u16,
-    hostname: [*:0]const u8,
-},
-listener: ?*App.ListenSocket,
-
-// Server Runtime
-server_global: *DevGlobalObject,
+/// The virtual machine (global object) to execute code in.
 vm: *VirtualMachine,
+/// May be `null` if not attached to an HTTP server yet.
+server: ?bun.JSC.API.AnyServer,
+/// Contains the tree of routes. This structure references files by `FileIndex`.
+router: FrameworkRouter,
+/// Every navigable route has bundling state here.
+route_bundles: ArrayListUnmanaged(RouteBundle),
+/// All access into IncrementalGraph is guarded by a DebugThreadLock. This is
+/// only a debug assertion as contention to this is always a bug; if a bundle is
+/// active and a file is changed, that change is placed into the next bundle.
+graph_safety_lock: bun.DebugThreadLock,
+client_graph: IncrementalGraph(.client),
+server_graph: IncrementalGraph(.server),
+/// State populated during bundling and hot updates. Often cleared
+incremental_result: IncrementalResult,
+/// CSS files are accessible via `/_bun/css/<hash>.css`
+/// Value is bundled code owned by `dev.allocator`
+css_files: AutoArrayHashMapUnmanaged(u64, []const u8),
+/// JS files are accessible via `/_bun/client/route.<hash>.js`
+/// These are randomly generated to avoid possible browser caching of old assets.
+route_js_payloads: AutoArrayHashMapUnmanaged(u64, Route.Index),
+// /// Assets are accessible via `/_bun/asset/<hash>`
+// assets: bun.StringArrayHashMapUnmanaged(u64, Asset),
+/// All bundling failures are stored until a file is saved and rebuilt.
+/// They are stored in the wire format the HMR runtime expects so that
+/// serialization only happens once.
+bundling_failures: std.ArrayHashMapUnmanaged(
+    SerializedFailure,
+    void,
+    SerializedFailure.ArrayHashContextViaOwner,
+    false,
+) = .{},
 
-// These values are handles to the functions in server_exports.
+// These values are handles to the functions in `hmr-runtime-server.ts`.
 // For type definitions, see `./bake.private.d.ts`
 server_fetch_function_callback: JSC.Strong,
 server_register_update_callback: JSC.Strong,
@@ -67,79 +89,70 @@
 watch_events: [2]HotReloadTask.Aligned,
 watch_state: std.atomic.Value(u32),
 watch_current: u1 = 0,
 
-// Bundling
+/// Number of bundles that have been executed. This is currently not read, but
+/// will be used later to determine when to invoke graph garbage collection.
 generation: usize = 0,
+/// Displayed in the HMR success indicator
 bundles_since_last_error: usize = 0,
 
-/// All access into IncrementalGraph is guarded by this. This is only
-/// a debug assertion since there is no actual contention.
-graph_safety_lock: bun.DebugThreadLock,
-client_graph: IncrementalGraph(.client),
-server_graph: IncrementalGraph(.server),
-/// CSS files are accessible via `/_bun/css/<hash>.css`
-/// Value is bundled code.
-css_files: AutoArrayHashMapUnmanaged(u64, []const u8),
-// /// Assets are accessible via `/_bun/asset/<hash>`
-// assets: bun.StringArrayHashMapUnmanaged(u64, Asset),
-/// All bundling failures are stored until a file is saved and rebuilt.
-/// They are stored in the wire format the HMR runtime expects so that
-/// serialization only happens once.
-bundling_failures: std.ArrayHashMapUnmanaged(
-    SerializedFailure,
-    void,
-    SerializedFailure.ArrayHashContextViaOwner,
-    false,
-) = .{},
-/// Quickly retrieve a route's index from the entry point file.
-route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, Route.Index),
-/// State populated during bundling. Often cleared
-incremental_result: IncrementalResult,
+
+/// Quickly retrieve a route's index from the entry point file. These are
+/// populated as the routes are discovered. The route may not be bundled or
+/// navigable, in the case a layout's index is looked up.
+route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, RouteIndexAndRecurseFlag),
+
 framework: bake.Framework,
 // Each logical graph gets its own bundler configuration
 server_bundler: Bundler,
 client_bundler: Bundler,
 ssr_bundler: Bundler,
+
+// TODO: This being shared state is likely causing a crash
 /// Stored and reused for bundling tasks
 log: Log,
 
 // Debugging
 dump_dir: ?std.fs.Dir,
+/// Reference count of active sockets with the visualizer enabled.
+emit_visualizer_events: u32,
+has_pre_crash_handler: bool,
 
 pub const internal_prefix = "/_bun";
 pub const client_prefix = internal_prefix ++ "/client";
 pub const asset_prefix = internal_prefix ++ "/asset";
 pub const css_prefix = internal_prefix ++ "/css";
 
-pub const Route = struct {
-    pub const Index = bun.GenericIndex(u30, Route);
+pub const RouteBundle = struct {
+    pub const Index = bun.GenericIndex(u30, RouteBundle);
+
+    route: Route.Index,
 
-    // Config
-    pattern: [:0]const u8,
-    entry_point: []const u8,
+    server_state: State,
 
-    server_state: State = .unqueued,
-    /// Cached to avoid looking up by filename in `server_graph`
-    server_file: IncrementalGraph(.server).FileIndex.Optional = .none,
+    /// Used to communicate the pattern over WebSocket. The HMR client contains code
+    /// to match this against the URL bar to determine if a reloading route applies
+    /// or not.
+    full_pattern: []const u8,
 
     /// Generated lazily when the client JS is requested (HTTP GET /_bun/client/*.js),
     /// which is only needed when a hard-reload is performed.
     ///
     /// Freed when a client module updates.
-    client_bundle: ?[]const u8 = null,
+    client_bundle: ?[]const u8,
     /// Contains the list of serialized failures. Hashmap allows for
     /// efficient lookup and removal of failing files.
     /// When state == .evaluation_failure, this is populated with that error.
-    evaluate_failure: ?SerializedFailure = null,
+    evaluate_failure: ?SerializedFailure,
 
-    /// Cached to avoid re-creating the string every request
-    module_name_string: JSC.Strong = .{},
-    /// Cached to avoid re-creating the string every request
-    client_bundle_url_value: JSC.Strong = .{},
-    /// Cached to avoid re-creating the array every request
-    css_file_array: JSC.Strong = .{},
+    // TODO: micro-opt: use a singular strong
 
-    /// Assigned in DevServer.init
-    dev: *DevServer = undefined,
-    client_bundled_url: []u8 = undefined,
+    /// Cached to avoid re-creating the array every request.
+    /// Invalidated when a layout is added or removed from this route.
+    cached_module_list: JSC.Strong,
+    /// Cached to avoid re-creating the string every request.
+    /// Invalidated when any client file associated with the route is updated.
+    cached_client_bundle_url: JSC.Strong,
+    /// Cached to avoid re-creating the array every request.
+    /// Invalidated when the list of CSS files changes.
+    cached_css_file_array: JSC.Strong,
 
     /// A union is not used so that `bundler_failure_logs` can re-use memory, as
     /// this state frequently changes between `loaded` and the failure variants.
@@ -147,8 +160,11 @@ pub const Route = struct {
         /// In development mode, routes are lazily built. This state implies a
         /// build of this route has never been run. It is possible to bundle the
        /// route entry point and still have an unqueued route if another route
-        /// imports this one.
+        /// imports this one. This state is implied if `FrameworkRouter.Route`
+        /// has no bundle index assigned.
         unqueued,
+        /// A bundle associated with this route is in progress
+        bundling,
        /// This route was flagged for bundling failures. There are edge cases
         /// where a route can be disconnected from its failures, so the route
         /// imports have to be traced to discover if possible failures still
@@ -162,51 +178,48 @@
     };
 };
 
-const Asset = union(enum) {
-    /// File contents are allocated with `dev.allocator`
-    /// The slice is mirrored in `dev.client_graph.bundled_files`, so freeing this slice is not required.
- css: []const u8, - /// A file path relative to cwd, owned by `dev.allocator` - file_path: []const u8, +pub const DeferredRequest = struct { + next: ?*DeferredRequest, + bundle: RouteBundle.Index, + data: Data, + + const Data = union(enum) { + server_handler: bun.JSC.API.SavedRequest, + /// onJsRequestWithBundle + js_payload: *Response, + + const Tag = @typeInfo(Data).Union.tag_type.?; + }; }; /// DevServer is stored on the heap, storing its allocator. +// TODO: change the error set to JSOrMemoryError!*DevServer pub fn init(options: Options) !*DevServer { - const allocator = options.allocator orelse bun.default_allocator; + const allocator = bun.default_allocator; bun.analytics.Features.kit_dev +|= 1; - if (JSC.VirtualMachine.VMHolder.vm != null) - @panic("Cannot initialize bake.DevServer on a thread with an active JSC.VirtualMachine"); - - const dump_dir = if (options.dump_sources) |dir| - std.fs.cwd().makeOpenPath(dir, .{}) catch |err| dir: { - bun.handleErrorReturnTrace(err, @errorReturnTrace()); - Output.warn("Could not open directory for dumping sources: {}", .{err}); - break :dir null; - } - else - null; - const app = App.create(.{}) orelse { - Output.prettyErrorln("Failed to create app", .{}); - return error.AppInitialization; - }; + var dump_dir = if (bun.FeatureFlags.bake_debugging_features) + if (options.dump_sources) |dir| + std.fs.cwd().makeOpenPath(dir, .{}) catch |err| dir: { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + Output.warn("Could not open directory for dumping sources: {}", .{err}); + break :dir null; + } + else + null; + errdefer if (bun.FeatureFlags.bake_debugging_features) if (dump_dir) |*dir| dir.close(); const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false; const dev = bun.create(allocator, DevServer, .{ .allocator = allocator, - .cwd = options.cwd, - .app = app, - .routes = options.routes, - .address = .{ - .port = @intCast(options.listen_config.port), - .hostname = options.listen_config.host orelse "localhost", - }, + .root = options.root, + .vm = options.vm, + .server = null, .directory_watchers = DirectoryWatchStore.empty, .server_fetch_function_callback = .{}, .server_register_update_callback = .{}, - .listener = null, .generation = 0, .graph_safety_lock = .{}, .log = Log.init(allocator), @@ -215,7 +228,9 @@ pub fn init(options: Options) !*DevServer { .watch_state = .{ .raw = 0 }, .watch_current = 0, .emit_visualizer_events = 0, + .has_pre_crash_handler = options.dump_state_on_crash, .css_files = .{}, + .route_js_payloads = .{}, // .assets = .{}, .client_graph = IncrementalGraph(.client).empty, @@ -227,13 +242,13 @@ pub fn init(options: Options) !*DevServer { .client_bundler = undefined, .ssr_bundler = undefined, - .server_global = undefined, - .vm = undefined, - .bun_watcher = undefined, .watch_events = undefined, .configuration_hash_key = undefined, + + .router = undefined, + .route_bundles = .{}, }); errdefer allocator.destroy(dev); @@ -241,7 +256,10 @@ pub fn init(options: Options) !*DevServer { assert(dev.client_graph.owner() == dev); assert(dev.directory_watchers.owner() == dev); - const fs = try bun.fs.FileSystem.init(options.cwd); + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); + + const fs = try bun.fs.FileSystem.init(options.root); dev.bun_watcher = try Watcher.init(DevServer, dev, fs, bun.default_allocator); errdefer dev.bun_watcher.deinit(false); @@ -264,25 +282,16 @@ pub fn init(options: Options) !*DevServer { dev.ssr_bundler.options.dev_server = dev; } - 
dev.framework = dev.framework.resolve( - &dev.server_bundler.resolver, - &dev.client_bundler.resolver, - ) catch { - // bun i react@experimental react-dom@experimental react-server-dom-webpack@experimental react-refresh@experimental + dev.framework = dev.framework.resolve(&dev.server_bundler.resolver, &dev.client_bundler.resolver) catch { Output.errGeneric("Failed to resolve all imports required by the framework", .{}); return error.FrameworkInitialization; }; - dev.vm = VirtualMachine.initKit(.{ - .allocator = bun.default_allocator, - .args = std.mem.zeroes(bun.Schema.Api.TransformOptions), - }) catch |err| - Output.panic("Failed to create Global object: {}", .{err}); - dev.server_global = c.BakeCreateDevGlobal(dev, dev.vm.console); - dev.vm.global = dev.server_global.js(); - dev.vm.regular_event_loop.global = dev.vm.global; - dev.vm.jsc = dev.vm.global.vm(); - dev.vm.event_loop.ensureWaker(); + errdefer dev.route_lookup.clearAndFree(allocator); + // errdefer dev.client_graph.deinit(allocator); + // errdefer dev.server_graph.deinit(allocator); + + dev.vm.global = @ptrCast(dev.vm.global); dev.configuration_hash_key = hash_key: { var hash = std.hash.Wyhash.init(128); @@ -296,8 +305,10 @@ pub fn init(options: Options) !*DevServer { hash.update(bun.Environment.git_sha_short); } - hash.update(dev.framework.entry_client); - hash.update(dev.framework.entry_server); + // TODO: hash router types + // hash.update(dev.framework.entry_client); + // hash.update(dev.framework.entry_server); + if (dev.framework.server_components) |sc| { bun.writeAnyToHasher(&hash, true); bun.writeAnyToHasher(&hash, sc.separate_ssr_graph); @@ -325,163 +336,147 @@ pub fn init(options: Options) !*DevServer { break :hash_key std.fmt.bytesToHex(std.mem.asBytes(&hash.final()), .lower); }; - var has_fallback = false; - - for (options.routes, 0..) |*route, i| { - app.any(route.pattern, *Route, route, onServerRequest); - - route.dev = dev; - route.client_bundled_url = std.fmt.allocPrint( - allocator, - client_prefix ++ "/{d}.js", - .{i}, - ) catch bun.outOfMemory(); - - if (bun.strings.eqlComptime(route.pattern, "/*")) - has_fallback = true; + // Add react fast refresh if needed. This is the first file on the client side, + // as it will be referred to by index. + if (dev.framework.react_fast_refresh) |rfr| { + assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index); } - app.get(client_prefix ++ "/:route", *DevServer, dev, onJsRequest); - app.get(asset_prefix ++ "/:asset", *DevServer, dev, onAssetRequest); - app.get(css_prefix ++ "/:asset", *DevServer, dev, onCssRequest); - app.get(internal_prefix ++ "/src/*", *DevServer, dev, onSrcRequest); + try dev.initServerRuntime(); - app.ws( - internal_prefix ++ "/hmr", - dev, - 0, - uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, false).apply(.{}), - ); + // Initialize the router + dev.router = router: { + var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(allocator, options.framework.file_system_router_types.len); + errdefer types.deinit(allocator); - app.get(internal_prefix ++ "/incremental_visualizer", *DevServer, dev, onIncrementalVisualizer); + for (options.framework.file_system_router_types, 0..) 
|fsr, i| { + const joined_root = bun.path.joinAbs(dev.root, .auto, fsr.root); + const entry = dev.server_bundler.resolver.readDirInfoIgnoreError(joined_root) orelse + continue; - if (!has_fallback) - app.any("/*", void, {}, onFallbackRoute); + const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true); + + try types.append(allocator, .{ + .abs_root = bun.strings.withoutTrailingSlash(entry.abs_path), + .prefix = fsr.prefix, + .ignore_underscores = fsr.ignore_underscores, + .ignore_dirs = fsr.ignore_dirs, + .extensions = fsr.extensions, + .style = fsr.style, + .server_file = toOpaqueFileId(.server, server_file), + .client_file = if (fsr.entry_client) |client| + toOpaqueFileId(.client, try dev.client_graph.insertStale(client, false)).toOptional() + else + .none, + .server_file_string = .{}, + }); - // Some indices at the start of the graph are reserved for framework files. - { - dev.graph_safety_lock.lock(); - defer dev.graph_safety_lock.unlock(); + try dev.route_lookup.put(allocator, server_file, .{ + .route_index = FrameworkRouter.Route.Index.init(@intCast(i)), + .should_recurse_when_visiting = true, + }); + } - assert(try dev.client_graph.insertStale(dev.framework.entry_client, false) == IncrementalGraph(.client).framework_entry_point_index); - assert(try dev.server_graph.insertStale(dev.framework.entry_server, false) == IncrementalGraph(.server).framework_entry_point_index); + break :router try FrameworkRouter.initEmpty(types.items, allocator); + }; - if (dev.framework.react_fast_refresh) |rfr| { - assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index); - } + // TODO: move pre-bundling to be one tick after server startup. + // this way the line saying the server is ready shows quicker + try dev.scanInitialRoutes(); - try dev.client_graph.ensureStaleBitCapacity(true); - try dev.server_graph.ensureStaleBitCapacity(true); + if (bun.FeatureFlags.bake_debugging_features and options.dump_state_on_crash) + try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash); - const client_files = dev.client_graph.bundled_files.values(); - client_files[IncrementalGraph(.client).framework_entry_point_index.get()].flags.is_special_framework_file = true; - } + return dev; +} - // Pre-bundle the framework code - { - // Since this will enter JavaScript to load code, ensure we have a lock. - const lock = dev.vm.jsc.getAPILock(); - defer lock.release(); - - dev.bundle(&.{ - BakeEntryPoint.init(dev.framework.entry_server, .server), - BakeEntryPoint.init(dev.framework.entry_client, .client), - }) catch |err| { - _ = &err; // autofix - bun.todoPanic(@src(), "handle error", .{}); - }; - } +fn initServerRuntime(dev: *DevServer) !void { + const runtime = bun.String.static(bun.bake.getHmrRuntime(.server)); - app.listenWithConfig(*DevServer, dev, onListen, options.listen_config); + const interface = c.BakeLoadInitialServerCode( + @ptrCast(dev.vm.global), + runtime, + if (dev.framework.server_components) |sc| sc.separate_ssr_graph else false, + ) catch |err| { + dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err)); + @panic("Server runtime failed to start. 
The above error is always a bug in Bun"); + }; - return dev; + if (!interface.isObject()) + @panic("Internal assertion failure: expected interface from HMR runtime to be an object"); + const fetch_function: JSValue = interface.get(dev.vm.global, "handleRequest") orelse + @panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest"); + bun.assert(fetch_function.isCallable(dev.vm.jsc)); + dev.server_fetch_function_callback = JSC.Strong.create(fetch_function, dev.vm.global); + const register_update = interface.get(dev.vm.global, "registerUpdate") orelse + @panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate"); + dev.server_register_update_callback = JSC.Strong.create(register_update, dev.vm.global); + + fetch_function.ensureStillAlive(); + register_update.ensureStillAlive(); } -fn deinit(dev: *DevServer) void { - const allocator = dev.allocator; - allocator.destroy(dev); - bun.todoPanic(@src(), "bake.DevServer.deinit()"); +/// Deferred one tick so that the server can be up faster +fn scanInitialRoutes(dev: *DevServer) !void { + try dev.router.scanAll( + dev.allocator, + &dev.server_bundler.resolver, + FrameworkRouter.InsertionContext.wrap(DevServer, dev), + ); + + try dev.server_graph.ensureStaleBitCapacity(true); + try dev.client_graph.ensureStaleBitCapacity(true); } -pub fn runLoopForever(dev: *DevServer) noreturn { - const lock = dev.vm.jsc.getAPILock(); - defer lock.release(); +pub fn attachRoutes(dev: *DevServer, server: anytype) !void { + dev.server = bun.JSC.API.AnyServer.from(server); + const app = server.app.?; - while (true) { - dev.vm.tick(); - dev.vm.eventLoop().autoTickActive(); + // For this to work, the route handlers need to be augmented to use the comptime + // SSL parameter. It's worth considering removing the SSL boolean. + if (@TypeOf(app) == *uws.NewApp(true)) { + bun.todoPanic(@src(), "DevServer does not support SSL yet", .{}); } -} -// uws handlers + app.get(client_prefix ++ "/:route", *DevServer, dev, onJsRequest); + app.get(asset_prefix ++ "/:asset", *DevServer, dev, onAssetRequest); + app.get(css_prefix ++ "/:asset", *DevServer, dev, onCssRequest); + app.get(internal_prefix ++ "/src/*", *DevServer, dev, onSrcRequest); -fn onListen(ctx: *DevServer, maybe_listen: ?*App.ListenSocket) void { - const listen: *App.ListenSocket = maybe_listen orelse { - bun.todoPanic(@src(), "handle listen failure", .{}); - }; + app.ws( + internal_prefix ++ "/hmr", + dev, + 0, + uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, false).apply(.{}), + ); - ctx.listener = listen; - ctx.address.port = @intCast(listen.getLocalPort()); + app.get(internal_prefix ++ "/incremental_visualizer", *DevServer, dev, onIncrementalVisualizer); - Output.prettyErrorln("--\\> http://{s}:{d}\n", .{ - bun.span(ctx.address.hostname), - ctx.address.port, - }); - Output.flush(); + app.any("/*", *DevServer, dev, onRequest); +} + +pub fn deinit(dev: *DevServer) void { + const allocator = dev.allocator; + if (dev.has_pre_crash_handler) + bun.crash_handler.removePreCrashHandler(dev); + allocator.destroy(dev); + bun.todoPanic(@src(), "bake.DevServer.deinit()", .{}); } fn onJsRequest(dev: *DevServer, req: *Request, resp: *Response) void { - const route = route: { + const route_bundle = route: { const route_id = req.parameter(0); if (!bun.strings.hasSuffixComptime(route_id, ".js")) return req.setYield(true); - const i = std.fmt.parseInt(u16, route_id[0 .. 
route_id.len - 3], 10) catch + if (!bun.strings.hasPrefixComptime(route_id, "route.")) return req.setYield(true); - if (i >= dev.routes.len) + const i = parseHexToInt(u64, route_id["route.".len .. route_id.len - ".js".len]) orelse + return req.setYield(true); + break :route dev.route_js_payloads.get(i) orelse return req.setYield(true); - break :route &dev.routes[i]; }; - const js_source = route.client_bundle orelse code: { - if (route.server_state == .unqueued) { - dev.bundleRouteFirstTime(route); - } - - switch (route.server_state) { - .unqueued => bun.assertWithLocation(false, @src()), - .possible_bundling_failures => { - if (dev.bundling_failures.count() > 0) { - resp.corked(sendSerializedFailures, .{ - dev, - resp, - dev.bundling_failures.keys(), - .bundler, - }); - return; - } else { - route.server_state = .loaded; - } - }, - .evaluation_failure => { - resp.corked(sendSerializedFailures, .{ - dev, - resp, - &.{route.evaluate_failure orelse @panic("missing error")}, - .evaluation, - }); - return; - }, - .loaded => {}, - } - - // TODO: there can be stale files in this if you request an asset after - // a watch but before the bundle task starts. - - const out = dev.generateClientBundle(route) catch bun.outOfMemory(); - route.client_bundle = out; - break :code out; - }; - sendTextFile(js_source, MimeType.javascript.value, resp); + dev.ensureRouteIsBundled(route_bundle, .js_payload, req, resp) catch bun.outOfMemory(); } fn onAssetRequest(dev: *DevServer, req: *Request, resp: *Response) void { @@ -515,6 +510,12 @@ fn onCssRequest(dev: *DevServer, req: *Request, resp: *Response) void { sendTextFile(css, MimeType.css.value, resp); } +fn parseHexToInt(comptime T: type, slice: []const u8) ?T { + var out: [@sizeOf(T)]u8 = undefined; + assert((std.fmt.hexToBytes(&out, slice) catch return null).len == @sizeOf(T)); + return @bitCast(out); +} + fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: *Response) void { resp.corked(onIncrementalVisualizerCorked, .{resp}); } @@ -528,38 +529,65 @@ fn onIncrementalVisualizerCorked(resp: *Response) void { resp.end(code, false); } -/// `route.server_state` must be `.unenqueued` -fn bundleRouteFirstTime(dev: *DevServer, route: *Route) void { - if (Environment.allow_assert) switch (route.server_state) { - .unqueued => {}, - .possible_bundling_failures => unreachable, // should watch affected files and bundle on save - .evaluation_failure => unreachable, // bundling again wont fix this issue - .loaded => unreachable, // should not be bundling since it already passed - }; - - if (dev.bundle(&.{ - BakeEntryPoint.route( - route.entry_point, - Route.Index.init(@intCast(bun.indexOfPointerInSlice(Route, dev.routes, route))), - ), - })) |_| { - route.server_state = .loaded; - } else |err| switch (err) { - error.OutOfMemory => bun.outOfMemory(), - error.BuildFailed => assert(route.server_state == .possible_bundling_failures), - error.ServerLoadFailed => route.server_state = .evaluation_failure, - } -} +fn ensureRouteIsBundled( + dev: *DevServer, + route_index: Route.Index, + kind: DeferredRequest.Data.Tag, + req: *Request, + resp: *Response, +) bun.OOM!void { + const bundle_index = if (dev.router.routePtr(route_index).bundle.unwrap()) |bundle_index| + bundle_index + else + try dev.insertRouteBundle(route_index); + + switch (dev.routeBundlePtr(bundle_index).server_state) { + .unqueued => { + const server_file_names = dev.server_graph.bundled_files.keys(); + const client_file_names = dev.client_graph.bundled_files.keys(); + + var sfa = std.heap.stackFallback(4096, 
dev.allocator); + const temp_alloc = sfa.get(); + + var entry_points = std.ArrayList(BakeEntryPoint).init(temp_alloc); + defer entry_points.deinit(); + + // Build a list of all files that have not yet been bundled. + var route = dev.router.routePtr(route_index); + const router_type = dev.router.typePtr(route.type); + try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, router_type.server_file); + try dev.appendOpaqueEntryPoint(client_file_names, &entry_points, .client, router_type.client_file); + try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, route.file_page); + try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, route.file_layout); + while (route.parent.unwrap()) |parent_index| { + route = dev.router.routePtr(parent_index); + try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, route.file_layout); + } -fn onServerRequest(route: *Route, req: *Request, resp: *Response) void { - const dev = route.dev; + if (entry_points.items.len == 0) { + @panic("TODO: trace graph for possible errors, so DevServer knows what state this should go to"); + } - if (route.server_state == .unqueued) { - dev.bundleRouteFirstTime(route); + const route_bundle = dev.routeBundlePtr(bundle_index); + if (dev.bundle(entry_points.items)) |_| { + route_bundle.server_state = .loaded; + } else |err| switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.BuildFailed => assert(route_bundle.server_state == .possible_bundling_failures), + error.ServerLoadFailed => route_bundle.server_state = .evaluation_failure, + } + }, + .bundling => { + const prepared = dev.server.?.DebugHTTPServer.prepareJsRequestContext(req, resp) orelse + return; + _ = prepared; + @panic("TODO: Async Bundler"); + }, + else => {}, } - - switch (route.server_state) { - .unqueued => bun.assertWithLocation(false, @src()), + switch (dev.routeBundlePtr(bundle_index).server_state) { + .unqueued => unreachable, + .bundling => @panic("TODO: Async Bundler"), .possible_bundling_failures => { // TODO: perform a graph trace to find just the errors that are needed if (dev.bundling_failures.count() > 0) { @@ -571,14 +599,14 @@ fn onServerRequest(route: *Route, req: *Request, resp: *Response) void { }); return; } else { - route.server_state = .loaded; + dev.routeBundlePtr(bundle_index).server_state = .loaded; } }, .evaluation_failure => { resp.corked(sendSerializedFailures, .{ dev, resp, - (&(route.evaluate_failure orelse @panic("missing error")))[0..1], + (&(dev.routeBundlePtr(bundle_index).evaluate_failure orelse @panic("missing error")))[0..1], .evaluation, }); return; @@ -586,101 +614,94 @@ fn onServerRequest(route: *Route, req: *Request, resp: *Response) void { .loaded => {}, } - // TODO: this does not move the body, reuse memory, and many other things - // that server.zig does. 
- const url_bun_string = bun.String.init(req.url()); - defer url_bun_string.deref(); - - const headers = JSC.FetchHeaders.createFromUWS(req); - const request_object = JSC.WebCore.Request.init( - url_bun_string, - headers, - dev.vm.initRequestBodyValue(.Null) catch bun.outOfMemory(), - bun.http.Method.which(req.method()) orelse .GET, - ).new(); - - const js_request = request_object.toJS(dev.server_global.js()); - - const global = dev.server_global.js(); + switch (kind) { + .server_handler => dev.onRequestWithBundle(bundle_index, .{ .stack = req }, resp), + .js_payload => dev.onJsRequestWithBundle(bundle_index, resp), + } +} +fn onRequestWithBundle( + dev: *DevServer, + route_bundle_index: RouteBundle.Index, + req: bun.JSC.API.SavedRequest.Union, + resp: *Response, +) void { const server_request_callback = dev.server_fetch_function_callback.get() orelse unreachable; // did not bundle - var result = server_request_callback.call( - global, - .undefined, - &.{ - // req - js_request, - // routeModuleId - route.module_name_string.get() orelse str: { - const js = bun.String.createUTF8( - bun.path.relative(dev.cwd, route.entry_point), - ).toJS(dev.server_global.js()); - route.module_name_string = JSC.Strong.create(js, dev.server_global.js()); + const route_bundle = dev.routeBundlePtr(route_bundle_index); + + const router_type = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type); + + dev.server.?.onRequestFromSaved( + req, + resp, + server_request_callback, + 4, + .{ + // routerTypeMain + router_type.server_file_string.get() orelse str: { + const name = dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, router_type.server_file).get()]; + const str = bun.String.createUTF8(name); + defer str.deref(); + const js = str.toJS(dev.vm.global); + router_type.server_file_string = JSC.Strong.create(js, dev.vm.global); break :str js; }, + // routeModules + route_bundle.cached_module_list.get() orelse arr: { + const global = dev.vm.global; + const keys = dev.server_graph.bundled_files.keys(); + var n: usize = 1; + var route = dev.router.routePtr(route_bundle.route); + while (true) { + if (route.file_layout != .none) n += 1; + route = dev.router.routePtr(route.parent.unwrap() orelse break); + } + const arr = JSValue.createEmptyArray(global, n); + route = dev.router.routePtr(route_bundle.route); + var route_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()])); + arr.putIndex(global, 0, route_name.transferToJS(global)); + n = 1; + while (true) { + if (route.file_layout.unwrap()) |layout| { + var layout_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, layout).get()])); + arr.putIndex(global, @intCast(n), layout_name.transferToJS(global)); + n += 1; + } + route = dev.router.routePtr(route.parent.unwrap() orelse break); + } + route_bundle.cached_module_list = JSC.Strong.create(arr, global); + break :arr arr; + }, // clientId - route.client_bundle_url_value.get() orelse str: { - const js = bun.String.createUTF8(route.client_bundled_url).toJS(global); - route.client_bundle_url_value = JSC.Strong.create(js, dev.server_global.js()); + route_bundle.cached_client_bundle_url.get() orelse str: { + const id = std.crypto.random.int(u64); + dev.route_js_payloads.put(dev.allocator, id, route_bundle.route) catch bun.outOfMemory(); + const str = bun.String.createFormat(client_prefix ++ "/route.{}.js", .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&id))}) catch bun.outOfMemory(); + defer str.deref(); + const js = 
str.toJS(dev.vm.global); + route_bundle.cached_client_bundle_url = JSC.Strong.create(js, dev.vm.global); break :str js; }, // styles - route.css_file_array.get() orelse arr: { - const js = dev.generateCssList(route) catch bun.outOfMemory(); - route.css_file_array = JSC.Strong.create(js, dev.server_global.js()); + route_bundle.cached_css_file_array.get() orelse arr: { + const js = dev.generateCssList(route_bundle) catch bun.outOfMemory(); + route_bundle.cached_css_file_array = JSC.Strong.create(js, dev.vm.global); break :arr js; }, }, - ) catch |err| { - const exception = global.takeException(err); - dev.vm.printErrorLikeObjectToConsole(exception); - // const fail = try SerializedFailure.initFromJs(.none, exception); - // defer fail.deinit(); - // dev.sendSerializedFailures(resp, &.{fail}, .runtime); - dev.sendStubErrorMessage(route, resp, exception); - return; - }; - - if (result.asAnyPromise()) |promise| { - dev.vm.waitForPromise(promise); - switch (promise.unwrap(dev.vm.jsc, .mark_handled)) { - .pending => unreachable, // was waited for - .fulfilled => |r| result = r, - .rejected => |exception| { - dev.vm.printErrorLikeObjectToConsole(exception); - dev.sendStubErrorMessage(route, resp, exception); - // const fail = try SerializedFailure.initFromJs(.none, e); - // defer fail.deinit(); - // dev.sendSerializedFailures(resp, &.{fail}, .runtime); - return; - }, - } - } - - // TODO: This interface and implementation is very poor. It is fine as - // the runtime currently emulates returning a `new Response` - // - // It probably should use code from `server.zig`, but most importantly it should - // not have a tie to DevServer, but instead be generic with a context structure - // containing just a *uws.App, *JSC.EventLoop, and JSValue response object. - // - // This would allow us to support all of the nice things `new Response` allows - - bun.assert(result.isString()); - const bun_string = result.toBunString(dev.server_global.js()); - defer bun_string.deref(); - if (bun_string.tag == .Dead) { - bun.outOfMemory(); - } - - const utf8 = bun_string.toUTF8(dev.allocator); - defer utf8.deinit(); + ); +} - resp.writeStatus("200 OK"); - resp.writeHeader("Content-Type", MimeType.html.value); - resp.end(utf8.slice(), true); // TODO: You should never call res.end(huge buffer) +pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: *Response) void { + const route_bundle = dev.routeBundlePtr(bundle_index); + const code = route_bundle.client_bundle orelse code: { + const code = dev.generateClientBundle(route_bundle) catch bun.outOfMemory(); + route_bundle.client_bundle = code; + break :code code; + }; + sendTextFile(code, MimeType.javascript.value, resp); } pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: *App.Response) void { @@ -726,6 +747,15 @@ fn bundle(dev: *DevServer, files: []const BakeEntryPoint) BundleError!void { assert(files.len > 0); + const bundle_file_list = bun.Output.Scoped(.bundle_file_list, false); + + if (bundle_file_list.isVisible()) { + bundle_file_list.log("Start bundle {d} files", .{files.len}); + for (files) |f| { + bundle_file_list.log("- {s} (.{s})", .{ f.path, @tagName(f.graph) }); + } + } + var heap = try ThreadlocalArena.init(); defer heap.deinit(); @@ -804,82 +834,43 @@ fn bundle(dev: *DevServer, files: []const BakeEntryPoint) BundleError!void { const is_first_server_chunk = !dev.server_fetch_function_callback.has(); if (dev.server_graph.current_chunk_len > 0) { - const server_bundle = try dev.server_graph.takeBundle(if 
(is_first_server_chunk) .initial_response else .hmr_chunk); + const server_bundle = try dev.server_graph.takeBundle( + if (is_first_server_chunk) .initial_response else .hmr_chunk, + "", + ); defer dev.allocator.free(server_bundle); - if (is_first_server_chunk) { - const server_code = c.BakeLoadInitialServerCode(dev.server_global, bun.String.createLatin1(server_bundle)) catch |err| { - dev.vm.printErrorLikeObjectToConsole(dev.server_global.js().takeException(err)); - { - // TODO: document the technical reasons this should not be allowed to fail - bun.todoPanic(@src(), "First Server Load Fails. This should become a bundler bug.", .{}); - } - _ = &err; // autofix - // fail.* = Failure.fromJSServerLoad(dev.server_global.js().takeException(err), dev.server_global.js()); - return error.ServerLoadFailed; - }; - dev.vm.waitForPromise(.{ .internal = server_code.promise }); - - switch (server_code.promise.unwrap(dev.vm.jsc, .mark_handled)) { - .pending => unreachable, // promise is settled - .rejected => |err| { - dev.vm.printErrorLikeObjectToConsole(err); - { - bun.todoPanic(@src(), "First Server Load Fails. This should become a bundler bug.", .{}); - } - _ = &err; // autofix - // fail.* = Failure.fromJSServerLoad(err, dev.server_global.js()); - return error.ServerLoadFailed; - }, - .fulfilled => |v| bun.assert(v == .undefined), - } - - const default_export = c.BakeGetDefaultExportFromModule(dev.server_global.js(), server_code.key.toJS()); - if (!default_export.isObject()) - @panic("Internal assertion failure: expected interface from HMR runtime to be an object"); - const fetch_function: JSValue = default_export.get(dev.server_global.js(), "handleRequest") orelse - @panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest"); - bun.assert(fetch_function.isCallable(dev.vm.jsc)); - dev.server_fetch_function_callback = JSC.Strong.create(fetch_function, dev.server_global.js()); - const register_update = default_export.get(dev.server_global.js(), "registerUpdate") orelse - @panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate"); - dev.server_register_update_callback = JSC.Strong.create(register_update, dev.server_global.js()); - - fetch_function.ensureStillAlive(); - register_update.ensureStillAlive(); - } else { - const server_modules = c.BakeLoadServerHmrPatch(dev.server_global, bun.String.createLatin1(server_bundle)) catch |err| { - // No user code has been evaluated yet, since everything is to - // be wrapped in a function clousure. This means that the likely - // error is going to be a syntax error, or other mistake in the - // bundler. - dev.vm.printErrorLikeObjectToConsole(dev.server_global.js().takeException(err)); - @panic("Error thrown while evaluating server code. This is always a bug in the bundler."); - }; - const errors = dev.server_register_update_callback.get().?.call( - dev.server_global.js(), - dev.server_global.js().toJSValue(), - &.{ - server_modules, - dev.makeArrayForServerComponentsPatch(dev.server_global.js(), dev.incremental_result.client_components_added.items), - dev.makeArrayForServerComponentsPatch(dev.server_global.js(), dev.incremental_result.client_components_removed.items), - }, - ) catch |err| { - // One module replacement error should NOT prevent follow-up - // module replacements to fail. It is the HMR runtime's - // responsibility to collect all module load errors, and - // bubble them up. 
- dev.vm.printErrorLikeObjectToConsole(dev.server_global.js().takeException(err));
- @panic("Error thrown in Hot-module-replacement code. This is always a bug in the HMR runtime.");
- };
- _ = errors; // TODO:
- }
+ const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.createLatin1(server_bundle)) catch |err| {
+ // No user code has been evaluated yet, since everything is to
+ // be wrapped in a function closure. This means that the likely
+ // error is going to be a syntax error, or other mistake in the
+ // bundler.
+ dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
+ @panic("Error thrown while evaluating server code. This is always a bug in the bundler.");
+ };
+ const errors = dev.server_register_update_callback.get().?.call(
+ dev.vm.global,
+ dev.vm.global.toJSValue(),
+ &.{
+ server_modules,
+ dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_added.items),
+ dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_removed.items),
+ },
+ ) catch |err| {
+ // One module replacement error should NOT prevent follow-up
+ // module replacements from being applied. It is the HMR runtime's
+ // responsibility to collect all module load errors, and
+ // bubble them up.
+ dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
+ @panic("Error thrown in Hot-module-replacement code. This is always a bug in the HMR runtime.");
+ };
+ _ = errors; // TODO:
 }

 const css_chunks = bundle_result.cssChunks();

 if ((dev.client_graph.current_chunk_len > 0 or css_chunks.len > 0) and
- dev.app.num_subscribers(HmrSocket.global_topic) > 0)
+ dev.numSubscribers(HmrSocket.global_topic) > 0)
 {
 var sfb2 = std.heap.stackFallback(65536, bun.default_allocator);
 var payload = std.ArrayList(u8).initCapacity(sfb2.get(), 65536) catch
@@ -902,9 +893,9 @@ fn bundle(dev: *DevServer, files: []const BakeEntryPoint) BundleError!void {
 }

 if (dev.client_graph.current_chunk_len > 0)
- try dev.client_graph.takeBundleToList(.hmr_chunk, &payload);
+ try dev.client_graph.takeBundleToList(.hmr_chunk, &payload, "");

- _ = dev.app.publish(HmrSocket.global_topic, payload.items, .binary, true);
+ dev.publish(HmrSocket.global_topic, payload.items, .binary);
 }

 if (dev.incremental_result.failures_added.items.len > 0) {
@@ -954,15 +945,18 @@ fn indexFailures(dev: *DevServer) !void {
 }
 }

- for (dev.incremental_result.routes_affected.items) |route_index| {
- const route = &dev.routes[route_index.get()];
- route.server_state = .possible_bundling_failures;
+ {
+ @panic("TODO: revive");
 }
+ // for (dev.incremental_result.routes_affected.items) |route_index| {
+ // const route = &dev.routes[route_index.get()];
+ // route.server_state = .possible_bundling_failures;
+ // }

- _ = dev.app.publish(HmrSocket.global_topic, payload.items, .binary, false);
+ dev.publish(HmrSocket.global_topic, payload.items, .binary);
 } else if (dev.incremental_result.failures_removed.items.len > 0) {
 if (dev.bundling_failures.count() == 0) {
- _ = dev.app.publish(HmrSocket.global_topic, &.{MessageId.errors_cleared.char()}, .binary, false);
+ dev.publish(HmrSocket.global_topic, &.{MessageId.errors_cleared.char()}, .binary);
 for (dev.incremental_result.failures_removed.items) |removed| {
 removed.deinit();
 }
@@ -979,7 +973,7 @@ fn indexFailures(dev: *DevServer) !void {
 removed.deinit();
 }

- _ = dev.app.publish(HmrSocket.global_topic, payload.items, .binary, false);
+ dev.publish(HmrSocket.global_topic, payload.items, .binary);
 }
 }

@@
-988,9 +982,9 @@ fn indexFailures(dev: *DevServer) !void { /// Used to generate the entry point. Unlike incremental patches, this always /// contains all needed files for a route. -fn generateClientBundle(dev: *DevServer, route: *Route) bun.OOM![]const u8 { - assert(route.client_bundle == null); - assert(route.server_state == .loaded); // page is unfit to load +fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]const u8 { + assert(route_bundle.client_bundle == null); + assert(route_bundle.server_state == .loaded); // page is unfit to load dev.graph_safety_lock.lock(); defer dev.graph_safety_lock.unlock(); @@ -1008,31 +1002,20 @@ fn generateClientBundle(dev: *DevServer, route: *Route) bun.OOM![]const u8 { // Run tracing dev.client_graph.reset(); + try dev.traceAllRouteImports(route_bundle, .{ .find_client_modules = true }); - // Framework entry point is always needed. - try dev.client_graph.traceImports( - IncrementalGraph(.client).framework_entry_point_index, - .{ .find_client_modules = true }, - ); - - // If react fast refresh is enabled, it will be imported by the runtime instantly. - if (dev.framework.react_fast_refresh != null) { - try dev.client_graph.traceImports(IncrementalGraph(.client).react_refresh_index, .{ .find_client_modules = true }); - } + const client_file = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type).client_file.unwrap() orelse + @panic("No client side entrypoint in client bundle"); - // Trace the route to the client components - try dev.server_graph.traceImports( - route.server_file.unwrap() orelse - Output.panic("File index for route not present", .{}), - .{ .find_client_modules = true }, + return dev.client_graph.takeBundle( + .initial_response, + dev.relativePath(dev.client_graph.bundled_files.keys()[fromOpaqueFileId(.client, client_file).get()]), ); - - return dev.client_graph.takeBundle(.initial_response); } -fn generateCssList(dev: *DevServer, route: *Route) bun.OOM!JSC.JSValue { - if (!Environment.allow_assert) assert(!route.css_file_array.has()); - assert(route.server_state == .loaded); // page is unfit to load +fn generateCssList(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM!JSC.JSValue { + if (Environment.allow_assert) assert(!route_bundle.cached_css_file_array.has()); + assert(route_bundle.server_state == .loaded); // page is unfit to load dev.graph_safety_lock.lock(); defer dev.graph_safety_lock.unlock(); @@ -1049,36 +1032,48 @@ fn generateCssList(dev: *DevServer, route: *Route) bun.OOM!JSC.JSValue { // Run tracing dev.client_graph.reset(); - - // Framework entry point is allowed to include its own CSS - try dev.client_graph.traceImports( - IncrementalGraph(.client).framework_entry_point_index, - .{ .find_css = true }, - ); - - // Trace the route to the css files - try dev.server_graph.traceImports( - route.server_file.unwrap() orelse - Output.panic("File index for route not present", .{}), - .{ .find_css = true }, - ); + try dev.traceAllRouteImports(route_bundle, .{ .find_css = true }); const names = dev.client_graph.current_css_files.items; - const arr = JSC.JSArray.createEmpty(dev.server_global.js(), names.len); + const arr = JSC.JSArray.createEmpty(dev.vm.global, names.len); for (names, 0..) 
|item, i| {
 const str = bun.String.createUTF8(item);
 defer str.deref();
- arr.putIndex(dev.server_global.js(), @intCast(i), str.toJS(dev.server_global.js()));
+ arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global));
 }

 return arr;
}

+fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, goal: TraceImportGoal) !void {
+ var route = dev.router.routePtr(route_bundle.route);
+ const router_type = dev.router.typePtr(route.type);
+
+ // Both framework entry points are considered
+ try dev.server_graph.traceImports(fromOpaqueFileId(.server, router_type.server_file), .{ .find_css = true });
+ if (router_type.client_file.unwrap()) |id| {
+ try dev.client_graph.traceImports(fromOpaqueFileId(.client, id), goal);
+ }
+
+ // The route file is considered
+ if (route.file_page.unwrap()) |id| {
+ try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), goal);
+ }
+
+ // For all parents, the layout is considered
+ while (true) {
+ if (route.file_layout.unwrap()) |id| {
+ try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), goal);
+ }
+ route = dev.router.routePtr(route.parent.unwrap() orelse break);
+ }
+}
+
 fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *JSC.JSGlobalObject, items: []const IncrementalGraph(.server).FileIndex) JSValue {
 if (items.len == 0) return .null;
 const arr = JSC.JSArray.createEmpty(global, items.len);
 const names = dev.server_graph.bundled_files.keys();
 for (items, 0..) |item, i| {
- const str = bun.String.createUTF8(bun.path.relative(dev.cwd, names[item.get()]));
+ const str = bun.String.createUTF8(dev.relativePath(names[item.get()]));
 defer str.deref();
 arr.putIndex(global, @intCast(i), str.toJS(global));
 }
@@ -1253,7 +1248,7 @@ pub fn handleParseTaskFailure(
 ) bun.OOM!void {
 // Print each error only once
 Output.prettyErrorln("Errors while bundling '{s}':", .{
- bun.path.relative(dev.cwd, abs_path),
+ dev.relativePath(abs_path),
 });
 Output.flush();
 log.print(Output.errorWriter()) catch {};
@@ -1287,10 +1282,75 @@ pub fn isFileCached(dev: *DevServer, path: []const u8, side: bake.Graph) ?CacheE
 }
}

-fn onFallbackRoute(_: void, _: *Request, resp: *Response) void {
+fn appendOpaqueEntryPoint(
+ dev: *DevServer,
+ file_names: [][]const u8,
+ entry_points: *std.ArrayList(BakeEntryPoint),
+ comptime side: bake.Side,
+ optional_id: anytype,
+) !void {
+ const file = switch (@TypeOf(optional_id)) {
+ OpaqueFileId.Optional => optional_id.unwrap() orelse return,
+ OpaqueFileId => optional_id,
+ else => @compileError("invalid type here"),
+ };
+
+ const file_index = fromOpaqueFileId(side, file);
+ if (switch (side) {
+ .server => dev.server_graph.stale_files.isSet(file_index.get()),
+ .client => dev.client_graph.stale_files.isSet(file_index.get()),
+ }) {
+ try entry_points.append(.{
+ .path = file_names[file_index.get()],
+ .graph = switch (side) {
+ .server => .server,
+ .client => .client,
+ },
+ });
+ }
+}
+
+pub fn routeBundlePtr(dev: *DevServer, idx: RouteBundle.Index) *RouteBundle {
+ return &dev.route_bundles.items[idx.get()];
+}
+
+fn onRequest(dev: *DevServer, req: *Request, resp: *Response) void {
+ var params: FrameworkRouter.MatchedParams = undefined;
+ if (dev.router.matchSlow(req.url(), &params)) |route_index| {
+ dev.ensureRouteIsBundled(route_index, .server_handler, req, resp) catch bun.outOfMemory();
+ return;
+ }
+
 sendBuiltInNotFound(resp);
}

+fn insertRouteBundle(dev: *DevServer, route: Route.Index) !RouteBundle.Index {
+ const full_pattern = full_pattern: {
+ var buf = bake.PatternBuffer.empty;
+ var current: *Route 
= dev.router.routePtr(route); + while (true) { + buf.prependPart(current.part); + current = dev.router.routePtr(current.parent.unwrap() orelse break); + } + break :full_pattern try dev.allocator.dupe(u8, buf.slice()); + }; + errdefer dev.allocator.free(full_pattern); + + try dev.route_bundles.append(dev.allocator, .{ + .route = route, + .server_state = .unqueued, + .full_pattern = full_pattern, + .client_bundle = null, + .evaluate_failure = null, + .cached_module_list = .{}, + .cached_client_bundle_url = .{}, + .cached_css_file_array = .{}, + }); + const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len - 1)); + dev.router.routePtr(route).bundle = bundle_index.toOptional(); + return bundle_index; +} + fn sendTextFile(code: []const u8, content_type: []const u8, resp: *Response) void { if (code.len == 0) { resp.writeStatus("202 No Content"); @@ -1376,7 +1436,7 @@ fn sendBuiltInNotFound(resp: *Response) void { resp.end(message, true); } -fn sendStubErrorMessage(dev: *DevServer, route: *Route, resp: *Response, err: JSValue) void { +fn sendStubErrorMessage(dev: *DevServer, route: *RouteBundle, resp: *Response, err: JSValue) void { var sfb = std.heap.stackFallback(65536, dev.allocator); var a = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch bun.outOfMemory(); @@ -1389,7 +1449,7 @@ fn sendStubErrorMessage(dev: *DevServer, route: *Route, resp: *Response, err: JS resp.end(a.items, true); // TODO: "You should never call res.end(huge buffer)" } -const FileKind = enum { +const FileKind = enum(u2) { /// Files that failed to bundle or do not exist on disk will appear in the /// graph as "unknown". unknown, @@ -1587,8 +1647,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// An index into `bundled_files`, `stale_files`, `first_dep`, `first_import`, or `affected_by_trace` /// Top bits cannot be relied on due to `SerializedFailure.Owner.Packed` pub const FileIndex = bun.GenericIndex(u30, File); - pub const framework_entry_point_index = FileIndex.init(0); - pub const react_refresh_index = if (side == .client) FileIndex.init(1); + pub const react_refresh_index = if (side == .client) FileIndex.init(0); /// An index into `edges` const EdgeIndex = bun.GenericIndex(u32, Edge); @@ -1638,8 +1697,8 @@ pub fn IncrementalGraph(side: bake.Side) type { g.current_chunk_len += code.len; // Dump to filesystem if enabled - if (dev.dump_dir) |dump_dir| { - const cwd = dev.cwd; + if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |dump_dir| { + const cwd = dev.root; var a: bun.PathBuffer = undefined; var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined; const rel_path = bun.path.relativeBufZ(&a, cwd, abs_path); @@ -1653,7 +1712,7 @@ pub fn IncrementalGraph(side: bake.Side) type { bun.handleErrorReturnTrace(err, @errorReturnTrace()); Output.warn("Could not dump bundle: {}", .{err}); }; - } + }; const gop = try g.bundled_files.getOrPut(dev.allocator, abs_path); const file_index = FileIndex.init(@intCast(gop.index)); @@ -1932,11 +1991,6 @@ pub fn IncrementalGraph(side: bake.Side) type { stop_at_boundary, no_stop, }; - const TraceImportGoal = struct { - // gts: *GraphTraceState, - find_css: bool = false, - find_client_modules: bool = false, - }; fn traceDependencies(g: *@This(), file_index: FileIndex, trace_kind: TraceDependencyKind) !void { g.owner().graph_safety_lock.assertLocked(); @@ -2061,16 +2115,10 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Never takes ownership of `abs_path` /// Marks a chunk but without any content. 
Used to track dependencies to files that don't exist. pub fn insertStale(g: *@This(), abs_path: []const u8, is_ssr_graph: bool) bun.OOM!FileIndex { - return g.insertStaleExtra(abs_path, is_ssr_graph, false, {}); + return g.insertStaleExtra(abs_path, is_ssr_graph, false); } - pub fn insertStaleExtra( - g: *@This(), - abs_path: []const u8, - is_ssr_graph: bool, - comptime is_route: bool, - route_index: if (is_route) Route.Index else void, - ) bun.OOM!FileIndex { + pub fn insertStaleExtra(g: *@This(), abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex { g.owner().graph_safety_lock.assertLocked(); debug.log("Insert stale: {s}", .{abs_path}); @@ -2087,18 +2135,10 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - if (is_route) { - g.owner().routes[route_index.get()].server_file = file_index.toOptional(); - } - if (g.stale_files.bit_length > gop.index) { g.stale_files.set(gop.index); } - if (is_route) { - try g.owner().route_lookup.put(g.owner().allocator, file_index, route_index); - } - switch (side) { .client => { gop.value_ptr.* = File.init("", .{ @@ -2221,7 +2261,7 @@ pub fn IncrementalGraph(side: bake.Side) type { }; const failure = try SerializedFailure.initFromLog( fail_owner, - bun.path.relative(dev.cwd, abs_path), + dev.relativePath(abs_path), log.msgs.items, ); const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator, failure); @@ -2232,7 +2272,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - pub fn ensureStaleBitCapacity(g: *@This(), val: bool) !void { + pub fn ensureStaleBitCapacity(g: *@This(), are_new_files_stale: bool) !void { try g.stale_files.resize( g.owner().allocator, std.mem.alignForward( @@ -2241,7 +2281,7 @@ pub fn IncrementalGraph(side: bake.Side) type { // allocate 8 in 8 usize chunks std.mem.byte_size_in_bits * @sizeOf(usize) * 8, ), - val, + are_new_files_stale, ); } @@ -2283,17 +2323,26 @@ pub fn IncrementalGraph(side: bake.Side) type { if (side == .client) g.current_css_files.clearRetainingCapacity(); } - pub fn takeBundle(g: *@This(), kind: ChunkKind) ![]const u8 { + pub fn takeBundle( + g: *@This(), + kind: ChunkKind, + initial_response_entry_point: []const u8, + ) ![]const u8 { var chunk = std.ArrayList(u8).init(g.owner().allocator); - try g.takeBundleToList(kind, &chunk); + try g.takeBundleToList(kind, &chunk, initial_response_entry_point); bun.assert(chunk.items.len == chunk.capacity); return chunk.items; } - pub fn takeBundleToList(g: *@This(), kind: ChunkKind, list: *std.ArrayList(u8)) !void { + pub fn takeBundleToList( + g: *@This(), + kind: ChunkKind, + list: *std.ArrayList(u8), + initial_response_entry_point: []const u8, + ) !void { g.owner().graph_safety_lock.assertLocked(); // initial bundle needs at least the entry point - // hot updates shouldnt be emitted if there are no chunks + // hot updates shouldn't be emitted if there are no chunks assert(g.current_chunk_len > 0); const runtime = switch (kind) { @@ -2314,12 +2363,8 @@ pub fn IncrementalGraph(side: bake.Side) type { .initial_response => { const fw = g.owner().framework; try w.writeAll("}, {\n main: "); - const entry = switch (side) { - .server => fw.entry_server, - .client => fw.entry_client, - }; try bun.js_printer.writeJSONString( - bun.path.relative(g.owner().cwd, entry), + g.owner().relativePath(initial_response_entry_point), @TypeOf(w), w, .utf8, @@ -2332,7 +2377,7 @@ pub fn IncrementalGraph(side: bake.Side) type { if (fw.react_fast_refresh) |rfr| { try w.writeAll(",\n refresh: "); try bun.js_printer.writeJSONString( - 
bun.path.relative(g.owner().cwd, rfr.import_source), + g.owner().relativePath(rfr.import_source), @TypeOf(w), w, .utf8, @@ -2375,7 +2420,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } list.appendSliceAssumeCapacity(end); - if (g.owner().dump_dir) |dump_dir| { + if (bun.FeatureFlags.bake_debugging_features) if (g.owner().dump_dir) |dump_dir| { const rel_path_escaped = "latest_chunk.js"; dumpBundle(dump_dir, switch (side) { .client => .client, @@ -2384,7 +2429,7 @@ pub fn IncrementalGraph(side: bake.Side) type { bun.handleErrorReturnTrace(err, @errorReturnTrace()); Output.warn("Could not dump bundle: {}", .{err}); }; - } + }; } fn disconnectAndDeleteFile(g: *@This(), file_index: FileIndex) void { @@ -2406,6 +2451,12 @@ pub fn IncrementalGraph(side: bake.Side) type { } } + // TODO: it is infeasible to do this since FrameworkRouter contains file indices + // to the server graph + { + return; + } + g.bundled_files.swapRemoveAt(file_index.get()); // Move out-of-line data from `last` to replace `file_index` @@ -2478,8 +2529,10 @@ pub fn IncrementalGraph(side: bake.Side) type { const IncrementalResult = struct { /// When tracing a file's dependencies via `traceDependencies`, this is - /// populated with the hit routes. Tracing is used for many purposes. - routes_affected: ArrayListUnmanaged(Route.Index), + /// populated with the hit `Route.Index`s. To know what `RouteBundle`s + /// are affected, the route graph must be traced downwards. + /// Tracing is used for multiple purposes. + routes_affected: ArrayListUnmanaged(RouteIndexAndRecurseFlag), // Following three fields are populated during `receiveChunk` @@ -2508,6 +2561,7 @@ const IncrementalResult = struct { failures_added: ArrayListUnmanaged(SerializedFailure), /// Removing files clobbers indices, so removing anything is deferred. + // TODO: remove delete_client_files_later: ArrayListUnmanaged(IncrementalGraph(.client).FileIndex), const empty: IncrementalResult = .{ @@ -2545,6 +2599,12 @@ const GraphTraceState = struct { } }; +const TraceImportGoal = struct { + // gts: *GraphTraceState, + find_css: bool = false, + find_client_modules: bool = false, +}; + fn initGraphTraceState(dev: *const DevServer, sfa: Allocator) !GraphTraceState { const server_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count()); errdefer server_bits.deinit(sfa); @@ -2824,7 +2884,7 @@ pub const SerializedFailure = struct { /// is given to the HMR runtime as an opaque handle. 
pub const Owner = union(enum) { none, - route: Route.Index, + route: RouteBundle.Index, client: IncrementalGraph(.client).FileIndex, server: IncrementalGraph(.server).FileIndex, @@ -2846,7 +2906,7 @@ pub const SerializedFailure = struct { .none => .none, .client => .{ .client = IncrementalGraph(.client).FileIndex.init(owner.data) }, .server => .{ .server = IncrementalGraph(.server).FileIndex.init(owner.data) }, - .route => .{ .route = Route.Index.init(owner.data) }, + .route => .{ .route = RouteBundle.Index.init(owner.data) }, }; } }; @@ -3056,11 +3116,19 @@ fn dumpBundle(dump_dir: std.fs.Dir, side: bake.Graph, rel_path: []const u8, chun } fn emitVisualizerMessageIfNeeded(dev: *DevServer) !void { + if (!bun.FeatureFlags.bake_debugging_features) return; if (dev.emit_visualizer_events == 0) return; var sfb = std.heap.stackFallback(65536, bun.default_allocator); var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 65536); defer payload.deinit(); + + try dev.writeVisualizerMessage(&payload); + + dev.publish(HmrSocket.visualizer_topic, payload.items, .binary); +} + +fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !void { payload.appendAssumeCapacity(MessageId.visualizer.char()); const w = payload.writer(); @@ -3074,9 +3142,10 @@ fn emitVisualizerMessageIfNeeded(dev: *DevServer) !void { g.bundled_files.values(), 0.., ) |k, v, i| { - try w.writeInt(u32, @intCast(k.len), .little); + const normalized_key = dev.relativePath(k); + try w.writeInt(u32, @intCast(normalized_key.len), .little); if (k.len == 0) continue; - try w.writeAll(k); + try w.writeAll(normalized_key); try w.writeByte(@intFromBool(g.stale_files.isSet(i) or switch (side) { .server => v.failed, .client => v.flags.failed, @@ -3103,8 +3172,6 @@ fn emitVisualizerMessageIfNeeded(dev: *DevServer) !void { try w.writeInt(u32, @intCast(edge.imported.get()), .little); } } - - _ = dev.app.publish(HmrSocket.visualizer_topic, payload.items, .binary, false); } pub fn onWebSocketUpgrade( @@ -3198,7 +3265,7 @@ pub const MessageId = enum(u8) { /// - `u32`: File index of the imported file visualizer = 'v', - pub fn char(id: MessageId) u8 { + pub inline fn char(id: MessageId) u8 { return @intFromEnum(id); } }; @@ -3217,12 +3284,12 @@ const HmrSocket = struct { pub const global_topic = "*"; pub const visualizer_topic = "v"; - pub fn onOpen(dw: *HmrSocket, ws: AnyWebSocket) void { - _ = ws.send(&(.{MessageId.version.char()} ++ dw.dev.configuration_hash_key), .binary, false, true); + pub fn onOpen(s: *HmrSocket, ws: AnyWebSocket) void { + _ = ws.send(&(.{MessageId.version.char()} ++ s.dev.configuration_hash_key), .binary, false, true); _ = ws.subscribe(global_topic); } - pub fn onMessage(dw: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void { + pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void { _ = opcode; if (msg.len == 0) { @@ -3232,11 +3299,11 @@ const HmrSocket = struct { switch (@as(IncomingMessageId, @enumFromInt(msg[0]))) { .visualizer => { - if (!dw.emit_visualizer_events) { - dw.emit_visualizer_events = true; - dw.dev.emit_visualizer_events += 1; + if (!s.emit_visualizer_events) { + s.emit_visualizer_events = true; + s.dev.emit_visualizer_events += 1; _ = ws.subscribe(visualizer_topic); - dw.dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory(); + s.dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory(); } }, else => { @@ -3245,73 +3312,36 @@ const HmrSocket = struct { } } - pub fn onClose(dw: *HmrSocket, ws: AnyWebSocket, exit_code: i32, 
message: []const u8) void { + pub fn onClose(s: *HmrSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void { _ = ws; _ = exit_code; _ = message; - if (dw.emit_visualizer_events) { - dw.dev.emit_visualizer_events -= 1; + if (s.emit_visualizer_events) { + s.dev.emit_visualizer_events -= 1; } - defer dw.dev.allocator.destroy(dw); - } -}; - -/// Bake uses a special global object extending Zig::GlobalObject -pub const DevGlobalObject = opaque { - /// Safe downcast to use other Bun APIs - pub fn js(ptr: *DevGlobalObject) *JSC.JSGlobalObject { - return @ptrCast(ptr); - } - - pub fn vm(ptr: *DevGlobalObject) *JSC.VM { - return ptr.js().vm(); + defer s.dev.allocator.destroy(s); } }; -pub const BakeSourceProvider = opaque {}; - const c = struct { - // BakeDevGlobalObject.cpp - extern fn BakeCreateDevGlobal(owner: *DevServer, console: *JSC.ConsoleObject) *DevGlobalObject; - // BakeSourceProvider.cpp extern fn BakeGetDefaultExportFromModule(global: *JSC.JSGlobalObject, module: JSValue) JSValue; - const LoadServerCodeResult = struct { - promise: *JSInternalPromise, - key: *JSC.JSString, - }; - - fn BakeLoadServerHmrPatch(global: *DevGlobalObject, code: bun.String) !JSValue { - const f = @extern(*const fn (*DevGlobalObject, bun.String) callconv(.C) JSValue, .{ - .name = "BakeLoadServerHmrPatch", - }); - const result = f(global, code); - if (result == .zero) { - if (Environment.allow_assert) assert(global.js().hasException()); - return error.JSError; - } - return result; + fn BakeLoadServerHmrPatch(global: *JSC.JSGlobalObject, code: bun.String) !JSValue { + const f = @extern( + *const fn (*JSC.JSGlobalObject, bun.String) callconv(.C) JSValue.MaybeException, + .{ .name = "BakeLoadServerHmrPatch" }, + ); + return f(global, code).unwrap(); } - fn BakeLoadInitialServerCode(global: *DevGlobalObject, code: bun.String) bun.JSError!LoadServerCodeResult { - const Return = extern struct { - promise: ?*JSInternalPromise, - key: *JSC.JSString, - }; - const f = @extern(*const fn (*DevGlobalObject, bun.String) callconv(.C) Return, .{ + fn BakeLoadInitialServerCode(global: *JSC.JSGlobalObject, code: bun.String, separate_ssr_graph: bool) bun.JSError!JSValue { + const f = @extern(*const fn (*JSC.JSGlobalObject, bun.String, bool) callconv(.C) JSValue.MaybeException, .{ .name = "BakeLoadInitialServerCode", }); - const result = f(global, code); - return .{ - .promise = result.promise orelse { - if (Environment.allow_assert) assert(global.js().hasException()); - return error.JSError; - }, - .key = result.key, - }; + return f(global, code, separate_ssr_graph).unwrap(); } }; @@ -3331,7 +3361,7 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void { @panic("timers unsupported"); var sfb = std.heap.stackFallback(4096, bun.default_allocator); - const temp_alloc = sfb.get(); + var temp_alloc = sfb.get(); // pre-allocate a few files worth of strings. it is unlikely but supported // to change more than 8 files in the same bundling round. 
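
> Aside: the hunk above switches `temp_alloc` from `const` to `var` so the allocator can be rebound after the stack buffer is recycled later in `reload`. For readers unfamiliar with `std.heap.stackFallback`, here is a minimal, self-contained sketch of the pattern using only the Zig standard library; nothing DevServer-specific is assumed:

```zig
const std = @import("std");

// Minimal sketch of the stack-fallback allocation pattern used in reload():
// small allocations are served from a fixed stack buffer, and anything that
// does not fit falls through to the backing allocator.
pub fn main() !void {
    var sfa = std.heap.stackFallback(4096, std.heap.page_allocator);
    const alloc = sfa.get();

    // Fits in the 4096-byte stack buffer, so no heap allocation happens.
    const small = try alloc.alloc(u8, 64);
    defer alloc.free(small);

    // Larger than the buffer, so this is served by the fallback allocator.
    const large = try alloc.alloc(u8, 16 * 1024);
    defer alloc.free(large);

    std.debug.print("small={d} large={d}\n", .{ small.len, large.len });
}
```
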
@@ -3352,15 +3382,6 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void {
 return;
 }

- const reload_file_list = bun.Output.Scoped(.reload_file_list, false);
-
- if (reload_file_list.isVisible()) {
- reload_file_list.log("Hot update hits {d} files", .{files.items.len});
- for (files.items) |f| {
- reload_file_list.log("- {s} (.{s})", .{ f.path, @tagName(f.graph) });
- }
- }
-
 dev.incremental_result.reset();
 defer {
 // Remove files last to start, to avoid issues where removing a file
@@ -3387,23 +3408,48 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void {

 // This list of routes affected excludes client code. This means changing
 // a client component wont count as a route to trigger a reload on.
+ //
+ // A second trace is required to determine which routes have changed bundles,
+ // since changing a layout affects all child routes. Additionally, routes
+ // that do not have a bundle will not be cleared (as there is nothing to
+ // clear for those).
 if (dev.incremental_result.routes_affected.items.len > 0) {
+ // Re-use some earlier stack memory.
+ files.clearAndFree();
+ sfb = std.heap.stackFallback(4096, bun.default_allocator);
+ temp_alloc = sfb.get();
+
+ // A bit-set is used to avoid duplicate entries: `dev.incremental_result.routes_affected`
+ // itself contains no duplicates, but recursing into child routes can visit
+ // the same route bundle more than once.
+ var second_trace_result = try DynamicBitSetUnmanaged.initEmpty(temp_alloc, dev.route_bundles.items.len);
+ for (dev.incremental_result.routes_affected.items) |request| {
+ const route = dev.router.routePtr(request.route_index);
+ if (route.bundle.unwrap()) |id| second_trace_result.set(id.get());
+ if (request.should_recurse_when_visiting) {
+ markAllRouteChildren(&dev.router, &second_trace_result, request.route_index);
+ }
+ }
+
 var sfb2 = std.heap.stackFallback(65536, bun.default_allocator);
 var payload = std.ArrayList(u8).initCapacity(sfb2.get(), 65536) catch
 unreachable; // enough space
 defer payload.deinit();
 payload.appendAssumeCapacity(MessageId.route_update.char());

 const w = payload.writer();
- try w.writeInt(u32, @intCast(dev.incremental_result.routes_affected.items.len), .little);
-
- for (dev.incremental_result.routes_affected.items) |route| {
- try w.writeInt(u32, route.get(), .little);
- const pattern = dev.routes[route.get()].pattern;
+ const count = second_trace_result.count();
+ assert(count > 0);
+ try w.writeInt(u32, @intCast(count), .little);
+
+ var it = second_trace_result.iterator(.{ .kind = .set });
+ while (it.next()) |bundled_route_index| {
+ try w.writeInt(u32, @intCast(bundled_route_index), .little);
+ const pattern = dev.route_bundles.items[bundled_route_index].full_pattern;
 try w.writeInt(u32, @intCast(pattern.len), .little);
 try w.writeAll(pattern);
 }

- _ = dev.app.publish(HmrSocket.global_topic, payload.items, .binary, true);
+ // Notify
+ dev.publish(HmrSocket.global_topic, payload.items, .binary);
 }

 // When client component roots get updated, the `client_components_affected`
@@ -3423,13 +3469,14 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void {
 try dev.server_graph.traceDependencies(index, .no_stop);
 }

- for (dev.incremental_result.routes_affected.items) |route| {
- // Free old bundles
- if (dev.routes[route.get()].client_bundle) |old| {
- dev.allocator.free(old);
- }
- dev.routes[route.get()].client_bundle = null;
- }
+ // TODO:
+ // for (dev.incremental_result.routes_affected.items) |route| {
+ // // Free old bundles
+ // if (dev.routes[route.get()].client_bundle) |old| {
+ // dev.allocator.free(old);
+ // }
+ 
// dev.routes[route.get()].client_bundle = null; + // } } // TODO: improve this visual feedback @@ -3446,7 +3493,7 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void { Output.prettyError("[x{d}] ", .{dev.bundles_since_last_error}); } - Output.prettyError("Reloaded in {d}ms: {s}", .{ @divFloor(timer.read(), std.time.ns_per_ms), bun.path.relative(dev.cwd, changed_file_paths[0]) }); + Output.prettyError("Reloaded in {d}ms: {s}", .{ @divFloor(timer.read(), std.time.ns_per_ms), dev.relativePath(changed_file_paths[0]) }); if (changed_file_paths.len > 1) { Output.prettyError(" + {d} more", .{files.items.len - 1}); } @@ -3455,6 +3502,16 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void { } else {} } +fn markAllRouteChildren(router: *FrameworkRouter, bits: *DynamicBitSetUnmanaged, route_index: Route.Index) void { + var next = router.routePtr(route_index).first_child.unwrap(); + while (next) |child_index| { + const route = router.routePtr(child_index); + if (route.bundle.unwrap()) |index| bits.set(index.get()); + markAllRouteChildren(router, bits, child_index); + next = route.next_sibling.unwrap(); + } +} + pub const HotReloadTask = struct { /// Align to cache lines to reduce contention. const Aligned = struct { aligned: HotReloadTask align(std.atomic.cache_line) }; @@ -3585,7 +3642,8 @@ pub fn onFileUpdate(dev: *DevServer, events: []Watcher.Event, changed_files: []? }, .directory => { // bust the directory cache since this directory has changed - _ = dev.server_bundler.resolver.bustDirCache(file_path); + // TODO: correctly solve https://github.com/oven-sh/bun/issues/14913 + _ = dev.server_bundler.resolver.bustDirCache(bun.strings.withoutTrailingSlash(file_path)); // if a directory watch exists for resolution // failures, check those now. 
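
> Aside: the route-update payload built in `reload` above deduplicates bundle indices with a dynamic bit-set before writing the message out. A standalone sketch of that dedup-and-iterate shape, using only `std.bit_set` and made-up indices (no DevServer types):

```zig
const std = @import("std");

pub fn main() !void {
    const gpa = std.heap.page_allocator;

    // Suppose eight route bundles exist; the indices below are made up.
    var affected = try std.bit_set.DynamicBitSetUnmanaged.initEmpty(gpa, 8);
    defer affected.deinit(gpa);

    // Two overlapping traces both report bundle 3; the set stores it once.
    for ([_]usize{ 3, 5, 3 }) |bundle_index| affected.set(bundle_index);

    // `count()` gives the deduplicated total written into the payload header.
    std.debug.print("unique bundles: {d}\n", .{affected.count()});

    // Iterate only the set bits, mirroring the payload-writing loop above.
    var it = affected.iterator(.{ .kind = .set });
    while (it.next()) |bundle_index| {
        std.debug.print("route bundle {d} changed\n", .{bundle_index});
    }
}
```
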
@@ -3636,6 +3694,107 @@ pub fn onWatchError(_: *DevServer, err: bun.sys.Error) void { } } +pub fn publish(dev: *DevServer, topic: []const u8, message: []const u8, opcode: uws.Opcode) void { + if (dev.server) |s| _ = s.publish(topic, message, opcode, false); +} + +pub fn numSubscribers(dev: *DevServer, topic: []const u8) u32 { + return if (dev.server) |s| s.numSubscribers(topic) else 0; +} + +const SafeFileId = packed struct(u32) { + side: bake.Side, + index: u30, + unused: enum(u1) { unused = 0 } = .unused, +}; + +/// Interface function for FrameworkRouter +pub fn getFileIdForRouter(dev: *DevServer, abs_path: []const u8, associated_route: Route.Index, file_kind: Route.FileKind) !OpaqueFileId { + const index = try dev.server_graph.insertStaleExtra(abs_path, false, true); + try dev.route_lookup.put(dev.allocator, index, .{ + .route_index = associated_route, + .should_recurse_when_visiting = file_kind == .layout, + }); + return toOpaqueFileId(.server, index); +} + +fn toOpaqueFileId(comptime side: bake.Side, index: IncrementalGraph(side).FileIndex) OpaqueFileId { + if (Environment.allow_assert) { + return OpaqueFileId.init(@bitCast(SafeFileId{ + .side = side, + .index = index.get(), + })); + } + + return OpaqueFileId.init(index.get()); +} + +fn fromOpaqueFileId(comptime side: bake.Side, id: OpaqueFileId) IncrementalGraph(side).FileIndex { + if (Environment.allow_assert) { + const safe: SafeFileId = @bitCast(id.get()); + assert(side == safe.side); + return IncrementalGraph(side).FileIndex.init(safe.index); + } + return IncrementalGraph(side).FileIndex.init(@intCast(id.get())); +} + +fn relativePath(dev: *const DevServer, path: []const u8) []const u8 { + // TODO: windows slash normalization + bun.assert(dev.root[dev.root.len - 1] != '/'); + if (path.len >= dev.root.len + 1 and + path[dev.root.len] == '/' and + bun.strings.startsWith(path, dev.root)) + { + return path[dev.root.len + 1 ..]; + } + return bun.path.relative(dev.root, path); +} + +fn dumpStateDueToCrash(dev: *DevServer) !void { + comptime assert(bun.FeatureFlags.bake_debugging_features); + + // being conservative about how much stuff is put on the stack. + var filepath_buf: [@min(4096, bun.MAX_PATH_BYTES)]u8 = undefined; + const filepath = std.fmt.bufPrintZ(&filepath_buf, "incremental-graph-crash-dump.{d}.html", .{std.time.timestamp()}) catch "incremental-graph-crash-dump.html"; + const file = std.fs.cwd().createFileZ(filepath, .{}) catch |err| { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + Output.warn("Could not open directory for dumping sources: {}", .{err}); + return; + }; + defer file.close(); + + const start, const end = comptime brk: { + const visualizer = @embedFile("incremental_visualizer.html"); + const i = (std.mem.indexOf(u8, visualizer, ""); + } catch { + // The chunk cannot be embedded as a UTF-8 string in the script tag. + // No data should have been written yet, so a base64 fallback can be used. + const base64 = btoa(String.fromCodePoint(...chunk)); + controller.write(`Uint8Array.from(atob(\"${base64}\"),m=>m.codePointAt(0))`); + } +} + +/** + * Attempts to combine RSC chunks together to minimize the number of chunks the + * client processes. + */ +function writeManyFlightScriptData( + chunks: Uint8Array[], + decoder: TextDecoder, + controller: { write: (str: string) => void }, +) { + if (chunks.length === 1) return writeSingleFlightScriptData(chunks[0], decoder, controller); + + let i = 0; + try { + // Combine all chunks into a single string if possible. 
+ for (; i < chunks.length; i++) {
+ // `decode()` will throw on invalid UTF-8 sequences.
+ const str = toSingleQuote(decoder.decode(chunks[i], { stream: true }));
+ if (i === 0) controller.write("'");
+ controller.write(str);
+ }
+ controller.write("')");
+ } catch {
+ // The chunk cannot be embedded as a UTF-8 string in the script tag.
+ // Since this is rare, just make the rest of the chunks base64.
+ if (i > 0) controller.write("');__bun_f.push(");
+ controller.write('Uint8Array.from(atob("');
+ for (; i < chunks.length; i++) {
+ const chunk = chunks[i];
+ const base64 = btoa(String.fromCodePoint(...chunk));
+ controller.write(base64);
+ }
+ controller.write('"),m=>m.codePointAt(0))');
+ }
+}
+
+// Instead of using `JSON.stringify`, this uses a single-quote variant of it, since
+// the RSC payload includes a ton of " characters. This is slower, but would be
+// easy to move into native code.
+function toSingleQuote(str: string): string {
+ return (
+ str // Escape single quotes, backslashes, and newlines
+ .replace(/\\/g, "\\\\")
+ .replace(/'/g, "\\'")
+ .replace(/\n/g, "\\n")
+ // Escape closing script tags and HTML comments in JS content.
+ .replace(/
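
> Aside: as a closing illustration of the `SafeFileId` helper added earlier in this diff, in debug builds an opaque `u32` file id round-trips through a `packed struct(u32)` so the graph side can be asserted when unpacking. A self-contained sketch, with a stand-in one-bit `Side` enum since `bake.Side`'s definition is not shown here:

```zig
const std = @import("std");

// Hypothetical stand-in for bake.Side, assumed to fit in one bit.
const Side = enum(u1) { server, client };

// Mirrors the SafeFileId layout above: a 1-bit side, a 30-bit file index,
// and 1 unused bit pack exactly into the opaque u32 handed to FrameworkRouter.
const SafeFileId = packed struct(u32) {
    side: Side,
    index: u30,
    unused: u1 = 0,
};

pub fn main() void {
    // Pack a client-graph file index into the opaque handle...
    const opaque_id: u32 = @bitCast(SafeFileId{ .side = .client, .index = 1234 });

    // ...and unpack it, asserting the side matches, as fromOpaqueFileId does.
    const decoded: SafeFileId = @bitCast(opaque_id);
    std.debug.assert(decoded.side == .client);
    std.debug.assert(decoded.index == 1234);
    std.debug.print("opaque id = 0x{x:0>8}\n", .{opaque_id});
}
```
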