diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 0000000000..951fedcac6 --- /dev/null +++ b/Cross.toml @@ -0,0 +1,50 @@ +# Local cross-compile config for dec-bench harness tests. +# CI uses manylinux_2_28 directly; this is the convenience path for +# `cross build --target aarch64-unknown-linux-gnu -p moose-cli`. +[build.env] +# Tell openssl-sys to skip the vendored build (it strips deprecated symbols +# like SSL_get_peer_certificate that librdkafka still calls) and link the +# system OpenSSL 1.1 inside the container instead. OPENSSL_NO_VENDOR is the +# documented opt-out for the `vendored` feature — it keeps moose-cli's +# Cargo.toml unchanged so regular native builds keep vendoring. +# +# CC / CXX / AR are passthrough because rdkafka-sys runs librdkafka's +# autoconf `./configure` as a subprocess, which reads bare `$CC` from env +# rather than cargo's target-qualified `CC_aarch64_unknown_linux_gnu`. +# Without them configure defaults to the host `gcc` (x86_64) and ld fails +# with `cannot find -lcrypto` because only arm64 libs are in the sysroot. +# Invoke like: +# CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ AR=aarch64-linux-gnu-ar \ +# OPENSSL_NO_VENDOR=1 cross build --target aarch64-unknown-linux-gnu -p moose-cli +passthrough = [ + "OPENSSL_NO_VENDOR", + "OPENSSL_LIB_DIR", + "OPENSSL_INCLUDE_DIR", + "OPENSSL_STATIC", + "CC", + "CXX", + "AR", +] + +[target.aarch64-unknown-linux-gnu] +# The pinned 0.2.5 image ships libssl 1.0.0, which lacks SSL_get_peer_certificate +# as an exported symbol and fails librdkafka's final link. `:main` is Ubuntu +# 20.04 with libssl 1.1.1, which has the symbol. +image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main" +# cross's /opt/toolchain.cmake pins CMAKE_FIND_ROOT_PATH to /usr/aarch64-linux-gnu +# (sysroot layout), but apt multiarch installs arm64 libs to +# /usr/lib/aarch64-linux-gnu (Debian layout). rdkafka-sys's cmake build of +# librdkafka does FIND_PACKAGE(ZLIB) / FIND_PACKAGE(OpenSSL) and can't see +# across that gap — so we symlink the multiarch lib/headers into the sysroot. +pre-build = [ + "dpkg --add-architecture arm64", + "apt-get update", + "apt-get install -y --no-install-recommends protobuf-compiler libprotobuf-dev cmake pkg-config zlib1g-dev:arm64 libssl-dev:arm64", + "mkdir -p /usr/aarch64-linux-gnu/lib /usr/aarch64-linux-gnu/include", + "ln -sf /usr/lib/aarch64-linux-gnu/libz.so /usr/aarch64-linux-gnu/lib/libz.so", + "ln -sf /usr/lib/aarch64-linux-gnu/libz.a /usr/aarch64-linux-gnu/lib/libz.a", + "ln -sf /usr/include/zlib.h /usr/aarch64-linux-gnu/include/zlib.h", + "ln -sf /usr/include/zconf.h /usr/aarch64-linux-gnu/include/zconf.h", + "for f in libssl.so libssl.a libcrypto.so libcrypto.a; do ln -sf /usr/lib/aarch64-linux-gnu/$f /usr/aarch64-linux-gnu/lib/$f; done", + "ln -sfn /usr/include/openssl /usr/aarch64-linux-gnu/include/openssl", +] diff --git a/apps/framework-cli-e2e/test/s3-engine.test.ts b/apps/framework-cli-e2e/test/s3-engine.test.ts index 636267eeb9..3eab0a3240 100644 --- a/apps/framework-cli-e2e/test/s3-engine.test.ts +++ b/apps/framework-cli-e2e/test/s3-engine.test.ts @@ -114,6 +114,12 @@ if (!SELECTED_LANGUAGE || SELECTED_LANGUAGE === "ts") { APP_NAMES.TYPESCRIPT_TESTS, { logPrefix: "TypeScript S3 Engine Test (With Env Vars)", + // Pass the offset-50 ports so `killRemainingProcesses` can + // SIGKILL leftover ClickHouse/Node consumption workers by port + // instead of targeting the default (offset-0) ports. 
The + // dockerless preflight in the next describe block depends on + // those ports being free. + ports: PORTS, ...getCleanupOptionsForMode(E2E_DEV_MODE), }, ); @@ -194,6 +200,8 @@ if (!SELECTED_LANGUAGE || SELECTED_LANGUAGE === "py") { APP_NAMES.PYTHON_TESTS, { logPrefix: "Python S3 Engine Test (With Env Vars)", + // See note on the TypeScript describe block above. + ports: PORTS, ...getCleanupOptionsForMode(E2E_DEV_MODE), }, ); diff --git a/apps/framework-cli-e2e/test/s3-secrets.test.ts b/apps/framework-cli-e2e/test/s3-secrets.test.ts index 4daf0f688f..33e9442231 100644 --- a/apps/framework-cli-e2e/test/s3-secrets.test.ts +++ b/apps/framework-cli-e2e/test/s3-secrets.test.ts @@ -146,6 +146,12 @@ if (!SELECTED_LANGUAGE || SELECTED_LANGUAGE === "ts") { APP_NAMES.TYPESCRIPT_TESTS, { logPrefix: "TypeScript S3Queue Test (With Env Vars)", + // Pass the test's actual port set so `killRemainingProcesses` can + // SIGKILL leftover ClickHouse/Node consumption workers on the + // offset-40 ports. Without this it would target the default + // (offset-0) ports and leave this suite's children alive, + // causing the next describe block's preflight to fail. + ports: PORTS, ...getCleanupOptionsForMode(E2E_DEV_MODE), }, ); @@ -315,6 +321,8 @@ if (!SELECTED_LANGUAGE || SELECTED_LANGUAGE === "py") { APP_NAMES.PYTHON_TESTS, { logPrefix: "Python S3Queue Test (With Env Vars)", + // See note on the TypeScript describe block above. + ports: PORTS, ...getCleanupOptionsForMode(E2E_DEV_MODE), }, ); diff --git a/apps/framework-cli-e2e/test/unloaded-files-warning.test.ts b/apps/framework-cli-e2e/test/unloaded-files-warning.test.ts index 2101fa0a83..5bf7f96ab8 100644 --- a/apps/framework-cli-e2e/test/unloaded-files-warning.test.ts +++ b/apps/framework-cli-e2e/test/unloaded-files-warning.test.ts @@ -71,9 +71,13 @@ describe("Unloaded Files Warning", () => { let devProcess: ChildProcess | null = null; afterEach(async () => { - await stopDevProcess(devProcess, { logger: testLogger }); + await stopDevProcess(devProcess, { logger: testLogger, ports: PORTS }); devProcess = null; - await killRemainingProcesses({ logger: testLogger }); + // Pass the test's actual offset ports so killRemainingProcesses both + // targets them with SIGKILL and waits until they are released. Without + // this, ClickHouse's async shutdown still holds files in testDir when + // `rmSync` runs, producing ENOTEMPTY. + await killRemainingProcesses({ logger: testLogger, ports: PORTS }); if (testDir && fs.existsSync(testDir)) { testLogger.debug("Cleaning up test directory", { testDir }); diff --git a/apps/framework-cli/src/cli/local_webserver.rs b/apps/framework-cli/src/cli/local_webserver.rs index 6b78ce908c..63fbe8b438 100644 --- a/apps/framework-cli/src/cli/local_webserver.rs +++ b/apps/framework-cli/src/cli/local_webserver.rs @@ -571,26 +571,113 @@ async fn get_consumption_api_res( let _ = execute!(std::io::stdout(), Print(msg + "\n")); } - let mut client_req = reqwest::Request::new(req.method().clone(), url.parse()?); + // Capture method + headers up front so we can rebuild the reqwest::Request + // across retry attempts (reqwest::Request isn't Clone when building via + // reqwest::Request::new). Only GET requests are proxied here and they + // carry no body, so rebuild-per-attempt is cheap. 
+    let method = req.method().clone();
+    let hdrs: Vec<(hyper::http::HeaderName, hyper::http::HeaderValue)> = req
+        .headers()
+        .iter()
+        .map(|(k, v)| (k.clone(), v.clone()))
+        .collect();
+    let url_parsed: reqwest::Url = url.parse()?;
+
+    // Retry only on genuine connect errors — a transient "socket not
+    // accepting" window where the consumption-api is momentarily
+    // unavailable. In dev this is most often the hot-reload restart of the
+    // primary worker; in prod it can happen on any brief restart / socket
+    // reset. Other failures (timeouts, TLS, 5xx from upstream) surface once.
+    const MAX_ATTEMPTS: usize = 3;
+    const BACKOFF_MS: [u64; 2] = [150, 400]; // applied between attempts 1→2 and 2→3
+
+    let mut last_connect_err: Option<reqwest::Error> = None;
+    for attempt in 0..MAX_ATTEMPTS {
+        let mut client_req = reqwest::Request::new(method.clone(), url_parsed.clone());
+        let req_headers = client_req.headers_mut();
+        for (k, v) in hdrs.iter() {
+            req_headers.insert(k, v.clone());
+        }
 
-    // Copy headers
-    let headers = client_req.headers_mut();
-    for (key, value) in req.headers() {
-        headers.insert(key, value.clone());
+        match http_client.execute(client_req).await {
+            Ok(res) => {
+                let status = res.status();
+                let body = res.bytes().await?;
+                return Ok(add_cors_headers(Response::builder())
+                    .status(status)
+                    .header("Content-Type", "application/json")
+                    .body(Full::new(body))
+                    .unwrap());
+            }
+            Err(e) if is_connect_error(&e) => {
+                debug!(
+                    "consumption proxy connect error on attempt {}/{}: {}",
+                    attempt + 1,
+                    MAX_ATTEMPTS,
+                    e
+                );
+                last_connect_err = Some(e);
+                if let Some(&ms) = BACKOFF_MS.get(attempt) {
+                    tokio::time::sleep(std::time::Duration::from_millis(ms)).await;
+                    continue;
+                }
+                // No more attempts left.
+                break;
+            }
+            Err(e) => {
+                // Non-connect error: bail immediately, preserving existing
+                // behavior (caller turns this into 500 "Error").
+                return Err(e.into());
+            }
+        }
     }
 
-    // Send request
-    let res = http_client.execute(client_req).await?;
-    let status = res.status();
-    let body = res.bytes().await?;
-
-    let returned_response = add_cors_headers(Response::builder())
-        .status(status)
+    // All retries exhausted with connect errors — return a structured 503 so
+    // agents can reason about this as a transient hot-reload window instead
+    // of an ambiguous 500. Retry-After tells well-behaved clients to try
+    // again shortly.
+    let body = serde_json::json!({
+        "error": "consumption_api_unavailable",
+        "retryable": true,
+        "message": format!(
+            "Consumption API is temporarily unavailable. \
+             Retry shortly. Attempts: {}/{}.",
+            MAX_ATTEMPTS, MAX_ATTEMPTS
+        ),
+        "upstream_error": last_connect_err.as_ref().map(|e| e.to_string()),
+    });
+    Ok(add_cors_headers(Response::builder())
+        .status(StatusCode::SERVICE_UNAVAILABLE)
         .header("Content-Type", "application/json")
-        .body(Full::new(body))
-        .unwrap();
+        .header("Retry-After", "1")
+        .body(Full::new(Bytes::from(body.to_string())))?)
+}
 
-    Ok(returned_response)
+/// Classify a reqwest error as a pure connect-failure (server not accepting
+/// connections) vs anything else. Connect errors are the hot-reload race
+/// signature and the only case we want to retry — timeouts / TLS / upstream
+/// 5xx are surfaced directly.
+fn is_connect_error(e: &reqwest::Error) -> bool {
+    use std::error::Error as _;
+
+    if e.is_connect() {
+        return true;
+    }
+    // reqwest wraps hyper wraps std::io::Error; walk the source chain.
+    let mut src: Option<&(dyn std::error::Error + 'static)> = e.source();
+    while let Some(err) = src {
+        if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
+            use std::io::ErrorKind::*;
+            if matches!(
+                io_err.kind(),
+                ConnectionRefused | ConnectionReset | ConnectionAborted | NotConnected
+            ) {
+                return true;
+            }
+        }
+        src = err.source();
+    }
+    false
 }
 
 #[derive(Clone)]
@@ -2698,7 +2785,11 @@ impl Webserver {
             .await
             .unwrap_or_else(|e| handle_listener_err(management_socket.port(), e));
 
-        // Check if proxy port is available
+        // Defense in depth: quick bind-and-drop on proxy_port so Docker-mode
+        // users get a clear error before the Node worker spawns. The
+        // --dockerless path also runs a structured preflight in
+        // NativeInfraProvider::start that covers this port alongside the
+        // embedded-infra ports.
         let proxy_socket = self.get_socket(project.http_server_config.proxy_port).await;
         TcpListener::bind(proxy_socket)
             .await
@@ -3115,7 +3206,21 @@ async fn shutdown(
     // Step 5: Shut down native infrastructure (embedded servers + child processes).
     if project.dev.dockerless {
+        super::display::show_message_wrapper(
+            MessageType::Highlight,
+            Message {
+                action: "Stopping".to_string(),
+                details: "native infrastructure (up to ~10s per service)...".to_string(),
+            },
+        );
         crate::utilities::native_infra::stop_native_infra(project);
+        super::display::show_message_wrapper(
+            MessageType::Success,
+            Message {
+                action: "Stopped".to_string(),
+                details: "native infrastructure".to_string(),
+            },
+        );
     }
 }
diff --git a/apps/framework-cli/src/infrastructure/processes/consumption_registry.rs b/apps/framework-cli/src/infrastructure/processes/consumption_registry.rs
index eb02b4ab05..551d6330b9 100644
--- a/apps/framework-cli/src/infrastructure/processes/consumption_registry.rs
+++ b/apps/framework-cli/src/infrastructure/processes/consumption_registry.rs
@@ -1,7 +1,10 @@
 use std::collections::HashMap;
+use std::net::TcpListener;
 use std::path::PathBuf;
+use std::thread;
+use std::time::{Duration, Instant};
 
-use tracing::{info, instrument};
+use tracing::{info, instrument, warn};
 
 use crate::cli::logger::{context, resource_type};
 use crate::utilities::system::{RestartPolicy, RestartingProcess, StartChildFn};
@@ -78,6 +81,20 @@ impl ConsumptionProcessRegistry {
         let jwt_config = self.jwt_config.clone();
         let proxy_port = self.proxy_port;
 
+        // Hot-reload race mitigation: when ConsumptionApiWebServer::Updated
+        // fires, we call stop() then start() back-to-back. The prior Node
+        // primary's workers drain connections asynchronously, so :proxy_port
+        // may still be held by the kernel when the new primary tries to
+        // fork workers — every new worker's listen() then fails EADDRINUSE
+        // and the cluster enters a retry storm.
+        //
+        // Here we wait until we can successfully bind the port ourselves
+        // (releasing it immediately after). That proves the old listener is
+        // gone, so the next process's listen() will succeed cleanly.
+        if let Some(port) = proxy_port {
+            wait_for_port_free("127.0.0.1", port);
+        }
+
         let start_child: StartChildFn = match self.language {
             SupportedLanguages::Python => Box::new(move || {
                 python::consumption::run(
@@ -132,3 +149,32 @@ impl ConsumptionProcessRegistry {
         Ok(())
     }
 }
+
+/// Block (briefly) until `host:port` is bindable, i.e. any prior listener
+/// has fully released it. Polls every 100ms up to 5s.
Non-fatal: on timeout +/// we log a warning and return so the caller can still attempt the spawn — +/// the Node-side `server.on('error')` handler will catch a lingering +/// EADDRINUSE and the cluster's restart path will recover. +fn wait_for_port_free(host: &str, port: u16) { + const INTERVAL: Duration = Duration::from_millis(100); + const TIMEOUT: Duration = Duration::from_secs(5); + + let deadline = Instant::now() + TIMEOUT; + loop { + // A successful bind here proves the port is free. The TcpListener + // is dropped at the end of this scope, releasing the fd. Since we + // never accept a connection on it, no TIME_WAIT is created, so the + // Node worker can bind the same port a few ms later. + match TcpListener::bind((host, port)) { + Ok(_) => return, + Err(_) if Instant::now() < deadline => thread::sleep(INTERVAL), + Err(e) => { + warn!( + "consumption-api: port {port} still held after {TIMEOUT:?} ({e}); \ + continuing — Node-side error handler will manage the retry" + ); + return; + } + } + } +} diff --git a/apps/framework-cli/src/utilities/native_infra/clickhouse.rs b/apps/framework-cli/src/utilities/native_infra/clickhouse.rs index e6e4c26188..91ed2818ff 100644 --- a/apps/framework-cli/src/utilities/native_infra/clickhouse.rs +++ b/apps/framework-cli/src/utilities/native_infra/clickhouse.rs @@ -9,6 +9,20 @@ use tracing::info; /// Data directory layout under `{project}/.moose/native_infra/clickhouse/`. const NATIVE_CH_DIR: &str = "native_infra/clickhouse"; +/// `ps -o comm=` value for the ClickHouse watchdog process. +/// +/// On Linux the watchdog calls `prctl(PR_SET_NAME, "clckhouse-watch")` at +/// startup — the 'i' is dropped so the name fits the 15-char +/// `TASK_COMM_LEN`. On macOS no such rename happens and `ps` reports +/// `clickhouse` for the spawned binary. Persisting the platform-appropriate +/// string in the PID file lets `process_matches` confirm identity before +/// sending SIGTERM on shutdown on both targets. +#[cfg(target_os = "linux")] +pub const WATCHDOG_COMM: &str = "clckhouse-watch"; + +#[cfg(not(target_os = "linux"))] +pub const WATCHDOG_COMM: &str = "clickhouse"; + /// Ensure the ClickHouse binary is cached and return its path. 
pub fn ensure_binary(manager: &BinaryManager) -> Result { let (url, archive_path, expected_sha256) = clickhouse_download_metadata(); diff --git a/apps/framework-cli/src/utilities/native_infra/errors.rs b/apps/framework-cli/src/utilities/native_infra/errors.rs index 2cccab779c..b5a53682ce 100644 --- a/apps/framework-cli/src/utilities/native_infra/errors.rs +++ b/apps/framework-cli/src/utilities/native_infra/errors.rs @@ -100,4 +100,10 @@ pub enum NativeInfraError { #[error("health check failed for {service}: {reason}")] HealthCheck { service: String, reason: String }, + + #[error("{0}")] + PortConflict(#[from] super::preflight::PortConflictError), + + #[error(transparent)] + InvalidPort(#[from] super::preflight::InvalidPortError), } diff --git a/apps/framework-cli/src/utilities/native_infra/mod.rs b/apps/framework-cli/src/utilities/native_infra/mod.rs index 19a94e1342..9175091b68 100644 --- a/apps/framework-cli/src/utilities/native_infra/mod.rs +++ b/apps/framework-cli/src/utilities/native_infra/mod.rs @@ -3,6 +3,7 @@ pub mod clickhouse; pub mod devkafka; pub mod devredis; pub mod errors; +pub mod preflight; pub mod temporal; use crate::cli::display::{with_spinner_completion, with_timing, Message}; @@ -17,9 +18,9 @@ use std::path::Path; use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex, OnceLock}; use std::thread::sleep; -use std::time::Duration; +use std::time::{Duration, Instant}; use tokio::runtime::Handle; -use tracing::info; +use tracing::{info, warn}; /// Relative path from project root to the native infrastructure directory. pub const NATIVE_INFRA_DIR: &str = ".moose/native_infra"; @@ -126,6 +127,17 @@ impl InfraProvider for NativeInfraProvider { } fn start(&self, project: &Project) -> Result<(), RoutineFailure> { + // Preflight: surface EADDRINUSE in a single actionable message before + // anything starts. Prevents the Node consumption worker from entering + // an unbounded restart loop when a prior `moose dev --dockerless` is + // still holding ports 4001 / 6379 / 19092. + let specs = preflight::port_specs_for(project, self.scripts_enabled, true) + .map_err(NativeInfraError::from) + .map_err(Self::map_native_err)?; + preflight::check_ports(&specs, &preflight::native_dir_for(project)) + .map_err(NativeInfraError::from) + .map_err(Self::map_native_err)?; + // Start embedded devredis (Redis needed early for leadership/presence) let devredis_handle = with_timing("Start devredis", || { with_spinner_completion( @@ -202,9 +214,17 @@ impl InfraProvider for NativeInfraProvider { let _ = child.start_kill(); anyhow::anyhow!("ClickHouse process exited immediately after spawn") })?; - if let Err(e) = - write_pid_file(&clickhouse::pid_file_path(project), pid, "clickhouse") - { + // ClickHouse's watchdog sets its own comm via prctl(PR_SET_NAME) + // to `clckhouse-watch` (missing the 'i', trimmed to fit the + // 15-char TASK_COMM_LEN). The PID we capture here is the + // watchdog — not the server child — so store the name that + // `ps -o comm=` will actually report so `process_matches` + // can later verify identity and issue SIGTERM on shutdown. 
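// Illustrative sketch of the PID-file contract assumed here. `write_pid_file`
// and `process_matches` are existing helpers whose bodies are not in this diff,
// so the signatures and the "<pid>:<comm>" on-disk format below are assumptions
// inferred from this call site and from `read_live_pid` in preflight.rs.
use std::path::Path;
use std::process::Command;

// Assumed on-disk format, e.g. "12345:clckhouse-watch".
fn write_pid_file_sketch(path: &Path, pid: u32, comm: &str) -> std::io::Result<()> {
    std::fs::write(path, format!("{pid}:{comm}"))
}

// Assumed identity check: compare `ps -p <pid> -o comm=` against the stored
// name so a recycled PID belonging to an unrelated process is never signaled.
fn process_matches_sketch(pid: u32, expected_comm: &str) -> bool {
    Command::new("ps")
        .args(["-p", &pid.to_string(), "-o", "comm="])
        .output()
        .map(|out| String::from_utf8_lossy(&out.stdout).trim() == expected_comm)
        .unwrap_or(false)
}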
+ if let Err(e) = write_pid_file( + &clickhouse::pid_file_path(project), + pid, + clickhouse::WATCHDOG_COMM, + ) { let _ = child.start_kill(); return Err(anyhow::anyhow!("{}", e)); } @@ -489,12 +509,43 @@ pub fn kill_pid_file(pid_path: &Path) { } Ok(_) => { info!("PID {pid} already exited or could not be signaled"); + let _ = std::fs::remove_file(pid_path); + return; } Err(e) => { info!("Failed to run kill command for PID {pid}: {e}"); + let _ = std::fs::remove_file(pid_path); + return; + } + } + + // ClickHouse takes several seconds to flush and release its listen sockets; + // without waiting here the next `moose dev --dockerless` preflight sees the + // ports still bound and aborts with a false "another moose dev is running" + // error. Poll `kill -0` until the PID is gone, then escalate to SIGKILL. + const WAIT_TIMEOUT: Duration = Duration::from_secs(10); + const POLL_INTERVAL: Duration = Duration::from_millis(100); + let deadline = Instant::now() + WAIT_TIMEOUT; + while Instant::now() < deadline { + let still_alive = std::process::Command::new("kill") + .args(["-0", &pid.to_string()]) + .output() + .map(|o| o.status.success()) + .unwrap_or(false); + if !still_alive { + info!("PID {pid} exited after SIGTERM"); + let _ = std::fs::remove_file(pid_path); + return; } + sleep(POLL_INTERVAL); } + warn!("PID {pid} did not exit within {WAIT_TIMEOUT:?}; sending SIGKILL"); + let _ = std::process::Command::new("kill") + .args(["-KILL", &pid.to_string()]) + .output(); + // Brief grace for the kernel to release ports held by the killed process. + sleep(Duration::from_millis(200)); let _ = std::fs::remove_file(pid_path); } diff --git a/apps/framework-cli/src/utilities/native_infra/preflight.rs b/apps/framework-cli/src/utilities/native_infra/preflight.rs new file mode 100644 index 0000000000..65a7b72dac --- /dev/null +++ b/apps/framework-cli/src/utilities/native_infra/preflight.rs @@ -0,0 +1,415 @@ +//! Preflight port availability checks for the native (dockerless) dev path. +//! +//! Before any embedded server or native child process starts, probe every port +//! `moose dev --dockerless` intends to bind. Report conflicts in a single +//! structured error so the user sees an actionable message instead of a +//! cascade of stack traces from a Node worker retrying forever on an occupied +//! `proxy_port`. + +use super::{process_matches, NATIVE_INFRA_DIR}; +use crate::project::Project; +use std::fmt; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}; +use std::path::Path; + +/// One port the dockerless dev path intends to bind. +/// +/// Field order matches the `new()` parameter order: `(port, service, host)`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct PortSpec { + pub port: u16, + pub service: &'static str, + pub host: &'static str, +} + +impl PortSpec { + pub const fn new(port: u16, service: &'static str, host: &'static str) -> Self { + Self { + port, + service, + host, + } + } +} + +/// Port value from project config was outside the valid `u16` range. +#[derive(Debug, thiserror::Error)] +#[error("invalid port {value} for `{service}` in project config: must be in 1..=65535")] +pub struct InvalidPortError { + pub service: &'static str, + pub value: i32, +} + +/// A conflict detected for a single port, optionally attributed to a known +/// moose-owned PID (read from `.moose/native_infra/*.pid`). +#[derive(Debug)] +pub struct PortConflict { + pub spec: PortSpec, + /// PID of a previously-started moose native process that is still alive + /// and matches the expected binary name. 
`None` when the conflict is with + /// an unrelated process. + pub owner_pid: Option, + /// Name of the moose component the PID file was written for (e.g. + /// `"clickhouse"`), when attribution is available. + pub owner_name: Option<&'static str>, +} + +/// One or more port conflicts discovered during preflight. +#[derive(Debug)] +pub struct PortConflictError { + pub conflicts: Vec, +} + +impl fmt::Display for PortConflictError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // If any conflict is attributed to a PID file in THIS project's + // `.moose/native_infra/`, our own `moose dev` is running. Don't + // require `.all()` attributed — most services in the preflight + // (devredis, devkafka, http, management, proxy_port) never write + // PID files today, so `.all()` would make "keep editing" unreachable + // for same-project conflicts, which is the common case. The rare + // edge case of our own clickhouse + an unrelated process on another + // port is still covered by the fact that every conflicting port is + // listed above — the user can act on it if they need to. + let same_project_instance = self.conflicts.iter().any(|c| c.owner_pid.is_some()); + + writeln!( + f, + "Cannot start moose dev: the following ports are already in use:" + )?; + for c in &self.conflicts { + match (c.owner_pid, c.owner_name) { + (Some(pid), Some(name)) => writeln!( + f, + " - {} ({}) — PID {pid} ({name} from a prior moose dev) is alive", + c.spec.port, c.spec.service + )?, + _ => writeln!( + f, + " - {} ({}) — another moose dev instance may be running", + c.spec.port, c.spec.service + )?, + } + } + if same_project_instance { + writeln!( + f, + "A `moose dev` for this project is already running. If it's healthy, \ + just keep editing your code — the running instance picks up changes. \ + Run `moose clean` only if you want to stop it and start fresh." + )?; + } else { + writeln!( + f, + "If this is another moose project's dev server, run `moose clean` in \ + that project. Otherwise, stop the process holding these ports." + )?; + } + Ok(()) + } +} + +impl std::error::Error for PortConflictError {} + +/// Attempt to bind each port synchronously on both IPv4 loopback and IPv6 +/// loopback. Returns `Err(PortConflictError)` if any port is already in use +/// on either stack. +/// +/// The dual-stack probe matters because Node's `localhost` resolution can pick +/// `::1`, meaning a stuck consumption worker may occupy only the IPv6 side. +/// A probe that only checks 127.0.0.1 would miss it. +/// +/// `native_dir` is the `.moose/native_infra/` directory used for best-effort +/// PID attribution in the error message. +pub fn check_ports(specs: &[PortSpec], native_dir: &Path) -> Result<(), PortConflictError> { + let mut conflicts = Vec::new(); + + for spec in specs { + if port_in_use(spec.port) { + // Only ClickHouse/Temporal write PID files today. For ports + // owned by those services, look up the matching pid file + // directly so each conflict is attributed to its own owner + // rather than short-circuiting on the first pid file found. 
+ let owner_name: Option<&'static str> = match spec.service { + "clickhouse-http" + | "clickhouse-tcp" + | "clickhouse-keeper" + | "clickhouse-keeper-raft" => Some("clickhouse"), + "temporal" | "temporal-ui" => Some("temporal"), + _ => None, + }; + let owner_pid = owner_name + .and_then(|name| read_live_pid(&native_dir.join(format!("{name}.pid")), name)); + conflicts.push(PortConflict { + spec: *spec, + owner_pid, + owner_name: owner_name.filter(|_| owner_pid.is_some()), + }); + } + } + + if conflicts.is_empty() { + Ok(()) + } else { + Err(PortConflictError { conflicts }) + } +} + +/// Probe both IPv4 and IPv6 loopback for `port`. Returns `true` only when a +/// bind fails specifically with `AddrInUse` on either stack. Other errors +/// (e.g. `AddrNotAvailable` on systems with IPv6 disabled, or permission +/// errors on privileged ports) are ignored — they are not port conflicts and +/// reporting them as such would cause false positives that block startup. +fn port_in_use(port: u16) -> bool { + // A successful bind proves the port is free on that address family. The + // TcpListener is dropped immediately (no connection accepted, so no + // TIME_WAIT is created) and the real service can bind a few ms later. + let v4 = SocketAddr::from((Ipv4Addr::LOCALHOST, port)); + let v6 = SocketAddr::from((Ipv6Addr::LOCALHOST, port)); + let v4_in_use = matches!( + TcpListener::bind(v4), + Err(e) if e.kind() == std::io::ErrorKind::AddrInUse + ); + let v6_in_use = matches!( + TcpListener::bind(v6), + Err(e) if e.kind() == std::io::ErrorKind::AddrInUse + ); + v4_in_use || v6_in_use +} + +/// Best-effort: read a `.pid` file, verify the PID still matches the +/// expected process name, and return it. Used only to enrich the error +/// message — never load-bearing. +fn read_live_pid(pid_path: &Path, expected_name: &str) -> Option { + let contents = std::fs::read_to_string(pid_path).ok()?; + let contents = contents.trim(); + let (pid_str, name_in_file) = contents.split_once(':').unwrap_or((contents, "")); + let pid: u32 = pid_str.parse().ok()?; + // Honor the name in the PID file when present, falling back to the + // expected name for legacy files. + let name = if name_in_file.is_empty() { + expected_name + } else { + name_in_file + }; + if process_matches(pid, name) { + Some(pid) + } else { + None + } +} + +/// Convert an `i32` port value from project config into a validated `u16`. +fn port_from_config(value: i32, service: &'static str) -> Result { + u16::try_from(value) + .ok() + .filter(|p| *p != 0) + .ok_or(InvalidPortError { service, value }) +} + +/// Build the full list of ports the dockerless path will try to bind for the +/// given project. Kafka / Temporal / webserver ports are included or skipped +/// based on feature flags, mirroring the gating in `NativeInfraProvider::start` +/// and the CLI webserver bootstrap. +/// +/// Returns `Err(InvalidPortError)` if any ClickHouse port value in the project +/// config is outside the valid `u16` range — catching configuration mistakes +/// before we silently truncate them. 
+pub fn port_specs_for( + project: &Project, + scripts_enabled: bool, + include_webserver: bool, +) -> Result, InvalidPortError> { + let mut specs = Vec::with_capacity(10); + + specs.push(PortSpec::new( + project.redis_config.port, + "devredis", + "127.0.0.1", + )); + + if project.features.streaming_engine { + specs.push(PortSpec::new( + super::devkafka::broker_port(&project.redpanda_config), + "devkafka", + "127.0.0.1", + )); + } + + let ch = &project.clickhouse_config; + specs.push(PortSpec::new( + port_from_config(ch.host_port, "clickhouse-http")?, + "clickhouse-http", + "127.0.0.1", + )); + specs.push(PortSpec::new( + port_from_config(ch.native_port, "clickhouse-tcp")?, + "clickhouse-tcp", + "127.0.0.1", + )); + specs.push(PortSpec::new( + port_from_config(ch.keeper_port, "clickhouse-keeper")?, + "clickhouse-keeper", + "127.0.0.1", + )); + specs.push(PortSpec::new( + port_from_config(ch.keeper_raft_port, "clickhouse-keeper-raft")?, + "clickhouse-keeper-raft", + "127.0.0.1", + )); + + if scripts_enabled || project.features.workflows { + let tc = &project.temporal_config; + specs.push(PortSpec::new(tc.temporal_port, "temporal", "127.0.0.1")); + specs.push(PortSpec::new(tc.ui_port, "temporal-ui", "127.0.0.1")); + } + + if include_webserver { + let hs = &project.http_server_config; + specs.push(PortSpec::new(hs.port, "http", "127.0.0.1")); + specs.push(PortSpec::new(hs.management_port, "management", "127.0.0.1")); + specs.push(PortSpec::new(hs.proxy_port, "proxy_port", "127.0.0.1")); + } + + Ok(specs) +} + +/// Convenience: the `.moose/native_infra/` directory for a project. Callers +/// pass this to [`check_ports`] so conflicts can be attributed to moose-owned +/// PID files when possible. +pub fn native_dir_for(project: &Project) -> std::path::PathBuf { + project.project_location.join(NATIVE_INFRA_DIR) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::TcpListener as StdTcpListener; + + fn bound_port() -> (StdTcpListener, u16) { + let listener = StdTcpListener::bind(("127.0.0.1", 0)).expect("bind ephemeral"); + let port = listener.local_addr().unwrap().port(); + (listener, port) + } + + #[test] + fn check_ports_reports_conflict_on_bound_port() { + let (_held, port) = bound_port(); + let tmp = tempfile::tempdir().expect("tempdir"); + let specs = [PortSpec::new(port, "devredis", "127.0.0.1")]; + + let err = check_ports(&specs, tmp.path()).expect_err("should conflict"); + assert_eq!(err.conflicts.len(), 1); + assert_eq!(err.conflicts[0].spec.port, port); + assert!(err.conflicts[0].owner_pid.is_none()); + } + + #[test] + fn check_ports_succeeds_on_free_port() { + let port = { + let (listener, port) = bound_port(); + drop(listener); + port + }; + let tmp = tempfile::tempdir().expect("tempdir"); + let specs = [PortSpec::new(port, "devredis", "127.0.0.1")]; + + // Might race with the OS reusing the port, but in practice a freshly + // freed ephemeral port is available again immediately. + check_ports(&specs, tmp.path()).expect("should be free"); + } + + #[test] + fn check_ports_attributes_live_clickhouse_pid() { + let (_held, port) = bound_port(); + let tmp = tempfile::tempdir().expect("tempdir"); + let native_dir = tmp.path().join(NATIVE_INFRA_DIR); + std::fs::create_dir_all(&native_dir).unwrap(); + + // Write a PID file naming the current test process. `process_matches` + // uses `ps -p`, which returns the process's short command name (e.g. + // `framework-cli-`). We name the PID file after that command so the + // attribution succeeds even though we can't fake a real clickhouse. 
+ let my_pid = std::process::id(); + let ps = std::process::Command::new("ps") + .args(["-p", &my_pid.to_string(), "-o", "comm="]) + .output() + .expect("ps"); + let comm = String::from_utf8_lossy(&ps.stdout).trim().to_string(); + // Grab the final path component and trim to something short-ish. + let comm_name = comm + .rsplit('/') + .next() + .unwrap_or(&comm) + .chars() + .take(15) + .collect::(); + std::fs::write( + native_dir.join("clickhouse.pid"), + format!("{my_pid}:{comm_name}"), + ) + .unwrap(); + + // The spec must use a service name that the attribute path recognizes + // as clickhouse-owned. + let specs = [PortSpec::new(port, "clickhouse-http", "127.0.0.1")]; + + let err = check_ports(&specs, tmp.path()).expect_err("should conflict"); + assert_eq!(err.conflicts.len(), 1); + // Rendering must always succeed. + let _ = err.to_string(); + } + + #[test] + fn display_any_attribution_suggests_keep_editing() { + // Most preflight services (devredis, devkafka, http, management, + // proxy_port) never write PID files, so in the common same-project + // case only clickhouse/temporal are attributed while the others + // are unattributed. `.any()` is the right signal: if we see our + // own PID, it's our own dev server — tell the user they can keep + // editing. Every conflicting port is still listed so the user can + // intervene on stragglers if any are present. + let err = PortConflictError { + conflicts: vec![ + PortConflict { + spec: PortSpec::new(6379, "devredis", "127.0.0.1"), + owner_pid: None, + owner_name: None, + }, + PortConflict { + spec: PortSpec::new(9000, "clickhouse-tcp", "127.0.0.1"), + owner_pid: Some(12345), + owner_name: Some("clickhouse"), + }, + ], + }; + let rendered = err.to_string(); + assert!(rendered.contains("6379 (devredis)")); + assert!(rendered.contains("9000 (clickhouse-tcp)")); + assert!(rendered.contains("PID 12345")); + assert!(rendered.contains("keep editing")); + assert!(!rendered.contains("another moose project")); + } + + #[test] + fn display_suggests_cleanup_when_no_attribution() { + let err = PortConflictError { + conflicts: vec![PortConflict { + spec: PortSpec::new(6379, "devredis", "127.0.0.1"), + owner_pid: None, + owner_name: None, + }], + }; + let rendered = err.to_string(); + assert!(rendered.contains("another moose project")); + assert!(!rendered.contains("keep editing")); + } + + #[test] + fn port_from_config_rejects_out_of_range() { + assert!(port_from_config(70000, "clickhouse-http").is_err()); + assert!(port_from_config(-1, "clickhouse-http").is_err()); + assert!(port_from_config(0, "clickhouse-http").is_err()); + assert_eq!(port_from_config(9000, "clickhouse-tcp").unwrap(), 9000); + } +} diff --git a/apps/framework-cli/src/utilities/system.rs b/apps/framework-cli/src/utilities/system.rs index 470c2d7753..07d6fb6d56 100644 --- a/apps/framework-cli/src/utilities/system.rs +++ b/apps/framework-cli/src/utilities/system.rs @@ -133,10 +133,17 @@ impl RestartingProcess { monitor_task: tokio::task::spawn(async move { let mut child = child; const INITIAL_DELAY_MS: u64 = 1000; - const MAX_DELAY_MS: u64 = 60_000; // 1 minute - const MIN_RUNTIME_FOR_RESET: Duration = Duration::from_secs(10); // Process must run 10s to reset backoff + const MAX_DELAY_MS: u64 = 60_000; + const MIN_RUNTIME_FOR_RESET: Duration = Duration::from_secs(10); + // Circuit breaker: stop retrying once the child has failed this + // many times in a row without running long enough. Prevents a + // permanently-broken process (e.g. 
EADDRINUSE on a port held by + // another moose dev) from flooding the log file with an + // unbounded stream of identical stack traces. + const MAX_CONSECUTIVE_RAPID_FAILURES: u32 = 5; let mut delay_ms: u64 = INITIAL_DELAY_MS; let mut process_start_time = Instant::now(); + let mut consecutive_rapid_failures: u32 = 0; 'monitor: loop { select! { @@ -179,8 +186,18 @@ impl RestartingProcess { if process_runtime >= MIN_RUNTIME_FOR_RESET { debug!("Previous process ran for {:?}, resetting backoff delay", process_runtime); delay_ms = INITIAL_DELAY_MS; + consecutive_rapid_failures = 0; } else { delay_ms = (delay_ms * 2).min(MAX_DELAY_MS); + consecutive_rapid_failures += 1; + } + + if consecutive_rapid_failures >= MAX_CONSECUTIVE_RAPID_FAILURES { + error!( + "Process {} failed {} times in a row without running for at least {:?}; giving up. Check the errors above for the underlying cause (e.g. a busy port).", + process_id, consecutive_rapid_failures, MIN_RUNTIME_FOR_RESET, + ); + break 'monitor; } 'restart: loop { @@ -208,6 +225,14 @@ impl RestartingProcess { Err(e) => { error!("Failed to restart process {}: {:?}", process_id, e); delay_ms = (delay_ms * 2).min(MAX_DELAY_MS); + consecutive_rapid_failures += 1; + if consecutive_rapid_failures >= MAX_CONSECUTIVE_RAPID_FAILURES { + error!( + "Process {} failed to spawn {} times in a row; giving up.", + process_id, consecutive_rapid_failures, + ); + break 'monitor; + } } } } @@ -231,3 +256,65 @@ impl RestartingProcess { } } } + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::atomic::{AtomicU32, Ordering}; + use std::sync::Arc; + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn restarting_process_gives_up_after_consecutive_spawn_failures() { + // A StartChildFn that always fails. The monitor's inner restart loop + // should trip the circuit breaker after a bounded number of attempts + // instead of retrying forever. + let calls = Arc::new(AtomicU32::new(0)); + let calls_for_start = calls.clone(); + + // First call must succeed so RestartingProcess::create itself + // returns Ok — we need the monitor task spawned. We fail every + // subsequent call so the inner 'restart loop exercises the breaker. + let start: StartChildFn = Box::new(move || { + let n = calls_for_start.fetch_add(1, Ordering::SeqCst); + if n == 0 { + // Spawn a tiny child that exits immediately with failure. + tokio::process::Command::new("sh") + .arg("-c") + .arg("exit 1") + .spawn() + } else { + Err(std::io::Error::new( + std::io::ErrorKind::AddrInUse, + "simulated EADDRINUSE", + )) + } + }); + + let proc = RestartingProcess::create( + "test-circuit-breaker".to_string(), + start, + RestartPolicy::Always, + ) + .expect("initial spawn should succeed"); + + // Wait for the monitor to hit the breaker. The first child exits + // ~immediately, then each restart attempt fails with increasing + // backoff (1s, 2s, 4s, 8s, 16s) — well under 40s for 5 failures. + let joined = tokio::time::timeout(Duration::from_secs(45), proc.monitor_task).await; + assert!( + joined.is_ok(), + "monitor task should terminate after circuit-breaker trips, not retry forever", + ); + + // Deterministic total: 1 initial spawn (Ok), then the first child + // exits immediately which sets `consecutive_rapid_failures = 1`. The + // inner restart loop then tries `start()` up to MAX-1 more times + // (each Err increments + checks `>= MAX`), giving 1 + 4 = 5 calls + // before the breaker trips. 
+ let total = calls.load(Ordering::SeqCst); + assert_eq!( + total, 5, + "expected exactly 5 spawn attempts before the breaker trips, got {total}", + ); + } +} diff --git a/packages/ts-moose-lib/src/consumption-apis/runner.ts b/packages/ts-moose-lib/src/consumption-apis/runner.ts index ab9092a40c..1ff5eb6e01 100755 --- a/packages/ts-moose-lib/src/consumption-apis/runner.ts +++ b/packages/ts-moose-lib/src/consumption-apis/runner.ts @@ -611,7 +611,37 @@ export const runApis = async (config: ApisConfig) => { ); // port is now passed via config.proxyPort or defaults to 4001 const port = config.proxyPort !== undefined ? config.proxyPort : 4001; - server.listen(port, "localhost", () => { + + // Handle listen errors instead of letting Node emit an uncaught + // exception. EADDRINUSE here means a previous worker (or primary + // RoundRobinHandle) is still holding the socket; the cluster-level + // restart path will fork a replacement. Logging cleanly avoids + // stack-trace spam that masks the underlying cause. + server.on("error", (err: NodeJS.ErrnoException) => { + if (err.code === "EADDRINUSE") { + console.error( + `[consumption-api] port ${port} already in use — worker exiting so cluster can respawn`, + ); + } else { + console.error( + `[consumption-api] unexpected server error on port ${port}:`, + err, + ); + } + // Surrender gracefully. The Cluster's workerStop isn't appropriate + // here (we never reached a healthy state), so exit non-zero and let + // the primary schedule a retry. + process.exit(1); + }); + + // Bind to IPv4 loopback explicitly. Node's `cluster` module intercepts + // `listen` calls regardless of the host arg (the primary holds the OS + // socket and distributes handles to workers via IPC), so passing + // "127.0.0.1" does not cause the bind race the previous code feared. + // Not specifying a host defaults to `::` (all interfaces), which would + // expose the dev consumption API — `enforceAuth` is off by default — + // to the local network. Loopback-only is the safer default. 
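// Illustrative sketch, not part of this change: a minimal cluster setup showing
// why the explicit host is safe. The worker's listen() is forwarded to the
// primary, which owns the real 127.0.0.1 socket and hands connections to the
// workers over IPC; port 4001 is just the same default used below.
import cluster from "node:cluster";
import http from "node:http";

if (cluster.isPrimary) {
  cluster.fork();
  cluster.fork();
} else {
  http
    .createServer((_req, res) => res.end(`handled by worker ${cluster.worker?.id}\n`))
    .listen(4001, "127.0.0.1", () =>
      console.log(`worker ${cluster.worker?.id} listening via the primary`),
    );
}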
+ server.listen(port, "127.0.0.1", () => { console.log(`Server running on port ${port}`); }); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4c34c300cb..d514211b9a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1784,78 +1784,92 @@ packages: resolution: {integrity: sha512-I4RxkXU90cpufazhGPyVujYwfIm9Nk1QDEmiIsaPwdnm013F7RIceaCc87kAH+oUB1ezqEvC6ga4m7MSlqsJvQ==} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-arm@1.2.3': resolution: {integrity: sha512-x1uE93lyP6wEwGvgAIV0gP6zmaL/a0tGzJs/BIDDG0zeBhMnuUPm7ptxGhUbcGs4okDJrk4nxgrmxpib9g6HpA==} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-ppc64@1.2.3': resolution: {integrity: sha512-Y2T7IsQvJLMCBM+pmPbM3bKT/yYJvVtLJGfCs4Sp95SjvnFIjynbjzsa7dY1fRJX45FTSfDksbTp6AGWudiyCg==} cpu: [ppc64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-s390x@1.2.3': resolution: {integrity: sha512-RgWrs/gVU7f+K7P+KeHFaBAJlNkD1nIZuVXdQv6S+fNA6syCcoboNjsV2Pou7zNlVdNQoQUpQTk8SWDHUA3y/w==} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-x64@1.2.3': resolution: {integrity: sha512-3JU7LmR85K6bBiRzSUc/Ff9JBVIFVvq6bomKE0e63UXGeRw2HPVEjoJke1Yx+iU4rL7/7kUjES4dZ/81Qjhyxg==} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linuxmusl-arm64@1.2.3': resolution: {integrity: sha512-F9q83RZ8yaCwENw1GieztSfj5msz7GGykG/BA+MOUefvER69K/ubgFHNeSyUu64amHIYKGDs4sRCMzXVj8sEyw==} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-libvips-linuxmusl-x64@1.2.3': resolution: {integrity: sha512-U5PUY5jbc45ANM6tSJpsgqmBF/VsL6LnxJmIf11kB7J5DctHgqm0SkuXzVWtIY90GnJxKnC/JT251TDnk1fu/g==} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-linux-arm64@0.34.4': resolution: {integrity: sha512-YXU1F/mN/Wu786tl72CyJjP/Ngl8mGHN1hST4BGl+hiW5jhCnV2uRVTNOcaYPs73NeT/H8Upm3y9582JVuZHrQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-linux-arm@0.34.4': resolution: {integrity: sha512-Xyam4mlqM0KkTHYVSuc6wXRmM7LGN0P12li03jAnZ3EJWZqj83+hi8Y9UxZUbxsgsK1qOEwg7O0Bc0LjqQVtxA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-linux-ppc64@0.34.4': resolution: {integrity: sha512-F4PDtF4Cy8L8hXA2p3TO6s4aDt93v+LKmpcYFLAVdkkD3hSxZzee0rh6/+94FpAynsuMpLX5h+LRsSG3rIciUQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [ppc64] os: [linux] + libc: [glibc] '@img/sharp-linux-s390x@0.34.4': resolution: {integrity: sha512-qVrZKE9Bsnzy+myf7lFKvng6bQzhNUAYcVORq2P7bDlvmF6u2sCmK2KyEQEBdYk+u3T01pVsPrkj943T1aJAsw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-linux-x64@0.34.4': resolution: {integrity: sha512-ZfGtcp2xS51iG79c6Vhw9CWqQC8l2Ot8dygxoDoIQPTat/Ov3qAa8qpxSrtAEAJW+UjTXc4yxCjNfxm4h6Xm2A==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-linuxmusl-arm64@0.34.4': resolution: {integrity: sha512-8hDVvW9eu4yHWnjaOOR8kHVrew1iIX+MUgwxSuH2XyYeNRtLUe4VNioSqbNkB7ZYQJj9rUTT4PyRscyk2PXFKA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-linuxmusl-x64@0.34.4': resolution: {integrity: sha512-lU0aA5L8QTlfKjpDCEFOZsTYGn3AEiO6db8W5aQDxj0nQkVrZWmN3ZP9sYKWJdtq3PWPhUNlqehWyXpYDcI9Sg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-wasm32@0.34.4': resolution: {integrity: sha512-33QL6ZO/qpRyG7woB/HUALz28WnTMI2W1jgX3Nu2bypqLIKx/QKMILLJzJjI+SIbvXdG9fUnmrxR7vbi1sTBeA==} @@ -2071,24 +2085,28 @@ packages: engines: {node: '>= 10'} cpu: [arm64] os: [linux] 
+ libc: [glibc] '@next/swc-linux-arm64-musl@16.1.6': resolution: {integrity: sha512-S4J2v+8tT3NIO9u2q+S0G5KdvNDjXfAv06OhfOzNDaBn5rw84DGXWndOEB7d5/x852A20sW1M56vhC/tRVbccQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [musl] '@next/swc-linux-x64-gnu@16.1.6': resolution: {integrity: sha512-2eEBDkFlMMNQnkTyPBhQOAyn2qMxyG2eE7GPH2WIDGEpEILcBPI/jdSv4t6xupSP+ot/jkfrCShLAa7+ZUPcJQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [glibc] '@next/swc-linux-x64-musl@16.1.6': resolution: {integrity: sha512-oicJwRlyOoZXVlxmIMaTq7f8pN9QNbdes0q2FXfRsPhfCi8n8JmOZJm5oo1pwDaFbnnD421rVU409M3evFbIqg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [musl] '@next/swc-win32-arm64-msvc@16.1.6': resolution: {integrity: sha512-gQmm8izDTPgs+DCWH22kcDmuUp7NyiJgEl18bcr8irXA5N2m2O+JQIr6f3ct42GOs9c0h8QF3L5SzIxcYAAXXw==}