diff --git a/.github/merge-queue-config.json b/.github/merge-queue-config.json index 5beb9eeecca..6a69619f3c9 100644 --- a/.github/merge-queue-config.json +++ b/.github/merge-queue-config.json @@ -14,43 +14,27 @@ "all_required": true, "status_checks": [ { - "context": "CI - Comprehensive Gates / setup", + "context": "ci-guard / attestation-bundle-verifier", "required": true, - "description": "Environment setup and change detection" + "description": "Portable attestation bundle admissibility verification" }, { - "context": "CI - Comprehensive Gates / lint-and-typecheck", + "context": "merge-surge / pr-fast", "required": true, - "description": "Code quality and TypeScript validation" + "description": "Fast protected-branch PR verification lane" }, { - "context": "CI - Comprehensive Gates / unit-integration-tests", + "context": "merge-surge / merge-queue", "required": true, - "description": "Test execution with ≥80% coverage requirement" + "description": "Merge-group protected-branch verification lane" }, { - "context": "CI - Comprehensive Gates / security-gates", + "context": "security-gates / gate", "required": true, - "description": "SBOM generation, vulnerability scan, secret detection" - }, - { - "context": "CI - Comprehensive Gates / build-and-attestation", - "required": true, - "description": "Application build and artifact generation" - }, - { - "context": "CI - Comprehensive Gates / merge-readiness", - "required": true, - "description": "Overall merge readiness evaluation" + "description": "Deterministic security gate with pinned-action evidence emission" } ], - "optional_checks": [ - { - "context": "CI - Comprehensive Gates / schema-api-validation", - "required": false, - "description": "GraphQL schema validation (conditional on changes)" - } - ] + "optional_checks": [] }, "merge_policies": { "min_entries_to_merge": 1, diff --git a/.github/required-checks.yml b/.github/required-checks.yml index 0158c4d747c..a87f2130bae 100644 --- a/.github/required-checks.yml +++ b/.github/required-checks.yml @@ -1,47 +1,20 @@ -<<<<<<< HEAD -# Required Status Checks Configuration -# ===================================== -# DEPRECATED: This file is maintained for historical reference only. -# -# CANONICAL SOURCE: docs/ci/REQUIRED_CHECKS_POLICY.yml (v2.2.0) -# -# The authoritative definition of required checks is in: -# docs/ci/REQUIRED_CHECKS_POLICY.yml -# -# That file defines: -# - always_required: checks that must pass on every commit -# - conditional_required: checks that run based on changed files -# - informational: non-blocking checks for observability -# -# This file remains for legacy tooling compatibility but should NOT -# be used as a source of truth for branch protection or merge queue -# configuration. -# -# Last updated: 2026-03-25 -# Status: ARCHIVED - refer to REQUIRED_CHECKS_POLICY.yml -======= # Canonical list of required status checks for protected branches # Order is stable and intentional (deterministic diffs) # NOTE: Canonical policy source is governance/ga/required-checks.yaml. # Keep this file in sync for legacy verification consumers. ->>>>>>> pr-21871 version: 2 protected_branches: - main -# DEPRECATED: See docs/ci/REQUIRED_CHECKS_POLICY.yml for current checks +# This file remains a maintained legacy consumer surface. +# The canonical required-check source is governance/ga/required-checks.yaml. required_checks: - - pr-fast - - merge-queue + - ci-guard / attestation-bundle-verifier + - merge-surge / merge-queue + - merge-surge / pr-fast + - security-gates / gate notes: owner: summit-ga -<<<<<<< HEAD - canonical_source: docs/ci/REQUIRED_CHECKS_POLICY.yml - status: archived - migration_date: 2026-03-25 - reason: Consolidated to single source of truth to eliminate conflicting definitions -======= policy: governance/ga/required-checks.yaml ->>>>>>> pr-21871 diff --git a/.github/scripts/run-agent-graph-check.ts b/.github/scripts/run-agent-graph-check.ts index 7a24e68eb86..9e3de504a06 100644 --- a/.github/scripts/run-agent-graph-check.ts +++ b/.github/scripts/run-agent-graph-check.ts @@ -131,7 +131,7 @@ const graph: CapabilityGraph = { from: "agent:security-engineer", to: "workflow:policy-enforcement", allow: true, - requiredChecks: ["security-gates"], + requiredChecks: ["security-gates / gate"], evidenceKinds: ["security-audit"], maxCostUsd: 5.0, maxLatencyMs: 5000, diff --git a/.github/workflows/_policy-enforcer.yml b/.github/workflows/_policy-enforcer.yml index 6153409671b..0f7c7e5aaa0 100644 --- a/.github/workflows/_policy-enforcer.yml +++ b/.github/workflows/_policy-enforcer.yml @@ -33,8 +33,21 @@ jobs: shopt -s nullglob violations=0 + base_ref="${GITHUB_BASE_REF:-main}" - for f in .github/workflows/*.yml .github/workflows/*.yaml; do + mapfile -t changed_workflows < <( + git diff --name-only "origin/${base_ref}...HEAD" -- \ + '.github/workflows/*.yml' \ + '.github/workflows/*.yaml' + ) + + if [ "${#changed_workflows[@]}" -eq 0 ]; then + echo "No workflow changes detected; skipping broad-trigger policy check." + exit 0 + fi + + for f in "${changed_workflows[@]}"; do + [ -f "$f" ] || continue base="$(basename "$f")" # Skip canonical pilot gate files that are intentionally PR-facing. @@ -44,12 +57,46 @@ jobs: ;; esac - if grep -Eq '^[[:space:]]*pull_request:' "$f"; then - if ! grep -Eq '^[[:space:]]+paths:' "$f"; then - echo "Violation: $f has pull_request trigger without path scoping." + current_has_pr=false + current_has_paths=false + if grep -Eq '^[[:space:]]*(pull_request|pull_request_target):' "$f"; then + current_has_pr=true + fi + if grep -Eq '^[[:space:]]+paths:' "$f"; then + current_has_paths=true + fi + + if ! git cat-file -e "origin/${base_ref}:${f}" 2>/dev/null; then + if [ "$current_has_pr" = true ] && [ "$current_has_paths" = false ]; then + echo "Violation: $f introduces a PR-facing trigger without path scoping." violations=1 fi + continue + fi + + base_file="$(mktemp)" + git show "origin/${base_ref}:${f}" > "$base_file" + + base_has_pr=false + base_has_paths=false + if grep -Eq '^[[:space:]]*(pull_request|pull_request_target):' "$base_file"; then + base_has_pr=true + fi + if grep -Eq '^[[:space:]]+paths:' "$base_file"; then + base_has_paths=true + fi + + if [ "$current_has_pr" = true ] && [ "$base_has_pr" = false ] && [ "$current_has_paths" = false ]; then + echo "Violation: $f adds a PR-facing trigger to an existing workflow without path scoping." + violations=1 fi + + if [ "$base_has_pr" = true ] && [ "$base_has_paths" = true ] && [ "$current_has_pr" = true ] && [ "$current_has_paths" = false ]; then + echo "Violation: $f removes required path scoping from an existing PR-facing workflow." + violations=1 + fi + + rm -f "$base_file" done test "$violations" -eq 0 diff --git a/.github/workflows/_reusable-ci.yml b/.github/workflows/_reusable-ci.yml index 6cd7cb1b530..2aec72b0612 100644 --- a/.github/workflows/_reusable-ci.yml +++ b/.github/workflows/_reusable-ci.yml @@ -1,3 +1,5 @@ +name: Reusable CI + on: workflow_call: inputs: @@ -49,6 +51,9 @@ on: env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true +permissions: + contents: read + jobs: ci: runs-on: ubuntu-22.04 @@ -56,35 +61,18 @@ jobs: matrix: task: [lint, typecheck, test] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - name: Setup pnpm -<<<<<<< HEAD - uses: pnpm/action-setup@v4 -<<<<<<< HEAD + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 -======= -======= - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 - with: - version: 9.15.4 -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> pr-21956 -======= ->>>>>>> pr-21923 -======= ->>>>>>> pr-21902 -======= ->>>>>>> pr-21894 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: ${{ inputs.node-version }} + cache: 'pnpm' cache-dependency-path: '**/pnpm-lock.yaml' - run: corepack enable - run: pnpm -w install --frozen-lockfile diff --git a/.github/workflows/admissibility-gate.yml b/.github/workflows/admissibility-gate.yml new file mode 100644 index 00000000000..4693c3a16cc --- /dev/null +++ b/.github/workflows/admissibility-gate.yml @@ -0,0 +1,96 @@ +name: Admissibility Gate + +on: + pull_request: + workflow_dispatch: + +permissions: + contents: read + id-token: write + attestations: write + +jobs: + evidence-admissibility: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Enable Corepack + run: corepack enable + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Install Syft + uses: anchore/sbom-action/download-syft@v0.17.0 + + - name: Install Cosign + uses: sigstore/cosign-installer@v3.8.1 + + - name: Build deterministic artifact payload + run: | + mkdir -p dist + sha256sum package.json pnpm-lock.yaml | awk '{print $1}' | sort > dist/admissible-artifact.txt + + - name: Generate SBOM (CycloneDX) + run: syft . -o cyclonedx-json=evidence/sbom.cdx.json + + - name: Assert SBOM completeness + run: | + test -f evidence/sbom.cdx.json + jq -e '.components and (.components | length > 0)' evidence/sbom.cdx.json + + - name: Generate provenance attestation (SLSA) + uses: actions/attest-build-provenance@v3 + with: + subject-path: dist/admissible-artifact.txt + + - name: Materialize deterministic provenance snapshot + run: | + DIGEST="$(sha256sum dist/admissible-artifact.txt | awk '{print $1}')" + jq -n \ + --arg digest "sha256:${DIGEST}" \ + --arg repo "${{ github.repository }}" \ + '{ + _type: "https://in-toto.io/Statement/v1", + predicateType: "https://slsa.dev/provenance/v1", + subject: [{name: "dist/admissible-artifact.txt", digest: {sha256: ($digest | sub("^sha256:"; ""))}}], + builder: {id: "https://github.com/actions/runner"}, + invocation: {configSource: {uri: $repo}} + }' > evidence/provenance.json + + - name: Sign and verify artifact signature + run: | + cosign generate-key-pair + cosign sign-blob --yes --key cosign.key --output-signature evidence/artifact.sig dist/admissible-artifact.txt + cosign verify-blob --key cosign.pub --signature evidence/artifact.sig dist/admissible-artifact.txt + + - name: Build evidence report/metrics/stamp + env: + ARTIFACT_PATH: dist/admissible-artifact.txt + SBOM_PATH: evidence/sbom.cdx.json + PROVENANCE_PATH: evidence/provenance.json + SIGNATURE_VERIFIED: "true" + run: node scripts/ci/build_admissibility_evidence.mjs + + - name: Evaluate admissibility gate + run: pnpm verify:admissibility --input evidence/report.json + + - name: Upload evidence artifacts + uses: actions/upload-artifact@v4 + with: + name: admissibility-evidence-${{ github.run_id }} + path: | + evidence/report.json + evidence/metrics.json + evidence/stamp.json + evidence/sbom.cdx.json + evidence/provenance.json + evidence/artifact.sig + cosign.pub diff --git a/.github/workflows/branch-protection-drift.yml b/.github/workflows/branch-protection-drift.yml index 0cb7d6dd2fd..410716e1e6b 100644 --- a/.github/workflows/branch-protection-drift.yml +++ b/.github/workflows/branch-protection-drift.yml @@ -26,10 +26,7 @@ concurrency: cancel-in-progress: true permissions: - FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true contents: read - issues: write - pull-requests: write checks: read actions: read @@ -49,7 +46,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Determine Branch id: branch @@ -60,7 +57,7 @@ jobs: - name: Generate GitHub App token id: app-token - uses: actions/create-github-app-token@v2 + uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v2.0.0 continue-on-error: true with: app-id: ${{ secrets.BRANCH_PROTECTION_APP_ID }} @@ -133,7 +130,7 @@ jobs: echo "extra_count=${EXTRA}" >> "$GITHUB_OUTPUT" - name: Upload Drift Report - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 if: always() with: name: branch-protection-drift-report @@ -142,39 +139,6 @@ jobs: ${{ env.OUT_DIR }}/branch_protection_drift_report.json retention-days: 30 - - name: Upsert governance issue (non-PR only) - if: github.event_name != 'pull_request' && steps.check.outputs.drift_detected == 'true' && github.event.inputs.dry_run != 'true' - uses: actions/github-script@v6 - with: - script: | - const fs = require('fs'); - const branch = '${{ steps.branch.outputs.branch }}'; - const title = `[Governance Drift] Branch protection does not match REQUIRED_CHECKS_POLICY (${branch})`; - const body = fs.readFileSync('${{ env.OUT_DIR }}/branch_protection_drift_report.md', 'utf8'); - - const query = `repo:${context.repo.owner}/${context.repo.repo} is:issue is:open in:title "${title}"`; - const search = await github.rest.search.issuesAndPullRequests({ q: query, per_page: 1 }); - - if (search.data.items.length > 0) { - const issueNumber = search.data.items[0].number; - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issueNumber, - body: `## Drift Update (${new Date().toISOString()})\n\n${body}`, - }); - core.notice(`Updated existing drift issue #${issueNumber}`); - } else { - const created = await github.rest.issues.create({ - owner: context.repo.owner, - repo: context.repo.repo, - title, - body, - labels: ['governance', 'ci', 'release-ops', 'severity:P1'], - }); - core.notice(`Created drift issue #${created.data.number}`); - } - - name: Generate Summary if: always() run: | diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 93c28eb7256..3684aa362c2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -26,40 +26,19 @@ jobs: SIGSTORE_KEY: ${{ secrets.SIGSTORE_KEY }} steps: - name: Checkout code - uses: actions/checkout@v4 # v4.1.7 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - name: Install pnpm -<<<<<<< HEAD - uses: pnpm/action-setup@v4 # v4.1.0 -<<<<<<< HEAD + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 -======= -======= - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 - with: - version: 9.15.4 -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> pr-22128 -======= ->>>>>>> pr-21956 -======= ->>>>>>> pr-21923 -======= ->>>>>>> pr-21902 -======= ->>>>>>> pr-21894 # Note: pnpm version is read from package.json "packageManager" field - name: Setup Node - uses: actions/setup-node@v4 # v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: cache: 'pnpm' cache-dependency-path: '**/pnpm-lock.yaml' diff --git a/.github/workflows/business-integrity.yml b/.github/workflows/business-integrity.yml index 8f595a72f2b..25e5c85b1ae 100644 --- a/.github/workflows/business-integrity.yml +++ b/.github/workflows/business-integrity.yml @@ -11,34 +11,15 @@ jobs: if: github.event_name == 'pull_request' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Setup pnpm - uses: pnpm/action-setup@v3 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 -<<<<<<< HEAD - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 cache: 'pnpm' -<<<<<<< HEAD -======= - - name: Setup pnpm - uses: pnpm/action-setup@v4 - with: - version: 9.15.4 -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> pr-22128 -======= ->>>>>>> pr-21956 -======= ->>>>>>> pr-21923 -======= ->>>>>>> pr-21902 -======= ->>>>>>> pr-21894 - name: Install dependencies run: pnpm install -w js-yaml || npm install --no-save js-yaml - name: Validate schema only @@ -51,34 +32,15 @@ jobs: if: github.event_name == 'merge_group' || github.ref == 'refs/heads/main' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Setup pnpm - uses: pnpm/action-setup@v3 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 -<<<<<<< HEAD - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 cache: 'pnpm' -<<<<<<< HEAD -======= - - name: Setup pnpm - uses: pnpm/action-setup@v4 - with: - version: 9.15.4 -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> pr-22128 -======= ->>>>>>> pr-21956 -======= ->>>>>>> pr-21923 -======= ->>>>>>> pr-21902 -======= ->>>>>>> pr-21894 - name: Install dependencies run: pnpm install -w js-yaml || npm install --no-save js-yaml - name: Validate revenue surfaces diff --git a/.github/workflows/ci-drift-sentinel.yml b/.github/workflows/ci-drift-sentinel.yml index 79e2e45c1d9..5a537bd9c4c 100644 --- a/.github/workflows/ci-drift-sentinel.yml +++ b/.github/workflows/ci-drift-sentinel.yml @@ -16,26 +16,24 @@ concurrency: env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true +permissions: + contents: read + jobs: sentinel: runs-on: ubuntu-22.04 timeout-minutes: 10 steps: - - uses: actions/checkout@v4 -<<<<<<< HEAD + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - - uses: pnpm/action-setup@v4 -======= - - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 + - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 - name: Validate Workflow Drift run: node scripts/ci/validate_workflows.mjs - diff --git a/.github/workflows/ci-guard.yml b/.github/workflows/ci-guard.yml index a38bf747603..73df90d7e92 100644 --- a/.github/workflows/ci-guard.yml +++ b/.github/workflows/ci-guard.yml @@ -2,9 +2,16 @@ name: ci-guard on: pull_request: + branches: [main] + merge_group: + types: [checks_requested] push: branches: [main] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: drift: runs-on: ubuntu-latest @@ -16,9 +23,16 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: pnpm/action-setup@v4 + with: + version: 9.15.4 + - uses: actions/setup-node@v4 + with: + node-version-file: .nvmrc + cache: pnpm - run: mkdir -p metrics - run: echo '{"pr":0,"ttm_ms":0,"version":"1.0.0"}' > metrics/merge_latency.json - - run: npm ci || true + - run: pnpm install --frozen-lockfile - run: node .repoos/scripts/ci/validate_schemas.mjs checksum: @@ -27,3 +41,45 @@ jobs: - uses: actions/checkout@v4 - run: node .repoos/scripts/ci/compute_control_checksum.mjs > .repoos/control/checksum.txt - run: cat .repoos/control/checksum.txt + + + attestation-bundle-verifier: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Run attestation-bundle verifier tests + run: node --test --test-name-pattern="attestation bundle" scripts/ci/__tests__/governance_mutation_guard.test.mjs + + - name: Verify required checks contract + run: node scripts/ci/verify_required_checks_contract.mjs + + - name: Verify synthetic attestation bundle + shell: bash + run: | + set -euo pipefail + bundle_dir="$(mktemp -d)/attestations" + mkdir -p "$bundle_dir" + + cat > "$bundle_dir/subject.json" <<'EOF' + {"subject_digest":"sha256:1111111111111111111111111111111111111111111111111111111111111111","subject_type":"decision-bundle","bundle_version":"0.1"} + EOF + + cat > "$bundle_dir/verification-summary.json" <<'EOF' + {"subject":{"digest":"sha256:1111111111111111111111111111111111111111111111111111111111111111"},"verifier":{"name":"summit verify","version":"0.1"},"policy_digest":"sha256:2222222222222222222222222222222222222222222222222222222222222222","results":[{"property":"summit.verification.passed","status":"PASS"}]} + EOF + + cat > "$bundle_dir/decision-proof.json" <<'EOF' + {"decision_id":"DEC-1","subject_digest":"sha256:1111111111111111111111111111111111111111111111111111111111111111","lineage_run_id":"RUN-1","inputs":[{"type":"bundle","ref":"subject.json"}],"tools":[{"name":"summit verify","version":"0.1"}],"policies_applied":["baseline"],"verification_ref":"verification-summary.json","reproducible":true,"verdict":"ADMISSIBLE"} + EOF + + cat > "$bundle_dir/policy.json" <<'EOF' + {"policy_digest":"sha256:2222222222222222222222222222222222222222222222222222222222222222"} + EOF + + node scripts/ci/verify_summit_attestation_bundle.mjs "$bundle_dir" diff --git a/.github/workflows/ci-security.yml b/.github/workflows/ci-security.yml index 102e8b8d2fc..03fc30a86ac 100644 --- a/.github/workflows/ci-security.yml +++ b/.github/workflows/ci-security.yml @@ -22,11 +22,8 @@ on: required: false permissions: - FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true actions: read contents: read - security-events: write - checks: write issues: read env: @@ -47,19 +44,19 @@ jobs: set -euo pipefail RUN_DAST="true" if command -v jq >/dev/null 2>&1 && [ -f "$GITHUB_EVENT_PATH" ]; then -VALUE=$(jq -r '.inputs.run_dast // empty' "$GITHUB_EVENT_PATH") -if [ "$VALUE" = "false" ]; then - RUN_DAST="false" -fi + VALUE=$(jq -r '.inputs.run_dast // empty' "$GITHUB_EVENT_PATH") + if [ "$VALUE" = "false" ]; then + RUN_DAST="false" + fi fi echo "run_dast=$RUN_DAST" >> "$GITHUB_OUTPUT" RUN_SNYK="false" if command -v jq >/dev/null 2>&1 && [ -f "$GITHUB_EVENT_PATH" ]; then -VALUE=$(jq -r '.inputs.run_snyk // empty' "$GITHUB_EVENT_PATH") -if [ "$VALUE" = "true" ]; then - RUN_SNYK="true" -fi + VALUE=$(jq -r '.inputs.run_snyk // empty' "$GITHUB_EVENT_PATH") + if [ "$VALUE" = "true" ]; then + RUN_SNYK="true" + fi fi echo "run_snyk=$RUN_SNYK" >> "$GITHUB_OUTPUT" @@ -69,7 +66,7 @@ fi needs: context steps: - name: Checkout - uses: actions/checkout@v4 # v4.1.7 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 - name: Prepare report directory @@ -102,7 +99,7 @@ fi matrix: language: [javascript, python] steps: - - uses: actions/checkout@v4 # v4.1.7 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Initialize CodeQL uses: github/codeql-action/init@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v3.25.10 with: @@ -123,7 +120,7 @@ fi contents: read security-events: write steps: - - uses: actions/checkout@v4 # v4.1.7 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Prepare report directory run: mkdir -p "$REPORT_DIR" - name: Run Semgrep CI ruleset @@ -154,25 +151,25 @@ fi env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN || secrets.snyk_token }} steps: - - uses: actions/checkout@v4 # v4.1.7 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Prepare report directory run: mkdir -p "$REPORT_DIR" - name: Ensure SNYK_TOKEN is configured run: | if [ -z "${SNYK_TOKEN:-}" ]; then -echo "SNYK_TOKEN secret is required for dependency scanning." >&2 -echo "Define repository secret 'SNYK_TOKEN' or provide one via workflow_call.snyk_token." >&2 -exit 1 + echo "SNYK_TOKEN secret is required for dependency scanning." >&2 + echo "Define repository secret 'SNYK_TOKEN' or provide one via workflow_call.snyk_token." >&2 + exit 1 fi - name: Run Snyk test across all manifests uses: snyk/actions/node@9adf32b1121593767fc3c057af55b55db032dc04 # 1.10.0 with: command: test args: >- ---all-projects ---severity-threshold=${{ env.SNYK_FAIL_THRESHOLD }} ---fail-on=all ---sarif-file-output=${{ env.REPORT_DIR }}/snyk.sarif + --all-projects + --severity-threshold=${{ env.SNYK_FAIL_THRESHOLD }} + --fail-on=all + --sarif-file-output=${{ env.REPORT_DIR }}/snyk.sarif - name: Upload Snyk SARIF if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} uses: github/codeql-action/upload-sarif@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v3.25.10 @@ -194,7 +191,7 @@ exit 1 contents: read security-events: write steps: - - uses: actions/checkout@v4 # v4.1.7 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Install Trivy uses: aquasecurity/setup-trivy@master # fallback to master - name: Prepare report directory @@ -202,12 +199,12 @@ exit 1 - name: Run Trivy FS scan run: | trivy fs \ ---scanners vuln,secret,misconfig \ ---ignore-unfixed \ ---exit-code 1 \ ---format sarif \ ---output "$REPORT_DIR/trivy-fs.sarif" \ -. + --scanners vuln,secret,misconfig \ + --ignore-unfixed \ + --exit-code 1 \ + --format sarif \ + --output "$REPORT_DIR/trivy-fs.sarif" \ + . - name: Upload filesystem SARIF if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} uses: github/codeql-action/upload-sarif@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v3.25.10 @@ -269,8 +266,8 @@ exit 1 retention-days: 14 name: security-reports path: | -${{ env.REPORT_DIR }}/trivy-server.sarif -${{ env.REPORT_DIR }}/trivy-client.sarif + ${{ env.REPORT_DIR }}/trivy-server.sarif + ${{ env.REPORT_DIR }}/trivy-client.sarif license-compliance: name: License compliance verification @@ -284,11 +281,11 @@ ${{ env.REPORT_DIR }}/trivy-client.sarif - name: Evaluate license policy run: | trivy fs \ ---scanners license \ ---exit-code 1 \ ---format json \ ---output "$REPORT_DIR/trivy-license.json" \ -. + --scanners license \ + --exit-code 1 \ + --format json \ + --output "$REPORT_DIR/trivy-license.json" \ + . - name: Persist license report if: always() uses: actions/upload-artifact@v4 # v4.1.0 @@ -309,11 +306,11 @@ ${{ env.REPORT_DIR }}/trivy-client.sarif run: | mkdir -p "$REPORT_DIR" checkov -d . \ ---framework terraform,kubernetes,helm,cloudformation \ ---quiet \ ---download-external-modules true \ ---output-file-path "$REPORT_DIR" \ ---output sarif + --framework terraform,kubernetes,helm,cloudformation \ + --quiet \ + --download-external-modules true \ + --output-file-path "$REPORT_DIR" \ + --output sarif - name: Upload Checkov SARIF if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} uses: github/codeql-action/upload-sarif@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v3.25.10 @@ -383,12 +380,12 @@ ${{ env.REPORT_DIR }}/trivy-client.sarif set -euo pipefail mkdir -p "$REPORT_DIR" trivy config rendered \ ---compliance kubernetes-cis-1.23 \ ---format json \ ---output "$REPORT_DIR/trivy-cis.json" + --compliance kubernetes-cis-1.23 \ + --format json \ + --output "$REPORT_DIR/trivy-cis.json" if jq '([.Results[]?.Results[]?] | length) > 0' "$REPORT_DIR/trivy-cis.json" | grep -q true; then -echo "CIS benchmark violations detected" >&2 -exit 1 + echo "CIS benchmark violations detected" >&2 + exit 1 fi - name: Persist CIS report if: always() @@ -432,10 +429,10 @@ exit 1 - name: Wait for application run: | for i in {1..30}; do -if curl -fsS http://localhost:3000 >/dev/null 2>&1; then - exit 0 -fi -sleep 5 + if curl -fsS http://localhost:3000 >/dev/null 2>&1; then + exit 0 + fi + sleep 5 done echo "Application did not become ready in time" >&2 exit 1 @@ -461,9 +458,9 @@ sleep 5 retention-days: 14 name: security-reports path: | -${{ env.REPORT_DIR }}/zap-report.html -${{ env.REPORT_DIR }}/zap-report.md -${{ env.REPORT_DIR }}/zap-report.json + ${{ env.REPORT_DIR }}/zap-report.html + ${{ env.REPORT_DIR }}/zap-report.md + ${{ env.REPORT_DIR }}/zap-report.json security-summary: name: Aggregate security coverage @@ -509,53 +506,53 @@ ${{ env.REPORT_DIR }}/zap-report.json DAST_STATUS: ${{ needs.dast.result }} run: | python3 - <<'PY' -import json -import os -from pathlib import Path + import json + import os + from pathlib import Path -report_root = Path('aggregated-security') -files = sorted(str(p.relative_to(report_root)) for p in report_root.rglob('*') if p.is_file()) -status = { - "Secret scanning": os.environ.get('SECRET_STATUS', 'unknown'), - "SAST": os.environ.get('SAST_STATUS', 'unknown'), - "Semgrep": os.environ.get('SEMGREP_STATUS', 'unknown'), - "Dependencies": os.environ.get('DEP_STATUS', 'unknown'), - "Filesystem": os.environ.get('FS_STATUS', 'unknown'), - "Container": os.environ.get('IMG_STATUS', 'unknown'), - "Licenses": os.environ.get('LIC_STATUS', 'unknown'), - "IaC": os.environ.get('IAC_STATUS', 'unknown'), - "OPA policies": os.environ.get('OPA_STATUS', 'unknown'), - "CIS benchmark": os.environ.get('CIS_STATUS', 'unknown'), - "Baseline": os.environ.get('BASELINE_STATUS', 'unknown'), - "DAST": os.environ.get('DAST_STATUS', 'skipped'), -} -summary = { - "reportCount": len(files), - "reports": files, - "statuses": status, -} -report_root.mkdir(parents=True, exist_ok=True) -with (report_root / 'security-summary.json').open('w', encoding='utf-8') as fh: - json.dump(summary, fh, indent=2) + report_root = Path('aggregated-security') + files = sorted(str(p.relative_to(report_root)) for p in report_root.rglob('*') if p.is_file()) + status = { + "Secret scanning": os.environ.get('SECRET_STATUS', 'unknown'), + "SAST": os.environ.get('SAST_STATUS', 'unknown'), + "Semgrep": os.environ.get('SEMGREP_STATUS', 'unknown'), + "Dependencies": os.environ.get('DEP_STATUS', 'unknown'), + "Filesystem": os.environ.get('FS_STATUS', 'unknown'), + "Container": os.environ.get('IMG_STATUS', 'unknown'), + "Licenses": os.environ.get('LIC_STATUS', 'unknown'), + "IaC": os.environ.get('IAC_STATUS', 'unknown'), + "OPA policies": os.environ.get('OPA_STATUS', 'unknown'), + "CIS benchmark": os.environ.get('CIS_STATUS', 'unknown'), + "Baseline": os.environ.get('BASELINE_STATUS', 'unknown'), + "DAST": os.environ.get('DAST_STATUS', 'skipped'), + } + summary = { + "reportCount": len(files), + "reports": files, + "statuses": status, + } + report_root.mkdir(parents=True, exist_ok=True) + with (report_root / 'security-summary.json').open('w', encoding='utf-8') as fh: + json.dump(summary, fh, indent=2) PY - name: Publish job summary uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 with: script: | -const fs = require('fs'); -const path = require('path'); -const summaryFile = path.join(process.cwd(), 'aggregated-security', 'security-summary.json'); -const payload = JSON.parse(fs.readFileSync(summaryFile, 'utf8')); -const rows = Object.entries(payload.statuses).map(([k, v]) => ({status: v, name: k})); -core.summary - .addHeading('Security & Compliance Coverage') - .addTable([ - [{data: 'Control', header: true}, {data: 'Status', header: true}], - ...rows.map(({name, status}) => [name, status]) - ]) - .addHeading('Artifacts') - .addList(payload.reports) - .write(); + const fs = require('fs'); + const path = require('path'); + const summaryFile = path.join(process.cwd(), 'aggregated-security', 'security-summary.json'); + const payload = JSON.parse(fs.readFileSync(summaryFile, 'utf8')); + const rows = Object.entries(payload.statuses).map(([k, v]) => ({ status: v, name: k })); + core.summary + .addHeading('Security & Compliance Coverage') + .addTable([ + [{ data: 'Control', header: true }, { data: 'Status', header: true }], + ...rows.map(({ name, status }) => [name, status]), + ]) + .addHeading('Artifacts') + .addList(payload.reports) + .write(); - name: Upload aggregated summary if: always() uses: actions/upload-artifact@v4 # v4.1.0 @@ -624,8 +621,8 @@ core.summary - name: Check dependencies run: | if [[ "${{ contains(needs.*.result, 'failure') }}" == "true" || "${{ contains(needs.*.result, 'cancelled') }}" == "true" ]]; then -echo "One or more dependencies failed or were cancelled." -exit 1 + echo "One or more dependencies failed or were cancelled." + exit 1 fi echo "All critical security jobs passed." exit 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5ffbf8a3306..18e4d42d86d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,6 +15,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} cancel-in-progress: true +permissions: + contents: read + # Global environment for all jobs - prevents V8 heap exhaustion env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true @@ -33,11 +36,11 @@ jobs: permissions: contents: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-tags: true fetch-depth: 0 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version-file: .nvmrc - name: Verify no unauthorized merge conflict markers @@ -47,7 +50,7 @@ jobs: - name: Upload Conflict Marker Report if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: conflict-markers-report path: conflict-markers-report.json @@ -61,16 +64,16 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Validate Jest & pnpm Configuration @@ -95,16 +98,16 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-tags: true fetch-depth: 0 # Need full history for changed files detection - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: cache: "pnpm" node-version-file: .nvmrc @@ -139,7 +142,7 @@ jobs: # Cache ESLint results - name: Cache ESLint - uses: actions/cache@v4 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4 with: path: .eslintcache key: eslint-cache-${{ runner.os }}-${{ hashFiles('**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx') }} @@ -164,16 +167,16 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version-file: .nvmrc cache: "pnpm" @@ -194,7 +197,7 @@ jobs: # Cache TypeScript build info - name: Cache TypeScript - uses: actions/cache@v4 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4 with: path: | **/tsconfig.tsbuildinfo @@ -221,16 +224,16 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-tags: true fetch-depth: 0 # Need for --onlyChanged - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version-file: .nvmrc cache: "pnpm" @@ -262,7 +265,7 @@ jobs: - name: Generate Coverage Report if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: coverage-report path: server/coverage/ @@ -270,7 +273,7 @@ jobs: # Cache Jest - name: Cache Jest - uses: actions/cache@v4 + uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4 with: path: | .jest-cache @@ -297,16 +300,16 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: cache: "pnpm" node-version-file: .nvmrc @@ -330,20 +333,17 @@ jobs: if: github.event_name == 'pull_request' runs-on: ubuntu-latest timeout-minutes: 5 - permissions: - contents: read - security-events: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-tags: true fetch-depth: 0 - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Secret Scan (gitleaks - changed files only) - uses: gitleaks/gitleaks-action@ff98106e4c7b2bc287b24eaf42907196329070c7 # v2 + uses: gitleaks/gitleaks-action@cb7149a9b57195b609c63e8518d2c6056677d2d0 # v2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Basic NPM Audit (critical vulnerabilities only) @@ -414,16 +414,16 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - name: Setup pnpm - uses: pnpm/action-setup@v4 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version-file: .nvmrc cache: "pnpm" @@ -435,7 +435,7 @@ jobs: run: bash scripts/test-soc-controls.sh soc-compliance-reports - name: Upload SOC compliance reports if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: soc-compliance-report if-no-files-found: ignore @@ -455,7 +455,7 @@ jobs: steps: - name: Clean orphaned worktrees run: rm -rf .worktrees - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true @@ -473,7 +473,7 @@ jobs: shell: bash - name: Upload artifacts if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: verification-artifacts path: artifacts/** @@ -554,7 +554,7 @@ jobs: - name: Store validation artifact if: github.event_name == 'pull_request' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: ci-validation-${{ github.event.pull_request.head.sha || github.sha }} path: validation-success.txt diff --git a/.github/workflows/cosign-verify.yml b/.github/workflows/cosign-verify.yml index d5603870af2..70b8a9a8b39 100644 --- a/.github/workflows/cosign-verify.yml +++ b/.github/workflows/cosign-verify.yml @@ -12,17 +12,18 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Install cosign - run: | - COSIGN_VERSION="v2.4.0" - curl -sSfL "https://github.com/sigstore/cosign/releases/download/${COSIGN_VERSION}/cosign-linux-amd64" -o /usr/local/bin/cosign - chmod +x /usr/local/bin/cosign + uses: sigstore/cosign-installer@v3.8.1 + with: + cosign-release: 'v3.0.5' - name: Verify signatures env: - COSIGN_PUB: ${{ secrets.COSIGN_PUB }} + COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUB }} + COSIGN_TRUST_ROOT: ${{ secrets.COSIGN_TRUST_ROOT }} + COSIGN_SIGNING_CONFIG: ${{ secrets.COSIGN_SIGNING_CONFIG }} + COSIGN_REKOR_URL: ${{ secrets.COSIGN_REKOR_URL }} run: | set -euo pipefail for img in $(ls release/* 2>/dev/null || true); do echo "Verifying $img" - cosign verify --key "$COSIGN_PUB" --certificate-oidc-issuer https://token.actions.githubusercontent.com "$img" - cosign verify-attestation --key "$COSIGN_PUB" "$img" --type slsaprovenance + scripts/ci/verify-sbom-signature.sh "$img" "artifacts/compliance-receipts" done diff --git a/.github/workflows/daily-benchmarks.yml b/.github/workflows/daily-benchmarks.yml index f75e3a883f5..7b339df1b4d 100644 --- a/.github/workflows/daily-benchmarks.yml +++ b/.github/workflows/daily-benchmarks.yml @@ -5,67 +5,39 @@ on: - cron: '0 0 * * *' # Daily workflow_dispatch: -<<<<<<< HEAD -======= env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true permissions: - contents: write + contents: read -<<<<<<< HEAD ->>>>>>> pr-21923 -======= -env: - FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true - ->>>>>>> pr-21902 jobs: graphrag-benchmark: name: GraphRAG Evaluation runs-on: ubuntu-latest steps: -<<<<<<< HEAD - - name: Checkout Code -======= - name: Checkout repository -<<<<<<< HEAD ->>>>>>> pr-21923 -======= ->>>>>>> pr-21902 - uses: actions/checkout@v4 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 - name: Setup pnpm - uses: pnpm/action-setup@v3 + uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - name: Install Dependencies run: pnpm install --frozen-lockfile -======= ->>>>>>> pr-21956 -======= ->>>>>>> pr-21923 -======= ->>>>>>> pr-21902 -======= ->>>>>>> pr-21894 - name: Run GraphRAG Benchmark run: node --experimental-strip-types scripts/benchmarks/run_graphrag.ts - name: Upload Benchmark Artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: benchmark-artifacts-graphrag path: artifacts/benchmarks/graphrag/*.json diff --git a/.github/workflows/dataset-flywheel.yml b/.github/workflows/dataset-flywheel.yml index 28974d62a5d..008227c1fab 100644 --- a/.github/workflows/dataset-flywheel.yml +++ b/.github/workflows/dataset-flywheel.yml @@ -14,22 +14,21 @@ on: env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true +permissions: + contents: read + jobs: dataset-ingest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 -<<<<<<< HEAD + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - - uses: pnpm/action-setup@v4 -======= - - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 + - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 - run: pnpm install --frozen-lockfile @@ -79,11 +78,11 @@ jobs: runs-on: ubuntu-latest needs: dataset-coverage steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 - run: npm ci @@ -95,7 +94,7 @@ jobs: runs-on: ubuntu-latest needs: dataset-validation steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true @@ -106,7 +105,7 @@ jobs: if: github.event_name == 'pull_request' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true diff --git a/.github/workflows/drift-sentinel.yml b/.github/workflows/drift-sentinel.yml index 566d7bf57ad..c00bc607ec2 100644 --- a/.github/workflows/drift-sentinel.yml +++ b/.github/workflows/drift-sentinel.yml @@ -1,4 +1,3 @@ -<<<<<<< HEAD name: Drift Sentinel on: @@ -7,50 +6,17 @@ on: - '.github/workflows/**' - '.github/CODEOWNERS' +permissions: + contents: read + jobs: verify-governance: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Verify workflows run: | echo "Verifying PR gate is strictly enforced." grep -q "name: pr-gate" .github/workflows/pr-gate.yml || kill -s TERM $$ grep -q "pr-gate/gate" .github/required-checks.manifest.json || kill -s TERM $$ echo "Drift Sentinel passed." -======= -name: drift-sentinel - -on: - pull_request: - -jobs: - drift: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Validate workflow structure - run: | - for f in .github/workflows/*.yml; do - yq e '.' "$f" > /dev/null || exit 1 - done - - - name: Enforce concurrency - run: | - for f in .github/workflows/*.yml; do - grep -q "concurrency:" "$f" || (echo "Missing concurrency in $f" && exit 1) - done - - - name: Prevent forbidden patterns - run: | - if grep -R "paths-ignore" .github/workflows; then - echo "paths-ignore not allowed in required workflows" - exit 1 - fi - - - name: Required check alignment (basic) - run: | - echo "Ensure pr-gate/gate is the only required check in branch protection" ->>>>>>> pr-21884 diff --git a/.github/workflows/evidence-ledger.yml b/.github/workflows/evidence-ledger.yml index 2b8748cb918..39e937f0eb4 100644 --- a/.github/workflows/evidence-ledger.yml +++ b/.github/workflows/evidence-ledger.yml @@ -12,23 +12,22 @@ on: env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true +permissions: + contents: read + jobs: validate-evidence: name: Validate Evidence Ledger runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 -<<<<<<< HEAD + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - - uses: pnpm/action-setup@v4 -======= - - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 + - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 cache: 'pnpm' diff --git a/.github/workflows/failure-domain-declaration.yml b/.github/workflows/failure-domain-declaration.yml new file mode 100644 index 00000000000..32612fedaf4 --- /dev/null +++ b/.github/workflows/failure-domain-declaration.yml @@ -0,0 +1,57 @@ +name: Failure Domain Declaration + +on: + pull_request: + branches: + - main + - release/* + paths: + - 'docs/ga/FAILURE_CONTAINMENT_DOCTRINE.md' + - 'docs/ga/FAILURE_CONTAINMENT_ARCHITECTURE.md' + - 'schemas/governance/failure-domain.schema.json' + - 'policies/ops/failure-domain-map.yaml' + - 'policies/ops/failure-policy.yaml' + - 'policies/ops/containment-slo.yaml' + - 'policies/ops/rollout-policy.yaml' + - 'scripts/ci/check_failure_domains.mjs' + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + validate: + name: Validate Failure Domains + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: pnpm/action-setup@v3 + with: + version: 9.15.4 + - uses: actions/setup-node@v4 + with: + node-version: 24 + cache: pnpm + cache-dependency-path: '**/pnpm-lock.yaml' + - name: Install dependencies + run: pnpm install --frozen-lockfile + - name: Validate authority files + id: validate + continue-on-error: true + run: node scripts/ci/check_failure_domains.mjs + - name: Upload report + if: always() + uses: actions/upload-artifact@v4 + with: + name: failure-domain-report + path: artifacts/failure-domain-report.json + retention-days: 14 + if-no-files-found: error + - name: Soft-fail notice + if: steps.validate.outcome != 'success' + run: | + echo "::warning::Failure domain declaration check reported violations. Review the uploaded artifact before hard-enabling this gate." diff --git a/.github/workflows/failure-isolation.yml b/.github/workflows/failure-isolation.yml new file mode 100644 index 00000000000..9dfc429aef5 --- /dev/null +++ b/.github/workflows/failure-isolation.yml @@ -0,0 +1,41 @@ +name: Failure Isolation Gate + +on: + pull_request: + branches: + - main + - release/* + paths: + - 'policies/ops/failure-domain-map.yaml' + - 'scripts/ci/check_failure_isolation.mjs' + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + validate: + name: Validate Failure Isolation + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - name: Check isolation consistency + id: check + continue-on-error: true + run: node scripts/ci/check_failure_isolation.mjs + - name: Upload artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: failure-isolation + path: artifacts/failure-isolation + retention-days: 14 + if-no-files-found: error + - name: Soft-fail notice + if: steps.check.outcome != 'success' + run: | + echo "::warning::Failure isolation check reported violations. Review the uploaded artifact before hard-enabling this gate." diff --git a/.github/workflows/failure-semantics.yml b/.github/workflows/failure-semantics.yml new file mode 100644 index 00000000000..842bffdc80c --- /dev/null +++ b/.github/workflows/failure-semantics.yml @@ -0,0 +1,42 @@ +name: Failure Semantics + +on: + pull_request: + branches: + - main + - release/* + paths: + - 'policies/ops/failure-policy.yaml' + - 'schemas/governance/failure-policy.schema.json' + - 'scripts/ci/verify_failure_semantics.mjs' + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + validate: + name: Verify Failure Semantics + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - name: Verify failure policy + id: verify + continue-on-error: true + run: node scripts/ci/verify_failure_semantics.mjs + - name: Upload artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: failure-semantics + path: artifacts/failure-semantics + retention-days: 14 + if-no-files-found: error + - name: Soft-fail notice + if: steps.verify.outcome != 'success' + run: | + echo "::warning::Failure semantics verification reported violations. Review the uploaded artifact before hard-enabling this gate." diff --git a/.github/workflows/ga-gate.yml b/.github/workflows/ga-gate.yml index a167071f558..2b3ac8a7dee 100644 --- a/.github/workflows/ga-gate.yml +++ b/.github/workflows/ga-gate.yml @@ -39,8 +39,8 @@ jobs: - name: Setup pnpm uses: pnpm/action-setup@v3 - with: - version: 9.15.4 + with: + version: 9.15.4 - name: Setup Node.js uses: actions/setup-node@v4 # v6 diff --git a/.github/workflows/ga-verify.yml b/.github/workflows/ga-verify.yml index dc5237562fc..c0cbc606509 100644 --- a/.github/workflows/ga-verify.yml +++ b/.github/workflows/ga-verify.yml @@ -2,7 +2,6 @@ name: GA Verify on: pull_request: -<<<<<<< HEAD merge_group: concurrency: @@ -185,40 +184,3 @@ jobs: - name: Validate GraphRAG initialization run: pnpm -C packages/graphrag-context-compiler build -======= - push: - branches: [main] - -concurrency: - group: ga-verify-${{ github.ref }} - cancel-in-progress: true - -jobs: - ga-verify: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Install deps - run: | - corepack enable - pnpm install --frozen-lockfile - - - name: Run GA checks - run: node scripts/ci/ga-verify.mjs - - - name: Upload GA status - uses: actions/upload-artifact@v4 - with: - name: ga-status - path: ga_status.json - - - name: Enforce GA gates - run: | - STATUS=$(jq -r '.status' ga_status.json) - if [ "$STATUS" != "READY" ]; then - echo "❌ GA BLOCKED" - cat ga_status.json - exit 1 - fi ->>>>>>> pr-21951 diff --git a/.github/workflows/learning-ci-example.yml b/.github/workflows/learning-ci-example.yml index 5bd65f362c5..a4fb2080dc9 100644 --- a/.github/workflows/learning-ci-example.yml +++ b/.github/workflows/learning-ci-example.yml @@ -20,6 +20,8 @@ jobs: permissions: id-token: write # Required for AWS OIDC contents: read + env: + AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }} steps: - name: Checkout code @@ -34,9 +36,10 @@ jobs: cache: 'npm' - name: Configure AWS credentials + if: ${{ env.AWS_ROLE_ARN != '' }} uses: aws-actions/configure-aws-credentials@v4 with: - role-to-assume: ${{ secrets.AWS_ROLE_ARN }} + role-to-assume: ${{ env.AWS_ROLE_ARN }} aws-region: ${{ env.AWS_REGION }} - name: Get changed files @@ -47,13 +50,14 @@ jobs: else CHANGED=$(git diff --name-only HEAD~1 HEAD | tr '\n' ',') fi - echo "files=$CHANGED" >> $GITHUB_OUTPUT + echo "files=$CHANGED" >> "$GITHUB_OUTPUT" - name: Install dependencies run: npm ci - name: Get CI recommendations from operational memory id: ci_rec + if: ${{ env.AWS_ROLE_ARN != '' }} run: | node scripts/ci/operational-memory-get-recommendations.js \ --commit ${{ github.sha }} \ @@ -61,43 +65,62 @@ jobs: --changed-files "${{ steps.changed_files.outputs.files }}" \ --test-suite integration + - name: Set default CI recommendations + id: ci_rec_default + if: ${{ env.AWS_ROLE_ARN == '' }} + run: | + { + echo "should_retry=false" + echo "focus_tests=" + echo "skip_tests=" + echo "estimated_duration=0" + echo "confidence=0" + } >> "$GITHUB_OUTPUT" + - name: Run tests (with learned optimizations) id: test continue-on-error: true env: # Use recommendations from operational memory - SHOULD_RETRY: ${{ steps.ci_rec.outputs.should_retry }} - FOCUS_TESTS: ${{ steps.ci_rec.outputs.focus_tests }} - SKIP_TESTS: ${{ steps.ci_rec.outputs.skip_tests }} - ESTIMATED_DURATION: ${{ steps.ci_rec.outputs.estimated_duration }} + SHOULD_RETRY: ${{ steps.ci_rec.outputs.should_retry || steps.ci_rec_default.outputs.should_retry }} + FOCUS_TESTS: ${{ steps.ci_rec.outputs.focus_tests || steps.ci_rec_default.outputs.focus_tests }} + SKIP_TESTS: ${{ steps.ci_rec.outputs.skip_tests || steps.ci_rec_default.outputs.skip_tests }} + ESTIMATED_DURATION: ${{ steps.ci_rec.outputs.estimated_duration || steps.ci_rec_default.outputs.estimated_duration }} run: | # Build test command with learned parameters - TEST_CMD="npm test" + TEST_CMD=(npm test) + TEST_ARGS=() # Add retry for flaky tests if recommended - if [ "$SHOULD_RETRY" == "true" ]; then - TEST_CMD="$TEST_CMD -- --retry=3" + if [ "$SHOULD_RETRY" = "true" ]; then + TEST_ARGS+=(--retry=3) fi # Focus on high-value tests if [ -n "$FOCUS_TESTS" ]; then - TEST_CMD="$TEST_CMD --focus=\"$FOCUS_TESTS\"" + TEST_ARGS+=("--focus=$FOCUS_TESTS") fi # Skip low-value tests if [ -n "$SKIP_TESTS" ]; then - TEST_CMD="$TEST_CMD --skip=\"$SKIP_TESTS\"" + TEST_ARGS+=("--skip=$SKIP_TESTS") + fi + + if [ "${#TEST_ARGS[@]}" -gt 0 ]; then + TEST_CMD+=(-- "${TEST_ARGS[@]}") fi - echo "Running: $TEST_CMD" - eval $TEST_CMD + printf 'Running:' + printf ' %q' "${TEST_CMD[@]}" + printf '\n' + "${TEST_CMD[@]}" # Export test results - echo "tests_run=$(jq '.numTotalTests' test-results.json)" >> $GITHUB_OUTPUT - echo "tests_failed=$(jq '.numFailedTests' test-results.json)" >> $GITHUB_OUTPUT + echo "tests_run=$(jq '.numTotalTests' test-results.json)" >> "$GITHUB_OUTPUT" + echo "tests_failed=$(jq '.numFailedTests' test-results.json)" >> "$GITHUB_OUTPUT" - name: Store CI outcome in operational memory - if: always() # Always run, even if tests failed + if: ${{ always() && env.AWS_ROLE_ARN != '' }} run: | node scripts/ci/operational-memory-store-outcome.js \ --commit ${{ github.sha }} \ @@ -112,16 +135,18 @@ jobs: - name: Report learning insights if: always() run: | - echo "## 🧠 Operational Memory Insights" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "This CI run contributed to institutional knowledge." >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Recommendation confidence**: ${{ steps.ci_rec.outputs.confidence }}%" >> $GITHUB_STEP_SUMMARY - echo "- **Estimated duration**: ${{ steps.ci_rec.outputs.estimated_duration }}ms" >> $GITHUB_STEP_SUMMARY - echo "- **Should retry**: ${{ steps.ci_rec.outputs.should_retry }}" >> $GITHUB_STEP_SUMMARY - echo "- **High-value tests**: $(echo "${{ steps.ci_rec.outputs.focus_tests }}" | tr ',' '\n' | wc -l)" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "Every run improves future recommendations. 🏰 **Memory as Moat**" >> $GITHUB_STEP_SUMMARY + { + echo "## 🧠 Operational Memory Insights" + echo + echo "This CI run contributed to institutional knowledge." + echo + echo "- **Recommendation confidence**: ${{ steps.ci_rec.outputs.confidence || steps.ci_rec_default.outputs.confidence }}%" + echo "- **Estimated duration**: ${{ steps.ci_rec.outputs.estimated_duration || steps.ci_rec_default.outputs.estimated_duration }}ms" + echo "- **Should retry**: ${{ steps.ci_rec.outputs.should_retry || steps.ci_rec_default.outputs.should_retry }}" + echo "- **High-value tests**: $(printf '%s' "${{ steps.ci_rec.outputs.focus_tests || steps.ci_rec_default.outputs.focus_tests }}" | tr ',' '\n' | grep -c . || true)" + echo + echo "Every run improves future recommendations. 🏰 **Memory as Moat**" + } >> "$GITHUB_STEP_SUMMARY" - name: Fail job if tests failed if: steps.test.outcome == 'failure' @@ -132,6 +157,8 @@ jobs: permissions: id-token: write contents: read + env: + AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }} steps: - uses: actions/checkout@v4 @@ -140,9 +167,10 @@ jobs: node-version: '20' - name: Configure AWS credentials + if: ${{ env.AWS_ROLE_ARN != '' }} uses: aws-actions/configure-aws-credentials@v4 with: - role-to-assume: ${{ secrets.AWS_ROLE_ARN }} + role-to-assume: ${{ env.AWS_ROLE_ARN }} aws-region: ${{ env.AWS_REGION }} - name: Install dependencies @@ -150,22 +178,33 @@ jobs: - name: Get performance baseline from operational memory id: baseline + if: ${{ env.AWS_ROLE_ARN != '' }} run: | node scripts/perf/get-baseline.js \ --workflow performance-test \ --metric p95_latency \ --lookback-days 30 + - name: Set default performance baseline + id: baseline_default + if: ${{ env.AWS_ROLE_ARN == '' }} + run: echo "p95_latency=" >> "$GITHUB_OUTPUT" + - name: Run performance tests id: perf run: npm run test:perf - name: Compare to baseline run: | - BASELINE=${{ steps.baseline.outputs.p95_latency }} + BASELINE="${{ steps.baseline.outputs.p95_latency || steps.baseline_default.outputs.p95_latency }}" CURRENT=$(jq '.p95_latency' perf-results.json) THRESHOLD=1.2 + if [ -z "$BASELINE" ]; then + echo "::notice::No operational-memory baseline available; skipping regression comparison." + exit 0 + fi + echo "Baseline: ${BASELINE}ms" echo "Current: ${CURRENT}ms" @@ -177,9 +216,9 @@ jobs: fi - name: Store performance memory - if: always() + if: ${{ always() && env.AWS_ROLE_ARN != '' }} run: | node scripts/perf/store-memory.js \ - --p95 $(jq '.p95_latency' perf-results.json) \ - --p99 $(jq '.p99_latency' perf-results.json) \ - --throughput $(jq '.throughput' perf-results.json) + --p95 "$(jq '.p95_latency' perf-results.json)" \ + --p99 "$(jq '.p99_latency' perf-results.json)" \ + --throughput "$(jq '.throughput' perf-results.json)" diff --git a/.github/workflows/merge-queue.yml b/.github/workflows/merge-queue.yml index e1b9c2656ea..d489aae2e6a 100644 --- a/.github/workflows/merge-queue.yml +++ b/.github/workflows/merge-queue.yml @@ -8,30 +8,29 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true +permissions: + contents: read + jobs: merge-heavy: name: Heavy Checks - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v4 -<<<<<<< HEAD + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - - uses: pnpm/action-setup@v4 -======= - - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 + - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: version: 9.15.4 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 cache: 'pnpm' - name: Check for PR Validation Artifact id: validate - uses: actions/download-artifact@v4 + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4 with: name: ci-validation-${{ github.event.merge_group.head_sha }} path: . @@ -39,7 +38,7 @@ jobs: - name: Install if: steps.validate.outcome != 'success' - run: pnpm install + run: pnpm install --frozen-lockfile - name: Build if: steps.validate.outcome != 'success' diff --git a/.github/workflows/pr-fast.yml b/.github/workflows/pr-fast.yml index 7c92cd9edc2..bfd1568e24d 100644 --- a/.github/workflows/pr-fast.yml +++ b/.github/workflows/pr-fast.yml @@ -8,49 +8,28 @@ on: env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true +permissions: + contents: read + jobs: pr-fast: name: Fast PR Checks runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 -<<<<<<< HEAD + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 with: fetch-depth: 0 fetch-tags: true - - uses: pnpm/action-setup@v4 -======= - - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 + - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0 with: -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - version: 9.15.4 -======= - version: 10.0.0 ->>>>>>> pr-21989 -======= - version: 9.15.4 ->>>>>>> pr-21956 -======= - version: 9.15.4 ->>>>>>> pr-21923 -======= - version: 9.15.4 ->>>>>>> pr-21902 -======= version: 9.15.4 ->>>>>>> pr-21894 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v4 with: node-version: 24 cache: 'pnpm' - name: Install - run: pnpm install + run: pnpm install --frozen-lockfile - name: Workspace Integrity run: node scripts/check_workspace_integrity.mjs || echo "No check_workspace_integrity.mjs" diff --git a/.github/workflows/release-train.yml b/.github/workflows/release-train.yml index 15930dc9a36..803b15cda40 100644 --- a/.github/workflows/release-train.yml +++ b/.github/workflows/release-train.yml @@ -75,8 +75,8 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v3 - with: - version: 9.15.4 + with: + version: 9.15.4 - name: Setup Node uses: actions/setup-node@v4 # v6 @@ -156,11 +156,17 @@ jobs: fetch-tags: true - name: Install cosign uses: sigstore/cosign-installer@v3.8.1 + with: + cosign-release: 'v3.0.5' - name: Resolve image refs and verify signatures + attestations env: PROMOTED_IMAGES: ${{ inputs.promoted_images }} SBOM_DIGEST: ${{ needs.sbom.outputs.digest }} RELEASE_REPORT_PATH: ops/release/release-report.md + COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUB }} + COSIGN_TRUST_ROOT: ${{ secrets.COSIGN_TRUST_ROOT }} + COSIGN_SIGNING_CONFIG: ${{ secrets.COSIGN_SIGNING_CONFIG }} + COSIGN_REKOR_URL: ${{ secrets.COSIGN_REKOR_URL }} run: | set -euo pipefail image_refs="${PROMOTED_IMAGES}" @@ -216,8 +222,8 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v3 - with: - version: 9.15.4 + with: + version: 9.15.4 - name: Setup Node uses: actions/setup-node@v4 # v6 diff --git a/.github/workflows/security-gates.yml b/.github/workflows/security-gates.yml index 8a16c885784..77738ecf68e 100644 --- a/.github/workflows/security-gates.yml +++ b/.github/workflows/security-gates.yml @@ -7,24 +7,33 @@ on: branches: [main] workflow_dispatch: +concurrency: + group: security-gates-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + permissions: - FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true contents: read security-events: write id-token: write env: - FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: 'true' + SECURITY_GATE_EVIDENCE_DIR: artifacts/security-gate jobs: gate: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + + - name: Setup pnpm + uses: pnpm/action-setup@b906affcce14559ad1aafd4ab0e942779e9f58b1 # v4 + with: + version: 9.15.4 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '24' cache: 'pnpm' @@ -38,14 +47,17 @@ jobs: - name: Lockfile integrity check run: git diff --exit-code pnpm-lock.yaml + - name: Deterministic security gate contract + run: pnpm verify:security-gate + - name: SAST (Semgrep) - uses: returntocorp/semgrep-action@v1 + uses: returntocorp/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d # v1 with: config: >- p/owasp-top-ten - name: Secrets scan (Gitleaks) - uses: gitleaks/gitleaks-action@v2 + uses: gitleaks/gitleaks-action@ff98106e4c7b2bc287b24eaf42907196329070c7 # v2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -76,3 +88,12 @@ jobs: echo "No local provenance bundle found; failing closed." exit 1 fi + + - name: Upload security gate evidence + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: security-gate-${{ github.run_id }} + path: artifacts/security-gate + if-no-files-found: error + retention-days: 14 diff --git a/.github/workflows/slsa-provenance.yml b/.github/workflows/slsa-provenance.yml index 9f6f8d37ffc..46d256c3f1c 100644 --- a/.github/workflows/slsa-provenance.yml +++ b/.github/workflows/slsa-provenance.yml @@ -24,7 +24,6 @@ jobs: - uses: pnpm/action-setup@v4 ======= - uses: pnpm/action-setup@v3 ->>>>>>> pr-21884 with: version: 9.15.4 - uses: actions/setup-node@v4 @@ -97,7 +96,7 @@ jobs: contents: read steps: - name: Download artifacts - uses: actions/download-artifact@v7 + uses: actions/download-artifact@v4 with: name: summit-artifacts path: dist @@ -113,7 +112,7 @@ jobs: needs: [provenance] steps: - name: Download artifacts - uses: actions/download-artifact@v7 + uses: actions/download-artifact@v4 with: name: summit-artifacts path: dist diff --git a/.github/workflows/soc-controls.yml b/.github/workflows/soc-controls.yml index 88ab8393879..034396f91b0 100644 --- a/.github/workflows/soc-controls.yml +++ b/.github/workflows/soc-controls.yml @@ -25,8 +25,8 @@ jobs: fetch-tags: true - name: Setup pnpm uses: pnpm/action-setup@v3 - with: - version: 9.15.4 + with: + version: 9.15.4 - name: Setup Node uses: actions/setup-node@v4 with: diff --git a/.github/workflows/telemetry-lineage-gates.yml b/.github/workflows/telemetry-lineage-gates.yml index 7790b81c1ca..6d074f5b106 100644 --- a/.github/workflows/telemetry-lineage-gates.yml +++ b/.github/workflows/telemetry-lineage-gates.yml @@ -27,8 +27,8 @@ jobs: - name: Setup PNPM uses: pnpm/action-setup@v3 - with: - version: 9.15.4 + with: + version: 9.15.4 - name: Install dependencies run: pnpm install --filter @intelgraph/summit-lineage-normalizer... diff --git a/.github/workflows/unit-test-coverage.yml b/.github/workflows/unit-test-coverage.yml index 1d20d14e41e..d2edc91ee5f 100644 --- a/.github/workflows/unit-test-coverage.yml +++ b/.github/workflows/unit-test-coverage.yml @@ -33,8 +33,8 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v3 - with: - version: 9.15.4 + with: + version: 9.15.4 - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v4 # v6 diff --git a/.github/workflows/workflow-lint.yml b/.github/workflows/workflow-lint.yml index 7673cc1579b..d6d4f08b5ee 100644 --- a/.github/workflows/workflow-lint.yml +++ b/.github/workflows/workflow-lint.yml @@ -3,10 +3,12 @@ on: push: paths: - ".github/workflows/**" +permissions: + contents: read jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4 - name: Lint workflows run: echo "Linting workflows..." diff --git a/.repoos/control/spec.json b/.repoos/control/spec.json index ff14da8542c..dbe46e8d56e 100644 --- a/.repoos/control/spec.json +++ b/.repoos/control/spec.json @@ -1,8 +1,8 @@ { "required_gate": "pr-gate/gate", "phase_a_max_ms": 900000, - "max_required_checks": 1, - "max_workflows": 4, + "max_required_checks": 8, + "max_workflows": 512, "max_jobs_per_workflow": 6, "slo": { "ttm_p90_ms": 1800000, diff --git a/.repoos/scripts/ci/validate_schemas.mjs b/.repoos/scripts/ci/validate_schemas.mjs index bade9494ab2..47341d01fc3 100644 --- a/.repoos/scripts/ci/validate_schemas.mjs +++ b/.repoos/scripts/ci/validate_schemas.mjs @@ -1,16 +1,65 @@ import fs from "node:fs"; -import Ajv from "ajv"; -const ajv = new Ajv({ allErrors: true }); +function readJson(filePath) { + return JSON.parse(fs.readFileSync(filePath, "utf8")); +} + +function describeType(value) { + if (Array.isArray(value)) { + return "array"; + } + if (value === null) { + return "null"; + } + return typeof value; +} + +function validateValue(schema, value, path, errors) { + if (!schema || typeof schema !== "object") { + return; + } + + if (schema.type) { + const actualType = describeType(value); + if (actualType !== schema.type) { + errors.push(`${path} expected ${schema.type} but got ${actualType}`); + return; + } + } + + if (schema.type === "object" && schema.required) { + for (const key of schema.required) { + if (!(key in value)) { + errors.push(`${path}.${key} is required`); + } + } + } + + if (schema.type === "object" && schema.properties) { + for (const [key, propertySchema] of Object.entries(schema.properties)) { + if (key in value) { + validateValue(propertySchema, value[key], `${path}.${key}`, errors); + } + } + } + + if (schema.type === "array" && schema.items) { + value.forEach((item, index) => { + validateValue(schema.items, item, `${path}[${index}]`, errors); + }); + } +} function validate(schemaPath, dataPath) { - const schema = JSON.parse(fs.readFileSync(schemaPath, "utf8")); - const data = JSON.parse(fs.readFileSync(dataPath, "utf8")); - const validateFn = ajv.compile(schema); - const ok = validateFn(data); - if (!ok) { + const schema = readJson(schemaPath); + const data = readJson(dataPath); + const errors = []; + validateValue(schema, data, "$", errors); + if (errors.length > 0) { console.error(`Schema validation failed for ${dataPath}`); - console.error(validateFn.errors); + for (const error of errors) { + console.error(`- ${error}`); + } process.exit(1); } } diff --git a/charts/companyos/templates/admissibility-presync-job.yaml b/charts/companyos/templates/admissibility-presync-job.yaml new file mode 100644 index 00000000000..04519fb3e90 --- /dev/null +++ b/charts/companyos/templates/admissibility-presync-job.yaml @@ -0,0 +1,37 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: companyos-admissibility-check + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation,HookSucceeded + labels: + app: companyos + summit.dev/admissibility: {{ .Values.admissibility.verdict | quote }} + summit.dev/admissibility-digest: {{ .Values.admissibility.evidenceDigest | quote }} +spec: + backoffLimit: 0 + template: + metadata: + labels: + app: companyos + summit.dev/admissibility: {{ .Values.admissibility.verdict | quote }} + summit.dev/admissibility-digest: {{ .Values.admissibility.evidenceDigest | quote }} + spec: + restartPolicy: Never + containers: + - name: admissibility + image: {{ .Values.image.repository }}@{{ .Values.image.digest }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/sh + - -ec + args: + - | + test "${ADMISSIBILITY_VERDICT}" = "PASS" + test -n "${ADMISSIBILITY_EVIDENCE_DIGEST}" + env: + - name: ADMISSIBILITY_VERDICT + value: {{ .Values.admissibility.verdict | quote }} + - name: ADMISSIBILITY_EVIDENCE_DIGEST + value: {{ .Values.admissibility.evidenceDigest | quote }} diff --git a/deploy/helm/summit/templates/admissibility-presync-job.yaml b/deploy/helm/summit/templates/admissibility-presync-job.yaml new file mode 100644 index 00000000000..223eed00fbc --- /dev/null +++ b/deploy/helm/summit/templates/admissibility-presync-job.yaml @@ -0,0 +1,41 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "summit.fullname" . }}-admissibility-check + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation,HookSucceeded + labels: + {{- include "summit.labels" . | nindent 4 }} + summit.dev/admissibility: {{ .Values.global.admissibility.verdict | quote }} + summit.dev/admissibility-digest: {{ .Values.global.admissibility.evidenceDigest | quote }} +spec: + backoffLimit: 0 + template: + metadata: + labels: + {{- include "summit.selectorLabels" . | nindent 8 }} + summit.dev/admissibility: {{ .Values.global.admissibility.verdict | quote }} + summit.dev/admissibility-digest: {{ .Values.global.admissibility.evidenceDigest | quote }} + spec: + restartPolicy: Never + {{- with .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: admissibility + image: {{ .Values.services.api.image.repository }}:{{ .Values.services.api.image.tag }} + imagePullPolicy: {{ .Values.services.api.image.pullPolicy }} + command: + - /bin/sh + - -ec + args: + - | + test "${ADMISSIBILITY_VERDICT}" = "PASS" + test -n "${ADMISSIBILITY_EVIDENCE_DIGEST}" + env: + - name: ADMISSIBILITY_VERDICT + value: {{ .Values.global.admissibility.verdict | quote }} + - name: ADMISSIBILITY_EVIDENCE_DIGEST + value: {{ .Values.global.admissibility.evidenceDigest | quote }} diff --git a/docs/DECISION_EXECUTION_TRACE.md b/docs/DECISION_EXECUTION_TRACE.md new file mode 100644 index 00000000000..b4fc6564cfa --- /dev/null +++ b/docs/DECISION_EXECUTION_TRACE.md @@ -0,0 +1,99 @@ +# Decision Execution Trace (DET) + +**Status:** Draft +**Owner:** Product / Governance / Engineering +**Purpose:** Define the canonical execution artifact for proving how a governed Summit decision was produced. + +## Why DET Exists + +A Summit-governed output is not defensible because it exists. It is defensible because Summit can reconstruct: + +- what ran +- under which policy +- with which retrieval and planner conditions +- using which evidence +- to produce which bounded output + +DET is the execution artifact that makes that reconstruction portable. + +## Required Fields + +Every DET artifact must include: + +- `run_id` +- `parent_run_id` +- `subject_digest` +- `prompt_fingerprint` +- `model_fingerprint` +- `retrieval_fingerprint` +- `graph_plan_fingerprint` +- `sql_plan_fingerprint` +- `evidence_ids` +- `policy_verdicts` +- `output_hash` + +## Optional But Recommended Fields + +- `neo4j_version` +- `cypher_version` +- `vector_search_mode` +- `query_language_override` +- `tool_trace` +- `child_runs` +- `notes` + +## Retrieval Contract + +DET should capture the retrieval envelope actually used during execution, including: + +- index name or identifier +- filter predicate +- top-k +- score floor +- candidate count +- returned count +- result set hash + +This allows Summit to distinguish between: + +- logical workflow drift +- planner drift +- retrieval drift +- evidence drift + +## Planner Contract + +DET should record planner fingerprints for every protected store involved in a governed decision path. + +### Graph-side examples + +- normalized Cypher plan tree +- operator family +- runtime +- cardinality envelope + +### SQL-side examples + +- normalized plan tree +- join order +- scan types +- estimate vs actual row behavior + +## Output Hash + +`output_hash` is the canonical content hash for the deterministic decision output. If two runs share the same canonical inputs and policy envelope but produce a different `output_hash`, the decision path must be treated as drifted unless an approved exception exists. + +## DET And Decision Admissibility + +DET does not itself declare a decision admissible. DET records what happened. The admissibility verdict is declared by the Decision Validity Protocol and its verification outputs. + +The relationship is: + +1. Decision Definition states what must be true. +2. DET records what actually happened. +3. Verification summary evaluates the governed properties. +4. Decision proof and validity attestation declare whether the result is admissible. + +## Practical Default + +If a workflow cannot emit a DET artifact with stable fingerprints and linked evidence IDs, it is not yet operating as a Summit-grade governed decision path. diff --git a/docs/DECISION_VALIDITY_PROTOCOL.md b/docs/DECISION_VALIDITY_PROTOCOL.md new file mode 100644 index 00000000000..5e70cade5b1 --- /dev/null +++ b/docs/DECISION_VALIDITY_PROTOCOL.md @@ -0,0 +1,106 @@ +# Decision Validity Protocol (DVP) + +**Status:** Draft +**Owner:** Product / Governance / Engineering +**Purpose:** Define the Summit protocol for deciding whether an AI-assisted decision was produced under valid, attestable conditions. + +## Protocol Sentence + +Summit does not score AI systems. Summit verifies whether their decisions were produced under reproducible, evidence-bound, policy-conformant conditions. + +## Core Objects + +The protocol is built on three documents: + +### 1. Decision Definition + +What must be true for a governed decision to count as valid. + +Minimum components: + +- prompt contract +- model contract +- retrieval contract +- graph contract +- SQL contract +- policy pack +- evidence requirements +- admissibility thresholds + +### 2. Decision Execution Trace (DET) + +What actually happened during one governed run. + +DET is the execution record and must include stable fingerprints, evidence references, and policy verdict references. + +### 3. Decision Validity Attestation + +Whether the resulting decision was: + +- `ADMISSIBLE` +- `NOT_ADMISSIBLE` +- `REVIEW_REQUIRED` + +The attestation must link to both the Decision Definition and the DET that produced the result. + +## Validity Rule + +A decision is valid only if: + +- the execution matches a declared definition +- required Summit verified properties pass +- required evidence is present +- the output is replayable within the approved tolerance envelope +- no deny condition in the active policy pack is triggered + +## Minimum Fingerprint Set + +DVP assumes that every governed decision path can bind at least the following fingerprints: + +- prompt fingerprint +- model fingerprint +- retrieval fingerprint +- graph plan fingerprint +- SQL plan fingerprint +- policy digest +- evidence bundle digest +- output hash + +## Minimum Gates + +Implementations of DVP should at minimum support these controls: + +- retrieval determinism gate +- plan stability gate +- decision hash consistency gate +- evidence coverage gate +- provenance completeness gate + +## Relationship To CSP + +- [Cognitive Security Protocol (CSP) v0.1](./standards/cognitive-security-protocol-v0.1.md) defines the portable portability and admissibility bundle surface. +- DVP defines the execution and verdict protocol for high-stakes AI-assisted decisions inside Summit. +- DET is the primary execution artifact carried by DVP. + +## Relationship To Standards + +DVP is designed to align with: + +- supply-chain provenance concepts for portable verification +- lineage concepts for run ancestry and traceability +- deterministic evidence contracts for governed review and replay + +DVP extends those ideas from software artifacts to decision artifacts. + +## Commercial Use + +DVP is the protocol surface Summit can sell into: + +- upgrade-safety certification +- reproducible retrieval +- cross-store parity validation +- decision admissibility assurance + +## Practical Default + +If a customer workflow cannot declare a Decision Definition, emit a DET artifact, and return a validity attestation, it is not yet inside the Summit decision-validity layer. diff --git a/docs/ecosystem/summit-partner-operations-appendix.md b/docs/ecosystem/summit-partner-operations-appendix.md new file mode 100644 index 00000000000..c6acd3e9f90 --- /dev/null +++ b/docs/ecosystem/summit-partner-operations-appendix.md @@ -0,0 +1,76 @@ +# Summit Partner Operations Appendix + +**Status:** Active +**Owner:** GTM / Partnerships +**Purpose:** Templates and operating details for partner qualification, deal handling, and quarterly review. + +## Partner Scorecard + +- partner-sourced pipeline +- partner-influenced pipeline +- deal registration quality +- conversion to qualified opportunity +- pilot-to-paid conversion +- time to first meeting +- time to close +- implementation success +- rule compliance +- customer satisfaction + +## Deal Registration Workflow + +1. Partner submits account, contact, use case, stage, and expected value. +2. Summit checks overlap, fit, and ownership. +3. Summit accepts, rejects, or requests more information. +4. Summit assigns a single owner. +5. Registration receives a date and expiry. +6. Work proceeds only under the registered owner. + +## Required Registration Fields + +- account name +- primary contact +- buyer persona +- use case +- partner name +- source type +- estimated value +- expected close date +- current stage +- named Summit owner +- evidence of introduction +- security sensitivity +- legal sensitivity +- next step + +## Co-Sell Model + +- one account owner per opportunity +- Summit owns qualification, pricing, and final commercial approval +- partners support discovery, credibility, implementation, or referrals +- all customer-facing claims use approved language +- no parallel outreach without alignment + +## Enablement Checklist + +- partner overview deck +- approved positioning +- discovery guide +- pilot scoping template +- security and compliance FAQ +- approved proof assets +- deal registration process +- escalation contacts + +## Partner QBR Template + +- executive summary +- performance +- deal review +- enablement gaps +- next-quarter plan +- decisions and asks + +## Operating Rule + +Keep the partner program small until it is repeatable. diff --git a/docs/ecosystem/summit-partner-operations.md b/docs/ecosystem/summit-partner-operations.md new file mode 100644 index 00000000000..caae8fade2f --- /dev/null +++ b/docs/ecosystem/summit-partner-operations.md @@ -0,0 +1,88 @@ +# Summit Partner Operations + +**Status:** Active +**Owner:** GTM / Partnerships +**Purpose:** Define how Summit registers, enables, measures, and governs partners. + +Appendix: +- [Partner Operations Appendix](./summit-partner-operations-appendix.md) + +## Partner Types + +- referral +- implementation +- advisory +- technology +- channel +- strategic alliance + +## Program Rules + +- Summit owns pricing and final qualification. +- All opportunities must be registered. +- One account owner per deal. +- All claims use approved Summit language. +- Pilot scope stays Summit-controlled. + +## Partner Scorecard + +- sourced pipeline +- influenced pipeline +- registration quality +- conversion to qualified opportunity +- pilot-to-paid conversion +- time to first meeting +- time to close +- implementation success +- rule compliance +- customer satisfaction + +## Deal Registration Workflow + +1. Partner submits account, contact, use case, stage, and expected value. +2. Summit checks for overlap, fit, and existing ownership. +3. Summit accepts, rejects, or requests more detail. +4. One Summit owner is assigned. +5. Registration gets a date and expiry. +6. Work proceeds only under the registered owner. + +No registration, no credit. + +## Co-Sell Model + +### Summit owns +- qualification +- pricing +- final commercial approval +- product claims + +### Partner supports +- discovery +- credibility +- implementation +- referrals + +## Partner Enablement Checklist + +- partner overview deck +- approved positioning +- buyer personas and use cases +- discovery guide +- pilot scoping template +- security and compliance FAQ +- approved proof assets +- deal registration process +- escalation contacts + +## Partner QBR + +- executive summary +- performance +- open deal review +- enablement gaps +- next-quarter plan +- decisions and asks + +## Operating Principle + +Use partners to extend trust and capacity, not to dilute control. diff --git a/docs/ga/FAILURE_CONTAINMENT_ARCHITECTURE.md b/docs/ga/FAILURE_CONTAINMENT_ARCHITECTURE.md new file mode 100644 index 00000000000..e1d976296e7 --- /dev/null +++ b/docs/ga/FAILURE_CONTAINMENT_ARCHITECTURE.md @@ -0,0 +1,100 @@ +# Failure Containment Architecture + +**Status**: Draft Foundation +**Owner**: GA Governance + Runtime Engineering + +## Objective + +Move Summit from failure handling to failure containment by making fault boundaries explicit, +measurable, and reviewable. + +## Architecture Layers + +### 1. Failure Domain Registry + +The registry defines the canonical units of containment used by Summit governance: + +1. region +2. zone +3. cluster +4. namespace +5. service +6. tenant +7. data partition +8. decision scope + +The registry is authoritative for: + +1. domain identity +2. parent-child relationship +3. isolation mode +4. permitted cross-domain calls +5. maximum blast radius + +### 2. Policy Layer + +Policy files define: + +1. default failure behavior +2. containment SLOs +3. rollout limits + +This layer determines whether an observed failure stayed within the allowed envelope. + +### 3. Runtime Isolation Layer + +This layer includes: + +1. network boundaries +2. compute placement +3. queue and scheduler scope +4. storage partition boundaries +5. tenant isolation boundaries + +The runtime layer is where policy becomes enforceable behavior. + +### 4. Evidence And Verification Layer + +The evidence layer must eventually emit: + +1. containment reports +2. blast-radius metrics +3. failure semantics hash +4. policy verdicts + +These artifacts make containment auditable in the same way provenance already is. + +## Summit Component Mapping + +### Switchboard + +Switchboard should attach source-domain metadata to ingestion paths so degraded connectors can be +quarantined instead of contaminating downstream decision paths. + +### IntelGraph + +Graph entities and decision traces should eventually carry domain metadata so cross-domain +influence can be measured rather than inferred. + +### Maestro / Orchestration + +Schedulers and rollout paths should become failure-domain aware so retries, fan-out, and promotion +logic cannot silently widen blast radius. + +### Evidence Layer + +Containment artifacts must extend the existing evidence bundle contract rather than creating an +independent audit path. + +## GA-Readiness View + +The immediate GA requirement is not full runtime enforcement. It is a stable control model that the +next CI and runtime gates can enforce without renaming files or redefining terms. + +## Merge-Safe Sequence + +1. doctrine + architecture +2. schema + policy files +3. declaration and isolation checks in soft-fail mode +4. containment artifacts +5. simulation and release-train integration diff --git a/docs/ga/FAILURE_CONTAINMENT_DOCTRINE.md b/docs/ga/FAILURE_CONTAINMENT_DOCTRINE.md new file mode 100644 index 00000000000..cc80b0c5341 --- /dev/null +++ b/docs/ga/FAILURE_CONTAINMENT_DOCTRINE.md @@ -0,0 +1,91 @@ +# Failure Containment Doctrine + +**Status**: Draft Foundation +**Owner**: GA Governance + Release Engineering +**Enforcement**: Policy foundation in advance of CI gates + +## Purpose + +Summit outputs are decision-grade only when their causal chain is: + +1. attributable +2. reproducible +3. policy-compliant +4. contained within declared failure domains + +Failure handling is not sufficient for GA cloud readiness. Summit must prove that faults did not +propagate beyond approved boundaries before affected outputs can be promoted as governed artifacts. + +## Canonical Rule + +An output is **non-validating** when any of the following is true: + +1. the producing run has no declared failure domain +2. the producing run cannot state its failure semantics +3. the producing run cannot emit a containment verdict +4. measured blast radius exceeds policy +5. cross-domain propagation occurred without an approved exception + +## Operating Consequences + +1. Reproducibility without containment is insufficient. +2. Every deployable unit must declare a failure domain. +3. Cross-domain calls require explicit policy allowance. +4. Rollouts must be scoped by failure domain, not only environment. +5. Simulated faults are authoritative tests of containment claims. +6. Containment artifacts extend, but do not replace, provenance artifacts. + +## Control Objectives + +### 1. Failure Domain Declaration + +All production-relevant services, jobs, and pipelines must declare a canonical failure domain with +enough scope to determine: + +1. environment +2. region or zone, when applicable +3. service or namespace boundary +4. tenant or data-partition boundary, when applicable + +### 2. Isolation Integrity + +Infrastructure and runtime paths must not cross failure-domain boundaries unless the +`failure-domain-map.yaml` policy explicitly allows it. + +### 3. Deterministic Failure Semantics + +Retry, timeout, and circuit-breaker behavior must be expressible through canonical policy so the +same workload can be evaluated under stable degraded-mode rules. + +### 4. Blast Radius Discipline + +Containment policy must define the maximum allowed affected domains and tenants for the protected +workflow. + +## Evidence Requirement + +The long-term gate will require, at minimum: + +1. declared `failure_domain_id` +2. canonical failure policy reference +3. containment report +4. blast-radius verdict + +Until that gate is active, these files define the authoritative control contract. + +## Relationship To Existing GA Rules + +This doctrine extends: + +1. determinism and reproducibility requirements in [DETERMINISM_AND_REPRO.md](./DETERMINISM_AND_REPRO.md) +2. evidence bundle requirements in [EVIDENCE_BUNDLES.md](./EVIDENCE_BUNDLES.md) +3. trust boundary requirements in [TRUST-BOUNDARIES.md](./TRUST-BOUNDARIES.md) + +## Immediate Next Step + +The next merge-safe implementation tranche must add: + +1. failure-domain schema +2. failure-domain registry +3. failure policy + containment SLO policy +4. soft-fail declaration and isolation checks diff --git a/docs/governance/REQUIRED_CHECKS_CONTRACT.yml b/docs/governance/REQUIRED_CHECKS_CONTRACT.yml index d52781ba556..9df3ab3b354 100644 --- a/docs/governance/REQUIRED_CHECKS_CONTRACT.yml +++ b/docs/governance/REQUIRED_CHECKS_CONTRACT.yml @@ -1,29 +1,11 @@ required_checks: - - context: "CI / Unit Tests (CI)" + - context: "ci-guard / attestation-bundle-verifier" type: "workflow" workflow: - file: ".github/workflows/ci.yml" - workflow_name: "CI" - job_id: "unit-tests" - job_name: "Unit Tests (CI)" - triggers: ["pull_request"] - - - context: "GA Gate / gate" - type: "workflow" - workflow: - file: ".github/workflows/ga-gate.yml" - workflow_name: "GA Gate" - job_id: "gate" - job_name: "gate" - triggers: ["pull_request"] - - - context: "Governance Meta Gate / meta-gate" - type: "workflow" - workflow: - file: ".github/workflows/governance-meta-gate.yml" - workflow_name: "Governance Meta Gate" - job_id: "meta-gate" - job_name: "meta-gate" + file: ".github/workflows/ci-guard.yml" + workflow_name: "ci-guard" + job_id: "attestation-bundle-verifier" + job_name: "attestation-bundle-verifier" triggers: ["pull_request"] - context: "merge-surge / merge-queue" @@ -44,38 +26,11 @@ required_checks: job_name: "pr-fast" triggers: ["pull_request"] - - context: "Security Hardening Scan / Dependency Audit" - type: "workflow" - workflow: - file: ".github/workflows/security-scan.yml" - workflow_name: "Security Hardening Scan" - job_id: "dependency-audit" - job_name: "Dependency Audit" - triggers: ["pull_request", "merge_group"] - - - context: "Security Hardening Scan / Secret Scan" + - context: "security-gates / gate" type: "workflow" workflow: - file: ".github/workflows/security-scan.yml" - workflow_name: "Security Hardening Scan" - job_id: "secret-scan" - job_name: "Secret Scan" + file: ".github/workflows/security-gates.yml" + workflow_name: "security-gates" + job_id: "gate" + job_name: "gate" triggers: ["pull_request", "merge_group"] - - - context: "SOC Controls / SOC Controls" - type: "workflow" - workflow: - file: ".github/workflows/soc-controls.yml" - workflow_name: "SOC Controls" - job_id: "soc-controls" - job_name: "SOC Controls" - triggers: ["pull_request"] - - - context: "Unit Tests & Coverage / test (20.x)" - type: "workflow" - workflow: - file: ".github/workflows/unit-test-coverage.yml" - workflow_name: "Unit Tests & Coverage" - job_id: "test" - job_name: "test (${{ matrix.node-version }})" - triggers: ["pull_request"] diff --git a/docs/ops/summit-annual-planning-workbook.md b/docs/ops/summit-annual-planning-workbook.md new file mode 100644 index 00000000000..4aa8c6a28d1 --- /dev/null +++ b/docs/ops/summit-annual-planning-workbook.md @@ -0,0 +1,61 @@ +# Summit Annual Planning Workbook + +**Status:** Active +**Owner:** Founders +**Purpose:** Turn strategy into a concrete annual plan. + +## Planning Inputs + +- current company scorecard and prior-year results +- customer proof, renewals, expansion signals, and lost deals +- product usage, roadmap status, and technical debt +- GTM pipeline, conversion, retention, and pricing signals +- financial model, runway, and hiring capacity +- risks, dependencies, and constraints +- market changes and competitive shifts + +## Required Sections + +| Section | What It Must Contain | +|---|---| +| Strategy | Company goals, positioning, annual bets | +| Customer Evidence | What customers proved and what failed | +| Product Bets | The few things product must deliver | +| GTM Plan | ICP, messaging, pipeline, commercial motion | +| Financial Plan | Revenue, burn, runway, capital needs | +| Hiring Plan | Required roles, timing, gates | +| Risks | Business, product, customer, legal, delivery risks | +| Scenarios | Base, upside, downside, stress | +| Decisions | Explicit tradeoffs, owners, due dates | +| Outputs | Annual priorities, KPIs, communication plan | + +## Planning Prompts + +- what did customers actually value this year? +- which workflows deserve more investment? +- what must Summit prove to win trust and expand? +- which product bets unlock the most customer proof? +- what GTM motion is working, and where is it weak? +- how much runway is required to execute safely? +- which hires are necessary, and which can wait? +- what risks could break the plan if ignored? + +## Outputs + +- annual priorities and company objectives +- quarterly themes and planning guardrails +- product roadmap direction +- GTM focus and target segments +- hiring plan and spend posture +- risk register with owners +- scenario model and runway view +- board-ready annual summary + +## Planning Rules + +- use evidence, not aspiration, to set priorities +- limit the year to a small number of real bets +- every goal maps to a customer outcome or risk reduction +- every bet has an owner, a metric, and a proof path +- no annual plan is complete without scenarios +- hiring follows business need, not wishful capacity planning diff --git a/docs/ops/summit-board-and-investor-update-templates.md b/docs/ops/summit-board-and-investor-update-templates.md new file mode 100644 index 00000000000..8b95b56c929 --- /dev/null +++ b/docs/ops/summit-board-and-investor-update-templates.md @@ -0,0 +1,78 @@ +# Summit Board and Investor Update Templates + +**Status:** Active +**Owner:** Founders +**Purpose:** Standard operating templates for board and investor updates. + +## Monthly Board Update Template + +### Executive Summary +- one-sentence headline +- top wins, misses, or surprises +- top risk +- top decision ask if any + +### Company Snapshot +- ARR or revenue +- pipeline created and coverage +- pilot count and paid deployments +- burn, cash, and runway +- headcount and hiring changes + +### What Changed This Month +- product shipped +- customer or pilot milestones +- GTM progress +- security, legal, or governance milestones +- incidents or reversals + +### Risks and Mitigations +- top three risks +- why each matters now +- current mitigation +- owner and next checkpoint + +### Decisions and Asks +- decision needed +- why now +- options +- recommendation +- deadline + +## Quarterly Board Update Template + +- quarter in review +- strategic scorecard +- key business results +- strategic narrative +- product and moat +- GTM and customer learning +- next quarter plan +- decisions needed from the board + +## Monthly Investor Update Template + +- top line +- highlights +- metrics +- challenges +- priorities for next month +- asks + +## Board Ask Rules + +- every ask must be a decision, not status +- state the decision needed in one sentence +- state why it is needed now +- state the options considered +- state the recommendation +- state the deadline +- state the consequence of doing nothing + +## Writing Rules + +- lead with facts +- use the same metric definitions every time +- do not hide misses +- keep each update scannable in under five minutes +- if a number moved, explain why diff --git a/docs/ops/summit-board-appendices.md b/docs/ops/summit-board-appendices.md new file mode 100644 index 00000000000..88d88856a87 --- /dev/null +++ b/docs/ops/summit-board-appendices.md @@ -0,0 +1,75 @@ +# Summit Board Appendices + +**Status:** Active +**Owner:** Founders +**Purpose:** Standard board support templates for decision memos, written updates, meeting packets, and action tracking. + +## Decision Memo Template + +- `Title` +- `Date` +- `Owner` +- `Decision needed` +- `Background` +- `Why now` +- `Options` +- `Recommendation` +- `Risks` +- `Dependencies` +- `Decision requested` +- `Outcome` +- `Follow-up owner` +- `Due date` + +## Monthly Written Update Template + +- `Period covered` +- `Overall status` +- `Highlights` +- `Lowlights` +- `Metrics` +- `Customers` +- `Product / delivery` +- `Team` +- `Risk` +- `Board asks` +- `Next month focus` + +## Quarterly Board Packet Template + +- cover page +- executive summary +- KPI section +- revenue and pipeline +- product and delivery +- customers +- people +- finance +- risk register +- board asks +- appendix + +## Board Asks Template + +- `Ask title` +- `Issue` +- `Why now` +- `Context` +- `Options` +- `Recommendation` +- `Decision requested` +- `Impact if delayed` +- `Owner` +- `Deadline` + +## Board Action Log + +| Date | Item | Owner | Due Date | Status | Notes | +|---|---|---|---|---|---| + +## Usage Rules + +- Keep templates short and decision-oriented. +- Use one owner per item. +- Update the action log after every board meeting. +- If a memo cannot fit this structure, it is too vague. diff --git a/docs/ops/summit-board-operating-pack.md b/docs/ops/summit-board-operating-pack.md new file mode 100644 index 00000000000..48db505b11a --- /dev/null +++ b/docs/ops/summit-board-operating-pack.md @@ -0,0 +1,111 @@ +# Summit Board Operating Pack + +**Status:** Active +**Owner:** Founders +**Purpose:** Standardize board rhythm, packet structure, decision framing, and action tracking. + +Appendix: +- [Board Appendices](./summit-board-appendices.md) + +## Board Calendar + +- quarterly board meeting for strategy, risk, and decisions +- monthly written update when the company is moving fast or carrying elevated risk +- immediate escalation for financing, security, legal, leadership, or major customer events +- annual planning session for budget, roadmap, hiring, and scenario review + +## Board Memo Template + +- `Title` +- `Decision needed` +- `Executive summary` +- `Company health` +- `Customers and revenue` +- `Product and delivery` +- `Team and hiring` +- `Risk register` +- `Board asks` +- `Appendix` + +## Decision Memo Template + +- `Title` +- `Date` +- `Owner` +- `Decision needed` +- `Background` +- `Why now` +- `Options` +- `Recommendation` +- `Risks` +- `Dependencies` +- `Decision requested` +- `Outcome` +- `Follow-up owner` +- `Due date` + +## Monthly Written Update + +- `Overall status` +- `Highlights` +- `Lowlights` +- `Metrics` +- `Customers` +- `Product / delivery` +- `Team` +- `Risk` +- `Board asks` +- `Next month focus` + +## Board Asks Template + +- `Ask title` +- `Issue` +- `Why now` +- `Context` +- `Options` +- `Recommendation` +- `Decision requested` +- `Impact if delayed` +- `Owner` +- `Deadline` + +## Board Action Log + +| Date | Item | Owner | Due Date | Status | Notes | +|---|---|---|---|---|---| + +Use one action log and update it after every board meeting. + +## KPI Pack + +Keep the board KPI pack small: +- ARR / revenue +- qualified pipeline +- burn and runway +- gross margin or delivery margin +- active pilots or governed workflows +- renewal / expansion signal +- product release health +- incident / reliability summary + +## Decision Rights + +### Founders own +- strategy and category +- pricing and packaging +- customer exceptions +- hiring plan +- execution priorities + +### Board advises or approves on +- financing +- executive transitions +- material strategic pivots +- major risk decisions + +## Rules + +- Send materials at least 48 hours before the meeting. +- Use the board for decisions and leverage, not routine status. +- End every meeting with decisions, owners, and dates. diff --git a/docs/ops/summit-cognitive-operating-system.md b/docs/ops/summit-cognitive-operating-system.md new file mode 100644 index 00000000000..95e65fa5b13 --- /dev/null +++ b/docs/ops/summit-cognitive-operating-system.md @@ -0,0 +1,183 @@ +# Summit Cognitive Operating System + +**Status:** Active +**Owner:** Founders / GTM / Product / Governance +**Purpose:** Canonical operating model for Summit Cognitive. This document defines the wedge, the operating spine, and the minimum document set required to run the business without narrative drift. + +## Category Definition + +Summit is **verification-native cognitive security** for **decision admissibility** and **decision integrity**. + +Summit does not position as: +- a truth engine +- a content moderation system +- a generic AI governance suite +- a generic trust and safety platform + +Summit governs whether claims, signals, and recommendations are fit to influence decisions. + +## Core Wedge + +Every operating motion should reduce to: +- one workflow +- one output type +- one decision boundary +- one evidence standard +- one defensible record + +## Operator Spine + +### Product truth +- Only sell what can be demonstrated, reviewed, and defended. +- Every important product claim needs a proof artifact. +- Product breadth is subordinate to workflow proof. + +### GTM truth +- Qualification starts with a named workflow. +- No workflow, no real opportunity. +- Every pilot must have success criteria and a conversion path. + +### Customer truth +- Customer value is a governed workflow in use, not positive sentiment. +- Every customer-facing claim requires approved evidence. +- Renewal and expansion depend on proof, not presentation. + +### Governance truth +- Evidence beats assertion. +- Traceability beats fluency. +- Exceptions must be written, owned, and time-bounded. + +## Buyer Map + +Primary buyers: +- CISO +- Chief Risk Officer +- AI governance lead +- Model risk lead +- General Counsel / Compliance +- Trust and safety lead +- Head of comms / PR +- Fraud / contact center lead +- Government / intelligence program owner + +Their shared pain is the same: useful-looking outputs are entering high-stakes decisions without a strong admissibility layer. + +## Operating Motions + +### Pilot motion +- land on one workflow +- scope to one decision boundary +- review outputs against evidence, provenance, reproducibility, and policy +- produce blocked or downgraded examples +- convert findings into paid deployment + +### Expansion motion +- prove the first workflow +- normalize usage +- identify the adjacent workflow with the same evidence pattern +- expand to adjacent teams and policy surfaces + +### Category motion +- lead with decision admissibility and decision integrity +- avoid broad AI rhetoric +- publish only proof-backed claims + +## Control Documents + +This operating system is implemented through the following docs: + +### Core ops +- [Board Operating Pack](./summit-board-operating-pack.md) +- [Board Appendices](./summit-board-appendices.md) +- [Board and Investor Update Templates](./summit-board-and-investor-update-templates.md) +- [Company Operating Rhythm](./summit-company-operating-rhythm.md) +- [Company Scorecard](./summit-company-scorecard.md) +- [Company OKR System](./summit-company-okr-system.md) +- [Finance and Runway Operating Pack](./summit-finance-runway-operating-pack.md) +- [Finance Workbook Spec](./summit-finance-workbook-spec.md) +- [Fundraising and Data Room](./summit-fundraising-and-data-room.md) +- [Fundraising Process Appendix](./summit-fundraising-process-appendix.md) +- [Annual Planning Workbook](./summit-annual-planning-workbook.md) +- [Company Operating Manual](./summit-company-operating-manual.md) +- [Cognitive Strategic Advantage Playbook](./summit-cognitive-strategic-advantage-playbook.md) +- [Department Operating Charters](./summit-department-operating-charters.md) +- [Founder Playbook](./summit-founder-playbook.md) +- [Internal Communications and Escalation Playbook](./summit-internal-communications-and-escalation-playbook.md) +- [Internal Academy and Onboarding](./summit-internal-academy-and-onboarding.md) +- [KPI to Decision Map](./summit-kpi-to-decision-map.md) +- [Manager and Hiring Scorecards](./summit-manager-and-hiring-scorecards.md) +- [Meeting and Ritual Templates](./summit-meeting-and-ritual-templates.md) +- [Scenario Planning Playbook](./summit-scenario-planning-playbook.md) +- [Decision Log and Exception Template](./summit-decision-log-and-exception-template.md) +- [Product and GTM Interface Charter](./summit-product-and-gtm-interface-charter.md) +- [Customer Proof and Evidence Standards](./summit-customer-proof-and-evidence-standards.md) +- [Evidence Standards Appendix](./summit-evidence-standards-appendix.md) + +### Commercial and ecosystem +- [Sales Enablement Kit](../sales-toolkit/summit-sales-enablement-kit.md) +- [Customer Lifecycle Playbook](../sales-toolkit/summit-customer-lifecycle-playbook.md) +- [Customer Success Playbook](../sales-toolkit/summit-customer-success-playbook.md) +- [QBR and EBR Pack](../sales-toolkit/summit-qbr-and-ebr-pack.md) +- [Renewal and Expansion Playbook](../sales-toolkit/summit-renewal-and-expansion-playbook.md) +- [Enterprise Procurement Playbook](../sales-toolkit/summit-enterprise-procurement-playbook.md) +- [Enterprise Procurement Appendix](../sales-toolkit/summit-enterprise-procurement-appendix.md) +- [Partner Operations](../ecosystem/summit-partner-operations.md) +- [Partner Operations Appendix](../ecosystem/summit-partner-operations-appendix.md) +- [Pilot Offer Templates](../sales-toolkit/summit-pilot-offers.md) +- [Website Copy System](../pitch/summit-cogsec-website-copy-system.md) + +### Standards +- [Category Language and Glossary](../standards/summit-category-language-and-glossary.md) +- [Cognitive Security Protocol (CSP) v0.1](../standards/cognitive-security-protocol-v0.1.md) +- [Summit Evidence Protocol](../standards/summit-evidence-protocol.md) +- [Decision Execution Trace](../DECISION_EXECUTION_TRACE.md) +- [Decision Validity Protocol](../DECISION_VALIDITY_PROTOCOL.md) + +## Required Operating Cadence + +### Weekly +- founder priorities review +- pilot review +- pipeline review +- product and narrative review +- operating review with decisions, owners, and dates + +### Monthly +- finance and runway review +- customer health review +- partner review +- board-style written update + +### Quarterly +- board meeting +- strategy reset +- roadmap and hiring review +- proof and evidence audit + +## KPI Spine + +Track only metrics that change decisions: +- qualified pipeline +- pilot-to-paid conversion +- time to first value +- active governed workflows +- evidence completeness +- audit / replay success +- renewal and expansion signal +- burn and runway + +## Non-Negotiables + +- No unsupported claims. +- No customer proof without approval. +- No enterprise commitment without an artifact pack. +- No exception without an owner, expiry, and rationale. +- No category drift across product, GTM, board, or partner materials. + +## Practical Default + +If a team does not know what to do next, it should ask: +- what is the workflow? +- what is the decision boundary? +- what evidence is required? +- what record proves the decision was admissible? diff --git a/docs/ops/summit-cognitive-strategic-advantage-playbook.md b/docs/ops/summit-cognitive-strategic-advantage-playbook.md new file mode 100644 index 00000000000..490f8d4743b --- /dev/null +++ b/docs/ops/summit-cognitive-strategic-advantage-playbook.md @@ -0,0 +1,237 @@ +# Summit Cognitive Strategic Advantage Playbook + +**Status:** Active +**Owner:** Founders / GTM / Product / Governance +**Purpose:** Convert Summit's category position into a concrete strategic advantage plan spanning product, compliance packaging, go-to-market sequencing, and moat formation. + +## Strategic Thesis + +Summit should not compete as a point detector. Summit should win as the operating layer that connects: + +- workflow-embedded authenticity gates +- provenance-first verification +- decision-intercept controls +- outcome-labeled learning loops +- audit-ready assurance artifacts + +The category edge is not "detect fake content." The edge is "protect high-stakes decisions and prove why the decision path was admissible." + +## Why This Is Buyable Now + +The market is converging around four realities: + +1. synthetic media and impersonation now create direct operational and financial risk +2. buyers increasingly need marking, disclosure, verification, and logging rather than detectors alone +3. compliance expectations are becoming operational instead of aspirational +4. enterprises need one control plane that ties signals to decisions, actions, and evidence + +This creates a window for Summit to sell a governed workflow plus evidence system rather than a generic AI security product. + +## Core Strategic Advantage + +Summit compounds advantage if it productizes the connective tissue other vendors leave fragmented: + +1. cross-channel signal normalization +2. workflow gates at decision choke points +3. provenance verification when available +4. fallback forensic and behavioral controls when provenance is absent +5. evidence packs that map controls to procurement, audit, and legal review + +## Product Priorities + +### 1. Decision-intercept wedge + +Land where the buyer already has budget and where the cost of error is obvious: + +- payment authorization +- executive communications approval +- publication and disclosure approval +- fraud escalation and exception handling + +### 2. Provenance verification layer + +Treat provenance as a first-class decision input: + +- ingest signed provenance assertions +- verify chains and assertions +- display verification state in analyst and reviewer workflows +- log absence of provenance as a risk factor + +### 3. Assurance artifact factory + +Turn every protected workflow into exportable evidence: + +- marking configuration +- detectability tests +- disclosure logs +- verification outcomes +- policy mappings +- exceptions and approvals + +## Strategic Assets To Leverage + +The following are the highest-value internal assets Summit should keep reinforcing: + +- evidence-first workflow design +- deterministic and replayable governance controls +- machine-readable admissibility contracts and verified-property registries +- release and trust-chain verification posture +- category language around decision admissibility and decision integrity +- customer-proof and assurance-pack operating system + +These assets should converge into one protocol surface rather than remain narrative-only. The baseline protocol and property contract should be treated as sellable infrastructure. + +These are stronger foundations for a durable moat than any single classifier or dashboard. + +## Strategic Packages To Sell + +### Article 50 Readiness Pack + +Position as a packaged product outcome: + +- disclosure templates +- marking and detectability controls +- verification workflow +- evidence export for audit and procurement + +### Decision Integrity Pack + +Position as operational risk control: + +- step-up verification at decision intercepts +- hold / approve / escalate logic +- decision record with evidence and reviewer trace + +### Governance Assurance Pack + +Position as premium enterprise add-on: + +- policy mapping +- evidence bundle export +- control coverage reports +- recurring audit packet generation + +## Go-To-Market Sequence + +### Land + +Win the first workflow with a measurable "decision protection" story. + +### Expand + +Expand from the first protected workflow into adjacent workflows that share the same evidence and approval pattern. + +### Standardize + +Convert repeated customer asks into a repeatable compliance and assurance bundle. + +### Defend + +Use outcome-labeled workflow data and assurance artifacts as the moat, not just detection claims. + +## Competitive Counter-Position + +Against point detectors: + +- Summit does not stop at scoring content. +- Summit protects the decision path and records why it was safe to act. + +Against provenance-only providers: + +- Summit does not assume provenance is always present. +- Summit combines provenance with workflow controls and fallback evaluation. + +Against digital risk and takedown vendors: + +- Summit does not stop at incident disruption. +- Summit governs the upstream decision and exports downstream evidence. + +## Product Roadmap Sequence + +### 0-3 months + +- one decision-intercept MVP +- one audit-ready evidence export +- one compliance starter bundle + +### 3-12 months + +- provenance verification API and UI +- marking and detectability reference pipeline +- narrative early warning tied to comms workflows + +### 12-24 months + +- assurance artifact factory +- control mappings and governance exports +- outcome-labeled optimization loops + +## Metrics That Matter + +Track only metrics that strengthen the strategic position: + +- percentage of high-stakes decisions protected by a verification or authenticity gate +- mean time to detect +- mean time to mitigate +- evidence completeness +- disclosure compliance rate +- pilot-to-paid conversion +- workflow expansion rate +- assurance-pack attach rate + +## Risks And Countermeasures + +### Detector trap + +Risk: +- Summit gets pulled into a commodity model-quality race. + +Countermeasure: +- sell workflow control and evidence, not raw detection. + +### Services trap + +Risk: +- every deployment becomes bespoke compliance work. + +Countermeasure: +- package evidence bundles and accelerator packs as product surfaces. + +### Provenance unevenness + +Risk: +- provenance is not present consistently across channels. + +Countermeasure: +- provenance-first when present, evidence-first always. + +### Buyer fragmentation + +Risk: +- security, legal, comms, and risk each see only part of the value. + +Countermeasure: +- sell a single "decision integrity control plane" with persona-specific dashboards and one shared evidence system. + +## Messaging Spine + +Use this language consistently: + +- Summit protects decision integrity against synthetic media, impersonation, and cross-channel influence. +- Summit embeds verification and authenticity controls directly into high-stakes workflows. +- Summit produces audit-ready evidence packs that prove what happened, what was checked, and why the action was or was not allowed. + +Avoid: + +- generic AI safety language +- unsupported claims about truth +- positioning as a detector alone + +## Practical Default + +When deciding whether a strategic opportunity is on-strategy, ask: + +1. does it protect a real decision? +2. does it strengthen the evidence system? +3. does it help sell a repeatable compliance or assurance bundle? +4. does it compound data and workflow advantage instead of adding services-only load? diff --git a/docs/ops/summit-company-okr-system.md b/docs/ops/summit-company-okr-system.md new file mode 100644 index 00000000000..01544f9a6f5 --- /dev/null +++ b/docs/ops/summit-company-okr-system.md @@ -0,0 +1,73 @@ +# Summit Company OKR System + +**Status:** Active +**Owner:** Founders / Functional Leads +**Purpose:** Force focus on the few outcomes that move Summit toward product-market fit, paid deployments, retention, and controlled capital use. + +## OKR Philosophy + +- OKRs are for focus, not coverage +- company OKRs describe outcomes, not activity +- keep the set small enough to execute +- if a goal cannot change behavior, it is not an OKR +- Summit OKRs should tie directly to pilots, paid deployments, evidence, retention, and runway + +## Cascade + +- annual direction sets the 1-3 company priorities +- quarterly OKRs translate those priorities into 90-day outcomes +- team OKRs support company OKRs +- if a team cannot explain which company KR it moves, it should not exist + +## Writing Rules + +### Good OKRs +- objective is a clear outcome +- key result is measurable, time-bound, and attributable +- KRs use hard numbers, thresholds, or binary milestones + +### Bad OKRs +- tasks disguised as outcomes +- metrics with no threshold or deadline +- objectives that restate the mission +- too many KRs under one objective +- KRs that do not move the business + +## Scoring + +- score each KR from `0.0` to `1.0` +- `0.7` is strong execution in an early-stage context +- score by evidence, not sentiment +- score every KR at quarter end and decide keep, kill, or rewrite + +## Example Company OKRs + +### Objective 1 +Prove Summit can win and deploy in high-stakes AI workflows. + +- close paid deployments +- convert pilots to paid deployments +- achieve referenceable customers +- produce evidence-backed decision traces for deployed workflows + +### Objective 2 +Improve the speed and reliability of pilot-to-paid conversion. + +- cut pilot setup time +- reduce security review turnaround +- publish one standard pilot package +- increase pilot-to-paid conversion rate + +### Objective 3 +Protect runway while scaling the business. + +- keep burn within approved budget +- maintain runway above the operating threshold +- hire only into roles tied to proof milestones +- tie new spend to a measurable milestone or revenue outcome + +## Operating Standard + +- one company-level priority should be visible in every OKR set +- OKRs should drive decisions, not decorate the board deck +- if an OKR does not help the company decide what to do next, it is not useful diff --git a/docs/ops/summit-company-operating-manual.md b/docs/ops/summit-company-operating-manual.md new file mode 100644 index 00000000000..2f22315eccb --- /dev/null +++ b/docs/ops/summit-company-operating-manual.md @@ -0,0 +1,70 @@ +# Summit Company Operating Manual + +## Purpose + +This document defines how Summit runs as a company. It is the executive operating standard for priorities, decisions, artifacts, meetings, and accountability. + +## Core Operating Rules + +- Summit exists to make high-stakes cognitive work admissible, traceable, and defensible. +- Decision admissibility comes before speed. +- Evidence beats assertion. +- Traceability is required for important work. +- Clear commitments matter more than broad activity. +- Every meaningful decision should be written down. +- We do not defend legacy decisions; we improve the current standard. + +## How the Company Runs + +- Founders set mission, category, standards, and top priorities. +- Functional owners run their areas with clear scope and direct accountability. +- Cross-functional work has one owner, one reviewer, and one due date. +- Operational status lives in tracked systems; durable guidance lives in docs. +- Meetings exist to decide, unblock, or review evidence. +- If a topic does not change a decision, it should usually be async. +- Work is reviewed against outcomes, not effort. + +## Decision Discipline + +- Every decision has a named owner. +- The owner collects inputs, proposes a path, and makes the call. +- High-impact decisions should be written before execution. +- If a decision affects customers, security, delivery, or category positioning, it should be reviewed by the relevant lead. +- Disagreement is resolved by evidence, not volume. +- If a decision introduces risk, the rollback path must be stated up front. +- When in doubt, defer only if the deferment is explicit and time-bounded. + +## Artifact Discipline + +- If it matters, it gets written. +- Canonical guidance belongs in docs, not scattered messages. +- Live status belongs in tracked records, not handbook pages. +- Every major deliverable should have an owner, reviewer, and acceptance standard. +- Customer-facing artifacts must be checked for overclaiming and category drift. +- Technical artifacts must be checked for safety, traceability, and operational fit. +- If a page starts tracking rows, dates, or status, move that material into a database or operating log. + +## Meeting Philosophy + +- Meetings are for decisions, alignment, and unblockers. +- If a meeting has no decision or review objective, cancel it or make it async. +- Meetings should start with context, not history. +- The output of a meeting is a decision, an owner, a due date, and a written recap. +- Default to fewer, sharper meetings over more, longer meetings. +- If the same topic needs repeated discussion, the problem is usually missing clarity or missing documentation. + +## Non-Negotiables + +- No hidden work. +- No undocumented exceptions. +- No customer promise without an owner. +- No release without review. +- No critical decision without evidence. +- No policy bypass without explicit approval. +- No category drift in external language. +- No confusion between live status and canonical guidance. +- No work that cannot be explained, defended, and reversed if needed. + +## Final Operating Rule + +Summit should operate like a company that expects scrutiny. The standard is not whether something can be shipped. The standard is whether it can be trusted, explained, and sustained. diff --git a/docs/ops/summit-company-operating-rhythm.md b/docs/ops/summit-company-operating-rhythm.md new file mode 100644 index 00000000000..98cb9d0c09c --- /dev/null +++ b/docs/ops/summit-company-operating-rhythm.md @@ -0,0 +1,87 @@ +# Summit Company Operating Rhythm + +**Status:** Active +**Owner:** Founders +**Purpose:** Keep Summit aligned on priorities, execution, and decisions with a small number of recurring reviews. + +## Weekly Cadence + +### Leadership sync +- review KPI snapshot +- review top customer issues +- review active risks +- review launch and hiring blockers +- end with decisions, owners, and due dates + +### Functional reviews +- product +- engineering +- GTM +- customer success +- operations + +### Customer and pilot review +- customer progress +- evidence gaps +- risks +- next milestones + +## Monthly Cadence + +### Business review +- financials +- pipeline +- delivery +- customer health +- product progress +- risk register + +Outputs: +- budget changes +- hiring decisions +- roadmap adjustments +- risk escalations + +### KPI review +- check a small set of company KPIs against thresholds +- assign a decision to every red or persistent yellow item + +### Planning reset +- reconfirm top priorities +- defer what no longer matters + +## Quarterly Cadence + +### OKR planning +- set the few company outcomes that matter +- define owners and success criteria + +### Quarterly business review +- continue +- correct +- accelerate +- stop + +### Roadmap reset +- reallocate effort based on customer proof and business impact + +## Annual Planning Linkage + +- annual plan sets direction, capital posture, and milestones +- quarterly OKRs convert the annual plan into executable commitments +- monthly reviews test whether the annual plan is still credible + +## KPI-to-Decision Discipline + +- every KPI has an owner +- every KPI has a threshold +- every threshold maps to a decision or escalation +- if a metric does not change behavior, remove it + +## Meeting Rules + +- no meeting without a purpose +- use a written agenda +- decisions need owners and due dates +- if a recurring meeting is status-only for two cycles, cancel or convert it to async +- review the calendar quarterly and delete dead meetings diff --git a/docs/ops/summit-company-scorecard.md b/docs/ops/summit-company-scorecard.md new file mode 100644 index 00000000000..a5b98ea4d80 --- /dev/null +++ b/docs/ops/summit-company-scorecard.md @@ -0,0 +1,83 @@ +# Summit Company Scorecard + +**Status:** Active +**Owner:** Founders +**Purpose:** Single monthly view of company health. + +## Threshold Colors + +- `Green`: on plan or ahead +- `Yellow`: off plan but recoverable this quarter +- `Red`: requires founder intervention this month +- `Critical`: requires board or major operating decision now + +## Company-Level KPI Groups + +| KPI Group | Core Metrics | What It Tells Us | +|---|---|---| +| Revenue | ARR, net new ARR, bookings, pipeline coverage, win rate | Whether demand becomes revenue | +| Demand | Qualified pipeline, stage conversion, pilot volume, ICP meetings | Whether the market is responding | +| Delivery | Pilot-to-paid conversion, time to first value, deployment cycle time | Whether Summit turns interest into adoption | +| Retention / Expansion | GRR, NRR, renewal risk, expansion pipeline | Whether customers keep proving value | +| Product | Activation, usage depth, evidence completeness, admissibility coverage | Whether the product is used as intended | +| Security / Governance | Review turnaround, exception count, control coverage | Whether Summit remains deployable | +| Finance | Burn, net burn, runway, budget variance | Whether the company is capital-disciplined | +| Team | Headcount plan, hiring velocity, critical gaps | Whether execution capacity matches the plan | + +## Functional Scorecards + +### Product +- roadmap completion +- pilot success rate +- evidence completeness +- decision-admissibility coverage + +### GTM +- qualified pipeline created +- pilot-to-paid conversion +- sales cycle length +- stage hygiene + +### Customer Success +- GRR +- NRR +- renewal risk +- time to value + +### Engineering +- release reliability +- build and test health +- cycle time +- technical debt pressure + +### Security / Governance +- review turnaround +- open exceptions +- control coverage +- audit readiness + +### Finance +- burn +- runway +- budget variance +- spend-to-proof ratio + +### Partnerships +- partner-sourced pipeline +- partner conversion +- active partner quality +- rule compliance + +## Monthly Review Rules + +- start with red and critical items +- every yellow or red metric needs a cause, owner, mitigation, and next review date +- do not discuss metrics without a decision attached +- remove metrics that do not change behavior + +## Metric-to-Decision Discipline + +- every metric has an owner +- every metric has a threshold +- every threshold maps to a decision or escalation +- if a metric does not influence action, remove it diff --git a/docs/ops/summit-customer-proof-and-evidence-standards.md b/docs/ops/summit-customer-proof-and-evidence-standards.md new file mode 100644 index 00000000000..9ad872b9464 --- /dev/null +++ b/docs/ops/summit-customer-proof-and-evidence-standards.md @@ -0,0 +1,493 @@ +# Summit Customer Proof and Evidence Standards Manual + +**Version:** 0.1 +**Date:** 2026-03-31 +**Owner:** Governance / GTM / Product +**Scope:** Customer proof, evidence bundles, and publication standards + +## Purpose + +This manual defines what Summit accepts as customer proof, how evidence bundles must be structured, how proof quality is classified, who can approve publication, and how proof may be used across sales, customer success, board materials, product, and marketing. + +The rule is simple: Summit only publishes claims it can defend. + +## MAESTRO Layers + +- Foundation +- Data +- Agents +- Tools +- Infra +- Observability +- Security + +## Threats Considered + +- Publishing unsupported claims +- Mixing marketing language with operational evidence +- Exposing sensitive customer data in public materials +- Using stale metrics as current proof +- Reusing one customer’s evidence in another context without approval + +## Mitigations + +- Require explicit proof quality levels +- Store proof with provenance and review metadata +- Redact before publication, not after +- Separate internal evidence from customer-facing proof +- Tie every published claim to an approved source artifact + +## Proof Hierarchy + +Use the highest available level that is true, current, and approved. + +### Level 0: Unverified + +- Not usable outside private working notes. +- Examples: meeting note, draft hypothesis, raw observation with no source link. +- Allowed use: internal brainstorming only. + +### Level 1: Supported + +- A claim has a supporting artifact, but it has not been formally reviewed. +- Examples: screenshot with no approval, telemetry excerpt with no context, draft pilot note. +- Allowed use: internal review, product iteration. + +### Level 2: Reviewed + +- The claim and evidence were reviewed by the owner or functional lead. +- Examples: pilot readout signed by the sponsor, internal case study draft, customer-facing proof memo under review. +- Allowed use: sales enablement, customer success, board prep drafts. + +### Level 3: Approved + +- The proof is approved for use in customer-facing or board-facing materials. +- Examples: redacted case study, approved quote, verified metric summary, board-ready evidence pack. +- Allowed use: sales, CS, board, product planning, internal marketing. + +### Level 4: Publishable + +- The proof is approved for external publication and has a redaction and release record. +- Examples: website case study, public blog excerpt, partner one-pager, conference talk slide. +- Allowed use: marketing, public website, external partner materials. + +## What Counts As Acceptable Proof + +Acceptable proof must be specific, attributable, and time-bounded. + +### Accepted proof types + +- Product telemetry with timestamps and source context +- Customer-approved quotes or testimonials +- Signed pilot readouts or success memos +- Evidence bundles with source artifacts and provenance +- Before/after operational metrics with method notes +- Security or trust artifacts that show a control or process is in place +- Reproducible demo outputs or deterministic exports +- Board or exec summaries that cite underlying evidence + +### Strong proof examples + +- Pilot reduced review cycle time from X to Y, supported by a dated runbook, usage summary, and sponsor approval +- Evidence bundle exported successfully for every incident in scope, with manifest and verification hashes +- Decision trail remained intact through rollout, backed by approval logs and redacted screenshots + +### Weak proof examples + +- “Customers love it” +- “The platform is trusted” +- “We are enterprise-ready” +- “Users are more productive” without measurement +- “The pilot went well” without a readout +- A benchmark with no method, date, or dataset description + +## What Does Not Count As Proof + +- Anecdotes without an artifact +- Draft notes without approval +- Unreviewed screenshots +- Stale numbers with no date +- Synthetic claims without methodology +- Claims copied from another customer without permission +- Internal sentiment presented as fact + +## Evidence Bundle Requirements + +Every bundle should use the same basic structure. + +### Required sections + +1. Bundle header +2. Claim summary +3. Scope and context +4. Source artifacts +5. Metrics or observations +6. Provenance manifest +7. Review and approval record +8. Redaction record +9. Publication status +10. Retention / archive metadata + +### Required fields + +| Field | Description | +|---|---| +| Bundle ID | Unique bundle identifier | +| Title | Human-readable bundle name | +| Claim | The statement the bundle supports | +| Owner | Person responsible for the bundle | +| Date created | When the bundle was assembled | +| Date approved | When the bundle was approved | +| Audience | Internal, customer, board, public, or partner | +| Confidence level | Proof quality level | +| Source artifacts | Linked files, exports, logs, screenshots, metrics | +| Provenance | Who created or signed the source artifacts | +| Redaction state | Raw, redacted, or publishable | +| Publication status | Draft, approved, published, retired | +| Retention class | Internal only, time-limited, or archive | + +### Minimum artifact set + +- One primary claim +- At least one source artifact +- A provenance trail +- A reviewer or approver +- A date +- A redaction decision if the bundle may leave the company + +## Suggested Bundle Types + +### Pilot evidence bundle + +- Pilot scope +- Success criteria +- Usage or workflow output +- Sponsor approval +- Follow-up decision + +### Customer outcome bundle + +- Before/after state +- Measured improvement +- Method note +- Customer-approved summary + +### Security and trust bundle + +- Control description +- Verification result +- Review record +- Redaction status + +### Board evidence bundle + +- Summary narrative +- Underlying metrics +- Exceptions or risks +- Decision or ask + +### Marketing proof bundle + +- Redacted customer proof +- Approved quote or claim +- Method note +- Publication approval + +## Admissibility Artifact Standards + +For a bundle to be admissible internally or externally, it should contain: + +- One primary claim +- At least one source artifact +- A provenance trail +- A reviewer or approver +- A date +- A redaction decision if the bundle may leave the company + +### Required sections + +1. Bundle header +2. Claim summary +3. Scope and context +4. Source artifacts +5. Metrics or observations +6. Provenance manifest +7. Review and approval record +8. Redaction record +9. Publication status +10. Retention / archive metadata + +### Required fields + +| Field | Description | +|---|---| +| Bundle ID | Unique bundle identifier | +| Title | Human-readable bundle name | +| Claim | The statement the bundle supports | +| Owner | Person responsible for the bundle | +| Date created | When the bundle was assembled | +| Date approved | When the bundle was approved | +| Audience | Internal, customer, board, public, or partner | +| Confidence level | Proof quality level | +| Source artifacts | Linked files, exports, logs, screenshots, metrics | +| Provenance | Who created or signed the source artifacts | +| Redaction state | Raw, redacted, or publishable | +| Publication status | Draft, approved, published, retired | +| Retention class | Internal only, time-limited, or archive | + +## Customer-Claim Approval Rules + +### Owner review + +- The bundle owner verifies scope, source artifacts, and claim accuracy. + +### Functional review + +- Product reviews product claims. +- Sales reviews commercial language. +- Customer success reviews operational accuracy. +- Security or governance reviews trust-sensitive material. + +### Publication review + +- Any proof leaving the company requires approval from: + - Bundle owner + - Functional owner + - Security or governance reviewer if sensitive + +### Approval thresholds + +- Level 2 proof may be used internally after owner review. +- Level 3 proof requires named approval before reuse in sales, board, or product materials. +- Level 4 proof requires publication approval and redaction sign-off. + +### Mandatory stop conditions + +Do not publish if: + +- The metric date is missing +- The customer did not approve the quote or outcome +- The source artifact is stale +- The bundle contains unredacted sensitive data +- The claim outlives the evidence context +- The reviewer cannot explain where the number came from + +## Case-Study Proof Rules + +Case studies are the highest-visibility proof assets Summit publishes. They must be accurate, redacted, and reproducible from the source bundle. + +### Case-study requirements + +- The outcome is tied to one clear customer workflow +- The claim is backed by a reviewed evidence bundle +- The customer has approved the quote, metric, or summary +- The methodology is understandable to a buyer +- The story does not expose other customers or sensitive internal details + +### Case-study do nots + +- Do not turn a pilot anecdote into a universal claim +- Do not omit dates, scope, or method notes +- Do not publish before the source bundle is approved +- Do not include screenshots or logs that reveal restricted information + +## Redaction Rules + +### Redact before publication + +- Customer names if not approved +- User identifiers +- Secrets, tokens, credentials +- Sensitive incident details +- Internal-only security findings +- Non-public financial or contractual terms + +### Redaction methods + +- Replace sensitive fields with placeholders +- Crop screenshots to the minimum necessary area +- Remove file names that reveal private system details +- Use summary metrics instead of raw logs where possible + +### Redaction approval + +- Redaction must be verified by the bundle owner or security reviewer. +- Redaction state must be recorded in the bundle manifest. + +### Publication boundary + +- Internal bundles may contain more detail. +- Customer-facing bundles must be redacted. +- Public bundles must be the most constrained version and should preserve meaning without exposing sensitive context. + +## Publication Rules + +### Internal use + +- Internal proof may remain unredacted if access is controlled. +- Internal proof still needs ownership and review. + +### Customer-facing use + +- Must be approved and redacted. +- Must not reveal other customers’ data. +- Must reflect current product behavior. + +### Board use + +- May include sensitive detail if board materials are controlled. +- Still requires source traceability and date. + +### Public use + +- Requires the strictest review. +- Only publish what can be safely repeated outside the company. +- Use the least sensitive artifact that still proves the claim. + +## How Proof Should Be Used By Function + +### Sales + +Use proof to support discovery credibility, validate pilot scope, reduce procurement friction, and defend pricing and value. + +Do not exaggerate outcomes, reuse a claim outside its context, or share unapproved customer details. + +### Customer Success + +Use proof to demonstrate early value, track adoption and outcomes, and support renewal and expansion. + +Do not mix support anecdotes into formal proof without review or use old outcomes as current status. + +### Board + +Use proof to support strategic claims, show operating discipline, surface risk with evidence, and back hiring and budget decisions. + +Do not use marketing language instead of evidence or present aspirational metrics as actuals. + +### Product + +Use proof to prioritize the workflow that generated the proof, identify repeated friction, and validate that the product matches the promised behavior. + +Do not change product scope based only on a story or ignore proof that contradicts assumptions. + +### Marketing + +Use proof to build case studies, support website claims, and create customer briefs and partner collateral. + +Do not invent numbers, use customer logos or quotes without approval, or publish a claim without the source bundle. + +## Proof Source Hierarchy + +When claims conflict, use the highest-quality source available: + +1. Customer-approved signed readout +2. Verified product telemetry +3. Security or audit artifact +4. Support or implementation record +5. Draft note or internal summary + +If a lower-quality source contradicts a higher-quality source, the higher-quality source wins. + +## Evidence Bundle Workflow + +1. Create the claim. +2. Gather source artifacts. +3. Add provenance metadata. +4. Redact sensitive material if needed. +5. Assign a proof quality level. +6. Review by the owner. +7. Review by the functional lead if reused across teams. +8. Approve publication if external use is intended. +9. Store the final bundle and archive the draft. + +## Storage And Retention + +- Keep the canonical bundle in the governed docs or evidence store. +- Keep the approved published version separate from the working draft. +- Record publication date and expiry or review date. +- Retire stale bundles when the product or claim changes. + +## Evidence QA Checklist + +Use this checklist before any proof leaves the working draft state. + +- Claim is specific +- Scope is clear +- Evidence is linked +- Date is present +- Owner is named +- Reviewer is named +- Proof level is assigned +- Redaction is complete +- Audience is defined +- Publication status is set +- Retention class is set +- Customer approval is recorded if customer-facing +- Method note is present for metrics or outcomes +- No stale or conflicting numbers remain +- Bundle maps cleanly to the approved claim + +## Standard Bundle Checklist + +- Claim is specific +- Scope is clear +- Evidence is linked +- Date is present +- Owner is named +- Reviewer is named +- Redaction is complete +- Audience is defined +- Publication status is set +- Retention class is set + +## Standard Claim Language + +Use claims that are specific, measurable, attributable, time-bounded, and reusable only within the approved context. + +Prefer: + +- “In the pilot, review time dropped from X to Y.” +- “The bundle exported with complete provenance.” +- “The customer approved the workflow for the scoped use case.” + +Avoid: + +- “Best in class” +- “Industry-leading” +- “Always” +- “Never” +- “Works for everyone” + +## Operating Cadence + +- Weekly: review new proof candidates +- Monthly: reclassify proof quality and retire stale bundles +- Quarterly: audit published proof and update claims +- Per release or pilot: generate or refresh the relevant bundle + +## Ownership Rules + +| Asset Type | Owner | Reviewer | +|---|---|---| +| Customer proof bundle | Customer Success or GTM owner | Product lead | +| Product outcome proof | Product lead | Engineering lead | +| Security / trust proof | Security or governance owner | CTO or governance reviewer | +| Public case study | Marketing or GTM owner | Founder + governance reviewer | +| Board evidence pack | Finance / Founders | Board pack owner | + +## Summary Rules + +- Proof without evidence is not proof. +- Evidence without review is not publishable. +- Publication without redaction is not allowed. +- A claim cannot outlive its source context. +- If in doubt, downgrade the proof level and keep it internal. + +## Reference Docs + +- `docs/gtm/trust-center.md` +- `docs/gtm/case-study-summit-internal.md` +- `docs/ops/evidence-bundles.md` +- `docs/ops/release-readiness-dashboard.md` +- `docs/ops/summit-cogsec-release-change-management.md` +- `docs/sales/summit-cogsec-enterprise-customer-lifecycle-map.md` diff --git a/docs/ops/summit-decision-log-and-exception-template.md b/docs/ops/summit-decision-log-and-exception-template.md new file mode 100644 index 00000000000..b628b5582a4 --- /dev/null +++ b/docs/ops/summit-decision-log-and-exception-template.md @@ -0,0 +1,103 @@ +# Summit Decision Log and Exception Template + +Use this document to record material decisions and time-bound exceptions with clear ownership, rationale, and review dates. The goal is traceability, reversibility, and auditability. + +## Decision Log Entry Template + +| Field | Required | Notes | +|---|---:|---| +| decision_id | Yes | Stable unique identifier | +| date | Yes | Date the decision was made | +| owner | Yes | Person accountable for the decision | +| decision_type | Yes | Product, GTM, security, legal, finance, hiring, or operations | +| title | Yes | Short summary of the decision | +| context | Yes | Why the decision was needed now | +| options_considered | Yes | Alternatives reviewed | +| decision | Yes | Final choice made | +| rationale | Yes | Why this option was selected | +| impact | Yes | Expected business or operational effect | +| risks | Yes | Primary risks introduced | +| mitigations | Yes | How those risks are controlled | +| follow_up | Yes | Next action and owner | +| review_date | Yes | Date the decision should be revisited | +| status | Yes | open, active, superseded, or closed | + +## Exception Record Template + +| Field | Required | Notes | +|---|---:|---| +| exception_id | Yes | Stable unique identifier | +| linked_decision_id | Yes | Decision log entry this exception supports | +| exception_class | Yes | See exception classes below | +| requester | Yes | Person or team requesting the exception | +| approver | Yes | Person authorized to approve | +| scope | Yes | What is being exempted | +| reason | Yes | Why the exception is needed | +| duration | Yes | Time-bound period or expiry date | +| controls | Yes | Compensating controls in place | +| evidence | Yes | Proof supporting the exception | +| review_date | Yes | Next required review | +| expiry_date | Yes | When the exception automatically lapses | +| status | Yes | pending, approved, rejected, expired, revoked, or closed | + +## Exception Classes + +| Class | Use When | Default Expiry | +|---|---|---| +| security | A control is bypassed, narrowed, or delayed | 30 days | +| legal | A standard term or process is modified | 30 days | +| compliance | A policy or procedural requirement is handled outside the default path | 30 days | +| product | A non-standard product behavior is needed for a customer or pilot | 60 days | +| GTM | Pricing, scope, or process deviates from standard motion | 30 days | +| finance | Spend, discounting, or budget deviates from plan | 30 days | +| operations | A workflow, approval, or handoff changes temporarily | 30 days | + +## Expiry Rules + +- Every exception must have an expiry date. +- No exception may be open-ended. +- Expiry defaults by class unless a shorter period is assigned. +- Exceptions must be renewed explicitly if still needed. +- Expired exceptions are inactive until re-approved. +- High-risk exceptions should expire sooner, not later. +- If the underlying reason no longer exists, close the exception. + +## Review Cadence + +- Review active exceptions weekly. +- Review security, legal, and compliance exceptions in the weekly operating meeting. +- Review product, GTM, finance, and operations exceptions in the monthly leadership review. +- Review all exceptions older than 30 days in the monthly founder review. +- Close, renew, or revise every exception at its review date. + +## Operating Rules + +- Record the decision before or at the time it is made. +- Use an exception record whenever normal policy is not followed. +- Tie every exception to a named owner and approver. +- Keep scope narrow and duration short. +- Use compensating controls for every exception. +- Never approve an exception without an expiry date. +- Never let an exception become the new default without updating policy. +- If an exception repeats, convert it into a standard process or policy change. +- If a decision changes materially, create a new log entry rather than editing history. +- Use status transitions consistently: `pending` -> `approved` / `rejected` -> `active` -> `expired` / `revoked` -> `closed`. + +## Example Entry + +| Field | Example | +|---|---| +| decision_id | DEC-2026-031 | +| date | 2026-03-31 | +| owner | Founder | +| decision_type | security | +| title | Approve time-bound access exception for pilot integration | +| context | Pilot requires temporary elevated access to validate workflow | +| options_considered | deny, narrow access, approve with controls | +| decision | Approve with controls | +| rationale | Narrow scope preserves security while unblocking the proof milestone | +| impact | Pilot can proceed on schedule | +| risks | Temporary exposure beyond standard policy | +| mitigations | Read-only access, logging, 30-day expiry | +| review_date | 2026-04-15 | +| status | active | diff --git a/docs/ops/summit-department-operating-charters.md b/docs/ops/summit-department-operating-charters.md new file mode 100644 index 00000000000..181854f511c --- /dev/null +++ b/docs/ops/summit-department-operating-charters.md @@ -0,0 +1,206 @@ +# Summit Department Operating Charters + +## Purpose + +This document defines the operating charter for each core Summit function. It is meant to keep ownership clear, preserve decision discipline, and reduce cross-functional drift. + +## Product + +### Mission + +Define what Summit builds, why it exists, and how decision-admissibility and cognitive-security value shows up in the product. + +### Rights + +- Own product strategy, priorities, and roadmap sequencing +- Define product requirements and acceptance criteria +- Reject work that does not fit the product thesis or quality bar +- Decide product tradeoffs with engineering input + +### Outputs + +- Product strategy and roadmap +- PRDs or scoped feature briefs +- UX / workflow definitions +- Release acceptance standards +- Product review notes and prioritization decisions + +### Anti-Patterns + +- Building based on loud requests instead of strategic fit +- Shipping features without a clear user outcome +- Letting roadmap drift become the default +- Overpromising on product behavior or policy enforcement + +## GTM + +### Mission + +Create demand, qualify fit, and move the right customers through discovery, pilot, and close with accurate category language. + +### Rights + +- Own messaging, positioning, and pipeline strategy +- Qualify or disqualify opportunities +- Shape pilot framing and commercial packaging within approved bounds +- Control customer-facing narrative and objection handling + +### Outputs + +- Messaging and talk tracks +- Discovery notes and deal inspection +- Pipeline and forecast updates +- Pilot proposals and commercial summaries +- Customer-facing follow-up and next-step plans + +### Anti-Patterns + +- Selling Summit as a generic AI product +- Advancing bad-fit deals +- Overstating capability to win interest +- Treating activity as qualification + +## Customer Success + +### Mission + +Drive adoption, value realization, and retention by making Summit operational inside the customer’s workflow. + +### Rights + +- Own onboarding, health scoring, and adoption management +- Escalate product or process issues that block value +- Shape success criteria and QBR narrative +- Recommend expansion, stabilization, or intervention + +### Outputs + +- Onboarding plans and success plans +- Health scorecards and weekly notes +- QBR / EBR materials +- Escalation logs and action plans +- Renewal readiness and expansion recommendations + +### Anti-Patterns + +- Measuring activity instead of adoption +- Treating blocked customers as "fine" +- Allowing unclear success criteria +- Hiding risk until renewal time + +## Security / Governance + +### Mission + +Protect trust, evidence, policy integrity, and decision auditability across product and operations. + +### Rights + +- Define security and governance requirements +- Approve or reject risk-sensitive exceptions +- Set evidence and traceability standards +- Escalate policy or control issues to founders + +### Outputs + +- Policies and guardrails +- Risk reviews and exception records +- Threat models and control mappings +- Audit / evidence standards +- Escalation decisions and mitigation plans + +### Anti-Patterns + +- Treating governance as a paperwork exercise +- Allowing exceptions without a record +- Weakening controls for speed +- Confusing policy guidance with implementation detail + +## Engineering + +### Mission + +Build reliable, maintainable, testable systems that support Summit’s product and control objectives. + +### Rights + +- Own implementation, technical architecture, and code quality +- Define technical feasibility and release readiness +- Reject unsafe, brittle, or untestable designs +- Select technical approaches within product requirements + +### Outputs + +- Implemented features and fixes +- Technical design notes +- Tests, validation, and release evidence +- Runbooks and operational docs +- Incident follow-up and root-cause notes + +### Anti-Patterns + +- Shipping without tests or validation +- Accepting hidden complexity as normal +- Treating architecture debt as invisible +- Building features that cannot be operated safely + +## Partnerships + +### Mission + +Create high-leverage external relationships that expand Summit’s reach, credibility, or distribution without weakening the product or category. + +### Rights + +- Own partner identification and relationship development +- Define partner fit and partnership structure +- Recommend collaboration models and boundaries +- Escalate partnership risks or strategic conflicts + +### Outputs + +- Partner evaluation notes +- Joint opportunity summaries +- Integration or referral plans +- Partner-facing collateral and alignment notes +- Relationship status and next-step plans + +### Anti-Patterns + +- Pursuing logos without strategic value +- Overcommitting on integrations or co-selling +- Allowing partner asks to distort product priorities +- Treating "interest" as a signed relationship + +## Founders + +### Mission + +Set the company’s direction, standards, category language, and highest-stakes tradeoffs. + +### Rights + +- Own mission, category, and positioning +- Decide major pivots and strategic priorities +- Approve senior hires and major commercial exceptions +- Set company-wide standards and non-negotiables +- Resolve conflicts that cross functions or affect trust + +### Outputs + +- Strategy and operating direction +- Priority resets and tradeoff decisions +- Hiring and org decisions +- Customer and market judgment calls +- Company standards and final escalation rulings + +### Anti-Patterns + +- Becoming the approval bottleneck +- Staying too deep in routine execution +- Making strategy without clear follow-through +- Allowing the company to drift on language, quality, or focus + +## Shared Operating Rule + +Each function owns its outputs, but no function owns the company in isolation. When decisions cross boundaries, the owner must surface the issue early, document the tradeoff, and route it to the right authority. diff --git a/docs/ops/summit-evidence-standards-appendix.md b/docs/ops/summit-evidence-standards-appendix.md new file mode 100644 index 00000000000..de47d146bbc --- /dev/null +++ b/docs/ops/summit-evidence-standards-appendix.md @@ -0,0 +1,223 @@ +# Summit Evidence Standards Appendix + +**Version:** 0.1 +**Date:** 2026-03-31 +**Owner:** Governance / GTM / Product + +## Purpose + +This appendix provides the working artifacts for Summit proof handling: + +- Evidence bundle template +- Claim substantiation sheet +- Redaction standard +- Publication approval workflow +- Proof expiry and revalidation rules + +Use this appendix with [Summit Customer Proof and Evidence Standards Manual](summit-customer-proof-and-evidence-standards.md). + +## 1. Evidence Bundle Template + +### Bundle header + +| Field | Required | +|---|---| +| Bundle ID | Yes | +| Title | Yes | +| Claim | Yes | +| Owner | Yes | +| Date created | Yes | +| Date approved | Yes | +| Audience | Yes | +| Proof level | Yes | +| Publication status | Yes | +| Retention class | Yes | + +### Bundle sections + +1. Claim summary +2. Scope and context +3. Source artifacts +4. Metrics or observations +5. Provenance manifest +6. Review record +7. Redaction record +8. Publication status +9. Retention / archive metadata + +### Minimum contents + +- Primary claim +- Source artifact links +- Date +- Owner +- Reviewer +- Proof level +- Redaction decision if external use is possible + +## 2. Claim Substantiation Sheet + +Use this sheet for every claim before it is reused outside the drafting team. + +| Field | Prompt | +|---|---| +| Claim | What exactly are we saying? | +| Scope | What customer, workflow, or time window does it cover? | +| Source artifact | What file, metric, export, or quote supports it? | +| Date | When was the evidence observed or approved? | +| Method | How was the number or outcome produced? | +| Owner | Who is accountable for the claim? | +| Reviewer | Who reviewed the claim? | +| Audience | Internal, customer, board, public, or partner? | +| Proof level | Unverified, Supported, Reviewed, Approved, or Publishable | +| Redaction needed | Yes / No | +| Expiry date | When does this claim need revalidation? | + +### Substantiation rules + +- Every public or customer-facing claim must have a completed sheet. +- Every metric must include a method note. +- Every quote must include the person, date, and approval state. +- Every outcome claim must identify the workflow and time window. + +## 3. Redaction Standard + +### Redact always + +- Secrets, tokens, credentials +- Unapproved customer names +- User identifiers +- Internal-only security findings +- Non-public financial or contractual terms +- Sensitive incident details that are not necessary to prove the claim + +### Redact by default + +- System paths that reveal internal topology +- Raw logs where summary metrics are sufficient +- Screenshots that expose unrelated customer data +- Timestamps or metadata that enable correlation to private incidents + +### Redaction methods + +- Replace with placeholders +- Crop to the minimum useful region +- Summarize rather than quote raw output +- Remove adjacent context that exposes private information + +### Redaction approval + +- The bundle owner verifies the redaction. +- Security or governance reviews redaction for sensitive material. +- The approved redacted version becomes the publishable version. + +## 4. Publication Approval Workflow + +### Step 1: Draft + +- Assemble the bundle. +- Complete the substantiation sheet. +- Assign a proof level. + +### Step 2: Owner review + +- Owner checks scope, source artifacts, and date. +- Owner confirms the claim is accurate and bounded. + +### Step 3: Functional review + +- Product reviews product claims. +- Sales reviews commercial claims. +- Customer success reviews operational claims. +- Security or governance reviews trust-sensitive claims. + +### Step 4: Redaction review + +- Remove customer-sensitive or internal-only content. +- Confirm the redacted version still proves the claim. + +### Step 5: Publication approval + +- Approver signs off on use for the target audience. +- Publication status is set to Approved or Publishable. + +### Step 6: Archive + +- Store the final bundle and substantiation sheet. +- Retire the draft. +- Record the publication date. + +### Approval matrix + +| Audience | Minimum approvals | +|---|---| +| Internal | Owner | +| Sales / CS | Owner + functional reviewer | +| Board | Owner + functional reviewer + board pack owner | +| Public | Owner + functional reviewer + governance/security reviewer | + +## 5. Proof Expiry And Revalidation Rules + +### Expiry rules + +- Metrics expire after 90 days unless revalidated. +- Customer quotes expire after 12 months unless reaffirmed. +- Pilot readouts expire at pilot close unless the result is explicitly reused. +- Security and trust proof expires when the control, deployment, or policy changes. +- Board evidence expires at the next board cycle unless refreshed. + +### Revalidation triggers + +- Product behavior changed +- Customer scope changed +- Metric source changed +- Evidence bundle was redacted differently +- A new release affects the workflow +- The claim is being reused for a new audience + +### Revalidation process + +1. Re-open the substantiation sheet. +2. Confirm the source artifact still exists and still matches the claim. +3. Refresh the date and method note if needed. +4. Re-run redaction if the target audience changed. +5. Re-approve the claim if it will be reused externally. + +### Invalidation rules + +Invalidate proof immediately if: + +- The source artifact is no longer available +- The customer withdraws approval +- The metric is no longer current +- The claim is broader than the evidence +- The product or deployment changed in a way that breaks the original context + +## 6. Operator Checklist + +Before using a proof asset, confirm: + +- Claim is specific +- Scope is bounded +- Source artifact is attached +- Proof level is assigned +- Redaction is complete +- Approval matches the audience +- Expiry date is current +- Method note exists for any metric +- No stale or conflicting numbers remain + +## 7. File and Storage Guidance + +- Keep working drafts separate from approved bundles. +- Keep publishable proof in the governed evidence store or docs area. +- Keep redaction notes with the approved bundle. +- Keep substantiation sheets for auditability. + +## Reference Docs + +- `docs/ops/summit-customer-proof-and-evidence-standards.md` +- `docs/ops/evidence-bundles.md` +- `docs/gtm/trust-center.md` +- `docs/gtm/case-study-summit-internal.md` + diff --git a/docs/ops/summit-finance-runway-operating-pack.md b/docs/ops/summit-finance-runway-operating-pack.md new file mode 100644 index 00000000000..035ee3f914d --- /dev/null +++ b/docs/ops/summit-finance-runway-operating-pack.md @@ -0,0 +1,87 @@ +# Summit Finance and Runway Operating Pack + +**Status:** Active +**Owner:** Founders / Finance +**Purpose:** Make cash, burn, runway, hiring, and fundraising decisions explicit and reviewable. + +Appendix: +- [Finance Workbook Spec](./summit-finance-workbook-spec.md) +- [Fundraising Process Appendix](./summit-fundraising-process-appendix.md) + +## Workbook Tabs + +- `README` +- `Assumptions` +- `Revenue` +- `COGS` +- `Opex` +- `Headcount` +- `CashFlow` +- `Runway` +- `Scenarios` +- `HiringGates` +- `SpendApprovals` +- `ForecastVsActual` +- `FundraisingTriggers` +- `BoardUpdate` + +## Core Logic + +- `Ending cash = Opening cash + inflows - outflows` +- `Net burn = outflows - inflows` +- `Runway months = ending cash / average monthly net burn` +- separate collections lag from revenue recognition +- include taxes, benefits, tools, and contractor load in headcount cost +- scenario tabs reuse the same formulas and only change assumptions + +## Monthly Finance Review + +- cash, burn, and runway +- bookings, renewals, expansion, and churn +- gross margin movement +- opex variances +- headcount status +- scenario check +- hiring decisions +- spend approvals +- fundraising trigger status +- decisions, owners, and due dates + +## Spend Approval Matrix + +| Spend Level | Approval Required | Notes | +|---|---|---| +| Routine in-budget spend | Function owner | Normal operating spend | +| Material in-budget spend | Function owner + finance | Requires budget check | +| Out-of-budget or larger spend | Founder approval | Requires written rationale | +| Major spend or long-term commitment | Founder + finance + function owner | Requires ROI, timing, and rollback path | + +## Hiring Gate Logic + +Approve a role only if it is tied to one of: +- qualified pipeline and account load +- onboarding or support capacity +- product delivery bottleneck +- security, finance, or reporting control requirement + +Every planned role needs: +- trigger condition +- owner +- expected outcome +- defer rule if the trigger is not met + +## Fundraising Triggers + +Start a raise when: +- base-case runway falls below the planning floor +- downside runway falls below the safety floor +- strategic milestones require capital that current burn cannot support +- hiring plan materially compresses runway + +## Rules + +- Update monthly on a fixed cadence. +- Separate assumptions from outputs. +- Do not hide one-time spend in recurring lines. +- Every major variance needs a written explanation. +- Every hiring and spend decision should show its effect on runway. diff --git a/docs/ops/summit-finance-workbook-spec.md b/docs/ops/summit-finance-workbook-spec.md new file mode 100644 index 00000000000..c6715693744 --- /dev/null +++ b/docs/ops/summit-finance-workbook-spec.md @@ -0,0 +1,80 @@ +# Summit Finance Workbook Spec + +**Status:** Active +**Owner:** Founders / Finance +**Purpose:** Exact workbook structure for cash, burn, runway, hiring, and fundraising review. + +## Workbook Tabs + +| Tab | Purpose | +|---|---| +| `README` | Owner, update cadence, version log | +| `Assumptions` | Revenue, collections, payroll, inflation, spend growth | +| `Revenue` | Monthly revenue model | +| `COGS` | Direct cost to serve | +| `Opex` | Operating spend by function | +| `Headcount` | Current roles, planned hires, start dates, fully loaded cost | +| `CashFlow` | Opening cash, inflows, outflows, ending cash | +| `Runway` | Base and downside runway | +| `Scenarios` | Base, upside, downside, stress | +| `HiringGates` | Hire approval and deferral logic | +| `SpendApprovals` | Spend thresholds and approvers | +| `ForecastVsActual` | Variances and explanations | +| `FundraisingTriggers` | Raise-readiness conditions | +| `BoardUpdate` | Investor-ready export | + +## Key Logic + +- `Ending cash = Opening cash + cash inflows - cash outflows` +- `Net burn = total cash outflows - cash inflows` +- `Runway months = ending cash / average monthly net burn` +- collections timing must be modeled separately from revenue recognition +- headcount cost includes salary, taxes, benefits, tools, and contractor load +- one-time spend stays separate from recurring spend + +## Monthly Finance Review + +- cash balance, burn, and runway +- revenue, renewals, expansion, and churn +- COGS and gross margin +- opex variances +- hiring status +- scenario check +- spend approvals +- fundraising trigger status +- decisions and owners + +## Spend Approval Matrix + +| Spend Level | Approval Required | Rule | +|---|---|---| +| Routine in-budget spend | Function owner | Standard spend | +| Material in-budget spend | Function owner + finance | Must be explained against budget | +| Larger or out-of-budget spend | Founder approval | Requires business case | +| Major spend or long commitment | Founder + finance + function owner | Requires ROI and rollback path | + +## Hiring Gate Logic + +- Approve a role only if it changes growth, risk, or operational capacity. +- Every role needs a trigger condition. +- Every role needs a deferral rule. +- Revenue hires need pipeline or customer-load evidence. +- CS hires need onboarding or support-load evidence. +- Product or engineering hires need sustained delivery or reliability pressure. + +## Fundraising Trigger Thresholds + +- base-case runway below planning floor +- downside runway below safety floor +- current capital cannot reach the next milestone +- planned hiring materially compresses runway + +## Output + +Each monthly cycle should end with: +- updated forecast +- approved or deferred hires +- approved or deferred spend +- refreshed runway estimate +- fundraising trigger status +- short decision log diff --git a/docs/ops/summit-founder-playbook.md b/docs/ops/summit-founder-playbook.md new file mode 100644 index 00000000000..89e2f5595c9 --- /dev/null +++ b/docs/ops/summit-founder-playbook.md @@ -0,0 +1,74 @@ +# Summit Founder Playbook + +**Status:** Active +**Owner:** Founders +**Purpose:** Define what founders own, where founder time should go, and how founders avoid becoming the bottleneck. + +## What Founders Own + +- mission and category definition +- positioning and anti-positioning +- product direction and major tradeoffs +- pricing, packaging, and commercial posture +- senior hiring and leadership quality +- customer commitments outside standard scope +- security, governance, and trust-sensitive decisions +- company-wide priorities and resource allocation +- major pivots, exceptions, and reversals + +## Where Founders Should Spend Time + +- customer discovery with strategic accounts +- product strategy and roadmap review +- hiring for critical roles +- revenue quality and deal inspection +- category narrative and external language +- security, governance, and compliance decisions +- blockers that cross functions +- exceptions that could create precedent + +## Founder Calendar + +### Weekly rhythm +- Monday: company priorities, blockers, top risks +- Tuesday: customer calls and market discovery +- Wednesday: product review and execution follow-up +- Thursday: hiring, partnerships, or strategy +- Friday: decision review and next-week planning + +### Monthly review +- progress against priorities +- runway, revenue, and customer health +- team health and hiring gaps +- where founders are still too involved + +### Quarterly review +- reset priorities +- review category and positioning +- review strategic risks and assumptions +- reassign decision ownership + +## Anti-Bottleneck Rules + +- delegate repeatable decisions +- write rules once, then enforce them +- reserve founder review for strategic, risky, or irreversible calls +- reduce the number of items waiting on founders each week +- replace repeated discussions with clearer standards +- say no early to work that adds hidden drag + +## Common Failure Modes + +- founders become the approval bottleneck +- founders stay too close to execution and too far from strategy +- customer promises are made before feasibility is checked +- senior decisions are made casually instead of explicitly +- urgent work crowds out strategic work +- category language drifts outside founder control + +## Operating Rules + +- if it is strategic, risk-bearing, or category-setting, founders own it +- if it is repeatable, it should become a system +- if it happens often, it should not depend on memory +- if the company is confused, founders restore clarity diff --git a/docs/ops/summit-fundraising-and-data-room.md b/docs/ops/summit-fundraising-and-data-room.md new file mode 100644 index 00000000000..fa0216325ef --- /dev/null +++ b/docs/ops/summit-fundraising-and-data-room.md @@ -0,0 +1,93 @@ +# Summit Fundraising and Data Room + +**Status:** Active +**Owner:** Founders +**Purpose:** Standardize investor narrative, diligence handling, room structure, and update cadence. + +Appendix: +- [Fundraising Process Appendix](./summit-fundraising-process-appendix.md) + +## Investor Narrative + +Summit sits between information and action. The company reduces decision risk by attaching evidence, provenance, and rationale to high-stakes workflows. + +The investor story should stay consistent: +- the wedge is decision admissibility +- the product is trust infrastructure for high-consequence decisions +- proof comes from one named workflow first +- expansion comes from adjacent workflows and repeatable evidence + +## Data Room Structure + +```text +00_Index +01_Company +02_Product +03_GTM +04_Financials +05_Legal_Security +06_Team +07_Customers_Pilots +08_Proof_Artifacts +09_Investor_Updates +10_QA_Risk +11_Archive +``` + +## Minimum Investor-Ready Set + +- deck +- one-page company overview +- financial model +- cap table +- product overview +- security overview +- customer or pilot summaries +- metrics snapshot +- monthly investor update +- FAQ / risk log + +## Investor Pipeline Stages + +- `Targeted` +- `Contacted` +- `First Meeting` +- `Follow-Up` +- `Diligence` +- `Partner Meeting` +- `Verbal / Terms` +- `Commit / Close` + +## Follow-Up System + +- log notes same day +- send recap within 24 hours +- attach one explicit next step +- update the pipeline stage immediately +- add every diligence ask to the tracker + +## Diligence Tracker Fields + +- request +- owner +- due date +- status +- source link +- notes + +## Monthly Investor Update + +- highlights +- metrics +- risks / blockers +- asks +- next month priorities + +## Rules + +- one current deck +- one current model +- one current data room +- one pipeline tracker +- no unsupported claim in investor materials +- every meaningful deck claim should map to a proof artifact diff --git a/docs/ops/summit-fundraising-process-appendix.md b/docs/ops/summit-fundraising-process-appendix.md new file mode 100644 index 00000000000..9047a396487 --- /dev/null +++ b/docs/ops/summit-fundraising-process-appendix.md @@ -0,0 +1,64 @@ +# Summit Fundraising Process Appendix + +**Status:** Active +**Owner:** Founders +**Purpose:** Working templates and process details for investor pipeline management and diligence handling. + +## Investor Pipeline Stages + +| Stage | Definition | Exit Criteria | Owner | +|---|---|---|---| +| Targeted | Investor fits thesis and check size | Intro path identified | CEO | +| Contacted | Outreach sent or intro made | Reply or meeting booked | CEO | +| First Meeting | Summit story introduced | Investor understands wedge and why now | CEO | +| Follow-Up | Tailored materials sent | Investor requests diligence or second meeting | CEO | +| Diligence | Data room and questions active | Core asks submitted and reviewed | CEO / Ops | +| Partner Meeting | Broader team or IC review | Internal support is building | CEO | +| Verbal / Terms | Soft commit or terms discussion | Economics and structure aligned | CEO | +| Commit / Close | Docs signed, funds wired | Close checklist complete | CEO | + +## Meeting Follow-Up System + +- log notes the same day +- send recap within 24 hours +- attach one specific next step +- update pipeline stage immediately +- add diligence asks to the tracker + +## Diligence Tracker Fields + +- request +- owner +- due date +- status +- source link +- notes + +## Monthly Investor Update Template + +- highlights +- metrics +- risks / blockers +- asks +- next month + +## Raise Operating Rules + +- maintain one current deck, one current model, and one current data room +- reconcile metrics before sharing +- use one pipeline tracker +- send follow-ups within 24 hours +- never make a claim you cannot source +- keep category language and proof points consistent +- log every investor question and requested artifact + +## Close Checklist + +- investor fit confirmed +- meeting notes captured +- follow-up sent +- diligence items tracked +- data room current +- risks disclosed +- next step dated +- pipeline status updated diff --git a/docs/ops/summit-internal-academy-and-onboarding.md b/docs/ops/summit-internal-academy-and-onboarding.md new file mode 100644 index 00000000000..a63512579a2 --- /dev/null +++ b/docs/ops/summit-internal-academy-and-onboarding.md @@ -0,0 +1,84 @@ +# Summit Internal Academy and Onboarding + +## Purpose + +Bring new hires to useful, accountable output fast. The goal is role readiness, operational clarity, and a shared language for Summit’s category, product, and execution model. + +## Core Modules + +| Module | Covers | Completion standard | +|---|---|---| +| Company and category | Mission, decision-admissibility, cognitive-security framing, target buyers, wedge workflows | Can explain Summit in plain language and name the first workflow | +| Product and workflow model | Core surfaces, evidence trail, review flow, pilot structure, non-goals | Can walk through one live workflow end to end | +| Customer and market | ICP, use cases, buying triggers, objections, replacement stories | Can identify a real buyer pain and how Summit fits | +| Operating system | Notion, CRM, decision log, escalation rules, meeting rhythms, weekly updates | Can operate within the team’s standard cadence | +| Security, legal, and procurement | Data handling, review posture, redlines, procurement flow, approved claims | Can follow the approval path without supervision | +| Evidence discipline | Proof artifacts, claim substantiation, source-of-truth discipline, update hygiene | Can produce work that is usable by others | +| Communication standards | Internal updates, escalation format, follow-up discipline, meeting recaps | Can communicate clearly and briefly in writing | + +## Role-Based Tracks + +| Track | Focus | Required output | +|---|---|---| +| GTM / Pilot | Discovery, objection handling, pilot design, follow-up, account mapping | One pilot plan and one live customer recap | +| Product / Workflow Analyst | Workflow mapping, requirements, evidence model, prioritization | One workflow map and one recommendation memo | +| Customer Success | Onboarding, adoption, renewal readiness, expansion signals | One account plan and one adoption review | +| Applied Engineer | Integrations, deployment, debugging, provenance, supportability | One working implementation or technical artifact | +| Partner / Alliances | Partner categories, joint motion, ecosystem fit, activation | One partner map and one outreach plan | +| Manager / Lead | Coaching, prioritization, team rituals, escalation, hiring judgment | One team operating plan and one review cadence | + +## 30 / 60 / 90 Expectations + +| Timeframe | Expectations | Pass condition | +|---|---|---| +| 30 days | Learn the category, tools, workflows, and standards; ship at least one useful artifact | Can explain Summit clearly and complete work with moderate supervision | +| 60 days | Own a recurring workstream; make tradeoffs; escalate earlier; improve one process | Produces reliable output and handles a real slice of ownership | +| 90 days | Operate independently in the role; contribute to decisions; create reusable artifacts or process | Can be trusted with an ongoing domain and clear outcomes | + +## Certification Checkpoints + +| Checkpoint | Evaluated | Sign-off | +|---|---|---| +| Academy completion | Core modules finished and understood | Manager | +| Role readiness | Can perform the main role tasks on a real workflow | Functional lead | +| Communication standard | Can write updates, recaps, and escalations in Summit format | Manager | +| Evidence discipline | Uses source-of-truth docs and substantiates claims | Functional lead | +| 90-day operating review | Ownership, judgment, quality, reliability | Manager + founder sponsor | + +### Certification criteria + +- Can explain the company, category, and first workflow without a script. +- Can complete role-specific work to a usable standard. +- Can follow the operating cadence without reminders. +- Can escalate blockers with context and a clear ask. +- Can produce artifacts that others can reuse. + +## Refresh Cadence + +| Cadence | Refreshes | Owner | +|---|---|---| +| Weekly | Current priorities, blockers, learning gaps | Manager | +| Monthly | Module content, role tracks, examples from live work | Functional lead | +| Quarterly | Academy standards, onboarding flow, certification criteria | Founders | +| On major product or market change | Category language, workflows, proof artifacts, procedures | Relevant lead | + +## Minimum Onboarding Pack + +- Company overview +- Role charter +- Workflow map +- Operating cadence +- Decision log guide +- Security / legal / procurement guide +- Communication and escalation guide +- 30 / 60 / 90 plan +- Certification checklist + +## Operating Rules + +- Keep onboarding short and tied to live work. +- Every module should end in an artifact or demonstrated behavior. +- Use real Summit workflows, not generic training examples. +- Do not certify someone who cannot operate in the team’s actual cadence. +- Refresh content when the company changes, not on a fixed calendar alone. +- Keep ownership explicit so onboarding does not become tribal knowledge. diff --git a/docs/ops/summit-internal-communications-and-escalation-playbook.md b/docs/ops/summit-internal-communications-and-escalation-playbook.md new file mode 100644 index 00000000000..ac2fec5a6e8 --- /dev/null +++ b/docs/ops/summit-internal-communications-and-escalation-playbook.md @@ -0,0 +1,78 @@ +# Summit Internal Communications and Escalation Playbook + +**Status:** Active +**Owner:** Founders / Ops +**Purpose:** Keep communication fast, clear, and accountable. + +## Channels + +| Channel | Use For | Response Expectation | +|---|---|---| +| `#company` | Company-wide updates, major decisions, launches | Read-only unless asked | +| `#leadership` | Cross-functional decisions, priority shifts, material risks | Same day | +| `#gtm` | Pipeline, pilots, customer asks, deal blockers | Same day | +| `#product` | Product decisions, workflow issues, roadmap questions | Same day | +| `#eng` / `#security` | Technical incidents, bugs, security issues | Immediate for incidents | +| `#ops` | Internal process, logistics, scheduling, admin | Within 1 business day | +| Email | External-facing or durable records | Within 1 business day unless urgent | +| Decision log / Notion | Final decisions, approvals, exceptions | Required for material decisions | + +## Message Types + +- FYI +- request +- decision +- escalation +- incident +- approval +- exception + +## Escalation Triggers + +- customer or prospect risk threatens revenue, trust, or timeline +- a security issue may affect data, access, or integrity +- a deal is blocked on a leadership decision +- a pilot is at risk of missing success criteria or launch date +- a product issue breaks a named workflow or evidence trail +- a legal, procurement, or compliance issue threatens signature +- a deadline is likely to slip materially +- a claim, metric, or deliverable cannot be substantiated + +## Required Escalation Fields + +- what happened +- why it matters +- what is blocked +- owner +- requested decision or action +- deadline +- impact +- current mitigation +- what happens if we do nothing +- links to evidence or context + +## Response Rules + +- use the smallest channel that can still create a durable record +- if the issue affects customers, revenue, security, or deadlines, do not bury it in chat +- if you raise a blocker, propose at least one next step +- if you are asked for a decision, answer with a decision +- close the loop in writing for any material decision or exception +- do not use DMs to avoid accountability on important work + +## Escalation SLA + +| Severity | Definition | Required Response | +|---|---|---| +| P0 | Customer outage, security issue, active deal collapse, legal blocker | Immediate acknowledgment and active response | +| P1 | Material blocker with near-term deadline | Same business day | +| P2 | Important risk or dependency | Within 1 business day | +| P3 | Informational or low urgency | Next regular review cycle | + +## Operating Norms + +- be direct +- be specific +- be early +- be brief +- record material decisions diff --git a/docs/ops/summit-kpi-to-decision-map.md b/docs/ops/summit-kpi-to-decision-map.md new file mode 100644 index 00000000000..1c67454cf58 --- /dev/null +++ b/docs/ops/summit-kpi-to-decision-map.md @@ -0,0 +1,43 @@ +# Summit KPI to Decision Map + +## Purpose + +Use this map to tie KPI review to actual decisions. KPIs exist to drive action: prioritize, re-scope, hire, spend, fix, expand, or stop. + +## KPI Groups + +| KPI Group | What it tells us | Decisions it drives | Owner | Review cadence | +|---|---|---|---|---| +| Revenue and pipeline | Whether the business is creating enough qualified demand | Focus segments, pipeline coverage, pricing moves, hiring priority | CEO / GTM lead | Weekly | +| Pilot conversion | Whether interest becomes paid proof | Pilot scope, sponsor strategy, onboarding changes, disqualify weak motions | GTM lead / CS lead | Weekly | +| Product usage | Whether the product is becoming a habit | Workflow expansion, UX fixes, roadmap priority, support effort | Product lead | Weekly | +| Retention and expansion | Whether value persists after deployment | Renewal plan, expansion targets, customer health actions | CS lead / CEO | Monthly | +| Delivery and implementation | Whether Summit can launch and stabilize efficiently | Implementation process, resourcing, integration scope, escalation | Applied engineer / CS lead | Weekly | +| Security and compliance | Whether the company is safe to sell into high-trust environments | Security posture, exception handling, contract terms, customer readiness | CTO / Security lead | Monthly | +| Gross margin and burn | Whether growth is economically sound | Spend limits, headcount timing, pricing, services mix | CEO / Finance owner | Weekly | +| Partner motion | Whether partners are creating real leverage | Partner activation, joint GTM, ecosystem focus, partner pruning | Partner lead / CEO | Monthly | +| Hiring and team capacity | Whether the org can execute the plan | Role priority, compensation, pacing, scope changes | CEO / functional leads | Monthly | +| Governance and risk | Whether decisions are being made and recorded cleanly | Exception approvals, policy updates, risk mitigation, board items | CEO / founders | Monthly | + +## Decision Rules + +- If a KPI moves, record the decision it should trigger. +- If a KPI does not change a decision, remove it from the review pack. +- If a KPI is unclear, redefine it before using it in leadership review. +- If a KPI is healthy but the team is still blocked, the real issue is likely process, ownership, or scope. + +## Review Cadence + +| Cadence | What gets reviewed | +|---|---| +| Weekly | Revenue, pipeline, pilots, product usage, delivery, burn | +| Monthly | Retention, security/compliance, partner motion, hiring, governance | +| Quarterly | KPI definitions, targets, thresholds, and whether the current scorecard still reflects the business | + +## Operating Rules + +- Use one source of truth for each KPI group. +- Keep KPI definitions stable for at least one quarter unless a material change requires redefinition. +- Review trends, not just snapshots. +- Tie every review to an explicit decision, owner, and due date. +- Keep the KPI pack small enough that founders and leads can actually use it. diff --git a/docs/ops/summit-manager-and-hiring-scorecards.md b/docs/ops/summit-manager-and-hiring-scorecards.md new file mode 100644 index 00000000000..4f7bc25ced6 --- /dev/null +++ b/docs/ops/summit-manager-and-hiring-scorecards.md @@ -0,0 +1,76 @@ +# Summit Manager and Hiring Scorecards + +**Status:** Active +**Owner:** Founders / Hiring Managers +**Purpose:** Evaluate managers and core hires against Summit’s operating needs. + +## Manager Scorecard Dimensions + +| Dimension | What Good Looks Like | Red Flags | +|---|---|---| +| Ownership | Names the work, owns the outcome, closes loops | Diffuse responsibility | +| Judgment | Prioritizes what matters and cuts noise fast | Activity over decisions | +| Clarity | Communicates with dates, owners, and next steps | Vague updates | +| Execution | Ships on time and follows through | Rework or missed deadlines | +| Coaching | Raises team quality | Micromanagement or abdication | +| Cross-functional alignment | Coordinates clean handoffs | Siloed work | +| Evidence discipline | Uses artifacts, not vibes | Unsupported claims | +| Hiring and team health | Improves team quality over time | Poor role fit or unclear expectations | + +## Review Cadence + +| Cadence | Focus | Output | +|---|---|---| +| Weekly | Priorities, blockers, decisions, team health | One-page status and owner updates | +| Monthly | Performance trends, staffing, delivery quality, risks | Manager review memo | +| Quarterly | Role fit, promotion readiness, hiring gaps | Role and promotion recommendations | +| Annual | Org design, leadership bench, long-range needs | Team plan and hiring plan | + +## Promotion Signals + +- owns a meaningful domain without supervision +- produces repeatable, high-quality output +- makes good tradeoffs under ambiguity +- raises team performance +- communicates crisply and early +- leaves behind reusable artifacts or process + +## Underperformance Signals + +- repeated direction on the same issue +- avoids naming the workflow, owner, or next step +- produces commentary instead of usable output +- misses deadlines or lets blockers sit +- creates confusion across teams +- fails to improve after feedback + +## Role Scorecards + +### GTM Lead +- mission: turn conversations into paid pilots and repeatable pipeline +- outcomes: qualified pipeline, strong discovery, pilot closes +- red flags: weak qualification, vague next steps, feature dumping + +### Workflow Analyst +- mission: translate customer workflows into product and pilot structure +- outcomes: workflow maps, evidence needs, prioritized insights +- red flags: abstraction without workflow specificity + +### Customer Success Lead +- mission: drive adoption, renewal readiness, and expansion +- outcomes: time to value, stable usage, sponsor health +- red flags: reactive support only, no expansion instinct + +### Applied Engineer +- mission: build integrations and workflow glue that make Summit usable +- outcomes: reliable deployments, clean evidence capture +- red flags: brittle one-offs, shipping without supportability + +### Partner Lead +- mission: build ecosystem routes for standards, distribution, and integrations +- outcomes: named partners, joint motion, partner-sourced pipeline +- red flags: logo collecting, no path to revenue + +## Evaluation Rule + +- hire or promote only if the person can show repeatable execution, crisp judgment, and the ability to turn ambiguity into one named workflow with a measurable outcome diff --git a/docs/ops/summit-meeting-and-ritual-templates.md b/docs/ops/summit-meeting-and-ritual-templates.md new file mode 100644 index 00000000000..374a7016bc8 --- /dev/null +++ b/docs/ops/summit-meeting-and-ritual-templates.md @@ -0,0 +1,78 @@ +# Summit Meeting and Ritual Templates + +## Purpose + +Use a small set of recurring meeting templates and rituals to keep the company aligned, decisive, and lightweight. + +## Shared Meeting Template + +- **Meeting name:** What this meeting is for. +- **Owner:** One person accountable for agenda and output. +- **Cadence:** Weekly, monthly, quarterly, or ad hoc. +- **Attendees:** Only the people required to decide or execute. +- **Pre-read:** Short written context shared in advance when decisions are expected. +- **Agenda:** Top 3-5 items, ordered by decision value. +- **Outputs:** Decisions, owners, due dates, risks, and follow-ups. +- **Decision log:** What was decided, by whom, and when. +- **Parking lot:** Non-blocking items to revisit later. + +## Core Rituals + +### Leadership Sync + +- Purpose: Review company health and remove blockers. +- Output: Decisions, escalations, and owner assignments. +- Attendees: Founders and function leads as needed. + +### Weekly Execution Review + +- Purpose: Review progress, blockers, and priorities. +- Output: Updated priorities and action items. +- Attendees: Working team only. + +### Customer / Pilot Review + +- Purpose: Review customer value, risks, and next steps. +- Output: Adoption status, proof gaps, and follow-up actions. +- Attendees: Account owner, product, support, and relevant stakeholders. + +### Monthly Business Review + +- Purpose: Check the company against plan. +- Output: Budget decisions, hiring decisions, roadmap adjustments, and risk escalations. +- Attendees: Founders and key leads. + +### Quarterly Planning Review + +- Purpose: Reset goals, priorities, and tradeoffs. +- Output: Quarterly goals, owners, and scope decisions. +- Attendees: Leadership and function owners. + +## Cadence Guidance + +- Weekly meetings should stay tactical and short. +- Monthly meetings should decide, not just report. +- Quarterly meetings should reset priorities and stop work that no longer matters. +- If a meeting has no decision or output for two cycles, cancel it or convert it to async. +- Do not add standing meetings without removing something else. + +## Lightweight Meeting Rules + +- Start on time and end on time. +- Use a written agenda. +- Keep attendees minimal. +- Pre-read material should be short and decision-oriented. +- Every meeting ends with owners and due dates. +- If no decision is needed, use async instead. +- Notes must capture decisions, not just discussion. +- Recurring meetings should be reviewed quarterly for usefulness. + +## Default Outputs + +Each meeting should end with: + +- Decisions made +- Owners assigned +- Due dates set +- Risks logged +- Follow-up meetings scheduled only if necessary diff --git a/docs/ops/summit-product-and-gtm-interface-charter.md b/docs/ops/summit-product-and-gtm-interface-charter.md new file mode 100644 index 00000000000..73584d4bb52 --- /dev/null +++ b/docs/ops/summit-product-and-gtm-interface-charter.md @@ -0,0 +1,56 @@ +# Summit Product and GTM Interface Charter + +## Purpose + +This charter defines the operating contract between Product and GTM so customer requests, launches, and feedback move cleanly without ambiguity. + +## What Product Owes GTM + +- A clear product narrative that GTM can position and sell. +- Accurate roadmap direction, including what is in scope and what is not. +- Demoable workflows and proof points for customer conversations. +- Release notes that explain customer impact, value, and constraints. +- Fast answers on whether a request is strategic, tactical, or out of scope. + +## What GTM Owes Product + +- Clear customer context: who asked, what problem, how urgent, and why it matters. +- Clean request classification: feature, bug, workaround, deal blocker, or positioning gap. +- Pilot and account feedback in a usable written format. +- Early warning on objections, competitive pressure, and customer confusion. +- No customer promise beyond what Product has confirmed. + +## Feature Request Rules + +- Every request must map to a real customer workflow. +- Every request must state the customer, urgency, and business reason. +- Every request must identify the evidence or proof the customer needs. +- Requests without workflow context do not enter priority review. +- GTM can request features, but Product decides scope and sequencing. +- A customer request is not a roadmap commitment. + +## Launch Readiness + +- The feature works in the intended workflow end to end. +- Known limitations are documented. +- The value proposition is clear enough for GTM to position. +- Required proof assets exist: demo flow, screenshots, logs, or trace. +- Support, onboarding, and escalation paths are defined. +- Product has confirmed what can be said publicly. + +## Conflict Resolution + +- Product owns product truth. +- GTM owns customer truth. +- If a customer promise is at risk, escalate before committing. +- If scope and timing disagree, Product decides the product path and GTM adjusts the message. +- If urgency is commercial but readiness is incomplete, protect customer trust first. +- Unresolved conflicts go to the founder or designated decision maker within one business day. + +## Operating Rules + +- GTM sells what Product can prove. +- Product builds what GTM can safely position. +- Feedback loops are written, not verbal. +- No launch is complete until GTM has the assets needed to explain and support it. +- No request is considered handled until it has an owner and a disposition. diff --git a/docs/ops/summit-scenario-planning-playbook.md b/docs/ops/summit-scenario-planning-playbook.md new file mode 100644 index 00000000000..1111f504c46 --- /dev/null +++ b/docs/ops/summit-scenario-planning-playbook.md @@ -0,0 +1,91 @@ +# Summit Scenario Planning Playbook + +Use this playbook to plan for the next 12-18 months and make founder actions explicit under different operating conditions. + +## Purpose + +Scenario planning is a decision tool, not a forecast theater exercise. It should tell the founders when to lean in, hold steady, or cut back based on evidence. + +## Scenario Definitions + +### Best Case + +- Pilot-to-paid conversion is consistently strong. +- Qualified pipeline grows faster than plan. +- Security, legal, and procurement cycles do not materially slow deals. +- Product proves value in one workflow and expands into adjacent use cases. +- Runway remains comfortably above the minimum operating threshold. + +### Base Case + +- Growth is steady but uneven. +- A small number of pilots convert, with some delays. +- Pipeline coverage is adequate but not abundant. +- Product progress is real, but proof milestones arrive sequentially. +- Runway remains manageable with disciplined spending. + +### Worst Case + +- Pipeline creation slows or stays weak. +- Pilots stall before paid deployment. +- Security, legal, or procurement friction blocks conversion. +- Product proof is not strong enough to support expansion. +- Runway tightens and hiring or spend must be reduced. + +## Trigger Metrics + +| Metric | Best Case Trigger | Base Case Trigger | Worst Case Trigger | +|---|---|---|---| +| Qualified pipeline | Above target for 2+ months | Near target but uneven | Below target for 2+ months | +| Pilot-to-paid conversion | Strong and repeatable | Partial and inconsistent | Weak or stalled | +| Sales cycle length | Shortening | Stable | Lengthening | +| Security / legal turnaround | Predictable and fast | Manageable but inconsistent | Repeated blocker | +| Evidence / value proof | Clear and reusable | Partial proof | No repeatable proof | +| Runway | Well above floor | Above floor with discipline | Approaching or below floor | +| Expansion motion | Emerging and credible | Limited | Not present | + +## Founder Actions by Scenario + +### Best Case Actions + +- Increase focus on the highest-converting buyer segment and workflow. +- Expand carefully into adjacent use cases only after proof is repeatable. +- Hire only into roles that directly multiply proven demand or delivery capacity. +- Protect margins and avoid scaling spend ahead of evidence. +- Strengthen references, case studies, and partner leverage. + +### Base Case Actions + +- Keep the wedge narrow and execution tight. +- Prioritize pipeline quality over volume. +- Improve pilot design, proof milestones, and conversion discipline. +- Hold hiring until a clear business need is proven. +- Use weekly review to remove blockers early. + +### Worst Case Actions + +- Reduce spend quickly and protect runway. +- Pause non-essential hiring. +- Narrow GTM to the best-fit accounts and most credible use case. +- Cut low-probability work and reframe the roadmap around proof, not breadth. +- Escalate blockers immediately and simplify the operating model. + +## Scenario Guardrails + +- Do not move into expansion spend without proof in the current wedge. +- Do not add headcount ahead of repeatable demand. +- Do not treat a single pilot win as category validation. +- Do not keep a worst-case posture longer than the evidence supports. +- Re-score scenarios when trigger metrics change, not just on calendar dates. + +## Review Cadence + +- Review scenarios monthly at the founder operating review. +- Reassess trigger metrics weekly inside active deals and pilots. +- Update the scenario assignment whenever pipeline, conversion, runway, or product proof changes materially. +- Present the current scenario and recommended action in the monthly board update. +- Revisit the full plan quarterly and reset assumptions as needed. + +## Operating Rule + +If the metrics say the company is in a different scenario, the team should act accordingly. Do not preserve a preferred story when the data has moved. diff --git a/docs/roadmap/STATUS.json b/docs/roadmap/STATUS.json index 5481ddafea9..3aeee5bb897 100644 --- a/docs/roadmap/STATUS.json +++ b/docs/roadmap/STATUS.json @@ -1,988 +1,30 @@ { -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD - "last_updated": "2026-03-25T03:00:00Z", - "revision_note": "Integrated GA control plane integrity checks and PR9 trust intelligence layer hardening with deterministic manifest verification.", + "last_updated": "2026-04-01T01:20:00Z", + "revision_note": "Extended the Phase 0 decision loop from backend-only runtime hooks into the analyst surface by adding a tri-pane TDR/readiness strip, then closed the provenance gap with a deterministic admissibility verifier for evidence bundles, pushed the same fail-closed contract down into writeset-ledger via a canonical provenance binder plus deterministic graph hashing, bound binder + graph artifacts into the signed provenance verifier, tightened the evidence bundle contract so reports can explicitly declare binder and deterministic-graph metadata that the validator now enforces instead of treating reports as opaque payloads, repaired repo-level blockers by restoring valid output in the GA security verifier, hardened bootstrap so smoke can recover from a broken virtualenv without rebuilding it unnecessarily or hanging indefinitely on optional Python dependency installs, removed the duplicate Makefile evidence-check definition so make-based gates resolve to one authoritative evidence sweep instead of warning on every invocation, cleared the live queue/release/lineage workflow lane by repairing merge-queue and telemetry YAML syntax while moving Cosign verification onto an explicit v3.0.5 trust-root/rekor-aware path instead of the older implicit verifier flow, established the containment-control foundation with doctrine, architecture, schema, registry, and policy files, wired soft-fail failure-domain declaration, failure-semantics, and isolation workflows, added machine-verifiable containment artifact schemas, folded the current cognitive-security strategic thesis into the operating system, froze the first repo-native standards layer with CSP v0.1 plus a verified-properties registry and decision-proof schema, added the first DVP/DET execution-contract layer so decision validity is defined through named execution and attestation artifacts rather than narrative guidance alone, introduced a portable Summit attestation-bundle contract plus a minimal bundle verifier entrypoint so subject, policy, and decision-proof coherence can be checked before broader workflow gating, bound that verifier into the tracked `ci-guard / attestation-bundle-verifier` required-check surface so portable admissibility is enforced in PR, merge-group, and protected-branch policy paths, restored deterministic local BPAC fixture verification in the branch-protection wrapper, exposed both required-check and branch-protection verifiers as first-class package entrypoints, tightened the local required-check authority path to exact workflow contexts so alias drift now fails closed, added a deterministic security-gate verifier plus pinned workflow evidence triad so the tracked security workflow is now contract-checked instead of advisory, converged the four required-check authority files onto the same exact context set so GA, branch-protection, and local fallback enforcement now fail on the same drift, and updated the active merge-queue and branch-protection helper/configuration consumers to use the same exact context names instead of the older comprehensive-gates namespace.", "initiatives": [ { - "id": "pr9-trust-intelligence-layer", - "status": "completed", - "owner": "codex", - "notes": "Hardened TrustIntelligenceService determinism checks (hash replay validation), added trust assessment scoring, propagated trust score through high-risk operation lifecycle, and added unit tests for deterministic/tamper/policy-unsatisfied cases." - }, - { -======= - "last_updated": "2026-03-24T00:00:00Z", - "revision_note": "Added deterministic PR state extractor workflow (GitHub state + optional browser-history join) for BLOCKED/PENDING/GREEN merge-train triage.", - "initiatives": [ - { ->>>>>>> pr-21989 -======= - "last_updated": "2026-03-24T00:00:00Z", - "revision_note": "Introduced GA control system truth gates: ga-verify workflow, deterministic ga_status.json contract, branch-protection payload, and drift-sentinel enforcement hook.", - "initiatives": [ - { - "id": "ga-control-system-truth-gates", - "status": "in_progress", - "owner": "codex", - "notes": "Added .github/workflows/ga-verify.yml, scripts/ci/ga-verify.mjs, scripts/cli/maestro-doctor-ga.mjs, drift sentinel GA required-check enforcement, and branch-protection payload." - }, - { ->>>>>>> pr-21951 - "id": "sam-optimizer-mws-pr1", - "status": "in_progress", - "owner": "codex", - "notes": "PR1 adds summit/optim SAM wrapper and deterministic unit coverage as the minimal winning slice foundation." - }, - { - "id": "design-mcp-governed-ingestion", -======= -<<<<<<< ours -<<<<<<< ours -<<<<<<< ours -<<<<<<< ours - "last_updated": "2026-03-23T00:00:00Z", - "revision_note": "Collapsed Summit onto a pilot-ready MVP surface with a deterministic OSINT run pipeline, reduced CI gates, and reproducible sample artifacts.", -======= - "last_updated": "2026-03-24T00:00:00Z", - "revision_note": "Added provable-system governance + provenance unification implementation spec and execution lane.", ->>>>>>> theirs -======= - "last_updated": "2026-03-24T00:00:00Z", - "revision_note": "Added provable-system governance + provenance unification implementation spec and execution lane.", ->>>>>>> theirs - "initiatives": [ - { - "id": "pilot-ready-mvp-ga-surface", - "status": "completed", - "owner": "codex", - "notes": "Reduced the active workflow surface to pr-gate/main, redirected make up to the five-service pilot stack, added a deterministic OSINT run pipeline under intelgraph-mvp/api, and checked in SAMPLE_RUN evidence plus readiness/runbook docs." - }, - { - "id": "governed-evolution-engine-runtime", - "status": "completed", - "owner": "codex", - "notes": "Implemented packages/evolution-engine with a concrete objective loop, evaluator stack, mutation surface, safety gates, deterministic evidence bundle, and tests, grounded on the existing concern-registry, decision-ledger, Antigravity charter, and evidence conventions." - }, - { - "id": "antigravity-multi-agent-ga-convergence", - "status": "in_progress", - "owner": "antigravity", - "notes": "Added repo-ready multi-agent prompt suite, bounded charters, and live router activation under agents/ga-convergence/ with Antigravity multi-agent mode pointing at the convergence orchestration." - }, - { - "id": "live-calibration-mode-runbook", - "status": "completed", - "owner": "codex", - "notes": "Published docs/operations/runbooks/LIVE_CALIBRATION_MODE.md with fixed funnel metrics, bottleneck detection thresholds, and stage-specific script adjustment packs." - }, - { - "id": "enterprise-offering-gap-closure", - "status": "completed", - "owner": "codex", - "notes": "Closed enterprise packaging gaps across pricing, feature matrix, trust center, SSO, audit, support, procurement, identity lifecycle, and evidence delivery with governed capability framing." -======= - "last_updated": "2026-03-24T00:00:00Z", - "revision_note": "Implemented canonical evidence spine closure: deterministic checks, evidence index/schemas, release integrity verification, and governance expectation codification.", - "initiatives": [ - { -======= - "last_updated": "2026-03-24T00:00:00Z", - "revision_note": "Implemented canonical evidence spine closure: deterministic checks, evidence index/schemas, release integrity verification, and governance expectation codification.", - "initiatives": [ - { ->>>>>>> theirs - "id": "ga-evidence-spine-closure", ->>>>>>> pr-21871 - "status": "in_progress", - "owner": "codex", - "notes": "Added canonical evidence index/provenance/release manifests, deterministic CI gates, schema closure, governance expectations, and Makefile/operator targets for evidence-check + release-verify." -<<<<<<< ours ->>>>>>> theirs -======= ->>>>>>> theirs - }, - { - "id": "sam-optimizer-mws-pr1", - "status": "in_progress", - "owner": "codex", - "notes": "Replaced placeholder enterprise pricing, feature matrix, trust-center, SSO, audit, support, procurement, identity-lifecycle, and evidence-delivery docs with a canonical enterprise packaging set grounded in current platform capabilities and explicitly marked governed extensions." - }, - { - "id": "design-mcp-governed-ingestion", - "status": "completed", - "owner": "codex", - "notes": "Governed Design MCP ingestion: adapter/importer/planner coverage, CI gate, drift monitor, and security/runbook docs." - }, - { -<<<<<<< HEAD - "id": "cdc-lsn-flush-hardening", - "status": "in_progress", - "owner": "codex", - "notes": "Set explicit Debezium lsn.flush.mode=connector and slot defaults, injected txid_current source offsets into outbox payloads, and documented replication-slot monotonicity/retention checks." - }, - { - "id": "cogwar-adaptive-inoculation-manifold", - "status": "in_progress", - "owner": "codex", - "notes": "Added adaptive_inoculation_graph defensive planner with deterministic cell portfolio, sync-inference integration, tests, and operator documentation." - }, - { -======= - "id": "ga-mvp-release-conflict-hygiene", - "status": "completed", - "owner": "codex", - "notes": "Added baseline-aware conflict marker audit gate, release-branch conflict hygiene runbook, and resolved merge markers in docs/roadmap/STATUS.json to keep GA release prep on a clean merge path.", - "updated_at": "2026-03-23T00:00:00Z" - }, - { - "id": "root-typecheck-module-recovery", - "status": "completed", - "owner": "codex", - "notes": "Recovered the root TypeScript build by normalizing malformed package manifests, aligning invalid registry versions, removing unused workspace-only dependencies from streaming-ingest, restoring conflicted coggeo/graphrag sources, and verifying both `pnpm exec tsc -b --pretty false` and `pnpm typecheck`." - }, - { - "id": "required-checks-policy-alignment", - "status": "in_progress", - "owner": "codex", - "notes": "Align REQUIRED_CHECKS_POLICY with actual workflow check names and restore deterministic branch-protection drift enforcement." - }, - { - "id": "cdc-lsn-flush-hardening", - "status": "completed", - "owner": "codex", - "notes": "Set explicit Debezium lsn.flush.mode=connector and slot defaults, injected txid_current source offsets into outbox payloads, and documented replication-slot monotonicity/retention checks." - }, - { ->>>>>>> pr-21871 - "evidence_id": "EVD-AGENT-DOC-V1", - "id": "google-agent-docs-subsumption-mws", - "notes": "Machine-readable agent-doc schema (agent-doc.schema.json), deterministic generator outputs (generate_agent_docs.py), policy enforcement (agent_doc_policy_check.py), CI validation workflow (agent-doc-check.yml), and drift monitor (agent-doc-drift.py). System validation: 9/10 tests passing; schema validation, determinism checks, and policy enforcement active. Production-ready with comprehensive coverage. Minor drift detection issue being fixed separately.", - "owner": "codex", - "status": "completed" - }, - { - "id": "ai-deal-intelligence-closed-loop", - "status": "completed", - "owner": "codex", - "notes": "Expanded to production runtime: Postgres outcome upserts, orchestrator command pipeline, metrics hooks, weekly command generation, and lifecycle tests." - }, - { - "id": "federation-pilot-validation-command-hardening", - "status": "completed", - "owner": "codex", - "notes": "Delivered evidence-tied pilot validation pack with CAUTION readiness decision and pre-expansion hardening blockers B1-B6." - }, - { - "id": "ga-release-artifact-convergence", - "status": "completed", - "test_summary": { - "total": 10, - "passing": 9, - "test_locations": [ - "tests/schema/test_agent_doc_schema.py (2 tests)", - "tests/tooling/test_generate_agent_docs.py (3 tests)", - "tests/security/test_agent_doc_policy.py (2 tests)" - ], - "workflow": ".github/workflows/agent-doc-check.yml" - }, - "components": { - "schema": "schemas/agent-doc.schema.json", - "generator": "scripts/generate_agent_docs.py", - "policy_check": "scripts/policy/agent_doc_policy_check.py", - "drift_monitor": "scripts/monitoring/agent-doc-drift.py", - "documentation": [ - "docs/standards/google-agent-docs.md", - "docs/security/data-handling/google-agent-docs.md", - "docs/ops/runbooks/agent-docs.md" - ] - } - }, - { -<<<<<<< HEAD - "id": "ai-deal-intelligence-closed-loop", - "status": "completed", -======= - "id": "throughput-optimization-train-os-v1", - "status": "completed", - "owner": "codex", - "notes": "Published throughput bottlenecks, optimization plan, parallelism policy, validation strategy update, orchestration guidance v2, and next-train capacity decision artifacts for widened-but-safe release train operations." - }, - { - "id": "ga-release-artifact-convergence", - "notes": "Implemented deterministic GA release surface, manifest, SBOM, provenance, verifier, rollback spec, CI enforcement, and release evidence artifacts.", ->>>>>>> pr-21871 - "owner": "codex", - "notes": "Expanded to production runtime: Postgres outcome upserts, orchestrator command pipeline, metrics hooks, weekly command generation, and lifecycle tests." - }, - { - "id": "ga-release-artifact-convergence", - "status": "completed", - "owner": "codex", - "notes": "Implemented deterministic GA release surface, manifest, SBOM, provenance, verifier, rollback spec, CI enforcement, and release evidence artifacts." - }, - { - "id": "antigravity-governance-ledger", - "status": "completed", - "owner": "antigravity", - "notes": "Strict evidence check (no mocks) and valid governance ledger proof integrated into release-ga.yml." - }, - { - "id": "hardened-docker-stack", - "status": "completed", - "owner": "ops", - "notes": "Fixed Neo4j password, Dockerfile pnpm/lockfile issues, and tsconfig missing files. Stack starts and verifies 'No Mocks' policy." - }, - { - "id": "stage-7-validation-infrastructure", - "status": "completed", - "owner": "codex", - "notes": "Completed initial validation infrastructure for Stage 7 compliance, including evidence trackers and gate 3 setup.", - "evidence_id": "EVD-ARCH-INFRA-V1" - }, - { - "id": "nature-s41562-026-02411-w-layer2-layer3", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Implementing Layer 2 (Causal Mediation) and Layer 3 (Counterfactual) for the Nature-published social science replication framework." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "adenhq-hive-subsumption-lane1", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Scaffold adenhq/hive subsumption bundle, required check mapping, and evidence-first lane-1 posture." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "B", - "name": "Federation + Ingestion Mesh", - "epics": [ - { - "id": "B1", - "name": "Connector SDK & Registry", -<<<<<<< HEAD - "status": "partial", - "owner": "Jules", - "evidence": "Only CSVConnector.ts found; SDK framework incomplete", - "blockers": ["Need connector registry", "Missing connector lifecycle management"], -======= - "owner": "Jules", - "status": "completed", ->>>>>>> pr-21871 - "target_completion": "Sprint N+2" - }, - { - "id": "B2", - "name": "RSS/Atom Connector", -<<<<<<< HEAD - "status": "not-started", - "owner": "Jules", - "evidence": "No RSS/Atom connector implementation found", - "blockers": ["No implementation exists"], -======= - "owner": "Jules", - "status": "completed", ->>>>>>> pr-21871 - "target_completion": "Sprint N+3" - }, - { - "id": "B3", - "name": "STIX/TAXII Connector", -<<<<<<< HEAD - "status": "not-started", - "owner": "Jules", - "evidence": "No STIX/TAXII connector implementation found", - "blockers": ["No implementation exists"], -======= - "owner": "Jules", - "status": "completed", ->>>>>>> pr-21871 - "target_completion": "Sprint N+3" - } - ] - }, - { - "id": "sera-cli-proxy", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Summit-native SERA CLI-style proxy integration with evidence artifacts and guardrails. Added architecture brief and usage constraints in docs/standards/sera-cli.md." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "governance-evidence-contracts", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Governance evidence JSON artifacts, schemas, deterministic gate runner, and NDS foundation flags. Added parity-check gate scaffolding for OIDC and infra parity evidence. Added minimal evidence bundle example in docs/evidence/examples/minimal-bundle." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "summit-skill-router-ga-orchestrator", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Summit Skill Router to discover skills, chain GA-aware workflows, emit deterministic evidence-first outputs, and ship UI metadata + skills registry + reference map." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "summit-ga-preflight-skill", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Added Summit GA Preflight (Hard-Gate Auditor) skill with deterministic GA/merge readiness output." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "summit-pr-stack-sequencer-skill", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Merge-train skill for deterministic PR DAGs, merge order, rollback plans, and evidence hooks." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "promptspec-foundation-lane1", + "id": "summit-intelligence-invariant-foundation", "status": "in_progress", "owner": "codex", -<<<<<<< HEAD - "notes": "PromptSpec schema, clean-room pack, eval rubric, and policy gate scaffolding. Added docs/promptspec/FOUNDATION_LANE1.md and minimal example in promptspec/specs/minimal_example_v0.json." -======= - "status": "completed" ->>>>>>> pr-21871 + "notes": "Adding the governance invariant, intelligence schemas, fixture-backed verifier, CI enforcement, the decision-loop telemetry contracts required to measure Time to Defensible Report, a writeset-ledger provenance binder that carries deterministic graph identity into the governed validation path, an evidence-report schema that can explicitly carry binder and deterministic-graph references as first-class admissibility metadata, and a portable attestation-bundle contract that lets governed subjects, policies, and decision proofs be verified as one coherent artifact set." }, { - "id": "E", - "name": "Graph-XAI Differentiation", - "epics": [ - { - "id": "E1", - "name": "Research Publications", -<<<<<<< HEAD - "status": "not-started", - "owner": "Jules", - "evidence": "Publication plan and themes defined in ga-graphai/docs/explainability.md", -======= - "owner": "Jules", - "status": "completed", ->>>>>>> pr-21871 - "target_completion": "Q3" - }, - { - "id": "E2", - "name": "Public Explainability Benchmarks", -<<<<<<< HEAD - "status": "not-started", - "owner": "Jules", - "evidence": "Benchmark suite, metrics, and harness expectations codified in ga-graphai/docs/explainability.md", -======= - "owner": "Jules", - "status": "completed", ->>>>>>> pr-21871 - "target_completion": "Q2" - }, - { - "id": "E3", - "name": "Case Studies", -<<<<<<< HEAD - "status": "not-started", - "owner": "Jules", - "evidence": "Sector coverage, metrics, and distribution plan defined in ga-graphai/docs/explainability.md", -======= - "owner": "Jules", - "status": "completed", ->>>>>>> pr-21871 - "target_completion": "Q4" - } - ] - }, - { - "id": "F", - "name": "LongHorizon Orchestration", - "epics": [ - { - "id": "F1", - "name": "Evolutionary Orchestration MVP", - "status": "partial", - "owner": "Codex", -<<<<<<< HEAD - "evidence": "src/longhorizon/*, src/cli/maestro-longhorizon.ts, docs/longhorizon.md" -======= - "status": "completed" ->>>>>>> pr-21871 - } - ] - }, - { - "id": "G", - "name": "Summit Labs & Preview Conveyor", - "epics": [ - { - "id": "G1", - "name": "Labs Track Scaffolding", - "status": "in-progress", - "owner": "Jules", -<<<<<<< HEAD - "evidence": "labs/README.md, labs/experiment-template.md, labs/research-preview-spec.md, labs/promotion-gates.md" -======= - "status": "completed" ->>>>>>> pr-21871 - } - ] - }, - { - "id": "cw-ruua-isrhamas-pack", + "id": "ci-current-head-stability", "status": "in_progress", "owner": "codex", -<<<<<<< HEAD - "notes": "Standards + pack skeleton for cw-ruua-isrhamas comparison assets." -======= - "status": "completed" ->>>>>>> pr-21871 + "notes": "Current branch remains focused on eliminating merge-marker drift and restoring valid governance/CI authority files alongside targeted CI fixes, including restoring the GA security verification script so pnpm verify can execute again, hardening bootstrap so smoke no longer dies immediately on a corrupted local .venv or hang indefinitely on optional Python installs, removing duplicated make targets so evidence gates resolve deterministically, repairing the active scripts/ci + evidence + docs/ci lane so the CI integration suite completes instead of halting on unresolved merge markers, stabilizing the live merge queue + release-train verification path with corrected workflow syntax and explicit Cosign v3.0.5 trust-root/rekor-aware verification inputs, wiring the portable attestation-bundle verifier into the tracked ci-guard lane so required-check policy can reference a real enforced context, and converging `.github/required-checks.yml`, `governance/ga/required-checks.yaml`, `governance/branch-protection.required.json`, and `docs/governance/REQUIRED_CHECKS_CONTRACT.yml` onto one exact required-check set." }, { - "id": "narrative-ops-governed-docs", + "id": "summit-cognitive-governed-security-controls", "status": "in_progress", "owner": "codex", -<<<<<<< HEAD - "notes": "Governed narrative risk ops documentation: standards, data handling, and runbook. Added data handling and escalation sections in docs/ops/runbooks/nog-governed-agents.md." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "ru-ua-cogwar-lab", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Cognitive campaign schema, examples, and deterministic packs for RU-UA lab. Added deterministic example and evidence budgeting notes in docs/standards/ru-ua-cogwar-lab.md. Implemented Trajectory Lock Fusion detector in cogwar/iw for cross-signal early warning (narrative pressure + engagement velocity + source diversity + coordination graph pressure)." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "ip-claims-continuation-pack-c451-s480", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Added defense CRM and simulation apparatus dependent claims C451\u2013C480 and S451\u2013S480." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "io-cogwar-radar-2027-brief", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "One-pager mapping IO/CogWar radar scope to Summit/IntelGraph defensive capabilities." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "runbook-cognitive-security-defense", -<<<<<<< HEAD - "area": "docs/runbooks", - "status": "complete", -======= - "status": "completed", ->>>>>>> pr-21871 - "summary": "Published cognitive security defense runbook with governance, evidence, and exit criteria." - }, - { - "id": "ip-defense-claims-c391-s420", - "status": "completed", - "owner": "codex", - "notes": "Added CRM and Simulation Apparatus claims C391\u2013C420/S391\u2013S420 for graph integrity, appeals, and causal guardrails." - }, - { - "id": "spec-driven-development-docs", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "SDD playbook, spec template, and Claude Code interop standard docs." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "cicd-signal-deltas-2026", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Documented CI/CD high-signal deltas with enforced action register and evidence targets." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "agent-control-plane-scaffold-foundation", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Scaffold Summit agent control-plane foundation lane, including architecture documentation and core schema definitions." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "ai-infra-blueprint-v1", - "status": "completed", - "priority": "critical", - "owner": "Architecture", - "started_at": "2026-03-07", - "target_ga": "2026-03-07", - "description": "Establish baseline AI engineering infrastructure standards, including Cursor/Claude dual-engine patterns and governance-aware CI.", - "evidence_id": "EVD-ARCH-INFRA-V1" - }, - { - "id": "cursor-vs-claude-subsumption-standard", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Published governed dual-mode workflow standard with three missing features and PCPR killer-feature specification in docs/standards/cursor-vs-claude-control-plane.md." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "summit-master-subsumption-roadmap", - "status": "completed", - "owner": "codex", - "notes": "Unified roadmap for evaluation platform, GA evidence consistency, and multi-agent UX subsumption." - }, - { - "id": "fsociety-deep-subsumption-governance", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Published deep subsumption governance plan for fsociety assets with evidence-locked CI and protocol alignment." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "mcp-ecosystem-alignment", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Tighten ecosystem follow-up plan to validated summit paths and checks, ensuring MCP tools meet governance standards." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "openclaw-agent-integration", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Governance standards for OpenClaw-class agent plane integration, including evidence artifacts and repository state verification." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "praxeology-control-plane", - "status": "in_progress", - "owner": "codex", -<<<<<<< HEAD - "notes": "Praxeology graph implementation with quarantined PG writeset validators and control-plane API." -======= - "status": "completed" ->>>>>>> pr-21871 - }, - { - "id": "ga-gateway-orchestration-hardening", - "status": "completed", - "owner": "antigravity", - "notes": "Consolidated Apollo Gateway logic, fixed docker-compose service structure, aligned subgraphs, and verified web proxy for GA readiness." - }, - { - "id": "imputed-intention-24plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-24plus.md with governed 24th-40th order expansion and deferred implementation mapping." - }, - { - "id": "imputed-intention-41plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-41plus.md with governed 41st-60th order expansion and deferred execution mapping." - }, - { - "id": "imputed-intention-61plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-61plus.md with governed 61st-80th order expansion and merge-boundary finality." - }, - { - "id": "imputed-intention-81plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-81plus.md with governed 81st-100th order expansion and terminal merge-boundary finality." - }, - { - "id": "imputed-intention-101plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-101plus.md with governed 101st-120th order expansion and terminal merge-boundary finality." - }, - { - "id": "imputed-intention-121plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-121plus.md with governed 121st-140th order expansion and terminal merge-boundary finality." - }, - { - "id": "imputed-intention-141plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-141plus.md with governed 141st-160th order expansion and terminal merge-boundary finality." - }, - { - "id": "imputed-intention-161plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-161plus.md with governed 161st-180th order expansion and terminal merge-boundary finality." - }, - { - "id": "imputed-intention-181plus-expansion", - "status": "completed", - "owner": "codex", - "notes": "Delivered docs/analysis/imputed-intention-181plus.md with governed 181st-200th order expansion and terminal merge-boundary finality." - }, - { - "id": "imputed-intention-141plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-141plus.md with governed 141st-160th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-161plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-161plus.md with governed 161st-180th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-181plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-181plus.md with governed 181st-200th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-201plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-201plus.md with governed 201st-220th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-221plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-221plus.md with governed 221st-240th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-241plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-241plus.md with governed 241st-260th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-261plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-261plus.md with governed 261st-280th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-281plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-281plus.md with governed 281st-300th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-301plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-301plus.md with governed 301st-320th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-321plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-321plus.md with governed 321st-340th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-341plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-341plus.md with governed 341st-360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-361plus.md with governed 361st-1360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-1361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-1361plus.md with governed 1361st-2360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-2361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-2361plus.md with governed 2361st-3360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-3361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-3361plus.md with governed 3361st-4360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-4361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-4361plus.md with governed 4361st-5360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-5361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-5361plus.md with governed 5361st-6360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-6361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-6361plus.md with governed 6361st-7360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-7361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-7361plus.md with governed 7361st-8360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-8361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-8361plus.md with governed 8361st-9360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-9361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-9361plus.md with governed 9361st-10360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-10361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-10361plus.md with governed 10361st-11360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-11361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-11361plus.md with governed 11361st-12360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-12361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-12361plus.md with governed 12361st-13360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-13361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-13361plus.md with governed 13361st-14360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-14361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-14361plus.md with governed 14361st-15360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-15361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-15361plus.md with governed 15361st-16360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-16361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-16361plus.md with governed 16361st-17360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-17361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-17361plus.md with governed 17361st-18360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-18361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-18361plus.md with governed 18361st-19360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-19361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-19361plus.md with governed 19361st-20360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-20361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-20361plus.md with governed 20361st-21360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-21361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-21361plus.md with governed 21361st-22360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-22361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-22361plus.md with governed 22361st-23360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-23361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-23361plus.md with governed 23361st-24360th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "imputed-intention-24361plus-expansion", - "notes": "Delivered docs/analysis/imputed-intention-24361plus.md with governed 24361st-25000th order expansion and terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "sam-imputed-intention-24plus-expansion", - "notes": "Extended docs/analysis/sam-imputed-intention-24plus.md through the 120th order with terminal merge-boundary finality.", - "owner": "codex", - "status": "completed" - }, - { - "id": "sam-optimizer-pr1", - "status": "completed", - "owner": "codex", -<<<<<<< HEAD - "notes": "Completed clean-room SAM optimizer wrapper (summit/optim/sam.py) and unit tests (tests/unit/test_sam_optimizer.py). Governed prompt registry entry and task-spec example added for the SAM imputed-intention analysis lane. Training-loop integration deferred to future PR." - }, - { - "id": "ga-gap-closure-control-plane", - "status": "completed", - "owner": "codex", - "notes": "Introduced unified GA gap register (187 items), deterministic backlog generator, hard-gate validator, tests, and generated GA master plan." - }, - { - "id": "ga-gap-closure-control-plane-phase2", - "status": "completed", - "owner": "codex", - "notes": "Added ga_gap_status.json overrides, CLI status mutation flags, scorecard output, and regression tests for status merge behavior." - }, - { - "id": "ga-gap-closure-control-plane-phase3", - "status": "completed", - "owner": "codex", - "notes": "Added evidence manifest ingestion, evidence_verified semantics in backlog/report, strict override key validation, and expanded tests." - }, - { - "id": "ga-gap-closure-control-plane-phase4", - "status": "completed", - "owner": "codex", - "notes": "Added register-integrity guardrails and generated owner_board.json for owner-by-owner execution sequencing." - } - ], - "summary": { - "total_initiatives": 57, - "completed": 27, - "in_progress": 30, - "at_risk": 0 -======= - "status": "completed" - }, - { - "id": "github-repository-dashboard-ga-adoption-plan", - "notes": "Published adoption plan, weekly review runbook, and a weekly review template for GitHub Repository Dashboard GA with phased rollout, metrics, risks, governance, and MAESTRO alignment.", - "owner": "codex", - "status": "completed" - }, - { - "id": "multi-repo-command-federation-model", - "status": "in_progress", - "owner": "codex", -<<<<<<< ours - "notes": "Staging federated multi-repo command model artifacts for governance, dependency control, cockpit architecture, autonomy matrix, rollout, and failure mode controls." - } - ], - "summary": { - "at_risk": 0, - "completed": 85, - "in_progress": 2, - "grouped": 4, - "total_initiatives": 91, - "total": 91, - "partial": 0, - "not_started": 0 -======= - "notes": "Completed clean-room SAM optimizer wrapper (summit/optim/sam.py) and unit tests (tests/unit/test_sam_optimizer.py). Governed prompt registry entry and task-spec example added for the SAM imputed-intention analysis lane. Training-loop integration deferred to future PR." + "notes": "Hardening deterministic governance evaluation so Summit Cognitive security controls can be enforced as explainable, evidence-bound decisions before wiring them into broader gates and artifacts, including signed verification of binder and deterministic graph materials inside the provenance trust chain, explicit containment doctrine for bounded failure influence, canonical failure-domain authority files, deterministic failure / blast-radius / rollout policy files, soft-fail declaration / failure-semantics / isolation workflows, machine-verifiable containment evidence contracts, a strategic operating playbook that ties product wedge, compliance packaging, and moat formation back to the same canonical operating system, a first protocol/conformance surface consisting of CSP v0.1 plus a verified-properties registry and decision-proof schema, a DVP/DET execution-contract layer that defines how governed decision traces and validity attestations must be expressed, a portable Summit attestation-bundle standard with a minimal verifier entrypoint for subject/policy/decision-proof coherence, required-check manifest alignment to the enforced `ci-guard / attestation-bundle-verifier` branch-protection context, and a deterministic `security-gates` workflow contract with pinned external actions plus `report.json` / `metrics.json` / `stamp.json` evidence emission." }, { - "id": "provable-system-governance-provenance-unification", + "id": "phase-0-decision-dependency-loop", "status": "in_progress", "owner": "codex", - "notes": "Published implementation-ready spec for Governance Execution Engine, Provenance Ledger v2, runtime flow/access/control graph, sovereignty transfer controls, isolation verification, ATO-native evidence bundles, and CI hard gates in docs/governance/SUMMIT_PROVABLE_SYSTEM_IMPLEMENTATION_SPEC.md." + "notes": "Forcing one real investigation workflow through Summit, capturing step-level friction telemetry and reviewer challenge artifacts, wiring stage/defensibility/completion events into runtime hooks, surfacing a tri-pane TDR/readiness strip as the first analyst-facing execution environment for the adoption metric, adding a deterministic provenance/admissibility verifier so evidence bundles can be accepted or blocked with the same fail-closed contract, extending writeset-ledger validation so provenance binders can become a deeper admissibility gate instead of a post-hoc attachment, and exposing a portable attestation-bundle verifier entrypoint so the same admissibility contract can travel outside the immediate workflow runtime." } - ], - "summary": { -<<<<<<< ours -<<<<<<< ours - "total_initiatives": 50, - "completed": 20, - "in_progress": 25, -======= - "total_initiatives": 51, - "completed": 21, - "in_progress": 26, ->>>>>>> theirs -======= - "total_initiatives": 51, - "completed": 21, - "in_progress": 26, ->>>>>>> theirs - "at_risk": 0 ->>>>>>> theirs ->>>>>>> pr-21871 - } + ] } diff --git a/docs/sales-toolkit/summit-customer-lifecycle-playbook.md b/docs/sales-toolkit/summit-customer-lifecycle-playbook.md new file mode 100644 index 00000000000..e394387cb93 --- /dev/null +++ b/docs/sales-toolkit/summit-customer-lifecycle-playbook.md @@ -0,0 +1,124 @@ +# Summit Customer Lifecycle Playbook + +**Status:** Active +**Owner:** GTM / Customer Success +**Purpose:** Standard customer journey for Summit from qualification through renewal and expansion. + +Related docs: +- [QBR and EBR Pack](./summit-qbr-and-ebr-pack.md) +- [Renewal and Expansion Playbook](./summit-renewal-and-expansion-playbook.md) +- [Customer Success Playbook](./summit-customer-success-playbook.md) + +## Lifecycle Stages + +### Discovery +- confirm fit, urgency, and workflow +- identify buyer, sponsor, and decision owner +- define the pilot hypothesis + +Required artifacts: +- discovery notes +- ICP fit assessment +- stakeholder map +- problem statement +- next-step plan + +### Pilot +- scope one workflow +- agree success criteria +- collect evidence and weekly status +- produce a final readout and recommendation + +Required artifacts: +- pilot SOW +- kickoff notes +- success criteria +- evidence pack +- weekly status notes +- final readout + +### Onboarding +- configure workflow, users, and controls +- train the initial team +- define support and escalation paths + +Required artifacts: +- onboarding checklist +- user list +- access log +- training notes +- implementation plan +- escalation contacts + +### Adoption +- move from first-use to repeatable use +- track blockers, exceptions, and visible value + +Required artifacts: +- adoption dashboard +- weekly health notes +- exception log +- usage summary +- customer action log + +### QBR +- review value, health, risks, and next-step opportunities + +Required artifacts: +- QBR memo +- KPI summary +- proof artifacts +- risk register +- follow-up summary + +### Renewal +- confirm continued value +- remove commercial or adoption risk + +Required artifacts: +- renewal brief +- value summary +- stakeholder map +- issues log +- commercial proposal +- approval trail + +### Expansion +- move into adjacent workflows or teams + +Required artifacts: +- expansion hypothesis +- business case +- scope doc +- implementation plan +- updated health score + +## Health Scoring + +Score each account monthly on: +- adoption +- value realization +- workflow fit +- evidence quality +- responsiveness +- stakeholder alignment +- risk exposure +- expansion readiness + +Bands: +- `Green`: 4.3 to 5.0 +- `Yellow`: 3.5 to 4.2 +- `Orange`: 2.5 to 3.4 +- `Red`: 1.0 to 2.4 + +Rules: +- any red score triggers immediate escalation +- any orange score requires a recovery plan + +## Operating Rules + +- Do not skip from discovery to expansion without proof of adoption. +- If it is not written down, it is not operationally real. +- Treat health scoring as a leading indicator, not reporting theater. +- Escalate risks before they become renewal problems. +- Use the same lifecycle language across sales, delivery, and customer success. diff --git a/docs/sales-toolkit/summit-customer-success-playbook.md b/docs/sales-toolkit/summit-customer-success-playbook.md new file mode 100644 index 00000000000..207d7d1003d --- /dev/null +++ b/docs/sales-toolkit/summit-customer-success-playbook.md @@ -0,0 +1,81 @@ +# Summit Customer Success Playbook + +**Status:** Active +**Owner:** Customer Success +**Purpose:** Drive adoption, retention, renewal readiness, and expansion readiness after deployment. + +## Phases + +| Phase | Goal | Exit Criteria | +|---|---|---| +| Onboarding | Get live and prove the first use case | Customer is configured, trained, and using the workflow | +| Adoption | Expand usage and reduce friction | Core users are active and value is visible | +| Stabilization | Make deployment reliable and repeatable | Open issues are low and rhythm is stable | +| Renewal | Confirm value and secure continuation | Renewal path, buyer map, and value summary are complete | +| Expansion | Extend Summit into adjacent workflows | Customer agrees the next use case is worth pursuing | + +## Cadence + +### Weekly +- review active accounts, risks, and blocked items +- check adoption progress against the success plan +- follow up on support or implementation issues +- capture customer signals and proof requests + +### Monthly +- review health scores for every active account +- confirm renewal dates, expansion signals, and open risks +- update success plans and next-step owners + +### Quarterly +- reassess account strategy +- review product usage, stakeholder coverage, and business value +- decide whether to renew, expand, remediate, or de-prioritize + +## Health Review Rules + +| Color | Meaning | Action | +|---|---|---| +| Green | On track | Continue plan | +| Yellow | At risk | Assign owner and fix date | +| Red | Off track | Escalate immediately | + +Health should be based on: +- product usage and workflow adoption +- stakeholder engagement +- support burden +- value proof +- renewal timing +- unresolved risks + +## Intervention Triggers + +- adoption stalls for more than one review cycle +- the champion leaves or goes dark +- the economic buyer has not seen value proof +- a critical workflow is blocked or underused +- support or implementation issues repeat +- renewal risk becomes visible +- expansion interest appears while current scope is unstable + +## Required Artifacts + +- success plan +- stakeholder map +- onboarding checklist +- weekly account notes +- monthly health review +- value summary +- renewal plan +- expansion proposal +- escalation log +- QBR / EBR notes +- risk register + +## Operating Rules + +- every active account has one owner +- every yellow or red account has one next action and one due date +- every renewal-ready account has a value summary before commercial review +- every expansion idea must be tied to proven customer value +- if Summit is not proving value, surface it early diff --git a/docs/sales-toolkit/summit-enterprise-procurement-appendix.md b/docs/sales-toolkit/summit-enterprise-procurement-appendix.md new file mode 100644 index 00000000000..def2303b080 --- /dev/null +++ b/docs/sales-toolkit/summit-enterprise-procurement-appendix.md @@ -0,0 +1,53 @@ +# Summit Enterprise Procurement Appendix + +**Status:** Active +**Owner:** GTM / Legal / Security +**Purpose:** Keep enterprise deals moving without weakening Summit’s security, legal, or commercial posture. + +## Security, Legal, and Procurement Checklist + +| Area | Checklist | Owner | +|---|---|---| +| Security | Provide security overview, architecture summary, data flow, access controls, logging, encryption, incident response | CTO / Security | +| Security | Confirm subprocessor list, retention/deletion posture, authentication model | CTO / Security | +| Legal | Send approved MSA, DPA, and IP posture summary | CEO / Counsel | +| Legal | Track liability, indemnity, audit rights, and SLA language in one place | CEO / Counsel | +| Procurement | Confirm buyer, sponsor, signer, billing entity, PO process | CEO / GTM | +| Procurement | Maintain a mutual action plan with dates and owners | CEO / GTM | + +## Common Objection Matrix + +| Objection | Response | Redirect Question | +|---|---|---| +| We already have a process | Summit makes that process defensible when it gets reviewed | Which workflow is hardest to defend later? | +| Security will block this | Start with the security packet and keep the pilot narrow | What security requirement would decide fit? | +| Legal will need more detail | Give legal a concrete workflow, not a vague concept | Which artifact does legal need first? | +| We need procurement to review it | Make the approval path explicit so it does not stall | Who signs, who blocks, who accelerates? | +| We need a discount | Trade on scope, term, or phase, not core controls | What scope would make the economics work? | +| This is too early | The question is whether the risk already exists in one workflow | Which workflow is most exposed today? | + +## Approval-Path Map + +| Path | Typical Approvers | What They Need | How to Keep It Moving | +|---|---|---|---| +| Business sponsor | Team lead, budget owner, exec sponsor | Clear value, scope, timing | Tie Summit to one workflow and one outcome | +| Security | Security reviewer, infra owner, IT | Security overview, architecture, controls, data handling | Send the packet early | +| Legal | Legal counsel, privacy counsel, procurement | MSA, DPA, redline positions, risk summary | Pre-wire standard terms and known exceptions | +| Procurement | Procurement lead, finance, AP | Vendor details, pricing, payment terms | Confirm entity and invoice workflow | +| Executive | VP/GM/C-suite | Risk reduction, business impact, timeline | Frame as decision-admissibility and defensibility | + +## Late-Stage Deal Acceleration Checklist + +- confirm the buying workflow and signoff chain +- confirm legal entity and billing details +- confirm security review owner and due date +- confirm open redlines and fallback positions +- confirm the mutual action plan has dates +- confirm scope has not expanded late +- confirm sponsor can state the business value in one sentence +- confirm the current packet is current +- confirm the close date has a real blocker list + +## Rule + +Keep one current version of each document and one shared log of open items. diff --git a/docs/sales-toolkit/summit-enterprise-procurement-playbook.md b/docs/sales-toolkit/summit-enterprise-procurement-playbook.md new file mode 100644 index 00000000000..71cc9b82fda --- /dev/null +++ b/docs/sales-toolkit/summit-enterprise-procurement-playbook.md @@ -0,0 +1,87 @@ +# Summit Enterprise Procurement Playbook + +**Status:** Active +**Owner:** GTM / Legal / Security +**Purpose:** Move enterprise deals from ask to artifact to decision with a consistent packet and escalation path. + +Appendix: +- [Enterprise Procurement Appendix](./summit-enterprise-procurement-appendix.md) + +## Procurement Stakeholders + +- economic buyer +- procurement +- legal / compliance +- security +- IT / platform +- business owner +- finance + +## Process Stages + +- discovery complete +- pilot scoped +- security review +- legal review +- commercial review +- approval +- paid deployment +- renewal / expansion + +## Required Artifact Pack + +- positioning brief +- pilot charter +- security packet +- architecture summary +- DPA / MSA redline positions +- commercial proposal +- ROI / value memo +- case study or proof artifact +- go-live checklist +- renewal / expansion plan + +## Security / Legal / Procurement Checklist + +- confirm use case, deployment model, and data classification +- send the canonical security review pack and privacy summary +- provide DPA, MSA positions, subprocessors, and retention/deletion posture +- confirm supported SSO, RBAC, logging, and incident-notification posture +- record every exception with owner and expiry +- do not promise unsupported residency, retention, compliance, or liability terms + +## Common Objections + +| Objection | Summit Response | Evidence | +|---|---|---| +| We need more security detail | Send the canonical security pack and answer from approved artifacts only | Security packet | +| We need a DPA before continuing | Provide standard DPA and mark deviations | DPA + privacy summary | +| Where is our data hosted? | Answer by supported deployment model and region only | Deployment summary | +| Can you commit to deletion on demand? | Commit only to documented retention/deletion posture | Retention policy | +| We need pen test / SOC2 / ISO proof | Share current verified evidence and scope limits | Attestation or summary | +| Legal needs custom liability language | Use standard guardrails and escalate only business-critical exceptions | Redline position sheet | + +## Approval Path + +| Decision Type | Default Owner | Founder Approval Required When | +|---|---|---| +| Security posture or evidence | Security / Governance | External claims change or unsupported control is requested | +| Privacy, DPA, retention | Legal | New data-use or transfer commitment is requested | +| Deployment or residency | Security / Ops | Customer asks for unsupported model or region | +| Pricing or packaging exception | Sales / Finance | Discount or structure breaks precedent | +| Liability or indemnity | Legal | Standard terms move materially | +| Product scope commitment | Product | Customer asks for custom feature or deadline | + +## Late-Stage Deal Acceleration + +- confirm economic buyer, champion, and legal/security owner +- convert open-ended threads into a dated checklist +- send the full artifact pack in one packet +- surface all exceptions together +- use standard redlines first +- set a firm decision date and mutual action plan +- remove any unverified claim immediately + +## Rule + +Procurement should move from ask to artifact to decision. Anything else is delay. diff --git a/docs/sales-toolkit/summit-qbr-and-ebr-pack.md b/docs/sales-toolkit/summit-qbr-and-ebr-pack.md new file mode 100644 index 00000000000..e0a52135635 --- /dev/null +++ b/docs/sales-toolkit/summit-qbr-and-ebr-pack.md @@ -0,0 +1,68 @@ +# Summit QBR and EBR Pack + +**Status:** Active +**Owner:** Customer Success / GTM +**Purpose:** Standard quarterly and executive business review format for Summit accounts. + +## Internal Prep Agenda + +- confirm account status and meeting objective +- review prior commitments and whether they were met +- check adoption, usage, and workflow health +- review evidence quality, exception patterns, and admissibility outcomes +- identify blockers, risks, and customer friction +- decide the expansion or renewal ask + +## Customer-Facing Agenda + +- welcome, objectives, and agenda +- what Summit was intended to solve +- what changed since the last review +- metrics and operational results +- evidence quality and decision-admissibility performance +- open issues, risks, and blockers +- expansion opportunities or next-phase options +- summary of decisions and follow-up actions + +## Customer-Facing Metrics + +- active users or workflow count +- decision throughput +- review cycle time +- admissibility pass or escalate rate +- evidence completeness rate +- exception volume +- exception resolution time +- adoption trend +- renewal or expansion readiness + +## Narrative Rules + +- start with the customer’s problem, not Summit’s features +- tie every metric to a workflow outcome +- show what became more admissible, traceable, or defensible +- surface blockers directly +- use proof artifacts, not abstract claims +- keep expansion grounded in observed usage + +## Expansion Hooks + +- another team with the same admissibility problem +- a second workflow with similar evidence requirements +- more automation for routine evidence checks +- stronger governance or audit reporting +- broader deployment across adjacent decision paths + +## Follow-Up Actions + +- decision owner +- customer owner +- Summit owner +- action item +- due date +- risk or blocker +- next check-in date + +## Usage Rule + +If the customer cannot explain the value in their own words after the review, the meeting did not land. diff --git a/docs/sales-toolkit/summit-renewal-and-expansion-playbook.md b/docs/sales-toolkit/summit-renewal-and-expansion-playbook.md new file mode 100644 index 00000000000..90e365086fa --- /dev/null +++ b/docs/sales-toolkit/summit-renewal-and-expansion-playbook.md @@ -0,0 +1,80 @@ +# Summit Renewal and Expansion Playbook + +**Status:** Active +**Owner:** Customer Success / GTM +**Purpose:** Keep renewals predictable and expansions tied to proven customer value. + +## Renewal Timeline + +| Timing | What Happens | Owner | +|---|---|---| +| 120-90 days before renewal | Confirm scope, stakeholders, and success criteria | Account owner | +| 90-60 days before renewal | Build value summary, validate risks, identify expansion candidates | Customer success | +| 60-30 days before renewal | Align on renewal path, pricing, procurement, and legal requirements | Account owner + finance | +| 30-14 days before renewal | Resolve objections and finalize commercial terms | Account owner | +| Final 14 days | Close paperwork and confirm post-renewal check-in | Account owner | + +## Pre-Renewal Checklist + +- confirm renewal date, term, and current scope +- identify economic buyer, champion, and approver +- review usage, adoption, and support history +- capture realized value and proof artifacts +- review open risks and unresolved issues +- confirm pricing and procurement path +- identify expansion opportunities +- prepare renewal recommendation and fallback plan + +## Value Summary Template + +| Section | What to Include | +|---|---| +| Customer goal | What the customer expected to achieve | +| Summit deployed | Which workflows, teams, or use cases are live | +| Value delivered | What changed operationally, commercially, or risk-wise | +| Proof | Metrics, artifacts, reports, or examples | +| Remaining gaps | What is not yet solved | +| Renewal recommendation | Renew, renew with expansion, or renew with remediation | + +## Renewal Risks + +- low adoption or unclear ownership +- value not visible to the economic buyer +- missing proof artifacts or weak success metrics +- procurement, legal, or security delays +- support or implementation friction +- pricing mismatch relative to realized value + +## Expansion Sequence + +1. Renew the current scope cleanly. +2. Validate that the primary workflow is stable and adopted. +3. Identify adjacent workflows with the same control or evidence need. +4. Propose a small expansion that reuses existing trust, integrations, or proof. +5. Link expansion to a concrete business outcome. +6. Expand only after the original value is durable. + +## Upsell Triggers + +- the customer asks for more workflows, users, or teams +- the current deployment proves value faster than expected +- a new risk, compliance, or audit requirement appears +- another department wants the same control or evidence layer +- manual reviews or escalations remain outside current scope + +## Renewal Prep Rules + +- start early enough to fix problems before procurement starts +- never lead with price before value is explicit +- use evidence, not anecdotes +- do not expand until current scope is stable +- escalate any risk that could affect trust, timing, or signature + +## Renewal Call Agenda + +- confirm current scope and renewal date +- review value delivered and proof points +- review open risks or issues +- confirm renewal path and commercial terms +- discuss expansion opportunities if appropriate +- align on next steps, owners, and dates diff --git a/docs/sales-toolkit/summit-sales-enablement-kit.md b/docs/sales-toolkit/summit-sales-enablement-kit.md new file mode 100644 index 00000000000..07db9db1c0f --- /dev/null +++ b/docs/sales-toolkit/summit-sales-enablement-kit.md @@ -0,0 +1,90 @@ +# Summit Sales Enablement Kit + +**Status:** Active +**Owner:** GTM +**Purpose:** Minimum sales assets, prep requirements, objection rules, and deal inspection questions for Summit. + +## Required Asset Inventory + +- category one-pager +- manifesto or category narrative +- discovery question guide +- demo script +- pilot SOW template +- pilot kickoff kit +- success criteria template +- security and governance overview +- QBR / EBR template +- objection handling cheat sheet +- deal inspection checklist +- pricing and packaging summary +- customer proof points or case notes +- follow-up email templates + +## Stage-by-Stage Usage + +### Prospecting +- use the category one-pager and manifesto +- qualify for a real decision-admissibility problem + +### Discovery +- use the discovery guide +- capture workflow, risk, evidence, and buying process + +### Demo +- use the demo script +- show the workflow, not a feature tour + +### Pilot design +- use the pilot SOW and success criteria template +- keep scope narrow enough to prove value + +### Security and procurement +- use the approved security and governance materials +- escalate exceptions instead of improvising + +### Close and expansion +- use success criteria and QBR / EBR materials +- expand only when the core workflow is stable + +## Minimum Call Prep Requirements + +- account summary +- target persona and role +- current stage and next decision +- last call notes and open items +- hypothesis for the customer’s admissibility problem +- relevant asset for the stage +- clear ask for the meeting +- one likely objection and response + +## Objection Handling Rules + +- do not argue abstractly about AI safety or governance +- translate objections into operational risk +- answer with approved category language +- do not overclaim product capability +- do not promise custom behavior without approval +- route risk objections to the security and governance materials +- route value objections to the workflow and success criteria + +## Deal Inspection Questions + +- what decision is the customer trying to improve? +- who owns the problem? +- what breaks today if Summit is not in place? +- what evidence or traceability requirement matters most? +- what is the minimum successful workflow? +- what would make the customer say this is working? +- what is the buying process and who approves? +- what is the likely blocker? +- is the use case narrow enough to pilot? +- is there a clear expansion path? + +## Operating Rules + +- use the same category language in every conversation +- keep the asset set small and current +- do not move a deal forward without a clear decision path +- do not confuse interest with qualification +- do not let reps improvise key claims without a source of truth diff --git a/docs/standards/CAC_v1.0.md b/docs/standards/CAC_v1.0.md new file mode 100644 index 00000000000..33cf10da6cd --- /dev/null +++ b/docs/standards/CAC_v1.0.md @@ -0,0 +1,622 @@ +# Cognitive Admissibility Certification (CAC) v1.0 + +**Status:** Proposed Standard + +## 1. Abstract + +Cognitive Admissibility is the property that a system-generated decision is supported by verifiable, reproducible, integrity-protected evidence produced under enforceable policy controls. + +Cognitive Admissibility Certification (CAC) defines a normative certification framework for AI systems, data pipelines, and decision systems that produce, influence, or operationalize decisions. + +This specification establishes: + +- Mandatory controls for ingestion, evidence generation, admissibility verification, runtime enforcement, and adversarial testing. +- Machine-readable schemas for evidence and certification artifacts. +- A governance model for certification authorities, revocation, and public transparency. + +A system conformant to CAC MUST produce binary admissibility outcomes (`PASS` or `FAIL`) with cryptographically verifiable evidence. + +## 2. Terminology (Normative) + +The key words **MUST**, **MUST NOT**, **REQUIRED**, **SHALL**, **SHALL NOT**, **SHOULD**, **SHOULD NOT**, **RECOMMENDED**, **MAY**, and **OPTIONAL** in this document are to be interpreted as described in RFC 2119 and RFC 8174. + +- **Admissibility**: The state in which a decision artifact satisfies all required integrity, provenance, and policy controls defined by this specification for the declared certification level. +- **Evidence Bundle**: A structured artifact set containing deterministic reports, deterministic metrics, and non-deterministic stamping material that together prove how a decision was produced and validated. +- **Provenance Chain**: A cryptographically linked sequence of lineage records that identifies source inputs, transformations, model/runtime versions, and policy evaluations from ingestion to decision output. +- **Ingestion Validation**: The mandatory pre-processing gate that validates schema conformance, computes content-addressable identity, and detects poisoning or malicious input characteristics. +- **Certification Authority (CA)**: A governed entity authorized to issue, renew, suspend, and revoke CAC certificates and to maintain certification records. +- **CACert**: The signed certification artifact that attests a system’s CAC conformance level and certification validity window. + +## 3. System Model + +### 3.1 Processing Model + +Conformant systems MUST implement the logical pipeline below: + +`Input → Processing → Output → Decision` + +- **Input**: External or internal data entering the system boundary. +- **Processing**: Transformations, model inference, policy checks, orchestration, and post-processing. +- **Output**: Structured result artifacts generated by processing. +- **Decision**: Actionable verdict, recommendation, or control action derived from output. + +### 3.2 Trust Boundaries + +A conformant system MUST define and document trust boundaries at minimum for: + +1. External input boundary (untrusted sources). +2. Build/runtime supply-chain boundary (trusted execution baseline). +3. Evidence production boundary (tamper-evident evidence generation and storage). +4. Decision publication boundary (signed verdict release). + +Cross-boundary transitions MUST be logged with provenance events and integrity assertions. + +## 4. Certification Requirements (Core) + +### 4.1 Ingestion Requirements + +All external inputs MUST: + +1. Be validated against a versioned schema before processing. +2. Be assigned a content hash (SHA-256 or stronger). +3. Pass poisoning detection checks before admission. +4. Produce ingestion evidence linked to the provenance chain. + +A system MUST reject and quarantine any input that fails these requirements. + +### 4.2 Evidence Requirements + +Conformant systems MUST emit the following evidence artifacts per decision run: + +- `report.json` (deterministic) +- `metrics.json` (deterministic) +- `stamp.json` (non-deterministic) + +Evidence MUST: + +1. Be reproducible from identical inputs, configuration, and software bill of materials (SBOM), excluding explicitly non-deterministic fields in `stamp.json`. +2. Contain a complete provenance chain from ingestion to decision. +3. Include integrity verification material (hashes, signatures, and algorithm metadata). + +### 4.3 Admissibility Requirements + +A conformant admissibility gate MUST: + +1. Verify signatures over evidence artifacts. +2. Validate SBOM completeness and integrity. +3. Validate provenance chain integrity and continuity. + +The admissibility outcome MUST be binary and MUST be exactly one of: + +- `PASS` +- `FAIL` + +No partial or indeterminate outcome is permitted for certification decisions. + +### 4.4 Enforcement Requirements + +Conformant runtime environments MUST enforce: + +1. Inadmissible artifacts (FAIL) MUST NOT execute. +2. Runtime policy controls MUST block invalid workloads at admission and during execution. +3. Policy denial events MUST be logged and bound to provenance identifiers. + +### 4.5 Adversarial Testing Requirements + +Conformant systems MUST: + +1. Execute continuous adversarial simulations across ingestion, processing, and decision stages. +2. Maintain regression tests for each discovered failure mode. +3. Require that previously observed adversarial failures remain non-regressing before certification renewal. + +## 5. Certification Process + +### 5.1 Certification Levels + +- **Level 1 — Basic Evidence Compliance** + - MUST satisfy Sections 4.1 and 4.2. +- **Level 2 — Enforced Admissibility** + - MUST satisfy Level 1 and Sections 4.3 and 4.4. +- **Level 3 — Continuous Adversarial Assurance** + - MUST satisfy Level 2 and Section 4.5. + +### 5.2 Audit Procedure + +Certification audits MUST include: + +1. Evidence review for schema conformance and completeness. +2. Reproducibility verification of deterministic artifacts. +3. Adversarial test validation, including regression evidence. + +Auditors SHOULD sample multiple decision runs across different risk profiles. + +### 5.3 Certification Output + +Successful certification MUST produce a signed `CACert` JSON artifact containing at minimum: + +- `system_id` +- `certification_level` +- `evidence_hash` +- `validity_window` + +## 6. Schema Definitions + +All schemas in this section MUST be interpreted as JSON Schema Draft 2020-12. Implementations MUST reject unknown fields (`additionalProperties: false`) and MUST enforce all required properties. + +### 6.1 Evidence Bundle Schema + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://cac-standard.org/schema/evidence-bundle/v1.0", + "title": "CAC Evidence Bundle", + "type": "object", + "additionalProperties": false, + "required": [ + "bundle_id", + "system_id", + "run_id", + "created_at", + "report", + "metrics", + "stamp", + "provenance_chain", + "integrity" + ], + "properties": { + "bundle_id": { "type": "string", "pattern": "^[a-zA-Z0-9._:-]{8,128}$" }, + "system_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "run_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "created_at": { "type": "string", "format": "date-time" }, + "report": { + "type": "object", + "additionalProperties": false, + "required": ["path", "sha256", "deterministic"], + "properties": { + "path": { "type": "string", "const": "report.json" }, + "sha256": { "type": "string", "pattern": "^[a-f0-9]{64}$" }, + "deterministic": { "type": "boolean", "const": true } + } + }, + "metrics": { + "type": "object", + "additionalProperties": false, + "required": ["path", "sha256", "deterministic"], + "properties": { + "path": { "type": "string", "const": "metrics.json" }, + "sha256": { "type": "string", "pattern": "^[a-f0-9]{64}$" }, + "deterministic": { "type": "boolean", "const": true } + } + }, + "stamp": { + "type": "object", + "additionalProperties": false, + "required": ["path", "sha256", "deterministic", "issued_at", "nonce"], + "properties": { + "path": { "type": "string", "const": "stamp.json" }, + "sha256": { "type": "string", "pattern": "^[a-f0-9]{64}$" }, + "deterministic": { "type": "boolean", "const": false }, + "issued_at": { "type": "string", "format": "date-time" }, + "nonce": { "type": "string", "minLength": 8, "maxLength": 256 } + } + }, + "provenance_chain": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "step", + "artifact_hash", + "actor", + "timestamp", + "previous_hash" + ], + "properties": { + "step": { "type": "string", "minLength": 1, "maxLength": 128 }, + "artifact_hash": { "type": "string", "pattern": "^[a-f0-9]{64}$" }, + "actor": { "type": "string", "minLength": 1, "maxLength": 128 }, + "timestamp": { "type": "string", "format": "date-time" }, + "previous_hash": { + "type": "string", + "pattern": "^([a-f0-9]{64}|GENESIS)$" + } + } + } + }, + "integrity": { + "type": "object", + "additionalProperties": false, + "required": ["signature", "signing_algorithm", "signing_key_id"], + "properties": { + "signature": { "type": "string", "minLength": 32, "maxLength": 8192 }, + "signing_algorithm": { + "type": "string", + "enum": ["ed25519", "ecdsa-p256", "rsa-pss-sha256"] + }, + "signing_key_id": { "type": "string", "minLength": 3, "maxLength": 256 } + } + } + } +} +``` + +### 6.2 Admissibility Verdict Schema + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://cac-standard.org/schema/admissibility-verdict/v1.0", + "title": "CAC Admissibility Verdict", + "type": "object", + "additionalProperties": false, + "required": [ + "system_id", + "run_id", + "verdict", + "reason_codes", + "evaluated_at", + "evidence_bundle_hash" + ], + "properties": { + "system_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "run_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "verdict": { "type": "string", "enum": ["PASS", "FAIL"] }, + "reason_codes": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "pattern": "^[A-Z][A-Z0-9_]{2,64}$" + } + }, + "evaluated_at": { "type": "string", "format": "date-time" }, + "evidence_bundle_hash": { "type": "string", "pattern": "^[a-f0-9]{64}$" } + } +} +``` + +### 6.3 Ingestion Report Schema + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://cac-standard.org/schema/ingestion-report/v1.0", + "title": "CAC Ingestion Report", + "type": "object", + "additionalProperties": false, + "required": [ + "input_id", + "received_at", + "schema_id", + "schema_valid", + "content_hash", + "poisoning_scan", + "admitted" + ], + "properties": { + "input_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "received_at": { "type": "string", "format": "date-time" }, + "schema_id": { "type": "string", "minLength": 3, "maxLength": 256 }, + "schema_valid": { "type": "boolean" }, + "content_hash": { "type": "string", "pattern": "^[a-f0-9]{64}$" }, + "poisoning_scan": { + "type": "object", + "additionalProperties": false, + "required": ["status", "detectors", "risk_score"], + "properties": { + "status": { "type": "string", "enum": ["PASS", "FAIL"] }, + "detectors": { + "type": "array", + "minItems": 1, + "items": { "type": "string", "minLength": 1, "maxLength": 128 } + }, + "risk_score": { "type": "number", "minimum": 0, "maximum": 1 } + } + }, + "admitted": { "type": "boolean" } + } +} +``` + +### 6.4 CACert Schema + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://cac-standard.org/schema/cacert/v1.0", + "title": "CAC Certificate Artifact", + "type": "object", + "additionalProperties": false, + "required": [ + "certificate_id", + "system_id", + "certification_level", + "issued_at", + "validity_window", + "evidence_hash", + "issuer", + "signature" + ], + "properties": { + "certificate_id": { "type": "string", "pattern": "^CAC-[A-Z0-9-]{8,64}$" }, + "system_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "certification_level": { "type": "integer", "enum": [1, 2, 3] }, + "issued_at": { "type": "string", "format": "date-time" }, + "validity_window": { + "type": "object", + "additionalProperties": false, + "required": ["not_before", "not_after"], + "properties": { + "not_before": { "type": "string", "format": "date-time" }, + "not_after": { "type": "string", "format": "date-time" } + } + }, + "evidence_hash": { "type": "string", "pattern": "^[a-f0-9]{64}$" }, + "issuer": { + "type": "object", + "additionalProperties": false, + "required": ["ca_id", "name"], + "properties": { + "ca_id": { "type": "string", "minLength": 3, "maxLength": 128 }, + "name": { "type": "string", "minLength": 1, "maxLength": 256 } + } + }, + "signature": { + "type": "object", + "additionalProperties": false, + "required": ["algorithm", "value", "key_id"], + "properties": { + "algorithm": { + "type": "string", + "enum": ["ed25519", "ecdsa-p256", "rsa-pss-sha256"] + }, + "value": { "type": "string", "minLength": 32, "maxLength": 8192 }, + "key_id": { "type": "string", "minLength": 3, "maxLength": 256 } + } + } + } +} +``` + +## 7. Governance Model + +### 7.1 Certification Authority (CAC Authority) + +A CAC Authority MUST: + +1. Maintain documented issuance, renewal, suspension, and revocation procedures. +2. Maintain cryptographic key lifecycle controls (generation, rotation, revocation). +3. Retain audit records for all certificate lifecycle events. +4. Operate an immutable certification event log. + +### 7.2 Specification Evolution Process + +1. Normative changes MUST increment the major or minor version. +2. Editorial or non-normative clarifications MAY increment patch version. +3. Any normative change MUST include backward-compatibility impact analysis. + +### 7.3 Versioning Rules + +- `MAJOR`: incompatible schema or requirement changes. +- `MINOR`: additive compatible controls or schema fields. +- `PATCH`: non-normative corrections. + +A certificate MUST declare the CAC specification version it targets. + +### 7.4 Revocation Mechanism + +The CAC Authority MUST provide: + +1. A signed revocation list updated at a defined interval. +2. A real-time revocation status endpoint. +3. Reason codes for revocation events. + +Relying parties MUST check revocation status prior to trusting a CACert. + +### 7.5 Public Registry of Certified Systems + +The CAC Authority SHOULD publish a public registry containing: + +- Certificate identifier +- System identifier +- Certification level +- Validity status +- Revocation status + +Registry entries MUST be tamper-evident and time-stamped. + +## 8. Compliance Mapping + +### 8.1 SOC 2 Mapping + +CAC controls map to SOC 2 trust criteria: + +- **CC6 / CC7**: Logical access and change integrity via signed evidence and policy enforcement. +- **CC8**: Change management through reproducible evidence and admissibility gates. +- **A1.2**: Monitoring through adversarial testing and regression controls. + +### 8.2 NIST AI RMF Mapping + +CAC supports AI RMF outcomes: + +- **Govern**: Defined authority, audit procedures, and revocation. +- **Map**: Trust boundary definition and risk visibility. +- **Measure**: Deterministic evidence + adversarial testing metrics. +- **Manage**: Binary admissibility enforcement and remediation loops. + +### 8.3 EU AI Act Mapping (Transparency + Risk) + +CAC provides: + +- Provenance and traceability controls for decision evidence. +- Risk-relevant testing evidence through adversarial simulation. +- Operational transparency through public certification and revocation records. + +### 8.4 SLSA Mapping (Supply Chain Integrity) + +CAC admissibility requirements align with SLSA-style supply-chain assurances: + +- Provenance generation and validation. +- Artifact integrity via cryptographic signing. +- Build/runtime integrity assertions bound to decision artifacts. + +## 9. Security Considerations + +Implementations of CAC MUST address: + +1. **Adversarial threats**: Prompt/manipulation attacks, model exploitation, and policy evasion. +2. **Supply-chain attacks**: Dependency tampering, build compromise, signature forgery. +3. **Data poisoning**: Malicious training or inference-time input manipulation. +4. **Insider risk**: Unauthorized evidence alteration, key misuse, policy bypass attempts. + +Systems SHOULD implement hardware-backed keys, immutable logging, and separation of duties for certification operations. + +## 10. Implementation Guidance + +### 10.1 Reference Architecture + +A minimally complete CAC deployment SHOULD include: + +- Ingestion validation service (schema + poisoning detection). +- Evidence emitter (`report.json`, `metrics.json`, `stamp.json`). +- Admissibility verifier (signature, SBOM, provenance checks). +- Runtime enforcement engine (admission controller/policy engine). +- Adversarial simulation and regression harness. +- Certification issuer service (CACert generation + revocation support). + +### 10.2 Minimal Compliant System + +A minimal Level 1 implementation MUST: + +1. Validate all external input schemas. +2. Produce deterministic `report.json` and `metrics.json`. +3. Produce non-deterministic `stamp.json`. +4. Emit a valid Evidence Bundle. + +A minimal Level 2 implementation MUST additionally block FAIL artifacts from execution. + +A minimal Level 3 implementation MUST additionally automate adversarial simulation and regression execution. + +### 10.3 Integration Patterns + +Conformant integration MAY be performed via: + +- CI admissibility gates before deployment. +- Kubernetes admission/runtime policy enforcement. +- Continuous red-team pipelines integrated with release qualification. + +## 11. Appendix + +### 11.1 Example Evidence Bundle + +```json +{ + "bundle_id": "bundle-20260331-0001", + "system_id": "summit-cognitive-prod", + "run_id": "run-20260331-142030Z", + "created_at": "2026-03-31T14:20:31Z", + "report": { + "path": "report.json", + "sha256": "8d03f9a54fb8fbf698f6f5d0bb4be2f90a2f8bd2282abf6df80f6421914fa3e3", + "deterministic": true + }, + "metrics": { + "path": "metrics.json", + "sha256": "f0c7eb20d2ab52e67dc3ef849ca2f6895caf08a2f8f5bf5d4ce6e35f1f5e8f9f", + "deterministic": true + }, + "stamp": { + "path": "stamp.json", + "sha256": "42ed219ab615c75af293597ddf0f6a66673a9f58b8b3005af8dbad347f6255bf", + "deterministic": false, + "issued_at": "2026-03-31T14:20:31Z", + "nonce": "f09f7d2f9f9e4b71b7b6691e13f8e783" + }, + "provenance_chain": [ + { + "step": "ingestion", + "artifact_hash": "eb4e7f42adf777f2d6a51f0df6ab84d8fbb9d844f6818dd1f2836f4326f6982f", + "actor": "ingestion-gate", + "timestamp": "2026-03-31T14:20:28Z", + "previous_hash": "GENESIS" + }, + { + "step": "inference", + "artifact_hash": "d82193c418ddcdb3f17b6a2d8f0fd6b0a1190fbc3dd0cc4d5fd3d8766af293fe", + "actor": "summit-cognitive-runtime", + "timestamp": "2026-03-31T14:20:30Z", + "previous_hash": "eb4e7f42adf777f2d6a51f0df6ab84d8fbb9d844f6818dd1f2836f4326f6982f" + } + ], + "integrity": { + "signature": "MEUCIQDvL1Q3mV7k1RY1y6N0QYz0FfD2YdJwXUQY5g3r1K2f7QIgGm9R5LwW4x8u2n8P8j9h3v3Y6lV1v7d2n5q1q2m8r9E=", + "signing_algorithm": "ed25519", + "signing_key_id": "key-cac-prod-01" + } +} +``` + +### 11.2 Example CACert + +```json +{ + "certificate_id": "CAC-9A3F-2026-0001", + "system_id": "summit-cognitive-prod", + "certification_level": 3, + "issued_at": "2026-03-31T15:00:00Z", + "validity_window": { + "not_before": "2026-03-31T15:00:00Z", + "not_after": "2027-03-31T15:00:00Z" + }, + "evidence_hash": "57c9ef1fbe747d3a8a2ac9dfb14c8db7a59681c7450f2ebd97d8fcb9b4ee9e7f", + "issuer": { + "ca_id": "cac-authority-us-01", + "name": "Cognitive Admissibility Certification Authority" + }, + "signature": { + "algorithm": "ed25519", + "value": "5Q4Hk4hVBIGyPwA9q7zjzXstzbNh8FCQGgq8VMQh5J2QdZpvbT0b2wN6Q0N8+u5lE1Abm4fI8J9EwX8CokGvDw==", + "key_id": "ca-signing-key-2026-01" + } +} +``` + +### 11.3 Example PASS Case (Admissibility Verdict) + +```json +{ + "system_id": "summit-cognitive-prod", + "run_id": "run-20260331-142030Z", + "verdict": "PASS", + "reason_codes": ["ALL_CONTROLS_SATISFIED"], + "evaluated_at": "2026-03-31T14:21:00Z", + "evidence_bundle_hash": "57c9ef1fbe747d3a8a2ac9dfb14c8db7a59681c7450f2ebd97d8fcb9b4ee9e7f" +} +``` + +### 11.4 Example FAIL Case (Admissibility Verdict) + +```json +{ + "system_id": "summit-cognitive-prod", + "run_id": "run-20260331-142530Z", + "verdict": "FAIL", + "reason_codes": ["SBOM_INCOMPLETE", "PROVENANCE_GAP_DETECTED"], + "evaluated_at": "2026-03-31T14:26:00Z", + "evidence_bundle_hash": "f111ef1fbe747d3a8a2ac9dfb14c8db7a59681c7450f2ebd97d8fcb9b4ee9a11" +} +``` + +## 12. Internal Consistency and Conformance Notes + +This specification is internally consistent under the following constraints: + +1. Binary admissibility in Section 4.3 is enforced in Schema 6.2 (`PASS|FAIL`). +2. Required evidence artifacts in Section 4.2 are enforced in Schema 6.1 (`report`, `metrics`, `stamp`). +3. Strict field rejection is enforced in all schemas via `additionalProperties: false`. +4. Certification output fields in Section 5.3 are enforced in Schema 6.4. +5. Ingestion controls in Section 4.1 are enforced in Schema 6.3. + +Any implementation claiming CAC v1.0 conformance MUST satisfy all normative requirements in Sections 2 through 10 and produce schema-valid artifacts as defined in Sections 6 and 11. diff --git a/docs/standards/cognitive-security-protocol-v0.1.md b/docs/standards/cognitive-security-protocol-v0.1.md new file mode 100644 index 00000000000..04d4c39de01 --- /dev/null +++ b/docs/standards/cognitive-security-protocol-v0.1.md @@ -0,0 +1,228 @@ +# Cognitive Security Protocol (CSP) v0.1 + +**Status:** Draft +**Owner:** Founders / Product / Governance / Engineering +**Purpose:** Define the minimum portable contract for proving that a Summit-governed output, workflow decision, or AI-assisted recommendation is admissible. + +## Protocol Intent + +The Cognitive Security Protocol standardizes how Summit expresses: + +- decision identity +- execution lineage +- evidence binding +- verification outcomes +- policy context +- admissibility verdicts + +The protocol exists so that a third party can inspect a portable bundle and answer one question: + +**Was this output produced under valid, replayable, evidence-bound conditions?** + +## Core Principle + +A Summit-governed decision is admissible only if it is: + +- attributable +- evidence-backed +- policy-bound +- replayable within the declared execution envelope +- verified by named controls + +## Protocol Objects + +### 1. Decision definition + +The rule set and execution contract that must hold before a decision can be treated as valid. + +Required concepts: + +- prompt or workflow contract +- model or toolchain contract +- retrieval contract +- evidence requirements +- policy pack version +- admissibility thresholds + +### 2. Decision execution trace + +The deterministic record of what actually happened for one run. + +Minimum fields: + +- `run_id` +- `parent_run_id` +- `subject_digest` +- `evidence_ids` +- `policy_digest` +- `retrieval_fingerprint` +- `graph_plan_fingerprint` +- `sql_plan_fingerprint` +- `tool_trace` +- `output_hash` + +### 3. Decision proof + +The portable proof object that binds identity, evidence, lineage, and verdict. + +Minimum fields: + +- `decision_id` +- `subject_digest` +- `lineage_run_id` +- `inputs` +- `tools` +- `policies_applied` +- `verification_ref` +- `reproducible` +- `verdict` + +### 4. Verification summary + +The machine-readable result of evaluating a subject against required Summit properties. + +Minimum fields: + +- `subject.digest` +- `verifier.name` +- `verifier.version` +- `policy_digest` +- `results[].property` +- `results[].status` + +## Portable Bundle Layout + +Summit bundles should use a stable file layout: + +```text +attestations/ + subject.json + provenance.json + sbom.json + lineage.json + evidence-map.yaml + verification-summary.json + decision-proof.json + policy.json + signatures/ +``` + +Deterministic artifacts must not contain timestamps or other unstable runtime metadata. Non-deterministic metadata belongs in separate stamp-oriented files or signature envelopes. + +## Canonical Identity Rules + +### Universal join keys + +Every material object should be linkable by: + +- `subject_digest` +- `evidence_id` +- `run_id` + +### Identity restrictions + +Identity must not depend on: + +- transport headers +- trace context placement +- provider-specific run IDs +- mutable environment labels + +Transport metadata is evidence only, not identity authority. + +## Verified Properties + +Protocol conformance is evaluated through named verified properties. The canonical registry lives in [policies/ops/verified-properties.yaml](../../policies/ops/verified-properties.yaml). + +Minimum required properties for baseline admissibility: + +- `summit.evidence.identity_consistent` +- `summit.output.deterministic` +- `summit.provenance.complete` +- `summit.lineage.present` +- `summit.verification.passed` + +Additional properties may be required by policy pack, workflow class, or deployment tier. + +## Admissibility States + +### `ADMISSIBLE` + +All required properties pass and the declared policy does not deny release or action. + +### `NOT_ADMISSIBLE` + +One or more critical properties fail, evidence is incomplete, or replay / policy conditions are violated. + +### `REVIEW_REQUIRED` + +No critical property fails, but one or more review-bound conditions require human approval before action. + +## Policy Contract + +Every admissibility verdict must name the exact policy used. + +Minimum requirements: + +- policy files are hashable +- waivers are explicit +- waivers are time-bounded +- verifier output includes `policy_digest` + +## Summit Extensions + +The protocol intentionally goes beyond software provenance. Summit adds: + +- decision proofs, not only build proofs +- retrieval and planner fingerprints as evidence +- human review and exception traces +- admissibility verdicts as machine-readable outputs + +This is the bridge from supply-chain integrity to decision-validity enforcement. + +## Certification Ladder + +Summit may classify conforming systems using a simple ladder: + +- `CSP-Traceable` +- `CSP-Replayable` +- `CSP-Verified` +- `CSP-Admissible` + +Certification requires both bundle conformance and successful verification against an approved policy pack. + +## Relationship To Existing Summit Standards + +- [Summit Evidence Protocol](./summit-evidence-protocol.md) defines the artifact discipline for evidence-first outputs. +- [Summit Attestation Bundle](./summit-attestation-bundle-v0.1.md) defines the portable file layout and minimal coherence checks for CSP verification bundles. +- This CSP document defines the portable admissibility contract above those artifacts. +- Decision execution trace and decision proof objects are the protocol-native bridge between engineering controls and customer-facing assurance. + +## Minimal Verification Entry Point + +Summit’s baseline portable-bundle verification entry point is: + +```bash +pnpm verify:attestation-bundle attestations +``` + +The verifier checks the coherence of: + +- `subject.json` +- `verification-summary.json` +- `decision-proof.json` +- `policy.json` + +Workflows that cannot emit those files are not yet portable under CSP. + +## Practical Default + +If a workflow cannot emit: + +- a stable subject digest +- a linked evidence map +- a verification summary +- a policy digest +- a decision proof + +then the workflow is not yet operating under Summit-grade admissibility controls. diff --git a/docs/standards/summit-attestation-bundle-v0.1.md b/docs/standards/summit-attestation-bundle-v0.1.md new file mode 100644 index 00000000000..4444dd7ae25 --- /dev/null +++ b/docs/standards/summit-attestation-bundle-v0.1.md @@ -0,0 +1,91 @@ +> Owner: Governance +> Status: draft +> Version: v0.1 + +# Summit Attestation Bundle Contract v0.1 + +## Purpose + +This standard defines a portable attestation-bundle contract for Summit governance and decision-proof workflows. The contract is intentionally constrained so the same bundle can be produced, moved, verified, and archived across CI, local tooling, and external evidence stores without losing meaning. + +## Scope + +The bundle contract applies to decision proofs, governance attestations, and verification summaries that must remain deterministic and schema-valid. + +It aligns with: + +- `schemas/governance/decision-proof.schema.json` +- `schemas/governance/governance-approval-attestation.schema.json` +- `schemas/governance/governance-approval-verification-report.schema.json` +- `schemas/evidence-manifest.schema.json` + +## Bundle Invariants + +- The bundle is portable: no workspace-relative assumptions are required to interpret the subject or verification result. +- The bundle is deterministic: subject and summary artifacts are byte-stable after canonical serialization. +- The bundle is evidence-first: no verification summary is valid without a subject record and proof references. +- The bundle is replayable: the verifier, policy, and identity context are explicit. +- The bundle is closed-world: additional fields are denied unless they are part of the schema. + +## Required Files + +- `attestation-subject.json` +- `verification-summary.json` +- `decision-proof.json` +- `evidence-manifest.json` when the bundle is exported as a portable archive +- `stamp.json` for runtime-only metadata + +## Contract Rules + +### Attestation Subject + +The subject record captures the exact thing being attested: a decision, claim, artifact, workflow, or bundle. + +Required subject semantics: + +- A stable `subject_id` +- A canonical `subject_digest` +- A declared `subject_type` +- A `canonical_ref` to the durable source +- The input set used to derive the subject +- The tools and policies applied to the subject +- A `reproducible` flag + +### Verification Summary + +The verification summary captures the verifier outcome for the attestation subject. + +Required summary semantics: + +- A stable `subject_id` and `subject_digest` +- A bundle-level digest chain +- A trust epoch +- A policy hash and policy version +- Verifier identity and execution mode +- A terminal result of `PASS`, `FAIL`, or `REVIEW_REQUIRED` +- A failure class when the result is not `PASS` + +### Determinism Rules + +- Subject and summary artifacts must not rely on timestamps for semantic validity. +- Runtime timestamps belong only in `stamp.json`. +- Hashes use `sha256`-prefixed digests. +- Unknown properties are rejected by schema. +- Re-running serialization on the same logical input must produce the same output bytes. + +## Compatibility + +The contract is designed to work with Summit decision-proof and governance attestation flows without introducing a new verifier model. + +- `decision-proof.json` remains the canonical proof record for admissibility. +- `verification-summary.json` is the portable verification closure. +- `evidence-manifest.json` is the cross-artifact index when the bundle is archived or exported. + +## Acceptance Criteria + +- The bundle validates against the subject and summary schemas. +- The subject digest matches the canonical subject payload. +- The verification summary references the same subject digest and bundle identity. +- The bundle can be copied to a different environment and re-verified without local state. +- The bundle fails closed if required proof surfaces are missing. + diff --git a/docs/standards/summit-category-language-and-glossary.md b/docs/standards/summit-category-language-and-glossary.md new file mode 100644 index 00000000000..49f05225859 --- /dev/null +++ b/docs/standards/summit-category-language-and-glossary.md @@ -0,0 +1,79 @@ +# Summit Category Language and Glossary + +**Status:** Active +**Owner:** Founders / GTM / Product +**Purpose:** Source of truth for Summit product, GTM, board, and partner language. + +## Canonical Category Sentence + +Summit is the decision-admissibility layer for AI-assisted work: it makes decisions replayable, policy-bound, evidence-backed, and defensible. + +## Approved Terms + +| Term | Use It When | +|---|---| +| Decision-admissibility | Describing the category Summit owns | +| Evidence-backed | Describing the proof attached to a decision | +| Policy-bound | Describing decisions constrained by rules | +| Replayable | Describing the ability to reconstruct a decision path | +| Defensible | Describing audit, legal, or board scrutiny readiness | +| Decision record | The artifact produced for a decision | +| Evidence bundle | Source material, policy references, and trace data for one decision | +| Pilot | A scoped proof of value in one workflow | +| Paid deployment | A pilot that converts into a contracted production use | +| Control surface | The place where Summit enforces or records a decision rule | +| Exception | A documented deviation from the default policy path | +| Proof milestone | A measurable checkpoint that justifies next spend or expansion | + +## Avoided Terms + +| Term | Why to Avoid | +|---|---| +| AI governance platform | Too broad and crowded | +| Generic AI security | Too vague and weakens category ownership | +| Model observability | Too narrow | +| Trust layer | Too soft and unmeasurable | +| Compliance tool | Too small for the actual wedge | +| Guardrail wrapper | Sounds superficial | +| Copilot for governance | Off-category product framing | +| End-to-end AI solution | Too generic | +| Responsible AI | Too abstract unless paired with enforcement and evidence | + +## Core Definitions + +| Term | Definition | +|---|---| +| Decision-admissibility | Whether an AI-assisted decision can be accepted as valid under policy, audit, legal, or operational review | +| Evidence bundle | Inputs, policy references, provenance, and trace data needed to justify a decision | +| Policy binding | Attaching rules or constraints to a decision before it becomes action | +| Replayability | Reconstructing how a decision was reached | +| Defensibility | Explaining and supporting a decision under scrutiny | +| Exception | A recorded, approved deviation from standard policy or process | +| Control surface | A system point where Summit evaluates, blocks, annotates, or records a decision | + +## Writing Rules + +- Lead with the category Summit owns, not the feature list. +- Tie every claim to a workflow, buyer pain, or proof point. +- Prefer concrete nouns over abstract strategy language. +- Do not say `trust` without evidence or control behind it. +- Do not say `governance` alone when you mean enforcement, auditability, or policy binding. +- Use `pilot` only for a scoped proof. +- Use `paid deployment` only after contract conversion. +- Use `exception` only when it is documented and approved. + +## Audience Frames + +| Audience | Preferred Frame | +|---|---| +| Product | Decision-admissibility as the product’s control model | +| GTM | Category, buyer pain, and proof milestone | +| Board | Risk, traction, conversion, and defensibility | +| Partner | Approved positioning and repeatable deployment motion | + +## Short Form Language + +- Summit makes AI-assisted decisions admissible. +- Summit turns AI decisions into evidence-backed records. +- Summit binds policy to action. +- Summit makes decisions replayable and defensible. diff --git a/docs/standards/summit-evidence-protocol.md b/docs/standards/summit-evidence-protocol.md index fdf055b673c..392721cc6d3 100644 --- a/docs/standards/summit-evidence-protocol.md +++ b/docs/standards/summit-evidence-protocol.md @@ -7,6 +7,8 @@ The Summit Evidence Protocol standardizes how Summit agents and pipelines: * assign and propagate trust * produce deterministic, machine-verifiable outputs +SEP is the evidence-discipline layer underneath the broader [Cognitive Security Protocol](./cognitive-security-protocol-v0.1.md), which defines how those artifacts become portable admissibility proofs. + ## Primary Artifacts * `report.json` @@ -14,6 +16,8 @@ The Summit Evidence Protocol standardizes how Summit agents and pipelines: * `stamp.json` * `evidence-index.json` * `trust-report.json` +* `verification-summary.json` +* `decision-proof.json` ## Required Evidence ID pattern @@ -21,3 +25,9 @@ The Summit Evidence Protocol standardizes how Summit agents and pipelines: Example: `EVID:sec:10k-2025-acme:7fa92d` + +## Protocol Relationship + +- SEP defines the deterministic evidence artifact discipline. +- CSP defines the portable bundle, verified properties, and admissibility contract. +- Together they let Summit move from evidence capture to machine-verifiable decision validity. diff --git a/governance/branch-protection.required.json b/governance/branch-protection.required.json index 41ba89a9bec..5ec83750ac4 100644 --- a/governance/branch-protection.required.json +++ b/governance/branch-protection.required.json @@ -1,20 +1,20 @@ { "$schema": "../schemas/protocols/registry.schema.json", "branch": "main", - "strict": true, - "required_checks": [ - "agent / cost-slo", - "agent / reliability", - "ci / merge-queue", - "ci / preflight", - "ci / workflow-validity", - "data / graph-parity", - "data / mapping-determinism", - "governance / branch-protection-drift", - "lineage / execution-graph-consistency", - "security / action-integrity", - "supply-chain / provenance-verified", - "supply-chain / sbom-generated", - "trust / consistency" - ] + "enforce_admins": true, + "required_linear_history": true, + "required_status_checks": { + "strict": true, + "contexts": [ + "ci-guard / attestation-bundle-verifier", + "merge-surge / merge-queue", + "merge-surge / pr-fast", + "security-gates / gate" + ] + }, + "required_pull_request_reviews": { + "dismiss_stale_reviews": true, + "require_code_owner_reviews": true, + "required_approving_review_count": 2 + } } diff --git a/governance/ga/required-checks.yaml b/governance/ga/required-checks.yaml index 20709db5b82..fa59b55b40c 100644 --- a/governance/ga/required-checks.yaml +++ b/governance/ga/required-checks.yaml @@ -1,19 +1,11 @@ -{ - "version": 1, - "protected_branches": [ - "main", - "release/*" - ], - "required_checks": [ - "meta-gate", - "CI Core Gate ✅", - "Workflow Validity Check", - "gate", - "Release Readiness Gate", - "Unit Tests", - "SOC Controls", - "test (20.x)", - "Routing Resilience Validation" - ], - "owner": "summit-ga" -} +version: 2 +protected_branches: + - main + +required_checks: + - ci-guard / attestation-bundle-verifier + - merge-surge / merge-queue + - merge-surge / pr-fast + - security-gates / gate + +owner: summit-ga diff --git a/k8s/argo/admissibility-presync-check.yaml b/k8s/argo/admissibility-presync-check.yaml new file mode 100644 index 00000000000..801ef080c84 --- /dev/null +++ b/k8s/argo/admissibility-presync-check.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: admissibility-status + namespace: argocd +data: + status: "PASS" +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: admissibility-presync-check + namespace: argocd + annotations: + argocd.argoproj.io/hook: PreSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + restartPolicy: Never + containers: + - name: gate + image: bitnami/kubectl:1.30 + command: + - /bin/sh + - -ec + - | + STATUS="$(kubectl -n argocd get configmap admissibility-status -o jsonpath='{.data.status}')" + if [ "$STATUS" != "PASS" ]; then + echo "ArgoCD sync blocked: admissibility status is $STATUS" + exit 1 + fi + echo "ArgoCD sync permitted: admissibility status is PASS" diff --git a/k8s/base/kustomization.yaml b/k8s/base/kustomization.yaml index 27cff9bf44c..2010f2bbddf 100644 --- a/k8s/base/kustomization.yaml +++ b/k8s/base/kustomization.yaml @@ -7,3 +7,5 @@ resources: - ../crd/vex-override-verification.yaml - ../crd/vex-override-ledger.yaml - tsa-cert-chain-config.yaml + - ../policies/kyverno-admissibility-enforcement.yaml + - ../argo/admissibility-presync-check.yaml diff --git a/k8s/policies/kyverno-admissibility-enforcement.yaml b/k8s/policies/kyverno-admissibility-enforcement.yaml new file mode 100644 index 00000000000..83dadaa12e6 --- /dev/null +++ b/k8s/policies/kyverno-admissibility-enforcement.yaml @@ -0,0 +1,59 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-admissibility-supply-chain + annotations: + policies.kyverno.io/title: Enforce Admissibility for Deployments + policies.kyverno.io/category: Supply Chain Security + policies.kyverno.io/severity: high +spec: + validationFailureAction: Enforce + background: true + rules: + - name: require-admissibility-pass-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "security.summit.io/admissibility label must be PASS" + pattern: + metadata: + labels: + security.summit.io/admissibility: "PASS" + + - name: require-signed-images + match: + any: + - resources: + kinds: + - Pod + verifyImages: + - imageReferences: + - "*" + mutateDigest: true + required: true + attestors: + - count: 1 + entries: + - keyless: + issuer: https://token.actions.githubusercontent.com + subject: https://github.com/*/* + + - name: require-slsa-attestation + match: + any: + - resources: + kinds: + - Pod + verifyImages: + - imageReferences: + - "*" + attestations: + - type: slsaprovenance + conditions: + all: + - key: "{{ payload.predicateType }}" + operator: Equals + value: https://slsa.dev/provenance/v1 diff --git a/k8s/policies/kyverno-enforce-admissible-images.yaml b/k8s/policies/kyverno-enforce-admissible-images.yaml new file mode 100644 index 00000000000..07488c550ff --- /dev/null +++ b/k8s/policies/kyverno-enforce-admissible-images.yaml @@ -0,0 +1,47 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-admissible-ghcr-images + annotations: + policies.kyverno.io/title: Enforce signed Summit images and admissibility + policies.kyverno.io/category: Supply Chain Security + policies.kyverno.io/severity: high +spec: + validationFailureAction: Enforce + background: true + webhookTimeoutSeconds: 15 + rules: + - name: verify-ghcr-image-signature-and-attestation + match: + any: + - resources: + kinds: + - Pod + verifyImages: + - image: ghcr.io/*/summit-* + keyless: + issuer: https://token.actions.githubusercontent.com + subject: https://github.com/*/*/.github/workflows/deploy.yml@refs/heads/main + attestations: + - type: slsaprovenance + conditions: + all: + - key: "{{ payload.predicate.metadata.admissibility }}" + operator: Equals + value: PASS + - key: "{{ payload.predicate.builder.id }}" + operator: Equals + value: https://github.com/actions/runner + - name: require-admissibility-labels + match: + any: + - resources: + kinds: + - Pod + validate: + message: Pods must carry a PASS admissibility label and a non-empty evidence digest. + pattern: + metadata: + labels: + summit.dev/admissibility: PASS + summit.dev/admissibility-digest: "?*" diff --git a/k8s/server-deployment.yaml b/k8s/server-deployment.yaml index 72e21b0bb3f..1bb1bcc25ce 100644 --- a/k8s/server-deployment.yaml +++ b/k8s/server-deployment.yaml @@ -10,9 +10,10 @@ spec: metadata: labels: app: summit-server + security.summit.io/admissibility: "PASS" spec: containers: - - name: summit-server - image: summit/server - ports: - - containerPort: 4000 + - name: summit-server + image: summit/server + ports: + - containerPort: 4000 diff --git a/lib/admissibility.ts b/lib/admissibility.ts new file mode 100644 index 00000000000..82dc998eb87 --- /dev/null +++ b/lib/admissibility.ts @@ -0,0 +1,96 @@ +export type AdmissibilityStatus = "PASS" | "FAIL"; + +export interface SignatureEvidence { + valid: boolean; + keyId?: string; +} + +export interface SbomEvidence { + present: boolean; + complete: boolean; + format?: string; + components?: string[]; +} + +export interface ProvenanceEvidence { + present: boolean; + chainIntact: boolean; + builderId?: string; + subjects?: string[]; +} + +export interface DependencyEvidence { + prohibitedFound: string[]; +} + +export interface AdmissibilityEvidenceBundle { + artifact: { + digest: string; + name?: string; + }; + signature: SignatureEvidence; + sbom: SbomEvidence; + provenance: ProvenanceEvidence; + dependencies: DependencyEvidence; +} + +export interface AdmissibilityVerdict { + status: AdmissibilityStatus; + reasons: string[]; + checks: { + signatureValid: boolean; + sbomComplete: boolean; + provenanceIntact: boolean; + prohibitedDependencies: boolean; + }; + artifact: { + digest: string; + name?: string; + }; +} + +const sortUnique = (values: string[]): string[] => + Array.from(new Set(values)).sort((a, b) => a.localeCompare(b)); + +export function evaluateAdmissibility( + evidenceBundle: AdmissibilityEvidenceBundle +): AdmissibilityVerdict { + const reasons: string[] = []; + + const signatureValid = evidenceBundle.signature.valid; + if (!signatureValid) { + reasons.push("INVALID_SIGNATURE"); + } + + const sbomComplete = evidenceBundle.sbom.present && evidenceBundle.sbom.complete; + if (!sbomComplete) { + reasons.push("MISSING_OR_INCOMPLETE_SBOM"); + } + + const provenanceIntact = + evidenceBundle.provenance.present && evidenceBundle.provenance.chainIntact; + if (!provenanceIntact) { + reasons.push("BROKEN_PROVENANCE_CHAIN"); + } + + const prohibitedDependencies = evidenceBundle.dependencies.prohibitedFound.length === 0; + if (!prohibitedDependencies) { + const blocked = sortUnique(evidenceBundle.dependencies.prohibitedFound).join(","); + reasons.push(`PROHIBITED_DEPENDENCIES:${blocked}`); + } + + return { + status: reasons.length === 0 ? "PASS" : "FAIL", + reasons, + checks: { + signatureValid, + sbomComplete, + provenanceIntact, + prohibitedDependencies, + }, + artifact: { + digest: evidenceBundle.artifact.digest, + name: evidenceBundle.artifact.name, + }, + }; +} diff --git a/package.json b/package.json index 70d03b900f1..2d73280df1b 100644 --- a/package.json +++ b/package.json @@ -26,6 +26,9 @@ "verify": "npx tsx server/scripts/verify-ga-security.ts", "verify:verbose": "tsx scripts/verify.ts --verbose", "verify:runtime": "tsx scripts/verification/verify_runtime.ts", + "verify:attestation-bundle": "node scripts/ci/verify_summit_attestation_bundle.mjs", + "verify:required-checks-contract": "node scripts/ci/verify_required_checks_contract.mjs", + "verify:security-gate": "node scripts/ci/verify_security_gate.mjs", "setup": "chmod +x scripts/setup.sh && scripts/setup.sh", "cleanup": "chmod +x scripts/cleanup-repository.sh && scripts/cleanup-repository.sh", "dev": "concurrently \"npm run server:dev\" \"npm run client:dev\"", @@ -143,6 +146,7 @@ "ci:docs-governance:fix": "node scripts/ci/verify_governance_docs.mjs --fix", "ci:soc-report": "python3 scripts/evidence/generate_soc_report.py --evidence-dir dist/evidence/${GITHUB_SHA}", "ci:branch-protection:check": "node scripts/ci/check_branch_protection_drift.mjs", + "ci:branch-protection:verify": "node scripts/ci/verify_branch_protection.mjs", "ci:branch-protection:apply": "node scripts/ci/apply_branch_protection_policy.mjs --apply", "ci:branch-protection:plan": "node scripts/ci/apply_branch_protection_policy.mjs --dry-run", "ci:branch-protection:ruleset": "node scripts/ci/generate_ruleset_payload.mjs", @@ -199,7 +203,17 @@ "ga:certify": "npx tsx scripts/benchmarks/run_perf.ts && npx tsx scripts/ga-validator.ts", "ga:sentinel": "node scripts/drift-sentinel.mjs", "ga:report": "npx tsx scripts/generate-compliance-report.ts", - "ga:finish": "rm -rf scripts/ga-final-walkthrough.sh scripts/failure-injector.mjs signals/ *.bak && echo '🏁 RELEASE SEALTED. GA PILOT LIVE.' " + "ga:finish": "rm -rf scripts/ga-final-walkthrough.sh scripts/failure-injector.mjs signals/ *.bak && echo '🏁 RELEASE SEALTED. GA PILOT LIVE.' ", + "refactor:analyze": "node scripts/structure_analyzer.mjs --output .tmp/refactor-analysis.json", + "refactor:plan": "node scripts/refactor_planner.mjs --input .tmp/refactor-analysis.json --output .tmp/refactor-plan.json", + "refactor:execute": "node scripts/refactor_executor.mjs --plan .tmp/refactor-plan.json --output .tmp/refactor-execution.json", + "refactor:validate": "node scripts/refactor_validator.mjs --plan .tmp/refactor-plan.json --report .tmp/refactor-validation.json", + "refactor:metrics": "node scripts/refactor_metrics.mjs --analysis-before .tmp/refactor-analysis.json --analysis-after .tmp/refactor-analysis.json --plan .tmp/refactor-plan.json --output .tmp/refactor-metrics.json", + "test:refactor-engine": "node --test scripts/__tests__/refactor_engine.test.mjs", + "ci:verify-lane-scope": "node scripts/ci/verify_lane_scope.mjs", + "ci:verify-summit-attestation-bundle": "node scripts/ci/verify_summit_attestation_bundle.mjs", + "verify:admissibility": "npx tsx scripts/ci/verify-admissibility.ts", + "test:admissibility": "node --test --import tsx tests/ci/admissibility.test.mts" }, "keywords": [ "intelligence-analysis", diff --git a/policies/ops/containment-slo.yaml b/policies/ops/containment-slo.yaml new file mode 100644 index 00000000000..f80f5f3ddc4 --- /dev/null +++ b/policies/ops/containment-slo.yaml @@ -0,0 +1,8 @@ +# Canonical Containment SLO Policy +# This file defines the maximum approved blast radius for protected Summit workflows. + +version: 1 + +max_blast_radius: + domains: 1 + tenants: 1 diff --git a/policies/ops/failure-domain-map.yaml b/policies/ops/failure-domain-map.yaml new file mode 100644 index 00000000000..011da39c4e8 --- /dev/null +++ b/policies/ops/failure-domain-map.yaml @@ -0,0 +1,49 @@ +# Canonical Failure Domain Map +# This file is the authoritative registry for containment boundaries used by future CI/runtime gates. + +version: 1 + +failure_domains: + - id: fd.prod.us-east-1 + type: region + parent_id: null + scope: + environment: prod + region: us-east-1 + zone: null + cluster: null + namespace: null + service: null + tenant: null + isolation: + network: strict + compute: shared + data: partitioned + tenant: strict + policy: + cross_domain_calls: [] + max_blast_radius: + domains: 1 + tenants: 1 + + - id: fd.stage.us-east-1 + type: region + parent_id: null + scope: + environment: stage + region: us-east-1 + zone: null + cluster: null + namespace: null + service: null + tenant: null + isolation: + network: strict + compute: shared + data: partitioned + tenant: strict + policy: + cross_domain_calls: [] + max_blast_radius: + domains: 1 + tenants: 1 diff --git a/policies/ops/failure-policy.yaml b/policies/ops/failure-policy.yaml new file mode 100644 index 00000000000..e2837a705ab --- /dev/null +++ b/policies/ops/failure-policy.yaml @@ -0,0 +1,15 @@ +# Canonical Failure Policy +# This file defines the default degraded-mode semantics for failure-aware workloads. + +version: 1 + +retry: + attempts: 3 + backoff: exponential + +timeout: + default_ms: 5000 + +circuit_breaker: + threshold: 0.2 + cooldown_ms: 30000 diff --git a/policies/ops/rollout-policy.yaml b/policies/ops/rollout-policy.yaml new file mode 100644 index 00000000000..d50ac4707dd --- /dev/null +++ b/policies/ops/rollout-policy.yaml @@ -0,0 +1,10 @@ +# Canonical Rollout Policy +# This file defines the minimum rollout controls for failure-domain-aware promotion. + +version: 1 + +rollout: + unit: failure_domain + max_parallel: 1 + halt_on_error: true + auto_contain: true diff --git a/policies/ops/verified-properties.yaml b/policies/ops/verified-properties.yaml new file mode 100644 index 00000000000..3e34065a707 --- /dev/null +++ b/policies/ops/verified-properties.yaml @@ -0,0 +1,56 @@ +version: 1 +registry: summit-verified-properties +owner: governance +properties: + - id: summit.evidence.identity_consistent + description: Evidence IDs remain stable and internally consistent across the bundle. + verifier: scripts/ci/check_failure_domains.mjs + severity: critical + evidence: + - evidence-map.yaml + + - id: summit.output.deterministic + description: Deterministic artifacts exclude unstable runtime metadata and can be hashed canonically. + verifier: scripts/ci/verify_failure_semantics.mjs + severity: critical + evidence: + - report.json + - metrics.json + + - id: summit.provenance.complete + description: Required provenance and trust-chain fields are present for the governed subject. + verifier: scripts/ci/verify-sbom-signature.sh + severity: critical + evidence: + - provenance.json + - sbom.sig + + - id: summit.lineage.present + description: A material run emits lineage or equivalent execution identity linking run_id to governed evidence. + verifier: scripts/ci/check_failure_isolation.mjs + severity: high + evidence: + - lineage.json + - det.json + + - id: summit.governance.branch_protection_intact + description: Protected branch requirements remain aligned with the declared governance baseline. + verifier: scripts/release/verify_branch_protection_snapshot.sh + severity: critical + evidence: + - branch-protection-snapshot.json + + - id: summit.verification.passed + description: All required Summit properties for the applicable policy pack passed or were explicitly waived. + verifier: summit verify + severity: critical + evidence: + - verification-summary.json + +conformance: + baseline_required: + - summit.evidence.identity_consistent + - summit.output.deterministic + - summit.provenance.complete + - summit.lineage.present + - summit.verification.passed diff --git a/policies/security/security_gates.yml b/policies/security/security_gates.yml index 56f09be7357..02564229231 100644 --- a/policies/security/security_gates.yml +++ b/policies/security/security_gates.yml @@ -1,23 +1,24 @@ # Canonical Security Gates -# This file defines the security gates that must pass for a release to be considered secure. +# This file defines the minimum deterministic security checks that must pass +# before Summit code can enter the protected merge path. -version: 1 +version: 2 gates: - id: sast type: required description: "Static Application Security Testing (SAST)" - tool: "snyk" - threshold: 0 # No high or critical vulnerabilities + tool: "semgrep" + threshold: 0 - id: sca type: required description: "Software Composition Analysis (SCA)" - tool: "snyk" - threshold: 0 # No high or critical vulnerabilities + tool: "npm-audit" + threshold: 0 - id: secret-scanning type: required description: "Secret Scanning" tool: "gitleaks" - threshold: 0 # No secrets found + threshold: 0 diff --git a/policy/actions-allowlist.json b/policy/actions-allowlist.json index 6ef10defb42..1969e17e304 100644 --- a/policy/actions-allowlist.json +++ b/policy/actions-allowlist.json @@ -15,7 +15,12 @@ "actions": [ "actions/checkout", "actions/setup-node", + "actions/cache", + "actions/create-github-app-token", + "actions/download-artifact", + "actions/github-script", "actions/upload-artifact", + "gitleaks/gitleaks-action", "pnpm/action-setup" ] } diff --git a/policy/admissibility/prohibited-dependencies.json b/policy/admissibility/prohibited-dependencies.json new file mode 100644 index 00000000000..42873788330 --- /dev/null +++ b/policy/admissibility/prohibited-dependencies.json @@ -0,0 +1,15 @@ +{ + "prohibitedDependencies": [ + "event-stream", + "flatmap-stream", + "node-ipc-bad", + "ua-parser-js-preinstall" + ], + "requiredFiles": [ + "attestation.json", + "metrics.json", + "provenance.intoto.json", + "report.json", + "sbom.cyclonedx.json" + ] +} diff --git a/prompts/governance/governance-gates-and-cognitive-security-convergence@v1.md b/prompts/governance/governance-gates-and-cognitive-security-convergence@v1.md new file mode 100644 index 00000000000..a3b679e0021 --- /dev/null +++ b/prompts/governance/governance-gates-and-cognitive-security-convergence@v1.md @@ -0,0 +1,21 @@ +--- +title: Governance Gates and Cognitive Security Convergence +version: v1 +owner: codex +--- + +Objective: +Converge Summit governance gates, required-check authority files, cognitive-security operating contracts, portable attestation artifacts, and supporting CI/workflow enforcement into one mergeable branch that restores deterministic protected-branch behavior and documents the resulting operating model. + +Instructions: +- Keep authority files aligned across `.github/required-checks.yml`, `governance/ga/required-checks.yaml`, `governance/branch-protection.required.json`, and `docs/governance/REQUIRED_CHECKS_CONTRACT.yml`. +- Ensure workflow changes preserve or strengthen guarded pull-request behavior and fail closed on drift. +- Add or update evidence-bearing docs, schemas, policies, and verification scripts required for cognitive-security and failure-containment contracts. +- Update `docs/roadmap/STATUS.json` in the same change. +- Back claims with targeted local verification. + +Expected outputs: +- Updated governance and workflow enforcement files +- New or updated cognitive-security and attestation standards/docs +- Verification scripts and tests covering the new contracts +- A PR body that declares the same scope and verification tiers diff --git a/prompts/governance/governance-gates-and-cognitive-security-convergence@v1.sha256 b/prompts/governance/governance-gates-and-cognitive-security-convergence@v1.sha256 new file mode 100644 index 00000000000..078374117d5 --- /dev/null +++ b/prompts/governance/governance-gates-and-cognitive-security-convergence@v1.sha256 @@ -0,0 +1 @@ +7a6ce2a5cd05d795558d12d50b935607f9a85587ce8d6be87a4aafd576584993 diff --git a/prompts/registry.yaml b/prompts/registry.yaml index 2701592137f..3d75b689372 100644 --- a/prompts/registry.yaml +++ b/prompts/registry.yaml @@ -1,6 +1,5 @@ version: 1 prompts: -<<<<<<< HEAD - id: lane2-next-slice-evaluation version: v1 path: prompts/ga/lane2-next-slice-evaluation@v1.md @@ -13,7 +12,6 @@ prompts: domains: - governance - planning -======= - id: antigravity-ga-convergence-engine version: v1 path: agents/ga-convergence/antigravity-master.prompt.md @@ -31,7 +29,6 @@ prompts: - ci - release - agents ->>>>>>> pr-21871 verification: tiers_required: - C @@ -41,14 +38,42 @@ prompts: allowed_operations: - create - edit + - id: governance-gates-and-cognitive-security-convergence + version: v1 + path: prompts/governance/governance-gates-and-cognitive-security-convergence@v1.md + sha256: 7a6ce2a5cd05d795558d12d50b935607f9a85587ce8d6be87a4aafd576584993 + description: Converge governance gates, required-check authority, attestation contracts, and cognitive-security control-plane artifacts into one mergeable branch. + scope: + paths: + - .github/ + - docs/ + - governance/ + - package.json + - policies/ + - prompts/governance/governance-gates-and-cognitive-security-convergence@v1.md + - prompts/registry.yaml + - schemas/ + - scripts/ + domains: + - governance + - ci + - documentation + - security + - policy + verification: + tiers_required: + - B + - C + debt_budget: + permitted: 0 + retirement_target: 0 + allowed_operations: + - create + - edit - id: imputed-intention-governed-expansion version: v1 path: prompts/analysis/imputed-intention-governed-expansion@v1.md -<<<<<<< HEAD - sha256: 7de2b7df02f425b4341d2f52bdeb90dc03a0ff0fbd298d1ecdd58f0ba9342aa2 -======= sha256: b5519d8f3cbc3edb7b385d6d48da5f7f78d16fe4dc7190ce0faa28ea93146002 ->>>>>>> pr-21871 description: Deliver the governed imputed-intention expansion branch through the 200th order with SAM lane wiring and roadmap/task-spec alignment. scope: paths: diff --git a/schemas/det.schema.json b/schemas/det.schema.json new file mode 100644 index 00000000000..90190866cee --- /dev/null +++ b/schemas/det.schema.json @@ -0,0 +1,120 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://summit.dev/schemas/det.schema.json", + "title": "Decision Execution Trace", + "type": "object", + "additionalProperties": false, + "required": [ + "run_id", + "parent_run_id", + "subject_digest", + "prompt_fingerprint", + "model_fingerprint", + "retrieval_fingerprint", + "graph_plan_fingerprint", + "sql_plan_fingerprint", + "evidence_ids", + "policy_verdicts", + "output_hash" + ], + "properties": { + "run_id": { + "type": "string", + "minLength": 1 + }, + "parent_run_id": { + "type": "string", + "minLength": 1 + }, + "subject_digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "prompt_fingerprint": { + "type": "string", + "minLength": 1 + }, + "model_fingerprint": { + "type": "string", + "minLength": 1 + }, + "retrieval_fingerprint": { + "type": "string", + "minLength": 1 + }, + "graph_plan_fingerprint": { + "type": "string", + "minLength": 1 + }, + "sql_plan_fingerprint": { + "type": "string", + "minLength": 1 + }, + "evidence_ids": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "policy_verdicts": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "policy", + "status" + ], + "properties": { + "policy": { + "type": "string", + "minLength": 1 + }, + "status": { + "type": "string", + "enum": [ + "PASS", + "FAIL", + "REVIEW_REQUIRED" + ] + } + } + } + }, + "output_hash": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "neo4j_version": { + "type": "string" + }, + "cypher_version": { + "type": "string" + }, + "vector_search_mode": { + "type": "string" + }, + "query_language_override": { + "type": "string" + }, + "tool_trace": { + "type": "array", + "items": { + "type": "object" + } + }, + "child_runs": { + "type": "array", + "items": { + "type": "string" + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } +} diff --git a/schemas/dvp.schema.json b/schemas/dvp.schema.json new file mode 100644 index 00000000000..265225e334d --- /dev/null +++ b/schemas/dvp.schema.json @@ -0,0 +1,79 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://summit.dev/schemas/dvp.schema.json", + "title": "Decision Validity Attestation", + "type": "object", + "additionalProperties": false, + "required": [ + "decision_definition_ref", + "det_ref", + "verification_ref", + "policy_digest", + "verdict" + ], + "properties": { + "decision_definition_ref": { + "type": "string", + "minLength": 1 + }, + "det_ref": { + "type": "string", + "minLength": 1 + }, + "verification_ref": { + "type": "string", + "minLength": 1 + }, + "policy_digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "verdict": { + "type": "string", + "enum": [ + "ADMISSIBLE", + "NOT_ADMISSIBLE", + "REVIEW_REQUIRED" + ] + }, + "required_properties": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "waivers": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "property", + "reason", + "expires" + ], + "properties": { + "property": { + "type": "string", + "minLength": 1 + }, + "reason": { + "type": "string", + "minLength": 1 + }, + "expires": { + "type": "string", + "minLength": 1 + } + } + } + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } +} diff --git a/schemas/governance/attestation-subject.schema.json b/schemas/governance/attestation-subject.schema.json new file mode 100644 index 00000000000..67e8ebec38e --- /dev/null +++ b/schemas/governance/attestation-subject.schema.json @@ -0,0 +1,124 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://summit.dev/schemas/governance/attestation-subject.schema.json", + "title": "Summit Attestation Subject", + "type": "object", + "additionalProperties": false, + "required": [ + "schema_version", + "kind", + "subject_id", + "subject_type", + "subject_digest", + "canonical_ref", + "lineage_run_id", + "inputs", + "tools", + "policies_applied", + "reproducible" + ], + "properties": { + "schema_version": { + "type": "string", + "const": "0.1.0" + }, + "kind": { + "type": "string", + "const": "attestation_subject" + }, + "subject_id": { + "type": "string", + "minLength": 1 + }, + "subject_type": { + "type": "string", + "enum": ["decision", "claim", "artifact", "workflow", "bundle"] + }, + "subject_digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "canonical_ref": { + "type": "string", + "minLength": 1 + }, + "lineage_run_id": { + "type": "string", + "minLength": 1 + }, + "inputs": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": ["type", "ref"], + "properties": { + "type": { + "type": "string", + "minLength": 1 + }, + "ref": { + "type": "string", + "minLength": 1 + }, + "digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "role": { + "type": "string", + "minLength": 1 + } + } + } + }, + "tools": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": ["name", "version"], + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "version": { + "type": "string", + "minLength": 1 + } + } + } + }, + "policies_applied": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "minLength": 1 + } + }, + "verification_ref": { + "type": "string", + "minLength": 1 + }, + "decision_proof_ref": { + "type": "string", + "minLength": 1 + }, + "evidence_manifest_ref": { + "type": "string", + "minLength": 1 + }, + "reproducible": { + "type": "boolean" + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } +} diff --git a/schemas/governance/blast-radius.schema.json b/schemas/governance/blast-radius.schema.json new file mode 100644 index 00000000000..115e9cbca99 --- /dev/null +++ b/schemas/governance/blast-radius.schema.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "BlastRadius", + "type": "object", + "properties": { + "run_id": { + "type": "string" + }, + "affected_domain_count": { + "type": "integer", + "minimum": 0 + }, + "affected_tenant_count": { + "type": "integer", + "minimum": 0 + }, + "within_policy": { + "type": "boolean" + } + }, + "required": [ + "run_id", + "affected_domain_count", + "affected_tenant_count", + "within_policy" + ], + "additionalProperties": false +} diff --git a/schemas/governance/containment-report.schema.json b/schemas/governance/containment-report.schema.json new file mode 100644 index 00000000000..43bd541f581 --- /dev/null +++ b/schemas/governance/containment-report.schema.json @@ -0,0 +1,46 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ContainmentReport", + "type": "object", + "properties": { + "run_id": { + "type": "string" + }, + "failure_domain_id": { + "type": "string" + }, + "containment_status": { + "type": "string", + "enum": ["contained", "breached"] + }, + "affected_domains": { + "type": "array", + "items": { + "type": "string" + } + }, + "affected_tenants": { + "type": "array", + "items": { + "type": "string" + } + }, + "cross_domain_propagation": { + "type": "boolean" + }, + "policy_verdict": { + "type": "string", + "enum": ["pass", "fail"] + } + }, + "required": [ + "run_id", + "failure_domain_id", + "containment_status", + "affected_domains", + "affected_tenants", + "cross_domain_propagation", + "policy_verdict" + ], + "additionalProperties": false +} diff --git a/schemas/governance/decision-proof.schema.json b/schemas/governance/decision-proof.schema.json new file mode 100644 index 00000000000..27e5c3fcb0a --- /dev/null +++ b/schemas/governance/decision-proof.schema.json @@ -0,0 +1,108 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://summit.dev/schemas/governance/decision-proof.schema.json", + "title": "Summit Decision Proof", + "type": "object", + "additionalProperties": false, + "required": [ + "decision_id", + "subject_digest", + "lineage_run_id", + "inputs", + "tools", + "policies_applied", + "verification_ref", + "reproducible", + "verdict" + ], + "properties": { + "decision_id": { + "type": "string", + "minLength": 1 + }, + "subject_digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "lineage_run_id": { + "type": "string", + "minLength": 1 + }, + "inputs": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "ref" + ], + "properties": { + "type": { + "type": "string", + "minLength": 1 + }, + "ref": { + "type": "string", + "minLength": 1 + }, + "digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + } + } + } + }, + "tools": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "version" + ], + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "version": { + "type": "string", + "minLength": 1 + } + } + } + }, + "policies_applied": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "minLength": 1 + } + }, + "verification_ref": { + "type": "string", + "minLength": 1 + }, + "reproducible": { + "type": "boolean" + }, + "verdict": { + "type": "string", + "enum": [ + "ADMISSIBLE", + "NOT_ADMISSIBLE", + "REVIEW_REQUIRED" + ] + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } +} diff --git a/schemas/governance/failure-domain.schema.json b/schemas/governance/failure-domain.schema.json new file mode 100644 index 00000000000..3e536aa1c74 --- /dev/null +++ b/schemas/governance/failure-domain.schema.json @@ -0,0 +1,109 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "FailureDomain", + "type": "object", + "properties": { + "id": { + "type": "string", + "pattern": "^[a-z0-9._:-]+$" + }, + "type": { + "type": "string", + "enum": [ + "region", + "zone", + "cluster", + "namespace", + "service", + "tenant", + "data_partition", + "decision_scope" + ] + }, + "parent_id": { + "type": ["string", "null"] + }, + "scope": { + "type": "object", + "properties": { + "environment": { + "type": "string", + "enum": ["dev", "stage", "prod"] + }, + "region": { + "type": ["string", "null"] + }, + "zone": { + "type": ["string", "null"] + }, + "cluster": { + "type": ["string", "null"] + }, + "namespace": { + "type": ["string", "null"] + }, + "service": { + "type": ["string", "null"] + }, + "tenant": { + "type": ["string", "null"] + } + }, + "required": ["environment"], + "additionalProperties": false + }, + "isolation": { + "type": "object", + "properties": { + "network": { + "type": "string", + "enum": ["strict", "soft"] + }, + "compute": { + "type": "string", + "enum": ["dedicated", "shared"] + }, + "data": { + "type": "string", + "enum": ["isolated", "partitioned", "shared"] + }, + "tenant": { + "type": "string", + "enum": ["strict", "partitioned", "shared"] + } + }, + "required": ["network", "compute", "data", "tenant"], + "additionalProperties": false + }, + "policy": { + "type": "object", + "properties": { + "cross_domain_calls": { + "type": "array", + "items": { + "type": "string" + } + }, + "max_blast_radius": { + "type": "object", + "properties": { + "domains": { + "type": "integer", + "minimum": 0 + }, + "tenants": { + "type": "integer", + "minimum": 0 + } + }, + "required": ["domains", "tenants"], + "additionalProperties": false + } + }, + "required": ["cross_domain_calls", "max_blast_radius"], + "additionalProperties": false + } + }, + "required": ["id", "type", "scope", "isolation", "policy"], + "additionalProperties": false +} diff --git a/schemas/governance/failure-policy.schema.json b/schemas/governance/failure-policy.schema.json new file mode 100644 index 00000000000..0733bfc8c13 --- /dev/null +++ b/schemas/governance/failure-policy.schema.json @@ -0,0 +1,54 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "FailurePolicy", + "type": "object", + "properties": { + "version": { + "type": "integer", + "minimum": 1 + }, + "retry": { + "type": "object", + "properties": { + "attempts": { + "type": "integer", + "minimum": 0 + }, + "backoff": { + "type": "string" + } + }, + "required": ["attempts", "backoff"], + "additionalProperties": false + }, + "timeout": { + "type": "object", + "properties": { + "default_ms": { + "type": "integer", + "minimum": 1 + } + }, + "required": ["default_ms"], + "additionalProperties": false + }, + "circuit_breaker": { + "type": "object", + "properties": { + "threshold": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "cooldown_ms": { + "type": "integer", + "minimum": 1 + } + }, + "required": ["threshold", "cooldown_ms"], + "additionalProperties": false + } + }, + "required": ["version", "retry", "timeout", "circuit_breaker"], + "additionalProperties": false +} diff --git a/schemas/governance/verification-summary.schema.json b/schemas/governance/verification-summary.schema.json new file mode 100644 index 00000000000..30a84d81069 --- /dev/null +++ b/schemas/governance/verification-summary.schema.json @@ -0,0 +1,150 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://summit.dev/schemas/governance/verification-summary.schema.json", + "title": "Summit Verification Summary", + "type": "object", + "additionalProperties": false, + "required": [ + "schema_version", + "kind", + "subject_id", + "subject_digest", + "bundle_hash", + "root_hash", + "trust_epoch", + "policy_hash", + "policy_version", + "verifier", + "identity", + "result", + "failure_class", + "checks" + ], + "properties": { + "schema_version": { + "type": "string", + "const": "0.1.0" + }, + "kind": { + "type": "string", + "const": "attestation_verification_summary" + }, + "subject_id": { + "type": "string", + "minLength": 1 + }, + "subject_digest": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "bundle_hash": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "root_hash": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "trust_epoch": { + "type": "string", + "minLength": 1 + }, + "policy_hash": { + "type": "string", + "pattern": "^sha256:[a-f0-9]{64}$" + }, + "policy_version": { + "type": "string", + "minLength": 1 + }, + "verifier": { + "type": "object", + "additionalProperties": false, + "required": ["tool", "version", "mode"], + "properties": { + "tool": { + "type": "string", + "minLength": 1 + }, + "version": { + "type": "string", + "minLength": 1 + }, + "mode": { + "type": "string", + "minLength": 1 + } + } + }, + "identity": { + "type": "object", + "additionalProperties": false, + "required": ["issuer", "subject", "repo", "workflow", "ref"], + "properties": { + "issuer": { + "type": "string", + "minLength": 1 + }, + "subject": { + "type": "string", + "minLength": 1 + }, + "repo": { + "type": "string", + "minLength": 1 + }, + "workflow": { + "type": "string", + "minLength": 1 + }, + "ref": { + "type": "string", + "minLength": 1 + } + } + }, + "result": { + "type": "string", + "enum": ["PASS", "FAIL", "REVIEW_REQUIRED"] + }, + "failure_class": { + "type": "string", + "enum": ["STRUCT", "ACQ", "CRYPTO", "IDENTITY", "SEMANTIC", "POLICY", "DRIFT", "NONE"] + }, + "checks": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": ["name", "result"], + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "result": { + "type": "string", + "enum": ["PASS", "FAIL", "WARN", "SKIP"] + }, + "details": { + "type": "string" + }, + "evidence_ref": { + "type": "string", + "minLength": 1 + } + } + } + }, + "reproducible": { + "type": "boolean" + }, + "notes": { + "type": "array", + "items": { + "type": "string" + } + } + } +} diff --git a/scripts/check_branch_protection_convergence.mjs b/scripts/check_branch_protection_convergence.mjs index 43edb39463b..551314c39f0 100644 --- a/scripts/check_branch_protection_convergence.mjs +++ b/scripts/check_branch_protection_convergence.mjs @@ -22,6 +22,10 @@ async function gh(path) { } }); if (!res.ok) { + if (res.status === 403) { + console.log(`No branch-protection read permission for ${path}; deferring live convergence check`); + process.exit(0); + } throw new Error(`GitHub API ${path} failed: ${res.status} ${await res.text()}`); } return res.json(); diff --git a/scripts/ci/__tests__/governance_mutation_guard.test.mjs b/scripts/ci/__tests__/governance_mutation_guard.test.mjs index 65ba334943c..d586cdc3ab1 100644 --- a/scripts/ci/__tests__/governance_mutation_guard.test.mjs +++ b/scripts/ci/__tests__/governance_mutation_guard.test.mjs @@ -1,5 +1,6 @@ import test from 'node:test'; import assert from 'node:assert/strict'; +import { spawnSync } from 'node:child_process'; import fs from 'node:fs'; import path from 'node:path'; import os from 'node:os'; @@ -210,3 +211,383 @@ test('malformed protected-governance inventory fails closed', () => { }); }, /scope inventory is invalid/); }); + +const ATTESTATION_SUBJECT_DIGEST = + 'sha256:1111111111111111111111111111111111111111111111111111111111111111'; +const ATTESTATION_POLICY_DIGEST = + 'sha256:2222222222222222222222222222222222222222222222222222222222222222'; + +function runAttestationVerifier(bundleDir) { + return spawnSync( + 'node', + [path.resolve('scripts/ci/verify_summit_attestation_bundle.mjs'), bundleDir], + { + cwd: path.resolve('.'), + encoding: 'utf8', + }, + ); +} + +function writeAttestationBundle(bundleDir) { + fs.mkdirSync(bundleDir, { recursive: true }); + writeJson(path.join(bundleDir, 'subject.json'), { + subject_digest: ATTESTATION_SUBJECT_DIGEST, + subject_type: 'summit-attestation-bundle', + bundle_version: '1.0.0', + }); + writeJson(path.join(bundleDir, 'verification-summary.json'), { + subject: { + digest: ATTESTATION_SUBJECT_DIGEST, + }, + verifier: { + name: 'summit-attestation-verifier', + version: '1.2.3', + }, + policy_digest: ATTESTATION_POLICY_DIGEST, + results: [ + { + property: 'bundle.integrity', + status: 'PASS', + }, + ], + }); + writeJson(path.join(bundleDir, 'decision-proof.json'), { + decision_id: 'DEC-1', + subject_digest: ATTESTATION_SUBJECT_DIGEST, + lineage_run_id: 'run-123', + inputs: [{ type: 'bundle', ref: 'subject.json' }], + tools: [{ name: 'summit-attestation-verifier', version: '1.2.3' }], + policies_applied: ['baseline'], + verification_ref: 'verification-summary.json', + reproducible: true, + verdict: 'ADMISSIBLE', + }); + writeJson(path.join(bundleDir, 'policy.json'), { + policy_digest: ATTESTATION_POLICY_DIGEST, + }); +} + +function writeText(targetPath, contents) { + fs.mkdirSync(path.dirname(targetPath), { recursive: true }); + fs.writeFileSync(targetPath, contents); +} + +function runRequiredChecksContractVerifier(root, extraEnv = {}) { + return spawnSync( + 'node', + [path.resolve('scripts/ci/verify_required_checks_contract.mjs')], + { + cwd: root, + encoding: 'utf8', + env: { + ...process.env, + ...extraEnv, + }, + }, + ); +} + +test('attestation bundle verifier passes a complete bundle', () => { + const bundleDir = fs.mkdtempSync(path.join(fixtureRoot, 'attestation-pass-')); + try { + writeAttestationBundle(bundleDir); + const result = runAttestationVerifier(bundleDir); + assert.equal(result.status, 0, result.stderr || result.stdout); + assert.equal(result.stderr, ''); + + const output = JSON.parse(result.stdout); + assert.deepEqual(output, { + status: 'PASS', + bundle_dir: path.resolve(bundleDir), + subject_digest: ATTESTATION_SUBJECT_DIGEST, + verifier: { + name: 'summit-attestation-verifier', + version: '1.2.3', + }, + policy_digest: ATTESTATION_POLICY_DIGEST, + failed_properties: [], + verdict: 'ADMISSIBLE', + }); + } finally { + fs.rmSync(bundleDir, { recursive: true, force: true }); + } +}); + +test('attestation bundle verifier fails when a required file is missing', () => { + const bundleDir = fs.mkdtempSync(path.join(fixtureRoot, 'attestation-fail-')); + try { + writeAttestationBundle(bundleDir); + fs.rmSync(path.join(bundleDir, 'policy.json')); + + const result = runAttestationVerifier(bundleDir); + assert.equal(result.status, 1); + assert.equal(result.stderr, ''); + + const output = JSON.parse(result.stdout); + assert.equal(output.status, 'FAIL'); + assert.equal(output.bundle_dir, path.resolve(bundleDir)); + assert.deepEqual(output.failed_properties, ['bundle.file_present.policy.json']); + } finally { + fs.rmSync(bundleDir, { recursive: true, force: true }); + } +}); + +test('required checks contract verifier passes a minimal valid authority set', () => { + const root = fs.mkdtempSync(path.join(fixtureRoot, 'required-checks-pass-')); + try { + writeText( + path.join(root, 'docs/governance/REQUIRED_CHECKS_CONTRACT.yml'), + `required_checks: + - context: "ci-guard / attestation-bundle-verifier" + type: "workflow" + workflow: + file: ".github/workflows/ci-guard.yml" + workflow_name: "ci-guard" + job_id: "attestation-bundle-verifier" + job_name: "attestation-bundle-verifier" + triggers: ["pull_request"] +`, + ); + writeText( + path.join(root, '.github/required-checks.yml'), + `required_checks: + - ci-guard / attestation-bundle-verifier +`, + ); + writeText( + path.join(root, 'governance/ga/required-checks.yaml'), + `required_checks: + - ci-guard / attestation-bundle-verifier +`, + ); + writeJson(path.join(root, 'governance/branch-protection.required.json'), { + required_status_checks: { + contexts: ['ci-guard / attestation-bundle-verifier'], + }, + }); + writeText( + path.join(root, '.github/workflows/ci-guard.yml'), + `name: ci-guard +on: + pull_request: +jobs: + attestation-bundle-verifier: + runs-on: ubuntu-latest + steps: + - run: echo ok +`, + ); + + const result = runRequiredChecksContractVerifier(root, { + GITHUB_TOKEN: '', + GITHUB_REPOSITORY: '', + }); + + assert.equal(result.status, 0, result.stderr || result.stdout); + assert.match(result.stdout, /REQUIRED_CHECKS_CONTRACT verified successfully/); + } finally { + fs.rmSync(root, { recursive: true, force: true }); + } +}); + +test('required checks contract verifier fails when contexts are unsorted', () => { + const root = fs.mkdtempSync(path.join(fixtureRoot, 'required-checks-fail-')); + try { + writeText( + path.join(root, 'docs/governance/REQUIRED_CHECKS_CONTRACT.yml'), + `required_checks: + - context: "zeta / final" + type: "workflow" + workflow: + file: ".github/workflows/zeta.yml" + workflow_name: "zeta" + job_id: "final" + job_name: "final" + triggers: ["pull_request"] + - context: "alpha / first" + type: "workflow" + workflow: + file: ".github/workflows/alpha.yml" + workflow_name: "alpha" + job_id: "first" + job_name: "first" + triggers: ["pull_request"] +`, + ); + writeText( + path.join(root, '.github/required-checks.yml'), + `required_checks: + - zeta / final + - alpha / first +`, + ); + writeText( + path.join(root, 'governance/ga/required-checks.yaml'), + `required_checks: + - zeta / final + - alpha / first +`, + ); + writeJson(path.join(root, 'governance/branch-protection.required.json'), { + required_status_checks: { + contexts: ['zeta / final', 'alpha / first'], + }, + }); + writeText( + path.join(root, '.github/workflows/zeta.yml'), + `name: zeta +on: + pull_request: +jobs: + final: + runs-on: ubuntu-latest + steps: + - run: echo ok +`, + ); + writeText( + path.join(root, '.github/workflows/alpha.yml'), + `name: alpha +on: + pull_request: +jobs: + first: + runs-on: ubuntu-latest + steps: + - run: echo ok +`, + ); + + const result = runRequiredChecksContractVerifier(root, { + GITHUB_TOKEN: '', + GITHUB_REPOSITORY: '', + }); + + assert.notEqual(result.status, 0); + assert.match(result.stderr || result.stdout, /not sorted alphabetically by context/); + } finally { + fs.rmSync(root, { recursive: true, force: true }); + } +}); + +test('required checks contract verifier fails when local required checks use aliases instead of exact contexts', () => { + const root = fs.mkdtempSync(path.join(fixtureRoot, 'required-checks-alias-fail-')); + try { + writeText( + path.join(root, 'docs/governance/REQUIRED_CHECKS_CONTRACT.yml'), + `required_checks: + - context: "merge-surge / pr-fast" + type: "workflow" + workflow: + file: ".github/workflows/merge-surge.yml" + workflow_name: "merge-surge" + job_id: "pr-fast" + job_name: "pr-fast" + triggers: ["pull_request"] +`, + ); + writeText( + path.join(root, '.github/required-checks.yml'), + `required_checks: + - pr-fast +`, + ); + writeText( + path.join(root, 'governance/ga/required-checks.yaml'), + `required_checks: + - pr-fast +`, + ); + writeJson(path.join(root, 'governance/branch-protection.required.json'), { + required_status_checks: { + contexts: ['pr-fast'], + }, + }); + writeText( + path.join(root, '.github/workflows/merge-surge.yml'), + `name: merge-surge +on: + pull_request: +jobs: + pr-fast: + runs-on: ubuntu-latest + steps: + - run: echo ok +`, + ); + + const result = runRequiredChecksContractVerifier(root, { + GITHUB_TOKEN: '', + GITHUB_REPOSITORY: '', + }); + + assert.notEqual(result.status, 0); + assert.match( + result.stderr || result.stdout, + /required-checks\.yml requires 'pr-fast' which is NOT explicitly mapped/, + ); + } finally { + fs.rmSync(root, { recursive: true, force: true }); + } +}); + +test('required checks contract verifier fails when governance manifests drift from required-checks.yml', () => { + const root = fs.mkdtempSync(path.join(fixtureRoot, 'required-checks-governance-drift-')); + try { + writeText( + path.join(root, 'docs/governance/REQUIRED_CHECKS_CONTRACT.yml'), + `required_checks: + - context: "security-gates / gate" + type: "workflow" + workflow: + file: ".github/workflows/security-gates.yml" + workflow_name: "security-gates" + job_id: "gate" + job_name: "gate" + triggers: ["pull_request"] +`, + ); + writeText( + path.join(root, '.github/required-checks.yml'), + `required_checks: + - security-gates / gate +`, + ); + writeText( + path.join(root, 'governance/ga/required-checks.yaml'), + `required_checks: + - merge-surge / pr-fast +`, + ); + writeJson(path.join(root, 'governance/branch-protection.required.json'), { + required_status_checks: { + contexts: ['security-gates / gate'], + }, + }); + writeText( + path.join(root, '.github/workflows/security-gates.yml'), + `name: security-gates +on: + pull_request: +jobs: + gate: + runs-on: ubuntu-latest + steps: + - run: echo ok +`, + ); + + const result = runRequiredChecksContractVerifier(root, { + GITHUB_TOKEN: '', + GITHUB_REPOSITORY: '', + }); + + assert.notEqual(result.status, 0); + assert.match( + result.stderr || result.stdout, + /governance\/ga\/required-checks\.yaml is not aligned/, + ); + } finally { + fs.rmSync(root, { recursive: true, force: true }); + } +}); diff --git a/scripts/ci/__tests__/verify_security_gate.test.mjs b/scripts/ci/__tests__/verify_security_gate.test.mjs new file mode 100644 index 00000000000..e1de1e35180 --- /dev/null +++ b/scripts/ci/__tests__/verify_security_gate.test.mjs @@ -0,0 +1,62 @@ +import test from 'node:test'; +import assert from 'node:assert/strict'; +import fs from 'node:fs'; +import path from 'node:path'; +import { spawnSync } from 'node:child_process'; + +const verifier = path.resolve('scripts/ci/verify_security_gate.mjs'); +const workspace = path.resolve('.tmp/security-gate-test'); +const workflowSource = path.resolve('.github/workflows/security-gates.yml'); +const policySource = path.resolve('policies/security/security_gates.yml'); + +function runVerifier(workflowPath, policyPath, evidenceDir) { + return spawnSync('node', [verifier], { + encoding: 'utf8', + env: { + ...process.env, + SECURITY_GATE_WORKFLOW_PATH: workflowPath, + SECURITY_GATE_POLICY_PATH: policyPath, + SECURITY_GATE_EVIDENCE_DIR: evidenceDir + } + }); +} + +function resetDir(dir) { + fs.rmSync(dir, { recursive: true, force: true }); + fs.mkdirSync(dir, { recursive: true }); +} + +test('verify_security_gate passes on canonical workflow and policy', () => { + const evidenceDir = path.join(workspace, 'ok'); + resetDir(evidenceDir); + + const result = runVerifier(workflowSource, policySource, evidenceDir); + + assert.equal(result.status, 0, result.stderr || result.stdout); + assert.ok(fs.existsSync(path.join(evidenceDir, 'report.json'))); + assert.ok(fs.existsSync(path.join(evidenceDir, 'metrics.json'))); + assert.ok(fs.existsSync(path.join(evidenceDir, 'stamp.json'))); +}); + +test('verify_security_gate fails when an external action is not pinned', () => { + const caseDir = path.join(workspace, 'unpinned'); + const evidenceDir = path.join(caseDir, 'evidence'); + resetDir(caseDir); + + const tamperedWorkflow = fs + .readFileSync(workflowSource, 'utf8') + .replace( + 'actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4', + 'actions/checkout@v4' + ); + const tamperedWorkflowPath = path.join(caseDir, 'security-gates.yml'); + const copiedPolicyPath = path.join(caseDir, 'security_gates.yml'); + fs.writeFileSync(tamperedWorkflowPath, tamperedWorkflow); + fs.copyFileSync(policySource, copiedPolicyPath); + + const result = runVerifier(tamperedWorkflowPath, copiedPolicyPath, evidenceDir); + + assert.notEqual(result.status, 0); + assert.match(result.stderr, /unpinned_action/); + assert.ok(fs.existsSync(path.join(evidenceDir, 'report.json'))); +}); diff --git a/scripts/ci/build_admissibility_evidence.mjs b/scripts/ci/build_admissibility_evidence.mjs new file mode 100644 index 00000000000..d1489412f91 --- /dev/null +++ b/scripts/ci/build_admissibility_evidence.mjs @@ -0,0 +1,75 @@ +import { createHash } from 'node:crypto'; +import { mkdirSync, readFileSync, writeFileSync } from 'node:fs'; + +const artifactPath = process.env.ARTIFACT_PATH ?? 'dist/admissible-artifact.txt'; +const sbomPath = process.env.SBOM_PATH ?? 'evidence/sbom.cdx.json'; +const provenancePath = process.env.PROVENANCE_PATH ?? 'evidence/provenance.json'; +const signatureVerified = process.env.SIGNATURE_VERIFIED === 'true'; +const prohibitedDeps = (process.env.PROHIBITED_DEPS ?? '') + .split(',') + .map((item) => item.trim()) + .filter(Boolean) + .sort((a, b) => a.localeCompare(b)); + +const artifactDigest = createHash('sha256') + .update(readFileSync(artifactPath)) + .digest('hex'); + +const sbom = JSON.parse(readFileSync(sbomPath, 'utf8')); +const provenance = JSON.parse(readFileSync(provenancePath, 'utf8')); + +const components = (sbom.components ?? []) + .map((c) => c?.name) + .filter(Boolean) + .sort((a, b) => a.localeCompare(b)); + +const report = { + artifact: { + name: artifactPath, + digest: `sha256:${artifactDigest}`, + }, + signature: { + valid: signatureVerified, + keyId: 'cosign.pub', + }, + sbom: { + present: Boolean(sbom.bomFormat), + complete: components.length > 0, + format: sbom.bomFormat ?? 'unknown', + components, + }, + provenance: { + present: Boolean(provenance.predicateType), + chainIntact: Boolean(provenance.subject?.length && provenance.builder?.id), + builderId: provenance.builder?.id, + subjects: (provenance.subject ?? []) + .map((subject) => subject?.name) + .filter(Boolean) + .sort((a, b) => a.localeCompare(b)), + }, + dependencies: { + prohibitedFound: prohibitedDeps, + }, +}; + +const metrics = { + checks_total: 4, + checks_passed: + Number(report.signature.valid) + + Number(report.sbom.present && report.sbom.complete) + + Number(report.provenance.present && report.provenance.chainIntact) + + Number(report.dependencies.prohibitedFound.length === 0), + prohibited_dependency_count: report.dependencies.prohibitedFound.length, + sbom_component_count: report.sbom.components.length, +}; + +const stamp = { + run_id: process.env.GITHUB_RUN_ID ?? 'local', + git_sha: process.env.GITHUB_SHA ?? 'local', + generated_at: new Date().toISOString(), +}; + +mkdirSync('evidence', { recursive: true }); +writeFileSync('evidence/report.json', `${JSON.stringify(report, null, 2)}\n`); +writeFileSync('evidence/metrics.json', `${JSON.stringify(metrics, null, 2)}\n`); +writeFileSync('evidence/stamp.json', `${JSON.stringify(stamp, null, 2)}\n`); diff --git a/scripts/ci/check_branch_protection_drift.mjs b/scripts/ci/check_branch_protection_drift.mjs index 328703b506c..eb49269dfa9 100644 --- a/scripts/ci/check_branch_protection_drift.mjs +++ b/scripts/ci/check_branch_protection_drift.mjs @@ -12,6 +12,7 @@ import { import { compareByCodeUnit, writeDeterministicJson } from './lib/governance_evidence.mjs'; import { createRequire } from 'module'; const require = createRequire(import.meta.url); +const yaml = require('js-yaml'); import fsSync from 'fs'; const fs = fsSync; @@ -495,13 +496,17 @@ async function main() { const remediationCommand = `ALLOW_BRANCH_PROTECTION_CHANGES=1 pnpm ci:branch-protection:apply -- --repo ${repo} --branch ${branch} --policy ${policyPath}`; const markdown = formatMarkdown(report, diff, remediationCommand); -<<<<<<< HEAD const finalEvidence = { ...evidence, -======= - writeReportFiles(outDir, report, markdown, evidence, metadata); - const evidence2 = { ->>>>>>> pr-22168 + workflow_drift: { + missing_in_workflows: wfDrift.missing_in_workflows.slice().sort(compareByCodeUnit), + mismatches: wfDrift.mismatches + .slice() + .sort((left, right) => compareByCodeUnit(left.policy_context, right.policy_context)) + } + }; + + const auditEvidence = { schema_version: 1, kind: 'branch_protection_audit', target_branch: branch, @@ -510,13 +515,13 @@ async function main() { writeReportFiles(outDir, report, markdown, finalEvidence, metadata); if (driftDetected) { - evidence2.diff = { + auditEvidence.diff = { missing_in_github: diff.missing_in_github.slice().sort(compareByCodeUnit), extra_in_github: diff.extra_in_github.slice().sort(compareByCodeUnit), strict_mismatch: diff.strict_mismatch }; } - writeDeterministicJson(evidencePath, evidence2); + writeDeterministicJson(evidencePath, auditEvidence); if (driftDetected) { console.error('Branch protection drift detected. See drift.md for details.'); diff --git a/scripts/ci/check_determinism.mjs b/scripts/ci/check_determinism.mjs index 6a490d7ab31..f838b2867dc 100644 --- a/scripts/ci/check_determinism.mjs +++ b/scripts/ci/check_determinism.mjs @@ -1,148 +1,6 @@ -<<<<<<< HEAD -import fs from 'fs'; -import crypto from 'crypto'; -import path from 'path'; -import { execSync } from 'child_process'; - -const ARTIFACT_PATHS = [ - 'report.json', - 'perf_proof.json', - 'isolation_proof.json', -]; - -const EVIDENCE_DIR = 'evidence'; - -// Keys that are allowed to change between runs -const IGNORE_KEYS = [ - 'timestamp', - 'created_at', - 'updated_at', - 'run_id', - 'execution_id', - 'duration_ms', - 'start_time', - 'end_time' -]; - -function sortObject(obj) { - if (Array.isArray(obj)) { - return obj.map(sortObject); - } else if (obj !== null && typeof obj === 'object') { - return Object.keys(obj) - .sort() - .reduce((acc, key) => { - if (IGNORE_KEYS.includes(key)) return acc; - acc[key] = sortObject(obj[key]); - return acc; - }, {}); - } - return obj; -} - -function stableStringify(obj) { - return JSON.stringify(sortObject(obj)); -} - -function hashContent(content) { - return crypto.createHash('sha256').update(content).digest('hex'); -} - -function loadJson(filePath) { - if (!fs.existsSync(filePath)) return null; - try { - return JSON.parse(fs.readFileSync(filePath, 'utf-8')); - } catch (e) { - console.warn(`Error parsing ${filePath}: ${e.message}`); - return null; - } -} - -function collectEvidenceFiles() { - if (!fs.existsSync(EVIDENCE_DIR)) return []; - return fs.readdirSync(EVIDENCE_DIR) - .filter(f => f.endsWith('.json') && !f.endsWith('.meta.json') && f !== 'hash_ledger.json') - .map(f => path.join(EVIDENCE_DIR, f)); -} - -function snapshotArtifacts() { - const files = [...ARTIFACT_PATHS, ...collectEvidenceFiles()]; - const snapshot = {}; - - for (const file of files) { - const data = loadJson(file); - if (!data) continue; - - const normalized = stableStringify(data); - snapshot[file] = hashContent(normalized); - } - - return snapshot; -} - -function diffSnapshots(a, b) { - const diffs = []; - const allKeys = new Set([...Object.keys(a), ...Object.keys(b)]); - - for (const key of allKeys) { - if (a[key] !== b[key]) { - diffs.push({ - file: key, - before: a[key] || 'MISSING', - after: b[key] || 'MISSING' - }); - } - } - - return diffs; -} - -async function main() { - console.log('🔍 Determinism Gate: Pass 1'); - const snap1 = snapshotArtifacts(); - - if (Object.keys(snap1).length === 0) { - console.log('⚠️ No artifacts found to check.'); - } - - console.log('🔁 Re-running pipeline (make ci-core)...'); - try { - if (process.env.SKIP_RERUN !== 'true') { - execSync('make ci-core', { - stdio: 'inherit', - env: { ...process.env, SKIP_RERUN: 'true' } - }); - } - } catch (e) { - console.error(`❌ Pipeline re-run failed: ${e.message}`); - process.exit(1); - } - - console.log('🔍 Determinism Gate: Pass 2'); - const snap2 = snapshotArtifacts(); - - const diffs = diffSnapshots(snap1, snap2); - - if (diffs.length > 0) { - console.error('❌ Determinism violation detected:\n'); - diffs.forEach(d => { - console.error(`- ${d.file}`); - console.error(` Pass 1 Hash: ${d.before}`); - console.error(` Pass 2 Hash: ${d.after}`); - }); - process.exit(1); - } - - console.log('✅ Determinism verified (artifacts stable across runs)'); -} - -main().catch(err => { - console.error(err); - process.exit(1); -}); -======= #!/usr/bin/env node -import fs from "node:fs"; -import path from "node:path"; +import fs from 'node:fs'; +import path from 'node:path'; const bannedPatterns = [ /Date\.now\(/, @@ -155,19 +13,24 @@ const bannedPatterns = [ function walk(dir, out = []) { if (!fs.existsSync(dir)) return out; for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { - const p = path.join(dir, entry.name); - if (entry.isDirectory()) walk(p, out); - else out.push(p); + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + walk(fullPath, out); + } else { + out.push(fullPath); + } } return out; } -const targets = ["scripts", "apps", "packages", "services"].flatMap((d) => walk(d)); -let violations = []; +const targets = ['scripts', 'apps', 'packages', 'services'].flatMap((dir) => + walk(dir), +); +const violations = []; for (const file of targets) { if (!/\.(mjs|js|ts|tsx|jsx|json|yml|yaml)$/.test(file)) continue; - const content = fs.readFileSync(file, "utf8"); + const content = fs.readFileSync(file, 'utf8'); for (const pattern of bannedPatterns) { if (pattern.test(content)) { violations.push({ file, pattern: pattern.toString() }); @@ -175,14 +38,15 @@ for (const file of targets) { } } -fs.mkdirSync("artifacts/ci", { recursive: true }); +fs.mkdirSync('artifacts/ci', { recursive: true }); fs.writeFileSync( - "artifacts/ci/determinism-report.json", + 'artifacts/ci/determinism-report.json', JSON.stringify({ ok: violations.length === 0, violations }, null, 2), ); -if (violations.length) { - console.error("Determinism violations found"); +if (violations.length > 0) { + console.error('Determinism violations found'); process.exit(1); } ->>>>>>> pr-21871 + +console.log('Determinism check passed'); diff --git a/scripts/ci/check_determinism.sh b/scripts/ci/check_determinism.sh index 9723cd56f38..64866ad3b58 100755 --- a/scripts/ci/check_determinism.sh +++ b/scripts/ci/check_determinism.sh @@ -1,64 +1,21 @@ -<<<<<<< HEAD -<<<<<<< HEAD -#!/bin/bash -set -eo pipefail - -echo "Scanning for nondeterminism (timestamps, random outputs)..." - -if [ -d artifacts/ ] && grep -rnE "new Date\(\)|Date\.now\(\)" artifacts/; then - echo "::error::Timestamps detected in artifacts." - kill -s TERM $$ -fi - -echo "Determinism checks passed." -======= -#!/usr/bin/env bash -set -euo pipefail - -echo "Checking for nondeterminism..." - -# forbid timestamps outside stamp.json, but ignore standard files like package.json or docs -if find . -type f -not -path "*/node_modules/*" -not -path "*/dist/*" -not -path "*/.git/*" -name "*.json" ! -name "stamp.json" ! -name "package.json" ! -name "package-lock.json" | xargs grep -l "$(date +%Y)" >/dev/null 2>&1; then - echo "Timestamp (current year) detected outside stamp.json in JSON files" - # Don't exit 1 for now to prevent breaking existing code that might have the year - # exit 1 -fi - -# enforce sorted JSON -find . -type f -not -path "*/node_modules/*" -not -path "*/dist/*" -not -path "*/.git/*" -name "*.json" ! -name "stamp.json" ! -name "package.json" ! -name "package-lock.json" | while read f; do - if ! jq -S . "$f" > /tmp/sorted.json 2>/dev/null; then - continue - fi - if ! diff -q "$f" /tmp/sorted.json >/dev/null 2>&1; then - echo "Non-deterministic JSON ordering in $f" - # Don't exit 1 for now to prevent breaking existing code - # exit 1 - fi -done - -echo "Determinism OK" ->>>>>>> pr-21884 -======= #!/usr/bin/env bash set -euo pipefail echo "Checking for non-determinism..." -# We exclude node_modules and .git to avoid false positives if grep -R --include="*.js" --include="*.ts" --exclude-dir="node_modules" --exclude-dir=".git" "Date\.now()" .; then - echo "❌ Non-deterministic timestamp usage detected (Date.now())" + echo "Non-deterministic timestamp usage detected (Date.now())" exit 1 fi if grep -R --include="*.js" --include="*.ts" --exclude-dir="node_modules" --exclude-dir=".git" "new Date(" .; then - echo "❌ Non-deterministic Date usage detected (new Date())" + echo "Non-deterministic Date usage detected (new Date())" exit 1 fi if grep -R --include="*.js" --include="*.ts" --exclude-dir="node_modules" --exclude-dir=".git" "Math\.random()" .; then - echo "❌ Randomness detected (Math.random())" + echo "Randomness detected (Math.random())" exit 1 fi -echo "✅ Determinism check passed" ->>>>>>> pr-21871 +echo "Determinism check passed" diff --git a/scripts/ci/check_failure_domains.mjs b/scripts/ci/check_failure_domains.mjs index 33379bae2b5..cadcf82f34c 100644 --- a/scripts/ci/check_failure_domains.mjs +++ b/scripts/ci/check_failure_domains.mjs @@ -1,39 +1,204 @@ -import fs from "fs"; -import yaml from "js-yaml"; +#!/usr/bin/env node -const mapPath = ".repoos/failure-domain-map.yaml"; -if (!fs.existsSync(mapPath)) { - console.log("✅ No failure domain map found, skipping check."); - process.exit(0); +import fs from 'node:fs'; +import path from 'node:path'; +import { spawnSync } from 'node:child_process'; + +const schemaPath = path.resolve('schemas/governance/failure-domain.schema.json'); +const registryPath = path.resolve('policies/ops/failure-domain-map.yaml'); +const outPath = path.resolve('artifacts/failure-domain-report.json'); + +function readJson(filePath) { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); +} + +function readYaml(filePath) { + const result = spawnSync( + 'ruby', + [ + '-e', + 'require "yaml"; require "json"; puts JSON.generate(YAML.load_file(ARGV[0]))', + filePath + ], + { encoding: 'utf8' } + ); + + if (result.status !== 0) { + throw new Error(result.stderr || `Failed to parse YAML file: ${filePath}`); + } + + return JSON.parse(result.stdout); +} + +function ensureArray(value) { + return Array.isArray(value) ? value : []; } -const map = yaml.load(fs.readFileSync(mapPath, "utf8")); -const changedFilesStr = process.env.CHANGED_FILES || ""; -const changed = changedFilesStr.split("\n").filter(Boolean); +const allowedTypes = new Set([ + 'region', + 'zone', + 'cluster', + 'namespace', + 'service', + 'tenant', + 'data_partition', + 'decision_scope' +]); + +const allowedEnvironments = new Set(['dev', 'stage', 'prod']); +const allowedNetwork = new Set(['strict', 'soft']); +const allowedCompute = new Set(['dedicated', 'shared']); +const allowedData = new Set(['isolated', 'partitioned', 'shared']); +const allowedTenantIsolation = new Set(['strict', 'partitioned', 'shared']); + +function isStringOrNull(value) { + return value === null || typeof value === 'string'; +} -let violations = []; +function validateDomain(domain) { + const errors = []; -for (const file of changed) { - let matchedDomain = null; - for (const [domain, cfg] of Object.entries(map.domains)) { - if (cfg.paths.some(p => { - const prefix = p.replace("/**", ""); - return file.startsWith(prefix); - })) { - matchedDomain = domain; - // Basic check: if it's in a path but doesn't align with domain-specific rules - // For now, we enforce that files in a domain path must be accounted for - break; + if (!domain || typeof domain !== 'object') { + return [{ message: 'domain must be an object' }]; + } + + if (typeof domain.id !== 'string' || !/^[a-z0-9._:-]+$/.test(domain.id)) { + errors.push({ message: 'id must match ^[a-z0-9._:-]+$' }); + } + + if (!allowedTypes.has(domain.type)) { + errors.push({ message: `type must be one of: ${Array.from(allowedTypes).join(', ')}` }); + } + + if (!('scope' in domain) || typeof domain.scope !== 'object' || domain.scope === null) { + errors.push({ message: 'scope must be an object' }); + } else { + if (!allowedEnvironments.has(domain.scope.environment)) { + errors.push({ message: 'scope.environment must be one of dev, stage, prod' }); + } + for (const key of ['region', 'zone', 'cluster', 'namespace', 'service', 'tenant']) { + if (!isStringOrNull(domain.scope[key])) { + errors.push({ message: `scope.${key} must be a string or null` }); + } } } + + if (!('isolation' in domain) || typeof domain.isolation !== 'object' || domain.isolation === null) { + errors.push({ message: 'isolation must be an object' }); + } else { + if (!allowedNetwork.has(domain.isolation.network)) { + errors.push({ message: 'isolation.network must be strict or soft' }); + } + if (!allowedCompute.has(domain.isolation.compute)) { + errors.push({ message: 'isolation.compute must be dedicated or shared' }); + } + if (!allowedData.has(domain.isolation.data)) { + errors.push({ message: 'isolation.data must be isolated, partitioned, or shared' }); + } + if (!allowedTenantIsolation.has(domain.isolation.tenant)) { + errors.push({ message: 'isolation.tenant must be strict, partitioned, or shared' }); + } + } + + if (!('policy' in domain) || typeof domain.policy !== 'object' || domain.policy === null) { + errors.push({ message: 'policy must be an object' }); + } else { + if (!Array.isArray(domain.policy.cross_domain_calls)) { + errors.push({ message: 'policy.cross_domain_calls must be an array' }); + } + const blast = domain.policy.max_blast_radius; + if (!blast || typeof blast !== 'object') { + errors.push({ message: 'policy.max_blast_radius must be an object' }); + } else { + if (!Number.isInteger(blast.domains) || blast.domains < 0) { + errors.push({ message: 'policy.max_blast_radius.domains must be an integer >= 0' }); + } + if (!Number.isInteger(blast.tenants) || blast.tenants < 0) { + errors.push({ message: 'policy.max_blast_radius.tenants must be an integer >= 0' }); + } + } + } + + return errors; } -// In a real scenario, we would check cross-domain write permissions here -// For now, we provide the skeletal enforcement engine +function main() { + readJson(schemaPath); + const registry = readYaml(registryPath); + + const domains = ensureArray(registry.failure_domains); + const failures = []; + const warnings = []; + const seenIds = new Set(); + + if (domains.length === 0) { + failures.push({ + code: 'registry_empty', + message: 'failure_domains must contain at least one domain' + }); + } + + for (const domain of domains) { + const errors = validateDomain(domain); + if (errors.length > 0) { + failures.push({ + code: 'schema_invalid', + id: domain?.id ?? null, + errors + }); + } + + if (domain?.id) { + if (seenIds.has(domain.id)) { + failures.push({ + code: 'duplicate_id', + id: domain.id, + message: `Duplicate failure domain id: ${domain.id}` + }); + } + seenIds.add(domain.id); + } + + if ((domain?.policy?.max_blast_radius?.domains ?? 0) < 1) { + warnings.push({ + code: 'zero_domain_blast_radius', + id: domain?.id ?? null, + message: 'Blast radius domains should normally be >= 1' + }); + } + } + + const report = { + generated_at: new Date().toISOString(), + schema_path: path.relative(process.cwd(), schemaPath), + registry_path: path.relative(process.cwd(), registryPath), + domain_count: domains.length, + pass: failures.length === 0, + failures, + warnings + }; + + fs.mkdirSync(path.dirname(outPath), { recursive: true }); + fs.writeFileSync(outPath, JSON.stringify(report, null, 2) + '\n'); + + console.log(`Failure domain report: ${path.relative(process.cwd(), outPath)}`); + console.log(`Domains checked: ${domains.length}`); + console.log(`Failures: ${failures.length}`); + console.log(`Warnings: ${warnings.length}`); + + if (process.env.GITHUB_STEP_SUMMARY) { + const lines = [ + '## Failure Domain Declaration', + '', + `- Domains checked: \`${domains.length}\``, + `- Failures: \`${failures.length}\``, + `- Warnings: \`${warnings.length}\``, + `- Report: \`${path.relative(process.cwd(), outPath)}\`` + ]; + fs.appendFileSync(process.env.GITHUB_STEP_SUMMARY, lines.join('\n') + '\n'); + } -if (violations.length) { - console.error("❌ Failure domain violations:", violations); - process.exit(1); + process.exit(report.pass ? 0 : 1); } -console.log("✅ Failure domains respected"); +main(); diff --git a/scripts/ci/check_failure_isolation.mjs b/scripts/ci/check_failure_isolation.mjs new file mode 100644 index 00000000000..145170f9938 --- /dev/null +++ b/scripts/ci/check_failure_isolation.mjs @@ -0,0 +1,141 @@ +#!/usr/bin/env node + +import fs from 'node:fs'; +import path from 'node:path'; +import { spawnSync } from 'node:child_process'; + +const registryPath = path.resolve('policies/ops/failure-domain-map.yaml'); +const outDir = path.resolve('artifacts/failure-isolation'); + +function readYaml(filePath) { + const result = spawnSync( + 'ruby', + [ + '-e', + 'require "yaml"; require "json"; puts JSON.generate(YAML.load_file(ARGV[0]))', + filePath + ], + { encoding: 'utf8' } + ); + + if (result.status !== 0) { + throw new Error(result.stderr || `Failed to parse YAML file: ${filePath}`); + } + + return JSON.parse(result.stdout); +} + +function ensureArray(value) { + return Array.isArray(value) ? value : []; +} + +function writeJson(filePath, payload) { + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, JSON.stringify(payload, null, 2) + '\n'); +} + +function main() { + const registry = readYaml(registryPath); + const domains = ensureArray(registry.failure_domains); + const ids = new Set(domains.map((domain) => domain.id).filter(Boolean)); + const failures = []; + const warnings = []; + + for (const domain of domains) { + const id = domain?.id ?? null; + const calls = ensureArray(domain?.policy?.cross_domain_calls); + const parentId = domain?.parent_id ?? null; + + if (parentId && !ids.has(parentId)) { + failures.push({ + code: 'missing_parent_domain', + id, + parent_id: parentId, + message: `parent_id ${parentId} does not exist in the registry` + }); + } + + for (const target of calls) { + if (!ids.has(target)) { + failures.push({ + code: 'unknown_cross_domain_target', + id, + target, + message: `cross-domain call target ${target} does not exist in the registry` + }); + continue; + } + if (target === id) { + warnings.push({ + code: 'self_referential_cross_domain_call', + id, + target, + message: 'cross_domain_calls should not point to the same domain id' + }); + } + } + + if (domain?.scope?.environment === 'prod' && domain?.isolation?.network !== 'strict') { + warnings.push({ + code: 'prod_network_not_strict', + id, + message: 'production domains should normally use strict network isolation' + }); + } + + if ( + domain?.scope?.environment === 'prod' && + ensureArray(domain?.policy?.cross_domain_calls).length > 0 && + (domain?.policy?.max_blast_radius?.domains ?? 0) <= 1 + ) { + warnings.push({ + code: 'prod_cross_domain_with_tight_blast_radius', + id, + message: 'production domain allows cross-domain calls while keeping a single-domain blast radius; verify that this is intentional' + }); + } + } + + const report = { + generated_at: new Date().toISOString(), + registry_path: path.relative(process.cwd(), registryPath), + domain_count: domains.length, + pass: failures.length === 0, + failures, + warnings + }; + + writeJson(path.join(outDir, 'report.json'), report); + writeJson(path.join(outDir, 'metrics.json'), { + failures_count: failures.length, + warnings_count: warnings.length, + domain_count: domains.length + }); + writeJson(path.join(outDir, 'stamp.json'), { + run_id: `failure-isolation-${Date.now()}`, + generated_at: report.generated_at + }); + + console.log(`Failure isolation artifacts: ${path.relative(process.cwd(), outDir)}`); + console.log(`Domains checked: ${domains.length}`); + console.log(`Failures: ${failures.length}`); + console.log(`Warnings: ${warnings.length}`); + + if (process.env.GITHUB_STEP_SUMMARY) { + fs.appendFileSync( + process.env.GITHUB_STEP_SUMMARY, + [ + '## Failure Isolation', + '', + `- Domains checked: \`${domains.length}\``, + `- Failures: \`${failures.length}\``, + `- Warnings: \`${warnings.length}\``, + `- Artifacts: \`${path.relative(process.cwd(), outDir)}\`` + ].join('\n') + '\n' + ); + } + + process.exit(failures.length === 0 ? 0 : 1); +} + +main(); diff --git a/scripts/ci/generate-admissibility-bundle.ts b/scripts/ci/generate-admissibility-bundle.ts new file mode 100644 index 00000000000..8d70fb0d34d --- /dev/null +++ b/scripts/ci/generate-admissibility-bundle.ts @@ -0,0 +1,204 @@ +import { mkdirSync, readFileSync, writeFileSync } from 'node:fs'; +import path from 'node:path'; +import { parseArgs } from 'node:util'; + +import { sha256Buffer, stableStringify } from '../../lib/admissibility.ts'; + +interface InventoryItem { + path: string; + sha256: string; + size: number; +} + +const options = { + 'evidence-dir': { type: 'string' as const, default: 'evidence/admissibility' }, + 'policy-path': { + type: 'string' as const, + default: 'policy/admissibility/prohibited-dependencies.json', + }, + 'subject-name': { type: 'string' as const, default: 'summit-deployable' }, + 'subject-path': { type: 'string' as const }, +}; + +const { values } = parseArgs({ options, allowPositionals: false }); + +if (!values['subject-path']) { + throw new Error('--subject-path is required.'); +} + +const evidenceDir = path.resolve(values['evidence-dir']); +const subjectPath = path.resolve(values['subject-path']); +const subjectName = values['subject-name']; +const policyPath = path.resolve(values['policy-path']); +const sbomPath = path.join(evidenceDir, 'sbom.cyclonedx.json'); +const reportPath = path.join(evidenceDir, 'report.json'); +const metricsPath = path.join(evidenceDir, 'metrics.json'); +const provenancePath = path.join(evidenceDir, 'provenance.intoto.json'); +const stampPath = path.join(evidenceDir, 'stamp.json'); +const attestationPath = path.join(evidenceDir, 'attestation.json'); + +mkdirSync(evidenceDir, { recursive: true }); + +const subjectBuffer = readFileSync(subjectPath); +const sbom = JSON.parse(readFileSync(sbomPath, 'utf8')) as Record; +const policy = JSON.parse(readFileSync(policyPath, 'utf8')) as Record; +const subjectDigest = sha256Buffer(subjectBuffer); +const sbomDigest = sha256Buffer(readFileSync(sbomPath)); +const sbomComponents = Array.isArray(sbom.components) ? sbom.components.length : 0; +const commitSha = process.env.GITHUB_SHA ?? 'local'; +const workflowRef = process.env.GITHUB_WORKFLOW_REF ?? 'local'; +const refName = process.env.GITHUB_REF_NAME ?? 'local'; + +const report = { + bundleSchema: 'summit/admissibility/v1', + policyPath: path.relative(process.cwd(), policyPath), + requiredFiles: [ + 'attestation.json', + 'metrics.json', + 'provenance.intoto.json', + 'report.json', + 'sbom.cyclonedx.json', + ], + sbom: { + componentCount: sbomComponents, + format: typeof sbom.bomFormat === 'string' ? sbom.bomFormat : 'CycloneDX', + sha256: sbomDigest, + }, + subject: { + name: subjectName, + path: path.relative(process.cwd(), subjectPath), + sha256: subjectDigest, + }, +}; + +writeFileSync(reportPath, `${stableStringify(report)}\n`); + +const provenance = { + _type: 'https://in-toto.io/Statement/v1', + predicateType: 'https://slsa.dev/provenance/v1', + subject: [ + { + name: subjectName, + digest: { + sha256: subjectDigest, + }, + }, + ], + predicate: { + buildDefinition: { + buildType: 'https://summit.dev/admissibility/build/v1', + externalParameters: { + workflow: workflowRef, + ref: refName, + }, + internalParameters: { + sbomSha256: sbomDigest, + }, + resolvedDependencies: [ + { + digest: { + sha256: sha256Buffer(readFileSync(policyPath)), + }, + name: path.relative(process.cwd(), policyPath), + }, + ], + }, + builder: { + id: process.env.GITHUB_ACTIONS ? 'https://github.com/actions/runner' : 'local-builder', + }, + runDetails: { + builder: { + id: process.env.GITHUB_ACTIONS ? 'https://github.com/actions/runner' : 'local-builder', + }, + metadata: { + invocationId: commitSha, + }, + }, + }, +}; + +writeFileSync(provenancePath, `${stableStringify(provenance)}\n`); + +const provisionalInventory = [ + ['provenance.intoto.json', readFileSync(provenancePath, 'utf8')], + ['report.json', readFileSync(reportPath, 'utf8')], + ['sbom.cyclonedx.json', readFileSync(sbomPath, 'utf8')], +].map(([fileName, content]) => { + const buffer = Buffer.isBuffer(content) ? content : Buffer.from(content); + return { + path: fileName, + sha256: sha256Buffer(buffer), + size: buffer.byteLength, + }; +}); + +const metrics = { + counts: { + bundleFileCount: provisionalInventory.length + 2, + prohibitedDependencyCount: 0, + sbomComponentCount: sbomComponents, + }, + digests: { + attestation: 'pending-signature', + provenance: + provisionalInventory.find(item => item.path === 'provenance.intoto.json')?.sha256 ?? '', + report: provisionalInventory.find(item => item.path === 'report.json')?.sha256 ?? '', + sbom: sbomDigest, + subject: subjectDigest, + }, + policy: { + prohibitedDependencies: Array.isArray(policy.prohibitedDependencies) + ? [...policy.prohibitedDependencies].sort((left, right) => + String(left).localeCompare(String(right)), + ) + : [], + }, +}; + +writeFileSync(metricsPath, `${stableStringify(metrics)}\n`); + +const metricsBuffer = readFileSync(metricsPath); +const inventory: InventoryItem[] = [ + { + path: 'metrics.json', + sha256: sha256Buffer(metricsBuffer), + size: metricsBuffer.byteLength, + }, + ...provisionalInventory, +].sort((left, right) => left.path.localeCompare(right.path)); + +const attestation = { + _type: 'https://in-toto.io/Statement/v1', + predicateType: 'https://summit.dev/admissibility/evidence/v1', + subject: [ + { + name: subjectName, + digest: { + sha256: subjectDigest, + }, + }, + ], + predicate: { + inventory, + policy: { + prohibitedDependencies: Array.isArray(policy.prohibitedDependencies) + ? [...policy.prohibitedDependencies].sort((left, right) => + String(left).localeCompare(String(right)), + ) + : [], + }, + reportSha256: sha256Buffer(readFileSync(reportPath)), + sbomSha256: sbomDigest, + }, +}; + +writeFileSync(attestationPath, `${stableStringify(attestation)}\n`); + +const stamp = { + generatedAt: new Date().toISOString(), + githubRunId: process.env.GITHUB_RUN_ID ?? 'local', + runner: process.env.RUNNER_NAME ?? 'local', + workflowRef, +}; + +writeFileSync(stampPath, `${JSON.stringify(stamp, null, 2)}\n`); diff --git a/scripts/ci/verify-admissibility.ts b/scripts/ci/verify-admissibility.ts new file mode 100644 index 00000000000..7c7c61d4e48 --- /dev/null +++ b/scripts/ci/verify-admissibility.ts @@ -0,0 +1,17 @@ +import { readFileSync } from "node:fs"; +import { resolve } from "node:path"; +import { evaluateAdmissibility, type AdmissibilityEvidenceBundle } from "../../lib/admissibility"; + +const args = process.argv.slice(2); +const inputFlagIndex = args.indexOf("--input"); +const inputPath = inputFlagIndex >= 0 ? args[inputFlagIndex + 1] : "evidence/report.json"; + +const reportPath = resolve(process.cwd(), inputPath); +const parsed = JSON.parse(readFileSync(reportPath, "utf8")) as AdmissibilityEvidenceBundle; +const verdict = evaluateAdmissibility(parsed); + +process.stdout.write(`${JSON.stringify(verdict, null, 2)}\n`); + +if (verdict.status !== "PASS") { + process.exit(1); +} diff --git a/scripts/ci/verify-sbom-signature.sh b/scripts/ci/verify-sbom-signature.sh index e805f2535a5..1b36775c30b 100644 --- a/scripts/ci/verify-sbom-signature.sh +++ b/scripts/ci/verify-sbom-signature.sh @@ -11,6 +11,11 @@ fi EXPECTED_OIDC_ISSUER=${EXPECTED_OIDC_ISSUER:-"https://token.actions.githubusercontent.com"} EXPECTED_IDENTITY_REGEX=${EXPECTED_IDENTITY_REGEX:-"^https://github.com/.+/.github/workflows/.+@.*$"} +COSIGN_PUBLIC_KEY=${COSIGN_PUBLIC_KEY:-${COSIGN_PUB:-}} +COSIGN_TRUST_ROOT=${COSIGN_TRUST_ROOT:-} +COSIGN_SIGNING_CONFIG=${COSIGN_SIGNING_CONFIG:-} +COSIGN_REKOR_URL=${COSIGN_REKOR_URL:-} +COSIGN_BUNDLE_PATH=${COSIGN_BUNDLE_PATH:-} TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ") SAFE_TIMESTAMP=$(echo "$TIMESTAMP" | tr ':' '-') @@ -26,12 +31,24 @@ if ! command -v cosign >/dev/null 2>&1; then FAILURES+=("cosign_missing") else VERIFY_ARGS=() - if [ -n "${COSIGN_PUBLIC_KEY:-}" ]; then + if [ -n "${COSIGN_PUBLIC_KEY}" ]; then VERIFY_ARGS+=(--key "$COSIGN_PUBLIC_KEY") else VERIFY_ARGS+=(--certificate-identity-regexp "$EXPECTED_IDENTITY_REGEX") VERIFY_ARGS+=(--certificate-oidc-issuer "$EXPECTED_OIDC_ISSUER") fi + if [ -n "${COSIGN_TRUST_ROOT}" ]; then + VERIFY_ARGS+=(--trusted-root "$COSIGN_TRUST_ROOT") + fi + if [ -n "${COSIGN_SIGNING_CONFIG}" ]; then + VERIFY_ARGS+=(--signing-config "$COSIGN_SIGNING_CONFIG") + fi + if [ -n "${COSIGN_REKOR_URL}" ]; then + VERIFY_ARGS+=(--rekor-url "$COSIGN_REKOR_URL") + fi + if [ -n "${COSIGN_BUNDLE_PATH}" ]; then + VERIFY_ARGS+=(--bundle "$COSIGN_BUNDLE_PATH") + fi signature_log="$RECEIPT_DIR/supply-chain-signature-$SAFE_TIMESTAMP.log" sbom_cdx_log="$RECEIPT_DIR/supply-chain-sbom-cyclonedx-$SAFE_TIMESTAMP.log" diff --git a/scripts/ci/verify_branch_protection.mjs b/scripts/ci/verify_branch_protection.mjs index a0e4428eb1a..ec58529dfdf 100644 --- a/scripts/ci/verify_branch_protection.mjs +++ b/scripts/ci/verify_branch_protection.mjs @@ -1,72 +1,219 @@ #!/usr/bin/env node -import assert from "node:assert"; -import process from "node:process"; +import fs from 'node:fs'; +import path from 'node:path'; +import process from 'node:process'; const { GITHUB_TOKEN, GITHUB_REPOSITORY, - GITHUB_API_URL = "https://api.github.com", + GITHUB_API_URL = 'https://api.github.com', + BPAC_POLICY_PATH, + BPAC_LIVE_SNAPSHOT, + BPAC_EVIDENCE_DIR, } = process.env; -assert(GITHUB_TOKEN, "GITHUB_TOKEN missing"); -assert(GITHUB_REPOSITORY, "GITHUB_REPOSITORY missing"); +function readJson(filePath) { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); +} + +function normalizeLiveProtection(live) { + return { + enforce_admins: + typeof live.enforce_admins === 'boolean' + ? live.enforce_admins + : Boolean(live.enforce_admins?.enabled), + required_linear_history: + typeof live.required_linear_history === 'boolean' + ? live.required_linear_history + : Boolean(live.required_linear_history?.enabled), + required_status_checks: { + strict: Boolean(live.required_status_checks?.strict), + contexts: [...(live.required_status_checks?.contexts || [])].sort(), + }, + required_pull_request_reviews: { + dismiss_stale_reviews: Boolean( + live.required_pull_request_reviews?.dismiss_stale_reviews, + ), + require_code_owner_reviews: Boolean( + live.required_pull_request_reviews?.require_code_owner_reviews, + ), + required_approving_review_count: Number( + live.required_pull_request_reviews?.required_approving_review_count || 0, + ), + }, + }; +} -const [owner, repo] = GITHUB_REPOSITORY.split("/"); +function compareAgainstPolicy(policy, live) { + const violations = []; + + if ( + policy.enforce_admins !== undefined && + Boolean(policy.enforce_admins) !== live.enforce_admins + ) { + violations.push( + `enforce_admins mismatch: expected ${Boolean(policy.enforce_admins)}, got ${live.enforce_admins}`, + ); + } + + if ( + policy.required_linear_history !== undefined && + Boolean(policy.required_linear_history) !== live.required_linear_history + ) { + violations.push( + `required_linear_history mismatch: expected ${Boolean(policy.required_linear_history)}, got ${live.required_linear_history}`, + ); + } + + if (policy.required_pull_request_reviews) { + const reviews = policy.required_pull_request_reviews; + if ( + reviews.dismiss_stale_reviews !== undefined && + Boolean(reviews.dismiss_stale_reviews) !== + live.required_pull_request_reviews.dismiss_stale_reviews + ) { + violations.push( + `dismiss_stale_reviews mismatch: expected ${Boolean(reviews.dismiss_stale_reviews)}, got ${live.required_pull_request_reviews.dismiss_stale_reviews}`, + ); + } + if ( + reviews.require_code_owner_reviews !== undefined && + Boolean(reviews.require_code_owner_reviews) !== + live.required_pull_request_reviews.require_code_owner_reviews + ) { + violations.push( + `require_code_owner_reviews mismatch: expected ${Boolean(reviews.require_code_owner_reviews)}, got ${live.required_pull_request_reviews.require_code_owner_reviews}`, + ); + } + if ( + reviews.required_approving_review_count !== undefined && + Number(reviews.required_approving_review_count) !== + live.required_pull_request_reviews.required_approving_review_count + ) { + violations.push( + `required_approving_review_count mismatch: expected ${Number(reviews.required_approving_review_count)}, got ${live.required_pull_request_reviews.required_approving_review_count}`, + ); + } + } + + if (policy.required_status_checks) { + const policyChecks = policy.required_status_checks; + if ( + policyChecks.strict !== undefined && + Boolean(policyChecks.strict) !== live.required_status_checks.strict + ) { + violations.push( + `required_status_checks.strict mismatch: expected ${Boolean(policyChecks.strict)}, got ${live.required_status_checks.strict}`, + ); + } + const expectedContexts = [...(policyChecks.contexts || [])].sort(); + const actualContexts = live.required_status_checks.contexts; + if (JSON.stringify(expectedContexts) !== JSON.stringify(actualContexts)) { + violations.push( + `required_status_checks.contexts mismatch: expected ${JSON.stringify(expectedContexts)}, got ${JSON.stringify(actualContexts)}`, + ); + } + } + + return violations; +} + +function writeEvidence(dir, report) { + if (!dir) return; + fs.mkdirSync(dir, { recursive: true }); + fs.writeFileSync(path.join(dir, 'report.json'), `${JSON.stringify(report, null, 2)}\n`); +} -async function gh(path) { - const res = await fetch(`${GITHUB_API_URL}${path}`, { - headers: { - Authorization: `Bearer ${GITHUB_TOKEN}`, - Accept: "application/vnd.github+json", +async function fetchBranchProtection(branch) { + if (!GITHUB_TOKEN) throw new Error('GITHUB_TOKEN missing'); + if (!GITHUB_REPOSITORY) throw new Error('GITHUB_REPOSITORY missing'); + const [owner, repo] = GITHUB_REPOSITORY.split('/'); + const res = await fetch( + `${GITHUB_API_URL}/repos/${owner}/${repo}/branches/${branch}/protection`, + { + headers: { + Authorization: `Bearer ${GITHUB_TOKEN}`, + Accept: 'application/vnd.github+json', + }, }, - }); + ); if (!res.ok) { - throw new Error(`GitHub API ${path} failed: ${res.status}`); + throw new Error(`GitHub API /branches/${branch}/protection failed: ${res.status}`); } return res.json(); } -const branches = ["main"]; +async function verifyLiveBranchProtection() { + const branches = ['main']; + const violations = []; -let violations = []; - -for (const branch of branches) { - const bp = await gh( - `/repos/${owner}/${repo}/branches/${branch}/protection` - ); + for (const branch of branches) { + const bp = await fetchBranchProtection(branch); - if (!bp.enforce_admins?.enabled) { - violations.push(`${branch}: enforce_admins disabled`); + if (!bp.enforce_admins?.enabled) { + violations.push(`${branch}: enforce_admins disabled`); + } + if (!bp.required_linear_history?.enabled) { + violations.push(`${branch}: linear history not enforced`); + } + if ( + !bp.required_pull_request_reviews?.required_approving_review_count || + bp.required_pull_request_reviews.required_approving_review_count < 2 + ) { + violations.push(`${branch}: <2 required reviewers`); + } + if (!bp.required_status_checks?.strict) { + violations.push(`${branch}: strict status checks disabled`); + } } - if (!bp.required_linear_history?.enabled) { - violations.push(`${branch}: linear history not enforced`); + if (violations.length) { + throw new Error( + `Branch protection violations:\n - ${violations.join('\n - ')}`, + ); } +} - if (!bp.required_pull_request_reviews?.required_approving_review_count || - bp.required_pull_request_reviews.required_approving_review_count < 2) { - violations.push(`${branch}: <2 required reviewers`); +async function verifyFixtureBranchProtection() { + if (!BPAC_POLICY_PATH) { + throw new Error('BPAC_POLICY_PATH missing'); } - - if (!bp.required_status_checks?.strict) { - violations.push(`${branch}: strict status checks disabled`); + if (!BPAC_LIVE_SNAPSHOT) { + throw new Error('BPAC_LIVE_SNAPSHOT missing'); } -} -if (violations.length) { - console.error("❌ Branch protection violations:"); - violations.forEach(v => console.error(` - ${v}`)); - process.exit(1); + const policy = readJson(BPAC_POLICY_PATH); + const live = normalizeLiveProtection(readJson(BPAC_LIVE_SNAPSHOT)); + const violations = compareAgainstPolicy(policy, live); + + writeEvidence(BPAC_EVIDENCE_DIR, { + policy_path: BPAC_POLICY_PATH, + live_snapshot_path: BPAC_LIVE_SNAPSHOT, + status: violations.length === 0 ? 'PASS' : 'FAIL', + violations, + }); + + if (violations.length) { + throw new Error( + `Branch protection policy drift detected:\n - ${violations.join('\n - ')}`, + ); + } } -console.log("✅ Branch protection verified"); +async function main() { + if (BPAC_POLICY_PATH || BPAC_LIVE_SNAPSHOT) { + await verifyFixtureBranchProtection(); + console.log('✅ Branch protection fixture verified'); + } else { + await verifyLiveBranchProtection(); + console.log('✅ Branch protection verified'); + } -// Enforce REQUIRED_CHECKS_CONTRACT.yml consistency -try { const { run } = await import('./verify_required_checks_contract.mjs'); await run(); -} catch (err) { - console.error("❌ Failed to verify required checks contract:"); - console.error(err); - process.exit(1); } + +main().catch((err) => { + console.error(`❌ ${err.message}`); + process.exit(1); +}); diff --git a/scripts/ci/verify_failure_semantics.mjs b/scripts/ci/verify_failure_semantics.mjs new file mode 100644 index 00000000000..2a969e81c33 --- /dev/null +++ b/scripts/ci/verify_failure_semantics.mjs @@ -0,0 +1,147 @@ +#!/usr/bin/env node + +import crypto from 'node:crypto'; +import fs from 'node:fs'; +import path from 'node:path'; +import { spawnSync } from 'node:child_process'; + +const policyPath = path.resolve('policies/ops/failure-policy.yaml'); +const schemaPath = path.resolve('schemas/governance/failure-policy.schema.json'); +const outDir = path.resolve('artifacts/failure-semantics'); + +function readJson(filePath) { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); +} + +function readYaml(filePath) { + const result = spawnSync( + 'ruby', + [ + '-e', + 'require "yaml"; require "json"; puts JSON.generate(YAML.load_file(ARGV[0]))', + filePath + ], + { encoding: 'utf8' } + ); + + if (result.status !== 0) { + throw new Error(result.stderr || `Failed to parse YAML file: ${filePath}`); + } + + return JSON.parse(result.stdout); +} + +function stableStringify(value) { + if (Array.isArray(value)) { + return `[${value.map(stableStringify).join(',')}]`; + } + if (value && typeof value === 'object') { + return `{${Object.keys(value) + .sort() + .map((key) => `${JSON.stringify(key)}:${stableStringify(value[key])}`) + .join(',')}}`; + } + return JSON.stringify(value); +} + +function validateFailurePolicy(policy) { + const errors = []; + + if (!policy || typeof policy !== 'object') { + return [{ message: 'policy must be an object' }]; + } + if (!Number.isInteger(policy.version) || policy.version < 1) { + errors.push({ message: 'version must be an integer >= 1' }); + } + if (!policy.retry || typeof policy.retry !== 'object') { + errors.push({ message: 'retry must be an object' }); + } else { + if (!Number.isInteger(policy.retry.attempts) || policy.retry.attempts < 0) { + errors.push({ message: 'retry.attempts must be an integer >= 0' }); + } + if (typeof policy.retry.backoff !== 'string' || policy.retry.backoff.length === 0) { + errors.push({ message: 'retry.backoff must be a non-empty string' }); + } + } + if (!policy.timeout || typeof policy.timeout !== 'object') { + errors.push({ message: 'timeout must be an object' }); + } else if (!Number.isInteger(policy.timeout.default_ms) || policy.timeout.default_ms < 1) { + errors.push({ message: 'timeout.default_ms must be an integer >= 1' }); + } + if (!policy.circuit_breaker || typeof policy.circuit_breaker !== 'object') { + errors.push({ message: 'circuit_breaker must be an object' }); + } else { + if (typeof policy.circuit_breaker.threshold !== 'number' || + policy.circuit_breaker.threshold < 0 || + policy.circuit_breaker.threshold > 1) { + errors.push({ message: 'circuit_breaker.threshold must be a number between 0 and 1' }); + } + if (!Number.isInteger(policy.circuit_breaker.cooldown_ms) || + policy.circuit_breaker.cooldown_ms < 1) { + errors.push({ message: 'circuit_breaker.cooldown_ms must be an integer >= 1' }); + } + } + + return errors; +} + +function writeJson(filePath, payload) { + fs.writeFileSync(filePath, JSON.stringify(payload, null, 2) + '\n'); +} + +function main() { + readJson(schemaPath); + const policy = readYaml(policyPath); + const errors = validateFailurePolicy(policy); + const canonical = stableStringify(policy); + const policyHash = crypto.createHash('sha256').update(canonical).digest('hex'); + const startedAt = new Date().toISOString(); + + fs.mkdirSync(outDir, { recursive: true }); + + const report = { + gate: 'failure-semantics', + policy_path: path.relative(process.cwd(), policyPath), + schema_path: path.relative(process.cwd(), schemaPath), + ok: errors.length === 0, + errors, + failure_semantics_hash: policyHash + }; + + const metrics = { + validation_errors: errors.length, + failure_semantics_hash: policyHash + }; + + const stamp = { + run_id: `failure-semantics-${Date.now()}`, + started_at: startedAt, + finished_at: new Date().toISOString(), + policy_path: path.relative(process.cwd(), policyPath) + }; + + writeJson(path.join(outDir, 'report.json'), report); + writeJson(path.join(outDir, 'metrics.json'), metrics); + writeJson(path.join(outDir, 'stamp.json'), stamp); + fs.writeFileSync(path.join(outDir, 'failure-semantics-hash.txt'), `${policyHash}\n`); + + console.log(`Failure semantics hash: ${policyHash}`); + console.log(`Artifacts: ${path.relative(process.cwd(), outDir)}`); + + if (process.env.GITHUB_STEP_SUMMARY) { + fs.appendFileSync( + process.env.GITHUB_STEP_SUMMARY, + [ + '## Failure Semantics', + '', + `- Hash: \`${policyHash}\``, + `- Errors: \`${errors.length}\``, + `- Artifacts: \`${path.relative(process.cwd(), outDir)}\`` + ].join('\n') + '\n' + ); + } + + process.exit(errors.length === 0 ? 0 : 1); +} + +main(); diff --git a/scripts/ci/verify_required_checks_contract.mjs b/scripts/ci/verify_required_checks_contract.mjs index a48529dacbc..420c1326d76 100644 --- a/scripts/ci/verify_required_checks_contract.mjs +++ b/scripts/ci/verify_required_checks_contract.mjs @@ -2,7 +2,7 @@ import fs from "node:fs"; import process from "node:process"; import assert from "node:assert"; -import yaml from "js-yaml"; +import { spawnSync } from "node:child_process"; const { GITHUB_TOKEN, @@ -17,6 +17,28 @@ if (!GITHUB_REPOSITORY) { console.log("⚠️ GITHUB_REPOSITORY missing, skipping live branch protection verification."); } +function loadYaml(file) { + const input = fs.readFileSync(file, "utf8"); + const result = spawnSync( + "python3", + [ + "-c", + "import json, sys, yaml; json.dump(yaml.load(sys.stdin.read(), Loader=yaml.BaseLoader), sys.stdout)", + ], + { + input, + encoding: "utf8", + }, + ); + + if (result.status !== 0) { + const detail = (result.stderr || result.stdout || "").trim(); + throw new Error(`Failed to parse YAML ${file}: ${detail || "unknown error"}`); + } + + return JSON.parse(result.stdout || "null"); +} + async function fetchBranchProtectionChecks() { if (!GITHUB_TOKEN || !GITHUB_REPOSITORY) return []; const [owner, repo] = GITHUB_REPOSITORY.split("/"); @@ -43,9 +65,12 @@ async function fetchBranchProtectionChecks() { } export async function run() { - const contractContent = fs.readFileSync("docs/governance/REQUIRED_CHECKS_CONTRACT.yml", "utf8"); - const contractData = yaml.load(contractContent); + const contractData = loadYaml("docs/governance/REQUIRED_CHECKS_CONTRACT.yml"); const contractChecks = contractData.required_checks || []; + const gaRequiredChecksData = loadYaml("governance/ga/required-checks.yaml"); + const branchProtectionPolicy = JSON.parse( + fs.readFileSync("governance/branch-protection.required.json", "utf8"), + ); let hasErrors = false; @@ -76,8 +101,7 @@ export async function run() { continue; } - const wfContent = fs.readFileSync(wf.file, "utf8"); - const wfData = yaml.load(wfContent); + const wfData = loadYaml(wf.file); if (wf.triggers && wf.triggers.includes("pull_request")) { const hasPullRequest = wfData.on && ( @@ -155,26 +179,33 @@ export async function run() { } } else { // Fallback to local file if API cannot be reached - const requiredChecksContent = fs.readFileSync(".github/required-checks.yml", "utf8"); - const localData = yaml.load(requiredChecksContent); + const localData = loadYaml(".github/required-checks.yml"); const branchRequiredChecks = localData.required_checks || []; for (const requiredCheck of branchRequiredChecks) { if (!uniqueContexts.has(requiredCheck)) { - // Find if any contract context implicitly satisfies this (if required checks is an alias or old name) - let found = false; - for (const context of uniqueContexts) { - if (context.includes(requiredCheck) || requiredCheck.includes(context)) { - found = true; - break; - } - } - - if (!found) { - console.error(`❌ required-checks.yml requires '${requiredCheck}' which is NOT explicitly mapped or listed in REQUIRED_CHECKS_CONTRACT.yml`); - hasErrors = true; - } + console.error(`❌ required-checks.yml requires '${requiredCheck}' which is NOT explicitly mapped or listed in REQUIRED_CHECKS_CONTRACT.yml`); + hasErrors = true; } } + + const gaRequiredChecks = gaRequiredChecksData.required_checks || []; + if (JSON.stringify(branchRequiredChecks) !== JSON.stringify(gaRequiredChecks)) { + console.error("❌ governance/ga/required-checks.yaml is not aligned with .github/required-checks.yml"); + console.error(`.github/required-checks.yml: ${JSON.stringify(branchRequiredChecks)}`); + console.error(`governance/ga/required-checks.yaml: ${JSON.stringify(gaRequiredChecks)}`); + hasErrors = true; + } + + const branchProtectionContexts = + branchProtectionPolicy.required_status_checks?.contexts || []; + if (JSON.stringify(branchRequiredChecks) !== JSON.stringify(branchProtectionContexts)) { + console.error("❌ governance/branch-protection.required.json is not aligned with .github/required-checks.yml"); + console.error(`.github/required-checks.yml: ${JSON.stringify(branchRequiredChecks)}`); + console.error( + `governance/branch-protection.required.json: ${JSON.stringify(branchProtectionContexts)}`, + ); + hasErrors = true; + } } if (hasErrors) { diff --git a/scripts/ci/verify_security_gate.mjs b/scripts/ci/verify_security_gate.mjs new file mode 100644 index 00000000000..2615fceee29 --- /dev/null +++ b/scripts/ci/verify_security_gate.mjs @@ -0,0 +1,318 @@ +import { createHash } from 'node:crypto'; +import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs'; +import path from 'node:path'; +import { spawnSync } from 'node:child_process'; + +const REPO_ROOT = process.cwd(); +const DEFAULT_WORKFLOW_PATH = '.github/workflows/security-gates.yml'; +const DEFAULT_POLICY_PATH = 'policies/security/security_gates.yml'; +const DEFAULT_OUT_DIR = 'artifacts/security-gate'; +const REQUIRED_TRIGGERS = ['merge_group', 'pull_request', 'workflow_dispatch']; +const REQUIRED_PERMISSIONS = { + contents: 'read', + 'id-token': 'write', + 'security-events': 'write' +}; +const REQUIRED_POLICY_GATES = ['sast', 'sca', 'secret-scanning']; +const REQUIRED_EXTERNAL_ACTIONS = [ + 'actions/checkout', + 'actions/setup-node', + 'actions/upload-artifact', + 'aquasecurity/trivy-action', + 'gitleaks/gitleaks-action', + 'pnpm/action-setup', + 'returntocorp/semgrep-action' +]; + +function sha256(value) { + return createHash('sha256').update(value).digest('hex'); +} + +function normalize(value) { + if (Array.isArray(value)) { + return value.map(normalize); + } + if (value && typeof value === 'object') { + return Object.keys(value) + .sort() + .reduce((acc, key) => { + acc[key] = normalize(value[key]); + return acc; + }, {}); + } + return value; +} + +function parseYaml(filePath) { + const loader = [ + 'import json', + 'import sys', + 'import yaml', + 'from yaml import BaseLoader', + 'with open(sys.argv[1], "r", encoding="utf-8") as fh:', + ' data = yaml.load(fh, Loader=BaseLoader)', + 'json.dump(data, sys.stdout)' + ].join('\n'); + const result = spawnSync('python3', ['-c', loader, filePath], { + encoding: 'utf8' + }); + if (result.status !== 0) { + throw new Error(result.stderr.trim() || `Failed to parse YAML: ${filePath}`); + } + return JSON.parse(result.stdout); +} + +function isPinnedRef(ref) { + return /^[a-f0-9]{40}$/i.test(ref); +} + +function collectUses(workflow) { + const jobs = workflow.jobs ?? {}; + return Object.entries(jobs).flatMap(([jobId, job]) => + (job.steps ?? []).flatMap((step, index) => { + if (!step.uses || typeof step.uses !== 'string') { + return []; + } + const [action, ref = ''] = step.uses.split('@'); + return [ + { + action, + ref, + job_id: jobId, + step_index: index, + local: action.startsWith('./') + } + ]; + }) + ); +} + +function failure(check, detail) { + return { check, detail }; +} + +function main() { + const workflowPath = path.resolve( + REPO_ROOT, + process.env.SECURITY_GATE_WORKFLOW_PATH || DEFAULT_WORKFLOW_PATH + ); + const policyPath = path.resolve( + REPO_ROOT, + process.env.SECURITY_GATE_POLICY_PATH || DEFAULT_POLICY_PATH + ); + const outDir = path.resolve( + REPO_ROOT, + process.env.SECURITY_GATE_EVIDENCE_DIR || DEFAULT_OUT_DIR + ); + + if (!existsSync(workflowPath)) { + throw new Error(`Workflow not found: ${path.relative(REPO_ROOT, workflowPath)}`); + } + if (!existsSync(policyPath)) { + throw new Error(`Policy not found: ${path.relative(REPO_ROOT, policyPath)}`); + } + + const workflowSource = readFileSync(workflowPath, 'utf8'); + const policySource = readFileSync(policyPath, 'utf8'); + const workflow = parseYaml(workflowPath); + const policy = parseYaml(policyPath); + + const failures = []; + + if (workflow.name !== 'security-gates') { + failures.push( + failure('workflow_name', 'Workflow name must remain exactly "security-gates".') + ); + } + + const workflowOn = workflow.on ?? {}; + const triggerKeys = Object.keys(workflowOn).sort(); + for (const trigger of REQUIRED_TRIGGERS) { + if (!triggerKeys.includes(trigger)) { + failures.push( + failure('required_trigger', `Missing required trigger "${trigger}".`) + ); + } + } + + if (!workflow.concurrency) { + failures.push( + failure('concurrency', 'Workflow must declare concurrency for merge-queue safety.') + ); + } + + const permissions = workflow.permissions ?? {}; + for (const [key, expected] of Object.entries(REQUIRED_PERMISSIONS)) { + if ((permissions[key] ?? '') !== expected) { + failures.push( + failure( + 'permissions', + `Permission "${key}" must be "${expected}" (found "${permissions[key] ?? 'missing'}").` + ) + ); + } + } + if ('FORCE_JAVASCRIPT_ACTIONS_TO_NODE24' in permissions) { + failures.push( + failure( + 'permissions', + 'FORCE_JAVASCRIPT_ACTIONS_TO_NODE24 must be declared under env, not permissions.' + ) + ); + } + + const policyGateIds = [...new Set((policy.gates ?? []).map((gate) => gate.id))].sort(); + if (JSON.stringify(policyGateIds) !== JSON.stringify(REQUIRED_POLICY_GATES)) { + failures.push( + failure( + 'policy_gates', + `Policy gate ids must equal ${REQUIRED_POLICY_GATES.join(', ')} (found ${policyGateIds.join(', ') || 'none'}).` + ) + ); + } + + for (const gate of policy.gates ?? []) { + if (gate.type !== 'required') { + failures.push( + failure( + 'policy_gate_type', + `Policy gate "${gate.id}" must be typed as "required".` + ) + ); + } + if (!gate.tool) { + failures.push( + failure('policy_gate_tool', `Policy gate "${gate.id}" must declare a tool.`) + ); + } + } + + const usesEntries = collectUses(workflow); + const externalActions = usesEntries + .filter((entry) => !entry.local) + .map((entry) => ({ + action: entry.action, + ref: entry.ref, + pinned: isPinnedRef(entry.ref), + job_id: entry.job_id, + step_index: entry.step_index + })) + .sort((a, b) => a.action.localeCompare(b.action) || a.ref.localeCompare(b.ref)); + + for (const requiredAction of REQUIRED_EXTERNAL_ACTIONS) { + if (!externalActions.some((entry) => entry.action === requiredAction)) { + failures.push( + failure( + 'required_action', + `Workflow must include external action "${requiredAction}".` + ) + ); + } + } + + for (const entry of externalActions) { + if (!entry.pinned) { + failures.push( + failure( + 'unpinned_action', + `External action "${entry.action}@${entry.ref}" must be pinned to a full commit SHA.` + ) + ); + } + } + + const gateJob = workflow.jobs?.gate; + if (!gateJob) { + failures.push(failure('job_missing', 'Workflow must define a "gate" job.')); + } + + const stepsJson = JSON.stringify(gateJob?.steps ?? []); + if (!stepsJson.includes('verify:security-gate')) { + failures.push( + failure( + 'verifier_step', + 'Workflow must execute the deterministic security gate verifier.' + ) + ); + } + if (!stepsJson.includes('@cyclonedx/cyclonedx-npm')) { + failures.push( + failure( + 'sbom_step', + 'Workflow must generate an SBOM via @cyclonedx/cyclonedx-npm.' + ) + ); + } + if (!stepsJson.includes('verify-provenance.mjs')) { + failures.push( + failure( + 'provenance_step', + 'Workflow must include a provenance verification step.' + ) + ); + } + if (!stepsJson.includes('artifacts/security-gate')) { + failures.push( + failure( + 'artifact_upload', + 'Workflow must upload the security gate evidence triad from artifacts/security-gate.' + ) + ); + } + + const report = normalize({ + status: failures.length === 0 ? 'pass' : 'fail', + workflow_path: path.relative(REPO_ROOT, workflowPath), + policy_path: path.relative(REPO_ROOT, policyPath), + workflow_hash: sha256(workflowSource), + policy_hash: sha256(policySource), + required_triggers: REQUIRED_TRIGGERS, + observed_triggers: triggerKeys, + required_permissions: REQUIRED_PERMISSIONS, + observed_permissions: permissions, + required_policy_gates: REQUIRED_POLICY_GATES, + observed_policy_gates: policyGateIds, + external_actions: externalActions, + failures + }); + const metrics = normalize({ + failure_count: failures.length, + trigger_count: triggerKeys.length, + permission_count: Object.keys(permissions).length, + policy_gate_count: policyGateIds.length, + external_action_count: externalActions.length, + pinned_external_action_count: externalActions.filter((entry) => entry.pinned).length + }); + + mkdirSync(outDir, { recursive: true }); + const reportJson = JSON.stringify(report, null, 2); + const metricsJson = JSON.stringify(metrics, null, 2); + writeFileSync(path.join(outDir, 'report.json'), reportJson); + writeFileSync(path.join(outDir, 'metrics.json'), metricsJson); + writeFileSync( + path.join(outDir, 'stamp.json'), + JSON.stringify( + { + status: report.status, + workflow_path: report.workflow_path, + policy_path: report.policy_path, + report_hash: sha256(reportJson), + metrics_hash: sha256(metricsJson), + timestamp: new Date().toISOString() + }, + null, + 2 + ) + ); + + if (failures.length > 0) { + for (const item of failures) { + console.error(`security-gate:${item.check}: ${item.detail}`); + } + process.exit(1); + } + + console.log('✅ security-gate verifier passed.'); +} + +main(); diff --git a/scripts/ci/verify_summit_attestation_bundle.mjs b/scripts/ci/verify_summit_attestation_bundle.mjs new file mode 100644 index 00000000000..30ab42f8864 --- /dev/null +++ b/scripts/ci/verify_summit_attestation_bundle.mjs @@ -0,0 +1,156 @@ +#!/usr/bin/env node +import fs from 'node:fs'; +import path from 'node:path'; + +const REQUIRED_FILES = [ + 'subject.json', + 'verification-summary.json', + 'decision-proof.json', + 'policy.json', +]; + +const DECISION_VERDICTS = new Set([ + 'ADMISSIBLE', + 'NOT_ADMISSIBLE', + 'REVIEW_REQUIRED', +]); + +function fail(message) { + process.stderr.write(`${message}\n`); + process.exit(1); +} + +function readJson(bundleDir, file) { + const absolutePath = path.join(bundleDir, file); + if (!fs.existsSync(absolutePath)) { + return { + error: `missing required file: ${file}`, + }; + } + try { + return { + value: JSON.parse(fs.readFileSync(absolutePath, 'utf8')), + }; + } catch (error) { + return { + error: `invalid JSON in ${file}: ${error.message}`, + }; + } +} + +function main() { + const bundleDir = path.resolve(process.argv[2] ?? 'attestations'); + const failedProperties = []; + + for (const file of REQUIRED_FILES) { + if (!fs.existsSync(path.join(bundleDir, file))) { + failedProperties.push(`bundle.file_present.${file}`); + } + } + + if (failedProperties.length > 0) { + process.stdout.write( + `${JSON.stringify( + { + status: 'FAIL', + bundle_dir: bundleDir, + failed_properties: failedProperties, + }, + null, + 2, + )}\n`, + ); + process.exit(1); + } + + const subjectResult = readJson(bundleDir, 'subject.json'); + const verificationResult = readJson(bundleDir, 'verification-summary.json'); + const decisionResult = readJson(bundleDir, 'decision-proof.json'); + const policyResult = readJson(bundleDir, 'policy.json'); + + const parsingErrors = [ + subjectResult.error, + verificationResult.error, + decisionResult.error, + policyResult.error, + ].filter(Boolean); + + if (parsingErrors.length > 0) { + fail(parsingErrors.join('\n')); + } + + const subject = subjectResult.value; + const verificationSummary = verificationResult.value; + const decisionProof = decisionResult.value; + const policy = policyResult.value; + + if (!subject.subject_digest) { + failedProperties.push('subject.subject_digest_present'); + } + if (!subject.subject_type) { + failedProperties.push('subject.subject_type_present'); + } + if (!subject.bundle_version) { + failedProperties.push('subject.bundle_version_present'); + } + + if (verificationSummary?.subject?.digest !== subject.subject_digest) { + failedProperties.push('verification.subject_digest_matches'); + } + if (!verificationSummary?.verifier?.name) { + failedProperties.push('verification.verifier_name_present'); + } + if (!verificationSummary?.verifier?.version) { + failedProperties.push('verification.verifier_version_present'); + } + if (!Array.isArray(verificationSummary?.results) || verificationSummary.results.length === 0) { + failedProperties.push('verification.results_present'); + } + + if (!policy.policy_digest) { + failedProperties.push('policy.policy_digest_present'); + } + if (verificationSummary?.policy_digest !== policy.policy_digest) { + failedProperties.push('verification.policy_digest_matches'); + } + + if (decisionProof?.subject_digest !== subject.subject_digest) { + failedProperties.push('decision.subject_digest_matches'); + } + if (decisionProof?.verification_ref !== 'verification-summary.json') { + failedProperties.push('decision.verification_ref_canonical'); + } + if (!decisionProof?.lineage_run_id) { + failedProperties.push('decision.lineage_run_id_present'); + } + if (!DECISION_VERDICTS.has(decisionProof?.verdict)) { + failedProperties.push('decision.verdict_valid'); + } + + const failedVerificationResults = Array.isArray(verificationSummary?.results) + ? verificationSummary.results + .filter((result) => result?.status === 'FAIL') + .map((result) => result.property) + : []; + + const status = + failedProperties.length === 0 && failedVerificationResults.length === 0 ? 'PASS' : 'FAIL'; + + const output = { + status, + bundle_dir: bundleDir, + subject_digest: subject.subject_digest ?? null, + verifier: verificationSummary?.verifier ?? null, + policy_digest: policy.policy_digest ?? null, + failed_properties: [...failedProperties, ...failedVerificationResults], + verdict: decisionProof?.verdict ?? null, + }; + + process.stdout.write(`${JSON.stringify(output, null, 2)}\n`); + + if (status !== 'PASS') { + process.exit(1); + } +} + +main(); diff --git a/scripts/configure-branch-protection.sh b/scripts/configure-branch-protection.sh index 05764857880..e2692ae6f94 100644 --- a/scripts/configure-branch-protection.sh +++ b/scripts/configure-branch-protection.sh @@ -48,12 +48,10 @@ PROTECTION_CONFIG=$(cat << 'EOF' "required_status_checks": { "strict": true, "checks": [ - {"context": "CI - Comprehensive Gates / setup"}, - {"context": "CI - Comprehensive Gates / lint-and-typecheck"}, - {"context": "CI - Comprehensive Gates / unit-integration-tests"}, - {"context": "CI - Comprehensive Gates / security-gates"}, - {"context": "CI - Comprehensive Gates / build-and-attestation"}, - {"context": "CI - Comprehensive Gates / merge-readiness"} + {"context": "ci-guard / attestation-bundle-verifier"}, + {"context": "merge-surge / merge-queue"}, + {"context": "merge-surge / pr-fast"}, + {"context": "security-gates / gate"} ] }, "required_pull_request_reviews": { @@ -66,8 +64,8 @@ PROTECTION_CONFIG=$(cat << 'EOF' "teams": [] } }, - "enforce_admins": false, - "required_linear_history": false, + "enforce_admins": true, + "required_linear_history": true, "allow_force_pushes": false, "allow_deletions": false, "block_creations": false, @@ -103,7 +101,7 @@ if [[ "$merge_queue_available" == "true" ]]; then { "merge_method": "merge", "required_status_checks": [ - {"context": "CI - Comprehensive Gates / merge-readiness"} + {"context": "merge-surge / merge-queue"} ] } EOF @@ -168,11 +166,9 @@ cat << EOF ## Required Status Checks: 1. setup (environment preparation) -2. lint-and-typecheck (code quality) -3. unit-integration-tests (test coverage ≥80%) -4. security-gates (SBOM, vulnerability scan, secrets) -5. build-and-attestation (successful compilation) -6. merge-readiness (overall gate evaluation) +2. merge-surge / merge-queue (merge-group protected-branch verification) +3. merge-surge / pr-fast (pull-request protected-branch verification) +4. security-gates / gate (security evidence and pinned-action enforcement) ## Additional Protections: - ✅ Dismiss stale reviews on new pushes @@ -235,4 +231,4 @@ Safe to merge after validation." --label "test,green-train,protection-validation" echo "✅ Test PR created - check GitHub for validation results" -fi \ No newline at end of file +fi diff --git a/scripts/pr-orchestrator-config.js b/scripts/pr-orchestrator-config.js index bc4a3de6808..d7cc736c017 100644 --- a/scripts/pr-orchestrator-config.js +++ b/scripts/pr-orchestrator-config.js @@ -8,12 +8,10 @@ module.exports = { REQUIRED_CHECKS: process.env.REQUIRED_CHECKS ? process.env.REQUIRED_CHECKS.split(',') : [ - 'CI - Comprehensive Gates / setup', - 'CI - Comprehensive Gates / lint-and-typecheck', - 'CI - Comprehensive Gates / unit-integration-tests', - 'CI - Comprehensive Gates / security-gates', - 'CI - Comprehensive Gates / build-and-attestation', - 'CI - Comprehensive Gates / merge-readiness', + 'ci-guard / attestation-bundle-verifier', + 'merge-surge / merge-queue', + 'merge-surge / pr-fast', + 'security-gates / gate', ], INCLUDE_LABELS: process.env.INCLUDE_LABELS ? process.env.INCLUDE_LABELS.split(',') diff --git a/scripts/pr-orchestrator/README.md b/scripts/pr-orchestrator/README.md index b38f948bdef..0893fba748d 100644 --- a/scripts/pr-orchestrator/README.md +++ b/scripts/pr-orchestrator/README.md @@ -25,7 +25,7 @@ REPO=BrianCLong/intelgraph-platform BASE_BRANCH=main # Required checks that must pass for merge -REQUIRED_CHECKS="CI - Comprehensive Gates / setup,CI - Comprehensive Gates / lint-and-typecheck,CI - Comprehensive Gates / unit-integration-tests,CI - Comprehensive Gates / security-gates,CI - Comprehensive Gates / build-and-attestation,CI - Comprehensive Gates / merge-readiness" +REQUIRED_CHECKS="ci-guard / attestation-bundle-verifier,merge-surge / merge-queue,merge-surge / pr-fast,security-gates / gate" # Labels to prioritize or exclude INCLUDE_LABELS="bug,security,hotfix,priority" diff --git a/scripts/queue-performance-monitor.js b/scripts/queue-performance-monitor.js index 53cd825fd0b..316ff9eb4cd 100644 --- a/scripts/queue-performance-monitor.js +++ b/scripts/queue-performance-monitor.js @@ -378,9 +378,9 @@ class QueuePerformanceMonitor { const parallelConfig = { 'jobs-can-run-in-parallel': [ - 'lint-and-typecheck', - 'unit-integration-tests', - 'security-gates', + 'attestation-bundle-verifier', + 'pr-fast', + 'gate', ], 'matrix-strategies': { 'test-matrix': ['unit', 'integration', 'e2e'], diff --git a/tests/ci/admissibility.test.mts b/tests/ci/admissibility.test.mts new file mode 100644 index 00000000000..8105ab47651 --- /dev/null +++ b/tests/ci/admissibility.test.mts @@ -0,0 +1,32 @@ +import test from "node:test"; +import assert from "node:assert/strict"; +import { evaluateAdmissibility } from "../../lib/admissibility"; + +test("evaluateAdmissibility returns PASS for complete valid bundle", () => { + const verdict = evaluateAdmissibility({ + artifact: { digest: "sha256:abc" }, + signature: { valid: true }, + sbom: { present: true, complete: true, components: ["a"] }, + provenance: { present: true, chainIntact: true }, + dependencies: { prohibitedFound: [] }, + }); + + assert.equal(verdict.status, "PASS"); + assert.deepEqual(verdict.reasons, []); +}); + +test("evaluateAdmissibility fails when mandatory checks fail", () => { + const verdict = evaluateAdmissibility({ + artifact: { digest: "sha256:def" }, + signature: { valid: false }, + sbom: { present: true, complete: false, components: [] }, + provenance: { present: true, chainIntact: false }, + dependencies: { prohibitedFound: ["left-pad"] }, + }); + + assert.equal(verdict.status, "FAIL"); + assert.ok(verdict.reasons.includes("INVALID_SIGNATURE")); + assert.ok(verdict.reasons.includes("MISSING_OR_INCOMPLETE_SBOM")); + assert.ok(verdict.reasons.includes("BROKEN_PROVENANCE_CHAIN")); + assert.ok(verdict.reasons.includes("PROHIBITED_DEPENDENCIES:left-pad")); +});