Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .github/policies/narrative_ci/determinism.rego
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
package narrative_ci.determinism

import future.keywords.contains
import future.keywords.if
import future.keywords.in

# Keys that indicate wall-clock data leaked into a deterministic payload.
forbidden_keys := {"ts", "timestamp", "created_at", "updated_at"}

# deny is a partial set: it is empty by construction when no rule body
# matches, so no `default deny = []` is needed (and a default on a partial
# set rule is a compile error — complete and partial definitions conflict).
deny contains msg if {
	payload := input.payloads[_]

	# walk/2 visits every nested [path, value] pair of the payload.
	walk(payload, [path, value])
	is_object(value)

	# `some key in …` both declares and binds; the original
	# `some key` + `key := …` pair is rejected by the compiler.
	some key in object.keys(value)
	key in forbidden_keys
	msg := sprintf("forbidden timestamp field %s at %v", [key, path])
}

test_determinism_pass if {
	# Local must not be named `input` — shadowing input is a compile error.
	test_input := data.fixtures.determinism_pass
	count(deny) == 0 with input as test_input
}

test_determinism_fail if {
	test_input := data.fixtures.determinism_fail
	count(deny) > 0 with input as test_input
}
5 changes: 5 additions & 0 deletions .github/policies/narrative_ci/fixtures/determinism_fail.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"payloads": [
{ "run_id": "fixture-run", "created_at": "2026-01-01T00:00:00Z" }
]
}
5 changes: 5 additions & 0 deletions .github/policies/narrative_ci/fixtures/determinism_pass.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"payloads": [
{ "run_id": "fixture-run", "values": [1, 2, 3] }
]
}
6 changes: 6 additions & 0 deletions .github/policies/narrative_ci/fixtures/traceability_fail.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"inferred_nodes": [
{ "id": "claim-1", "type": "Claim" }
],
"provenance_edges": []
}
10 changes: 10 additions & 0 deletions .github/policies/narrative_ci/fixtures/traceability_pass.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"inferred_nodes": [
{ "id": "claim-1", "type": "Claim" },
{ "id": "frame-1", "type": "Frame" }
],
"provenance_edges": [
{ "from_type": "Artifact", "to": "claim-1" },
{ "from_type": "Artifact", "to": "frame-1" }
]
}
27 changes: 27 additions & 0 deletions .github/policies/narrative_ci/traceability.rego
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
package narrative_ci.traceability

import future.keywords.contains
import future.keywords.if
import future.keywords.in

# deny is a partial set: empty by construction when nothing matches, so no
# `default deny = []` (a default on a partial set rule is a compile error).
deny contains msg if {
	node := input.inferred_nodes[_]
	not has_provenance(node)
	msg := sprintf("missing provenance for %s:%s", [node.type, node.id])
}

# A node has provenance when at least one edge from an Artifact targets it.
has_provenance(node) if {
	some edge in input.provenance_edges
	edge.to == node.id
	edge.from_type == "Artifact"
}

test_traceability_pass if {
	# Local must not be named `input` — shadowing input is a compile error.
	test_input := data.fixtures.traceability_pass
	count(deny) == 0 with input as test_input
}

test_traceability_fail if {
	test_input := data.fixtures.traceability_fail
	count(deny) > 0 with input as test_input
}
42 changes: 42 additions & 0 deletions .github/workflows/narrative-ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
name: narrative-ci
on:
  workflow_dispatch: {}
  pull_request:
    paths:
      - "intelgraph/pipelines/narrative_ci/**"
      - "intelgraph/schema/**"
      - "schemas/narrative/**"
      - ".github/policies/narrative_ci/**"
  schedule:
    - cron: "17 3 * * *"

jobs:
  verify:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    env:
      # Rollback switch documented in the pipeline README.
      # NOTE(review): no step currently reads this flag — wire it into step
      # `if:` conditions (or remove it) before relying on it for rollback.
      NARRATIVE_CI_ENABLED: "false"
    steps:
      - uses: actions/checkout@v4
      - name: Setup OPA
        # Pinned to a full commit SHA (supply-chain hardening). Record the
        # corresponding release tag when bumping so updates are auditable.
        uses: open-policy-agent/setup-opa@950f159a49aa91f9323f36f1de81c7f6b5de9576 # v2
      - name: Run fixture pipeline
        run: |
          npx tsx intelgraph/pipelines/narrative_ci/steps/50_bundle_evidence.ts --fixture
      - name: Validate schemas
        run: |
          npx tsx intelgraph/pipelines/narrative_ci/lib/schema_validate.ts out schemas/narrative
      - name: OPA policy tests
        run: |
          opa test .github/policies/narrative_ci -v
      - name: Determinism gate (no timestamps)
        run: |
          # rg exit codes: 0 = match found (gate must FAIL), 1 = no match
          # (gate passes), >=2 = rg error (e.g. missing ./out). The previous
          # `! rg … || (…)` form negated exit 2 into success, silently
          # passing the gate when rg itself failed.
          set +e
          rg -n '"ts"|"timestamp"|"created_at"|"updated_at"' out
          status=$?
          set -e
          if [ "$status" -eq 0 ]; then
            echo "timestamp-like fields found in deterministic outputs" >&2
            exit 1
          elif [ "$status" -ne 1 ]; then
            echo "rg failed with status $status" >&2
            exit "$status"
          fi
      - name: Upload evidence
        # Upload even when a previous gate fails so failing runs keep their
        # evidence bundle for debugging.
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: narrative-ci-evidence
          path: |
            evidence/**
            out/**
12 changes: 9 additions & 3 deletions docs/roadmap/STATUS.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"last_updated": "2026-02-07T00:00:00Z",
"revision_note": "Added Summit PR Stack Sequencer skill scaffolding.",
"last_updated": "2026-02-10T12:00:00Z",
"revision_note": "Refined narrative CI lane-1 scaffold with state schema and deterministic output path.",
"initiatives": [
{
"id": "adenhq-hive-subsumption-lane1",
Expand Down Expand Up @@ -193,14 +193,20 @@
"status": "in_progress",
"owner": "codex",
"notes": "Documented CI/CD high-signal deltas with enforced action register and evidence targets."
},
{
"id": "narrative-ci-lane1",
"status": "in_progress",
"owner": "codex",
"notes": "Scaffolded narrative CI lane-1 pipeline, schemas, and policy gates with deterministic evidence bundling."
}
],
"summary": {
"rc_ready": 8,
"partial": 2,
"incomplete": 0,
"not_started": 5,
"total": 17,
"total": 21,
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The total in the summary appears to be inconsistent. It was increased from 17 to 21 (a jump of 4), but only one new item was added to the initiatives array. Furthermore, the sum of rc_ready (8), partial (2), incomplete (0), and not_started (5) is 15, which does not match either the old or the new total. Please verify and correct the summary values to ensure data consistency.

"ga_blockers": []
}
}
10 changes: 9 additions & 1 deletion evidence/index.json
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,14 @@
"metrics": "evidence/EVD-INTSUM-2026-THREAT-HORIZON-001/metrics.json",
"stamp": "evidence/EVD-INTSUM-2026-THREAT-HORIZON-001/stamp.json"
}
},
{
"evidence_id": "EVD-NARRATIVE-CI-METRICS-001",
"files": {
"report": "evidence/EVD-NARRATIVE-CI-METRICS-001/report.json",
"metrics": "evidence/EVD-NARRATIVE-CI-METRICS-001/metrics.json",
"stamp": "evidence/EVD-NARRATIVE-CI-METRICS-001/stamp.json"
}
}
]
}
}
34 changes: 34 additions & 0 deletions intelgraph/pipelines/narrative_ci/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Narrative CI Pipeline

This pipeline scaffolds deterministic narrative scoring, state transitions, and evidence
bundling for Summit/IntelGraph. It is intentionally fixture-first and uses stable JSON
serialization so outputs are identical across runs with the same inputs.

## Local run (fixture mode)

```bash
npx tsx intelgraph/pipelines/narrative_ci/steps/50_bundle_evidence.ts --fixture
```

## Validate outputs

```bash
npx tsx intelgraph/pipelines/narrative_ci/lib/schema_validate.ts out schemas/narrative
```

## Evidence outputs

Each run emits:

- `out/metrics/*.json` deterministic payloads
- `out/narratives/fixture/state_transitions.json`
- `evidence/EVD-NARRATIVE-CI-METRICS-001/{report.json,metrics.json,stamp.json}`
- `evidence/index.json` updates tracked by `50_bundle_evidence.ts`

## Tuning thresholds

Edit `config/defaults.yml` only. All emitted evidence includes a config hash.

## Rollback

Set `NARRATIVE_CI_ENABLED=false` to disable the workflow and block graph writes. (Note: as scaffolded, the variable is declared in the workflow `env` but not yet checked by any step condition — verify the guard is wired before relying on this for rollback.)
26 changes: 26 additions & 0 deletions intelgraph/pipelines/narrative_ci/config/defaults.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
version: 1
thresholds:
seeding_density_high: 0.65
handoff_score_high: 0.7
compression_ratio_high: 0.6
contested_counterclaim_rate: 0.4
dormant_volume_runs: 4
reactivation_similarity: 0.8
register_shift:
dictionary:
- "it is hereby"
- "pursuant to"
- "hereby"
- "therefore"
- "compliance"
tier_jump_scores:
fringe_to_mainstream: 1.0
adjacent: 0.5
same: 0.1
citation_circularity:
denylist:
- "lowcred.example"
- "mirror.example"
allowlist:
- "gov.example"
- "ngo.example"
5 changes: 5 additions & 0 deletions intelgraph/pipelines/narrative_ci/lib/hash.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
import { createHash } from 'node:crypto';

/**
 * Hex-encoded SHA-256 digest of the given string or buffer.
 *
 * Used throughout the narrative-CI pipeline to fingerprint config and
 * payload contents deterministically.
 */
export function sha256(value: string | Buffer): string {
  const hasher = createHash('sha256');
  hasher.update(value);
  return hasher.digest('hex');
}
11 changes: 11 additions & 0 deletions intelgraph/pipelines/narrative_ci/lib/ids.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import { createHash } from 'node:crypto';

/** Stable item slug embedded in every narrative-CI evidence identifier. */
export const ITEM_SLUG = 'NARRATIVE-CI';

/**
 * Compose a deterministic evidence id of the form
 * `EVD-<slug>-<area>-<sequence>`, e.g. `EVD-NARRATIVE-CI-METRICS-001`.
 */
export function buildEvidenceId(area: string, sequence: string): string {
  const segments = ['EVD', ITEM_SLUG, area, sequence];
  return segments.join('-');
}

/**
 * Hex SHA-256 digest of `input`.
 *
 * NOTE(review): duplicates `sha256` in `lib/hash.ts` — kept for
 * backward compatibility with existing callers; prefer that helper in
 * new code and consider consolidating in a follow-up.
 */
export function stableHash(input: string): string {
  return createHash('sha256').update(input).digest('hex');
}
Comment on lines +9 to +11
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

This stableHash function duplicates the functionality of the sha256 function already defined in intelgraph/pipelines/narrative_ci/lib/hash.ts. To avoid code duplication and improve maintainability, this function should be removed. Please use the sha256 function from hash.ts instead.

28 changes: 28 additions & 0 deletions intelgraph/pipelines/narrative_ci/lib/json_stable.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
type JsonValue =
  | string
  | number
  | boolean
  | null
  | JsonValue[]
  | { [key: string]: JsonValue };

/**
 * Return a deep copy of `value` with every object's keys in sorted order.
 * Arrays keep their element order; primitives pass through unchanged.
 */
function sortValue(value: JsonValue): JsonValue {
  if (Array.isArray(value)) {
    return value.map(sortValue);
  }

  if (value !== null && typeof value === 'object') {
    // Sort by UTF-16 code units (default Array.sort), NOT localeCompare:
    // localeCompare depends on the runtime locale / ICU data, which would
    // make "stable" output differ across environments — defeating the
    // whole point of deterministic serialization.
    const sorted: { [key: string]: JsonValue } = {};
    for (const key of Object.keys(value).sort()) {
      sorted[key] = sortValue(value[key]);
    }
    return sorted;
  }

  return value;
}

/**
 * Deterministic JSON serialization: identical inputs always produce
 * byte-identical output regardless of object key insertion order or
 * host locale.
 */
export function stableStringify(value: JsonValue): string {
  return JSON.stringify(sortValue(value));
}
74 changes: 74 additions & 0 deletions intelgraph/pipelines/narrative_ci/lib/schema_validate.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import { readFile, readdir } from 'node:fs/promises';
import path from 'node:path';
import Ajv from 'ajv';
import addFormats from 'ajv-formats';

// CLI entry: two positional args — the directory of pipeline outputs to
// validate, and the directory of JSON Schemas to validate them against.
const [outDir, schemaDir] = process.argv.slice(2);

if (!outDir || !schemaDir) {
  console.error('Usage: schema_validate.ts <outDir> <schemaDir>');
  process.exit(1);
}

// strict: false is deliberate: the schemas carry a custom `x-targets`
// keyword (used below to map schemas onto output file paths), which Ajv
// strict mode would reject as an unknown keyword unless registered.
const ajv = new Ajv({ allErrors: true, strict: false });
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Using strict: false with AJV is not recommended as it can miss potential issues in schemas. Please consider setting strict: true to enable stricter validation and catch a wider range of schema errors. If strict: false is required for a specific reason, please add a comment explaining why.

const ajv = new Ajv({ allErrors: true, strict: true });

addFormats(ajv);

/**
 * Load every `*.json` schema in `directory`.
 *
 * Schemas that declare an `$id` are also registered with the shared Ajv
 * instance so inter-schema `$ref`s resolve during compilation. Returns
 * one `{ name, schema }` entry per file, named after the file stem.
 */
async function loadSchemas(directory: string) {
  const jsonFiles = (await readdir(directory)).filter((fileName) =>
    fileName.endsWith('.json'),
  );

  const schemas: Array<{ name: string; schema: any }> = [];
  for (const fileName of jsonFiles) {
    const raw = await readFile(path.join(directory, fileName), 'utf-8');
    schemas.push({ name: path.basename(fileName, '.json'), schema: JSON.parse(raw) });
  }

  for (const { schema } of schemas) {
    if (schema.$id) {
      ajv.addSchema(schema);
    }
  }

  return schemas;
}

/**
 * Recursively validate every `*.json` file under `directory` against the
 * first matching schema. A schema matches when one of its `x-targets`
 * path suffixes matches the file, or (fallback) when the schema's file
 * stem appears anywhere in the output path. Files with no matching
 * schema are skipped. Exits the process with status 1 on the first
 * validation failure.
 *
 * @param compiled Compiled-validator cache shared across the recursive
 *   walk so each schema is compiled at most once (the original compiled
 *   per file; Ajv compilation is comparatively expensive). New optional
 *   parameter with a default — existing two-argument callers are
 *   unaffected.
 */
async function validateOutputs(
  directory: string,
  schemas: Array<{ name: string; schema: unknown }>,
  compiled: Map<unknown, ReturnType<typeof ajv.compile>> = new Map(),
) {
  const files = await readdir(directory, { withFileTypes: true });
  for (const file of files) {
    const fullPath = path.join(directory, file.name);
    if (file.isDirectory()) {
      // Pass the cache down so nested directories reuse compiled schemas.
      await validateOutputs(fullPath, schemas, compiled);
      continue;
    }
    if (!file.name.endsWith('.json')) {
      continue;
    }
    const payload = JSON.parse(await readFile(fullPath, 'utf-8'));
    const matching = schemas.find((entry) => {
      const schemaObj = entry.schema as { ['x-targets']?: string[] };
      if (schemaObj && Array.isArray(schemaObj['x-targets'])) {
        return schemaObj['x-targets'].some((target) => fullPath.endsWith(target));
      }
      return fullPath.includes(entry.name);
    });
    if (!matching) {
      // No schema claims this file; skip rather than fail.
      continue;
    }
    let validate = compiled.get(matching.schema);
    if (!validate) {
      validate = ajv.compile(matching.schema);
      compiled.set(matching.schema, validate);
    }
    if (!validate(payload)) {
      console.error(`Schema validation failed for ${fullPath}`);
      console.error(validate.errors);
      process.exit(1);
    }
  }
}

// Driver: load all schemas, then walk the output tree. validateOutputs
// exits the process non-zero on the first schema violation, so reaching
// the final log line means every matched file validated cleanly.
const schemas = await loadSchemas(schemaDir);
await validateOutputs(outDir, schemas);
console.log('Schema validation succeeded');
23 changes: 23 additions & 0 deletions intelgraph/pipelines/narrative_ci/steps/30_score_seeding.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import { mkdir, readFile, writeFile } from 'node:fs/promises';
import path from 'node:path';
import { sha256 } from '../lib/hash.js';
import { stableStringify } from '../lib/json_stable.js';

// Step 30: emit the (fixture) seeding-density metric payload.
//
// Output is deterministic: stable key ordering plus a hash of the config
// that produced it, so identical inputs always yield byte-identical files.
const metricsDir = path.resolve('out/metrics');
await mkdir(metricsDir, { recursive: true });

// Hash the config so evidence records which thresholds produced the scores.
const configFile = path.resolve('intelgraph/pipelines/narrative_ci/config/defaults.yml');
const configText = await readFile(configFile, 'utf-8');

const serialized = stableStringify({
  run_id: 'fixture-run',
  config_hash: sha256(configText),
  scores: [],
});

// Trailing newline keeps the file POSIX-friendly and diff-stable.
await writeFile(path.join(metricsDir, 'seeding_density.json'), `${serialized}\n`, 'utf-8');

console.log('Seeding density scores written');
Comment on lines +1 to +23
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

This script, along with 31_score_handoff.ts, 32_score_compression.ts, and 40_state_machine.ts, contains a significant amount of duplicated code for reading the configuration, hashing it, and writing a fixture file. To improve maintainability and adhere to the DRY (Don't Repeat Yourself) principle, consider refactoring this logic into a shared utility function.

For example, you could create a writeFixture function in a new lib/fixture_writer.ts file that encapsulates the common logic. The individual step files would then become much simpler, just calling this function with their specific data.

Loading
Loading