Skip to content
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions libs/llm-gateway/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"name": "@intelgraph/llm-gateway",
"version": "0.0.1",
"main": "src/index.ts",
"types": "src/index.ts",
"private": true
}
1 change: 1 addition & 0 deletions libs/llm-gateway/src/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
export * from './types.js';
28 changes: 28 additions & 0 deletions libs/llm-gateway/src/types.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
/**
 * Strategy for tracking multi-turn conversation state.
 *
 * NOTE(review): exact semantics of each mode are not visible in this file —
 * presumably "summit-managed" keeps history on the Summit side while the
 * "openai-*" modes delegate state to OpenAI; confirm against the gateway
 * implementation.
 */
export type ConversationMode =
  | "summit-managed"
  | "openai-previous-response"
  | "openai-conversations";

/** A provider-agnostic LLM generation request routed through the gateway. */
export interface GatewayRequest {
  /** Unique identifier for tracing/correlating this request. */
  requestId: string;
  tenantId: string;
  userId: string;
  /** Optional system-level instructions passed to the model. */
  instructions?: string;
  /** Either a plain prompt string or a structured chat-message list. */
  input: string | Array<{ role: "user" | "system" | "assistant"; content: string }>;
  /** Tool definitions the model may invoke; `parameters` is an untyped schema (shape not enforced here). */
  tools?: Array<{ name: string; description: string; parameters: unknown }>;
  /** Existing conversation to continue, if any. */
  conversationId?: string;
  conversationMode: ConversationMode;
  /** Whether the provider should persist this response — provider-dependent; TODO confirm semantics. */
  store?: boolean;
}

/** Normalized result of a generation call, regardless of provider. */
export interface GatewayResponse {
  responseId: string;
  outputText: string;
  /** Tool invocations requested by the model, if any. */
  toolCalls?: Array<{ name: string; arguments: unknown }>;
  usage?: { inputTokens: number; outputTokens: number; totalTokens: number };
}

/** Contract every LLM provider adapter must implement. */
export interface LlmGatewayAdapter {
  generate(request: GatewayRequest): Promise<GatewayResponse>;
  /** Returns one embedding vector per input string. */
  embed(request: { input: string | string[]; model?: string }): Promise<number[][]>;
}
10 changes: 10 additions & 0 deletions libs/providers/openai-responses/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"name": "@intelgraph/openai-responses",
"version": "0.0.1",
"main": "src/index.ts",
"types": "src/index.ts",
"private": true,
"dependencies": {
"@intelgraph/llm-gateway": "workspace:*"
}
}
1 change: 1 addition & 0 deletions libs/providers/openai-responses/src/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
export * from './openai-responses-adapter.js';
18 changes: 18 additions & 0 deletions libs/providers/openai-responses/src/openai-responses-adapter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import { LlmGatewayAdapter, GatewayRequest, GatewayResponse } from '@intelgraph/llm-gateway';

/** Placeholder adapter for the OpenAI Responses API; real integration pending. */
export class OpenAIResponsesAdapter implements LlmGatewayAdapter {
  /** Returns a canned response; the incoming request is not yet inspected. */
  async generate(request: GatewayRequest): Promise<GatewayResponse> {
    const responseId = `resp-${Date.now()}`;
    return {
      responseId,
      outputText: 'This is a stub response from OpenAI Responses Adapter.',
      usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
    };
  }

  /** Returns one fixed dummy vector per input string. */
  async embed(request: { input: string | string[]; model?: string }): Promise<number[][]> {
    const texts: string[] = [];
    if (Array.isArray(request.input)) {
      texts.push(...request.input);
    } else {
      texts.push(request.input);
    }
    return texts.map(() => [0.1, 0.2, 0.3]);
  }
}
22 changes: 22 additions & 0 deletions services/devtools/summit-mcp/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
{
"name": "@intelgraph/summit-mcp",
"version": "0.0.1",
"main": "src/index.ts",
"private": true,
"scripts": {
"start": "ts-node src/index.ts"
},
"dependencies": {
"@modelcontextprotocol/sdk": "latest",
"@intelgraph/llm-gateway": "workspace:*",
"@intelgraph/openai-responses": "workspace:*",
"express": "^4.18.2",
"cors": "^2.8.5"
},
"devDependencies": {
"@types/express": "^4.17.21",
"@types/cors": "^2.8.17",
"typescript": "^5.0.0",
"ts-node": "^10.9.1"
}
}
54 changes: 54 additions & 0 deletions services/devtools/summit-mcp/src/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import { McpServer, ResourceTemplate } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { prGateTool } from './tools/pr_gate.js';
import { getPrGateDashboard } from './resources/pr_gate_dashboard.js';

// Create the MCP server instance.
const server = new McpServer({
  name: "summit-mcp",
  version: "1.0.0"
});

// Register the PR Gate tool.
server.tool(
  prGateTool.name,
  prGateTool.description,
  prGateTool.parameters,
  prGateTool.execute
);

// Register the PR Gate Dashboard UI resource.
// The PR id travels in the resource URI's query string
// (ui://summit/pr-gate/dashboard?prId=...), so it is parsed out of the
// URI itself rather than taken from template variables.
server.resource(
  "pr-gate-dashboard",
  new ResourceTemplate("ui://summit/pr-gate/dashboard", { list: undefined }),
  async (uri) => {
    const url = new URL(uri.href);
    const prId = url.searchParams.get("prId") ?? "unknown";

    return {
      contents: [{
        uri: uri.href,
        text: await getPrGateDashboard(prId),
        mimeType: "text/html" // UI resources must declare an HTML MIME type
      }]
    };
  }
);

/**
 * Connects the server over stdio. Logging goes to stderr so stdout stays
 * reserved for the MCP protocol stream.
 */
async function main(): Promise<void> {
  const transport = new StdioServerTransport();
  await server.connect(transport);
  console.error("Summit MCP Server running on stdio");
}

main().catch((error: unknown) => {
  console.error("Fatal error in main():", error);
  process.exit(1);
});
15 changes: 15 additions & 0 deletions services/devtools/summit-mcp/src/resources/pr_gate_dashboard.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// Stub for PR Gate UI resource.

/** Escapes text for safe interpolation into HTML. */
const escapeHtml = (text: string): string =>
  text
    .replace(/&/g, "&amp;")
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/'/g, "&#39;");

/**
 * Renders the PR Gate dashboard page for a pull request.
 *
 * Stub: always reports PASS regardless of prId. The id is HTML-escaped
 * because it originates from a URI query string and would otherwise be
 * an injection vector into the rendered page.
 */
export const getPrGateDashboard = async (prId: string): Promise<string> => {
  const safeId = escapeHtml(prId);
  return `
<!DOCTYPE html>
<html>
<head>
<title>PR Gate Dashboard</title>
</head>
<body>
<h1>PR Gate Status: ${safeId}</h1>
<div class="status">PASS</div>
</body>
</html>
`;
};
27 changes: 27 additions & 0 deletions services/devtools/summit-mcp/src/tools/pr_gate.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
// Stub for PR gate tool (always reports PASS for now).
export const prGateTool = {
  name: "pr_gate",
  description: "Check PR gate status",
  // JSON-Schema-style declaration of the tool's arguments.
  parameters: {
    type: "object",
    properties: {
      prId: { type: "string" }
    },
    required: ["prId"]
  },
  /**
   * Executes the gate check for the given PR.
   * Returns MCP text content plus a UI resource URI pointing at the
   * dashboard for this PR.
   */
  execute: async (args: { prId: string }) => {
    return {
      content: [
        {
          type: "text",
          text: `PR Gate status for ${args.prId}: PASS`
        }
      ],
      _meta: {
        ui: {
          // Encode prId so spaces/&/# etc. cannot corrupt the query string
          // that the dashboard resource later parses back out.
          resourceUri: `ui://summit/pr-gate/dashboard?prId=${encodeURIComponent(args.prId)}`
        }
      }
    };
  }
};
106 changes: 106 additions & 0 deletions tools/ci/denylist_openai_assistants.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
#!/usr/bin/env python3
import os
import re
import argparse
import json
import sys
from pathlib import Path

# Patterns to detect OpenAI Assistants API usage.
# Each entry is (regex, human-readable description); both raw REST
# endpoints and SDK attribute access are covered.
DENYLIST_PATTERNS = [
    (r"/v1/assistants\b", "Endpoint /v1/assistants"),
    (r"/v1/threads\b", "Endpoint /v1/threads"),
    # NOTE(review): the lookbehind only skips the exact text "maestro/v1/runs";
    # any other characters between "maestro" and "/v1/runs" will still match —
    # confirm that is the intended exclusion.
    (r"(?<!maestro)/v1/runs\b", "Endpoint /v1/runs (excluding maestro)"),
    (r"\bbeta\.assistants\b", "SDK beta.assistants"),
    (r"\bbeta\.threads\b", "SDK beta.threads"),
    (r"\bbeta\.runs\b", "SDK beta.runs"),
    (r"OpenAI-Beta:\s*assistants", "Header OpenAI-Beta: assistants"),
]

# Default exclusions: directory names skipped anywhere in a file's path
# (VCS metadata, dependency trees, build output, caches).
DEFAULT_EXCLUDES = {
    ".git",
    "node_modules",
    "dist",
    "build",
    "coverage",
    ".pytest_cache",
    "__pycache__",
    "artifacts",
}

def scan_file(filepath, patterns):
    """Scan one file line-by-line for denylisted patterns.

    Args:
        filepath: Path to the file to scan.
        patterns: Iterable of (regex, description) tuples.

    Returns:
        A list of match dicts (pattern, description, 1-based line number,
        truncated matching line). Unreadable files produce a warning on
        stderr and an empty list instead of aborting the scan.
    """
    matches = []
    try:
        with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
            lines = f.read().splitlines()
    except OSError as e:
        # Non-fatal: report and keep scanning the rest of the tree.
        print(f"Warning: Could not read file {filepath}: {e}", file=sys.stderr)
        return matches

    for pattern, description in patterns:
        # Compile once per pattern instead of re-matching the raw string
        # against the whole content and then again against every line.
        regex = re.compile(pattern)
        for i, line in enumerate(lines, 1):
            if regex.search(line):
                matches.append({
                    "pattern": pattern,
                    "description": description,
                    "line": i,
                    "match": line.strip()[:100]  # Truncate long lines
                })
    return matches

def scan_directory(root_dir, excludes, patterns):
    """Recursively scan root_dir, returning per-file match reports.

    Skips any path containing an excluded directory component, and skips
    this scanner script itself (matched by basename).
    """
    results = []
    self_name = os.path.basename(__file__)

    for path in Path(root_dir).rglob("*"):
        if not path.is_file():
            continue
        # Exclusion applies if any component of the path is denylisted.
        if any(part in excludes for part in path.parts):
            continue
        if path.name == self_name:
            continue

        file_matches = scan_file(path, patterns)
        if file_matches:
            results.append({"file": str(path), "matches": file_matches})
    return results

def main():
    """CLI entry point: scan a tree, write a JSON report, optionally fail."""
    parser = argparse.ArgumentParser(description="Scan for OpenAI Assistants API usage.")
    parser.add_argument("--root", default=".", help="Root directory to scan")
    parser.add_argument("--report", default="denylist_report.json", help="Path to output JSON report")
    parser.add_argument("--fail-on-match", action="store_true", help="Exit with error if matches found")
    args = parser.parse_args()

    print(f"Scanning {args.root} for OpenAI Assistants API usage...")
    results = scan_directory(args.root, DEFAULT_EXCLUDES, DENYLIST_PATTERNS)

    # The report is written regardless of outcome so CI can archive it.
    with open(args.report, "w") as f:
        json.dump(
            {
                "scan_root": args.root,
                "total_files_with_matches": len(results),
                "results": results,
            },
            f,
            indent=2,
        )
    print(f"Report saved to {args.report}")

    if not results:
        print("No forbidden patterns found.")
        return

    print(f"Found {len(results)} files with forbidden patterns:")
    for entry in results:
        print(f"  {entry['file']} ({len(entry['matches'])} matches)")
    if args.fail_on_match:
        sys.exit(1)

if __name__ == "__main__":
    main()
47 changes: 47 additions & 0 deletions tools/ci/test_denylist.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import unittest
import tempfile
import shutil
import os
import json
from denylist_openai_assistants import scan_directory, DENYLIST_PATTERNS

class TestDenylistScanner(unittest.TestCase):
    """Unit tests for scan_directory's pattern detection and exclusions."""

    def setUp(self):
        # Fresh temp tree per test so cases stay isolated.
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def create_file(self, filename, content):
        # Helper: create a file (with any parent dirs) under the temp tree.
        path = os.path.join(self.test_dir, filename)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as f:
            f.write(content)
        return path

    def test_scan_finds_patterns(self):
        # One clean file plus three distinct violation styles: SDK attribute
        # access, raw REST endpoint, and the OpenAI-Beta header.
        self.create_file("clean.ts", "const a = 1;")
        self.create_file("violation.ts", "const client = new OpenAI();\nclient.beta.assistants.create();")
        self.create_file("api_call.py", "requests.post('https://api.openai.com/v1/assistants')")
        # Matches raw header string
        self.create_file("headers.txt", "OpenAI-Beta: assistants=v1")

        results = scan_directory(self.test_dir, set(), DENYLIST_PATTERNS)

        # We expect 3 files to match
        self.assertEqual(len(results), 3)

        files = {r['file'] for r in results}
        self.assertTrue(any("violation.ts" in f for f in files))
        self.assertTrue(any("api_call.py" in f for f in files))
        self.assertTrue(any("headers.txt" in f for f in files))

    def test_scan_excludes(self):
        # A violation inside an excluded directory must not be reported.
        os.makedirs(os.path.join(self.test_dir, "node_modules"))
        self.create_file("node_modules/lib.js", "beta.assistants.create()")

        results = scan_directory(self.test_dir, {"node_modules"}, DENYLIST_PATTERNS)
        self.assertEqual(len(results), 0)

if __name__ == '__main__':
    unittest.main()
4 changes: 4 additions & 0 deletions tsconfig.base.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,10 @@
"@summitsec/vcr-kit/*": ["packages/vcr-kit/src/*"],
"@intelgraph/flags/*": ["libs/flags/node/*"],
"@intelgraph/audit/*": ["libs/audit/node/*"],
"@intelgraph/llm-gateway": ["libs/llm-gateway/src/index.ts"],
"@intelgraph/llm-gateway/*": ["libs/llm-gateway/src/*"],
"@intelgraph/openai-responses": ["libs/providers/openai-responses/src/index.ts"],
"@intelgraph/openai-responses/*": ["libs/providers/openai-responses/src/*"],
"@summit/agent-gym": ["packages/agent-gym/src/index.ts"],
"@summit/agent-gym-metrics": ["packages/agent-gym-metrics/src/index.ts"]
}
Expand Down
Loading