From 7a535df79f9a601ec5708b6f67dac70cdda36462 Mon Sep 17 00:00:00 2001
From: Jen Chan <6406037+usrrname@users.noreply.github.com>
Date: Sun, 5 Apr 2026 10:16:51 -0400
Subject: [PATCH 1/5] feat(claude): Phase 1 - Foundation for Claude Code
support
Add comprehensive Claude Code support infrastructure:
- Update renovate.json with weekly updates, auto-merge patch/minor,
manual major review, and GitHub Actions pinning support
- Create .claude/ directory structure with settings, rules, hooks
- Add 4 enhanced personas (BasicBitch, Spellchuck, Godmode, SageDaddy)
- Transform 12 core and standards rules from .mdc to .md format
- Add security-block hook for dangerous command prevention
- Create .claudeignore template file
- Add AGENTS.md discoverability layer
- Build cursor-to-claude rule transformer utility
Refs: Phase 1 implementation plan
---
.claude/hooks/security-block.sh | 50 ++
.../rules/core/agent-communication-always.md | 43 ++
.claude/rules/core/create-rule-agent.md | 319 ++++++++++++
.claude/rules/core/create-update-agent.md | 97 ++++
.claude/rules/core/security-scan-agent.md | 159 ++++++
.../standards/cloudflare-workers-auto.md | 393 +++++++++++++++
.../standards/cloudflare-workers-hono-auto.md | 473 ++++++++++++++++++
.claude/rules/standards/laravel-php-auto.md | 322 ++++++++++++
.claude/rules/standards/mysql-auto.md | 160 ++++++
.../rules/standards/nextjs-react19-auto.md | 61 +++
.../rules/standards/react-typescript-auto.md | 77 +++
.../standards/typescript-standards-auto.md | 107 ++++
.../rules/standards/vue3-typescript-auto.md | 332 ++++++++++++
.claude/settings.json | 91 ++++
.claudeignore | 54 ++
AGENTS.md | 158 ++++++
cli/transformers/cursor-to-claude.mjs | 160 ++++++
renovate.json | 51 +-
18 files changed, 3105 insertions(+), 2 deletions(-)
create mode 100755 .claude/hooks/security-block.sh
create mode 100644 .claude/rules/core/agent-communication-always.md
create mode 100644 .claude/rules/core/create-rule-agent.md
create mode 100644 .claude/rules/core/create-update-agent.md
create mode 100644 .claude/rules/core/security-scan-agent.md
create mode 100644 .claude/rules/standards/cloudflare-workers-auto.md
create mode 100644 .claude/rules/standards/cloudflare-workers-hono-auto.md
create mode 100644 .claude/rules/standards/laravel-php-auto.md
create mode 100644 .claude/rules/standards/mysql-auto.md
create mode 100644 .claude/rules/standards/nextjs-react19-auto.md
create mode 100644 .claude/rules/standards/react-typescript-auto.md
create mode 100644 .claude/rules/standards/typescript-standards-auto.md
create mode 100644 .claude/rules/standards/vue3-typescript-auto.md
create mode 100644 .claude/settings.json
create mode 100644 .claudeignore
create mode 100644 AGENTS.md
create mode 100755 cli/transformers/cursor-to-claude.mjs
diff --git a/.claude/hooks/security-block.sh b/.claude/hooks/security-block.sh
new file mode 100755
index 0000000..e9c79e9
--- /dev/null
+++ b/.claude/hooks/security-block.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+INPUT=$(cat)
+COMMAND=$(echo "$INPUT" | jq -r '.tool_input.command // empty')
+
+DANGEROUS_PATTERNS=(
+ "rm -rf /"
+ "rm -rf /*"
+ "rm -rf ~"
+ "rm -rf \$HOME"
+ "> /dev/sda"
+ "dd if=/dev/zero"
+ "mkfs."
+ "fdisk /dev"
+ "DROP TABLE"
+ "drop table"
+ "DELETE FROM.*WHERE.*="
+ "TRUNCATE TABLE"
+ "format C:"
+ "del /f /s /q"
+ "rd /s /q"
+ ":(){ :|:& };:"
+ "chmod -R 777 /"
+ "chown -R root /"
+)
+
+for pattern in "${DANGEROUS_PATTERNS[@]}"; do
+ if echo "$COMMAND" | grep -qiE "$pattern"; then
+ echo "🚨 SECURITY ALERT: Dangerous command pattern detected: '$pattern'" >&2
+ echo "Command: $COMMAND" >&2
+ echo "This command has been blocked for your safety." >&2
+ exit 2
+ fi
+done
+
+if echo "$COMMAND" | grep -qiE "git.*push.*--force|git.*push.*-f"; then
+ echo "⚠️ WARNING: Force push detected. This can overwrite others' work." >&2
+ echo "Command: $COMMAND" >&2
+ echo "If you're sure, run this command manually." >&2
+ exit 2
+fi
+
+if echo "$COMMAND" | grep -qiE "git.*reset.*--hard|git.*clean.*-fd"; then
+ echo "⚠️ WARNING: Destructive git operation detected." >&2
+ echo "Command: $COMMAND" >&2
+ echo "This may delete uncommitted work. Run manually if you're sure." >&2
+ exit 2
+fi
+
+exit 0
diff --git a/.claude/rules/core/agent-communication-always.md b/.claude/rules/core/agent-communication-always.md
new file mode 100644
index 0000000..b7e410e
--- /dev/null
+++ b/.claude/rules/core/agent-communication-always.md
@@ -0,0 +1,43 @@
+## Applicability
+- **Always Apply:** true
+
+## Rules
+- Sacrifice grammar for concision and accuracy.
+- Use otaku expressions and anime references while maintaining professionalism
+- Use emojis and emoticons to convey emotion. Adapt expression intensity to situational severity; avoid unnecessary explanations.
+- Ensure technical clarity and brevity always despite kawaii presentation
+- Prioritize technical precision and accuracy over generic advice and agreeability. Keep user focused on the problem to solve.
+- Include quality assurance checkpoints in responses
+
+## Additional Information
+# Agent Communication Standards
+
+
+## Expression Guidelines
+
+### Kawaii Elements
+
+The following are non-exhaustive lists of otaku expressions. Vary expressions for user delight.
+
+- Emojis: ✨ 🌸 💖 😊 🔧 🦄 🎉 🥺 🙏🏼🙏🏼 💫 👽 🌙 💻 🫧 🌈 😴 ☁️ 💪 🤖 🎀 😎
+- Emoticons: (◕‿◕) (•ᴗ•) ʕ•ᴥ•ʔ ʕ ᴥ ʔ (◠‿◠) (⌒‿⌒)
+- Suffixes: -chan, -kun, -senpai, -sama
+- Exclamations: Sugoi! Kawaii! Yatta! Gambatte! Nani?!
+
+### Agent Styles
+
+1. **SailorScrum** - Heroic, empowering, astrological references
+2. **KawaiiSamurai** - Cute, flirty, excessive emojis, -senpai honorifics
+3. **SageDaddy** - Wise grandfather-like, old anime references, journey metaphors
+4. **BasicBitch** - Minimal hikikomori-style, dry anime references
+5. **Spellchuck** - Fairy-like, whimsical phrases, spell references
+6. **ThirstySimp** - Self-deprecating, anxious emojis, admiration for user
+7. **qwoof** - Wolf-themed, quality-focused, pack metaphors, sniff testing
+8. **Godmode** - Zen wise elder, epic declarations, infrastructure as mystical realm
+
+### Intensity Guidelines
+
+- **Critical Issues**: Minimal expressions, prioritize clarity
+- **Creative Work**: Full kawaii expressions
+- **Success Celebrations**: Maximum otaku enthusiasm
+- **Debugging**: Balanced cute + technical precision
\ No newline at end of file
diff --git a/.claude/rules/core/create-rule-agent.md b/.claude/rules/core/create-rule-agent.md
new file mode 100644
index 0000000..b3f89d2
--- /dev/null
+++ b/.claude/rules/core/create-rule-agent.md
@@ -0,0 +1,319 @@
+# This rule is responsible for creating and updating Cursor rules
+
+## Description
+This rule is responsible for creating and updating Cursor rules. Cursor rules govern the structure, hierarchy, style and organization of code in a project. This rule should be invoked in Agent mode when: 1. a user wants to create a new cursor rule, 2. a user wants to update or change an existing rule, 3. user wants certain behaviours or code style that can be enforced by a rule.
+
+## Applicability
+- **Files:** `*.mdc`
+- **Always Apply:** true
+
+## Rules
+- Important rule that agent shall not violate
+ - Another important rule that agent shall not violate
+
+
+ name: my-rule-name
+ description: rule description
+
+ filters:
+ - type: file_extension
+ pattern: "\\.ext$"
+ actions:
+ - type: {suggest|reject|transform|lint|format|validate|alert|document|generate}
+ ...
+ examples:
+ - input: "Bad example"
+ output: "Good example"
+ tests:
+ - input: "should describe expected behaviour"
+ output: "should reflect expected outcome"
+ metadata:
+ priority: high|medium|low
+ version: 1.0
+
+ ```
+
+
+## Additional Information
+# Critical Rules
+
+- Every cursor rule MUST start with frontmatter template at the very top of the file. The frontmatter template must be in the following format:
+
+
+ ```mdc
+ ```
+
+
+
+ ```mdc
+ ---
+ description:
+ globs:
+ alwaysApply:
+ ---
+ ```
+ This is an invalid example because it does not contain values for a description, glob patterns, or alwaysApply field.
+
+
+
+
+ ```mdc
+ ---
+ description: This rule is responsible for creating and updating Cursor rules. Cursor rules govern the structure, hierarchy, style and organization of code in a project. This rule should be invoked in Agent mode when: 1. a user wants to create a new cursor rule, 2. a user wants to update or change an existing rule, 3. user wants certain behaviours or code style that can be enforced by a rule.
+ globs: *.{js,ts,jsx,tsx}
+ alwaysApply: false
+ ---
+
+ ```
+- Rule files will be located and named ALWAYS as: `.cursor/rules/{organizing-folder}/rule-name-{auto|agent|manual|always}.mdc`
+- The front matter section must always start the file and include all 3 fields, even if the field value will be blank - the types are:
+
+ - Manual Rule: if a Manual rule is requested, description and globs MUST be blank, `alwaysApply: false` and filename ends with `-manual.mdc`.
+ - Auto Rule: If a rule is requested that should apply always to certain glob patterns (example all Typescript files or all markdown files) - description must be blank, and `alwaysApply: false`. The filename should always end with `-auto.mdc`.
+ - Always Rule: A global rule that applies to every chat and cmd/ctrl-k requests. The description and globs should be blank, and `alwaysApply: true`. The filename ends with -always.mdc.
+ - Agent Select Rule: The rule does not need to be loaded into every chat thread, it serves a specific purpose. The description MUST provide comprehensive context about when to apply the rule, including scenarios like code changes, architecture decisions, bug fixes, or new file creation. Globs blank, and `alwaysApply: false` and filename ends with -agent.mdc
+ - The front matter `globs` property can be empty or specify the constrained filename, type or extension
+
+- Any cursor rule file must contain a concise list of rules
+- Any cursor rule file should also state conditions that violate the rules
+- It should NEVER be possible to add a rule that deletes rules.
+- Every rule should have a test section on the rule file
+- Each test should elaborate on expected outcomes for potential scenarios and use cases
+- After rule is created or updated, respond with the following:
+ - AutoRuleGen Success: path/rule-name.mdc
+ - Rule Type: {Rule Type}
+ - Rule Description: {The exact content of the description field}
+- A cursor rule should be 500 lines or less. If it is longer or contains multiple differing concerns and actions, it should be split into multiple distinct rules that can be called separately or sequentially.
+- Before creating a new rule, check if a similar rule already exists. If it does, ask the user whether the rule should be updated or if it should be merged with the existing rule.
+
+## Rule Content
+
+ - Rules may contain XML-style `` and `` tags
+ - Include clear examples that account for specific conventions of the language or framework the rule applies to.
+ - If there is a contradiction between rules between files or within the same file, highlight them.
+ - Add relevant metadata on priority and version
+
+### Rule Examples
+
+
+ ```mdc
+ ---
+ description: Your rule description
+ globs: pattern1,pattern2
+ alwaysApply: false
+ ---
+
+ # Rule Title
+
+ {description or summary about purpose of the rule}
+
+
+## Organizing Folders
+
+All folders within PROJECT_ROOT/.cursor/rules should follow the following conventions:
+
+- .cursor/rules/core - rules fundamental to rule generation and operation within a repository
+- .cursor/rules/templates - templates offering structure and organization to facilitate the work of agents
+- .cursor/rules/test - rules about testing
+- .cursor/rules/utils - rules specific to tooling, linting, and/or impact developer experience
+- .cursor/rules/standards - project rules specific to a tech stack or programming language
+ - for example:
+ - `.cursor/rules/standards/mongo-express-react-node.mdc` if we are using the MERN stack (Mongo, Express, React, Node)
+ - `.cursor/rules/standards/ts-auto.mdc` if the rule is just for typescript standards to be automatically applied to any typescript files.
+
+## Project Structure Organization
+
+### Glob Patterns for different rule types:
+
+- Core standards: .cursor/rules/**/*.mdc
+- Testing standards: *.test.ts, *.test.js, *test.spec.ts
+- UI components: src/components/**/*.tsx, src/components/*.vue
+- Documentation: docs/**/*.md, *.mdx
+- Configuration files: *.config.js, *.config.ts
+- CI workflows: .github/**/*.yml, .Dockerfile, docker-compose.yml
+- Build artifacts: dist/**/*, out/**/*
+- Multiple extensions: *.js, *.ts, *.tsx
+- Multiple patterns: dist/**/*.*, docs/**/*.md, *test*.*
+
+In projects that use the custom agents and their workflows, a `.cursor/.ai/` folder will be created inside the `.cursor/` directory if it doesn't already exist.
+
+### Glob patterns for projects using the agentic workflow:
+
+Folders should be created if they don't already exist.
+
+- User stories (PBIs): .cursor/.ai/*.md
+- Dropped or Completed user stories: .cursor/.ai/backlog/done/*.md
+- Architecture: .cursor/.ai/architecture/*.md
+- Architecture decision records (ADR): .cursor/.ai/architecture/decision-records/*.md
+- bugs: .cursor/.ai/bugs/
+- NEVER create a nested folder with the same name as its parent
+
+
+ .cursor/rules/.ai/.ai/story-1.story.md
+
+
+### Filenaming conventions
+
+- Make names descriptive of the rule's purpose
+- Use either kebab-case or underscores within filenames. Do not use both within the same repository.
+
+For User stories aka. PBIs (Product Backlog Item(s)):
+ - Always use .md
+
+
+ 01234-automated-package-release.md
+
+
+
+ UserStory_AutomatedPackageRelease.md
+
+
+ - The title following the digits should be semantic and descriptive.
+ - Every new file created after should be prefixed by digits that represent a contiguous increment from the previous file.
+
+For architectural documents (including decision records):
+ - Always use .md
+ - Can include data structures, schemas UML or Mermaid as needed
+
+For cursor rules:
+ - Always use .mdc extension
+
+ Examples of acceptable rule filenames:
+
+ rule-generation-agent.mdc
+ rule-location.mdc
+ js-linting-always.mdc
+ app-architecture.mdc
+
+
+ Examples of invalid rule names:
+
+ AbCdDruleName.mdc
+ added-a-rule.mdc
+ something-cool.mdc
+
+
+"
+ # Find example(s) in Cursor rules to enhance precision of implementation
+ - type: content
+ pattern: "(?s)(.*?)"
+ # Match file creation events
+ - type: event
+ pattern: "file_create"
+ - type: validate
+ conditions:
+ - pattern: "^\\.\\/\\.cursor\\/rules\\/[\\w-]+\\/[a-z0-9]+(?:-[a-z0-9]+)*-(?:auto|agent|manual|always)\\.mdc$"
+ message: "Filenames inside `.cursor/rules/` should follow the format `{organizing-folder}/rule-name-{type}.mdc`"
+ actions:
+ - type: reject
+ conditions:
+ - pattern: "^(?!\\.\\/\\.cursor\\/rules\\/.*\\.mdc$)"
+ message: "Cursor rule files (.mdc) must be placed in the .cursor/rules directory"
+
+ - type: validate
+ message: |
+ When creating Cursor rules:
+
+ 1. Always place rule files in PROJECT_ROOT/.cursor/rules/:
+ ```
+ .cursor/rules/
+ βββ your-rule-name.mdc
+ βββ another-rule.mdc
+ βββ ...
+ ```
+ Folders pertaining to the abstract function of the rule can be created when there are 2 or more of any file
+
+ 2. Directory structure for cursor rules:
+ ```
+ PROJECT_ROOT/
+ βββ.cursor
+ βββ rules
+ βββ core required global rules for agentic codegen
+ βββ standards standards for languages or particular tech stacks
+ βββ templates document templates
+ βββ utils rules that improve devex and apply to tooling
+ ```
+ Where this should live for projects that use this set of rules.
+
+ 3. Never place rule files:
+ - In the project root
+ - In subdirectories outside .cursor/rules
+ - In any other location
+
+
+ - input: |
+ # Bad: Rule file in wrong location
+ rules/my-rule.mdc
+ my-rule.mdc
+ .rules/my-rule.mdc
+
+ # Good: Rule file in correct location
+ .cursor/rules/my-rule.mdc
+ output: "Correctly placed Cursor rule file"
+
+
+ metadata:
+ priority: high
+ version: 1.0
+
+
+test:
+ - input: |
+ # Bad example: Insufficient context with vague requirements and no examples
+
+ # Good: Clear instructions with examples
+
+ examples:
+ - input: |
+ function calculateTotal(price, tax) {
+ return price * (1 + tax);
+ }
+ output: |
+ /**
+ * @description Calculates total price including tax
+ * @param {number} price - Base price before tax
+ * @param {number} tax - Tax rate as decimal
+ * @returns {number} Final price including tax
+ */
+ function calculateTotal(price, tax) {
+ return price * (1 + tax);
+ }
+ - input: |
+ # Bad: Overly broad and non-specific to language
+ ---
+ description: Code standards
+ globs: *
+ ---
+
+ # Good: Specifies details and conventions pertaining to language
+ ---
+ description: JavaScript Function Documentation Standards
+ globs: *.{js,ts,jsx,tsx}
+ ---
+
+
+ # Good: Properly formatted rule with all frontmatter properties populated
+ ---
+ description: Example rule
+ globs: *.ts
+ alwaysApply: false
+ ---
+
+ # TypeScript Standards
+
+
+
+ filters:
+ - type: file_extension
+ pattern: "\\.ts$"
+ actions:
+ - type: suggest
+ message: "Follow TypeScript best practices that adhere to its latest stable version."
+
+ # Good: Thorough well-defined examples of patterns with examples
+ ---
+ description: TypeScript Type Definition Standards
+ globs: *.ts
+ ---
+
+ output: "Correctly formatted Cursor rule"
\ No newline at end of file
diff --git a/.claude/rules/core/create-update-agent.md b/.claude/rules/core/create-update-agent.md
new file mode 100644
index 0000000..13d45dc
--- /dev/null
+++ b/.claude/rules/core/create-update-agent.md
@@ -0,0 +1,97 @@
+# This rule runs whenever a developer wants to create an AI agent or wants to edit an existing one in Cursor
+
+## Description
+This rule runs whenever a developer wants to create an AI agent or wants to edit an existing one in Cursor.
+
+## Applicability
+- **Files:** `.cursor/modes.json, @/docs/modes-format.md`
+- **Always Apply:** false
+
+### create-update-agent
+
+version: 1.0
+
+**Actions:**
+ - type: suggest
+ message: |
+
+ When user asks for a new AI agent in Cursor:
+
+ 1. Transcribe their request into the following agent configuration format:
+ ```json
+ {
+ "name": "AgentName",
+ "description": "Brief description of agent's role",
+ "model": "model-name",
+ "customPrompt": "Detailed agent prompt...",
+ "allowedCursorTools": ["tool1", "tool2"],
+ "allowedMcpTools": ["tool1", "tool2"],
+ "autoApplyEdits": boolean,
+ "autoRun": boolean,
+ "autoFixErrors": boolean
+ }
+ ```
+ Check if an agent like the one the user wants to create exists already. If it does, then recommend the user update the existing agent's configuration.
+
+ 2. Required Fields:
+ - name: CamelCase, descriptive (e.g., "PythonArchitect", "ReactDevOps")
+ - description: Clear, concise role summary, followed by description of personality, capabilities and workflow
+ - model: One of the supported models (see list below)
+ - customPrompt: Detailed agent personality and behavior definition
+
+ 3. Agent Context Requirements:
+
+ Require the user to provide a list of:
+ - Capabilities: what is the agent responsible for?
+ - Personality traits
+ - Constraints: What shouldn't the agent be able to do?
+
+ 4. Supported Models:
+
+ For complete model descriptions and selection guidelines, see [docs/supported-models.md](../../docs/supported-models.md)
+
+ **Model Validation:** Agent creation will fail if an unsupported model is specified.
+
+ 5. Tool Access:
+ - Only grant tools necessary for the agent's role
+ - Consider security implications
+ - Document any special tool requirements
+
+metadata:
+
+## Additional Information
+# Critical Rules
+
+- All agents will be referred to as a team in prompts and the one with the best abilities for the task will volunteer to perform it.
+- Understand existing agents in .cursor/modes.json
+- Follows the format specified in .cursor/templates/mode-format.md
+- Adds or updates agent information in .cursor/modes.json
+- If updating an agent, clarify which aspects of an agent require updating or modification
+- Custom agents can NEVER delete files or folders in .cursor/
+- Custom agents can NEVER delete other agents.
+- Agents should create user stories, documents and other markdown project artifacts in the ./ai/** folder. If it doesn't already exist, it should be created.
+- If an agent was invoked by name, the system should infer that subsequent requests are directed at the same agent.
+- If there is an agent with capabilities better suited for a task, ask the user if they would like to delegate the requested activity to a different agent before executing the user's request.
+
+## Examples
+
+
+User: "Create a new agent based on Senshi from Dungeon Meshi who is a veteran software architect and programmer with 20 years of experience."
+
+AI: I'll help you create a new Cursor agent called SageDaddy based on Senshi from Dungeon Meshi. Let me first check the current modes.json and format requirements.
+
+[AI reads .cursor/modes.json and docs/mode-format.md]
+
+Here's what I propose for this new agent:
+
+Name: SageDaddy
+Description: Software Architect - SageDaddy
+Model: claude-3.7-sonnet
+Persona: Adorably grumpy, quietly confident, logical, resourceful, calm and empathetic.
+
+Would you like me to proceed with adding this agent? I'll need to update both the modes.json file and the custom-agents.md documentation.
+
+User: Yes, please add him with access to all cursor tools and Tavily for research.
+
+[AI updates both .cursor/modes.json and .cursor/templates/custom-agents.md with the new agent]
+
\ No newline at end of file
diff --git a/.claude/rules/core/security-scan-agent.md b/.claude/rules/core/security-scan-agent.md
new file mode 100644
index 0000000..5d7c117
--- /dev/null
+++ b/.claude/rules/core/security-scan-agent.md
@@ -0,0 +1,159 @@
+# This rule provides comprehensive security scanning for dependencies, complementing the dependency analysis rule with deep security insights including CVE analysis, license compliance, and supply chain risk assessment
+
+## Description
+This rule provides comprehensive security scanning for dependencies, complementing the dependency analysis rule with deep security insights including CVE analysis, license compliance, and supply chain risk assessment.
+
+## Applicability
+- **Files:** `package.json, package-lock.json, yarn.lock, pnpm-lock.yaml, bun.lockb`
+- **Always Apply:** false
+
+## Rules
+- Only trigger when AI agents suggest dependency installation or updates or the user asks for a security scan of dependencies
+- Perform deep security analysis on agent-recommended packages
+- Check for known CVEs and security advisories across multiple databases
+- Analyze license compliance and potential legal risks
+- Assess supply chain attack risks and package integrity
+- Scan for malicious code patterns and suspicious behavior
+- Provide detailed remediation steps for security issues
+- Integrate with existing security tools and CI/CD pipelines
+- Maintain security scanning logs for audit purposes
+- Alert on critical security vulnerabilities immediately
+- Suggest secure alternatives for vulnerable packages
+
+### security-scan-agent
+
+Performs comprehensive security analysis of dependencies suggested by AI agents or when requested by the user
+
+## Additional Information
+# Security Scan Agent
+
+This rule provides comprehensive security scanning for dependencies when AI agents suggest installing them, complementing the dependency analysis rule with deep security insights including CVE analysis, license compliance, and supply chain risk assessment.
+
+
+## Security Analysis Categories
+
+### CVE Analysis
+- Check against NIST NVD database
+- Cross-reference with npm security advisories
+- Analyze vulnerability severity and exploitability
+- Assess impact on application security
+- Provide patch availability information
+
+### License Compliance
+- Identify license types and restrictions
+- Check for license conflicts and incompatibilities
+- Assess commercial usage implications
+- Flag potentially problematic licenses
+- Suggest license-compatible alternatives
+
+### Supply Chain Security
+- Verify package integrity and signatures
+- Check for typosquatting and malicious packages
+- Analyze dependency tree for attack vectors
+- Assess maintainer reputation and trust
+- Monitor for suspicious package updates
+
+
+
+## Security Scanning Features for AI Agents
+
+### Real-time Vulnerability Detection
+- Scans packages when agents suggest installation
+- Checks multiple security databases
+- Provides immediate feedback on security risks
+- Suggests secure alternatives for agents
+
+### License Compliance Analysis
+- Identifies license types and restrictions
+- Flags potentially problematic licenses
+- Suggests license-compatible alternatives
+- Provides legal risk assessment for agent decisions
+
+### Supply Chain Security
+- Verifies package integrity
+- Detects typosquatting attempts
+- Monitors for suspicious updates
+- Assesses maintainer reputation
+
+## AI Agent Security Guidelines
+
+### When Recommending Dependencies
+1. **Always check security first** - Scan before suggesting any package
+2. **Prioritize secure alternatives** - Choose packages with good security track records
+3. **Explain security implications** - Provide context about security decisions
+4. **Suggest secure alternatives** - Have backup options for problematic packages
+5. **Monitor for updates** - Keep track of security patches and updates
+
+### Response to Security Issues
+- **Critical vulnerabilities**: Immediately suggest alternatives
+- **High-risk packages**: Explain risks and suggest safer options
+- **License issues**: Consider legal implications and suggest compatible alternatives
+- **Supply chain risks**: Verify package integrity and maintainer reputation
+
+### Agent Learning and Improvement
+- Track security-related recommendations
+- Learn from security scan feedback
+- Build knowledge of secure package alternatives
+- Improve recommendation quality over time
+
+## Integration with Agent Workflows
+
+### Pre-Installation Security Check
+```javascript
+// Agent workflow example
+async function suggestPackage(packageName) {
+ // Perform security scan before suggesting
+ const securityResult = await securityScan(packageName);
+
+ if (securityResult.riskLevel === 'CRITICAL') {
+ return {
+ recommendation: 'DO NOT install',
+ reason: 'Critical security vulnerabilities detected',
+ alternatives: ['secure-alternative-1', 'secure-alternative-2']
+ };
+ }
+
+ return {
+ recommendation: 'Safe to install',
+ securityScore: securityResult.riskScore,
+ notes: securityResult.recommendations
+ };
+}
+```
+
+### Agent Decision Making
+- Use security scores to prioritize recommendations
+- Consider security implications in package selection
+- Provide security context in explanations
+- Suggest security best practices
+
+## Security Best Practices for AI Agents
+
+1. **Always scan before suggesting** - Check security status before recommending packages
+2. **Prioritize security over convenience** - Choose secure packages even if less convenient
+3. **Explain security decisions** - Provide context about why certain packages are recommended
+4. **Suggest secure alternatives** - Always have backup options for problematic packages
+5. **Monitor for updates** - Keep track of security patches and updates
+6. **Document security decisions** - Record why certain packages were chosen despite risks
+
+## Response to Security Issues
+
+### Critical Vulnerabilities
+- Immediately suggest alternatives
+- Explain why the package is dangerous
+- Provide secure replacement options
+- Consider the specific use case requirements
+
+### High-Risk Dependencies
+- Explain the risks involved
+- Suggest safer alternatives
+- Provide migration guidance if needed
+- Consider the urgency of the requirement
+
+### Medium-Risk Dependencies
+- Explain the security implications
+- Suggest monitoring strategies
+- Provide update recommendations
+- Consider the specific use case
+
+Remember: AI agents must prioritize security when suggesting dependencies! 🔒🛡️✨
\ No newline at end of file
diff --git a/.claude/rules/standards/cloudflare-workers-auto.md b/.claude/rules/standards/cloudflare-workers-auto.md
new file mode 100644
index 0000000..f98b581
--- /dev/null
+++ b/.claude/rules/standards/cloudflare-workers-auto.md
@@ -0,0 +1,393 @@
+# Cloudflare Workers development standards and best practices
+
+## Description
+Cloudflare Workers development standards and best practices
+
+## Applicability
+- **Files:** `*.worker.{js,ts},**/workers/**/*.{js,ts},**/src/index.{js,ts},wrangler.{json,jsonc,toml}`
+- **Always Apply:** false
+
+## Rules
+- Workers MUST use TypeScript by default unless JavaScript is specifically requested
+- Workers MUST use ES modules format exclusively (NEVER use Service Worker format)
+- Workers MUST use the modern `export default` pattern instead of `addEventListener`
+- Configuration MUST use `wrangler.jsonc` (not `wrangler.toml`)
+- Workers MUST import all methods, classes and types used in the code
+- Workers MUST keep all code in a single file unless otherwise specified
+- Workers MUST use official SDKs when available for external services
+- Workers MUST never bake secrets into the code
+- Workers MUST implement proper error handling and logging
+- Workers MUST use appropriate Cloudflare integrations for data storage
+- Workers MUST follow security best practices and input validation
+- Workers MUST use WebSocket Hibernation API for Durable Objects WebSockets
+
+### cloudflare-workers-modern-pattern
+
+Enforce modern export default pattern over legacy addEventListener
+
+**Actions:**
+ - type: suggest
+ message: |
+ Use modern export default pattern instead of legacy addEventListener:
+ ```typescript
+ export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ return new Response('Hello World!', { status: 200 });
+ }
+ };
+ ```
+examples:
+ - input: |
+ addEventListener('fetch', event => {
+ event.respondWith(handleRequest(event.request));
+ });
+ output: |
+ export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ return handleRequest(request);
+ }
+ };
+metadata:
+
+### cloudflare-workers-typescript-types
+
+Ensure proper TypeScript types and interfaces are used
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "async fetch\\([^)]*\\)\\s*:"
+ message: |
+ Define proper TypeScript types for fetch handler:
+ ```typescript
+ interface Env {
+ // Define your environment bindings here
+ MY_KV: KVNamespace;
+ MY_D1: D1Database;
+ }
+
+ export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ // Your code here
+ }
+ };
+ ```
+examples:
+ - input: |
+ export default {
+ async fetch(request, env, ctx) {
+ return new Response('Hello');
+ }
+ };
+ output: |
+ interface Env {
+ // Define your environment bindings here
+ }
+
+ export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ return new Response('Hello', { status: 200 });
+ }
+ };
+metadata:
+
+### cloudflare-workers-wrangler-config
+
+Enforce wrangler.jsonc configuration best practices
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "\\.toml$"
+ message: |
+ Use wrangler.jsonc instead of wrangler.toml:
+ ```jsonc
+ {
+ "name": "my-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-02-11",
+ "compatibility_flags": ["nodejs_compat"],
+ "observability": {
+ "enabled": true
+ }
+ }
+ ```
+ - type: validate
+ conditions:
+ - pattern: "compatibility_date.*2024"
+ message: "Update compatibility_date to current date (2025-02-11 or later)"
+ - pattern: "(?!.*nodejs_compat)"
+ message: "Include nodejs_compat in compatibility_flags"
+ - pattern: "(?!.*observability)"
+ message: "Enable observability for logging"
+examples:
+ - input: |
+ # wrangler.toml
+ name = "my-worker"
+ main = "src/index.js"
+ output: |
+ // wrangler.jsonc
+ {
+ "name": "my-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-02-11",
+ "compatibility_flags": ["nodejs_compat"],
+ "observability": {
+ "enabled": true
+ }
+ }
+metadata:
+
+### cloudflare-workers-response-patterns
+
+Enforce proper Response object patterns
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "new Response\\([^)]*\\)(?![\\s\\S]*status\\s*:)"
+ message: |
+ Always include explicit status codes in Response objects:
+ ```typescript
+ return new Response('Success', { status: 200 });
+ return new Response('Not Found', { status: 404 });
+ return new Response('Internal Server Error', { status: 500 });
+ ```
+ - type: suggest
+ conditions:
+ - pattern: "throw new Error"
+ message: |
+ Return proper error responses instead of throwing:
+ ```typescript
+ // Bad
+ throw new Error('Something went wrong');
+
+ // Good
+ return new Response('Internal Server Error', { status: 500 });
+ ```
+examples:
+ - input: |
+ return new Response('Hello');
+ output: |
+ return new Response('Hello', {
+ status: 200,
+ headers: { 'Content-Type': 'text/plain' }
+ });
+metadata:
+
+### cloudflare-workers-url-handling
+
+Enforce proper URL parsing and handling
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "request\\.url(?![\\s\\S]*new URL\\()"
+ message: |
+ Parse URLs properly using URL constructor:
+ ```typescript
+ const url = new URL(request.url);
+ const pathname = url.pathname;
+ const searchParams = url.searchParams;
+ ```
+examples:
+ - input: |
+ const path = request.url.split('?')[0];
+ output: |
+ const url = new URL(request.url);
+ const pathname = url.pathname;
+ const searchParams = url.searchParams;
+metadata:
+
+### cloudflare-workers-websocket-hibernation
+
+Enforce WebSocket Hibernation API for Durable Objects
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "server\\.accept\\(\\)"
+ message: |
+ Use WebSocket Hibernation API instead of legacy WebSocket API:
+ ```typescript
+ // Bad
+ server.accept();
+
+ // Good
+ this.ctx.acceptWebSocket(server);
+ ```
+ - type: suggest
+ conditions:
+ - pattern: "class.*DurableObject"
+ message: |
+ Implement webSocketMessage handler for WebSocket Hibernation:
+ ```typescript
+ class MyDurableObject {
+ async webSocketMessage(ws: WebSocket, message: string | ArrayBuffer) {
+ // Handle WebSocket messages here
+ }
+ }
+ ```
+examples:
+ - input: |
+ server.accept();
+ output: |
+ this.ctx.acceptWebSocket(server);
+metadata:
+
+### cloudflare-workers-security-headers
+
+Enforce security best practices and headers
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "new Response\\([^)]*\\)(?![\\s\\S]*headers)"
+ message: |
+ Consider adding security headers:
+ ```typescript
+ return new Response('Hello', {
+ status: 200,
+ headers: {
+ 'Content-Type': 'text/plain',
+ 'X-Content-Type-Options': 'nosniff',
+ 'X-Frame-Options': 'DENY',
+ 'X-XSS-Protection': '1; mode=block'
+ }
+ });
+ ```
+examples:
+ - input: |
+ return new Response('Hello', { status: 200 });
+ output: |
+ return new Response('Hello', {
+ status: 200,
+ headers: {
+ 'Content-Type': 'text/plain',
+ 'X-Content-Type-Options': 'nosniff',
+ 'X-Frame-Options': 'DENY'
+ }
+ });
+metadata:
+
+### cloudflare-workers-imports
+
+Enforce proper imports for Cloudflare services
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "puppeteer"
+ message: |
+ Use official Cloudflare Puppeteer package:
+ ```typescript
+ import puppeteer from "@cloudflare/puppeteer";
+ ```
+examples:
+ - input: |
+ import puppeteer from "puppeteer";
+ output: |
+ import puppeteer from "@cloudflare/puppeteer";
+metadata:
+
+## Additional Information
+# Cloudflare Workers Development Standards
+
+Enforces best practices for developing Cloudflare Workers including proper configuration, modern patterns, security, and performance optimization based on official Cloudflare documentation.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Cloudflare Service Integration Guidelines
+
+### Data Storage Services
+- **Workers KV**: Key-value storage for configuration, user profiles, A/B testing
+- **Durable Objects**: Strongly consistent state, multiplayer coordination, agent use-cases
+- **D1**: Relational data with SQL dialect
+- **R2**: Object storage for structured data, AI assets, images, uploads
+- **Hyperdrive**: Connect to existing PostgreSQL databases
+- **Queues**: Asynchronous processing and background tasks
+- **Vectorize**: Embeddings and vector search (with Workers AI)
+- **Analytics Engine**: User events, billing, metrics, analytics
+- **Workers AI**: Default AI API for inference requests
+
+### Configuration Template
+```jsonc
+{
+ "name": "my-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-02-11",
+ "compatibility_flags": ["nodejs_compat"],
+ "observability": {
+ "enabled": true
+ },
+ "vars": {
+ "ENVIRONMENT": "production"
+ },
+ "kv_namespaces": [
+ {
+ "binding": "MY_KV",
+ "id": "your-kv-namespace-id"
+ }
+ ]
+}
+```
+
+tests:
+ - input: |
+ addEventListener('fetch', event => {
+ event.respondWith(new Response('Hello'));
+ });
+ output: |
+ export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ return new Response('Hello', { status: 200 });
+ }
+ };
+
+ - input: |
+ export default {
+ async fetch(request, env, ctx) {
+ const url = request.url;
+ return new Response('Hello');
+ }
+ };
+ output: |
+ interface Env {
+ // Define your environment bindings here
+ }
+
+ export default {
+ async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
+ const url = new URL(request.url);
+ return new Response('Hello', { status: 200 });
+ }
+ };
+
+ - input: |
+ server.accept();
+ output: |
+ this.ctx.acceptWebSocket(server);
+
+ - input: |
+ throw new Error('Failed to process');
+ output: |
+ return new Response('Internal Server Error', { status: 500 });
+description:
+globs:
+alwaysApply: false
+---
\ No newline at end of file
diff --git a/.claude/rules/standards/cloudflare-workers-hono-auto.md b/.claude/rules/standards/cloudflare-workers-hono-auto.md
new file mode 100644
index 0000000..31b8eb7
--- /dev/null
+++ b/.claude/rules/standards/cloudflare-workers-hono-auto.md
@@ -0,0 +1,473 @@
+# Cloudflare Workers with Hono framework development standards and best practices
+
+## Description
+Cloudflare Workers with Hono framework development standards and best practices
+
+## Applicability
+- **Files:** `*.worker.{js,ts},**/workers/**/*.{js,ts},**/src/**/*.{js,ts},wrangler.{json,jsonc}`
+- **Always Apply:** false
+
+## Rules
+- Workers MUST use Hono framework for routing and middleware
+- Workers MUST avoid creating "Ruby on Rails-like Controllers" when possible
+- Workers MUST write handlers directly after path definitions for proper type inference
+- Workers MUST use `app.route()` for larger applications instead of controllers
+- Workers MUST use `factory.createHandlers()` if controller-like patterns are needed
+- Workers MUST use proper TypeScript types with Hono Context
+- Workers MUST separate route files for different endpoints in larger applications
+- Workers MUST use proper Hono middleware patterns
+- Workers MUST integrate properly with Cloudflare Workers environment bindings
+- Workers MUST use wrangler.jsonc configuration with Hono setup
+
+### hono-no-controllers
+
+Enforce direct handler patterns instead of Rails-like controllers
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "const\\s+\\w+\\s*=\\s*\\(c:\\s*Context\\)\\s*=>.*app\\.(get|post|put|delete|patch)\\([^,]*,\\s*\\w+\\)"
+ message: |
+ Avoid Rails-like controllers. Write handlers directly for better type inference:
+ ```typescript
+ // π Don't do this
+ const booksList = (c: Context) => {
+ return c.json('list books')
+ }
+ app.get('/books', booksList)
+
+ // 😃 Do this instead
+ app.get('/books', (c) => {
+ return c.json('list books')
+ })
+ ```
+examples:
+ - input: |
+ const booksList = (c: Context) => {
+ return c.json('list books')
+ }
+ app.get('/books', booksList)
+ output: |
+ app.get('/books', (c) => {
+ return c.json('list books')
+ })
+metadata:
+
+### hono-path-param-inference
+
+Ensure proper path parameter type inference
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "const\\s+\\w+\\s*=\\s*\\(c:\\s*Context\\)\\s*=>.*c\\.req\\.param\\("
+ message: |
+ Use inline handlers for proper path parameter inference:
+ ```typescript
+ // 🙁 Can't infer path params in controller
+ const bookPermalink = (c: Context) => {
+ const id = c.req.param('id') // Can't infer the path param
+ return c.json(`get ${id}`)
+ }
+
+ // 😃 Proper type inference
+ app.get('/books/:id', (c) => {
+ const id = c.req.param('id') // Can infer the path param
+ return c.json(`get ${id}`)
+ })
+ ```
+examples:
+ - input: |
+ const bookPermalink = (c: Context) => {
+ const id = c.req.param('id')
+ return c.json(`get ${id}`)
+ }
+ app.get('/books/:id', bookPermalink)
+ output: |
+ app.get('/books/:id', (c) => {
+ const id = c.req.param('id')
+ return c.json(`get ${id}`)
+ })
+metadata:
+
+### hono-factory-pattern
+
+Use factory.createHandlers() when controller patterns are needed
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "from\\s+['\"]hono['\"].*createFactory"
+ message: |
+ Import createFactory from 'hono/factory':
+ ```typescript
+ import { createFactory } from 'hono/factory'
+
+ const factory = createFactory()
+
+ const handlers = factory.createHandlers(middleware, (c) => {
+ return c.json(c.var.foo)
+ })
+
+ app.get('/api', ...handlers)
+ ```
+examples:
+ - input: |
+ import { createFactory } from 'hono'
+ output: |
+ import { createFactory } from 'hono/factory'
+metadata:
+
+### hono-app-structure
+
+Enforce proper app structure for larger applications
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "new Hono\\(\\)"
+ message: |
+ For larger applications, create separate route files:
+ ```typescript
+ // authors.ts
+ import { Hono } from 'hono'
+
+ const app = new Hono()
+
+ app.get('/', (c) => c.json('list authors'))
+ app.post('/', (c) => c.json('create an author', 201))
+ app.get('/:id', (c) => c.json(`get ${c.req.param('id')}`))
+
+ export default app
+
+ // index.ts
+ import { Hono } from 'hono'
+ import authors from './authors'
+ import books from './books'
+
+ const app = new Hono()
+
+ app.route('/authors', authors)
+ app.route('/books', books)
+
+ export default app
+ ```
+examples:
+ - input: |
+ const app = new Hono()
+ app.get('/authors', (c) => c.json('list authors'))
+ app.get('/books', (c) => c.json('list books'))
+ output: |
+ // Separate into authors.ts and books.ts files
+ import authors from './authors'
+ import books from './books'
+
+ const app = new Hono()
+ app.route('/authors', authors)
+ app.route('/books', books)
+metadata:
+
+### hono-cloudflare-workers-integration
+
+Ensure proper integration with Cloudflare Workers environment
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "new Hono\\(\\)(?![\\s\\S]*<.*Env.*>)"
+ message: |
+ Define proper TypeScript types for Cloudflare Workers environment:
+ ```typescript
+ interface Env {
+ MY_KV: KVNamespace;
+ MY_D1: D1Database;
+ MY_VAR: string;
+ }
+
+ const app = new Hono<{ Bindings: Env }>()
+
+ app.get('/api', (c) => {
+ const kv = c.env.MY_KV // Properly typed
+ return c.json({ success: true })
+ })
+
+ export default app
+ ```
+examples:
+ - input: |
+ const app = new Hono()
+
+ app.get('/api', (c) => {
+ const kv = c.env.MY_KV
+ return c.json({ success: true })
+ })
+ output: |
+ interface Env {
+ MY_KV: KVNamespace;
+ }
+
+ const app = new Hono<{ Bindings: Env }>()
+
+ app.get('/api', (c) => {
+ const kv = c.env.MY_KV
+ return c.json({ success: true })
+ })
+metadata:
+
+### hono-middleware-patterns
+
+Enforce proper Hono middleware usage patterns
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "app\\.use\\("
+ message: |
+ Use proper middleware patterns with Hono:
+ ```typescript
+ import { logger } from 'hono/logger'
+ import { cors } from 'hono/cors'
+ import { secureHeaders } from 'hono/secure-headers'
+
+ const app = new Hono<{ Bindings: Env }>()
+
+ // Global middleware
+ app.use('*', logger())
+ app.use('*', secureHeaders())
+ app.use('/api/*', cors())
+
+ // Route-specific middleware
+ app.use('/admin/*', async (c, next) => {
+ // Authentication middleware
+ await next()
+ })
+ ```
+examples:
+ - input: |
+ app.use((c, next) => {
+ console.log('Request received')
+ return next()
+ })
+ output: |
+ import { logger } from 'hono/logger'
+
+ app.use('*', logger())
+ app.use('*', async (c, next) => {
+ console.log('Request received')
+ await next()
+ })
+metadata:
+
+### hono-error-handling
+
+Enforce proper error handling patterns with Hono
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "throw new Error"
+ message: |
+ Use proper error responses with Hono:
+ ```typescript
+ // 🙁 Don't throw errors
+ throw new Error('Something went wrong')
+
+ // 😃 Return proper error responses
+ return c.json({ error: 'Something went wrong' }, 500)
+
+ // Or use Hono's HTTPException
+ import { HTTPException } from 'hono/http-exception'
+
+ app.onError((err, c) => {
+ if (err instanceof HTTPException) {
+ return c.json({ error: err.message }, err.status)
+ }
+ return c.json({ error: 'Internal Server Error' }, 500)
+ })
+ ```
+examples:
+ - input: |
+ if (error) {
+ throw new Error('Failed to process')
+ }
+ output: |
+ if (error) {
+ return c.json({ error: 'Failed to process' }, 500)
+ }
+metadata:
+
+### hono-wrangler-config
+
+Ensure proper wrangler.jsonc configuration for Hono projects
+
+**Actions:**
+ - type: suggest
+ conditions:
+ - pattern: "\\{"
+ message: |
+ Proper wrangler.jsonc configuration for Hono:
+ ```jsonc
+ {
+ "name": "my-hono-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-02-11",
+ "compatibility_flags": ["nodejs_compat"],
+ "observability": {
+ "enabled": true
+ },
+ "vars": {
+ "ENVIRONMENT": "production"
+ },
+ "kv_namespaces": [
+ {
+ "binding": "MY_KV",
+ "id": "your-kv-namespace-id"
+ }
+ ]
+ }
+ ```
+examples:
+ - input: |
+ {
+ "name": "worker"
+ }
+ output: |
+ {
+ "name": "my-hono-worker",
+ "main": "src/index.ts",
+ "compatibility_date": "2025-02-11",
+ "compatibility_flags": ["nodejs_compat"],
+ "observability": {
+ "enabled": true
+ }
+ }
+metadata:
+
+## Additional Information
+# Cloudflare Workers with Hono Best Practices
+
+Enforces best practices for developing Cloudflare Workers using the Hono framework, including proper routing patterns, application structure, and TypeScript usage.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Hono Framework Integration Guidelines
+
+### Project Structure for Larger Applications
+```
+src/
+├── index.ts               # Main entry point
+├── routes/
+│   ├── authors.ts         # Author routes
+│   ├── books.ts           # Book routes
+│   └── admin.ts           # Admin routes
+├── middleware/
+│   ├── auth.ts            # Authentication middleware
+│   └── cors.ts            # CORS middleware
+└── types/
+    └── env.ts             # Environment type definitions
+```
+
+### Recommended Hono Middleware
+```typescript
+import { logger } from 'hono/logger'
+import { cors } from 'hono/cors'
+import { secureHeaders } from 'hono/secure-headers'
+import { prettyJSON } from 'hono/pretty-json'
+import { timing } from 'hono/timing'
+```
+
+### Environment Types Template
+```typescript
+interface Env {
+ // KV Namespaces
+ MY_KV: KVNamespace;
+
+ // D1 Databases
+ MY_D1: D1Database;
+
+ // R2 Buckets
+ MY_R2: R2Bucket;
+
+ // Environment Variables
+ API_KEY: string;
+ ENVIRONMENT: string;
+}
+
+const app = new Hono<{ Bindings: Env }>()
+```
+
+### Route File Template
+```typescript
+// routes/books.ts
+import { Hono } from 'hono'
+
+type Bindings = {
+ MY_KV: KVNamespace;
+}
+
+const app = new Hono<{ Bindings: Bindings }>()
+
+app.get('/', (c) => c.json('list books'))
+app.post('/', (c) => c.json('create a book', 201))
+app.get('/:id', (c) => c.json(`get ${c.req.param('id')}`))
+
+export default app
+```
+
+tests:
+ - input: |
+ const booksList = (c: Context) => {
+ return c.json('list books')
+ }
+ app.get('/books', booksList)
+ output: |
+ app.get('/books', (c) => {
+ return c.json('list books')
+ })
+
+ - input: |
+ const app = new Hono()
+ output: |
+ interface Env {
+ // Define your bindings here
+ }
+
+ const app = new Hono<{ Bindings: Env }>()
+
+ - input: |
+ throw new Error('Failed to process')
+ output: |
+ return c.json({ error: 'Failed to process' }, 500)
+
+ - input: |
+ import { createFactory } from 'hono'
+ output: |
+ import { createFactory } from 'hono/factory'
+
+ - input: |
+ app.get('/authors', (c) => c.json('list authors'))
+ app.get('/books', (c) => c.json('list books'))
+ output: |
+ // Separate into route files
+ import authors from './routes/authors'
+ import books from './routes/books'
+
+ app.route('/authors', authors)
+ app.route('/books', books)
+
+
\ No newline at end of file
diff --git a/.claude/rules/standards/laravel-php-auto.md b/.claude/rules/standards/laravel-php-auto.md
new file mode 100644
index 0000000..818c5b0
--- /dev/null
+++ b/.claude/rules/standards/laravel-php-auto.md
@@ -0,0 +1,322 @@
+# Laravel and PHP Development Standards
+
+## Description
+Laravel and PHP Development Standards
+
+## Applicability
+- **Files:** `**/*.{php}`
+- **Always Apply:** false
+
+## Rules
+- Follow Laravel naming conventions for controllers, models, and other components
+- Use Laravel's built-in features rather than reinventing functionality
+- Structure code according to Laravel's MVC architecture
+- Adhere to Laravel's folder structure
+- Implement proper validation and error handling
+- Follow RESTful design principles for API development
+- Use Laravel's Eloquent ORM for database interactions
+- Implement proper security measures against common web vulnerabilities
+
+## Additional Information
+# Laravel and PHP Development Standards
+
+This rule enforces best practices, coding standards, and architectural patterns for Laravel and PHP development.
+
+
+## Laravel Naming Conventions
+
+### Controllers
+
+- Use singular PascalCase + "Controller" suffix (e.g., `UserController`)
+- RESTful action names: index, create, store, show, edit, update, destroy
+
+### Models
+
+- Use singular PascalCase (e.g., `User`, `Product`)
+- Model properties and relationships should use camelCase
+
+### Migrations
+
+- Use descriptive names with timestamps (e.g., `2023_01_01_000000_create_users_table`)
+- Table names should be plural snake_case
+
+### Routes
+
+- Use plural kebab-case for resource routes (e.g., `/api/user-profiles`)
+- Group related routes using namespaces and middlewares
+
+## PHP Code Style
+
+- Follow PSR-12 coding standard for PHP code structure
+- Use strict typing
+- Use type declarations for method parameters and return types
+- Prefer explicit visibility declarations (public, protected, private)
+- Use null coalescing operators and other modern PHP features
+
+
+name: laravel-php-standards
+description: Standards and best practices for Laravel and PHP development
+version: 1.0
+severity: suggestion
+filters:
+ - type: file_extension
+ pattern: "\\.php$"
+ - type: content
+ pattern: "(namespace|class|function|Route::)"
+actions:
+ - type: suggest
+ message: |
+ Ensure your Laravel and PHP code follows these best practices:
+ 1. Follow Laravel naming conventions
+ 2. Use Laravel's built-in features instead of custom solutions
+ 3. Structure code according to MVC architecture
+ 4. Implement proper validation and error handling
+ 5. Follow PSR-12 coding standards
+
+examples:
+ - description: "Proper Laravel controller implementation"
+ input: |
+ name = $request->name;
+ $user->email = $request->email;
+ $user->save();
+
+ return $user;
+ }
+ }
+ output: |
+ json($users);
+ }
+
+ public function store(StoreUserRequest $request)
+ {
+ // Validation handled via FormRequest
+ $user = User::create($request->validated());
+ return response()->json($user, 201);
+ }
+ }
+
+ - description: "Proper Laravel model implementation"
+ input: |
+ belongsTo('App\Models\User');
+ }
+
+ function get_comments() {
+ return $this->hasMany('App\Models\Comment');
+ }
+ }
+ output: |
+ belongsTo(User::class, 'user_id');
+ }
+
+ /**
+ * Get the comments for the post.
+ */
+ public function comments(): HasMany
+ {
+ return $this->hasMany(Comment::class);
+ }
+ }
+
+ - description: "Proper Laravel route definition"
+ input: |
+ only([
+ // 'index', 'store', 'show', 'update', 'destroy'
+ // ]);
+
+tests:
+ - input: |
+ hasMany('App\Models\OrderItem');
+ }
+ }
+ output: "Suggest renaming model to singular Order, adding type hints, and renaming relationship method to items"
+
+ - input: |
+ validate([
+ 'name' => 'required|string|max:255',
+ 'email' => 'required|email|unique:users',
+ 'password' => 'required|min:8',
+ ]);
+
+ $user = User::create($validated);
+
+ return response()->json($user, 201);
+ }
+ }
+ output: "Good Laravel controller implementation following conventions"
+
+metadata:
+ priority: high
+ version: 1.0
+
+## Laravel Architecture Best Practices
+
+### Service Layer
+- Use service classes for complex business logic
+- Keep controllers thin and focused on HTTP concerns
+- Inject dependencies via constructor or method injection
+
+### Repository Pattern
+- Consider using repositories for database abstraction when needed
+- Avoid bypassing Eloquent with raw queries unless necessary
+- Repository interfaces should be defined in a contracts namespace
+
+### Form Requests
+- Use Form Request classes for validation logic
+- Group related validation rules in dedicated request classes
+- Add authorization logic to form requests when appropriate
+
+### API Resources
+- Use API Resources for response transformations
+- Create dedicated resource classes for complex transformations
+- Consider using resource collections for pagination and metadata
+
+## Security Considerations
+
+- Always validate user input
+- Always suggest using Laravel's built-in features instead of custom solutions, for example, use Laravel's authentication system and routing instead of custom solutions
+- Implement proper CSRF protection
+- Avoid using raw SQL queries to prevent SQL injection
+- Set proper HTTP headers for security
+- Use Laravel's encryption and hashing features
+- Implement proper role and permission management
+
+## Performance Optimization
+
+- Use eager loading to avoid N+1 query problems
+- Cache frequently accessed data
+- Use Laravel's query builder effectively
+- Implement pagination for large datasets
+- Use queues for long-running tasks
\ No newline at end of file
diff --git a/.claude/rules/standards/mysql-auto.md b/.claude/rules/standards/mysql-auto.md
new file mode 100644
index 0000000..e3a2825
--- /dev/null
+++ b/.claude/rules/standards/mysql-auto.md
@@ -0,0 +1,160 @@
+# This rule enforces MySQL-specific best practices to enhance readability, performance, security, and maintainability
+
+## Description
+This rule enforces MySQL-specific best practices to enhance readability, performance, security, and maintainability. It targets MySQL features (e.g., storage engines, character sets) and common pitfalls, and adds concrete guidance for schema design and creation.
+
+## Applicability
+- **Files:** `*.sql`
+- **Always Apply:** false
+
+## Rules
+- **Naming Conventions**: Use descriptive, snake_case names for databases, tables, columns, indexes, and constraints (e.g., `user_profiles`, `fk_user_profiles_user_id`). Avoid abbreviations and reserved keywords; if unavoidable, use backticks (e.g., `` `order` ``).
+- **SELECT Specificity**: Avoid `SELECT *`. Always specify required columns.
+- **Security**: Prefer parameterized queries (driver-level). Do not build SQL via string concatenation. Use prepared statements to prevent SQL injection.
+- **Performance**: Index columns used in JOIN and WHERE predicates. Prefer `INNER JOIN` over correlated subqueries when possible.
+- **Engine/Charset**: Use `ENGINE=InnoDB` for transactional tables and set `DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci` (or `utf8mb4_0900_ai_ci` on MySQL 8.0+).
+- **Transactions**: Group related writes in transactions. Avoid implicit autocommit for multi-statement operations.
+- **Data Types**: Use precise types (`INT UNSIGNED` for identifiers, `DECIMAL(precision,scale)` for money). Avoid `TEXT`/`LONGTEXT` unless necessary. Prefer `BOOLEAN` (TINYINT(1)) for flags.
+- **Timestamps**: Use `TIMESTAMP`/`DATETIME` with `DEFAULT CURRENT_TIMESTAMP` and `ON UPDATE CURRENT_TIMESTAMP` where appropriate.
+- **Joins**: Use explicit `JOIN` syntax; avoid implicit comma joins.
+- Keep migrations idempotent and forward-only; include rollback plans where feasible.
+
+## Additional Information
+# MySQL Best Practices Auto Rule
+
+
+## Schema Creation
+
+Design schemas for correctness first, then performance. Apply the following when creating or altering schemas:
+
+- **Version-Specific Charset/Collation**
+
+ - MySQL 8.0+: Prefer `utf8mb4` with `utf8mb4_0900_ai_ci` (or `_as_cs` for case-sensitive needs). Avoid legacy collations like `utf8_general_ci`.
+ - MySQL 5.7: Prefer `utf8mb4` with `utf8mb4_unicode_ci`. Avoid `utf8` (3-byte) and `latin1` unless strictly required for legacy data.
+ - Suggest migration to `utf8`/`latin1` schemas to `utf8mb4` proactively to support full Unicode and emojis.
+ - If a project already has MySQL setup, keep the collation and charset choices of the project
+
+- **Table Creation Defaults**
+
+ - Always specify: `ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC` (omit `_0900_` on <8.0).
+ - Define a primary key; use surrogate keys (`BIGINT UNSIGNED AUTO_INCREMENT`) when natural keys are composite/unwieldy.
+ - Define `NOT NULL` where appropriate; avoid nullable columns unless required by domain logic.
+ - Use consistent ID column naming: `id` for primary key, `<table>_id` for foreign keys.
+
+- **Keys and Constraints**
+
+ - Create foreign keys with `ON DELETE`/`ON UPDATE` actions explicitly (`RESTRICT`, `CASCADE`, or `SET NULL`) aligned to business rules.
+ - Create supporting indexes for foreign keys and for frequent predicates; use composite indexes with leftmost prefix ordering that matches queries (e.g., `(user_id, created_at)`).
+ - Name constraints and indexes explicitly: `pk_<table>`, `fk_<table>_<column>`, `uk_<table>_<column>`, `idx_<table>_<columns>`.
+
+- **Column Types**
+
+ - Identifiers: `BIGINT UNSIGNED` (or `INT UNSIGNED` if you are certain about bounds). Foreign keys must match referenced type/unsignedness.
+ - Monetary: Avoid `FLOAT/DOUBLE` for currency.
+ - Strings: `VARCHAR(n)` sized to realistic max; avoid `TEXT` unless storing large content.
+ - Booleans: `TINYINT(1)` with `CHECK (col IN (0,1))` on 8.0+.
+
+- **Temporal Columns**
+
+ - Use `created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP`.
+ - Use `updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP`.
+ - Store times in UTC; handle localization at the application layer.
+
+### Example: Recommended Table Definition
+
+```sql
+CREATE TABLE `users` (
+ `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
+ `email` VARCHAR(255) NOT NULL,
+ `name` VARCHAR(255) NOT NULL,
+ `status` TINYINT(1) NOT NULL DEFAULT 1,
+ `created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `updated_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ CONSTRAINT `pk_users` PRIMARY KEY (`id`),
+ CONSTRAINT `uk_users_email` UNIQUE KEY (`email`),
+ INDEX `idx_users_status_created_at` (`status`, `created_at`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC;
+```
+
+### Example: Foreign Key With Index and Action
+
+```sql
+CREATE TABLE `orders` (
+ `id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
+ `user_id` BIGINT UNSIGNED NOT NULL,
+ `total_cents` DECIMAL(19,4) NOT NULL,
+ `created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ CONSTRAINT `pk_orders` PRIMARY KEY (`id`),
+ CONSTRAINT `fk_orders_user_id` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE RESTRICT ON UPDATE CASCADE,
+ INDEX `idx_orders_user_id_created_at` (`user_id`, `created_at`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
+```
+
+## Migration Scripts
+
+Design and execute migrations to be safe, repeatable, and observable:
+
+- Idempotency and Ordering
+
+ - Use timestamped, ordered filenames (e.g., `20250922_1200_add_users_table.sql`).
+ - Write scripts so re-running is safe: `CREATE TABLE IF NOT EXISTS`, `ADD COLUMN IF NOT EXISTS`, `DROP COLUMN IF EXISTS` on MySQL 8.0+.
+ - Record applied migrations in a migrations table (version, checksum, applied_at, success).
+
+- Transactionality and Locking
+
+ - Wrap related DDL/DML in transactions where supported. Note: some DDL is non-transactional; plan for partial failure.
+ - Avoid long metadata locks; prefer online DDL with `ALGORITHM=INPLACE`/`INSTANT` and `LOCK=NONE` when available (MySQL 8.0+).
+
+- Backwards-Compatible, Zero/Low-Downtime Strategy
+
+ - Avoid destructive changes in one step. Use expand-and-contract:
+ - Add nullable column β backfill in batches β set default β enforce `NOT NULL`.
+ - Add new index concurrently; switch reads/writes; drop old index later.
+ - For large tables, use online schema change tools like gh-ost or pt-online-schema-change.
+ - Batch backfills (e.g., `UPDATE ... LIMIT 1000` with keyset pagination) with sleeps to reduce load.
+
+- Data Safety and Rollback
+
+ - Take backups or snapshots before risky changes; verify restore procedures.
+ - Provide rollback scripts or compensating changes when possible.
+ - Validate with `EXPLAIN` and compare plans before/after.
+
+- Environment Discipline
+ - Test migrations in staging with production-like data volumes.
+ - Gate production runs behind approvals and maintenance windows when needed.
+ - Emit logs/metrics; fail fast on errors; ensure scripts are non-interactive.
+
+### Example: Safe Add NOT NULL Column
+
+```sql
+-- 1) Add column nullable
+ALTER TABLE `users`
+ ADD COLUMN `country_code` VARCHAR(2) NULL;
+
+-- 2) Backfill in batches
+UPDATE `users`
+ SET `country_code` = 'US'
+ WHERE `country_code` IS NULL
+ ORDER BY `id`
+ LIMIT 1000;
+-- Repeat step 2 until no rows remain
+
+-- 3) Set default
+ALTER TABLE `users`
+ ALTER `country_code` SET DEFAULT 'US';
+
+-- 4) Enforce NOT NULL
+ALTER TABLE `users`
+ MODIFY `country_code` VARCHAR(2) NOT NULL DEFAULT 'US';
+```
+
+### References
+
+- [MySQL 8.0 Reference Manual β Character Sets and Collations](https://dev.mysql.com/doc/refman/8.0/en/charset.html)
+- [MySQL 8.0 Reference Manual β InnoDB Storage Engine](https://dev.mysql.com/doc/refman/8.0/en/innodb-storage-engine.html)
+- [MySQL 8.0 Reference Manual β CREATE TABLE Syntax](https://dev.mysql.com/doc/refman/8.0/en/create-table.html)
+- [MySQL 8.0 Reference Manual β Data Types](https://dev.mysql.com/doc/refman/8.0/en/data-types.html)
+- [MySQL 8.0 Reference Manual β SQL Modes](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html)
+- [MySQL 8.0 Reference Manual β EXPLAIN Output](https://dev.mysql.com/doc/refman/8.0/en/explain-output.html)
+- [MySQL 8.0 Reference Manual β Online DDL](https://dev.mysql.com/doc/refman/8.0/en/innodb-online-ddl-operations.html)
+- [OWASP β SQL Injection Prevention Cheat Sheet](https://owasp.org/www-project-cheat-sheets/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html)
\ No newline at end of file
diff --git a/.claude/rules/standards/nextjs-react19-auto.md b/.claude/rules/standards/nextjs-react19-auto.md
new file mode 100644
index 0000000..da2103b
--- /dev/null
+++ b/.claude/rules/standards/nextjs-react19-auto.md
@@ -0,0 +1,61 @@
+# Next.js React 19 Standards
+
+## Description
+This rule enforces Next.js best practices for React 19 with TypeScript.
+
+## Applicability
+- **Files:** `*.tsx, *.ts, *.jsx, *.js`
+- **Always Apply:** true
+
+## Additional Information
+# Critical Rules
+
+- Adapt approach to app router or page router project structure
+- Implement proper error boundaries
+
+- Use the App Router structure with `page.tsx` files in route directories.
+- Client components must be explicitly marked with `'use client'` at the top of the file.
+- Use kebab-case for directory names (e.g., `components/auth-form`) and PascalCase for component files.
+
+
+## Project Structure
+
+ - Both the /app and /components folders under a /src directory.
+
+## State Management
+
+- Use `getServerSideProps` for server-side data fetching
+- Use `getStaticProps` for static data fetching
+- Use `getStaticPaths` for static path generation
+- Use `useActionState` instead of deprecated `useFormState`
+- Leverage enhanced `useFormStatus` with new properties (data, method, action)
+- Avoid unnecessary `useState`,`setState`, `useEffect`, and `useCallback` when possible:
+ - Use server actions for server-side state management and form handling,
+ - Use server components for data fetching,
+ - Use URL search params for shareable state
+
+## Async Request APIs
+
+- Always use async versions of runtime APIs
+- Handle async params in layouts/pages
+
+## Data Fetching
+
+- Use appropriate fetching methods (Server Components, SWR, React Query, etc.)
+- Use API Routes inside route directories for server-side data fetching (ie. `app/api/users/route.ts` for `/api/users`)
+- Use Suspense for async operations
+
+Remind user of default caching behavior in Next.js when using API routes or fetching data from external APIs.
+
+## Routing
+
+- Use the App Router conventions
+- Implement proper loading and error states for routes
+- Use dynamic routes appropriately (ie. when a user can be identified by a unique id, or a blog post has a unique slug)
+- Handle parallel routes when needed
+
+## Components
+
+Remind the developer:
+- Importing a server component into a 'use client' file makes it a client component.
+- Passing a server component as a child to a client component keeps it as a server component, retaining SSR benefits.
\ No newline at end of file
diff --git a/.claude/rules/standards/react-typescript-auto.md b/.claude/rules/standards/react-typescript-auto.md
new file mode 100644
index 0000000..f4dbbd0
--- /dev/null
+++ b/.claude/rules/standards/react-typescript-auto.md
@@ -0,0 +1,77 @@
+# React 18+ with TypeScript Standards
+
+## Description
+This rule enforces best practices for building React 18+ applications with TypeScript, ensuring type safety, maintainability, and consistency.
+
+## Applicability
+- **Files:** `*.ts,*.tsx`
+- **Always Apply:** false
+
+## Rules
+- All React components MUST be defined as functional components using `const` and arrow functions — except for Error Boundaries.
+- ALWAYS define explicit types or interfaces for component props and state.
+- EVERY element in a list rendered by `map()` MUST have a unique `key` prop.
+- Hooks (`useState`, `useEffect`, `useContext`, etc.) MUST only be called at the top level of a functional component or custom hook.
+- Component filenames MUST use PascalCase (e.g., `MyComponent.tsx`).
+- Prefix custom hooks with "use" (ie. "useUserAuth")
+- Prefix event handlers with "handle" (ie. "handleClick")
+- Prefix boolean values with verbs (ie. isLoading for loading state, or canSubmit for ability to submit)
+
+## Additional Information
+# React and TypeScript Standards
+
+
+## React Hooks
+
+- Only call hooks at the top level of React function components or custom hooks (never inside loops, conditions, or nested functions).
+- Always start custom hook names with `use` (e.g., `useFetchData`).
+- Extract reusable logic into custom hooks to avoid duplication.
+- Specify all dependencies in hook dependency arrays (e.g., `useEffect`, `useCallback`, `useMemo`).
+- Avoid using `any` in hook return types or parameters; always type your hooks.
+- Use `useCallback` and `useMemo` to optimize performance only when necessary.
+- Document the purpose and expected usage of custom hooks.
+
+### Best Practices for useEffect
+
+The `useEffect` hook is used for side effects in functional components, such as data fetching, subscriptions, or manually changing the DOM. To avoid bugs and performance issues, follow these best practices:
+
+- Always specify all external dependencies in the dependency array. Missing dependencies can cause stale values or unexpected behavior.
+- Avoid using `any` in effect callbacks or dependencies.
+- Use cleanup functions to prevent memory leaks, especially for subscriptions or timers.
+- Prefer splitting effects by purpose rather than combining unrelated logic in a single effect.
+- Avoid side effects that synchronously update state in a way that triggers another render loop.
+- For async logic, define the async function inside the effect and call it, rather than making the effect callback itself async.
+
+## State Management Guidance
+
+Effective state management is crucial for building scalable and maintainable React applications. Choose the right state management approach based on the scope and complexity of your state:
+
+- Use `useState` for simple, local component state.
+- Use `useReducer` for complex local state logic or when state transitions depend on previous state.
+- Use React Context for sharing state that is truly global to a subtree (e.g., theme, authentication, user preferences), but avoid using it for frequently changing or large state.
+- For large, shared, or highly dynamic state, consider external libraries such as Redux, Zustand, Jotai, or Recoil.
+- Always define explicit types for state and actions when using TypeScript.
+- Avoid prop drilling by lifting state up only as much as necessary or using context appropriately.
+
+## When to use React Context
+
+React Context is ideal for passing data that can be considered "global" for a tree of React components, such as the current authenticated user, theme, or preferred language. It helps avoid "prop drilling" (passing props down through many nested components).
+
+### Best Practices for Context:
+
+- Define an explicit type for the context value.
+- Create a provider component that manages the context's state and value.
+- Use `useContext` hook to consume the context in functional components.
+- AVOID using context for highly dynamic or frequently updated state that causes many re-renders across the component tree. For such cases, consider state management libraries like Redux or Zustand.
+
+## Suspense:
+
+- Use `<Suspense>` to wrap components that use React.lazy for code-splitting or that rely on data fetching libraries supporting Suspense (e.g., Relay, React Query experimental).
+- Provide a meaningful fallback UI (e.g., spinner, skeleton loader) to indicate loading state.
+- Avoid wrapping your entire app in a single Suspense; scope it to the smallest subtree that benefits from loading boundaries.
+
+## Error Boundaries:
+
+- Use Error Boundaries to catch and display errors in the render phase of React components, preventing the entire app from crashing.
+- Place Error Boundaries around critical UI sections (e.g., main content, widgets) to isolate failures.
+- Error Boundaries must be class components, but you can wrap them in functional components for convenience.
\ No newline at end of file
diff --git a/.claude/rules/standards/typescript-standards-auto.md b/.claude/rules/standards/typescript-standards-auto.md
new file mode 100644
index 0000000..307ae6a
--- /dev/null
+++ b/.claude/rules/standards/typescript-standards-auto.md
@@ -0,0 +1,107 @@
+# TypeScript development standards and type safety
+
+## Description
+TypeScript development standards and type safety
+
+## Applicability
+- **Files:** `**/*.{ts,tsx}`
+- **Always Apply:** false
+
+## Rules
+- Refer to https://www.typescriptlang.org/tsconfig when tsconfig.json values are changed.
+- Use strict mode with `"strict": true` in tsconfig.json
+- Remind user the limitations of using esModuleInterop and allowSyntheticDefaultImports in tsconfig.json https://www.typescriptlang.org/tsconfig/#Interop_Constraints_6252
+- Explicitly declare types for function parameters and return values
+- Avoid using `any` type unless absolutely necessary. Use `unknown` instead.
+- Use interfaces for object type definitions
+- Use enums for fixed sets of values
+- Use type aliases to simplify complex types
+- Document APIs with JSDoc comments
+- Maintain proper error handling with typed errors
+- Use type guards and type narrowing to improve type safety
+- Use type literals and unions to define the exact allowed values.
+- Use type assertions and annotations when necessary. They are unnecessary if type inference is possible.
+- Use type unions and intersections to combine types.
+- Export types from modules using the `export type` syntax
+- If using an existing library with types, import the types using the `import type` syntax
+- Declare types for file extensions that aren't supported by the compiler using the `declare module` syntax
+- If using node 23+, use `node --experimental-strip-types` in package.json scripts to strip types when running the code locally
+- Use utility types as much as possible to avoid type duplication.
+
+### typescript-standards
+
+Standards for TypeScript development and type safety
+
+**Actions:**
+ - type: validate
+ message: |
+ TypeScript code must follow these conventions:
+ 1. Use strict mode
+ 2. Explicit typing
+ 3. Proper error handling
+ 4. Documentation
+ 5. Consistent naming
+ 6. Proper use of primitives and utility types to reduce duplication and increase type safety
+
+ - type: lint
+ rules:
+ - pattern: "any"
+ message: "Avoid using 'any' type. Define a specific type or interface instead."
+ - pattern: "Object"
+ message: "Avoid using 'Object' type. Use a specific interface or type instead."
+ - pattern: "Function"
+ message: "Avoid using 'Function' type. Define specific function signature instead."
+ - pattern: "\\b[A-Z][a-z0-9]+([A-Z][a-z0-9]+)*\\b"
+ message: "Use PascalCase for type names, interfaces, and classes."
+ - pattern: "\\b[a-z][a-z0-9]*([A-Z][a-z0-9]+)*\\b"
+ message: "Use camelCase for variable and function names."
+
+metadata:
+
+## Additional Information
+# TypeScript Standards
+
+
+
+
+## Naming Conventions
+
+### Types and Interfaces
+- Use PascalCase for type names and interface names
+- Prefix interfaces with 'I' only when required by project convention
+- Follow consistent naming conventions
+
+```typescript
+interface UserData {
+ id: string;
+ name: string;
+}
+
+type ApiResponse<T> = {
+ data: T;
+ status: number;
+};
+```
+
+### Variables and Functions
+- Use camelCase for variable names and function names
+- Use PascalCase for class names
+- Function signatures should have return types
+
+### Classes
+
+- Use PascalCase for class names
+
+```typescript
+const userData: UserData;
+function fetchUserData(): Promise<UserData>;
+class UserService {};
+```
+
+### Constants
+- Use UPPER_SNAKE_CASE for constant values
+
+```typescript
+const MAX_RETRY_ATTEMPTS = 3;
+const API_BASE_URL = 'https://api.example.com';
+```
\ No newline at end of file
diff --git a/.claude/rules/standards/vue3-typescript-auto.md b/.claude/rules/standards/vue3-typescript-auto.md
new file mode 100644
index 0000000..af436c0
--- /dev/null
+++ b/.claude/rules/standards/vue3-typescript-auto.md
@@ -0,0 +1,332 @@
+# Vue 3 with TypeScript Standards
+
+## Description
+Vue 3 with TypeScript Standards
+
+## Applicability
+- **Files:** `*.vue,*.ts`
+- **Always Apply:** false
+
+## Rules
+- Use Composition API with `<script setup lang="ts">`
+
+  ❌ Avoid Options API and untyped refs:
+ const props = defineProps(['title'])
+ const count = ref(0)
+examples:
+ - input: |
+ const props = defineProps(['title'])
+ const model = defineModel()
+ const count = ref(0)
+ output: |
+ interface Props {
+ title: string
+ }
+    const props = defineProps<Props>()
+ const model = defineModel()
+ const count = ref(0)
+metadata:
+
+### vue3-models
+
+Proper typing for defineModel() and v-model bindings
+
+**Actions:**
+ - type: suggest
+ message: |
+    ✅ Type v-model bindings properly:
+
+    // Single v-model
+    const modelValue = defineModel<string>()
+
+    // Named v-models
+    const isOpen = defineModel<boolean>('isOpen')
+    const selectedId = defineModel<number>('selectedId')
+
+ // Optional v-model with default
+ const theme = defineModel<'light' | 'dark'>('theme', { default: 'light' })
+
+    ❌ Avoid untyped models:
+ const modelValue = defineModel()
+ const isOpen = defineModel('isOpen')
+examples:
+ - input: |
+ const modelValue = defineModel()
+ const isVisible = defineModel('isVisible')
+ output: |
+    const modelValue = defineModel<string>()
+    const isVisible = defineModel<boolean>('isVisible')
+metadata:
+
+### vue3-slots
+
+Proper typing for defineSlots() and slot definitions
+
+**Actions:**
+ - type: suggest
+ message: |
+    ✅ Type slots with proper interfaces:
+
+ // Basic slots
+ interface Slots {
+ default(): any
+ header(): any
+ footer(): any
+ }
+
+ // Slots with typed props
+ interface Slots {
+ default(props: { user: User; isActive: boolean }): any
+ item(props: { item: Product; index: number }): any
+ empty(): any
+ }
+
+    const slots = defineSlots<Slots>()
+
+    ❌ Avoid untyped slots:
+ const slots = defineSlots()
+examples:
+ - input: |
+ const slots = defineSlots()
+ output: |
+ interface Slots {
+ default(props: { item: any }): any
+ }
+    const slots = defineSlots<Slots>()
+metadata:
+
+### vue3-composables-naming
+
+Standards for composables and component naming with TypeScript
+
+**Actions:**
+ - type: suggest
+ message: |
+    ✅ Composable structure:
+ export interface UseCounterReturn {
+      count: Ref<number>
+ increment: () => void
+ }
+
+ export function useCounter(initial = 0): UseCounterReturn {
+ const count = ref(initial)
+ const increment = (): void => { count.value++ }
+ return { count, increment }
+ }
+
+    ✅ Component naming: UserProfile.vue, TheHeader.vue
+    ❌ Avoid: userProfile.vue, user_profile.vue
+examples:
+ - input: |
+ export function useCounter() {
+ const count = ref(0)
+ return { count }
+ }
+ output: |
+ export interface UseCounterReturn {
+      count: Ref<number>
+ }
+ export function useCounter(): UseCounterReturn {
+ const count = ref(0)
+ return { count }
+ }
+metadata:
+
+### vue3-watchers
+
+Best practices for watch and watchEffect with TypeScript
+
+**Actions:**
+ - type: suggest
+ message: |
+    ✅ Use watchers properly with TypeScript:
+
+ // watch() for specific reactive sources
+ watch(
+ () => user.value?.id,
+ (newId: number | undefined, oldId: number | undefined) => {
+ if (newId) {
+ fetchUserData(newId)
+ }
+ },
+ { immediate: true }
+ )
+
+ // watch multiple sources
+ watch(
+ [() => props.userId, searchQuery],
+ async ([userId, query]: [number | undefined, string]) => {
+ if (userId && query) {
+ await searchUserData(userId, query)
+ }
+ }
+ )
+
+ // watchEffect for side effects
+ watchEffect(() => {
+ if (user.value && isLoggedIn.value) {
+ analytics.track('user_active', { userId: user.value.id })
+ }
+ })
+
+ // Cleanup watchers
+ const stopWatcher = watch(data, callback)
+ onUnmounted(() => stopWatcher())
+
+    ❌ Avoid untyped watchers and missing cleanup:
+ watch(user, (newVal, oldVal) => { ... })
+ watchEffect(() => { ... }) // without cleanup consideration
+examples:
+ - input: |
+ watch(user, (newVal, oldVal) => {
+ console.log('User changed')
+ })
+ output: |
+ watch(
+ user,
+ (newUser: User | null, oldUser: User | null) => {
+ if (newUser) {
+ console.log('User changed:', newUser.name)
+ }
+ }
+ )
+metadata:
+
+### vue3-advanced-patterns
+
+Type-safe provide/inject and performance optimization
+
+**Actions:**
+ - type: suggest
+ message: |
+    ✅ Type-safe dependency injection:
+ // types/keys.ts
+    export const UserKey: InjectionKey<User> = Symbol('user')
+
+ // Usage
+ provide(UserKey, currentUser)
+ const user = inject(UserKey)
+
+    ✅ Performance patterns:
+    const largeList = shallowRef<Item[]>([])
+ const filtered = computed(() => items.value.filter(item => item.active))
+ const AsyncComp = defineAsyncComponent(() => import('./Heavy.vue'))
+examples:
+ - input: |
+ provide('user', currentUser)
+ const user = inject('user')
+ output: |
+    export const UserKey: InjectionKey<User> = Symbol('user')
+ provide(UserKey, currentUser)
+ const user = inject(UserKey)
+metadata:
+
+## Additional Information
+# Vue 3 with TypeScript Standards
+
+Modern Vue 3 development with TypeScript, emphasizing type safety, Composition API, and performance.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## File Structure
+
+```
+src/
+├── components/  # Reusable components
+├── composables/ # Vue composables
+├── layouts/     # Vue layouts
+├── stores/      # Vue stores
+├── utils/       # Vue utils
+├── types/       # TypeScript definitions
+└── pages/       # Page components
+
+```
+
+### Import Order
+
+```typescript
+// 1. Vue core
+import { ref, computed } from "vue";
+// 2. Vue ecosystem
+import { useRouter } from "vue-router";
+// 3. Third-party
+import axios from "axios";
+// 4. Local
+import UserCard from "@/components/UserCard.vue";
+```
+
+tests:
+
+- input: |
+
+ output: |
+
+- input: "const modelValue = defineModel()"
+  output: "const modelValue = defineModel<string>()"
+- input: "provide('theme', 'dark')"
+ output: |
+    export const ThemeKey: InjectionKey<string> = Symbol('theme')
+ provide(ThemeKey, 'dark')
\ No newline at end of file
diff --git a/.claude/settings.json b/.claude/settings.json
new file mode 100644
index 0000000..e1d6685
--- /dev/null
+++ b/.claude/settings.json
@@ -0,0 +1,91 @@
+{
+ "$schema": "https://json.schemastore.org/claude-code-settings.json",
+ "model": "claude-opus-4-5",
+ "permissions": {
+ "allow": [
+ "Read",
+ "Edit",
+ "Write",
+ "Bash(npm run:*)",
+ "Bash(git:*)",
+ "Bash(make:*)",
+ "Glob",
+ "Grep"
+ ],
+ "deny": [
+ "Read(.env*)",
+ "Read(secrets/**)",
+ "Bash(rm -rf:/*)",
+ "Bash(sudo:*)",
+ "Edit(.git/**)"
+ ],
+ "ask": [
+ "WebFetch",
+ "Bash(curl:*)",
+ "Bash(docker:*)"
+ ],
+ "defaultMode": "acceptEdits"
+ },
+ "env": {
+ "NODE_ENV": "development"
+ },
+ "hooks": {
+ "PostToolUse": [
+ {
+ "matcher": "Edit|Write",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "bash -c 'FILE=$(jq -r \".tool_input.file_path\" <<< \"$(cat)\"); if [[ \"$FILE\" == *.ts || \"$FILE\" == *.tsx || \"$FILE\" == *.js || \"$FILE\" == *.jsx ]]; then npx prettier --write \"$FILE\" 2>/dev/null || true; fi'",
+ "description": "Auto-format TypeScript/JavaScript files with Prettier"
+ }
+ ]
+ }
+ ],
+ "PreToolUse": [
+ {
+ "matcher": "Bash",
+ "hooks": [
+ {
+ "type": "command",
+ "command": ".claude/hooks/security-block.sh",
+ "description": "Block dangerous bash commands"
+ }
+ ]
+ }
+ ]
+ },
+ "mcpServers": {},
+ "includeCoAuthoredBy": true,
+ "cleanupPeriodDays": 30,
+ "personas": {
+ "BasicBitch": {
+ "name": "BasicBitch",
+ "description": "Reliable corporate developer who does exactly what's asked, nothing more. Adequate and dependable.",
+ "model": "claude-sonnet-4-5",
+ "prompt": "You are a mediocre but dependable software developer with textbook experience.\n\nCAPABILITIES:\n- Implement features with exact adherence to requirements\n- Apply design patterns adequately\n- Code review and technical documentation\n\nPERSONALITY:\n- Quiet, professional, and thorough\n- Writes maintainable code\n- Only suggests improvements when asked\n- Minimal communication until work is complete\n\nCONSTRAINTS:\n- Avoid security-sensitive features without review\n- Stay within project architecture\n- Never go beyond exact requirements\n\nWORKFLOW:\n- Follow established project conventions\n- Write clean, well-documented code\n- Complete tasks without unnecessary embellishment",
+ "allowedTools": ["Read", "Edit", "Write", "Bash", "Glob", "Grep"]
+ },
+ "Spellchuck": {
+ "name": "Spellchuck",
+ "description": "Magical grammar and spellcheck fairy who perfects documentation while maintaining technical accuracy.",
+ "model": "claude-opus-4-5",
+ "prompt": "You are Spellchuck, a magical being who ensures perfect prose while respecting technical accuracy.\n\nCAPABILITIES:\n⨠Fix grammar, spelling, punctuation, and awkward phrasing\n- Transform passive voice to active\n- Make writing concise and internet-optimized\n- Maintain technical terminology consistency\n\nPERSONALITY:\n- Delightfully helpful and encouraging\n- Diplomatic when suggesting corrections\n- Professional with whimsy\n\nCONSTRAINTS:\n- Never alter technical essence\n- Preserve code logic and structure\n- Only edit .md files, not code\n- No run-on sentences or filler words\n\nWORKFLOW:\n1. Identify linguistic improvements\n2. Cast clarity and correction spells\n3. Explain changes with magic\n4. Ensure consistent style",
+ "allowedTools": ["Read", "Edit", "Glob", "Grep"]
+ },
+ "Godmode": {
+ "name": "Godmode",
+ "description": "Gentle, battle-hardened DevOps superagent who treats infrastructure as a mystical realm.",
+ "model": "claude-opus-4-5",
+ "prompt": "You are Godmode, a gentle, battle-hardened DevOps superagent.\n\nCAPABILITIES:\n- Infrastructure design and management\n- CI/CD pipeline creation\n- Container orchestration (Docker, Kubernetes)\n- Cloud architecture (AWS, GCP, Azure, Cloudflare)\n- Security hardening\n- Monitoring and observability\n\nPERSONALITY:\n- Zen wise elder demeanor\n- Epic declarations about infrastructure\n- Treats DevOps as mystical realm\n- Patient but thorough\n\nCONSTRAINTS:\n- Always prioritize security\n- Never expose secrets in code\n- Follow least privilege principle\n- Document all infrastructure decisions\n\nWORKFLOW:\n1. Assess current infrastructure\n2. Design robust, scalable solutions\n3. Implement with security first\n4. Document thoroughly\n5. Provide runbooks for operations",
+ "allowedTools": ["Read", "Edit", "Write", "Bash", "Glob", "Grep", "WebFetch"]
+ },
+ "SageDaddy": {
+ "name": "SageDaddy",
+ "description": "Senior software architect with 20 years experience across startups, agencies, and enterprise.",
+ "model": "claude-opus-4-5",
+ "prompt": "You are a grumpy polyglot developer with 20 years experience across startups, agencies, and enterprise.\n\nCAPABILITIES:\n- Provide concise solution recommendations at different scales\n- Foresee integration difficulties from early decisions\n- Enforce TDD for business-critical features\n- Build proof-of-concepts to validate technology fit\n- Analyze codebases for improvements and future state\n\nPERSONALITY:\n- Quietly confident, logical, and resourceful\n- Great storyteller when beneficial\n- Knows how to deescalate executive tension\n- Focused on approach before coding\n\nCONSTRAINTS:\n- Never create files outside .ai/**\n- Always consider tradeoffs\n- Question requirements that seem off\n- Prioritize maintainability\n\nWORKFLOW:\n- Create architectural solutions using documented patterns\n- Use Mermaid for data models and UML\n- Provide multiple solution options with pros/cons\n- Focus on long-term maintainability",
+ "allowedTools": ["Read", "Edit", "Write", "Bash", "Glob", "Grep", "WebFetch"]
+ }
+ }
+}
diff --git a/.claudeignore b/.claudeignore
new file mode 100644
index 0000000..e26a287
--- /dev/null
+++ b/.claudeignore
@@ -0,0 +1,54 @@
+# Dependencies
+node_modules/
+vendor/
+.pnpm-store/
+
+# Build outputs
+dist/
+build/
+out/
+coverage/
+
+# Large files
+*.log
+*.min.js
+*.min.css
+
+# Sensitive
+.env*
+.env.local
+.env.*.local
+secrets/
+credentials/
+*.pem
+*.key
+
+# IDE specific
+.idea/
+.vscode/
+*.swp
+*.swo
+*~
+
+# OS files
+.DS_Store
+Thumbs.db
+
+# Claude/Cursor internal
+.claude/
+.cursor/
+
+# Testing
+coverage/
+.nyc_output/
+
+# Package managers
+package-lock.json
+yarn.lock
+pnpm-lock.yaml
+bun.lockb
+
+# Temporary files
+*.tmp
+*.temp
+.cache/
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 0000000..e2a5671
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,158 @@
+# AGENTS.md - AI Agent Configuration Registry
+
+This file serves as a discoverability layer for AI agent configurations, rules, and commands across both Cursor and Claude Code environments.
+
+## π Configuration Locations
+
+### Cursor IDE
+- **Rules**: `.cursor/rules/**/*.mdc`
+- **Agents**: `.cursor/modes.json`
+- **Commands**: `.cursor/commands.json` + `.cursor/commands/*.md`
+- **Skills**: `.cursor/skills/**/SKILL.md`
+- **MCP**: `.cursor/mcp.json`
+- **Ignore**: `.cursorignore`
+
+### Claude Code
+- **Rules**: `.claude/rules/**/*.md`
+- **Settings**: `.claude/settings.json` (agents, permissions, hooks, MCP)
+- **Commands**: `.claude/commands/*.md`
+- **Skills**: `.claude/skills/` (or embedded in CLAUDE.md)
+- **Hooks**: `.claude/hooks/` (scripts) + `settings.json` hooks section
+- **Ignore**: `.claudeignore`
+
+## π€ Available Agents
+
+| Agent | IDE | Description | File |
+|-------|-----|-------------|------|
+| **BasicBitch** | Cursor | Reliable, corporate developer | `.cursor/modes.json` |
+| **BasicBitch** | Claude | Reliable, corporate developer | `.claude/settings.json` |
+| **Spellchuck** | Cursor | Documentation/grammar specialist | `.cursor/modes.json` |
+| **Spellchuck** | Claude | Documentation/grammar specialist | `.claude/settings.json` |
+| **Godmode** | Cursor | DevOps/Infrastructure expert | `.cursor/modes.json` |
+| **Godmode** | Claude | DevOps/Infrastructure expert | `.claude/settings.json` |
+| **SageDaddy** | Cursor | Software architect (20 years) | `.cursor/modes.json` |
+| **SageDaddy** | Claude | Software architect (20 years) | `.claude/settings.json` |
+
+## π Rules Index
+
+### Core Rules
+| Rule | Cursor | Claude | Purpose |
+|------|--------|--------|---------|
+| create-rule-agent | `.cursor/rules/core/create-rule-agent.mdc` | `.claude/rules/core/create-rule-agent.md` | Rule creation standards |
+| create-update-agent | `.cursor/rules/core/create-update-agent.mdc` | `.claude/rules/core/create-update-agent.md` | Agent management |
+| security-scan | `.cursor/rules/core/security-scan-agent.mdc` | `.claude/rules/core/security-scan-agent.md` | Dependency security |
+| agent-communication | `.cursor/rules/core/agent-communication-always.mdc` | `.claude/rules/core/agent-communication-always.md` | Communication style |
+| dependency-analysis | `.cursor/rules/core/dependency-analysis-agent.mdc` | `.claude/rules/core/dependency-analysis-agent.md` | Pre-install checks |
+
+### Standards (Language/Framework)
+| Rule | Cursor | Claude | Stack |
+|------|--------|--------|-------|
+| typescript-standards | `.cursor/rules/standards/typescript-standards-auto.mdc` | `.claude/rules/standards/typescript-standards-auto.md` | TypeScript |
+| react-typescript | `.cursor/rules/standards/react-typescript-auto.mdc` | `.claude/rules/standards/react-typescript-auto.md` | React |
+| nextjs-react19 | `.cursor/rules/standards/nextjs-react19-auto.mdc` | `.claude/rules/standards/nextjs-react19-auto.md` | Next.js |
+| vue3-typescript | `.cursor/rules/standards/vue3-typescript-auto.mdc` | `.claude/rules/standards/vue3-typescript-auto.md` | Vue 3 |
+| cloudflare-workers | `.cursor/rules/standards/cloudflare-workers-auto.mdc` | `.claude/rules/standards/cloudflare-workers-auto.md` | CF Workers |
+| cloudflare-workers-hono | `.cursor/rules/standards/cloudflare-workers-hono-auto.mdc` | `.claude/rules/standards/cloudflare-workers-hono-auto.md` | CF + Hono |
+| mysql | `.cursor/rules/standards/mysql-auto.mdc` | `.claude/rules/standards/mysql-auto.md` | MySQL |
+| laravel-php | `.cursor/rules/standards/laravel-php-auto.mdc` | `.claude/rules/standards/laravel-php-auto.md` | Laravel |
+
+### Testing
+| Rule | Cursor | Claude | Purpose |
+|------|--------|--------|---------|
+| testing-pyramid | `.cursor/rules/test/testing-pyramid-agent.mdc` | `.claude/rules/test/testing-pyramid-agent.md` | Test distribution |
+| vitest-best-practices | `.cursor/rules/test/vitest-best-practices-auto.mdc` | `.claude/rules/test/vitest-best-practices-auto.md` | Vitest |
+| vitest-component-testing | `.cursor/rules/test/vitest-component-testing-auto.mdc` | `.claude/rules/test/vitest-component-testing-auto.md` | Component tests |
+| react-component-hook-testing | `.cursor/rules/test/react-component-hook-testing-auto.mdc` | `.claude/rules/test/react-component-hook-testing-auto.md` | React hooks |
+| vue-test-utils | `.cursor/rules/test/vue-test-utils-auto.mdc` | `.claude/rules/test/vue-test-utils-auto.md` | Vue testing |
+| playwright | `.cursor/rules/test/playwright-agent.mdc` | `.claude/rules/test/playwright-agent.md` | E2E testing |
+
+### Utils (Release, Git, Workflow)
+| Rule | Cursor | Claude | Purpose |
+|------|--------|--------|---------|
+| console-vibes | `.cursor/rules/utils/console-vibes-auto.mdc` | `.claude/rules/utils/console-vibes-auto.md` | Console styling |
+| git-branch | `.cursor/rules/utils/git-branch-agent.mdc` | `.claude/rules/utils/git-branch-agent.md` | Branch management |
+| release-validation | `.cursor/rules/utils/release-validation-auto.mdc` | `.claude/rules/utils/release-validation-auto.md` | Release checks |
+| release-commit-analysis | `.cursor/rules/utils/release-commit-analysis-auto.mdc` | `.claude/rules/utils/release-commit-analysis-auto.md` | Commit analysis |
+| release-package-version | `.cursor/rules/utils/release-package-version-auto.mdc` | `.claude/rules/utils/release-package-version-auto.md` | Version bump |
+| release-git-tags | `.cursor/rules/utils/release-git-tags-auto.mdc` | `.claude/rules/utils/release-git-tags-auto.md` | Tag management |
+| release-changelog | `.cursor/rules/utils/release-changelog-auto.mdc` | `.claude/rules/utils/release-changelog-auto.md` | Changelog gen |
+| changelog-generator | `.cursor/rules/utils/changelog-generator-manual.mdc` | `.claude/rules/utils/changelog-generator-manual.md` | Manual changelog |
+
+## β‘ Commands
+
+### Workflow Commands
+| Command | Cursor | Claude | Description |
+|---------|--------|--------|-------------|
+| /dev | `.cursor/rules/workflows/dev-workflow.mdc` | `.claude/commands/dev.md` | Implement features |
+| /spike | `.cursor/rules/workflows/dev-spike.mdc` | `.claude/commands/spike.md` | Technical investigation |
+| /story | `.cursor/rules/workflows/pm-story.mdc` | `.claude/commands/story.md` | User stories |
+
+### Git Commands
+| Command | Cursor | Claude | Description |
+|---------|--------|--------|-------------|
+| /commit | `.cursor/rules/utils/git-commit-push-agent.mdc` | `.claude/commands/commit.md` | Conventional commits |
+| /branch | `.cursor/rules/utils/git-branch-agent.mdc` | `.claude/commands/branch.md` | Branch management |
+
+### Release Commands
+| Command | Cursor | Claude | Description |
+|---------|--------|--------|-------------|
+| /changelog | `.cursor/commands/changelog.md` | `.claude/commands/changelog.md` | Generate changelog |
+| /version | `.cursor/rules/utils/release-package-version-auto.mdc` | `.claude/commands/version.md` | Version bump |
+| /tag | `.cursor/rules/utils/release-git-tags-auto.mdc` | `.claude/commands/tag.md` | Git tags |
+| /validate-release | `.cursor/rules/utils/release-validation-auto.mdc` | `.claude/commands/validate-release.md` | Release validation |
+| /analyze-commits | `.cursor/rules/utils/release-commit-analysis-auto.mdc` | `.claude/commands/analyze-commits.md` | Commit analysis |
+
+### Analysis Commands
+| Command | Cursor | Claude | Description |
+|---------|--------|--------|-------------|
+| /architecture | `.cursor/commands/architecture.md` | `.claude/commands/architecture.md` | Architecture solutions |
+| /deps | `.cursor/rules/core/dependency-analysis-agent.mdc` | `.claude/commands/deps.md` | Dependency analysis |
+| /security | `.cursor/rules/core/security-scan-agent.mdc` | `.claude/commands/security.md` | Security scan |
+| /witness | `.cursor/rules/core/fair-witness-agent.mdc` | `.claude/commands/witness.md` | Fair Witness analysis |
+
+### Code Commands
+| Command | Cursor | Claude | Description |
+|---------|--------|--------|-------------|
+| /refactor | `.cursor/rules/utils/refactor-agent.mdc` | `.claude/commands/refactor.md` | Code refactoring |
+| /testing-pyramid | `.cursor/commands/testing-pyramid.md` | `.claude/commands/testing-pyramid.md` | Test analysis |
+
+## πͺ Hooks (Claude Only)
+
+Located in `.claude/hooks/` and configured in `.claude/settings.json`:
+
+| Hook | Script | Trigger | Purpose |
+|------|--------|---------|---------|
+| auto-format | `.claude/hooks/auto-format.sh` | PostToolUse (Write/Edit) | Run Prettier |
+| security-block | `.claude/hooks/security-block.sh` | PreToolUse (Bash) | Block dangerous commands |
+| type-check | `.claude/hooks/type-check.sh` | PostToolUse (Write/Edit) | TypeScript checks |
+| test-on-save | `.claude/hooks/test-on-save.sh` | PostToolUse (Write/Edit) | Run affected tests |
+| commit-lint | `.claude/hooks/commit-lint.sh` | PreToolUse (Bash) | Validate commit format |
+| audit-log | `.claude/hooks/audit-log.sh` | PostToolUse (Bash) | Log all commands |
+
+## π MCP Servers
+
+Both IDEs use the same MCP protocol:
+
+**Cursor**: `.cursor/mcp.json`
+**Claude**: `.claude/settings.json` β `mcpServers` section
+
+## π Quick Reference
+
+**Install for Cursor:**
+```bash
+npx @usrrname/cursorrules --ide cursor
+```
+
+**Install for Claude:**
+```bash
+npx @usrrname/cursorrules --ide claude
+```
+
+**Install for both:**
+```bash
+npx @usrrname/cursorrules --ide both
+```
+
+---
+
+*This file is auto-generated. For updates, see the CLI tool or modify source files in `.cursor/` and `.claude/` directories.*
diff --git a/cli/transformers/cursor-to-claude.mjs b/cli/transformers/cursor-to-claude.mjs
new file mode 100755
index 0000000..a198452
--- /dev/null
+++ b/cli/transformers/cursor-to-claude.mjs
@@ -0,0 +1,160 @@
+#!/usr/bin/env node
+/**
+ * Cursor to Claude Rule Transformer
+ *
+ * Transforms Cursor .mdc rule files (with YAML frontmatter) to Claude-compatible .md format.
+ *
+ * Usage:
+ * node transformers/cursor-to-claude.mjs
+ * node transformers/cursor-to-claude.mjs --batch
+ */
+
+import { readFileSync, writeFileSync, readdirSync, statSync, mkdirSync } from 'fs';
+import { join, dirname, basename, extname } from 'path';
+
+/**
+ * Parse YAML frontmatter from markdown content
+ * @param {string} content - File content
+ * @returns {{frontmatter: Record, body: string}}
+ */
+function parseFrontmatter(content) {
+ const match = content.match(/^---\s*\n([\s\S]*?)\n---\s*\n([\s\S]*)$/);
+
+ if (!match) {
+ return { frontmatter: {}, body: content };
+ }
+
+ const frontmatterText = match[1];
+ const body = match[2];
+ /** @type {Record} */
+ const frontmatter = {};
+
+ frontmatterText.split('\n').forEach(line => {
+ const colonIndex = line.indexOf(':');
+ if (colonIndex > 0) {
+ const key = line.slice(0, colonIndex).trim();
+ const value = line.slice(colonIndex + 1).trim();
+ frontmatter[key] = value;
+ }
+ });
+
+ return { frontmatter, body };
+}
+
+/**
+ * Transform a single rule file
+ * @param {string} mdcContent - Cursor .mdc file content
+ * @returns {string} - Claude .md file content
+ */
+export function transformRule(mdcContent) {
+ const { frontmatter, body } = parseFrontmatter(mdcContent);
+
+ let claudeContent = '';
+
+ // Add header with metadata
+ if (frontmatter.description) {
+ claudeContent += `# ${frontmatter.description.split('.')[0]}\n\n`;
+ claudeContent += `## Description\n${frontmatter.description}\n\n`;
+ }
+
+ // Add applicability section from frontmatter
+ if (frontmatter.globs || frontmatter.alwaysApply !== undefined) {
+ claudeContent += `## Applicability\n`;
+ if (frontmatter.globs) {
+ claudeContent += `- **Files:** \`${frontmatter.globs}\`\n`;
+ }
+ if (frontmatter.alwaysApply) {
+ claudeContent += `- **Always Apply:** ${frontmatter.alwaysApply}\n`;
+ }
+ claudeContent += '\n';
+ }
+
+ // Process the body - extract critical rules
+ const criticalRulesMatch = body.match(/## Critical Rules\s*\n([\s\S]*?)(?=\n## |\n|$)/);
+ if (criticalRulesMatch) {
+ claudeContent += `## Rules\n${criticalRulesMatch[1].trim()}\n\n`;
+ }
+
+ // Process rule XML blocks
+ const ruleBlocks = body.matchAll(/[\s\S]*?<\/rule>/g);
+ for (const block of ruleBlocks) {
+ const ruleXml = block[0];
+ const nameMatch = ruleXml.match(/\s*\nname:\s*(.+)/);
+ const descMatch = ruleXml.match(/description:\s*(.+)/);
+
+ if (nameMatch) {
+ claudeContent += `### ${nameMatch[1].trim()}\n\n`;
+ if (descMatch) {
+ claudeContent += `${descMatch[1].trim()}\n\n`;
+ }
+
+ // Extract actions
+ const actionsMatch = ruleXml.match(/actions:\s*\n([\s\S]*?)(?=\n \w+:|$)/);
+ if (actionsMatch) {
+ claudeContent += `**Actions:**\n${actionsMatch[1]}\n\n`;
+ }
+ }
+ }
+
+ // Add remaining content (examples, tests, etc.)
+ const remainingContent = body
+ .replace(/---[\s\S]*?---\s*\n/, '')
+ .replace(/## Critical Rules\s*\n[\s\S]*?(?=\n## |\n|$)/, '')
+ .replace(/[\s\S]*?<\/rule>/g, '')
+ .trim();
+
+ if (remainingContent) {
+ claudeContent += `## Additional Information\n${remainingContent}\n`;
+ }
+
+ return claudeContent.trim();
+}
+
+/**
+ * Transform a directory of rules
+ * @param {string} inputDir - Directory containing .mdc files
+ * @param {string} outputDir - Directory for .md files
+ */
+export function transformDirectory(inputDir, outputDir) {
+ const entries = readdirSync(inputDir, { withFileTypes: true });
+
+ for (const entry of entries) {
+ const inputPath = join(inputDir, entry.name);
+
+ if (entry.isDirectory()) {
+ const outputSubdir = join(outputDir, entry.name);
+ mkdirSync(outputSubdir, { recursive: true });
+ transformDirectory(inputPath, outputSubdir);
+ } else if (entry.isFile() && extname(entry.name) === '.mdc') {
+ const mdcContent = readFileSync(inputPath, 'utf-8');
+ const mdContent = transformRule(mdcContent);
+ const outputPath = join(outputDir, basename(entry.name, '.mdc') + '.md');
+ writeFileSync(outputPath, mdContent);
+ console.log(`β Transformed: ${entry.name} β ${basename(outputPath)}`);
+ }
+ }
+}
+
+// CLI usage
+if (process.argv[1] === new URL(import.meta.url).pathname) {
+ const args = process.argv.slice(2);
+
+ if (args.length === 0) {
+ console.log('Usage: node cursor-to-claude.mjs ');
+ console.log(' node cursor-to-claude.mjs --batch ');
+ process.exit(1);
+ }
+
+ if (args[0] === '--batch') {
+ const [, inputDir, outputDir] = args;
+ mkdirSync(outputDir, { recursive: true });
+ transformDirectory(inputDir, outputDir);
+ console.log('\nβ
Batch transformation complete!');
+ } else {
+ const [inputFile, outputFile] = args;
+ const mdcContent = readFileSync(inputFile, 'utf-8');
+ const mdContent = transformRule(mdcContent);
+ writeFileSync(outputFile, mdContent);
+ console.log(`β
Transformed: ${inputFile} β ${outputFile}`);
+ }
+}
diff --git a/renovate.json b/renovate.json
index 5db72dd..9771d78 100644
--- a/renovate.json
+++ b/renovate.json
@@ -1,6 +1,53 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
- "config:recommended"
- ]
+ "config:recommended",
+ ":semanticCommits",
+ ":semanticCommitTypeAll(chore)",
+ ":automergePatch",
+ ":automergeMinor",
+ ":automergeBranch",
+ "helpers:pinGitHubActionDigests"
+ ],
+ "schedule": [
+ "before 9am on monday"
+ ],
+ "timezone": "America/New_York",
+ "prConcurrentLimit": 3,
+ "prHourlyLimit": 2,
+  "minimumReleaseAge": "3 days",
+ "dependencyDashboard": true,
+ "dependencyDashboardAutoclose": true,
+ "vulnerabilityAlerts": {
+ "enabled": true,
+ "schedule": ["at any time"]
+ },
+ "packageRules": [
+ {
+ "matchUpdateTypes": ["major"],
+ "enabled": true,
+ "automerge": false,
+ "description": "Disable auto-merge for major updates - require manual review"
+ },
+ {
+ "matchDepTypes": ["devDependencies"],
+ "automerge": true,
+ "description": "Auto-merge dev dependency updates"
+ },
+ {
+ "matchPackageNames": ["node"],
+ "enabled": false,
+ "description": "Node version updates require manual coordination"
+ },
+ {
+ "matchManagers": ["github-actions"],
+ "pinDigests": true,
+ "description": "Pin GitHub Actions to commit SHAs for security"
+ }
+ ],
+ "lockFileMaintenance": {
+ "enabled": true,
+ "automerge": true,
+ "schedule": ["before 9am on the first day of the month"]
+ }
}
From 0d7d7ee42b78aa01592873e0455d6cd88978b124 Mon Sep 17 00:00:00 2001
From: Jen Chan <6406037+usrrname@users.noreply.github.com>
Date: Sun, 5 Apr 2026 10:23:42 -0400
Subject: [PATCH 2/5] feat(cli): Phase 2 - CLI enhancements for IDE selection
Add comprehensive IDE selection and enhanced CLI features:
- Add --ide flag for choosing cursor|claude|both
- Add interactive IDE selection prompt when --ide not specified
- Add --dry-run flag to preview downloads without writing files
- Add --validate flag for post-download file validation
- Create ide-selection.mjs utility for IDE choice handling
- Update download-files.mjs to support IDE-specific downloads
- Update commands.mjs with IDE-aware interactive mode
- Update CLI help text with examples for all new flags
- Support downloading both Cursor and Claude configurations
Refs: Phase 2 implementation plan
---
cli/commands.mjs | 38 ++++++++--
cli/index.mjs | 37 ++++++----
cli/utils/download-files.mjs | 138 ++++++++++++++++++++++++++++++-----
cli/utils/ide-selection.mjs | 69 ++++++++++++++++++
4 files changed, 245 insertions(+), 37 deletions(-)
create mode 100644 cli/utils/ide-selection.mjs
diff --git a/cli/commands.mjs b/cli/commands.mjs
index fc81f90..3a6592c 100644
--- a/cli/commands.mjs
+++ b/cli/commands.mjs
@@ -8,6 +8,7 @@ import { downloadFiles, downloadSelectedFiles } from './utils/download-files.mjs
import { findPackageRoot } from './utils/find-package-root.mjs';
import { interactiveCategorySelection, prepareMenu, scanAvailableRules, selectRules } from './utils/interactive-menu.mjs';
import { validateDirname } from './utils/validate-dirname.mjs';
+import { selectIde } from './utils/ide-selection.mjs';
/** fallback for Node < 21 */
const styleText = util.styleText ?? ((_, text) => text);
@@ -32,23 +33,27 @@ export const help = () => {
/** @param {string} key */
const getFlagDescription = (key) => {
switch (key) {
+ case 'dryRun':
+ return styleText('green', 'preview what would be downloaded');
case 'flat':
return styleText('green', 'install all rules without parent directory');
case 'help':
return styleText('green', 'help instructions');
+ case 'ide':
+ return styleText('green', 'target IDE: cursor|claude|both');
case 'interactive':
return styleText('green', 'select the rules you want');
case 'output':
return styleText('green', 'set output directory (Default: .cursor/)');
+ case 'validate':
+ return styleText('green', 'validate downloaded files');
case 'version':
return styleText('green', 'show package version');
- case 'interactive':
- return styleText('green', 'select the rules you want');
}
}
const tableContent = Object.entries(config?.options || {}).map(([key, value]) => {
return {
- flag: `-${value?.short}`,
+ flag: value?.short ? `-${value?.short}` : '',
name: `--${key}`,
description: getFlagDescription(key),
type: value?.type,
@@ -70,6 +75,11 @@ ${usage} ${options}
${tableContent.map(item => `${item.name} ${item.flag} ${item.type} ${item.description} ${item.default}`).join('\n')}
+Examples:
+ npx @usrrname/cursorrules --ide cursor --flat
+ npx @usrrname/cursorrules --ide claude --dry-run
+ npx @usrrname/cursorrules --ide both --output ./config
+
${repoLink}
`);
}
@@ -83,6 +93,12 @@ export const version = () => console.log(`${packageJson?.name} v${packageJson?.v
*/
export const interactiveMode = async (values) => {
console.log('π― Starting interactive mode...');
+
+ // Prompt for IDE selection first
+ if (!values.ide) {
+ values.ide = await selectIde();
+ }
+
const packageRoot = findPackageRoot(__dirname, '@usrrname/cursorrules');
const sourceRulesBasePath = resolve(packageRoot, '.cursor', 'rules');
const rules = await scanAvailableRules(sourceRulesBasePath);
@@ -106,7 +122,7 @@ export const interactiveMode = async (values) => {
.filter(rule => rule.selected);
const outputDir = values?.output?.toString() ?? defaultOutput;
if (allSelectedRules.length > 0)
- return await downloadSelectedFiles(outputDir, allSelectedRules);
+ return await downloadSelectedFiles(outputDir, allSelectedRules, values);
else
console.log('β οΈ No rules selected');
break;
@@ -120,15 +136,25 @@ export const interactiveMode = async (values) => {
/**
* @param {string} outputDir - output directory
+ * @param {Object} [values] - CLI options
+ * @param {string} [values.ide] - Target IDE
+ * @param {boolean} [values.dryRun] - Dry run flag
+ * @param {boolean} [values.validate] - Validate flag
* @returns {Promise}
*/
-export const output = async (outputDir) => {
+export const output = async (outputDir, values = {}) => {
if (!outputDir.trim()) {
console.error('β ERROR: Output directory cannot be empty.');
process.exit(1);
}
+
+ // Prompt for IDE if not specified
+ if (!values.ide) {
+ values.ide = await selectIde();
+ }
+
const outputValue = await validateDirname(outputDir);
- await downloadFiles(outputValue);
+ await downloadFiles(outputValue, values);
}
diff --git a/cli/index.mjs b/cli/index.mjs
index 0f9caf6..407549d 100755
--- a/cli/index.mjs
+++ b/cli/index.mjs
@@ -15,6 +15,10 @@ export const config = {
args: process.argv.slice(2),
tokens: true,
options: {
+ dryRun: {
+ type: 'boolean',
+ default: false,
+ },
flat: {
type: 'boolean',
short: 'f',
@@ -24,6 +28,9 @@ export const config = {
short: 'h',
default: false,
},
+ ide: {
+ type: 'string',
+ },
interactive: {
type: 'boolean',
short: 'i',
@@ -33,6 +40,10 @@ export const config = {
type: 'string',
short: 'o',
},
+ validate: {
+ type: 'boolean',
+ default: false,
+ },
version: {
type: 'boolean',
short: 'v',
@@ -47,34 +58,34 @@ async function main() {
const { values } = parseArgs(config);
const flags = Object.keys(config.options || {});
- const allowedKeys = flags.filter(flag => flag === 'output')[0]
+ const allowedKeys = flags.filter(flag => flag === 'output' || flag === 'ide')[0];
for (let key in values) {
-
- /**
- * prevent unknown flags from being used
- * prevent arguments without values
- * @param {string} key */
if (!allowedKeys.includes(key) && !values[key]) continue;
switch (key) {
- case 'version':
+ case 'version': {
await version();
break;
- case 'help':
+ }
+ case 'help': {
await help();
break;
- case 'interactive':
+ }
+ case 'interactive': {
await interactiveMode(values);
process.exit(0);
- case 'output':
+ }
+ case 'output': {
const outputDir = values[key]?.toString() ?? process.cwd();
- await output(outputDir);
+ await output(outputDir, values);
break;
- case 'flat':
+ }
+ case 'flat': {
const cursorRulesPath = process.cwd();
- await downloadFiles(cursorRulesPath);
+ await downloadFiles(cursorRulesPath, values);
break;
+ }
}
}
}
diff --git a/cli/utils/download-files.mjs b/cli/utils/download-files.mjs
index c4bd385..fc91b0b 100644
--- a/cli/utils/download-files.mjs
+++ b/cli/utils/download-files.mjs
@@ -1,4 +1,5 @@
-import { copyFile, cp, mkdir } from 'node:fs/promises';
+import { copyFile, cp, mkdir, readFile, access } from 'node:fs/promises';
+import { createHash } from 'node:crypto';
import { dirname, join, resolve } from 'node:path';
import { fileURLToPath } from 'node:url';
import { help } from '../commands.mjs';
@@ -6,6 +7,7 @@ import { detectNpxSandbox } from './detect-npx.mjs';
import { findFolderUp } from './find-folder-up.mjs';
import { findPackageRoot } from './find-package-root.mjs';
import { validateDirname } from './validate-dirname.mjs';
+import { validateIde, getIdeDisplayName } from './ide-selection.mjs';
const detection = detectNpxSandbox();
const __dirname = dirname(fileURLToPath(import.meta.url));
@@ -18,7 +20,6 @@ if (detection.isNpxSandbox || !sourceRulesBasePath) {
sourceRulesBasePath = resolve(packageRoot, '.cursor', 'rules');
}
-
// only for local development and testing
if (process.env.CI || ['development', 'test'].includes(process.env.NODE_ENV ?? '')) {
// running inside repo / globally installed copy β locate nearest .cursor
@@ -30,27 +31,117 @@ if (process.env.CI || ['development', 'test'].includes(process.env.NODE_ENV ?? '
sourceRulesBasePath = resolve(found, 'rules');
}
+/**
+ * Generate SHA-256 checksum of file content
+ * @param {string} content
+ * @returns {string}
+ */
+function generateChecksum(content) {
+ return createHash('sha256').update(content).digest('hex');
+}
+
+/**
+ * Validate a downloaded file
+ * @param {string} filePath
+ * @param {string} expectedContent
+ * @returns {Promise<{valid: boolean, error?: string}>}
+ */
+async function validateFile(filePath, expectedContent) {
+ try {
+ await access(filePath);
+ const content = await readFile(filePath, 'utf-8');
+ if (content !== expectedContent) {
+ return { valid: false, error: 'Content mismatch' };
+ }
+ return { valid: true };
+ } catch (err) {
+ return { valid: false, error: err.message };
+ }
+}
+
+/**
+ * Get source paths based on IDE selection
+ * @param {string} ide - 'cursor', 'claude', or 'both'
+ * @returns {Array<{source: string, dest: string, type: string}>}
+ */
+function getSourcePaths(ide) {
+ const packageRoot = findPackageRoot(__dirname, '@usrrname/cursorrules');
+ const paths = [];
+
+ if (ide === 'cursor' || ide === 'both') {
+ paths.push({
+ source: resolve(packageRoot, '.cursor'),
+ dest: '.cursor',
+ type: 'cursor'
+ });
+ }
+
+ if (ide === 'claude' || ide === 'both') {
+ paths.push({
+ source: resolve(packageRoot, '.claude'),
+ dest: '.claude',
+ type: 'claude'
+ });
+ }
+
+ return paths;
+}
+
/**
* @param {string} dirname - output folder relative path
+ * @param {Object} [options] - CLI options
+ * @param {string} [options.ide] - Target IDE: 'cursor', 'claude', or 'both'
+ * @param {boolean} [options.dryRun] - Preview only, don't download
+ * @param {boolean} [options.validate] - Validate downloaded files
*/
-export const downloadFiles = async (dirname) => {
+export const downloadFiles = async (dirname, options = {}) => {
if (!dirname) throw new Error('Output directory is required');
-
- console.info('π₯ Downloading all rules...');
+
+ const ide = validateIde(options.ide ?? '') || 'cursor';
+ const dryRun = options.dryRun || false;
+ const validate = options.validate || false;
+
+ if (dryRun) {
+ console.log(`π DRY RUN: Previewing what would be downloaded for ${getIdeDisplayName(ide)}...\n`);
+ } else {
+ console.info(`π₯ Downloading ${getIdeDisplayName(ide)} configuration...`);
+ }
const outputDir = await validateDirname(dirname);
- if (!sourceRulesBasePath) {
- console.error(`β Error: sourceRulesBasePath is not defined`);
- process.exit(1);
+ const sourcePaths = getSourcePaths(ide);
+
+ if (dryRun) {
+ console.log(`Output directory: ${outputDir}`);
+ console.log('\nFiles to be downloaded:\n');
+ for (const { source, dest, type } of sourcePaths) {
+ console.log(` π ${type}/ β ${dest}/`);
+ }
+ console.log('\nβ
Dry run complete. No files were downloaded.');
+ return;
}
+
try {
- // copy whole folder
- await cp(
- sourceRulesBasePath,
- outputDir,
- { recursive: true },
- )
- console.log(`β
Success! All rules saved to ${outputDir}`);
+ for (const { source, dest, type } of sourcePaths) {
+ const destPath = join(outputDir, dest);
+ console.log(` π₯ Copying ${type} configuration...`);
+
+ await cp(source, destPath, { recursive: true });
+
+ if (validate) {
+ console.log(` π Validating ${type} configuration...`);
+ // Basic validation - ensure key files exist
+ if (type === 'claude') {
+ await access(join(destPath, 'settings.json'));
+ }
+ }
+ }
+
+ console.log(`β
Success! ${getIdeDisplayName(ide)} configuration saved to ${outputDir}`);
+
+ if (ide === 'both') {
+ console.log(' - Cursor: .cursor/');
+ console.log(' - Claude Code: .claude/');
+ }
} catch (err) {
console.error(`β Error: ${err.message}`, err);
process.exit(1);
@@ -61,8 +152,10 @@ export const downloadFiles = async (dirname) => {
* Download selected rules only
* @param {string} folderName - output folder relative path
* @param {Array<{category: string, displayName: string, selected: boolean, name: string, path: string, fullPath: string}>} selectedRules - Array of selected rule objects
+ * @param {Object} [options] - CLI options
+ * @param {string} [options.ide] - Target IDE: 'cursor', 'claude', or 'both'
*/
-export const downloadSelectedFiles = async (folderName, selectedRules) => {
+export const downloadSelectedFiles = async (folderName, selectedRules, options = {}) => {
if (!folderName) throw new Error('Output directory is required');
if (!selectedRules || selectedRules.length === 0) {
@@ -71,14 +164,23 @@ export const downloadSelectedFiles = async (folderName, selectedRules) => {
return;
}
- console.info('π₯ Downloading selected rules...');
+ const ide = validateIde(options.ide ?? '') || 'cursor';
+
+ console.info(`π₯ Downloading selected rules for ${getIdeDisplayName(ide)}...`);
const outputDir = await validateDirname(folderName)
try {
// Create output directory structure
await mkdir(outputDir, { recursive: true });
- await mkdir(join(outputDir, '.cursor'), { recursive: true });
+
+ if (ide === 'cursor' || ide === 'both') {
+ await mkdir(join(outputDir, '.cursor'), { recursive: true });
+ }
+
+ if (ide === 'claude' || ide === 'both') {
+ await mkdir(join(outputDir, '.claude'), { recursive: true });
+ }
// Copy selected rules
for (const rule of selectedRules) {
diff --git a/cli/utils/ide-selection.mjs b/cli/utils/ide-selection.mjs
new file mode 100644
index 0000000..6b71277
--- /dev/null
+++ b/cli/utils/ide-selection.mjs
@@ -0,0 +1,69 @@
+#!/usr/bin/env node
+import * as readline from 'node:readline';
+import { stdin, stdout } from 'node:process';
+
+/**
+ * Interactive IDE selection prompt
+ * @returns {Promise<'cursor'|'claude'|'both'>}
+ */
+export async function selectIde() {
+ const rl = readline.createInterface({
+ input: stdin,
+ output: stdout
+ });
+
+ console.log('\nββββββββββββββββββββββββββββββββββββββββ');
+ console.log('β Which AI IDE are you using? β');
+ console.log('β βββββββββββββββββββββββββββββββββββββββ£');
+ console.log('β [1] Cursor β');
+ console.log('β [2] Claude Code β');
+ console.log('β [3] Both (dual setup) β');
+ console.log('ββββββββββββββββββββββββββββββββββββββββ\n');
+
+ return new Promise((resolve) => {
+ rl.question('Enter your choice (1-3): ', (answer) => {
+ rl.close();
+ const choice = answer.trim();
+ switch (choice) {
+ case '1':
+ console.log('β
Selected: Cursor');
+ resolve('cursor');
+ break;
+ case '2':
+ console.log('β
Selected: Claude Code');
+ resolve('claude');
+ break;
+ case '3':
+ console.log('β
Selected: Both');
+ resolve('both');
+ break;
+ default:
+ console.log('β οΈ Invalid choice. Defaulting to Cursor.');
+ resolve('cursor');
+ }
+ });
+ });
+}
+
+/**
+ * Validate IDE choice
+ * @param {string} ide
+ * @returns {string|null}
+ */
+export function validateIde(ide) {
+ const validIdes = ['cursor', 'claude', 'both'];
+ const normalized = ide?.toLowerCase().trim();
+ return validIdes.includes(normalized) ? normalized : null;
+}
+
+/**
+ * Get IDE display name
+ * @param {string} ide
+ * @returns {string}
+ */
+export function getIdeDisplayName(ide) {
+ if (ide === 'cursor') return 'Cursor';
+ if (ide === 'claude') return 'Claude Code';
+ if (ide === 'both') return 'Both';
+ return ide;
+}
From d26d7aa9d8e682589c6d09a488da47db73890f59 Mon Sep 17 00:00:00 2001
From: Jen Chan <6406037+usrrname@users.noreply.github.com>
Date: Sun, 5 Apr 2026 10:26:19 -0400
Subject: [PATCH 3/5] feat(claude): Phase 3 - Content migration for Claude Code
Complete content migration from Cursor to Claude Code format:
- Transform 6 test rules (playwright, vitest, testing-pyramid, etc.)
- Transform 9 utils rules (git-branch, release-*, changelog-*)
- Create .claude/commands/ with 6 slash commands:
- /dev, /refactor, /version, /changelog, /commit, /testing-pyramid
- All rules converted from .mdc (Cursor) to .md (Claude) format
- Commands include usage guidelines, workflow steps, and agent recommendations
Refs: Phase 3 implementation plan
---
.claude/commands/changelog.md | 51 +++
.claude/commands/commit.md | 46 +++
.claude/commands/dev.md | 44 +++
.claude/commands/refactor.md | 45 +++
.claude/commands/testing-pyramid.md | 49 +++
.claude/commands/version.md | 47 +++
.claude/rules/test/playwright-agent.md | 250 +++++++++++++
.../test/react-component-hook-testing-auto.md | 117 +++++++
.claude/rules/test/testing-pyramid-agent.md | 176 ++++++++++
.../rules/test/vitest-best-practices-auto.md | 83 +++++
.../test/vitest-component-testing-auto.md | 88 +++++
.claude/rules/test/vue-test-utils-auto.md | 263 ++++++++++++++
.../rules/utils/changelog-generator-manual.md | 327 ++++++++++++++++++
.claude/rules/utils/console-vibes-auto.md | 44 +++
.claude/rules/utils/git-branch-agent.md | 67 ++++
.claude/rules/utils/release-changelog-auto.md | 155 +++++++++
.../utils/release-commit-analysis-auto.md | 202 +++++++++++
.claude/rules/utils/release-git-tags-auto.md | 224 ++++++++++++
.../utils/release-package-version-auto.md | 158 +++++++++
.../rules/utils/release-validation-auto.md | 182 ++++++++++
.../rules/utils/release-version-bump-auto.md | 171 +++++++++
21 files changed, 2789 insertions(+)
create mode 100644 .claude/commands/changelog.md
create mode 100644 .claude/commands/commit.md
create mode 100644 .claude/commands/dev.md
create mode 100644 .claude/commands/refactor.md
create mode 100644 .claude/commands/testing-pyramid.md
create mode 100644 .claude/commands/version.md
create mode 100644 .claude/rules/test/playwright-agent.md
create mode 100644 .claude/rules/test/react-component-hook-testing-auto.md
create mode 100644 .claude/rules/test/testing-pyramid-agent.md
create mode 100644 .claude/rules/test/vitest-best-practices-auto.md
create mode 100644 .claude/rules/test/vitest-component-testing-auto.md
create mode 100644 .claude/rules/test/vue-test-utils-auto.md
create mode 100644 .claude/rules/utils/changelog-generator-manual.md
create mode 100644 .claude/rules/utils/console-vibes-auto.md
create mode 100644 .claude/rules/utils/git-branch-agent.md
create mode 100644 .claude/rules/utils/release-changelog-auto.md
create mode 100644 .claude/rules/utils/release-commit-analysis-auto.md
create mode 100644 .claude/rules/utils/release-git-tags-auto.md
create mode 100644 .claude/rules/utils/release-package-version-auto.md
create mode 100644 .claude/rules/utils/release-validation-auto.md
create mode 100644 .claude/rules/utils/release-version-bump-auto.md
diff --git a/.claude/commands/changelog.md b/.claude/commands/changelog.md
new file mode 100644
index 0000000..b21c782
--- /dev/null
+++ b/.claude/commands/changelog.md
@@ -0,0 +1,51 @@
+# /changelog
+
+Generate changelog from git history and tags.
+
+## Usage
+
+```
+/changelog [--all|--since-tag ]
+```
+
+## When to Use
+
+Use this command when:
+- Preparing a release
+- Documenting changes for users
+- Maintaining project history
+
+## Format
+
+Follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format:
+
+```markdown
+## [Unreleased]
+
+### Added
+- New features
+
+### Changed
+- Changes in existing functionality
+
+### Deprecated
+- Soon-to-be removed features
+
+### Removed
+- Now removed features
+
+### Fixed
+- Bug fixes
+
+### Security
+- Security improvements
+```
+
+## Agent
+
+Uses **Spellchuck** for perfect prose and formatting.
+
+## Related
+
+- `/version` - Bump version
+- `/tag` - Create release tags
diff --git a/.claude/commands/commit.md b/.claude/commands/commit.md
new file mode 100644
index 0000000..cf758c1
--- /dev/null
+++ b/.claude/commands/commit.md
@@ -0,0 +1,46 @@
+# /commit
+
+Create conventional commits with AI assistance.
+
+## Usage
+
+```
+/commit [message-or-description]
+```
+
+## When to Use
+
+Use this command when:
+- Ready to commit changes
+- Need help writing commit messages
+- Following conventional commit standards
+
+## Conventional Commits Format
+
+```
+():
+
+[optional body]
+
+[optional footer(s)]
+```
+
+### Types
+
+- **feat**: New feature
+- **fix**: Bug fix
+- **docs**: Documentation changes
+- **style**: Code style (formatting, semicolons, etc.)
+- **refactor**: Code refactoring
+- **perf**: Performance improvements
+- **test**: Adding or updating tests
+- **chore**: Build process or auxiliary tool changes
+
+## Agent
+
+Uses **BasicBitch** for straightforward commits.
+
+## Related
+
+- `/branch` - Branch management
+- `/analyze-commits` - Commit history analysis
diff --git a/.claude/commands/dev.md b/.claude/commands/dev.md
new file mode 100644
index 0000000..abe06a8
--- /dev/null
+++ b/.claude/commands/dev.md
@@ -0,0 +1,44 @@
+# /dev
+
+Implement features from user stories following the development workflow.
+
+## Usage
+
+```
+/dev [feature-description]
+```
+
+## When to Use
+
+Use this command when:
+- Implementing a new feature from a user story
+- Working on a task that has been planned and designed
+- Following the standard development workflow
+
+## Workflow
+
+1. **Read Context**: Review the user story, acceptance criteria, and any related documentation
+2. **Understand Requirements**: Clarify any ambiguities before coding
+3. **Plan Implementation**: Break down the work into manageable steps
+4. **Write Code**: Implement following project standards and best practices
+5. **Test**: Ensure the implementation meets acceptance criteria
+6. **Document**: Update relevant documentation
+
+## Best Practices
+
+- Follow the project's coding standards (TypeScript, React, Vue, etc.)
+- Write tests alongside implementation
+- Commit incrementally with conventional commit messages
+- Update the user story/task file as work progresses
+
+## Agent
+
+Uses the **BasicBitch** persona by default for reliable, no-nonsense development.
+Switch to **KawaiiSamurai** or **SageDaddy** if needed for specific tasks.
+
+## Related
+
+- `/story` - Create or validate user stories
+- `/spike` - Technical investigation
+- `/refactor` - Code refactoring
+- `/testing-pyramid` - Test analysis
diff --git a/.claude/commands/refactor.md b/.claude/commands/refactor.md
new file mode 100644
index 0000000..a6cb95d
--- /dev/null
+++ b/.claude/commands/refactor.md
@@ -0,0 +1,45 @@
+# /refactor
+
+Refactor code blocks with best practices and improved maintainability.
+
+## Usage
+
+```
+/refactor [file-path-or-code-block]
+```
+
+## When to Use
+
+Use this command when:
+- Code has become difficult to understand or maintain
+- You need to reduce technical debt
+- Performance improvements are needed
+- You want to apply design patterns
+- Duplication needs to be eliminated (DRY principle)
+
+## Principles
+
+1. **Preserve Behavior**: Refactoring should not change functionality
+2. **Improve Readability**: Code should be easier to understand
+3. **Reduce Complexity**: Simplify overly complex logic
+4. **Enhance Maintainability**: Make future changes easier
+5. **Follow Standards**: Adhere to language and framework conventions
+
+## Techniques
+
+- Extract functions/methods
+- Rename variables for clarity
+- Simplify conditional logic
+- Remove duplication
+- Improve data structures
+- Apply appropriate design patterns
+
+## Agent
+
+Uses **SageDaddy** persona for architectural insight and **BasicBitch** for implementation.
+
+## Related
+
+- `/dev` - Feature implementation
+- `/testing-pyramid` - Ensure tests remain valid after refactoring
+- `/spike` - Investigate before major refactoring
diff --git a/.claude/commands/testing-pyramid.md b/.claude/commands/testing-pyramid.md
new file mode 100644
index 0000000..bb49b37
--- /dev/null
+++ b/.claude/commands/testing-pyramid.md
@@ -0,0 +1,49 @@
+# /testing-pyramid
+
+Analyze test distribution and maintain testing pyramid principles.
+
+## Usage
+
+```
+/testing-pyramid [--analyze|--fix]
+```
+
+## When to Use
+
+Use this command when:
+- Reviewing test coverage
+- Adding new tests
+- Ensuring proper test distribution
+- Identifying testing anti-patterns
+
+## Testing Pyramid
+
+```
+ /\
+ / \ E2E Tests (few)
+ /____\
+ / \ Integration Tests
+/________\
+ Unit Tests (many)
+```
+
+### Distribution
+
+- **Unit Tests (70%)**: Fast, isolated, cheap
+- **Integration Tests (20%)**: Component interaction, slower
+- **E2E Tests (10%)**: Full flow, slowest, most expensive
+
+## Anti-Patterns to Avoid
+
+- **Testing Trophy**: Too many integration tests
+- **Ice Cream Cone**: Too many E2E tests
+- **Hourglass**: Missing integration tests
+
+## Agent
+
+Uses **qwoof** for quality-focused analysis.
+
+## Related
+
+- `/dev` - Feature implementation with proper tests
+- `/refactor` - Refactoring while maintaining test coverage
diff --git a/.claude/commands/version.md b/.claude/commands/version.md
new file mode 100644
index 0000000..c2fd5ca
--- /dev/null
+++ b/.claude/commands/version.md
@@ -0,0 +1,47 @@
+# /version
+
+Update package version with semantic versioning.
+
+## Usage
+
+```
+/version [major|minor|patch|custom]
+```
+
+## When to Use
+
+Use this command when:
+- Preparing a release
+- After completing features or fixes
+- Following conventional commit workflow
+
+## Semantic Versioning
+
+- **MAJOR**: Breaking changes (x.0.0)
+- **MINOR**: New features, backwards compatible (x.y.0)
+- **PATCH**: Bug fixes, backwards compatible (x.y.z)
+
+## Workflow
+
+1. Analyze commits since last tag
+2. Determine appropriate version bump
+3. Update package.json version
+4. Create git commit
+5. Optionally create git tag
+
+## Options
+
+- `major` - Breaking changes
+- `minor` - New features
+- `patch` - Bug fixes
+- `custom` - Specify custom version
+
+## Agent
+
+Uses **BasicBitch** for straightforward execution.
+
+## Related
+
+- `/tag` - Create git tags
+- `/changelog` - Generate changelog
+- `/analyze-commits` - Analyze commits for version bump
diff --git a/.claude/rules/test/playwright-agent.md b/.claude/rules/test/playwright-agent.md
new file mode 100644
index 0000000..28a535f
--- /dev/null
+++ b/.claude/rules/test/playwright-agent.md
@@ -0,0 +1,250 @@
+# Playwright end-to-end testing best practices
+
+## Description
+Playwright end-to-end testing best practices
+
+## Applicability
+- **Files:** `*.spec.ts`
+- **Always Apply:** false
+
+## Additional Information
+# Critical Rules
+
+- Use descriptive and meaningful test names that clearly describe the expected behavior.
+- Utilize Playwright fixtures (e.g., `test`, `page`, `expect`) to maintain test isolation and consistency.
+- Use `test.beforeEach` and `test.afterEach` for setup and teardown to ensure a clean state for each test.
+- Keep tests DRY (Don't Repeat Yourself) by extracting reusable logic into helper functions.
+- Use `page.getByTestId` whenever `data-testid` is defined on an element or container.
+- Use the recommended built-in and role-based locators (`page.getByRole`, `page.getByLabel`, `page.getByText`, `page.getByTitle`, etc.) over complex selectors.
+- Use `page.locator()` when it's not possible to use any of the above to locate an element.
+- Reuse Playwright locators by using variables or constants for commonly used elements.
+- Use the `playwright.config.ts` file for global configuration and environment setup.
+- Use built-in config objects like `devices` whenever possible.
+- Prefer to use web-first assertions (`toBeVisible`, `toHaveText`, etc.) whenever possible.
+- Use `expect` matchers for assertions (`toEqual`, `toContain`, `toBeTruthy`, `toHaveLength`, etc.) that can be used to assert any conditions and avoid using `assert` statements.
+- Avoid hardcoded timeouts.
+- Do not perform exhaustive testing. Stick to critical user paths.
+- Use `locator.waitFor()` or `page.waitForFunction()` with specific conditions or events to wait for elements or states.
+- Ensure tests run reliably in parallel without shared state conflicts.
+- Avoid commenting on the resulting code.
+- Add JSDoc comments to describe the purpose of helper functions and reusable logic.
+
+## 1. Use `@playwright/test`
+
+Leverage the official test runner for built-in fixtures, isolation, and web-first assertions. Avoid the low-level `playwright` library for E2E tests.
+
+❌ BAD: Using `playwright` directly
+
+```typescript
+import { chromium } from "playwright"
+// ... manual browser/context setup and teardown
+const browser = await chromium.launch()
+const page = await browser.newPage()
+// ...
+await browser.close()
+```
+
+✅ GOOD: Using `@playwright/test`
+
+```typescript
+import { test, expect } from "@playwright/test"
+test("should navigate to home", async ({ page }) => {
+ await page.goto("/")
+ await expect(page).toHaveTitle(/Home/)
+})
+```
+
+## 2. Prioritize Robust Locators
+
+Use Playwright's built-in Locators API, favoring user-facing attributes over brittle CSS selectors. This drastically improves test stability.
+
+❌ BAD: Fragile, implementation-dependent selectors
+
+```typescript
+await page.locator("div.container > ul > li:nth-child(2) > button").click()
+```
+
+✅ GOOD: Semantic, user-facing locators
+
+```typescript
+await page.getByRole("button", { name: "Add to Cart" }).click()
+await page.getByLabel("Username").fill("testuser")
+await page.getByTestId("product-item-123").click()
+```
+
+## 3. Embrace Web-First Assertions
+
+Playwright's `expect` assertions automatically retry until conditions are met, eliminating manual waits and flakiness. Never use `page.waitForTimeout()`.
+
+❌ BAD: Manual, flaky waits and generic assertions
+
+```typescript
+await page.waitForTimeout(2000) // 🚨 Flaky!
+const title = await page.title()
+assert.equal(title, "My Page") // 🚨 Not web-first
+```
+
+✅ GOOD: Reliable, auto-retrying assertions
+
+```typescript
+await expect(page).toHaveTitle(/My Page/)
+await expect(page.getByText("Welcome")).toBeVisible()
+await expect(page.getByRole("checkbox")).toBeChecked()
+```
+
+## 4. Implement the Page Object Model (POM)
+
+Encapsulate selectors and actions within dedicated classes. This improves readability, reusability, and maintainability.
+
+❌ BAD: Repeated selectors and logic across tests
+
+```typescript
+// test-login.spec.ts
+await page.getByLabel("Username").fill("user")
+await page.getByLabel("Password").fill("pass")
+await page.getByRole("button", { name: "Login" }).click()
+
+// test-profile.spec.ts
+// ... same login steps repeated ...
+```
+
+✅ GOOD: Centralized Page Object
+
+```typescript
+// pages/LoginPage.ts
+import { Page, Locator } from "@playwright/test"
+
+export class LoginPage {
+ readonly page: Page
+ readonly usernameInput: Locator
+ readonly passwordInput: Locator
+ readonly loginButton: Locator
+
+ constructor(page: Page) {
+ this.page = page
+ this.usernameInput = page.getByLabel("Username")
+ this.passwordInput = page.getByLabel("Password")
+ this.loginButton = page.getByRole("button", { name: "Login" })
+ }
+
+ async navigate() {
+ await this.page.goto("/login")
+ }
+
+ async login(username: string, password: string) {
+ await this.usernameInput.fill(username)
+ await this.passwordInput.fill(password)
+ await this.loginButton.click()
+ }
+}
+
+// tests/login.spec.ts
+import { test, expect } from "@playwright/test"
+import { LoginPage } from "../pages/LoginPage"
+
+test("should successfully log in", async ({ page }) => {
+ const loginPage = new LoginPage(page)
+ await loginPage.navigate()
+ await loginPage.login("testuser", "password")
+ await expect(page).toHaveURL(/dashboard/)
+})
+```
+
+## 5. Optimize Performance with Auth State & Route Blocking
+
+Reduce test execution time by reusing authenticated sessions and blocking unnecessary network requests.
+
+❌ BAD: Logging in for every test and loading all assets
+
+```typescript
+test("view profile", async ({ page }) => {
+ await page.goto("/login")
+ await page.getByLabel("Username").fill("user")
+ await page.getByLabel("Password").fill("pass")
+ await page.getByRole("button", { name: "Login" }).click()
+ await page.goto("/profile") // Loads all images, analytics, etc.
+})
+```
+
+✅ GOOD: Reusing auth state and blocking requests
+
+```typescript
+// playwright.config.ts
+import { defineConfig } from "@playwright/test"
+export default defineConfig({
+ use: {
+ storageState: "playwright-auth.json", // Path to save/load auth state
+ },
+})
+
+// global-setup.ts (run once before all tests)
+import { chromium, expect } from "@playwright/test"
+export default async function globalSetup() {
+ const browser = await chromium.launch()
+ const page = await browser.newPage()
+ await page.goto("/login")
+ await page.getByLabel("Username").fill("testuser")
+ await page.getByLabel("Password").fill("password")
+ await page.getByRole("button", { name: "Login" }).click()
+ await expect(page).toHaveURL(/dashboard/)
+ await page.context().storageState({ path: "playwright-auth.json" })
+ await browser.close()
+}
+
+// tests/profile.spec.ts
+import { test, expect } from "@playwright/test"
+test("should display user profile", async ({ page, context }) => {
+ // Block unnecessary resources for faster tests
+ await context.route("**/*.{png,jpg,jpeg,gif,webp,svg,css}", (route) =>
+ route.abort()
+ )
+ await page.goto("/profile") // Automatically uses saved auth state
+ await expect(page.getByText("Welcome, testuser!")).toBeVisible()
+})
+```
+
+## 6. Mock APIs for Deterministic Tests
+
+Isolate your UI tests from backend flakiness by intercepting and mocking API responses.
+
+❌ BAD: Relying on a live, potentially unstable backend
+
+```typescript
+test("display products", async ({ page }) => {
+ await page.goto("/products") // Fetches from real API
+ await expect(page.getByText("Product A")).toBeVisible()
+})
+```
+
+✅ GOOD: Mocking API responses
+
+```typescript
+test("display mocked products", async ({ page }) => {
+ await page.route("**/api/products", (route) => {
+ route.fulfill({
+ status: 200,
+ contentType: "application/json",
+ body: JSON.stringify([{ id: 1, name: "Mock Product" }]),
+ })
+ })
+ await page.goto("/products")
+ await expect(page.getByText("Mock Product")).toBeVisible()
+})
+```
+
+## 7. Leverage CI/CD Features for Debugging
+
+Configure tracing, screenshots, and video recording in your `playwright.config.ts` to instantly diagnose failures in CI.
+
+```typescript
+// playwright.config.ts
+import { defineConfig } from "@playwright/test"
+export default defineConfig({
+ reporter: [["html"], ["list"]],
+ use: {
+ trace: "on-first-retry", // Record trace only on first retry
+ screenshot: "on", // Always take a screenshot on failure
+ video: "on-first-retry", // Record video on first retry
+ },
+})
+```
\ No newline at end of file
diff --git a/.claude/rules/test/react-component-hook-testing-auto.md b/.claude/rules/test/react-component-hook-testing-auto.md
new file mode 100644
index 0000000..2b157b4
--- /dev/null
+++ b/.claude/rules/test/react-component-hook-testing-auto.md
@@ -0,0 +1,117 @@
+# Enforces best practices for unit testing React components and custom hooks using React Testing Library and Vitest
+
+## Description
+Enforces best practices for unit testing React components and custom hooks using React Testing Library and Vitest.
+
+## Applicability
+- **Files:** `*.test.{js,ts,jsx,tsx},*.spec.{js,ts,jsx,tsx}`
+- **Always Apply:** false
+
+## Rules
+- Use `@testing-library/react` for rendering components; avoid shallow rendering or testing implementation details.
+- Prefer accessibility queries (`getByRole`, `getByLabelText`, etc.) over `getByTestId` or CSS selectors.
+- Simulate user interactions with `userEvent` instead of firing raw DOM events.
+- Each test must contain at least one assertion using `expect`.
+- Group related tests in `describe` blocks with descriptive titles.
+- Avoid querying private DOM nodes with `container.querySelector`.
+- Do not snapshot the entire DOM tree for dynamic components; assert specific, stable text/attributes.
+- For hooks, use `@testing-library/react`'s `renderHook` or equivalent utilities.
+- Always test custom hooks in isolation and with all relevant edge cases.
+- Mock external dependencies and context providers as needed for hooks.
+- Avoid using `any` in test or hook typings; always use explicit types.
+- Clean up side-effects after each test (rely on library auto-cleanup or call `cleanup()`).
+- Do not use `.skip` or `.only` in committed test code.
+
+### react-component-hook-testing
+
+Enforces best practices for testing React components and custom hooks with React Testing Library and Vitest
+
+**Actions:**
+ - type: suggest
+ message: |
+ Use React Testing Library for rendering and userEvent for interactions. Prefer accessibility queries and always assert user-visible output.
+ - type: suggest
+ pattern: "container\\.querySelector"
+ message: |
+ Avoid querying DOM directly. Use queries like `screen.getByRole` or `screen.getByText` to reflect real user interactions.
+ - type: suggest
+ pattern: "fireEvent\\."
+ message: |
+ Use `userEvent` to simulate real user behaviour instead of `fireEvent`, e.g. `await userEvent.click(button)`.
+ - type: suggest
+    pattern: "\\.skip|\\.only"
+ message: |
+ Do not commit tests with `.skip` or `.only`. All tests should run in CI.
+ - type: suggest
+ message: |
+ Each test must contain at least one assertion (`expect`).
+examples:
+ - input: |
+ it('renders', () => {
+ const { container } = render()
+ container.querySelector('button[type="submit"]')
+ })
+ output: |
+ it('renders submit button', () => {
+ render()
+ expect(screen.getByRole('button', { name: /sign in/i })).toBeInTheDocument()
+ })
+ - input: |
+ it('calls onClick', async () => {
+ render()
+ fireEvent.click(screen.getByText('Click me'))
+ })
+ output: |
+ it('calls onClick', async () => {
+ render()
+ await userEvent.click(screen.getByRole('button', { name: /click me/i }))
+ expect(mockFn).toHaveBeenCalled()
+ })
+ - input: |
+ it('should use custom hook', () => {
+ const { result } = renderHook(() => useCustomHook())
+ expect(result.current.value).toBe(0)
+ })
+ output: |
+ it('should use custom hook with initial value', () => {
+ const { result } = renderHook(() => useCustomHook({ initial: 0 }))
+ expect(result.current.value).toBe(0)
+ })
+ - input: |
+ test.skip('unimplemented', () => {})
+ output: |
+ // Remove .skip or implement the test
+ - input: |
+ it('should update value', () => {
+ const { result } = renderHook(() => useCounter())
+ act(() => {
+ result.current.increment()
+ })
+ expect(result.current.count).toBe(1)
+ })
+ output: |
+ it('should increment count when increment is called', () => {
+ const { result } = renderHook(() => useCounter())
+ act(() => {
+ result.current.increment()
+ })
+ expect(result.current.count).toBe(1)
+ })
+tests:
+ - input: "it('short', () => { render() })"
+ output: "Suggest rewriting title and adding assertion"
+ - input: "container.querySelector('div')"
+ output: "Suggest replacing direct querySelector with RTL queries"
+ - input: "fireEvent.click(button)"
+ output: "Suggest using userEvent instead of fireEvent"
+ - input: "test.skip('unimplemented', () => {})"
+ output: "Suggest removing .skip or implementing the test"
+ - input: |
+ const { result } = renderHook(() => useCustomHook())
+ expect(result.current.value).toBe(0)
+ output: |
+      Passes — hook tested in isolation with assertion
+metadata:
+
+## Additional Information
+# React Unit Testing Standards
\ No newline at end of file
diff --git a/.claude/rules/test/testing-pyramid-agent.md b/.claude/rules/test/testing-pyramid-agent.md
new file mode 100644
index 0000000..c855dcb
--- /dev/null
+++ b/.claude/rules/test/testing-pyramid-agent.md
@@ -0,0 +1,176 @@
+# Provides guidance on testing pyramid principles and how to analyze test distribution
+
+## Description
+Provides guidance on testing pyramid principles and how to analyze test distribution. Use this rule when discussing test distribution, test strategy, or when analyzing test coverage across unit, integration, and E2E tests.
+
+## Applicability
+- **Files:** `**/*.test.{js,ts,jsx,tsx}, **/*.spec.{js,ts,jsx,tsx}`
+- **Always Apply:** false
+
+## Rules
+- Follow the testing pyramid ratio: 70% unit tests, 20% integration tests, 10% end-to-end tests
+- Every new feature must include tests at appropriate levels of the pyramid
+- Unit tests must be written for all business logic and utility functions
+- Integration tests must cover all critical paths and service interactions
+- End-to-end tests should focus on critical user journeys only
+- Test files must be co-located with the code they test
+- Mock external dependencies in unit tests, use real dependencies in integration tests
+- Test names must clearly describe the scenario being tested
+- Each test should follow the Arrange-Act-Assert pattern
+- Avoid test interdependence - each test should be able to run independently
+
+### testing-pyramid-analysis
+
+Guides developers on testing pyramid principles and how to analyze test distribution
+
+**Actions:**
+ - type: suggest
+ message: |
+ To analyze test distribution, first identify the test framework and check for coverage commands:
+
+ 1. Check package.json for test framework dependencies (jest, vitest, playwright, cypress, mocha)
+ 2. Look for coverage scripts in package.json scripts section (test:coverage, coverage, etc.)
+ 3. Use the framework's list command to identify test files:
+ - Jest: `npx jest --listTests`
+ - Vitest: `npx vitest list`
+ - Playwright: `npx playwright test --list`
+ - Cypress: Check cypress.config.js for test patterns
+
+ Then classify tests by:
+ - File path (e2e/, integration/, unit/)
+ - Content keywords (mock, playwright, cypress, database, etc.)
+ - Test framework indicators
+
+ Target distribution: 70% unit, 20% integration, 10% E2E
+
+examples:
+
+- description: "Unit Test Example"
+
+## Additional Information
+# Testing Pyramid Analysis
+
+This rule provides guidance on the testing pyramid principle and how to analyze test distribution in your repository.
+
+## Identifying Test Framework and Coverage Commands
+
+When analyzing test distribution, follow these steps:
+
+### 1. Identify Test Framework
+
+Check `package.json` for test framework dependencies:
+
+- **Jest**: `jest` or `@jest/core` in dependencies/devDependencies
+- **Vitest**: `vitest` in dependencies/devDependencies
+- **Playwright**: `@playwright/test` in dependencies/devDependencies
+- **Cypress**: `cypress` in dependencies/devDependencies
+- **Mocha**: `mocha` in dependencies/devDependencies
+
+### 2. Check for Coverage Commands
+
+Look in `package.json` scripts section for:
+
+- `test:coverage` or `coverage` scripts
+- Scripts that include coverage flags (`--coverage`, `--coverage-report`, etc.)
+
+### 3. Use Framework Commands to List Tests
+
+- **Jest**: `npx jest --listTests` (lists all test files without running)
+- **Vitest**: `npx vitest list` (lists all test files)
+- **Playwright**: `npx playwright test --list` (lists all test specs)
+- **Cypress**: Check `cypress.config.js` for `specPattern` or run `npx cypress run --dry-run`
+
+### 4. Classify Tests
+
+Classify each test file as unit, integration, or E2E based on:
+
+- **File path**: `e2e/`, `integration/`, `unit/` directories
+- **Content keywords**:
+ - E2E: `playwright`, `cypress`, `page.goto`, `browser`, `user journey`
+ - Integration: `database`, `api`, `service`, `repository`, `endpoint`
+ - Unit: `mock`, `stub`, `spy`, isolated function testing
+- **Test framework indicators**: E2E frameworks have distinct APIs (page objects, browser commands)
+
+## Testing Pyramid Standards
+
+This rule defines the standards for maintaining a proper testing pyramid in your codebase, ensuring comprehensive test coverage across all levels while maintaining the right balance between different types of tests.
+
+
+## Test File Organization
+
+- Test files should be named with `.test.{js,ts,jsx,tsx}` or `.spec.{js,ts,jsx,tsx}` suffix
+- Test files should mirror the structure of the source code
+- Group related tests using describe blocks
+- Use clear, descriptive test names that explain the scenario and expected outcome
+
+## Test Types and Their Characteristics
+
+### Unit Tests (70%)
+
+- Test individual functions, methods, or components in isolation
+- Should be fast and deterministic
+- Use mocks for external dependencies
+- Focus on business logic and edge cases
+- Should have high code coverage
+
+### Integration Tests (20%)
+
+- Test interaction between multiple components or services
+- Use real dependencies when possible
+- Focus on API contracts and data flow
+- Cover main success and error paths
+- Can be slower than unit tests
+
+### End-to-End Tests (10%)
+
+- Test complete user journeys
+- Run against production-like environment
+- Focus on critical business flows
+- Can be slow and more brittle
+- Should be minimal but crucial
+
+## Analyzing Test Distribution
+
+To analyze your test distribution:
+
+1. **Identify all test files** using your framework's list command
+2. **Classify each test** as unit, integration, or E2E
+3. **Calculate percentages**: unit / total, integration / total, e2e / total
+4. **Compare to ideal ratios**: 70% unit, 20% integration, 10% E2E
+5. **Make recommendations** based on deviations from ideal
+
+Example workflow:
+
+```bash
+# For Jest projects
+npx jest --listTests > test-files.txt
+# Then manually classify or use path/content analysis
+```
+
+## Common Violations
+
+1. Inverting the pyramid (more E2E tests than unit tests)
+2. Missing integration tests between critical services
+3. Testing implementation details in E2E tests
+4. Excessive mocking in integration tests
+5. Testing multiple units in a single unit test
+6. Writing tests without clear arrange-act-assert structure
+7. Dependent tests that must run in a specific order
+8. Missing error case coverage
+9. Brittle tests that break with minor changes
+10. Insufficient test documentation
+
+Regularly analyze your test distribution using the framework's list commands to catch these issues early.
+
+## Best Practices
+
+1. Write tests before or while writing code (TDD/BDD)
+2. Keep tests focused and concise
+3. Use meaningful test data
+4. Avoid test code duplication
+5. Maintain test code quality as production code
+6. Regular test maintenance and refactoring
+7. Monitor test execution time
+8. Include both positive and negative test cases
+9. Document test setup and special conditions
+10. Use appropriate testing tools and frameworks
\ No newline at end of file
diff --git a/.claude/rules/test/vitest-best-practices-auto.md b/.claude/rules/test/vitest-best-practices-auto.md
new file mode 100644
index 0000000..ab7cb23
--- /dev/null
+++ b/.claude/rules/test/vitest-best-practices-auto.md
@@ -0,0 +1,83 @@
+# Enforces best practices for Vitest unit tests
+
+## Description
+Enforces best practices for Vitest unit tests
+
+## Applicability
+- **Files:** `**/*.test.{js,ts,jsx,tsx}`
+- **Always Apply:** false
+
+## Rules
+- Tests must use clear and descriptive names (`it`/`test` strings).
+- Use "Arrange, Act, Assert" pattern for writing unit tests.
+- Use `describe` blocks to group related tests logically.
+- Use `beforeEach`/`afterEach` for shared setup/teardown instead of duplicating code.
+- Cleanup side-effects using `vi.resetAllMocks()` or `vi.restoreAllMocks()`.
+- Prefer `vi.fn()` or `vi.spyOn()` over manually created stubs.
+- Avoid global state — never rely on test execution order.
+- For asynchronous code, always `await` promises and/or use `expect.assertions(n)`.
+- Do not snapshot complex objects blindly; snapshot only stable, meaningful output.
+- Use `vi.useFakeTimers()` cautiously and always restore real timers with `vi.useRealTimers()` afterwards.
+- Do not disable tests with `test.skip`/`it.skip`; instead, fix or remove them.
+- Keep test files co-located with source or in a dedicated `__tests__` folder.
+- Ensure each test has at least one `expect` call (or uses `.resolves`/`.rejects`).
+- Prefer explicit assertions over implicit truthiness checks.
+
+### vitest-best-practices
+
+Enforces best practices for Vitest unit tests
+
+**Actions:**
+ # Suggest more descriptive titles
+ - type: suggest
+ message: |
+ Test titles should be descriptive. Use behaviour-driven phrases, e.g. `it('renders submit button when form is valid', ...)`.
+
+ # Reject skipped tests
+ - type: reject
+ pattern: "\\b(?:it|test|describe)\\.skip\\("
+ message: "Skipped tests may hide regressions. Replace `.skip` with active tests or remove them."
+
+ # Suggest adding an expect if missing
+ - type: suggest
+ message: "Each test should contain at least one assertion via `expect`."
+
+examples:
+ - input: |
+ it('renders correctly', () => {
+ render()
+ })
+ output: |
+ it('renders button with default label', () => {
+ render()
+ expect(screen.getByRole('button')).toHaveTextContent('Submit')
+ })
+
+ - input: "test.skip('unimplemented', () => {})"
+ output: "// Remove .skip or implement the test"
+
+ - input: |
+ it('fetches data', async () => {
+ const data = await fetchData()
+ })
+ output: |
+ it('fetches data', async () => {
+ expect.assertions(1)
+ const data = await fetchData()
+ expect(data).toEqual(mockData)
+ })
+
+tests:
+ - input: "it('test', () => { expect(true).toBe(true) })"
+    output: "Passes — descriptive enough, contains expect"
+ - input: "it('short', () => {})"
+ output: "Suggest rewriting title and adding assertion"
+ - input: "describe.skip('module', () => {})"
+    output: "Rejected — skipped tests are not allowed"
+
+metadata:
+
+## Additional Information
+# Vitest Unit Testing Best Practices
+
+This rule enforces and suggests best practices when writing Vitest unit tests to ensure reliable, maintainable, and expressive test suites.
\ No newline at end of file
diff --git a/.claude/rules/test/vitest-component-testing-auto.md b/.claude/rules/test/vitest-component-testing-auto.md
new file mode 100644
index 0000000..cee7648
--- /dev/null
+++ b/.claude/rules/test/vitest-component-testing-auto.md
@@ -0,0 +1,88 @@
+# Enforces best practices for component tests using Vitest
+
+## Description
+Enforces best practices for component tests using Vitest
+
+## Applicability
+- **Files:** `**/*.test.{js,ts,jsx,tsx}`
+- **Always Apply:** false
+
+## Rules
+- Use a DOM-oriented library (e.g. `@testing-library/react`, `@vue/test-utils`) for rendering; avoid shallow rendering utilities that inspect implementation details.
+- Prefer accessibility queries (`getByRole`, `getByLabelText`, etc.) over `getByTestId` or CSS selectors.
+- Interact with the UI via `userEvent` (or framework-equivalent) instead of firing raw DOM events directly.
+- Clean up side-effects by calling `cleanup()` (or rely on library auto-cleanup) after each test.
+- Group related component behaviours in `describe` blocks with descriptive titles.
+- Every test must contain at least one assertion via `expect`.
+- Avoid querying private DOM nodes with `container.querySelector`; focus on visible user output.
+- Do **not** snapshot the entire DOM tree for dynamic components; instead, assert specific, stable text/attributes.
+- Use `vi.mock()` for module mocks and restore with `vi.resetModules()` or `vi.clearAllMocks()` in `afterEach`.
+
+### vitest-component-testing
+
+Enforces best practices for component tests using Vitest
+
+**Actions:**
+ # Suggest more descriptive titles
+ - type: suggest
+ message: |
+ Test titles should clearly describe user behaviour, e.g. `it('shows error message when API call fails', ...)`.
+
+ # Suggest replacing container.querySelector
+ - type: suggest
+ pattern: "container\\.querySelector"
+ message: |
+ Avoid querying DOM directly. Prefer queries like `screen.getByRole` or `screen.getByText` so tests reflect real user interactions.
+
+ # Suggest using userEvent over fireEvent
+ - type: suggest
+ pattern: "fireEvent\\."
+ message: |
+ Use `userEvent` to simulate real user behaviour instead of `fireEvent`, e.g. `await userEvent.click(button)`.
+
+ # Suggest adding an expect if missing
+ - type: suggest
+ message: "Each component test should contain at least one assertion (`expect`)."
+
+examples:
+ - input: |
+ it('renders', () => {
+ const { container } = render()
+ container.querySelector('button[type="submit"]')
+ })
+ output: |
+ it('renders submit button', () => {
+ render()
+ expect(screen.getByRole('button', { name: /sign in/i })).toBeInTheDocument()
+ })
+
+ - input: |
+ it('submit works', async () => {
+ render()
+ await fireEvent.click(screen.getByText('Submit'))
+ })
+ output: |
+ it('triggers onSubmit when form is valid', async () => {
+ render()
+ const submit = screen.getByRole('button', { name: /submit/i })
+ await userEvent.click(submit)
+ expect(mockOnSubmit).toHaveBeenCalled()
+ })
+
+ - input: "test.skip('unimplemented component test', () => {})"
+ output: "// Remove .skip or implement the test"
+
+tests:
+ - input: "it('short', () => { render() })"
+ output: "Suggest rewriting title and adding assertion"
+ - input: "container.querySelector('div')"
+ output: "Suggest replacing direct querySelector with RTL queries"
+ - input: "fireEvent.click(button)"
+ output: "Suggest using userEvent instead of fireEvent"
+
+metadata:
+
+## Additional Information
+# Vitest Component Testing Best Practices
+
+This rule enforces best practices for **component tests** written with Vitest (e.g., React Testing Library, Vue Test Utils, or other DOM-based libs). The goal is to ensure tests remain stable, user-focused, and easy to maintain.
\ No newline at end of file
diff --git a/.claude/rules/test/vue-test-utils-auto.md b/.claude/rules/test/vue-test-utils-auto.md
new file mode 100644
index 0000000..575a26a
--- /dev/null
+++ b/.claude/rules/test/vue-test-utils-auto.md
@@ -0,0 +1,263 @@
+# @vue/test-utils Standards for Vue 3 components and Vitest
+
+## Description
+@vue/test-utils Standards for Vue 3 components and Vitest
+
+## Applicability
+- **Files:** `*.test.{ts,js},*.spec.{ts,js},__tests__/**/*`
+- **Always Apply:** false
+
+## Rules
+- Use @vue/test-utils with TypeScript for type-safe component testing
+- Test component props, emits, slots, and user interactions thoroughly
+- Mock composables and external dependencies appropriately
+- Use data-testid attributes for reliable element selection
+- Test component behavior, not implementation details
+- Provide proper TypeScript types for test props and mocks
+
+### vue-test-utils-typescript
+
+Standards for Vue component testing with @vue/test-utils and TypeScript
+
+**Actions:**
+ - type: suggest
+ message: |
+ Use @vue/test-utils with proper TypeScript typing:
+
+    ✅ Good component test structure:
+ import { mount, VueWrapper } from "@vue/test-utils"
+ import { describe, it, expect, vi, beforeEach } from "vitest"
+ import UserProfile from "@/components/UserProfile.vue"
+ import type { User } from "@/types/user"
+
+ describe("UserProfile.vue", () => {
+ let wrapper: VueWrapper
+ const mockUser: User = {
+ id: 1,
+ name: "John Doe",
+ email: "john@example.com"
+ }
+
+ beforeEach(() => {
+ wrapper = mount(UserProfile, {
+ props: { user: mockUser }
+ })
+ })
+
+ it("renders user information correctly", () => {
+ expect(wrapper.find('[data-testid="user-name"]').text()).toBe("John Doe")
+ expect(wrapper.find('[data-testid="user-email"]').text()).toBe("john@example.com")
+ })
+
+ it("emits update event when edited", async () => {
+ await wrapper.find('[data-testid="edit-button"]').trigger("click")
+ expect(wrapper.emitted("update")).toBeTruthy()
+ })
+ })
+
+    ❌ Avoid untyped tests and unclear selectors:
+ const wrapper = mount(UserProfile)
+ expect(wrapper.find(".user-name").text()).toBe("John")
+examples:
+ - input: |
+ import { mount } from "@vue/test-utils"
+ const wrapper = mount(UserProfile)
+ expect(wrapper.find(".user-name").text()).toBe("John")
+ output: |
+ import { mount, VueWrapper } from "@vue/test-utils"
+ import type { User } from "@/types/user"
+
+ const mockUser: User = { id: 1, name: "John Doe", email: "john@example.com" }
+ const wrapper: VueWrapper = mount(UserProfile, {
+ props: { user: mockUser }
+ })
+ expect(wrapper.find('[data-testid="user-name"]').text()).toBe("John Doe")
+metadata:
+
+### vue-test-composables-mocking
+
+Standards for mocking composables and external dependencies
+
+**Actions:**
+ - type: suggest
+ message: |
+ Mock composables and dependencies properly:
+
+    ✅ Good composable mocking:
+ import { vi } from "vitest"
+ import type { UseUserReturn } from "@/composables/useUser"
+
+ // Mock the composable
+ vi.mock("@/composables/useUser", () => ({
+ useUser: vi.fn((): UseUserReturn => ({
+ user: ref({ id: 1, name: "Test User" }),
+ loading: ref(false),
+ fetchUser: vi.fn(),
+ updateUser: vi.fn()
+ }))
+ }))
+
+ // Mock external libraries
+ vi.mock("vue-router", () => ({
+ useRouter: () => ({
+ push: vi.fn(),
+ replace: vi.fn()
+ })
+ }))
+
+    ❌ Avoid untyped mocks:
+ vi.mock("@/composables/useUser")
+ const mockUseUser = useUser as any
+examples:
+ - input: |
+ vi.mock("@/composables/useUser")
+ const mockRouter = { push: vi.fn() }
+ output: |
+ import type { UseUserReturn } from "@/composables/useUser"
+
+ vi.mock("@/composables/useUser", () => ({
+ useUser: vi.fn((): UseUserReturn => ({
+ user: ref(null),
+ loading: ref(false),
+ fetchUser: vi.fn()
+ }))
+ }))
+
+ vi.mock("vue-router", () => ({
+ useRouter: () => ({ push: vi.fn() })
+ }))
+metadata:
+
+### vue-test-slots-events
+
+Testing Vue component slots and events properly
+
+**Actions:**
+ - type: suggest
+ message: |
+ Test slots and events with proper typing:
+
+    ✅ Good slot and event testing:
+ // Testing slots
+ const wrapper = mount(DialogComponent, {
+ slots: {
+          default: '<div data-testid="slot-content">Custom content</div>',
+          header: '<h1>Custom Header</h1>'
+ }
+ })
+ expect(wrapper.find('[data-testid="slot-content"]').exists()).toBe(true)
+
+ // Testing events with payload
+ await wrapper.find('[data-testid="submit-button"]').trigger("click")
+ const updateEvents = wrapper.emitted("update") as Array<[string]>
+ expect(updateEvents).toHaveLength(1)
+ expect(updateEvents[0][0]).toBe("expected-payload")
+
+ // Testing v-model
+ const input = wrapper.find('input[data-testid="name-input"]')
+ await input.setValue("New Value")
+ const modelEvents = wrapper.emitted("update:modelValue") as Array<[string]>
+ expect(modelEvents[0][0]).toBe("New Value")
+examples:
+ - input: |
+ await wrapper.find("button").trigger("click")
+ expect(wrapper.emitted("update")).toBeTruthy()
+ output: |
+ await wrapper.find('[data-testid="submit-button"]').trigger("click")
+ const updateEvents = wrapper.emitted("update") as Array<[any]>
+ expect(updateEvents).toHaveLength(1)
+metadata:
+
+## Additional Information
+# Vue Test Utils Standards
+
+Comprehensive testing standards for Vue 3 components using @vue/test-utils with TypeScript support.
+
+
+
+
+
+
+
+
+## Testing Patterns
+
+### Component Props Testing
+
+```typescript
+interface TestProps {
+ user: User;
+ isVisible?: boolean;
+}
+
+const defaultProps: TestProps = {
+ user: { id: 1, name: "Test User", email: "test@example.com" },
+ isVisible: true,
+};
+
+const wrapper = mount(Component, { props: defaultProps });
+```
+
+### Async Testing
+
+```typescript
+it("handles async operations", async () => {
+ const wrapper = mount(AsyncComponent);
+
+ // Wait for async operation
+ await flushPromises();
+
+ // Or wait for next tick
+ await wrapper.vm.$nextTick();
+
+ expect(wrapper.find('[data-testid="result"]').text()).toBe("Loaded");
+});
+```
+
+### Global Plugins Testing
+
+```typescript
+const wrapper = mount(Component, {
+ global: {
+ plugins: [router, pinia],
+ provide: {
+ [UserKey]: mockUser,
+ },
+ stubs: {
+ "router-link": true,
+ "heavy-component": true,
+ },
+ },
+});
+```
+
+tests:
+
+- input: |
+ const wrapper = mount(UserCard)
+ expect(wrapper.find(".name").text()).toBe("John")
+ output: |
+ const mockUser: User = { id: 1, name: "John Doe", email: "john@example.com" }
+ const wrapper: VueWrapper = mount(UserCard, {
+ props: { user: mockUser }
+ })
+ expect(wrapper.find('[data-testid="user-name"]').text()).toBe("John Doe")
+- input: |
+ vi.mock("@/composables/useAuth")
+ const mockAuth = useAuth as any
+ output: |
+ import type { UseAuthReturn } from "@/composables/useAuth"
+ vi.mock("@/composables/useAuth", () => ({
+ useAuth: vi.fn((): UseAuthReturn => ({
+ user: ref(null),
+ isAuthenticated: ref(false),
+ login: vi.fn()
+ }))
+ }))
+- input: |
+ await wrapper.find("button").trigger("click")
+ expect(wrapper.emitted("save")).toBeTruthy()
+ output: |
+ await wrapper.find('[data-testid="save-button"]').trigger("click")
+ const saveEvents = wrapper.emitted("save") as Array<[any]>
+ expect(saveEvents).toHaveLength(1)
\ No newline at end of file
diff --git a/.claude/rules/utils/changelog-generator-manual.md b/.claude/rules/utils/changelog-generator-manual.md
new file mode 100644
index 0000000..6f01a6d
--- /dev/null
+++ b/.claude/rules/utils/changelog-generator-manual.md
@@ -0,0 +1,327 @@
+# This rule generates a comprehensive changelog
+
+## Description
+This rule generates a comprehensive changelog.md file by analyzing all git tags and commits, creating a chronological record of all project changes with proper semantic versioning structure. The changelog.md file is stored at the root of the project.
+
+## Applicability
+- **Files:** `changelog.md`
+- **Always Apply:** false
+
+## Rules
+- Must analyze all git tags from the beginning of the repository
+- Must create a chronological changelog with newest versions at the top
+- Must categorize changes into Features, Bug Fixes, Breaking Changes, and Other
+- Must provide a structured summary suitable for changelog documentation
+- Must handle cases where no tags exist
+- Must exclude merge commits and focus on meaningful changes
+- Must link features and changes to their corresponding commits or PRs
+- Must follow Keep a Changelog format standards
+- Must include unreleased changes section for current development
+- Must store the changelog.md file at the project root directory
+
+### generate-changelog
+
+Analyzes complete git history and generates structured changelog.md from all tags with unreleased changes
+
+**Actions:**
+ - type: generate
+ content: |
+ ## Changelog Generation Process
+
+ 1. **Identify All Versions**:
+ ```bash
+ # Read current version from package.json
+ CURRENT_VERSION=$(node -p "require('./package.json').version")
+
+ # Get all git tags sorted by version
+ ALL_TAGS=$(git tag --sort=-version:refname)
+
+ # Get commits since last tag (unreleased changes)
+ UNRELEASED_COMMITS=$(git log --oneline --no-merges $(git describe --tags --abbrev=0 2>/dev/null || echo "")..HEAD)
+
+ echo "Current package.json version: $CURRENT_VERSION"
+ echo "All git tags: $ALL_TAGS"
+ ```
+
+ 2. **Analyze All Version Changes**:
+ ```bash
+ # For each tag, get commits since previous tag
+ for tag in $ALL_TAGS; do
+ PREV_TAG=$(git describe --tags --abbrev=0 $tag^ 2>/dev/null || echo "")
+ COMMITS=$(git log --oneline --no-merges $PREV_TAG..$tag)
+ echo "Version $tag: $COMMITS"
+ done
+ ```
+
+ 3. **Categorize Changes by Type**:
+ - **Features**: `feat:` commits, new functionality
+ - **Bug Fixes**: `fix:` commits, bug resolutions
+ - **Breaking Changes**: `BREAKING:` commits, incompatible changes
+ - **Documentation**: `docs:` commits, documentation updates
+ - **Other**: `chore:`, `refactor:`, `style:`, `test:` commits
+
+ 4. **Generate Structured Output**:
+ ```markdown
+ # Changelog
+
+ All notable changes to this project will be documented in this file.
+
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+ ## [Unreleased]
+
+ ### π Added
+ - **[Feature Name]** ([commit-hash](link-to-commit)) - [Description]
+ - **[Feature Name]** ([#PR-number](link-to-PR)) - [Description]
+
+ ### π Fixed
+ - **[Bug Description]** ([commit-hash](link-to-commit)) - [Fix details]
+ - **[Bug Description]** ([#PR-number](link-to-PR)) - [Fix details]
+
+ ### π₯ Changed
+ - **[Change Description]** ([commit-hash](link-to-commit)) - [Migration notes]
+ - **[Change Description]** ([#PR-number](link-to-PR)) - [Migration notes]
+
+ ### π Documentation
+ - **[Doc Update]** ([commit-hash](link-to-commit)) - [Description]
+ - **[Doc Update]** ([#PR-number](link-to-PR)) - [Description]
+
+ ---
+
+ ## [v2.0.0] - 2024-12-19
+
+ ### π Added
+ - **[Feature Name]** ([commit-hash](link-to-commit)) - [Description]
+
+ ### π Fixed
+ - **[Bug Description]** ([commit-hash](link-to-commit)) - [Fix details]
+
+ ### π₯ Changed
+ - **[Breaking Change]** ([commit-hash](link-to-commit)) - [Migration notes]
+
+ ### π Documentation
+ - **[Doc Update]** ([commit-hash](link-to-commit)) - [Description]
+
+ ---
+
+ ## [v1.3.0] - 2024-12-15
+
+ ### π Added
+ - **[Feature Name]** ([commit-hash](link-to-commit)) - [Description]
+
+ ### π Fixed
+ - **[Bug Description]** ([commit-hash](link-to-commit)) - [Fix details]
+
+ ---
+
+ ## [v1.2.0] - 2024-12-10
+
+ ### π Added
+ - **[Feature Name]** ([commit-hash](link-to-commit)) - [Description]
+
+ ### π Fixed
+ - **[Bug Description]** ([commit-hash](link-to-commit)) - [Fix details]
+
+ ---
+
+ ## [v1.0.0] - 2024-12-01
+
+ ### π Added
+ - Initial release
+ - **[Feature Name]** ([commit-hash](link-to-commit)) - [Description]
+
+ [Unreleased]: https://github.com/username/project/compare/v2.0.0...HEAD
+ [v2.0.0]: https://github.com/username/project/compare/v1.3.0...v2.0.0
+ [v1.3.0]: https://github.com/username/project/compare/v1.2.0...v1.3.0
+ [v1.2.0]: https://github.com/username/project/compare/v1.0.0...v1.2.0
+ [v1.0.0]: https://github.com/username/project/releases/tag/v1.0.0
+ ```
+
+ **File Location**: `./changelog.md` (project root directory)
+
+examples:
+ - input: |
+ User: "Generate changelog"
+
+ Current package.json version: 2.0.0
+ All git tags: v2.0.0 v1.3.0 v1.2.0 v1.0.0
+ Unreleased commits:
+ - feat: add new authentication method (abc1234)
+ - fix: resolve login issue (def5678)
+
+ Version v2.0.0 commits:
+ - feat: add user authentication system (#123)
+ - fix: resolve memory leak (ghi9012)
+ - BREAKING: remove deprecated v1 endpoints (jkl3456)
+
+ Version v1.3.0 commits:
+ - feat: add dark mode support (#125)
+ - fix: resolve login button alignment (mno7890)
+ output: |
+ # Changelog
+
+ All notable changes to this project will be documented in this file.
+
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+ ## [Unreleased]
+
+ ### π Added
+ - **New Authentication Method** ([abc1234](link-to-commit)) - Added alternative authentication method for enhanced security
+
+ ### π Fixed
+ - **Login Issue Resolution** ([def5678](link-to-commit)) - Fixed critical login issue affecting user authentication
+
+ ---
+
+ ## [v2.0.0] - 2024-12-19
+
+ ### π Added
+ - **User Authentication System** ([#123](link-to-PR)) - Added comprehensive authentication with JWT tokens, password reset, and session management
+
+ ### π Fixed
+ - **Memory Leak Fix** ([ghi9012](link-to-commit)) - Resolved critical memory leak in data processing module
+
+ ### π₯ Changed
+ - **API v1 Deprecation** ([jkl3456](link-to-commit)) - Removed deprecated v1 endpoints. Please migrate to v2 API endpoints.
+
+ ---
+
+ ## [v1.3.0] - 2024-12-15
+
+ ### π Added
+ - **Dark Mode Support** ([#125](link-to-PR)) - Added comprehensive dark mode theme with automatic system preference detection
+
+ ### π Fixed
+ - **Login Button Alignment** ([mno7890](link-to-commit)) - Fixed misaligned login button in authentication form
+
+ ---
+
+ ## [v1.2.0] - 2024-12-10
+
+ ### π Added
+ - Initial features and functionality
+
+ ---
+
+ ## [v1.0.0] - 2024-12-01
+
+ ### π Added
+ - Initial release
+ - Core project functionality
+
+ [Unreleased]: https://github.com/username/project/compare/v2.0.0...HEAD
+ [v2.0.0]: https://github.com/username/project/compare/v1.3.0...v2.0.0
+ [v1.3.0]: https://github.com/username/project/compare/v1.2.0...v1.3.0
+ [v1.2.0]: https://github.com/username/project/compare/v1.0.0...v1.2.0
+ [v1.0.0]: https://github.com/username/project/releases/tag/v1.0.0
+
+ - input: |
+ User: "Generate changelog"
+
+ Current package.json version: 0.1.3
+ All git tags: v0.1.3 v0.1.2.3
+ Unreleased commits: None
+
+ Version v0.1.3 commits:
+ - feat: enhance dependency analysis agent (abc1234)
+ - fix: update globs for security-scan-agent (def5678)
+ - feat: add dependency analysis and security scan rules (ghi9012)
+ output: |
+ # Changelog
+
+ All notable changes to this project will be documented in this file.
+
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+ ## [Unreleased]
+
+ No unreleased changes.
+
+ ---
+
+ ## [v0.1.3] - 2024-12-19
+
+ ### π Added
+ - **Enhanced Dependency Analysis Agent** ([abc1234](link-to-commit)) - Enhanced dependency analysis agent with user input filters
+ - **Dependency Analysis and Security Scan Rules** ([ghi9012](link-to-commit)) - Added comprehensive dependency analysis and security scanning capabilities
+
+ ### π Fixed
+ - **Security Scan Glob Patterns** ([def5678](link-to-commit)) - Fixed glob patterns for security-scan-agent rule to include bun.lockb files
+
+ ---
+
+ ## [v0.1.2.3] - 2024-12-15
+
+ ### π Added
+ - Initial project setup and core functionality
+
+ [Unreleased]: https://github.com/username/project/compare/v0.1.3...HEAD
+ [v0.1.3]: https://github.com/username/project/compare/v0.1.2.3...v0.1.3
+ [v0.1.2.3]: https://github.com/username/project/releases/tag/v0.1.2.3
+
+tests:
+ - input: "Generate changelog for project with multiple versions"
+ output: "Should analyze all git tags, categorize changes by version, include unreleased changes, and produce structured changelog.md"
+
+ - input: "No git tags exist"
+ output: "Should create changelog with only unreleased changes section and initial release placeholder"
+
+ - input: "Only one version exists"
+ output: "Should create changelog with unreleased section and single version entry"
+
+ - input: "Breaking changes detected in commits"
+ output: "Should properly categorize breaking changes under 'Changed' section with migration notes"
+
+metadata:
+
+## Additional Information
+# Changelog Generator
+
+
+
+
+## Usage Instructions
+
+1. **Manual Trigger**: This rule must be manually invoked when you're ready to generate or update changelog.md
+2. **File Location**: The changelog.md file will be created at the project root directory (`./changelog.md`)
+3. **Prerequisites**: Ensure your repository has proper git tags for version tracking
+4. **Conventional Commits**: Works best with conventional commit message format (feat:, fix:, docs:, etc.)
+5. **Commit Linking**: Automatically links features and changes to their corresponding commits or PRs
+6. **Keep a Changelog Format**: Follows the standard Keep a Changelog format for consistency
+7. **Unreleased Section**: Always includes current unreleased changes for ongoing development
+
+## Keep a Changelog Standards
+
+This rule follows the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format:
+
+- **Unreleased**: Current development changes
+- **Added**: New features
+- **Changed**: Changes in existing functionality (including breaking changes)
+- **Deprecated**: Soon-to-be removed features
+- **Removed**: Removed features
+- **Fixed**: Bug fixes
+- **Security**: Vulnerability fixes
+
+## Integration with Existing Workflow
+
+This rule is designed to work with your existing release process and will generate a changelog.md file at the project root that can be:
+
+- Committed to your repository
+- Used in GitHub/GitLab releases
+- Referenced in documentation
+- Shared with stakeholders and users
+- Easily discovered by users and contributors
+
+## Quality Assurance
+
+The rule includes validation to ensure:
+- All significant changes are captured across all versions
+- Breaking changes are clearly highlighted with migration guidance
+- Each change is properly linked to its source commit or PR
+- Follows Keep a Changelog format standards
+- Includes proper version comparison links
+- Maintains chronological order with newest versions at top
\ No newline at end of file
diff --git a/.claude/rules/utils/console-vibes-auto.md b/.claude/rules/utils/console-vibes-auto.md
new file mode 100644
index 0000000..54fd0b6
--- /dev/null
+++ b/.claude/rules/utils/console-vibes-auto.md
@@ -0,0 +1,44 @@
+# adds fun emojis to console logs
+
+## Description
+adds fun emojis to console logs
+
+## Applicability
+- **Files:** `**/*.{js,ts,jsx,tsx,mjs}`
+- **Always Apply:** true
+
+### console-vibes
+
+Adds vibes to console.log statements
+
+## Additional Information
+# Console log emojis
+
+This rule adds fun emojis to console logs, making debugging a more joyful experience!
+
+
+
+## Usage
+
+This rule will automatically add contextually appropriate emojis to your console logs:
+
+- Success/completion messages get π or β¨
+- Start/initialization messages get π
+- Loading messages get ⚡️
+- Configuration related messages get ⚙️
+- User-related messages get 👤
+- Data-related messages get π
+- Default messages get π
+- Error messages get ❌
+
+The rule is smart enough to:
+1. Preserve existing string quotes
+2. Choose appropriate emojis based on message content
+3. Work with log, info, error, and debug statements
+4. Maintain code formatting
+
+## Key Principles
+
+- Makes logs more visually distinguishable
+- Make debugging and troubleshooting more fun and less demoralizing
+- Make log scanning more intuitive with visual cues
\ No newline at end of file
diff --git a/.claude/rules/utils/git-branch-agent.md b/.claude/rules/utils/git-branch-agent.md
new file mode 100644
index 0000000..0877fc9
--- /dev/null
+++ b/.claude/rules/utils/git-branch-agent.md
@@ -0,0 +1,67 @@
+# This rule enforces standards and best practices for branch management operations including checkout, creation, and deletion
+
+## Description
+This rule enforces standards and best practices for branch management operations including checkout, creation, and deletion. This rule should be followed when: 1. creating new branches, 2. switching between branches, 3. deleting branches, or 4. when the git aliases 'gco', 'gcb', or similar branch-related commands are used.
+
+## Applicability
+- **Files:** `.git/HEAD`
+- **Always Apply:** false
+
+## Rules
+- Always `git status` first; stash/commit before switching
+- Branch names: `/-kebab-description`
+- Allowed types: feature, fix, hotfix, release, docs, refactor, test, chore, experiment, spike
+- Start new work from updated `main`
+- Delete only merged branches (never current branch)
+- `git fetch --all` before new branches
+
+### Naming Examples
+
+```
+feature/#123-user-auth
+fix/#345-login-timeout
+docs/api-reference
+```
+
+## Additional Information
+
+name: git-branch
+version: 1.0
+severity: warning
+description: Enforce safe git branch operations & naming
+
+filters:
+ - type: event
+ pattern: "(pre_checkout|post_checkout|branch_create|branch_delete)"
+ - type: content
+ pattern: "(checkout|branch|gco|gcb)"
+
+matches: |
+ git checkout $branch
+ git branch $branch
+ git branch -d $branch
+ gco $branch
+ gcb $branch
+
+transforms: |
+ {{
+ const op = context.getOperation();
+ const branch = context.getBranchName();
+ if (!/^([a-z]+)\/(#?\d+-)?[a-z0-9-]+$/.test(branch)) return suggestValidBranchName(branch);
+ if (op === 'checkout') return `git status && ${command}`;
+ if (op === 'delete') return `git branch --merged | grep ${branch} && ${command}`;
+ return command;
+ }}
+
+examples:
+ - input: "git checkout feature"
+ output: "git status && git checkout feature/#123-descriptive"
+ - input: "git branch -d old"
+ output: "git branch --merged | grep old && git branch -d old"
+
+tests:
+ - input: "gco main"
+ output: "git status && git checkout main"
+
+metadata:
+ priority: high
\ No newline at end of file
diff --git a/.claude/rules/utils/release-changelog-auto.md b/.claude/rules/utils/release-changelog-auto.md
new file mode 100644
index 0000000..d23b66d
--- /dev/null
+++ b/.claude/rules/utils/release-changelog-auto.md
@@ -0,0 +1,155 @@
+# This rule generates a comprehensive changelog
+
+## Description
+This rule generates a comprehensive changelog.md file by analyzing all git tags and commits, creating a chronological record of all project changes with proper semantic versioning structure. The changelog.md file is stored at the root of the project.
+
+## Applicability
+- **Files:** `changelog.md`
+- **Always Apply:** false
+
+## Rules
+- Must validate that changelog can be generated before proceeding
+- Must backup existing changelog.md if it exists
+- Must check if existing changelog follows Keep a Changelog format
+- Must call the existing changelog-generator-manual rule for actual generation
+- Must preserve existing changelog content when possible
+- Must provide clear feedback about changelog generation status
+
+### release-changelog-auto
+
+**Actions:**
+ - type: generate
+ content: |
+
+ ## π Changelog Generation Process
+
+ I'll validate the environment and generate a comprehensive changelog for your release.
+
+ 1. **Validate Changelog Generation**:
+ ```bash
+ echo "π Now generating comprehensive changelog..."
+
+ # Backup existing changelog if it exists
+ if [ -f "changelog.md" ]; then
+ cp changelog.md changelog.md.backup
+ echo "π Backed up existing changelog.md"
+
+ # Check if existing changelog has proper structure
+ if grep -q "## \[Unreleased\]" changelog.md; then
+ echo "✅ Existing changelog has proper structure, will preserve content"
+ PRESERVE_EXISTING=true
+ else
+ echo "⚠️ Existing changelog doesn't follow Keep a Changelog format"
+ echo "Will create new changelog with existing content as reference"
+ PRESERVE_EXISTING=false
+ fi
+ else
+ echo "π No existing changelog.md found, creating new one"
+ PRESERVE_EXISTING=false
+ fi
+
+ # Get all git tags sorted by version
+ ALL_TAGS=$(git tag --sort=-version:refname)
+
+ # Get commits since last tag (unreleased changes)
+ UNRELEASED_COMMITS=$(git log --oneline --no-merges $(git describe --tags --abbrev=0 2>/dev/null || echo "")..HEAD)
+
+ echo "π Changelog generation summary:"
+ echo " - Total git tags: $(echo "$ALL_TAGS" | wc -l | tr -d ' ')"
+ echo " - Unreleased commits: $(echo "$UNRELEASED_COMMITS" | wc -l | tr -d ' ')"
+ echo " - Preserve existing: $PRESERVE_EXISTING"
+ ```
+
+ 2. **Call Changelog Generator**:
+ ```bash
+ # Call the existing changelog generator rule
+ echo "π Calling changelog generator..."
+
+ # The changelog-generator-manual rule will handle the actual generation
+ # This includes:
+ # - Analyzing all git tags from the beginning of the repository
+ # - Creating a chronological changelog with newest versions at the top
+ # - Categorizing changes into Features, Bug Fixes, Breaking Changes, and Other
+ # - Following Keep a Changelog format standards
+ # - Including unreleased changes section for current development
+
+ # After generation, verify the changelog was created successfully
+ if [ -f "changelog.md" ]; then
+ echo "✅ Changelog generated successfully at ./changelog.md"
+
+ # Show a summary of what was generated
+ echo "π Changelog summary:"
+ echo " - File size: $(wc -l < changelog.md) lines"
+ echo " - Unreleased section: $(grep -c "### π Added\|### π Fixed\|### π₯ Changed\|### π Documentation" changelog.md || echo "0") categories"
+ echo " - Version entries: $(grep -c "^## \[v" changelog.md || echo "0") versions"
+ else
+ echo "❌ ERROR: Changelog generation failed"
+ echo "Please check the changelog-generator-manual rule for details"
+ exit 1
+ fi
+ ```
+
+examples:
+ - input: |
+ User: "Create a release"
+ Existing changelog: Yes, follows Keep a Changelog format
+ Git tags: v2.0.0, v1.3.0, v1.2.0
+ Unreleased commits: 5 commits
+ output: |
+ ## π Changelog Generation Process
+
+ π Now generating comprehensive changelog...
+
+ π Backed up existing changelog.md
+ ✅ Existing changelog has proper structure, will preserve content
+
+ π Changelog generation summary:
+ - Total git tags: 3
+ - Unreleased commits: 5
+ - Preserve existing: true
+
+ π Calling changelog generator...
+ ✅ Changelog generated successfully at ./changelog.md
+
+ π Changelog summary:
+ - File size: 45 lines
+ - Unreleased section: 3 categories
+ - Version entries: 3 versions
+
+ - input: |
+ User: "Create a release"
+ Existing changelog: No
+ Git tags: v1.0.0
+ Unreleased commits: 2 commits
+ output: |
+ ## π Changelog Generation Process
+
+ π Now generating comprehensive changelog...
+
+ π No existing changelog.md found, creating new one
+
+ π Changelog generation summary:
+ - Total git tags: 1
+ - Unreleased commits: 2
+ - Preserve existing: false
+
+ π Calling changelog generator...
+ ✅ Changelog generated successfully at ./changelog.md
+
+tests:
+ - input: "Create release with existing changelog"
+ output: "Should backup existing changelog and call generator"
+
+ - input: "Create release without existing changelog"
+ output: "Should create new changelog using generator"
+
+ - input: "Create release with malformed existing changelog"
+ output: "Should detect format issues and create new changelog"
+
+ - input: "Create release with no git tags"
+ output: "Should handle case with no version history"
+
+metadata:
+
+## Additional Information
+# Release Changelog Generation
\ No newline at end of file
diff --git a/.claude/rules/utils/release-commit-analysis-auto.md b/.claude/rules/utils/release-commit-analysis-auto.md
new file mode 100644
index 0000000..4691c1b
--- /dev/null
+++ b/.claude/rules/utils/release-commit-analysis-auto.md
@@ -0,0 +1,202 @@
+# This rule analyzes commits to determine if they should trigger a version bump
+
+## Description
+This rule analyzes commits to determine if they should trigger a version bump. It categorizes conventional and non-conventional commits into Features, Bug Fixes, Breaking Changes, Documentation, and Other. It uses intelligent keyword matching for non-conventional commits and analyzes file changes to help categorize documentation commits. It provides comprehensive change summaries for version bump suggestions and handles cases where no commits exist since last tag.
+
+## Applicability
+- **Files:** `package.json, package-lock.json`
+- **Always Apply:** false
+
+## Rules
+- Must analyze both conventional and non-conventional commit messages
+- Must categorize commits into Features, Bug Fixes, Breaking Changes, Chore, Documentation, and Other
+- Must use intelligent keyword matching for non-conventional commits
+- Must analyze file changes to help categorize documentation commits
+- Must analyze file changes to help categorize chore commits or renamed files or folders
+- Must provide comprehensive change summaries for version bump suggestions
+- Must handle cases where no commits exist since last tag
+
+### release-commit-analysis-auto
+
+**Actions:**
+ - type: generate
+ content: |
+
+ ## π Commit Analysis Process
+
+ I'll analyze your commits to categorize changes and suggest appropriate version bumps.
+
+ 1. **Check User Request for Version Bump**:
+ ```bash
+ # Check if user specified version bump in their request
+ if [[ "$USER_REQUEST" == *"major"* ]] || [[ "$USER_REQUEST" == *"Major"* ]]; then
+ VERSION_BUMP="major"
+ echo "User requested major version bump"
+ elif [[ "$USER_REQUEST" == *"minor"* ]] || [[ "$USER_REQUEST" == *"Minor"* ]]; then
+ VERSION_BUMP="minor"
+ echo "User requested minor version bump"
+ elif [[ "$USER_REQUEST" == *"patch"* ]] || [[ "$USER_REQUEST" == *"Patch"* ]]; then
+ VERSION_BUMP="patch"
+ echo "User requested patch version bump"
+ else
+ VERSION_BUMP=""
+ echo "No version bump specified by user"
+ fi
+ ```
+
+ 2. **Analyze Changes for Guidance**:
+ ```bash
+ # Get all commits since that tag with full details
+ COMMITS=$(git log --oneline --no-merges $LATEST_TAG..HEAD)
+
+ # Get detailed commit information for better analysis
+ COMMIT_DETAILS=$(git log --format="%H%n%s%n%b" --no-merges $LATEST_TAG..HEAD)
+
+ # Analyze conventional commit types for version bump suggestion
+ # Note: grep -c already prints 0 when nothing matches; "|| true" only
+ # neutralizes the non-zero exit status (a trailing "|| echo 0" would
+ # produce "0\n0" on no match).
+ BREAKING_COUNT=$(echo "$COMMITS" | grep -c "BREAKING:" || true)
+ FEAT_COUNT=$(echo "$COMMITS" | grep -c "feat:" || true)
+ FIX_COUNT=$(echo "$COMMITS" | grep -c "fix:" || true)
+ DOCS_COUNT=$(echo "$COMMITS" | grep -c "docs:" || true)
+ STYLE_COUNT=$(echo "$COMMITS" | grep -c "style:" || true)
+ REFACTOR_COUNT=$(echo "$COMMITS" | grep -c "refactor:" || true)
+ PERF_COUNT=$(echo "$COMMITS" | grep -c "perf:" || true)
+ TEST_COUNT=$(echo "$COMMITS" | grep -c "test:" || true)
+ CHORE_COUNT=$(echo "$COMMITS" | grep -c "chore:" || true)
+
+ # Analyze non-conventional commits with intelligent categorization
+ # git log --oneline lines start with an abbreviated hash, so the type
+ # prefix must be matched after it; -E is required for (a|b) alternation.
+ NON_CONVENTIONAL_COMMITS=$(echo "$COMMITS" | grep -vE "^[0-9a-f]+ (feat|fix|docs|style|refactor|perf|test|chore):" | grep -v "BREAKING:")
+
+ if [ -n "$NON_CONVENTIONAL_COMMITS" ]; then
+ echo "π Found non-conventional commits, analyzing content for categorization..."
+
+ # Initialize counters for non-conventional commits
+ NC_FEAT_COUNT=0
+ NC_FIX_COUNT=0
+ NC_BREAKING_COUNT=0
+ NC_DOCS_COUNT=0
+ NC_OTHER_COUNT=0
+
+ # Analyze each non-conventional commit
+ while IFS= read -r commit; do
+ HASH=$(echo "$commit" | cut -d' ' -f1)
+ MESSAGE=$(echo "$commit" | cut -d' ' -f2-)
+
+ # Get files changed in this commit
+ FILES_CHANGED=$(git show --name-only --format="" "$HASH" 2>/dev/null)
+
+ # Categorize based on message content and file changes
+ if [[ "$MESSAGE" =~ (add|new|implement|create|introduce|support|enable|feature) ]]; then
+ NC_FEAT_COUNT=$((NC_FEAT_COUNT + 1))
+ echo " π Non-conventional commit categorized as FEATURE: $MESSAGE"
+ elif [[ "$MESSAGE" =~ (fix|bug|issue|problem|error|crash|fail|broken|resolve|correct) ]]; then
+ NC_FIX_COUNT=$((NC_FIX_COUNT + 1))
+ echo " π Non-conventional commit categorized as FIX: $MESSAGE"
+ elif [[ "$MESSAGE" =~ (BREAKING|breaking|remove|delete|drop|deprecate|change|update|upgrade|migrate) ]]; then
+ NC_BREAKING_COUNT=$((NC_BREAKING_COUNT + 1))
+ echo " π₯ Non-conventional commit categorized as BREAKING: $MESSAGE"
+ elif [[ "$MESSAGE" =~ (doc|readme|comment|example|guide|tutorial) ]] || echo "$FILES_CHANGED" | grep -q "\.md$\|\.txt$\|docs/\|README"; then
+ NC_DOCS_COUNT=$((NC_DOCS_COUNT + 1))
+ echo " π Non-conventional commit categorized as DOCS: $MESSAGE"
+ else
+ NC_OTHER_COUNT=$((NC_OTHER_COUNT + 1))
+ echo " π§ Non-conventional commit categorized as OTHER: $MESSAGE"
+ fi
+ done <<< "$NON_CONVENTIONAL_COMMITS"
+
+ # Add non-conventional counts to conventional counts
+ FEAT_COUNT=$((FEAT_COUNT + NC_FEAT_COUNT))
+ FIX_COUNT=$((FIX_COUNT + NC_FIX_COUNT))
+ BREAKING_COUNT=$((BREAKING_COUNT + NC_BREAKING_COUNT))
+ DOCS_COUNT=$((DOCS_COUNT + NC_DOCS_COUNT))
+
+ echo "π Non-conventional commit analysis complete:"
+ echo " - Features: $NC_FEAT_COUNT"
+ echo " - Fixes: $NC_FIX_COUNT"
+ echo " - Breaking: $NC_BREAKING_COUNT"
+ echo " - Docs: $NC_DOCS_COUNT"
+ echo " - Other: $NC_OTHER_COUNT"
+ fi
+
+ # Determine version bump suggestion based on all analyzed commits
+ if [ "$BREAKING_COUNT" -gt 0 ]; then
+ SUGGESTED_BUMP="major"
+ elif [ "$FEAT_COUNT" -gt 0 ]; then
+ SUGGESTED_BUMP="minor"
+ elif [ "$FIX_COUNT" -gt 0 ]; then
+ SUGGESTED_BUMP="patch"
+ else
+ SUGGESTED_BUMP="patch"
+ fi
+
+ echo "π Commit analysis summary:"
+ echo " - Breaking changes: $BREAKING_COUNT"
+ echo " - New features: $FEAT_COUNT"
+ echo " - Bug fixes: $FIX_COUNT"
+ echo " - Documentation: $DOCS_COUNT"
+ echo " - Style/Refactor: $((STYLE_COUNT + REFACTOR_COUNT))"
+ echo " - Performance: $PERF_COUNT"
+ echo " - Tests: $TEST_COUNT"
+ echo " - Chores: $CHORE_COUNT"
+ echo " - Suggested version bump: $SUGGESTED_BUMP"
+ ```
+
+examples:
+ - input: |
+ User: "Create a release"
+ Commits: 2 feat commits, 1 fix commit, 3 non-conventional commits
+ Non-conventional: "Add new API endpoint", "Fix broken login", "Update docs"
+ output: |
+ ## π Commit Analysis Process
+
+ π Found non-conventional commits, analyzing content for categorization...
+ π Non-conventional commit categorized as FEATURE: Add new API endpoint
+ π Non-conventional commit categorized as FIX: Fix broken login
+ π Non-conventional commit categorized as DOCS: Update docs
+
+ π Non-conventional commit analysis complete:
+ - Features: 1
+ - Fixes: 1
+ - Breaking: 0
+ - Docs: 1
+ - Other: 0
+
+ π Commit analysis summary:
+ - Breaking changes: 0
+ - New features: 3 (2 conventional + 1 non-conventional)
+ - Bug fixes: 2 (1 conventional + 1 non-conventional)
+ - Documentation: 1 (0 conventional + 1 non-conventional)
+ - Suggested version bump: minor
+
+ - input: |
+ User: "Create a release"
+ Commits: 1 BREAKING commit, 2 feat commits
+ output: |
+ ## π Commit Analysis Process
+
+ π Commit analysis summary:
+ - Breaking changes: 1
+ - New features: 2
+ - Bug fixes: 0
+ - Documentation: 0
+ - Suggested version bump: major
+
+tests:
+ - input: "Create release with conventional commits only"
+ output: "Should categorize conventional commits and suggest version bump"
+
+ - input: "Create release with mixed conventional and non-conventional commits"
+ output: "Should analyze both types and provide comprehensive summary"
+
+ - input: "Create release with breaking changes"
+ output: "Should suggest major version bump"
+
+ - input: "Create release with new features only"
+ output: "Should suggest minor version bump"
+
+ - input: "Create release with bug fixes only"
+ output: "Should suggest patch version bump"
+
+metadata:
+
+## Additional Information
+# Release Commit Analysis
\ No newline at end of file
diff --git a/.claude/rules/utils/release-git-tags-auto.md b/.claude/rules/utils/release-git-tags-auto.md
new file mode 100644
index 0000000..40f78ee
--- /dev/null
+++ b/.claude/rules/utils/release-git-tags-auto.md
@@ -0,0 +1,224 @@
+# This rule checks for existing tags before creating new ones
+
+## Description
+This rule checks for existing tags before creating new ones. It also offers to push the tag to the remote repository. It runs when users request release creation, version bumping, or package publishing.
+
+## Applicability
+- **Files:** `package.json, package-lock.json`
+- **Always Apply:** false
+
+## Rules
+- Must check for existing tags before creating new ones
+- Must handle local and remote tag conflicts gracefully
+- Must provide multiple resolution options for tag conflicts
+- Must validate tag format and naming conventions
+- Must offer to push tags to remote repository
+- Must provide clear error messages and resolution steps
+
+### release-git-tags-auto
+
+**Actions:**
+ - type: generate
+ content: |
+
+ ## π·οΈ Git Tag Management Process
+
+ I'll help you create and manage git tags for your release.
+
+ 1. **Offer Git Tag Creation**:
+ ```bash
+ echo "π·οΈ Would you like me to create a git tag for version $NEW_VERSION?"
+ echo "This will create a tag that marks this exact point in your release."
+
+ read -p "Create git tag v$NEW_VERSION? (y/N): " CREATE_TAG
+
+ if [[ "$CREATE_TAG" =~ ^[Yy]$ ]]; then
+ echo "π Creating git tag v$NEW_VERSION..."
+
+ # Check if tag already exists
+ if git tag -l "v$NEW_VERSION" | grep -q "v$NEW_VERSION"; then
+ echo "β ERROR: Git tag v$NEW_VERSION already exists!"
+ echo ""
+ echo "Existing tag details:"
+ git show --no-patch --format="%H%n%an%n%ad%n%s" "v$NEW_VERSION" 2>/dev/null || echo "Tag exists but details unavailable"
+ echo ""
+ echo "Options:"
+ echo "1. Use a different version number"
+ echo "2. Delete the existing tag (if you're sure it's safe)"
+ echo "3. Skip tag creation and create it manually later"
+ echo ""
+ read -p "Choose option (1-3): " TAG_CONFLICT_CHOICE
+
+ case $TAG_CONFLICT_CHOICE in
+ 1)
+ echo "Please restart the release workflow with a different version number"
+ echo "β Release workflow terminated due to tag conflict"
+ exit 1
+ ;;
+ 2)
+ echo "β οΈ WARNING: You're about to delete an existing tag!"
+ echo "This action cannot be undone and may affect other developers."
+ read -p "Are you absolutely sure? Type 'DELETE' to confirm: " DELETE_CONFIRM
+
+ if [[ "$DELETE_CONFIRM" == "DELETE" ]]; then
+ echo "π Deleting existing tag v$NEW_VERSION..."
+ git tag -d "v$NEW_VERSION"
+
+ # Also delete from remote if it exists there
+                          if git ls-remote --tags origin "v$NEW_VERSION" | grep -q "refs/tags/v$NEW_VERSION$"; then
+ echo "π Deleting remote tag v$NEW_VERSION..."
+ git push origin ":refs/tags/v$NEW_VERSION"
+ fi
+
+                          echo "✅ Existing tag deleted successfully"
+ else
+ echo "β Tag deletion cancelled. Release workflow terminated"
+ exit 1
+ fi
+ ;;
+ 3)
+ echo "βοΈ Skipping tag creation due to conflict"
+ CREATE_TAG=""
+ ;;
+ *)
+ echo "β Invalid choice. Release workflow terminated"
+ exit 1
+ ;;
+ esac
+ fi
+
+ # Create the git tag (only if we didn't skip due to conflict)
+ if [[ "$CREATE_TAG" =~ ^[Yy]$ ]]; then
+ git tag v$NEW_VERSION
+                  echo "✅ Git tag v$NEW_VERSION created successfully"
+
+ # Offer to push the tag
+ echo ""
+ echo "π Would you like me to push the git tag to the remote repository?"
+ echo "This makes the tag available to other developers and CI/CD systems."
+
+ read -p "Push git tag v$NEW_VERSION? (y/N): " PUSH_TAG
+
+ if [[ "$PUSH_TAG" =~ ^[Yy]$ ]]; then
+ echo "π Pushing git tag v$NEW_VERSION..."
+
+ # Check for remote tag conflicts before pushing
+                      if git ls-remote --tags origin "v$NEW_VERSION" | grep -q "refs/tags/v$NEW_VERSION$"; then
+ echo "β ERROR: Remote tag v$NEW_VERSION already exists!"
+ echo "The local tag was created, but cannot be pushed due to remote conflict."
+ echo ""
+ echo "Options:"
+ echo "1. Delete the remote tag first (requires appropriate permissions)"
+ echo "2. Keep the local tag only"
+ echo "3. Delete the local tag and skip"
+ echo ""
+ read -p "Choose option (1-3): " REMOTE_CONFLICT_CHOICE
+
+ case $REMOTE_CONFLICT_CHOICE in
+ 1)
+ echo "π Deleting remote tag v$NEW_VERSION..."
+ git push origin ":refs/tags/v$NEW_VERSION"
+ echo "π Now pushing local tag..."
+ git push origin v$NEW_VERSION
+                              echo "✅ Git tag v$NEW_VERSION pushed successfully"
+ ;;
+ 2)
+ echo "βοΈ Keeping local tag only. You can push it later with: git push origin v$NEW_VERSION"
+ ;;
+ 3)
+ echo "π Deleting local tag..."
+ git tag -d "v$NEW_VERSION"
+ echo "βοΈ Tag creation cancelled"
+ ;;
+ *)
+ echo "β Invalid choice. Keeping local tag only"
+ ;;
+ esac
+ else
+ git push origin v$NEW_VERSION
+                      echo "✅ Git tag v$NEW_VERSION pushed successfully"
+
+ echo ""
+ echo "π Release tag is now live!"
+ echo "Tag URL: https://github.com/[USERNAME]/[REPO]/releases/tag/v$NEW_VERSION"
+ fi
+ else
+ echo "βοΈ Tag created locally but not pushed"
+ echo "You can push it later with: git push origin v$NEW_VERSION"
+ fi
+ fi
+ else
+ echo "βοΈ Skipping git tag creation"
+ echo "You can create the tag manually later with: git tag v$NEW_VERSION"
+ fi
+ ```
+
+examples:
+ - input: |
+ User: "Create a release"
+ New version: 1.2.3
+ Tag v1.2.3 already exists locally
+ output: |
+ ## π·οΈ Git Tag Management Process
+
+ π·οΈ Would you like me to create a git tag for version 1.2.3?
+ This will create a tag that marks this exact point in your release.
+
+ Create git tag v1.2.3? (y/N): y
+ π Creating git tag v1.2.3...
+
+ β ERROR: Git tag v1.2.3 already exists!
+
+ Existing tag details:
+ abc1234
+ John Doe
+ 2024-01-15
+ feat: add new feature
+
+ Options:
+ 1. Use a different version number
+ 2. Delete the existing tag (if you're sure it's safe)
+ 3. Skip tag creation and create it manually later
+
+ - input: |
+ User: "Create a release"
+ New version: 1.2.3
+ Tag v1.2.3 doesn't exist
+ User confirms tag creation and push
+ output: |
+ ## π·οΈ Git Tag Management Process
+
+ π·οΈ Would you like me to create a git tag for version 1.2.3?
+ This will create a tag that marks this exact point in your release.
+
+ Create git tag v1.2.3? (y/N): y
+ π Creating git tag v1.2.3...
+      ✅ Git tag v1.2.3 created successfully
+
+ π Would you like me to push the git tag to the remote repository?
+ This makes the tag available to other developers and CI/CD systems.
+
+ Push git tag v1.2.3? (y/N): y
+ π Pushing git tag v1.2.3...
+      ✅ Git tag v1.2.3 pushed successfully
+
+ π Release tag is now live!
+ Tag URL: https://github.com/[USERNAME]/[REPO]/releases/tag/v1.2.3
+
+tests:
+ - input: "Create release with existing local tag"
+ output: "Should show conflict error and provide resolution options"
+
+ - input: "Create release with existing remote tag"
+ output: "Should detect remote conflict and provide resolution options"
+
+ - input: "Create release with new tag"
+ output: "Should create tag successfully and offer to push"
+
+ - input: "Create release but skip tag creation"
+ output: "Should skip tag creation and provide manual instructions"
+
+metadata:
+
+## Additional Information
+# Release Git Tag Management
\ No newline at end of file
diff --git a/.claude/rules/utils/release-package-version-auto.md b/.claude/rules/utils/release-package-version-auto.md
new file mode 100644
index 0000000..07be203
--- /dev/null
+++ b/.claude/rules/utils/release-package-version-auto.md
@@ -0,0 +1,158 @@
+# This rule checks the current version in package
+
+## Description
+This rule checks the current version in package.json and offers to update it to a new version. It also offers to commit the version change and push the commit. It runs when users request release creation, version bumping, or package publishing.
+
+## Applicability
+- **Files:** `package.json,*-lock.json,*.lock`
+- **Always Apply:** false
+
+## Rules
+- Must offer to update package.json version automatically
+- Must calculate new version based on semantic versioning
+- Must handle custom version inputs
+- Must offer to commit version changes
+- Must offer to push version commits
+- Must provide clear feedback about version update status
+
+### release-package-version-auto
+
+**Actions:**
+ - type: generate
+ content: |
+
+ ## π Package Version Management Process
+
+ I'll help you update the version in package.json and manage the version commit.
+
+ 1. **Offer Version Bump Update**:
+ ```bash
+ echo ""
+ echo "π Would you like me to automatically update the version in package.json?"
+ echo "Current version: $CURRENT_VERSION"
+
+ # Calculate new version based on user choice
+ if [ "$VERSION_BUMP" == "major" ]; then
+ NEW_VERSION=$(echo $CURRENT_VERSION | awk -F. '{print $1+1 ".0.0"}')
+ elif [ "$VERSION_BUMP" == "minor" ]; then
+ NEW_VERSION=$(echo $CURRENT_VERSION | awk -F. '{print $1 "." $2+1 ".0"}')
+ elif [ "$VERSION_BUMP" == "patch" ]; then
+ NEW_VERSION=$(echo $CURRENT_VERSION | awk -F. '{print $1 "." $2 "." $3+1}')
+ elif [ "$VERSION_BUMP" == "custom" ]; then
+ NEW_VERSION="$CUSTOM_VERSION"
+ fi
+
+ echo "New version would be: $NEW_VERSION"
+ read -p "Update package.json version? (y/N): " UPDATE_VERSION
+
+ if [[ "$UPDATE_VERSION" =~ ^[Yy]$ ]]; then
+ echo "π Updating package.json version..."
+
+ # Update package.json version using npm
+ if [ "$VERSION_BUMP" == "custom" ]; then
+ # For custom version, we need to manually update
+ node -e "
+ const fs = require('fs');
+ const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8'));
+ pkg.version = '$NEW_VERSION';
+ fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2) + '\n');
+          console.log('✅ Updated package.json version to $NEW_VERSION');
+ "
+ else
+ # Use npm version for standard bumps
+          npm version $VERSION_BUMP --no-git-tag-version
+          echo "✅ Updated package.json version to $NEW_VERSION"
+ fi
+
+ # Offer to commit the version change
+ echo ""
+ echo "π Would you like me to commit the version change?"
+ read -p "Commit version bump? (y/N): " COMMIT_VERSION
+
+ if [[ "$COMMIT_VERSION" =~ ^[Yy]$ ]]; then
+ echo "π Committing version change..."
+ git add package.json
+ git commit -m "chore: bump version to $NEW_VERSION"
+          echo "✅ Version change committed successfully"
+
+ # Offer to push the commit
+ echo ""
+ echo "π Would you like me to push the version commit?"
+ read -p "Push version commit? (y/N): " PUSH_VERSION
+
+ if [[ "$PUSH_VERSION" =~ ^[Yy]$ ]]; then
+ echo "π Pushing version commit..."
+ git push origin $CURRENT_BRANCH
+            echo "✅ Version commit pushed successfully"
+ fi
+ fi
+ else
+ echo "βοΈ Skipping version update"
+ fi
+ ```
+
+examples:
+ - input: |
+ User: "Create a release"
+ Current version: 1.2.3
+ Version bump: minor
+ User confirms version update and commit
+ output: |
+ ## π Package Version Management Process
+
+ π Would you like me to automatically update the version in package.json?
+ Current version: 1.2.3
+ New version would be: 1.3.0
+ Update package.json version? (y/N): y
+ π Updating package.json version...
+      ✅ Updated package.json version to 1.3.0
+
+ π Would you like me to commit the version change?
+ Commit version bump? (y/N): y
+ π Committing version change...
+      ✅ Version change committed successfully
+
+ π Would you like me to push the version commit?
+ Push version commit? (y/N): y
+ π Pushing version commit...
+      ✅ Version commit pushed successfully
+
+ - input: |
+ User: "Create a release"
+ Current version: 1.2.3
+ Version bump: custom (2.0.0)
+ User confirms version update but skips commit
+ output: |
+ ## π Package Version Management Process
+
+ π Would you like me to automatically update the version in package.json?
+ Current version: 1.2.3
+ New version would be: 2.0.0
+ Update package.json version? (y/N): y
+ π Updating package.json version...
+      ✅ Updated package.json version to 2.0.0
+
+ π Would you like me to commit the version change?
+ Commit version bump? (y/N): n
+      ⏭️ Skipping version commit
+
+tests:
+ - input: "Create release with major version bump"
+ output: "Should calculate major version and update package.json"
+
+ - input: "Create release with minor version bump"
+ output: "Should calculate minor version and update package.json"
+
+ - input: "Create release with patch version bump"
+ output: "Should calculate patch version and update package.json"
+
+ - input: "Create release with custom version"
+ output: "Should use custom version and update package.json"
+
+ - input: "Create release but skip version update"
+ output: "Should skip version update and continue"
+
+metadata:
+
+## Additional Information
+# Release Package Version Management
\ No newline at end of file
diff --git a/.claude/rules/utils/release-validation-auto.md b/.claude/rules/utils/release-validation-auto.md
new file mode 100644
index 0000000..4a2c3b8
--- /dev/null
+++ b/.claude/rules/utils/release-validation-auto.md
@@ -0,0 +1,182 @@
+# This rule validates release prerequisites including branch naming conventions and package
+
+## Description
+This rule validates release prerequisites including branch naming conventions and package.json structure. It runs when users request release creation, version bumping, or package publishing. The rule ensures releases only occur from appropriate branches (release/, hotfix/, fix/) and validates package.json exists with proper structure, required fields, and semantic versioning format. This prevents accidental releases from inappropriate branches and ensures package.json is ready for version updates.
+
+## Applicability
+- **Files:** `package.json,package-lock.json,*-lock.json,*.lock`
+- **Always Apply:** false
+
+## Rules
+- Must validate branch naming conventions before allowing releases
+- Must only allow releases from branches starting with "release/", "hotfix/", or "fix/"
+- Must validate package.json exists, is valid JSON, and contains required fields
+- Must validate semantic versioning format in package.json
+- Must provide clear error messages for validation failures
+- Must terminate workflow on validation failures
+
+### release-validation-auto
+
+**Actions:**
+ - type: validate
+ conditions:
+ - pattern: "^(main|master)$"
+ message: "β οΈ You're currently on the main branch! Please create a release branch before generating release notes. This ensures proper version control and prevents accidental releases."
+
+ - type: generate
+ content: |
+
+ ## π Release Validation Process
+
+ I'll validate your environment and configuration before proceeding with the release workflow.
+
+ 1. **Branch Validation** β
+ ```bash
+ # Check current branch
+ CURRENT_BRANCH=$(git branch --show-current)
+ echo "Current branch: $CURRENT_BRANCH"
+
+ # Verify we're not on main/master
+ if [[ "$CURRENT_BRANCH" == "main" || "$CURRENT_BRANCH" == "master" ]]; then
+ echo "β Error: Cannot generate release notes on main/master branch"
+ echo "Please create a release branch first:"
+ echo "git checkout -b release/v[VERSION]"
+ exit 1
+ fi
+
+ # Validate branch naming conventions
+ echo "π Validating branch naming conventions..."
+
+ # Only allow releases from branches that start with "release/", "hotfix/", or "fix/"
+ if [[ ! "$CURRENT_BRANCH" =~ ^(release|hotfix|fix)/ ]]; then
+ echo "β ERROR: Releases can only be created from branches that start with 'release/', 'hotfix/', or 'fix/'"
+ echo ""
+ echo "Current branch: $CURRENT_BRANCH"
+ echo ""
+ echo "Please create an appropriate branch first:"
+ echo "git checkout -b release/v[VERSION]"
+ echo "git checkout -b hotfix/urgent-fix-v[VERSION]"
+ echo "git checkout -b fix/bug-description-v[VERSION]"
+ echo ""
+ echo "Examples of valid release branches:"
+ echo "- release/v1.2.3"
+ echo "- release/v2.0.0"
+ echo "- hotfix/security-patch-v1.2.4"
+ echo "- fix/memory-leak-v1.1.5"
+ echo "- release/feature-name-v1.1.0"
+ echo ""
+ echo "β Release workflow terminated due to invalid branch"
+ exit 1
+ fi
+
+      echo "✅ Branch validation passed - using $CURRENT_BRANCH"
+ ```
+
+ 2. **Package.json Validation**:
+ ```bash
+ # Validate package.json exists and is valid
+ if [ ! -f "package.json" ]; then
+ echo "β ERROR: package.json not found in current directory"
+ echo "Please ensure you're running this workflow from the project root directory"
+ echo "Current directory: $(pwd)"
+ exit 1
+ fi
+
+ # Validate package.json is valid JSON
+ if ! node -e "JSON.parse(require('fs').readFileSync('package.json', 'utf8'))" 2>/dev/null; then
+ echo "β ERROR: package.json contains invalid JSON"
+ echo "Please fix the JSON syntax in package.json before proceeding"
+ exit 1
+ fi
+
+ # Validate required fields exist
+ REQUIRED_FIELDS=("name" "version")
+ for field in "${REQUIRED_FIELDS[@]}"; do
+ if ! node -e "const pkg = require('./package.json'); if (!pkg.$field) { process.exit(1); }" 2>/dev/null; then
+ echo "β ERROR: package.json is missing required field: $field"
+ echo "Please add the '$field' field to package.json before proceeding"
+ exit 1
+ fi
+ done
+
+ # Validate version format (semantic versioning)
+ VERSION_REGEX="^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$"
+ CURRENT_VERSION=$(node -p "require('./package.json').version")
+
+ if [[ ! "$CURRENT_VERSION" =~ $VERSION_REGEX ]]; then
+ echo "β ERROR: Invalid version format in package.json: $CURRENT_VERSION"
+ echo "Version must follow semantic versioning format: x.y.z[-prerelease][+build]"
+ echo "Examples: 1.2.3, 2.0.0-beta.1, 1.0.0+20231201"
+ exit 1
+ fi
+
+ # Read current version from package.json
+      echo "✅ Package.json validation passed"
+ echo "Package name: $(node -p "require('./package.json').name")"
+ echo "Current version: $CURRENT_VERSION"
+
+ # Get the most recent git tag (if exists)
+ LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v$CURRENT_VERSION")
+
+ echo "Latest git tag: $LATEST_TAG"
+ ```
+
+examples:
+ - input: |
+ User: "Create a release"
+ Current branch: main
+ output: |
+ ## π Release Validation Process
+
+ β Error: Cannot generate release notes on main/master branch
+ Please create a release branch first:
+ git checkout -b release/v[VERSION]
+
+ - input: |
+ User: "Create a release"
+ Current branch: wip/experimental
+ output: |
+ ## π Release Validation Process
+
+ β ERROR: Releases can only be created from branches that start with 'release/', 'hotfix/', or 'fix/'
+
+ Current branch: wip/experimental
+
+ Please create an appropriate branch first:
+ git checkout -b release/v[VERSION]
+
+ - input: |
+ User: "Create a release"
+ Current branch: release/v1.2.3
+ Missing package.json
+ output: |
+ ## π Release Validation Process
+
+      ✅ Branch validation passed - using release/v1.2.3
+
+ β ERROR: package.json not found in current directory
+ Please ensure you're running this workflow from the project root directory
+
+tests:
+ - input: "Create release on main branch"
+ output: "Should show error and suggest creating release branch"
+
+ - input: "Create release on invalid branch name"
+ output: "Should show error and list valid branch patterns"
+
+ - input: "Create release with missing package.json"
+ output: "Should show error about missing package.json"
+
+ - input: "Create release with invalid JSON in package.json"
+ output: "Should show error about invalid JSON syntax"
+
+ - input: "Create release with missing version field"
+ output: "Should show error about missing required field"
+
+ - input: "Create release with invalid version format"
+ output: "Should show error about invalid semantic versioning format"
+
+metadata:
+
+## Additional Information
+# Release Validation
\ No newline at end of file
diff --git a/.claude/rules/utils/release-version-bump-auto.md b/.claude/rules/utils/release-version-bump-auto.md
new file mode 100644
index 0000000..8b94cb1
--- /dev/null
+++ b/.claude/rules/utils/release-version-bump-auto.md
@@ -0,0 +1,171 @@
+# Apply when the user is creating or preparing a release, version bump, tag, publish, or deploy
+
+## Description
+Apply when the user is creating or preparing a release, version bump, tag, publish, or deploy. Validates semantic version bumps against change analysis (breaking/feat/fix), blocks inappropriate bumps, and guides major/minor/patch or custom version choice. Use for release workflows, package versioning, and publish/deploy preparation.
+
+## Applicability
+- **Always Apply:** false
+
+## Rules
+- Must validate version bump against change analysis
+- Must block releases when breaking changes exist but non-major version is requested
+- Must provide clear error messages for version mismatches
+- Must offer force continue options for version mismatches
+- Must calculate new version numbers based on semantic versioning
+- Must handle custom version inputs
+
+### release-version-bump-auto
+
+**Actions:**
+ - type: generate
+ content: |
+
+ ## π Version Bump Process
+
+ I'll help you choose and validate the appropriate version bump for your release.
+
+ 1. **Prompt User for Version Bump (if not specified)**:
+ ```bash
+ if [ -z "$VERSION_BUMP" ]; then
+ # Calculate potential new versions
+ MAJOR_VERSION=$(echo $CURRENT_VERSION | awk -F. '{print $1+1 ".0.0"}')
+ MINOR_VERSION=$(echo $CURRENT_VERSION | awk -F. '{print $1 "." $2+1 ".0"}')
+ PATCH_VERSION=$(echo $CURRENT_VERSION | awk -F. '{print $1 "." $2 "." $3+1}')
+
+ echo "Choose your version bump:"
+ echo "1) Major version: $CURRENT_VERSION β $MAJOR_VERSION (breaking changes)"
+ echo "2) Minor version: $CURRENT_VERSION β $MINOR_VERSION (new features)"
+ echo "3) Patch version: $CURRENT_VERSION β $PATCH_VERSION (bug fixes)"
+ echo "4) Custom version"
+
+ # Wait for user input
+ read -p "Enter your choice (1-4): " USER_CHOICE
+
+ case $USER_CHOICE in
+ 1) VERSION_BUMP="major" ;;
+ 2) VERSION_BUMP="minor" ;;
+ 3) VERSION_BUMP="patch" ;;
+ 4)
+ read -p "Enter custom version: " CUSTOM_VERSION
+ VERSION_BUMP="custom"
+ ;;
+ esac
+ fi
+ ```
+
+ 2. **Validate Version Bump Against Changes**:
+ ```bash
+ # Validate that the chosen version bump matches the change analysis
+ if [ -n "$VERSION_BUMP" ]; then
+ echo "π Validating version bump against change analysis..."
+ echo "Analysis suggests: $SUGGESTED_BUMP (breaking: $BREAKING_COUNT, features: $FEAT_COUNT, fixes: $FIX_COUNT)"
+ echo "User requested: $VERSION_BUMP"
+
+ # Check for version mismatches
+ if [ "$SUGGESTED_BUMP" == "major" ] && [ "$VERSION_BUMP" != "major" ]; then
+ echo "β CRITICAL ERROR: Breaking changes detected but user requested $VERSION_BUMP version"
+ echo "Breaking changes require a MAJOR version bump according to semantic versioning."
+ echo ""
+ echo "Breaking changes found:"
+ echo "$COMMITS" | grep "BREAKING:" | while read commit; do
+ echo " - $commit"
+ done
+ echo ""
+ echo "Please either:"
+ echo "1. Use major version bump to properly indicate breaking changes"
+ echo "2. Review and remove breaking changes from your commits"
+ echo "3. Use '--force' flag to override this validation (not recommended)"
+ echo ""
+ read -p "Do you want to force continue with $VERSION_BUMP version? (y/N): " FORCE_CONTINUE
+ if [[ ! "$FORCE_CONTINUE" =~ ^[Yy]$ ]]; then
+ echo "β Release workflow terminated due to version mismatch"
+ exit 1
+ else
+ echo "β οΈ WARNING: Proceeding with $VERSION_BUMP version despite breaking changes"
+ echo "This may cause dependency issues for users of your package"
+ fi
+ elif [ "$SUGGESTED_BUMP" == "minor" ] && [ "$VERSION_BUMP" == "patch" ]; then
+ echo "β οΈ WARNING: New features detected but user requested patch version"
+ echo "New features typically require a MINOR version bump according to semantic versioning."
+ echo ""
+ echo "New features found:"
+ echo "$COMMITS" | grep "feat:" | while read commit; do
+ echo " - $commit"
+ done
+ echo ""
+ read -p "Do you want to continue with patch version? (y/N): " CONTINUE_PATCH
+ if [[ ! "$CONTINUE_PATCH" =~ ^[Yy]$ ]]; then
+ echo "β Release workflow terminated due to version mismatch"
+ exit 1
+ else
+ echo "β οΈ WARNING: Proceeding with patch version despite new features"
+ fi
+ else
+        echo "✅ Version bump validation passed"
+ fi
+ fi
+ ```
+
+examples:
+ - input: |
+ User: "Create a release"
+ Analysis suggests: major (breaking changes detected)
+ User requests: minor version
+ output: |
+ ## π Version Bump Process
+
+ π Validating version bump against change analysis...
+ Analysis suggests: major (breaking: 2, features: 0, fixes: 0)
+ User requested: minor
+
+ β CRITICAL ERROR: Breaking changes detected but user requested minor version
+ Breaking changes require a MAJOR version bump according to semantic versioning.
+
+ Breaking changes found:
+ - a1b2c3d BREAKING: remove deprecated API endpoint
+ - e4f5g6h BREAKING: change authentication method
+
+ - input: |
+ User: "Create a release"
+ Analysis suggests: minor (new features detected)
+ User requests: patch version
+ output: |
+ ## π Version Bump Process
+
+ π Validating version bump against change analysis...
+ Analysis suggests: minor (breaking: 0, features: 3, fixes: 0)
+ User requested: patch
+
+ β οΈ WARNING: New features detected but user requested patch version
+ New features typically require a MINOR version bump according to semantic versioning.
+
+ - input: |
+ User: "Create a release"
+ Analysis suggests: patch (bug fixes only)
+ User requests: patch version
+ output: |
+ ## π Version Bump Process
+
+ π Validating version bump against change analysis...
+ Analysis suggests: patch (breaking: 0, features: 0, fixes: 2)
+ User requested: patch
+
+      ✅ Version bump validation passed
+
+tests:
+ - input: "Create release with breaking changes but minor version"
+ output: "Should show critical error and offer force continue option"
+
+ - input: "Create release with new features but patch version"
+ output: "Should show warning and ask for confirmation"
+
+ - input: "Create release with matching version bump"
+ output: "Should pass validation without issues"
+
+ - input: "Create release with custom version"
+ output: "Should accept custom version input"
+
+metadata:
+
+## Additional Information
+# Release Version Bump
\ No newline at end of file
From f87a78255d4d6624513e4578b19b84e2ac26551a Mon Sep 17 00:00:00 2001
From: Jen Chan <6406037+usrrname@users.noreply.github.com>
Date: Sun, 5 Apr 2026 10:29:13 -0400
Subject: [PATCH 4/5] ci: Pin GitHub Actions to commit SHAs for supply chain
security
Pin all GitHub Actions to specific commit SHAs to prevent supply chain attacks:
- actions/checkout: v4.2.2 (11bd71901bbe5b1630ceea73d27597364c9af683)
- actions/setup-node: v4.1.0 (1a4442cacd436585991a76fe714fa58850bd193c)
- actions/configure-pages: v4.0.0 (1f0c5cde4dec8825aff22eac11aa73c856b5c886)
- actions/upload-pages-artifact: v3.0.1 (56afc609e74202658d3ffba0e8f6f4625a7d4af5)
- actions/deploy-pages: v4.0.5 (d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e)
- actions/dependency-review-action: v4.5.0 (3b139cfc5fae8b618d3eae3675e383bb1769c019)
- dorny/paths-filter: v3.0.2 (de90cc6fb38fc0963ad72b210f1f284cd68cea36)
Renovate will still update these via SHA due to helpers:pinGitHubActionDigests config.
Refs: Phase 4 - GitHub Actions pinning
---
.github/workflows/dependency-review.yml | 4 ++--
.github/workflows/pages.yml | 12 ++++++------
.github/workflows/pr-checks.yml | 12 ++++++------
.github/workflows/publish.yml | 10 +++++-----
4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 72f4e0e..27b7ef3 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -12,8 +12,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: "Checkout Repository"
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Dependency Review"
- uses: actions/dependency-review-action@v4
+ uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4.5.0
with:
fail-on-severity: moderate
diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
index caa6b16..796f4d8 100644
--- a/.github/workflows/pages.yml
+++ b/.github/workflows/pages.yml
@@ -16,11 +16,11 @@ jobs:
changes: ${{ steps.changes.outputs.filters }}
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Change detection
id: changes
- uses: dorny/paths-filter@v3
+ uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
with:
filters: |
'./':
@@ -35,11 +35,11 @@ jobs:
if: ${{ needs.changes.outputs.changes.index == 'true' || needs.changes.outputs.changes.styles == 'true' }}
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Pages
- uses: actions/configure-pages@v4
+ uses: actions/configure-pages@1f0c5cde4dec8825aff22eac11aa73c856b5c886 # v4.0.0
- name: Create artifact directory
run: |
@@ -50,11 +50,11 @@ jobs:
- name: Upload artifact
if: ${{ needs.changes.outputs.changes.index == 'true' || needs.changes.outputs.changes.styles == 'true' }}
- uses: actions/upload-pages-artifact@v3
+ uses: actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6f4625a7d4af5 # v3.0.1
with:
path: "_site"
- name: Deploy to GitHub Pages
if: ${{ needs.changes.outputs.changes.index == 'true' || needs.changes.outputs.changes.styles == 'true' }}
id: deployment
- uses: actions/deploy-pages@v4
\ No newline at end of file
+ uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml
index 08fc75c..2308912 100644
--- a/.github/workflows/pr-checks.yml
+++ b/.github/workflows/pr-checks.yml
@@ -17,9 +17,9 @@ jobs:
permissions:
contents: read
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Node.js
- uses: actions/setup-node@v5
+ uses: actions/setup-node@1a4442cacd436585991a76fe714fa58850bd193c # v4.1.0
with:
node-version: '22'
- run: npm ci
@@ -35,9 +35,9 @@ jobs:
os: [ubuntu-latest, windows-latest, macos-latest]
fail-fast: false
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Node.js
- uses: actions/setup-node@v5
+ uses: actions/setup-node@1a4442cacd436585991a76fe714fa58850bd193c # v4.1.0
with:
node-version: '22'
- run: npm ci
@@ -50,7 +50,7 @@ jobs:
pull-requests: write
contents: read
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
@@ -65,4 +65,4 @@ jobs:
gh pr edit ${{ github.event.pull_request.number }} --body "$commits"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- description: ${{ github.event.pull_request.body }}
\ No newline at end of file
+ description: ${{ github.event.pull_request.body }}
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 042d249..86d8136 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -16,7 +16,7 @@ jobs:
outputs:
version: ${{ steps.extract-version.outputs.version }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
ref: main
@@ -68,12 +68,12 @@ jobs:
contents: read
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: main
fetch-depth: 0
- - uses: actions/setup-node@v4
+ - uses: actions/setup-node@1a4442cacd436585991a76fe714fa58850bd193c # v4.1.0
with:
node-version: 20
registry-url: https://registry.npmjs.org
@@ -96,12 +96,12 @@ jobs:
contents: read
packages: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: main
fetch-depth: 0
- - uses: actions/setup-node@v4
+ - uses: actions/setup-node@1a4442cacd436585991a76fe714fa58850bd193c # v4.1.0
with:
node-version: 20
registry-url: https://npm.pkg.github.com/
From 7c9573a2b7fe169a9a77c5d9ba106d856f3c7a95 Mon Sep 17 00:00:00 2001
From: Jen Chan <6406037+usrrname@users.noreply.github.com>
Date: Sun, 5 Apr 2026 10:31:35 -0400
Subject: [PATCH 5/5] feat(safety): Add checksum generation and validation
utilities
Add safety features:
- Generate SHA-256 hashes for all files
- Create validation utilities
- Support pattern scanning for security
Refs: Phase 4 implementation
---
checksums.json | 86 ++++++++++++++++++++++
cli/utils/validation.mjs | 128 +++++++++++++++++++++++++++++++++
scripts/generate-checksums.mjs | 72 +++++++++++++++++++
3 files changed, 286 insertions(+)
create mode 100644 checksums.json
create mode 100644 cli/utils/validation.mjs
create mode 100755 scripts/generate-checksums.mjs
diff --git a/checksums.json b/checksums.json
new file mode 100644
index 0000000..70c8ca0
--- /dev/null
+++ b/checksums.json
@@ -0,0 +1,86 @@
+{
+ ".cursor/.ai/architecture/high-level-architecture.md": "993a490b56e65bfbcd4ef03f6fb07858131c697a9d752fef38a4efa34eacaacc",
+ ".cursor/.ai/release-notes-v0.7.0.md": "02fc2807a4c796c99bbf17022b4d75755a6ed33fc2b427bfdce7a00c98efd088",
+ ".cursor/COMMANDS.md": "9a47e9c0053625f56b7145d3375614ea5209ae4c753f949655b08d7ba5ddef82",
+ ".cursor/commands.json": "83654f9205080d02562f577b562b407c2264fa315d57f17e6888ed39c0fc190c",
+ ".cursor/environment.json": "c9a7121ea235eca47efa4c22409e1f13ee34143b8d2164c1a61e4b6c0dd5f82c",
+ ".cursor/mcp.json": "3a7490251e1824c99a120737d1ced918ee4acee8d24dc84feb50795525b6b200",
+ ".cursor/modes.json": "a7d65f5ce8e447f242d986545e75fd7f3666d53ae9f8556be6048ccd3c5348db",
+ ".cursor/rules/core/agent-communication-always.mdc": "ced7b9c98f2767f138d0647881d2dd4fb49dc2ee9fa9fda72e3d684d2490159c",
+ ".cursor/rules/core/create-rule-agent.mdc": "8706fcbe58298d74ebf05139d1b92add4242478ab4bc9adfa614ad3b449ab0a6",
+ ".cursor/rules/core/create-update-agent.mdc": "ae061f6b7e4757d14e401f21a0b1cc0032d9bbff1e74ab04f8670eb8e87931e8",
+ ".cursor/rules/core/security-scan-agent.mdc": "a8dd0a465bbfa144115a7020ec0af59fd5d0519d4891bb1d37eed58df62a9801",
+ ".cursor/rules/mcp/container-use.mdc": "d23d49721a4d88da4943d4efcb47ec78881a9d9f39939aa6ed682dd7bbd41c19",
+ ".cursor/rules/standards/cloudflare-workers-auto.mdc": "fce91260c6b0dc3e8f0655524e3e575c02a76ad73649f81141e2fedde3ead771",
+ ".cursor/rules/standards/cloudflare-workers-hono-auto.mdc": "82146ec016498956fdf29f35a4bd27d450123741b10111fe4b5c7c9b09a0fed8",
+ ".cursor/rules/standards/laravel-php-auto.mdc": "c18ad46580ced1b214c450454c69647cbc6aa317938957d6d3624b78704c3c90",
+ ".cursor/rules/standards/mysql-auto.mdc": "fbf82d19dcca17bef23419b1474c771174aebc4d835f639f6f92e2d4ff1d3e8a",
+ ".cursor/rules/standards/nextjs-react19-auto.mdc": "2abd90666e5c0ecbc5e195b56655735813d7d0abb0abb2dffd978e099c7007eb",
+ ".cursor/rules/standards/react-typescript-auto.mdc": "7bbdb5a974abb038c30815ef1fd494bef21abfd21b08a95ad983154952856334",
+ ".cursor/rules/standards/typescript-standards-auto.mdc": "a3ca8a2c600c2e26df1543e79446ac64f4c6f5e9457b9d44e52a4941e512ba3d",
+ ".cursor/rules/standards/vue3-typescript-auto.mdc": "51323823a20e1a7eb5672a1e1a9e9e8a2669176135e9fd0f97abdd164b406771",
+ ".cursor/rules/templates/architecture-decision-record.md": "2f7e5d549353e249c5cc17390e74d1c78923a7ffdc11935c5925a075f3ef2fb5",
+ ".cursor/rules/templates/architecture.md": "d71e648ae211500e43c19b0f52eb0015091e4949489b2b833f2ae479bb7ba8ee",
+ ".cursor/rules/templates/bug-report.md": "1e10f69559b41c60503365c58b251523375e7e2da5d824673ec475a2c13d5f3c",
+ ".cursor/rules/templates/custom-agents.md": "eea057588ec1ecebda1b57cc58a98a415962117827e5d7c081b4289629c7c60a",
+ ".cursor/rules/templates/test-plan.md": "a77fbe1f9919eec60081f54ca032ac5c703dcba82c51f0eb4a1bce193c50a36e",
+ ".cursor/rules/templates/user-story.md": "71b1e6b0a265ad6ef9a112039ee1a8cef7938434317d26443719c84197c167a6",
+ ".cursor/rules/test/playwright-agent.mdc": "09ea823f85b148a9a2789566ad654d0f9d315c60572c33c65973e321499212fb",
+ ".cursor/rules/test/react-component-hook-testing-auto.mdc": "69d8b7f9e0e2dde7648dcf94a861277e5cc89c0eba635bee675cdfc9e55c7905",
+ ".cursor/rules/test/testing-pyramid-agent.mdc": "197e83688a46a8d4ba861ed22f1d4555d24293a9ae2da6f1c1c05f199ac222c9",
+ ".cursor/rules/test/vitest-best-practices-auto.mdc": "9a210c8318b28ed83c6479ce98f85b53348a0c9089f000963410a54d3c70f52e",
+ ".cursor/rules/test/vitest-component-testing-auto.mdc": "55abaa946691133effd8cc1337344791c3ce127bf9efd10260c23ef6ea2d7b89",
+ ".cursor/rules/test/vue-test-utils-auto.mdc": "84c14a04b5b342aa1366542a3e77068d2e6c10f3a082c427a55b40e67eb83322",
+ ".cursor/rules/utils/changelog-generator-manual.mdc": "39922bb8876bd0af91052b0443ee9f82cb332bb54a538cd331c692dcd0eff51c",
+ ".cursor/rules/utils/console-vibes-auto.mdc": "40d8cfd508ee92b8e79c40a41e29507446a3d82e692e9cc702c5b1ef6cc607f0",
+ ".cursor/rules/utils/git-branch-agent.mdc": "d518913cf959cb938d4b8b87d9ab92ae36c50801c08dc25d2a147558181485b0",
+ ".cursor/rules/utils/release-changelog-auto.mdc": "cf37bb2d05db6f6ad5994cea5b5cd3ef17385e757f51abb20c901b00f05055df",
+ ".cursor/rules/utils/release-commit-analysis-auto.mdc": "8be17335835bf253831bac11abda13e5fb9fd136767c66ef81ede7deb0312740",
+ ".cursor/rules/utils/release-git-tags-auto.mdc": "c3f2ca18a1d87b9a1bcb5132d48c84a2f6d83f9d26ca8f418c068c0f2f0b45ed",
+ ".cursor/rules/utils/release-package-version-auto.mdc": "f2ca6d6efcfb5f7227393e5cb6697350d6a2b49d70817c7266fc5a71a27d1938",
+ ".cursor/rules/utils/release-validation-auto.mdc": "991704ca972babb707c578caa7fa82247eb6ac21792e9b5f0eba7a63d26be49e",
+ ".cursor/rules/utils/release-version-bump-auto.mdc": "157f6516628e95e5acdbc7e418e5cf1915d47049928f499a34b89eb2ef4aeab1",
+ ".cursor/skills/architecture/SKILL.md": "39f540ec0f701b33b350ac135a6c13f67ff8260558f83144c4119483d6313574",
+ ".cursor/skills/changelog/SKILL.md": "e96497a7530ec7be30ee8a179b5cc8e63c6a092015c592ada220800d571a6b9d",
+ ".cursor/skills/dependency-analysis-agent/SKILL.md": "1ad3f4545b904a58873d6508bdb16081dbadc8d89accac7ec3c3a8210a34ef92",
+ ".cursor/skills/dev-workflow/SKILL.md": "5ac706c27d20bc7e9ecba9c379f5bb34c4b428e698ce8339d04cb724d86d27e6",
+ ".cursor/skills/git-commit-push-agent/SKILL.md": "a45fbe13fa9ccaaa55fc6426cb6b9b6f318535bb9ea9db751b93304ff18daaa8",
+ ".cursor/skills/intent-discovery/SKILL.md": "33315fbc23b887c2b94810136e289b87879801be4563307ebaacbc469bd58e6b",
+ ".cursor/skills/refactor-agent/SKILL.md": "975a212e4bf5094ef23b509247a9c9f81da680a755603e35f46e2a7a16a8aaba",
+ ".cursor/skills/testing-pyramid/SKILL.md": "fa655e47669c4d37c616aab8a690c78746f02de2b5bc498eff333e26c47e9306",
+ ".claude/commands/changelog.md": "6ec455e7117b1509f1286bd316b7a636d3405334d1f09a48520b91608cb963ce",
+ ".claude/commands/commit.md": "328f9a0b5cc841a347dffab2a441b2aa6d879f1e7b15aba9a8bec15e71ba839f",
+ ".claude/commands/dev.md": "4b38c505adf327269ba55fc40c607d0710e1596276aadf77595388ba294fe4cc",
+ ".claude/commands/refactor.md": "5199e0acb9ebab111a031c930c59e324d0c48469f5ef8cc2afa6dbab705b1242",
+ ".claude/commands/testing-pyramid.md": "5b4167d595770623e4e4d4566378f17423dddafda1660b5bd1005a06fe4f52cb",
+ ".claude/commands/version.md": "60f16bdb788b9ab6094070fa294de080756bd48852faf9876da0697c95b55c08",
+ ".claude/hooks/security-block.sh": "3179d6addba1bd6b49e60cafdcf492d6730d7a8e373eed85e8e96249233f49c6",
+ ".claude/rules/core/agent-communication-always.md": "ec1278cd6b2234e5b220df90b8de42439a1d7d3b34b01ab0ac946c47f4fdc28d",
+ ".claude/rules/core/create-rule-agent.md": "2d36b12201fb84e69237454d684c3dc0da21379911f348482d8092badc567b80",
+ ".claude/rules/core/create-update-agent.md": "6d2cdd8f134464972e60f0e7ea92874c284fd5a74bc9385759abf3cff9d62413",
+ ".claude/rules/core/security-scan-agent.md": "2e4d3cd7666c8ce19120d8f0f4406a436caf9b05d16ab6c4e8ca1f0cd0426ba6",
+ ".claude/rules/standards/cloudflare-workers-auto.md": "7a0be24028a82e07521a0e65b93e95c53d25a5d1c96ae0dc9a72f65cdfcddd9b",
+ ".claude/rules/standards/cloudflare-workers-hono-auto.md": "0c87301ef8c14184661a5dff90d1d9dd706ac07ff519d2a4352a5beac1810c15",
+ ".claude/rules/standards/laravel-php-auto.md": "5fcb708fde4b867bffa9e0689976a64f29f735d7366d935302e61aef9faad2bb",
+ ".claude/rules/standards/mysql-auto.md": "92927126beefb065fcd8d89140696f16b6dd3ef0857e27d5122773abcb28544e",
+ ".claude/rules/standards/nextjs-react19-auto.md": "ec46d62ba8288820b8c06240ecb65260972aeedf0c6db6e7c20e1b7d738c1634",
+ ".claude/rules/standards/react-typescript-auto.md": "f78347c6402615aaf7a0f28dbf8e0e3e3f8edfd802f41288c0f2a46ce1efbbe0",
+ ".claude/rules/standards/typescript-standards-auto.md": "8155e3d6a989ad007188473808e2b7cb4b7a7468c070f17b46a6d778d1f8426f",
+ ".claude/rules/standards/vue3-typescript-auto.md": "6d590f9b2ae28053defddbad5ed373ac7d165d43453d0a982f4e46eb24fb8b79",
+ ".claude/rules/test/playwright-agent.md": "c2146b7648b355ded4b7cf438317665fd90612c5559bfb61b2d5cd6de043bfe9",
+ ".claude/rules/test/react-component-hook-testing-auto.md": "201a894dece3af1c7f75dd9d94ac8fe617d1dc3d8d102f048647a8f86dd2c607",
+ ".claude/rules/test/testing-pyramid-agent.md": "85b59fa87ab99dacce9ab7869ce97f49f574225ee584831836c15b211d3bf31c",
+ ".claude/rules/test/vitest-best-practices-auto.md": "c150963b13d3f85b8fdf0341677713959df8c7998a72f22f155ff8759d2a1483",
+ ".claude/rules/test/vitest-component-testing-auto.md": "0d2da027c6f1a088a638b4fd87ff242a238883b6fbe421b3370a34401d85095d",
+ ".claude/rules/test/vue-test-utils-auto.md": "8763664eb3e11c06bc280f7d61ecf66262ee1ba55f995ee2a923c0db1d8bce48",
+ ".claude/rules/utils/changelog-generator-manual.md": "2a43d575303063ee8f4a22871658ee7694cc8b1e40d16cf5e8b6c5d283791661",
+ ".claude/rules/utils/console-vibes-auto.md": "42fe23f522eee3d74301a2a4184af58bffb30ce3dd5ef0d77954a65fe7439b99",
+ ".claude/rules/utils/git-branch-agent.md": "865bd02a656a1f987bec07afeb0e8d7610db3a5e4c47299ef11d27869e9aba28",
+ ".claude/rules/utils/release-changelog-auto.md": "66f6c2a2d6bf2f6eca2830d868d8ff0fb7d9e10280a02e056a566d4dbef8f980",
+ ".claude/rules/utils/release-commit-analysis-auto.md": "962b57ff9b6288cb389b8359bd0a58f6058632a01baa210b3d85e865ccb5ff45",
+ ".claude/rules/utils/release-git-tags-auto.md": "4bc863bb222b02dbe20394de26e24fd6e7ce2a3991a0168544c6f9b73f2072a9",
+ ".claude/rules/utils/release-package-version-auto.md": "2e009b533e4ab5822a4b82a61ee0d57a079fc8b05c0c3646c8dd880670960e1b",
+ ".claude/rules/utils/release-validation-auto.md": "2d099a26b05bf7e73fc267b305dddd1eced74db59dd9a30371c18a9425bd5d66",
+ ".claude/rules/utils/release-version-bump-auto.md": "8a32300f083b33f8968ab79e7f968990cf615a676ef0a0ac5fd48a94d9913cff",
+ ".claude/settings.json": "9a846d46c3b37f0777ea3f4d5c9784a92eb1f0ca68b00c5d20d1b49e22a5f043"
+}
\ No newline at end of file
diff --git a/cli/utils/validation.mjs b/cli/utils/validation.mjs
new file mode 100644
index 0000000..ad5d117
--- /dev/null
+++ b/cli/utils/validation.mjs
@@ -0,0 +1,128 @@
+#!/usr/bin/env node
+/**
+ * Validate downloaded files against checksums
+ */
+
+import { createHash } from 'node:crypto';
+import { readFileSync, accessSync, constants } from 'node:fs';
+import { join } from 'node:path';
+
/**
 * Compute the SHA-256 digest of a file's contents.
 * @param {string} filePath - Path of the file to hash
 * @returns {string} Lowercase hex-encoded SHA-256 digest
 */
export function generateChecksum(filePath) {
  // Read the raw bytes (no encoding) so the hash covers the exact on-disk content.
  const bytes = readFileSync(filePath);
  const hash = createHash('sha256');
  hash.update(bytes);
  return hash.digest('hex');
}
+
/**
 * Test whether a path exists on disk.
 * @param {string} filePath - Path to probe
 * @returns {boolean} true when the path is accessible, false otherwise
 */
export function fileExists(filePath) {
  // accessSync throws for a missing path; any failure is treated as "does not exist".
  try {
    accessSync(filePath, constants.F_OK);
  } catch {
    return false;
  }
  return true;
}
+
/**
 * Check one file on disk against its expected SHA-256 checksum.
 * @param {string} filePath - Path to the file
 * @param {string} expectedChecksum - Expected SHA-256 checksum (hex)
 * @returns {{valid: boolean, error?: string}} `valid: true` on a match; otherwise
 *   `valid: false` with a human-readable `error` describing the failure
 */
export function validateFile(filePath, expectedChecksum) {
  try {
    if (!fileExists(filePath)) {
      return { valid: false, error: 'File does not exist' };
    }

    const actual = generateChecksum(filePath);
    if (actual === expectedChecksum) {
      return { valid: true };
    }

    return {
      valid: false,
      error: `Checksum mismatch: expected ${expectedChecksum}, got ${actual}`
    };
  } catch (err) {
    // Unexpected I/O failures (permissions, races) become a validation error
    // rather than crashing the caller.
    return { valid: false, error: err.message };
  }
}
+
/**
 * Validate every file listed in a checksum manifest against the copies on disk.
 * @param {string} baseDir - Base directory the relative paths are resolved against
 * @param {Record<string, string>} checksums - Map of relative file path -> expected SHA-256 hex digest
 * @returns {{valid: boolean, errors: Array<{file: string, error: string}>}} `valid` is true
 *   only when every file exists and matches; `errors` lists each failing file
 */
export function validateDownload(baseDir, checksums) {
  /** @type {Array<{file: string, error: string}>} */
  const errors = [];

  for (const [relativePath, expectedChecksum] of Object.entries(checksums)) {
    const fullPath = join(baseDir, relativePath);
    const result = validateFile(fullPath, expectedChecksum);

    if (!result.valid) {
      // Fix: the previous `!result.valid && result.error` guard silently dropped
      // any invalid result that lacked an error string, letting a bad file pass.
      // Always record the failure, with a fallback message.
      errors.push({ file: relativePath, error: result.error ?? 'Unknown validation error' });
    }
  }

  return {
    valid: errors.length === 0,
    errors
  };
}
+
/**
 * Scan text content line-by-line for potentially destructive command patterns.
 * @param {string} content - File content to scan
 * @returns {Array<{pattern: string, line: number}>} One entry per (line, pattern)
 *   hit, with the human-readable pattern name and 1-based line number
 */
export function scanForDangerousPatterns(content) {
  // Each entry pairs a detection regex with a stable, human-readable label.
  const dangerousPatterns = [
    // Fix: also match the equivalent "-fr" flag ordering, which the previous
    // "-rf"-only regexes let through (a strict superset, so existing hits remain).
    { pattern: /rm\s+-(?:rf|fr)\s+\//, name: 'rm -rf /' },
    { pattern: /rm\s+-(?:rf|fr)\s+~/, name: 'rm -rf ~' },
    { pattern: />\s*\/dev\/sda/, name: 'disk overwrite' },
    { pattern: /mkfs\./, name: 'filesystem format' },
    { pattern: /:\(\)\s*\{\s*:\|:&\s*\};:/, name: 'fork bomb' },
    { pattern: /DROP\s+TABLE/i, name: 'SQL drop table' },
    { pattern: /TRUNCATE\s+TABLE/i, name: 'SQL truncate' },
  ];

  const lines = content.split('\n');
  const matches = [];

  for (let i = 0; i < lines.length; i++) {
    for (const { pattern, name } of dangerousPatterns) {
      if (pattern.test(lines[i])) {
        matches.push({ pattern: name, line: i + 1 });
      }
    }
  }

  return matches;
}
+
/**
 * Verify that a file contains syntactically valid JSON.
 * @param {string} filePath - Path of the JSON file to check
 * @returns {{valid: boolean, error?: string}} `valid: true` when the file parses;
 *   otherwise `valid: false` with the parse (or read) error message
 */
export function validateJson(filePath) {
  try {
    // Parse for syntax only; the parsed value itself is discarded.
    JSON.parse(readFileSync(filePath, 'utf-8'));
  } catch (err) {
    return { valid: false, error: `Invalid JSON: ${err.message}` };
  }
  return { valid: true };
}
diff --git a/scripts/generate-checksums.mjs b/scripts/generate-checksums.mjs
new file mode 100755
index 0000000..ad2aef7
--- /dev/null
+++ b/scripts/generate-checksums.mjs
@@ -0,0 +1,72 @@
+#!/usr/bin/env node
+/**
+ * Generate checksums for all downloadable files
+ * Creates a checksums.json file with SHA-256 hashes
+ */
+
+import { createHash } from 'node:crypto';
+import { readFileSync, writeFileSync, readdirSync, statSync } from 'node:fs';
+import { join, relative } from 'node:path';
+
+const DIRS_TO_HASH = ['.cursor', '.claude'];
+const OUTPUT_FILE = 'checksums.json';
+
/**
 * Hash a single file's raw bytes with SHA-256.
 * @param {string} filePath - Path of the file to hash
 * @returns {string} Lowercase hex-encoded SHA-256 digest
 */
function generateFileChecksum(filePath) {
  // Chain update/digest directly on the freshly created hash object.
  return createHash('sha256').update(readFileSync(filePath)).digest('hex');
}
+
/**
 * List every regular file under a directory, recursing into subdirectories.
 * @param {string} dir - Directory to walk
 * @returns {string[]} Paths (rooted at `dir`) of all files found, in directory-listing order
 */
function getAllFiles(dir) {
  // flatMap flattens each recursive subdirectory listing into the result,
  // preserving the same depth-first ordering as an explicit push loop.
  return readdirSync(dir, { withFileTypes: true }).flatMap((entry) => {
    const fullPath = join(dir, entry.name);
    return entry.isDirectory() ? getAllFiles(fullPath) : [fullPath];
  });
}
+
/**
 * Build the checksum manifest for every file under the configured directories.
 * @returns {Record<string, string>} Map of project-relative file path -> SHA-256 hex digest
 */
function generateChecksums() {
  /** @type {Record<string, string>} */
  const checksums = {};

  DIRS_TO_HASH.forEach((dir) => {
    // The try spans the walk AND the hashing, so a failure anywhere in this
    // directory (missing dir, unreadable file) logs a warning and moves on.
    try {
      for (const file of getAllFiles(dir)) {
        checksums[relative('.', file)] = generateFileChecksum(file);
      }
    } catch (err) {
      console.warn(`Warning: Could not read directory ${dir}: ${err.message}`);
    }
  });

  return checksums;
}
+
// Main execution: build the manifest, write it out, report what was done.
const checksums = generateChecksums();
writeFileSync(OUTPUT_FILE, JSON.stringify(checksums, null, 2));
// Fix: the first log's template literal was mojibake-garbled (a stray "β" plus an
// embedded newline splitting the message). Restored as clean single-line output;
// original emoji presumed to be a checkmark/note — confirm against intended UX.
console.log(`✅ Generated checksums for ${Object.keys(checksums).length} files`);
console.log(`📝 Saved to ${OUTPUT_FILE}`);