mirror of
https://github.com/langgenius/dify.git
synced 2026-05-03 17:08:03 +08:00
chore: remove frontend changes
This commit is contained in:
583
web/scripts/analyze-component.js
Executable file
583
web/scripts/analyze-component.js
Executable file
@ -0,0 +1,583 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import { spawnSync } from 'node:child_process'
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import {
|
||||
ComponentAnalyzer,
|
||||
extractCopyContent,
|
||||
getComplexityLevel,
|
||||
listAnalyzableFiles,
|
||||
resolveDirectoryEntry,
|
||||
} from './component-analyzer.js'
|
||||
|
||||
// ============================================================================
|
||||
// Prompt Builder for AI Assistants
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Builds the human-readable "generate a test" prompt for one analyzed
 * component. The prompt is printed to the console and (in main()) also
 * copied to the clipboard for pasting into an AI assistant.
 *
 * NOTE(review): interior alignment/indentation of the banner and list lines
 * below could not be recovered exactly from the diff view — confirm against
 * the original file before treating spacing as authoritative.
 */
class TestPromptBuilder {
  /**
   * Render the full generation prompt.
   * @param {object} analysis - result record from ComponentAnalyzer.analyze()
   * @returns {string} multi-line prompt text
   */
  build(analysis) {
    // Spec files live next to the component source as <name>.spec.tsx.
    const testPath = analysis.path.replace(/\.tsx?$/, '.spec.tsx')

    return `
╔════════════════════════════════════════════════════════════════════════════╗
║ 📋 GENERATE TEST FOR DIFY COMPONENT ║
╚════════════════════════════════════════════════════════════════════════════╝

📍 Component: ${analysis.name}
📂 Path: ${analysis.path}
🎯 Test File: ${testPath}

📊 Component Analysis:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Type: ${analysis.type}
Total Complexity: ${analysis.complexity}/100 ${getComplexityLevel(analysis.complexity)}
Max Func Complexity: ${analysis.maxComplexity}/100 ${getComplexityLevel(analysis.maxComplexity)}
Lines: ${analysis.lineCount}
Usage: ${analysis.usageCount} reference${analysis.usageCount !== 1 ? 's' : ''}
Test Priority: ${analysis.priority.score} ${analysis.priority.level}

Features Detected:
${analysis.hasProps ? '✓' : '✗'} Props/TypeScript interfaces
${analysis.hasState ? '✓' : '✗'} Local state (useState/useReducer)
${analysis.hasEffects ? '✓' : '✗'} Side effects (useEffect)
${analysis.hasCallbacks ? '✓' : '✗'} Callbacks (useCallback)
${analysis.hasMemo ? '✓' : '✗'} Memoization (useMemo)
${analysis.hasEvents ? '✓' : '✗'} Event handlers
${analysis.hasRouter ? '✓' : '✗'} Next.js routing
${analysis.hasAPI ? '✓' : '✗'} API calls
${analysis.hasReactQuery ? '✓' : '✗'} React Query
${analysis.hasAhooks ? '✓' : '✗'} ahooks
${analysis.hasForwardRef ? '✓' : '✗'} Ref forwarding (forwardRef)
${analysis.hasComponentMemo ? '✓' : '✗'} Component memoization (React.memo)
${analysis.hasImperativeHandle ? '✓' : '✗'} Imperative handle
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

📝 TASK:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Please generate a comprehensive test file for this component at:
${testPath}

The component is located at:
${analysis.path}

${this.getSpecificGuidelines(analysis)}

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

📋 PROMPT FOR AI ASSISTANT (COPY THIS TO YOUR AI ASSISTANT):
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Generate a comprehensive test file for all files in @${path.dirname(analysis.path)}

Including but not limited to:
${this.buildFocusPoints(analysis)}

Create the test file at: ${testPath}

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
`
  }

  /**
   * Build the "Including but not limited to:" bullet list, one bullet per
   * feature flag that the analyzer detected, plus two unconditional bullets.
   * @param {object} analysis - component analysis record (feature booleans)
   * @returns {string} newline-joined bullet list
   */
  buildFocusPoints(analysis) {
    const points = []

    if (analysis.hasState)
      points.push('- Testing state management and updates')
    if (analysis.hasEffects)
      points.push('- Testing side effects and cleanup')
    if (analysis.hasCallbacks)
      points.push('- Testing callback stability and memoization')
    if (analysis.hasMemo)
      points.push('- Testing memoization logic and dependencies')
    if (analysis.hasEvents)
      points.push('- Testing user interactions and event handlers')
    if (analysis.hasRouter)
      points.push('- Mocking Next.js router hooks')
    if (analysis.hasAPI)
      points.push('- Mocking API calls')
    if (analysis.hasForwardRef)
      points.push('- Testing ref forwarding behavior')
    if (analysis.hasComponentMemo)
      points.push('- Testing component memoization')
    if (analysis.hasSuspense)
      points.push('- Testing Suspense boundaries and lazy loading')
    if (analysis.hasPortal)
      points.push('- Testing Portal rendering')
    if (analysis.hasImperativeHandle)
      points.push('- Testing imperative handle methods')
    // Always-on bullets: every test file should cover these regardless of
    // which features were detected.
    points.push('- Testing edge cases and error handling')
    points.push('- Testing all prop variations')

    return points.join('\n')
  }

  /**
   * Build the free-form guideline section inserted into the prompt body.
   * Each guideline group is gated on priority/usage, complexity thresholds,
   * detected features, path-based domain hints, or file size.
   * @param {object} analysis - component analysis record
   * @returns {string} guideline text wrapped in newlines, or '' if none apply
   */
  getSpecificGuidelines(analysis) {
    const guidelines = []

    // ===== Test Priority Guidance =====
    // CRITICAL takes precedence; otherwise fall through to raw usage-count
    // tiers (>50, then >20).
    if (analysis.priority.level.includes('CRITICAL')) {
      guidelines.push('🔴 CRITICAL PRIORITY component:')
      guidelines.push(`  - Used in ${analysis.usageCount} places across the codebase`)
      guidelines.push('  - Changes will have WIDE impact')
      guidelines.push('  - Require comprehensive test coverage')
      guidelines.push('  - Add regression tests for all use cases')
      guidelines.push('  - Consider integration tests with dependent components')
    }
    else if (analysis.usageCount > 50) {
      guidelines.push('🟠 VERY HIGH USAGE component:')
      guidelines.push(`  - Referenced ${analysis.usageCount} times in the codebase`)
      guidelines.push('  - Changes may affect many parts of the application')
      guidelines.push('  - Comprehensive test coverage is CRITICAL')
      guidelines.push('  - Add tests for all common usage patterns')
      guidelines.push('  - Consider regression tests')
    }
    else if (analysis.usageCount > 20) {
      guidelines.push('🟡 HIGH USAGE component:')
      guidelines.push(`  - Referenced ${analysis.usageCount} times in the codebase`)
      guidelines.push('  - Test coverage is important to prevent widespread bugs')
      guidelines.push('  - Add tests for common usage patterns')
    }

    // ===== Complexity Warning =====
    // Thresholds mirror main(): >75 is "high", >50 is "moderate".
    if (analysis.complexity > 75) {
      guidelines.push(`🔴 HIGH Total Complexity (${analysis.complexity}/100). Consider:`)
      guidelines.push('  - Splitting component into smaller pieces before testing')
      guidelines.push('  - Creating integration tests for complex workflows')
      guidelines.push('  - Using test.each() for data-driven tests')
    }
    else if (analysis.complexity > 50) {
      guidelines.push(`⚠️ MODERATE Total Complexity (${analysis.complexity}/100). Consider:`)
      guidelines.push('  - Breaking tests into multiple describe blocks')
      guidelines.push('  - Testing integration scenarios')
      guidelines.push('  - Grouping related test cases')
    }

    // ===== Max Function Complexity Warning =====
    if (analysis.maxComplexity > 75) {
      guidelines.push(`🔴 HIGH Single Function Complexity (max: ${analysis.maxComplexity}/100). Consider:`)
      guidelines.push('  - Breaking down the complex function into smaller helpers')
      guidelines.push('  - Extracting logic into custom hooks or utility functions')
    }
    else if (analysis.maxComplexity > 50) {
      guidelines.push(`⚠️ MODERATE Single Function Complexity (max: ${analysis.maxComplexity}/100). Consider:`)
      guidelines.push('  - Simplifying conditional logic')
      guidelines.push('  - Using early returns to reduce nesting')
    }

    // ===== State Management =====
    // The combined state+effects case gets its own guidance; otherwise each
    // feature gets a standalone group.
    if (analysis.hasState && analysis.hasEffects) {
      guidelines.push('🔄 State + Effects detected:')
      guidelines.push('  - Test state initialization and updates')
      guidelines.push('  - Test useEffect dependencies array')
      guidelines.push('  - Test cleanup functions (return from useEffect)')
      guidelines.push('  - Use waitFor() for async state changes')
    }
    else if (analysis.hasState) {
      guidelines.push('📊 State management detected:')
      guidelines.push('  - Test initial state values')
      guidelines.push('  - Test all state transitions')
      guidelines.push('  - Test state reset/cleanup scenarios')
    }
    else if (analysis.hasEffects) {
      guidelines.push('⚡ Side effects detected:')
      guidelines.push('  - Test effect execution conditions')
      guidelines.push('  - Verify dependencies array correctness')
      guidelines.push('  - Test cleanup on unmount')
    }

    // ===== Performance Optimization =====
    if (analysis.hasCallbacks || analysis.hasMemo || analysis.hasComponentMemo) {
      // Collect the specific memoization features for the header line.
      const features = []
      if (analysis.hasCallbacks)
        features.push('useCallback')
      if (analysis.hasMemo)
        features.push('useMemo')
      if (analysis.hasComponentMemo)
        features.push('React.memo')

      guidelines.push(`🚀 Performance optimization (${features.join(', ')}):`)
      guidelines.push('  - Verify callbacks maintain referential equality')
      guidelines.push('  - Test memoization dependencies')
      guidelines.push('  - Ensure expensive computations are cached')
      if (analysis.hasComponentMemo) {
        guidelines.push('  - Test component re-render behavior with prop changes')
      }
    }

    // ===== Ref Forwarding =====
    if (analysis.hasForwardRef || analysis.hasImperativeHandle) {
      guidelines.push('🔗 Ref forwarding detected:')
      guidelines.push('  - Test ref attachment to DOM elements')
      if (analysis.hasImperativeHandle) {
        guidelines.push('  - Test all exposed imperative methods')
        guidelines.push('  - Verify method behavior with different ref types')
      }
    }

    // ===== Suspense and Lazy Loading =====
    if (analysis.hasSuspense) {
      guidelines.push('⏳ Suspense/Lazy loading detected:')
      guidelines.push('  - Test fallback UI during loading')
      guidelines.push('  - Test component behavior after lazy load completes')
      guidelines.push('  - Test error boundaries with failed loads')
    }

    // ===== Portal =====
    if (analysis.hasPortal) {
      guidelines.push('🚪 Portal rendering detected:')
      guidelines.push('  - Test content renders in portal target')
      guidelines.push('  - Test portal cleanup on unmount')
      guidelines.push('  - Verify event bubbling through portal')
    }

    // ===== API Calls =====
    if (analysis.hasAPI) {
      guidelines.push('🌐 API calls detected:')
      guidelines.push('  - Mock API calls/hooks (useQuery, useMutation, fetch, etc.)')
      guidelines.push('  - Test loading, success, and error states')
      guidelines.push('  - Focus on component behavior, not the data fetching lib')
    }

    // ===== ahooks =====
    if (analysis.hasAhooks) {
      guidelines.push('🪝 ahooks detected (mock only, no need to test the lib):')
      guidelines.push('  - Mock ahooks utilities (useBoolean, useRequest, etc.)')
      guidelines.push('  - Focus on testing how your component uses the hooks')
      guidelines.push('  - Use fake timers if debounce/throttle is involved')
    }

    // ===== Routing =====
    if (analysis.hasRouter) {
      guidelines.push('🔀 Next.js routing detected:')
      guidelines.push('  - Mock useRouter, usePathname, useSearchParams')
      guidelines.push('  - Test navigation behavior and parameters')
      guidelines.push('  - Test query string handling')
      guidelines.push('  - Verify route guards/redirects if any')
    }

    // ===== Event Handlers =====
    if (analysis.hasEvents) {
      guidelines.push('🎯 Event handlers detected:')
      guidelines.push('  - Test all onClick, onChange, onSubmit handlers')
      guidelines.push('  - Test keyboard events (Enter, Escape, etc.)')
      guidelines.push('  - Verify event.preventDefault() calls if needed')
      guidelines.push('  - Test event bubbling/propagation')
    }

    // ===== Domain-Specific Components =====
    // These are substring checks on the component PATH, so e.g. any file
    // under a "workflow" directory gets the workflow guidance.
    if (analysis.path.includes('workflow')) {
      guidelines.push('⚙️ Workflow component:')
      guidelines.push('  - Test node configuration and validation')
      guidelines.push('  - Test data flow and variable passing')
      guidelines.push('  - Test edge connections and graph structure')
      guidelines.push('  - Verify error handling for invalid configs')
    }

    if (analysis.path.includes('dataset')) {
      guidelines.push('📚 Dataset component:')
      guidelines.push('  - Test file upload and validation')
      guidelines.push('  - Test pagination and data loading')
      guidelines.push('  - Test search and filtering')
      guidelines.push('  - Verify data format handling')
    }

    if (analysis.path.includes('app/configuration') || analysis.path.includes('config')) {
      guidelines.push('⚙️ Configuration component:')
      guidelines.push('  - Test form validation thoroughly')
      guidelines.push('  - Test save/reset functionality')
      guidelines.push('  - Test required vs optional fields')
      guidelines.push('  - Verify configuration persistence')
    }

    // ===== File Size Warning =====
    if (analysis.lineCount > 500) {
      guidelines.push('📏 Large component (500+ lines):')
      guidelines.push('  - Consider splitting into smaller components')
      guidelines.push('  - Test major sections separately')
      guidelines.push('  - Use helper functions to reduce test complexity')
    }

    // Surround with blank lines only when something was emitted, so an empty
    // guideline set leaves no gap in the prompt.
    return guidelines.length > 0 ? `\n${guidelines.join('\n')}\n` : ''
  }
}
|
||||
|
||||
/**
 * Builds the "review an existing test file" prompt used by --review mode.
 *
 * NOTE(review): `testCode` is accepted (and read by the caller from disk)
 * but never used in the prompt body — confirm whether embedding the test
 * source was intended.
 */
class TestReviewPromptBuilder {
  /**
   * Render the review prompt.
   * @param {object} params
   * @param {object} params.analysis - component analysis record
   * @param {string} params.testPath - path of the spec file under review
   * @param {string} params.testCode - spec file contents (currently unused)
   * @param {string|null} params.originalPromptSection - copy-section of the
   *        generation prompt, re-embedded as "original requirements"
   * @returns {string} multi-line review prompt text
   */
  build({ analysis, testPath, testCode, originalPromptSection }) {
    // Indent every non-blank line of the original prompt so it nests under
    // the "Original generation requirements:" header; blank lines stay empty.
    const formattedOriginalPrompt = originalPromptSection
      ? originalPromptSection
        .split('\n')
        .map(line => (line.trim().length > 0 ? `  ${line}` : ''))
        .join('\n')
        .trimEnd()
      : '  (original generation prompt unavailable)'

    return `
╔════════════════════════════════════════════════════════════════════════════╗
║ ✅ REVIEW TEST FOR DIFY COMPONENT ║
╚════════════════════════════════════════════════════════════════════════════╝

📂 Component Path: ${analysis.path}
🧪 Test File: ${testPath}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

📝 REVIEW TASK:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

📋 PROMPT FOR AI ASSISTANT (COPY THIS TO YOUR AI ASSISTANT):
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

You are reviewing the frontend test coverage for @${analysis.path}.

Original generation requirements:
${formattedOriginalPrompt}

Test file under review:
${testPath}

Checklist (ensure every item is addressed in your review):
- Confirm the tests satisfy all requirements listed above and in web/testing/TESTING.md.
- Verify Arrange → Act → Assert structure, mocks, and cleanup follow project conventions.
- Ensure all detected component features (state, effects, routing, API, events, etc.) are exercised, including edge cases and error paths.
- Check coverage of prop variations, null/undefined inputs, and high-priority workflows implied by usage score.
- Validate mocks/stubs interact correctly with Next.js router, network calls, and async updates.
- Ensure naming, describe/it structure, and placement match repository standards.

Output format:
1. Start with a single word verdict: PASS or FAIL.
2. If FAIL, list each missing requirement or defect as a separate bullet with actionable fixes.
3. Highlight any optional improvements or refactors after mandatory issues.
4. Mention any additional tests or tooling steps (e.g., pnpm lint/test) the developer should run.

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
`
  }
}
|
||||
|
||||
// ============================================================================
|
||||
// Main Function
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Print CLI usage to stdout. Called for --help/-h and when no path is given.
 *
 * NOTE(review): this text points at web/testing/testing.md while the review
 * prompt references web/testing/TESTING.md — confirm the real file name and
 * make the two consistent.
 */
function showHelp() {
  console.log(`
📋 Component Analyzer - Generate test prompts for AI assistants

Usage:
  node analyze-component.js <component-path> [options]
  pnpm analyze-component <component-path> [options]

Options:
  --help      Show this help message
  --json      Output analysis result as JSON (for programmatic use)
  --review    Generate a review prompt for existing test file

Examples:
  # Analyze a component and generate test prompt
  pnpm analyze-component app/components/base/button/index.tsx

  # Output as JSON
  pnpm analyze-component app/components/base/button/index.tsx --json

  # Review existing test
  pnpm analyze-component app/components/base/button/index.tsx --review

For complete testing guidelines, see: web/testing/testing.md
`)
}
|
||||
|
||||
/**
 * CLI entry point. Flow:
 *   1. Parse argv into flags (--review, --json, --help) and positionals.
 *   2. Resolve the component path (directories resolve to an entry file).
 *   3. Analyze the source with ComponentAnalyzer.
 *   4. Unless in --json/--review mode, bail out early with a refactoring
 *      recommendation when the component is too complex to test.
 *   5. Build a generation prompt (always) and, in --review mode, wrap it in
 *      a review prompt against an existing spec file.
 *   6. Print JSON or the prompt; best-effort copy to the macOS clipboard.
 *
 * Exits the process directly on errors (path not found, no entry file,
 * missing test file) — never throws to the caller.
 */
function main() {
  const rawArgs = process.argv.slice(2)

  let isReviewMode = false
  let isJsonMode = false
  const args = []

  // Split flags from positional arguments; --help exits immediately.
  rawArgs.forEach((arg) => {
    if (arg === '--review') {
      isReviewMode = true
      return
    }
    if (arg === '--json') {
      isJsonMode = true
      return
    }
    if (arg === '--help' || arg === '-h') {
      showHelp()
      process.exit(0)
    }
    args.push(arg)
  })

  if (args.length === 0) {
    showHelp()
    process.exit(1)
  }

  let componentPath = args[0]
  let absolutePath = path.resolve(process.cwd(), componentPath)

  // Check if path exists
  if (!fs.existsSync(absolutePath)) {
    console.error(`❌ Error: Path not found: ${componentPath}`)
    process.exit(1)
  }

  // If directory, try to find entry file
  if (fs.statSync(absolutePath).isDirectory()) {
    const resolvedFile = resolveDirectoryEntry(absolutePath, componentPath)
    if (resolvedFile) {
      absolutePath = resolvedFile.absolutePath
      componentPath = resolvedFile.componentPath
    }
    else {
      // List available files for user to choose
      const availableFiles = listAnalyzableFiles(absolutePath)
      console.error(`❌ Error: Directory does not contain a recognizable entry file: ${componentPath}`)
      if (availableFiles.length > 0) {
        console.error(`\n  Available files to analyze:`)
        availableFiles.forEach(f => console.error(`  - ${path.join(componentPath, f)}`))
        console.error(`\n  Please specify the exact file path, e.g.:`)
        console.error(`  pnpm analyze-component ${path.join(componentPath, availableFiles[0])}`)
      }
      process.exit(1)
    }
  }

  // Read source code
  const sourceCode = fs.readFileSync(absolutePath, 'utf-8')

  // Analyze
  const analyzer = new ComponentAnalyzer()
  const analysis = analyzer.analyze(sourceCode, componentPath, absolutePath)

  // Check if component is too complex - suggest refactoring instead of testing
  // Skip this check in JSON mode to always output analysis result
  if (!isReviewMode && !isJsonMode && (analysis.complexity > 75 || analysis.lineCount > 300)) {
    console.log(`
╔════════════════════════════════════════════════════════════════════════════╗
║ ⚠️ COMPONENT TOO COMPLEX TO TEST ║
╚════════════════════════════════════════════════════════════════════════════╝

📍 Component: ${analysis.name}
📂 Path: ${analysis.path}

📊 Component Metrics:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Complexity: ${analysis.complexity}/100 ${analysis.complexity > 75 ? '🔴 TOO HIGH' : analysis.complexity > 50 ? '⚠️ WARNING' : '🟢 OK'}
Max Func Complexity: ${analysis.maxComplexity}/100 ${analysis.maxComplexity > 75 ? '🔴 TOO HIGH' : analysis.maxComplexity > 50 ? '⚠️ WARNING' : '🟢 OK'}
Lines: ${analysis.lineCount} ${analysis.lineCount > 300 ? '🔴 TOO LARGE' : '🟢 OK'}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

🚫 RECOMMENDATION: REFACTOR BEFORE TESTING
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

This component is too complex to test effectively. Please consider:

1️⃣ **Split into smaller components**
   - Extract reusable UI sections into separate components
   - Separate business logic from presentation
   - Create smaller, focused components (< 300 lines each)

2️⃣ **Extract custom hooks**
   - Move state management logic to custom hooks
   - Extract complex data transformation logic
   - Separate API calls into dedicated hooks

3️⃣ **Simplify logic**
   - Reduce nesting depth
   - Break down complex conditions
   - Extract helper functions

4️⃣ **After refactoring**
   - Run this tool again on each smaller component
   - Generate tests for the refactored components
   - Tests will be easier to write and maintain

💡 TIP: Aim for components with:
   - Cognitive Complexity < 50/100 (preferably < 25/100)
   - Line count < 300 (preferably < 200)
   - Single responsibility principle

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
`)
    process.exit(0)
  }

  // Build prompt for AI assistant
  const builder = new TestPromptBuilder()
  const generationPrompt = builder.build(analysis)

  let prompt = generationPrompt

  if (isReviewMode) {
    // Second positional arg (if any) overrides the inferred spec path.
    const providedTestPath = args[1]
    const inferredTestPath = inferTestPath(componentPath)
    const testPath = providedTestPath ?? inferredTestPath
    const absoluteTestPath = path.resolve(process.cwd(), testPath)

    if (!fs.existsSync(absoluteTestPath)) {
      console.error(`❌ Error: Test file not found: ${testPath}`)
      process.exit(1)
    }

    const testCode = fs.readFileSync(absoluteTestPath, 'utf-8')
    const reviewBuilder = new TestReviewPromptBuilder()
    // Re-embed the generation prompt's copy-section as the "original
    // requirements" inside the review prompt.
    const originalPromptSection = extractCopyContent(generationPrompt)
    const normalizedTestPath = path.relative(process.cwd(), absoluteTestPath) || testPath

    prompt = reviewBuilder.build({
      analysis,
      testPath: normalizedTestPath,
      testCode,
      originalPromptSection,
    })
  }

  // JSON output mode
  if (isJsonMode) {
    console.log(JSON.stringify(analysis, null, 2))
    return
  }

  // Output
  console.log(prompt)

  // Best-effort clipboard copy — macOS only (pbcopy); silently skipped on
  // other platforms or when no copy-section is present.
  try {
    const checkPbcopy = spawnSync('which', ['pbcopy'], { stdio: 'pipe' })
    if (checkPbcopy.status !== 0)
      return
    const copyContent = extractCopyContent(prompt)
    if (!copyContent)
      return

    const result = spawnSync('pbcopy', [], {
      input: copyContent,
      encoding: 'utf-8',
    })

    if (result.status === 0) {
      console.log('\n📋 Prompt copied to clipboard!')
      console.log('   Paste it in your AI assistant:')
      console.log('   - Cursor: Cmd+L (Chat) or Cmd+I (Composer)')
      console.log('   - GitHub Copilot Chat: Cmd+I')
      console.log('   - Or any other AI coding tool\n')
    }
  }
  catch {
    // pbcopy failed, but don't break the script
  }
}
|
||||
|
||||
/**
 * Derive the default spec-file path for a component path by inserting
 * `.spec` before the file extension (e.g. `a/b.tsx` -> `a/b.spec.tsx`).
 * A path with no extension gets `.spec.ts` appended.
 *
 * @param {string} componentPath - component source path (relative or absolute)
 * @returns {string} inferred test-file path
 */
function inferTestPath(componentPath) {
  const ext = path.extname(componentPath)
  if (!ext)
    return `${componentPath}.spec.ts`
  // Fix: strip the extension from the END of the path only. The previous
  // `componentPath.replace(ext, ...)` replaced the FIRST occurrence of the
  // extension substring, corrupting paths whose directory segments also
  // contain it (e.g. `pages.ts/index.ts` -> `pages.spec.ts/index.ts`).
  return `${componentPath.slice(0, -ext.length)}.spec${ext}`
}
|
||||
|
||||
// ============================================================================
// Run
// ============================================================================

// Invoked unconditionally: this file is a CLI entry point (see shebang).
main()
|
||||
406
web/scripts/analyze-i18n-diff.ts
Normal file
406
web/scripts/analyze-i18n-diff.ts
Normal file
@ -0,0 +1,406 @@
|
||||
/**
|
||||
* This script compares i18n keys between current branch (flat JSON) and main branch (nested TS).
|
||||
*
|
||||
* It checks:
|
||||
* 1. All namespaces from main branch have corresponding JSON files
|
||||
* 2. No TS files exist in current branch (all should be converted to JSON)
|
||||
* 3. All keys from main branch exist in current branch
|
||||
* 4. Values for existing keys haven't changed
|
||||
* 5. Lists newly added keys and values
|
||||
*
|
||||
* Usage: npx tsx scripts/analyze-i18n-diff.ts
|
||||
*/
|
||||
|
||||
import { execSync } from 'node:child_process'
|
||||
import * as fs from 'node:fs'
|
||||
import * as path from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
|
||||
// ES modules have no __filename/__dirname; reconstruct them from
// import.meta.url so paths resolve relative to this script, not the CWD.
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)

// This script only diffs the en-US locale (script lives in web/scripts/,
// translations in web/i18n/en-US/).
const I18N_DIR = path.join(__dirname, '../i18n/en-US')
const LOCALE = 'en-US'
|
||||
|
||||
// A leaf translation value: a plain string or a list of strings (arrays are
// preserved whole by flattenObject, never split into .0/.1 keys).
type TranslationValue = string | string[]

// Flat map of dot-separated keys — the current-branch JSON shape.
type FlatTranslation = {
  [key: string]: TranslationValue
}

// Arbitrarily nested translation tree — the main-branch TS-module shape.
type NestedTranslation = {
  [key: string]: string | string[] | NestedTranslation
}

// Per-namespace diff between main and the current branch.
type AnalysisResult = {
  file: string
  // Keys present on main but absent in the current branch (regressions).
  missingKeys: string[]
  // Keys present in both whose values differ.
  changedValues: { key: string, oldValue: TranslationValue, newValue: TranslationValue }[]
  // Keys only present in the current branch (additions).
  newKeys: { key: string, value: TranslationValue }[]
}
|
||||
|
||||
/**
|
||||
* Flatten nested object to dot-separated keys
|
||||
* Arrays are preserved as-is (not split into .0, .1, etc.)
|
||||
*/
|
||||
function flattenObject(obj: NestedTranslation, prefix = ''): FlatTranslation {
|
||||
const result: FlatTranslation = {}
|
||||
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
const newKey = prefix ? `${prefix}.${key}` : key
|
||||
|
||||
if (typeof value === 'string') {
|
||||
result[newKey] = value
|
||||
}
|
||||
else if (Array.isArray(value)) {
|
||||
// Preserve arrays as-is
|
||||
result[newKey] = value as string[]
|
||||
}
|
||||
else if (typeof value === 'object' && value !== null) {
|
||||
Object.assign(result, flattenObject(value as NestedTranslation, newKey))
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
/**
|
||||
* Compare two translation values (string or array)
|
||||
*/
|
||||
function valuesEqual(a: TranslationValue, b: TranslationValue): boolean {
|
||||
if (typeof a === 'string' && typeof b === 'string') {
|
||||
return a === b
|
||||
}
|
||||
if (Array.isArray(a) && Array.isArray(b)) {
|
||||
if (a.length !== b.length)
|
||||
return false
|
||||
return a.every((item, index) => item === b[index])
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
* Format value for display
|
||||
*/
|
||||
function formatValue(value: TranslationValue): string {
|
||||
if (Array.isArray(value)) {
|
||||
return `[${value.map(v => `"${v}"`).join(', ')}]`
|
||||
}
|
||||
return `"${value}"`
|
||||
}
|
||||
|
||||
/**
 * Parse a main-branch `*.ts` translation module into its object literal.
 *
 * Strips the `const translation = ` prefix and the trailing
 * `export default translation` statement, then evaluates the remaining
 * object literal via the Function constructor — JSON.parse cannot handle
 * JS-only syntax such as unquoted keys or template literals.
 *
 * Returns {} (after logging the error and a content preview) when
 * evaluation fails, so one broken namespace doesn't abort the whole run.
 *
 * NOTE(review): this executes code taken from the repository's main branch.
 * That is acceptable for a local dev script but must not be reused on
 * untrusted input.
 */
function parseTsContent(content: string): NestedTranslation {
  // Remove 'const translation = ' and 'export default translation'
  let cleaned = content
    .replace(/const\s+translation\s*=\s*/, '')
    .replace(/export\s+default\s+translation\s*(?:;\s*)?$/, '')
    .trim()

  // Remove trailing semicolon if present
  if (cleaned.endsWith(';'))
    cleaned = cleaned.slice(0, -1)

  // Use Function constructor to safely evaluate the object literal
  // This handles JS object syntax like unquoted keys, template literals, etc.
  try {
    // eslint-disable-next-line no-new-func, sonarjs/code-eval
    const fn = new Function(`return (${cleaned})`)
    return fn() as NestedTranslation
  }
  catch (e) {
    console.error('Failed to parse TS content:', e)
    console.error('Content preview:', cleaned.slice(0, 200))
    return {}
  }
}
|
||||
|
||||
/**
 * Read a file's content as it exists on the `main` branch via `git show`.
 *
 * @param filePath - path relative to the en-US i18n directory (e.g. `app.ts`)
 * @returns the file content, or null when it does not exist on main — note
 *          any git failure (missing ref, not a repo, …) is deliberately
 *          collapsed into the same null/"not found" result.
 */
function getMainBranchFile(filePath: string): string | null {
  try {
    const relativePath = `./i18n/${LOCALE}/${filePath}`
    // eslint-disable-next-line sonarjs/os-command
    return execSync(`git show main:${relativePath}`, {
      encoding: 'utf-8',
      stdio: ['pipe', 'pipe', 'pipe'],
    })
  }
  catch {
    return null
  }
}
|
||||
|
||||
/**
|
||||
* Get list of translation files
|
||||
*/
|
||||
function getTranslationFiles(): string[] {
|
||||
const files = fs.readdirSync(I18N_DIR)
|
||||
return files.filter(f => f.endsWith('.json')).map(f => f.replace('.json', ''))
|
||||
}
|
||||
|
||||
/**
 * List the translation namespaces on the `main` branch: the basenames of the
 * `.ts` files under i18n/en-US, as reported by `git ls-tree`.
 *
 * @returns namespace names, or [] when git fails (e.g. no `main` ref) —
 *          errors are swallowed so the caller just sees "no namespaces".
 */
function getMainBranchNamespaces(): string[] {
  try {
    const relativePath = `./i18n/${LOCALE}`
    // eslint-disable-next-line sonarjs/os-command
    const output = execSync(`git ls-tree --name-only main ${relativePath}/`, {
      encoding: 'utf-8',
      stdio: ['pipe', 'pipe', 'pipe'],
    })
    return output
      .trim()
      .split('\n')
      .filter(f => f.endsWith('.ts'))
      .map(f => path.basename(f, '.ts'))
  }
  catch {
    return []
  }
}
|
||||
|
||||
// Result of checkNamespaceFiles(): file-level consistency between the
// main branch (nested .ts modules) and the current branch (flat .json).
type NamespaceCheckResult = {
  // Namespaces (basenames of .ts files) found on main.
  mainNamespaces: string[]
  // Namespaces with a .json file in the current branch.
  currentJsonFiles: string[]
  // Namespaces that still have a .ts file in the current branch.
  currentTsFiles: string[]
  // Main namespaces with no .json counterpart here (conversion gaps).
  missingJsonFiles: string[]
  // Leftover .ts files — the migration expects none to remain.
  unexpectedTsFiles: string[]
}
|
||||
|
||||
/**
 * Check namespace-file consistency between main (nested TS) and the current
 * branch (flat JSON): every main namespace should have a JSON counterpart,
 * and no TS translation files should remain after the migration.
 *
 * @returns the full accounting; callers decide what counts as an error.
 */
function checkNamespaceFiles(): NamespaceCheckResult {
  const mainNamespaces = getMainBranchNamespaces()
  const currentFiles = fs.readdirSync(I18N_DIR)

  const currentJsonFiles = currentFiles
    .filter(f => f.endsWith('.json'))
    .map(f => f.replace('.json', ''))

  const currentTsFiles = currentFiles
    .filter(f => f.endsWith('.ts'))
    .map(f => f.replace('.ts', ''))

  // Check which namespaces from main are missing json files
  const missingJsonFiles = mainNamespaces.filter(ns => !currentJsonFiles.includes(ns))

  // ts files should not exist in current branch, so every one found is
  // "unexpected" by definition.
  const unexpectedTsFiles = currentTsFiles

  return {
    mainNamespaces,
    currentJsonFiles,
    currentTsFiles,
    missingJsonFiles,
    unexpectedTsFiles,
  }
}
|
||||
|
||||
/**
 * Diff one translation namespace between the current branch (flat JSON on
 * disk) and the main branch (nested TS via git).
 *
 * @param baseName - namespace name without extension (e.g. `app`)
 * @returns missing keys, changed values, and newly added keys. When the
 *          namespace does not exist on main, every current key is reported
 *          as new.
 */
function analyzeFile(baseName: string): AnalysisResult {
  const result: AnalysisResult = {
    file: baseName,
    missingKeys: [],
    changedValues: [],
    newKeys: [],
  }

  // Read current branch JSON file
  const jsonPath = path.join(I18N_DIR, `${baseName}.json`)
  const currentContent = JSON.parse(fs.readFileSync(jsonPath, 'utf-8')) as Record<string, TranslationValue>

  // Read main branch TS file
  const tsContent = getMainBranchFile(`${baseName}.ts`)
  if (!tsContent) {
    // New file, all keys are new
    for (const [key, value] of Object.entries(currentContent)) {
      result.newKeys.push({ key, value })
    }
    return result
  }

  // Parse and flatten the TS content so both sides use dot-separated keys.
  const nestedObj = parseTsContent(tsContent)
  const mainFlat = flattenObject(nestedObj)

  // Check for missing keys (in main but not in current)
  for (const key of Object.keys(mainFlat)) {
    if (!(key in currentContent)) {
      result.missingKeys.push(key)
    }
  }

  // Check for changed values
  for (const [key, oldValue] of Object.entries(mainFlat)) {
    if (key in currentContent && !valuesEqual(currentContent[key], oldValue)) {
      result.changedValues.push({
        key,
        oldValue,
        newValue: currentContent[key],
      })
    }
  }

  // Find new keys (in current but not in main)
  for (const [key, value] of Object.entries(currentContent)) {
    if (!(key in mainFlat)) {
      result.newKeys.push({ key, value })
    }
  }

  return result
}
|
||||
|
||||
/**
 * Main analysis function
 *
 * Compares the current branch's flat JSON i18n files against main's nested
 * TS files, prints a human-readable report, writes i18n-analysis-report.json,
 * and exits non-zero when namespaces or keys are missing.
 */
function main() {
  console.log('🔍 Analyzing i18n differences between current branch (flat JSON) and main branch (nested TS)...\n')

  // Check namespace file consistency first
  console.log('📂 Checking namespace files...')
  console.log('='.repeat(60))
  const nsCheck = checkNamespaceFiles()

  console.log(`Namespaces in main branch (ts files): ${nsCheck.mainNamespaces.length}`)
  console.log(`JSON files in current branch: ${nsCheck.currentJsonFiles.length}`)
  console.log(`TS files in current branch: ${nsCheck.currentTsFiles.length}`)

  // Tracks whether the namespace-level check failed (reported at the end).
  let hasNamespaceError = false

  if (nsCheck.missingJsonFiles.length > 0) {
    console.log('\n❌ Missing JSON files (namespace exists in main but no corresponding JSON):')
    for (const ns of nsCheck.missingJsonFiles) {
      console.log(`  - ${ns}.json (was ${ns}.ts in main)`)
    }
    hasNamespaceError = true
  }
  else {
    console.log('\n✅ All namespaces from main branch have corresponding JSON files')
  }

  if (nsCheck.unexpectedTsFiles.length > 0) {
    console.log('\n❌ Unexpected TS files (should be deleted):')
    for (const ns of nsCheck.unexpectedTsFiles) {
      console.log(`  - ${ns}.ts`)
    }
    hasNamespaceError = true
  }
  else {
    console.log('✅ No TS files in current branch (all converted to JSON)')
  }

  console.log()

  // Per-file key diffing.
  const files = getTranslationFiles()
  const allResults: AnalysisResult[] = []

  let totalMissing = 0
  let totalChanged = 0
  let totalNew = 0

  for (const file of files) {
    const result = analyzeFile(file)
    allResults.push(result)

    totalMissing += result.missingKeys.length
    totalChanged += result.changedValues.length
    totalNew += result.newKeys.length
  }

  // Summary
  console.log('📊 Key Analysis Summary')
  console.log('='.repeat(60))
  console.log(`Total files analyzed: ${files.length}`)
  console.log(`Missing keys (in main but not in current): ${totalMissing}`)
  console.log(`Changed values: ${totalChanged}`)
  console.log(`New keys: ${totalNew}`)
  console.log()

  // Detailed report
  if (totalMissing > 0) {
    console.log('\n❌ MISSING KEYS (exist in main but not in current branch)')
    console.log('='.repeat(60))
    for (const result of allResults) {
      if (result.missingKeys.length > 0) {
        console.log(`\n📁 ${result.file}:`)
        for (const key of result.missingKeys) {
          console.log(`  - ${key}`)
        }
      }
    }
  }

  if (totalChanged > 0) {
    console.log('\n⚠️ CHANGED VALUES (same key, different value)')
    console.log('='.repeat(60))
    for (const result of allResults) {
      if (result.changedValues.length > 0) {
        console.log(`\n📁 ${result.file}:`)
        for (const { key, oldValue, newValue } of result.changedValues) {
          console.log(`  Key: ${key}`)
          console.log(`    Old: ${formatValue(oldValue)}`)
          console.log(`    New: ${formatValue(newValue)}`)
          console.log()
        }
      }
    }
  }

  if (totalNew > 0) {
    console.log('\n✨ NEW KEYS (exist in current branch but not in main)')
    console.log('='.repeat(60))
    for (const result of allResults) {
      if (result.newKeys.length > 0) {
        console.log(`\n📁 ${result.file}:`)
        for (const { key, value } of result.newKeys) {
          console.log(`  + ${key}: ${formatValue(value)}`)
        }
      }
    }
  }

  // Write detailed report to JSON file
  const reportPath = path.join(__dirname, '../i18n-analysis-report.json')
  fs.writeFileSync(reportPath, JSON.stringify({
    summary: {
      totalFiles: files.length,
      missingKeys: totalMissing,
      changedValues: totalChanged,
      newKeys: totalNew,
    },
    namespaceCheck: {
      mainNamespaces: nsCheck.mainNamespaces,
      currentJsonFiles: nsCheck.currentJsonFiles,
      missingJsonFiles: nsCheck.missingJsonFiles,
      unexpectedTsFiles: nsCheck.unexpectedTsFiles,
    },
    details: allResults,
  }, null, 2))

  console.log(`\n📄 Detailed report written to: i18n-analysis-report.json`)

  // Exit with error code if there are issues.
  // Namespace problems take precedence over key-level problems.
  if (hasNamespaceError) {
    console.log('\n⚠️ Warning: Namespace file issues detected!')
    process.exit(1)
  }

  if (totalMissing > 0) {
    console.log('\n⚠️ Warning: Some keys are missing in the current branch!')
    process.exit(1)
  }

  console.log('\n✅ All namespace files and keys from main branch exist in current branch.')
}
|
||||
|
||||
// Entry point: run the analysis when this script is executed.
main()
||||
336
web/scripts/auto-gen-i18n.js
Normal file
336
web/scripts/auto-gen-i18n.js
Normal file
@ -0,0 +1,336 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
import { translate } from 'bing-translate-api'
|
||||
import data from '../i18n-config/languages'
|
||||
|
||||
// Resolve this module's directory (ESM modules have no built-in __dirname).
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)

// Source-of-truth locale: all other locales are generated from en-US.
const targetLanguage = 'en-US'
const i18nFolder = '../i18n' // Path to i18n folder relative to this script
// https://github.com/plainheart/bing-translate-api/blob/master/src/met/lang.json
// Map each supported locale to the code the Bing translate API expects:
// Chinese variants pass through unchanged, every other locale is reduced to
// its base language (e.g. 'ja-JP' -> 'ja').
const languageKeyMap = data.languages.reduce((map, language) => {
  if (language.supported) {
    if (language.value === 'zh-Hans' || language.value === 'zh-Hant')
      map[language.value] = language.value
    else
      map[language.value] = language.value.split('-')[0]
  }

  return map
}, {})

// All locales we can auto-translate (includes en-US itself; filtered later).
const supportedLanguages = Object.keys(languageKeyMap)
|
||||
|
||||
// Parse CLI flags for the generation script.
// Supported: --file <names...>, --lang <locales...>, --dry-run, --help/-h.
// Malformed usage (comma lists, `--flag=value` form) is collected in `errors`.
function parseArgs(argv) {
  const parsed = {
    files: [],
    languages: [],
    isDryRun: false,
    help: false,
    errors: [],
  }

  // Gather the run of non-flag tokens immediately following argv[from].
  const readValues = (from) => {
    const collected = []
    let position = from + 1
    while (position < argv.length && !argv[position].startsWith('--')) {
      const token = argv[position].trim()
      if (token)
        collected.push(token)
      position++
    }
    return { values: collected, nextIndex: position - 1 }
  }

  // Reject empty or comma-separated value lists, recording a usage error.
  const isValidList = (values, flag) => {
    if (!values.length) {
      parsed.errors.push(`${flag} requires at least one value. Example: ${flag} app billing`)
      return false
    }
    if (values.some(value => value.includes(','))) {
      parsed.errors.push(`${flag} expects space-separated values. Example: ${flag} app billing`)
      return false
    }
    return true
  }

  for (let index = 2; index < argv.length; index++) {
    const arg = argv[index]

    if (arg === '--dry-run') {
      parsed.isDryRun = true
    }
    else if (arg === '--help' || arg === '-h') {
      parsed.help = true
      break
    }
    else if (arg.startsWith('--file=')) {
      parsed.errors.push('--file expects space-separated values. Example: --file app billing')
    }
    else if (arg === '--file') {
      const { values, nextIndex } = readValues(index)
      if (isValidList(values, '--file'))
        parsed.files.push(...values)
      index = nextIndex
    }
    else if (arg.startsWith('--lang=')) {
      parsed.errors.push('--lang expects space-separated values. Example: --lang zh-Hans ja-JP')
    }
    else if (arg === '--lang') {
      const { values, nextIndex } = readValues(index)
      if (isValidList(values, '--lang'))
        parsed.languages.push(...values)
      index = nextIndex
    }
  }

  return parsed
}
|
||||
|
||||
// Print CLI usage for the i18n:gen script.
function printHelp() {
  console.log(`Usage: pnpm run i18n:gen [options]

Options:
  --file <name...>  Process only specific files; provide space-separated names and repeat --file if needed
  --lang <locale>   Process only specific locales; provide space-separated locales and repeat --lang if needed (default: all supported except en-US)
  --dry-run         Preview changes without writing files
  -h, --help        Show help

Examples:
  pnpm run i18n:gen --file app common --lang zh-Hans ja-JP
  pnpm run i18n:gen --dry-run
`)
}
|
||||
|
||||
/**
 * Shield i18n placeholders from machine translation.
 *
 * Replaces {{mustache}}, ${template} and <tag>-like spans with opaque
 * `__PH_n__` tokens so the translation API leaves them alone, and returns a
 * `restore` function that maps the tokens back after translation.
 *
 * @param {string} text - source string possibly containing placeholders
 * @returns {{ safeText: string, restore: (translated: string) => string }}
 */
function protectPlaceholders(text) {
  const placeholders = []
  let safeText = text
  const patterns = [
    /\{\{[^{}]+\}\}/g, // mustache
    /\$\{[^{}]+\}/g, // template expressions
    /<[^>]+>/g, // html-like tags
  ]

  patterns.forEach((pattern) => {
    safeText = safeText.replace(pattern, (match) => {
      const token = `__PH_${placeholders.length}__`
      placeholders.push({ token, value: match })
      return token
    })
  })

  return {
    safeText,
    restore(translated) {
      // Use split/join instead of String.replace with the raw value: a
      // placeholder containing replacement patterns such as `$&`, `$$` or
      // `$'` would otherwise be expanded by replace() and corrupt the output.
      return placeholders.reduce((result, { token, value }) => result.split(token).join(value), translated)
    },
  }
}
|
||||
|
||||
// Translate a single string into the target locale, guarding placeholders.
// Non-strings and blank strings pass through untouched; API failures fall
// back to the source text and are flagged as skipped (with the error).
async function translateText(source, toLanguage) {
  if (typeof source !== 'string' || !source.trim())
    return { value: source, skipped: false }

  const { safeText, restore } = protectPlaceholders(source)

  try {
    const { translation } = await translate(safeText, null, languageKeyMap[toLanguage])
    return { value: restore(translation), skipped: false }
  }
  catch (error) {
    console.error(`❌ Error translating to ${toLanguage}:`, error.message)
    return { value: source, skipped: true, error: error.message }
  }
}
|
||||
|
||||
// Fill in keys present in the source locale but absent from the target,
// mutating `targetObject` in place. Translations run sequentially so that
// failures stay attributable per key.
async function translateMissingKeys(sourceObj, targetObject, toLanguage) {
  const report = { skipped: [], translated: [] }

  for (const [key, sourceValue] of Object.entries(sourceObj)) {
    // Never overwrite an existing (possibly hand-tuned) translation.
    if (targetObject[key] !== undefined)
      continue

    const outcome = await translateText(sourceValue, toLanguage)
    targetObject[key] = outcome.value ?? ''

    if (outcome.skipped)
      report.skipped.push(`${key}: ${sourceValue}`)
    else
      report.translated.push(key)
  }

  return report
}
|
||||
/**
 * Translate one namespace file into one target locale.
 *
 * Reads `<en-US>/<fileName>.json` as the source of truth, merges any missing
 * keys (machine-translated) into the target locale's file, and writes the
 * result back unless `isDryRun` is set.
 *
 * @param {string} fileName - namespace (file name without extension)
 * @param {string} toGenLanguage - target locale, e.g. 'zh-Hans'
 * @param {boolean} [isDryRun] - when true, report but do not write
 * @returns {Promise<{skipped: string[], translated: string[]}>} per-key summary
 * @throws when the source file is unreadable or not a valid JSON object
 */
async function autoGenTrans(fileName, toGenLanguage, isDryRun = false) {
  const fullKeyFilePath = path.resolve(__dirname, i18nFolder, targetLanguage, `${fileName}.json`)
  const toGenLanguageFilePath = path.resolve(__dirname, i18nFolder, toGenLanguage, `${fileName}.json`)

  try {
    const content = fs.readFileSync(fullKeyFilePath, 'utf8')
    const fullKeyContent = JSON.parse(content)

    if (!fullKeyContent || typeof fullKeyContent !== 'object')
      throw new Error(`Failed to extract translation object from ${fullKeyFilePath}`)

    // if toGenLanguageFilePath does not exist, create it with empty object
    let toGenOutPut = {}
    if (fs.existsSync(toGenLanguageFilePath)) {
      const existingContent = fs.readFileSync(toGenLanguageFilePath, 'utf8')
      toGenOutPut = JSON.parse(existingContent)
    }

    console.log(`\n🌍 Processing ${fileName} for ${toGenLanguage}...`)
    const result = await translateMissingKeys(fullKeyContent, toGenOutPut, toGenLanguage)

    // Generate summary report
    console.log(`\n📊 Translation Summary for ${fileName} -> ${toGenLanguage}:`)
    console.log(`  ✅ Translated: ${result.translated.length} keys`)
    console.log(`  ⏭️ Skipped: ${result.skipped.length} keys`)

    if (result.skipped.length > 0) {
      // Show at most five skipped keys to keep the log readable.
      console.log(`\n⚠️ Skipped keys in ${fileName} (${toGenLanguage}):`)
      result.skipped.slice(0, 5).forEach(item => console.log(`  - ${item}`))
      if (result.skipped.length > 5)
        console.log(`  ... and ${result.skipped.length - 5} more`)
    }

    // Trailing newline keeps generated JSON files POSIX-friendly.
    const res = `${JSON.stringify(toGenOutPut, null, 2)}\n`

    if (!isDryRun) {
      fs.writeFileSync(toGenLanguageFilePath, res)
      console.log(`💾 Saved translations to ${toGenLanguageFilePath}`)
    }
    else {
      console.log(`🔍 [DRY RUN] Would save translations to ${toGenLanguageFilePath}`)
    }

    return result
  }
  catch (error) {
    console.error(`Error processing file ${fullKeyFilePath}:`, error.message)
    throw error
  }
}
|
||||
|
||||
// Add command line argument support
const args = parseArgs(process.argv)
// Convenience aliases for the parsed flags used throughout main().
const isDryRun = args.isDryRun
const targetFiles = args.files
const targetLangs = args.languages
|
||||
|
||||
// Rate limiting helper: resolves after `ms` milliseconds.
function delay(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms)
  })
}
|
||||
|
||||
/**
 * CLI entry point: translate every missing key for each requested
 * namespace/locale pair, with basic rate limiting between API calls.
 * Sets process.exitCode = 1 when any file/locale pair fails.
 */
async function main() {
  if (args.help) {
    printHelp()
    return
  }

  if (args.errors.length) {
    args.errors.forEach(message => console.error(`❌ ${message}`))
    printHelp()
    process.exit(1)
    return
  }

  console.log('🚀 Starting i18n:gen script...')
  console.log(`📋 Mode: ${isDryRun ? 'DRY RUN (no files will be modified)' : 'LIVE MODE'}`)

  // Namespaces are derived from the en-US folder (the source of truth).
  const filesInEn = fs
    .readdirSync(path.resolve(__dirname, i18nFolder, targetLanguage))
    .filter(file => /\.json$/.test(file)) // Only process .json files
    .map(file => file.replace(/\.json$/, ''))

  // Filter by target files if specified
  const filesToProcess = targetFiles.length > 0 ? filesInEn.filter(f => targetFiles.includes(f)) : filesInEn
  const languagesToProcess = Array.from(new Set((targetLangs.length > 0 ? targetLangs : supportedLanguages)
    .filter(lang => lang !== targetLanguage)))

  // Reject locales the translation API has no mapping for.
  const unknownLangs = languagesToProcess.filter(lang => !languageKeyMap[lang])
  if (unknownLangs.length) {
    console.error(`❌ Unsupported languages: ${unknownLangs.join(', ')}`)
    process.exit(1)
  }

  if (!filesToProcess.length) {
    console.log('ℹ️ No files to process based on provided arguments')
    return
  }

  if (!languagesToProcess.length) {
    console.log('ℹ️ No languages to process (did you only specify en-US?)')
    return
  }

  console.log(`📁 Files to process: ${filesToProcess.join(', ')}`)
  console.log(`🌍 Languages to process: ${languagesToProcess.join(', ')}`)

  let totalTranslated = 0
  let totalSkipped = 0
  let totalErrors = 0

  // Process files sequentially to avoid API rate limits
  for (const file of filesToProcess) {
    console.log(`\n📄 Processing file: ${file}`)

    // Process languages with rate limiting
    for (const language of languagesToProcess) {
      try {
        const result = await autoGenTrans(file, language, isDryRun)
        totalTranslated += result.translated.length
        totalSkipped += result.skipped.length

        // Rate limiting: wait 500ms between language processing
        await delay(500)
      }
      catch (e) {
        // One failed file/locale pair should not abort the whole run.
        console.error(`❌ Error translating ${file} to ${language}:`, e.message)
        totalErrors++
      }
    }
  }

  // Final summary
  console.log('\n🎉 Auto-translation completed!')
  console.log('📊 Final Summary:')
  console.log(`  ✅ Total keys translated: ${totalTranslated}`)
  console.log(`  ⏭️ Total keys skipped: ${totalSkipped}`)
  console.log(`  ❌ Total errors: ${totalErrors}`)

  if (isDryRun)
    console.log('\n💡 This was a dry run. To actually translate, run without --dry-run flag.')

  // Non-zero exit for CI without cutting the summary output short.
  if (totalErrors > 0)
    process.exitCode = 1
}
|
||||
|
||||
// Run the script; surface any unhandled failure with a non-zero exit code.
main().catch((error) => {
  console.error('❌ Unexpected error:', error.message)
  process.exit(1)
})
|
||||
334
web/scripts/check-i18n.js
Normal file
334
web/scripts/check-i18n.js
Normal file
@ -0,0 +1,334 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
import data from '../i18n-config/languages'
|
||||
|
||||
// Resolve this module's directory (ESM modules have no built-in __dirname).
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)

// Reference locale: every other locale is compared against en-US.
const targetLanguage = 'en-US'

// All locale codes marked as supported in the i18n config.
const languages = data.languages.filter(language => language.supported).map(language => language.value)
|
||||
|
||||
/**
 * Parse CLI flags for the i18n check script.
 *
 * Supported flags:
 *   --file <names...>   limit the check to specific translation files
 *   --lang <locales...> limit the check to specific locales
 *   --auto-remove       delete extra keys automatically
 *   --help, -h          show usage
 *
 * @param {string[]} argv - raw process.argv
 * @returns {{files: string[], languages: string[], autoRemove: boolean, help: boolean, errors: string[]}}
 *   Malformed usage is collected in `errors` rather than thrown.
 */
function parseArgs(argv) {
  const args = {
    files: [],
    languages: [],
    autoRemove: false,
    help: false,
    errors: [],
  }

  // Per-flag usage examples so error messages stay accurate for each flag
  // (previously --lang errors showed the --file example "app billing").
  const EXAMPLES = {
    '--file': 'app billing',
    '--lang': 'zh-Hans ja-JP',
  }

  // Collect the run of non-flag tokens following argv[startIndex].
  const collectValues = (startIndex) => {
    const values = []
    let cursor = startIndex + 1
    while (cursor < argv.length && !argv[cursor].startsWith('--')) {
      const value = argv[cursor].trim()
      if (value)
        values.push(value)
      cursor++
    }
    return { values, nextIndex: cursor - 1 }
  }

  // Validate a value list; record a usage error (with the flag's own example)
  // when it is empty or comma-separated.
  const validateList = (values, flag) => {
    const example = EXAMPLES[flag]
    if (!values.length) {
      args.errors.push(`${flag} requires at least one value. Example: ${flag} ${example}`)
      return false
    }

    const invalid = values.find(value => value.includes(','))
    if (invalid) {
      args.errors.push(`${flag} expects space-separated values. Example: ${flag} ${example}`)
      return false
    }

    return true
  }

  for (let index = 2; index < argv.length; index++) {
    const arg = argv[index]

    if (arg === '--auto-remove') {
      args.autoRemove = true
      continue
    }

    if (arg === '--help' || arg === '-h') {
      args.help = true
      break
    }

    if (arg.startsWith('--file=')) {
      args.errors.push('--file expects space-separated values. Example: --file app billing')
      continue
    }

    if (arg === '--file') {
      const { values, nextIndex } = collectValues(index)
      if (validateList(values, '--file'))
        args.files.push(...values)
      index = nextIndex
      continue
    }

    if (arg.startsWith('--lang=')) {
      args.errors.push('--lang expects space-separated values. Example: --lang zh-Hans ja-JP')
      continue
    }

    if (arg === '--lang') {
      const { values, nextIndex } = collectValues(index)
      if (validateList(values, '--lang'))
        args.languages.push(...values)
      index = nextIndex
      continue
    }
  }

  return args
}
|
||||
|
||||
// Print CLI usage for the i18n:check script.
function printHelp() {
  console.log(`Usage: pnpm run i18n:check [options]

Options:
  --file <name...>  Check only specific files; provide space-separated names and repeat --file if needed
  --lang <locale>   Check only specific locales; provide space-separated locales and repeat --lang if needed
  --auto-remove     Remove extra keys automatically
  -h, --help        Show help

Examples:
  pnpm run i18n:check --file app billing --lang zh-Hans ja-JP
  pnpm run i18n:check --auto-remove
`)
}
|
||||
|
||||
/**
 * Collect every translation key for one locale, prefixed with the camelCase
 * namespace name (e.g. 'appDebug.someKey').
 *
 * Rewritten to use fs.promises instead of wrapping the callback readdir in an
 * explicit Promise: the old version could call reject() for one file yet keep
 * iterating the rest and still reach resolve() afterwards.
 *
 * @param {string} language - locale folder name under ../i18n
 * @returns {Promise<string[]>} fully-qualified keys across all JSON files
 * @throws on unreadable folder, unreadable file, or invalid JSON
 */
async function getKeysFromLanguage(language) {
  const folderPath = path.resolve(__dirname, '../i18n', language)

  let files
  try {
    files = await fs.promises.readdir(folderPath)
  }
  catch (err) {
    console.error('Error reading folder:', err)
    throw err
  }

  const allKeys = []

  // Filter only .json files
  for (const file of files.filter(name => /\.json$/.test(name))) {
    const filePath = path.join(folderPath, file)
    const fileName = file.replace(/\.json$/, '') // Remove file extension
    const camelCaseFileName = fileName.replace(/[-_](.)/g, (_, c) =>
      c.toUpperCase()) // Convert to camel case

    let translationObj
    try {
      const content = await fs.promises.readFile(filePath, 'utf8')
      translationObj = JSON.parse(content)
    }
    catch (error) {
      console.error(`Error processing file ${filePath}:`, error.message)
      throw error
    }

    if (!translationObj || typeof translationObj !== 'object') {
      console.error(`Error parsing file: ${filePath}`)
      throw new Error(`Error parsing file: ${filePath}`)
    }

    // Flat structure: just get all keys directly
    allKeys.push(...Object.keys(translationObj).map(key => `${camelCaseFileName}.${key}`))
  }

  return allKeys
}
|
||||
|
||||
/**
 * Remove the given extra keys from one locale's translation file.
 *
 * @param {string} language - locale folder name, e.g. 'zh-Hans'
 * @param {string} fileName - namespace file name without extension
 * @param {string[]} extraKeys - fully-qualified keys ('camelFileName.key')
 * @returns {Promise<boolean>} true when the file was modified and rewritten
 */
async function removeExtraKeysFromFile(language, fileName, extraKeys) {
  const filePath = path.resolve(__dirname, '../i18n', language, `${fileName}.json`)

  if (!fs.existsSync(filePath)) {
    console.log(`⚠️ File not found: ${filePath}`)
    return false
  }

  try {
    // Filter keys that belong to this file
    const camelCaseFileName = fileName.replace(/[-_](.)/g, (_, c) => c.toUpperCase())
    const fileSpecificKeys = extraKeys
      .filter(key => key.startsWith(`${camelCaseFileName}.`))
      .map(key => key.substring(camelCaseFileName.length + 1)) // Remove file prefix

    if (fileSpecificKeys.length === 0)
      return false

    console.log(`🔄 Processing file: ${filePath}`)

    // Read and parse JSON
    const content = fs.readFileSync(filePath, 'utf8')
    const translationObj = JSON.parse(content)

    let modified = false

    // Remove each extra key (flat structure - direct property deletion)
    for (const keyToRemove of fileSpecificKeys) {
      if (keyToRemove in translationObj) {
        delete translationObj[keyToRemove]
        console.log(`🗑️ Removed key: ${keyToRemove}`)
        modified = true
      }
      else {
        console.log(`⚠️ Could not find key: ${keyToRemove}`)
      }
    }

    if (modified) {
      // Write back to file
      const newContent = `${JSON.stringify(translationObj, null, 2)}\n`
      fs.writeFileSync(filePath, newContent)
      console.log(`💾 Updated file: ${filePath}`)
      return true
    }

    return false
  }
  catch (error) {
    // A malformed file should not abort the whole auto-remove pass.
    console.error(`Error processing file ${filePath}:`, error.message)
    return false
  }
}
|
||||
|
||||
// Add command line argument support
const args = parseArgs(process.argv)
// Deduplicate repeated --file/--lang values before use.
const targetFiles = Array.from(new Set(args.files))
const targetLangs = Array.from(new Set(args.languages))
const autoRemove = args.autoRemove
|
||||
|
||||
/**
 * Compare key sets between en-US and every other requested locale, printing
 * missing/extra keys and (optionally) auto-removing the extra ones.
 * Sets process.exitCode = 1 when any locale is out of sync.
 *
 * Membership tests now use Sets: the previous Array.includes-inside-filter
 * form was O(n^2) over thousands of i18n keys per locale.
 */
async function main() {
  const compareKeysCount = async () => {
    let hasDiff = false
    const allTargetKeys = await getKeysFromLanguage(targetLanguage)

    // Filter target keys by file if specified
    const camelTargetFiles = targetFiles.map(file => file.replace(/[-_](.)/g, (_, c) => c.toUpperCase()))
    const targetKeys = targetFiles.length
      ? allTargetKeys.filter(key => camelTargetFiles.some(file => key.startsWith(`${file}.`)))
      : allTargetKeys

    // Filter languages by target language if specified
    const languagesToProcess = targetLangs.length ? targetLangs : languages

    const allLanguagesKeys = await Promise.all(languagesToProcess.map(language => getKeysFromLanguage(language)))

    // Filter language keys by file if specified
    const languagesKeys = targetFiles.length
      ? allLanguagesKeys.map(keys => keys.filter(key => camelTargetFiles.some(file => key.startsWith(`${file}.`))))
      : allLanguagesKeys

    const keysCount = languagesKeys.map(keys => keys.length)
    const targetKeysCount = targetKeys.length

    // Per-locale key-count delta relative to en-US (positive = keys missing).
    const comparison = languagesToProcess.reduce((result, language, index) => {
      const languageKeysCount = keysCount[index]
      const difference = targetKeysCount - languageKeysCount
      result[language] = difference
      return result
    }, {})

    console.log(comparison)

    // O(1) membership for the diff below.
    const targetKeySet = new Set(targetKeys)

    // Print missing keys and extra keys
    for (let index = 0; index < languagesToProcess.length; index++) {
      const language = languagesToProcess[index]
      const languageKeys = languagesKeys[index]
      const languageKeySet = new Set(languageKeys)
      const missingKeys = targetKeys.filter(key => !languageKeySet.has(key))
      const extraKeys = languageKeys.filter(key => !targetKeySet.has(key))

      console.log(`Missing keys in ${language}:`, missingKeys)
      if (missingKeys.length > 0)
        hasDiff = true

      // Show extra keys only when there are extra keys (negative difference)
      if (extraKeys.length > 0) {
        console.log(`Extra keys in ${language} (not in ${targetLanguage}):`, extraKeys)

        // Auto-remove extra keys if flag is set
        if (autoRemove) {
          console.log(`\n🤖 Auto-removing extra keys from ${language}...`)

          // Get all translation files
          const i18nFolder = path.resolve(__dirname, '../i18n', language)
          const files = fs.readdirSync(i18nFolder)
            .filter(file => /\.json$/.test(file))
            .map(file => file.replace(/\.json$/, ''))
            .filter(f => targetFiles.length === 0 || targetFiles.includes(f))

          let totalRemoved = 0
          for (const fileName of files) {
            const removed = await removeExtraKeysFromFile(language, fileName, extraKeys)
            if (removed)
              totalRemoved++
          }

          console.log(`✅ Auto-removal completed for ${language}. Modified ${totalRemoved} files.`)
        }
        else {
          // Extra keys are only a failure when we are not fixing them.
          hasDiff = true
        }
      }
    }

    return hasDiff
  }

  console.log('🚀 Starting i18n:check script...')
  if (targetFiles.length)
    console.log(`📁 Checking files: ${targetFiles.join(', ')}`)

  if (targetLangs.length)
    console.log(`🌍 Checking languages: ${targetLangs.join(', ')}`)

  if (autoRemove)
    console.log('🤖 Auto-remove mode: ENABLED')

  const hasDiff = await compareKeysCount()
  if (hasDiff) {
    console.error('\n❌ i18n keys are not aligned. Fix issues above.')
    process.exitCode = 1
  }
  else {
    console.log('\n✅ All i18n files are in sync')
  }
}
|
||||
|
||||
// Entry point: validate CLI input, then run the main check.
async function bootstrap() {
  if (args.help) {
    printHelp()
    return
  }

  if (args.errors.length) {
    for (const message of args.errors)
      console.error(`❌ ${message}`)
    printHelp()
    process.exit(1)
    return
  }

  // Reject locales that are not in the supported list.
  const unknownLangs = targetLangs.filter(lang => !languages.includes(lang))
  if (unknownLangs.length) {
    console.error(`❌ Unsupported languages: ${unknownLangs.join(', ')}`)
    process.exit(1)
    return
  }

  await main()
}
|
||||
|
||||
// Run the CLI; surface any unhandled failure with a non-zero exit code.
bootstrap().catch((error) => {
  console.error('❌ Unexpected error:', error.message)
  process.exit(1)
})
|
||||
484
web/scripts/component-analyzer.js
Normal file
484
web/scripts/component-analyzer.js
Normal file
@ -0,0 +1,484 @@
|
||||
/**
|
||||
* Component Analyzer - Shared module for analyzing React component complexity
|
||||
*
|
||||
* This module is used by:
|
||||
* - analyze-component.js (for test generation)
|
||||
* - refactor-component.js (for refactoring suggestions)
|
||||
*/
|
||||
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import tsParser from '@typescript-eslint/parser'
|
||||
import { Linter } from 'eslint'
|
||||
import sonarPlugin from 'eslint-plugin-sonarjs'
|
||||
|
||||
// ============================================================================
|
||||
// Component Analyzer
|
||||
// ============================================================================
|
||||
|
||||
export class ComponentAnalyzer {
|
||||
analyze(code, filePath, absolutePath) {
|
||||
const resolvedPath = absolutePath ?? path.resolve(process.cwd(), filePath)
|
||||
const fileName = path.basename(filePath, path.extname(filePath))
|
||||
const lineCount = code.split('\n').length
|
||||
const hasReactQuery = /\buse(?:Query|Queries|InfiniteQuery|SuspenseQuery|SuspenseInfiniteQuery|Mutation)\b/.test(code)
|
||||
|
||||
// Calculate complexity metrics
|
||||
const { total: rawComplexity, max: rawMaxComplexity } = this.calculateCognitiveComplexity(code)
|
||||
const complexity = this.normalizeComplexity(rawComplexity)
|
||||
const maxComplexity = this.normalizeComplexity(rawMaxComplexity)
|
||||
|
||||
// Count usage references (may take a few seconds)
|
||||
const usageCount = this.countUsageReferences(filePath, resolvedPath)
|
||||
|
||||
// Calculate test priority
|
||||
const priority = this.calculateTestPriority(complexity, usageCount)
|
||||
|
||||
return {
|
||||
name: fileName.charAt(0).toUpperCase() + fileName.slice(1),
|
||||
path: filePath,
|
||||
type: this.detectType(filePath, code),
|
||||
hasProps: code.includes('Props') || code.includes('interface'),
|
||||
hasState: code.includes('useState') || code.includes('useReducer'),
|
||||
hasEffects: code.includes('useEffect'),
|
||||
hasCallbacks: code.includes('useCallback'),
|
||||
hasMemo: code.includes('useMemo'),
|
||||
hasEvents: /on[A-Z]\w+/.test(code),
|
||||
hasRouter: code.includes('useRouter') || code.includes('usePathname'),
|
||||
hasAPI: code.includes('service/') || code.includes('fetch(') || hasReactQuery,
|
||||
hasForwardRef: code.includes('forwardRef'),
|
||||
hasComponentMemo: /React\.memo|memo\(/.test(code),
|
||||
hasSuspense: code.includes('Suspense') || /\blazy\(/.test(code),
|
||||
hasPortal: code.includes('createPortal'),
|
||||
hasImperativeHandle: code.includes('useImperativeHandle'),
|
||||
hasReactQuery,
|
||||
hasAhooks: code.includes('from \'ahooks\''),
|
||||
complexity,
|
||||
maxComplexity,
|
||||
rawComplexity,
|
||||
rawMaxComplexity,
|
||||
lineCount,
|
||||
usageCount,
|
||||
priority,
|
||||
}
|
||||
}
|
||||
|
||||
detectType(filePath, code) {
|
||||
const normalizedPath = filePath.replace(/\\/g, '/')
|
||||
if (normalizedPath.includes('/hooks/'))
|
||||
return 'hook'
|
||||
if (normalizedPath.includes('/utils/'))
|
||||
return 'util'
|
||||
if (/\/page\.(t|j)sx?$/.test(normalizedPath))
|
||||
return 'page'
|
||||
if (/\/layout\.(t|j)sx?$/.test(normalizedPath))
|
||||
return 'layout'
|
||||
if (/\/providers?\//.test(normalizedPath))
|
||||
return 'provider'
|
||||
// Dify-specific types
|
||||
if (normalizedPath.includes('/components/base/'))
|
||||
return 'base-component'
|
||||
if (normalizedPath.includes('/context/'))
|
||||
return 'context'
|
||||
if (normalizedPath.includes('/store/'))
|
||||
return 'store'
|
||||
if (normalizedPath.includes('/service/'))
|
||||
return 'service'
|
||||
if (/use[A-Z]\w+/.test(code))
|
||||
return 'component'
|
||||
return 'component'
|
||||
}
|
||||
|
||||
/**
 * Calculate Cognitive Complexity using SonarJS ESLint plugin
 * Reference: https://www.sonarsource.com/blog/5-clean-code-tips-for-reducing-cognitive-complexity/
 *
 * Returns raw (unnormalized) complexity values:
 * - total: sum of all functions' complexity in the file
 * - max: highest single function complexity in the file
 *
 * Raw Score Thresholds (per function):
 *   0-15: Simple | 16-30: Medium | 31-50: Complex | 51+: Very Complex
 *
 * @param {string} code - TypeScript/TSX source text to lint in-memory.
 * @returns {{ total: number, max: number }} raw total and max complexity
 */
calculateCognitiveComplexity(code) {
  // In-memory linter: nothing is read from or written to disk.
  const linter = new Linter()
  // Flat-config base shared by both passes: TS parser with JSX enabled,
  // SonarJS plugin registered under the 'sonarjs' namespace.
  const baseConfig = {
    languageOptions: {
      parser: tsParser,
      parserOptions: {
        ecmaVersion: 'latest',
        sourceType: 'module',
        ecmaFeatures: { jsx: true },
      },
    },
    plugins: { sonarjs: sonarPlugin },
  }

  try {
    // Pass 1 — file total. With the 'metric' option the rule emits a
    // single 'fileComplexity' message whose text is the numeric total.
    const totalConfig = {
      ...baseConfig,
      rules: { 'sonarjs/cognitive-complexity': ['error', 0, 'metric'] },
    }
    const totalMessages = linter.verify(code, totalConfig)
    const totalMsg = totalMessages.find(
      msg => msg.ruleId === 'sonarjs/cognitive-complexity'
        && msg.messageId === 'fileComplexity',
    )
    const total = totalMsg ? Number.parseInt(totalMsg.message, 10) : 0

    // Pass 2 — per-function max. Threshold 0 makes the rule report every
    // function; each message embeds that function's score in its text.
    const maxConfig = {
      ...baseConfig,
      rules: { 'sonarjs/cognitive-complexity': ['error', 0] },
    }
    const maxMessages = linter.verify(code, maxConfig)
    let max = 0
    // NOTE(review): this parses the human-readable message wording, which
    // is plugin-version dependent — re-verify after SonarJS upgrades.
    const complexityPattern = /reduce its Cognitive Complexity from (\d+)/

    maxMessages.forEach((msg) => {
      if (msg.ruleId === 'sonarjs/cognitive-complexity') {
        const match = msg.message.match(complexityPattern)
        if (match && match[1])
          max = Math.max(max, Number.parseInt(match[1], 10))
      }
    })

    return { total, max }
  }
  catch {
    // Parse/lint failures (e.g. invalid syntax) degrade to zero scores
    // rather than aborting the whole analysis run.
    return { total: 0, max: 0 }
  }
}
|
||||
|
||||
/**
|
||||
* Normalize cognitive complexity to 0-100 scale
|
||||
*
|
||||
* Mapping (aligned with SonarJS thresholds):
|
||||
* Raw 0-15 (Simple) -> Normalized 0-25
|
||||
* Raw 16-30 (Medium) -> Normalized 25-50
|
||||
* Raw 31-50 (Complex) -> Normalized 50-75
|
||||
* Raw 51+ (Very Complex) -> Normalized 75-100 (asymptotic)
|
||||
*/
|
||||
normalizeComplexity(rawComplexity) {
|
||||
if (rawComplexity <= 15) {
|
||||
// Linear: 0-15 -> 0-25
|
||||
return Math.round((rawComplexity / 15) * 25)
|
||||
}
|
||||
else if (rawComplexity <= 30) {
|
||||
// Linear: 16-30 -> 25-50
|
||||
return Math.round(25 + ((rawComplexity - 15) / 15) * 25)
|
||||
}
|
||||
else if (rawComplexity <= 50) {
|
||||
// Linear: 31-50 -> 50-75
|
||||
return Math.round(50 + ((rawComplexity - 30) / 20) * 25)
|
||||
}
|
||||
else {
|
||||
// Asymptotic: 51+ -> 75-100
|
||||
// Formula ensures score approaches but never exceeds 100
|
||||
return Math.round(75 + 25 * (1 - 1 / (1 + (rawComplexity - 50) / 100)))
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Count how many times a component is referenced in the codebase
 * Scans TypeScript sources for import statements referencing the component
 *
 * @param {string} filePath - Project-relative path of the component file.
 * @param {string} [absolutePath] - Pre-resolved absolute path; when omitted,
 *   `filePath` is resolved against the current working directory.
 * @returns {number} Number of source files that import/require the component.
 */
countUsageReferences(filePath, absolutePath) {
  try {
    const resolvedComponentPath = absolutePath ?? path.resolve(process.cwd(), filePath)
    const fileName = path.basename(resolvedComponentPath, path.extname(resolvedComponentPath))

    // 'index' files are imported by their directory name, so search for
    // the parent directory instead of the literal 'index'.
    let searchName = fileName
    if (fileName === 'index') {
      const parentDir = path.dirname(resolvedComponentPath)
      searchName = path.basename(parentDir)
    }

    if (!searchName)
      return 0

    const searchRoots = this.collectSearchRoots(resolvedComponentPath)
    if (searchRoots.length === 0)
      return 0

    // Match static imports, dynamic import(), re-exports, and require()
    // whose module specifier ends in (a path segment equal to) searchName.
    const escapedName = ComponentAnalyzer.escapeRegExp(searchName)
    const patterns = [
      new RegExp(`from\\s+['\"][^'\"]*(?:/|^)${escapedName}(?:['\"/]|$)`),
      new RegExp(`import\\s*\\(\\s*['\"][^'\"]*(?:/|^)${escapedName}(?:['\"/]|$)`),
      new RegExp(`export\\s+(?:\\*|{[^}]*})\\s*from\\s+['\"][^'\"]*(?:/|^)${escapedName}(?:['\"/]|$)`),
      new RegExp(`require\\(\\s*['\"][^'\"]*(?:/|^)${escapedName}(?:['\"/]|$)`),
    ]

    const visited = new Set()
    let usageCount = 0

    // Iterative depth-first walk over all search roots; `visited` guards
    // against roots that nest inside each other.
    const stack = [...searchRoots]
    while (stack.length > 0) {
      const currentDir = stack.pop()
      if (!currentDir || visited.has(currentDir))
        continue
      visited.add(currentDir)

      const entries = fs.readdirSync(currentDir, { withFileTypes: true })

      entries.forEach((entry) => {
        const entryPath = path.join(currentDir, entry.name)

        if (entry.isDirectory()) {
          if (this.shouldSkipDir(entry.name))
            return
          stack.push(entryPath)
          return
        }

        if (!this.shouldInspectFile(entry.name))
          return

        // Never count the component file itself as a usage.
        const normalizedEntryPath = path.resolve(entryPath)
        if (normalizedEntryPath === path.resolve(resolvedComponentPath))
          return

        // Cheap substring pre-filter before running the regexes.
        const source = fs.readFileSync(entryPath, 'utf-8')
        if (!source.includes(searchName))
          return

        // The patterns carry no /g flag, so resetting lastIndex is a
        // defensive no-op; each matching file counts once regardless of
        // how many patterns hit.
        if (patterns.some((pattern) => {
          pattern.lastIndex = 0
          return pattern.test(source)
        })) {
          usageCount += 1
        }
      })
    }

    return usageCount
  }
  catch {
    // Any filesystem error aborts the scan; report zero references
    // rather than crashing the analyzer.
    return 0
  }
}
|
||||
|
||||
collectSearchRoots(resolvedComponentPath) {
|
||||
const roots = new Set()
|
||||
|
||||
let currentDir = path.dirname(resolvedComponentPath)
|
||||
const workspaceRoot = process.cwd()
|
||||
|
||||
while (currentDir && currentDir !== path.dirname(currentDir)) {
|
||||
if (path.basename(currentDir) === 'app') {
|
||||
roots.add(currentDir)
|
||||
break
|
||||
}
|
||||
|
||||
if (currentDir === workspaceRoot)
|
||||
break
|
||||
currentDir = path.dirname(currentDir)
|
||||
}
|
||||
|
||||
const fallbackRoots = [
|
||||
path.join(workspaceRoot, 'app'),
|
||||
path.join(workspaceRoot, 'web', 'app'),
|
||||
path.join(workspaceRoot, 'src'),
|
||||
]
|
||||
|
||||
fallbackRoots.forEach((root) => {
|
||||
if (fs.existsSync(root) && fs.statSync(root).isDirectory())
|
||||
roots.add(root)
|
||||
})
|
||||
|
||||
return Array.from(roots)
|
||||
}
|
||||
|
||||
shouldSkipDir(dirName) {
|
||||
const normalized = dirName.toLowerCase()
|
||||
return [
|
||||
'node_modules',
|
||||
'.git',
|
||||
'.next',
|
||||
'dist',
|
||||
'out',
|
||||
'coverage',
|
||||
'build',
|
||||
'__tests__',
|
||||
'__mocks__',
|
||||
].includes(normalized)
|
||||
}
|
||||
|
||||
shouldInspectFile(fileName) {
|
||||
const normalized = fileName.toLowerCase()
|
||||
if (!(/\.(ts|tsx)$/i.test(fileName)))
|
||||
return false
|
||||
if (normalized.endsWith('.d.ts'))
|
||||
return false
|
||||
if (/\.(spec|test)\.(ts|tsx)$/.test(normalized))
|
||||
return false
|
||||
if (normalized.endsWith('.stories.tsx'))
|
||||
return false
|
||||
return true
|
||||
}
|
||||
|
||||
/**
 * Escape all RegExp metacharacters in `value` so it can be embedded
 * verbatim inside a dynamically-built regular expression.
 *
 * @param {string} value - Raw text to escape.
 * @returns {string} Escaped text, safe to pass to `new RegExp(...)`.
 */
static escapeRegExp(value) {
  return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
|
||||
|
||||
/**
|
||||
* Calculate test priority based on cognitive complexity and usage
|
||||
*
|
||||
* Priority Score = 0.7 * Complexity + 0.3 * Usage Score (all normalized to 0-100)
|
||||
* - Complexity Score: 0-100 (normalized from SonarJS)
|
||||
* - Usage Score: 0-100 (based on reference count)
|
||||
*
|
||||
* Priority Levels (0-100):
|
||||
* - 0-25: 🟢 LOW
|
||||
* - 26-50: 🟡 MEDIUM
|
||||
* - 51-75: 🟠 HIGH
|
||||
* - 76-100: 🔴 CRITICAL
|
||||
*/
|
||||
calculateTestPriority(complexity, usageCount) {
|
||||
const complexityScore = complexity
|
||||
|
||||
// Normalize usage score to 0-100
|
||||
let usageScore
|
||||
if (usageCount === 0)
|
||||
usageScore = 0
|
||||
else if (usageCount <= 5)
|
||||
usageScore = 20
|
||||
else if (usageCount <= 20)
|
||||
usageScore = 40
|
||||
else if (usageCount <= 50)
|
||||
usageScore = 70
|
||||
else
|
||||
usageScore = 100
|
||||
|
||||
// Weighted average: complexity (70%) + usage (30%)
|
||||
const totalScore = Math.round(0.7 * complexityScore + 0.3 * usageScore)
|
||||
|
||||
return {
|
||||
score: totalScore,
|
||||
level: this.getPriorityLevel(totalScore),
|
||||
usageScore,
|
||||
complexityScore,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get priority level based on score (0-100 scale)
|
||||
*/
|
||||
getPriorityLevel(score) {
|
||||
if (score > 75)
|
||||
return '🔴 CRITICAL'
|
||||
if (score > 50)
|
||||
return '🟠 HIGH'
|
||||
if (score > 25)
|
||||
return '🟡 MEDIUM'
|
||||
return '🟢 LOW'
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helper Functions
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Resolve a directory to its entry file.
 *
 * Candidate names are checked in priority order — index files first, then
 * common component entry points — and the first one that exists on disk
 * wins.
 *
 * @param {string} absolutePath - Absolute path of the directory.
 * @param {string} componentPath - Project-relative path of the same directory.
 * @returns {{ absolutePath: string, componentPath: string } | null}
 *   Both paths extended with the entry file name, or null when none exists.
 */
export function resolveDirectoryEntry(absolutePath, componentPath) {
  const candidateEntries = [
    // Priority 1: index files
    'index.tsx',
    'index.ts',
    // Priority 2: common entry files
    'node.tsx',
    'panel.tsx',
    'component.tsx',
    'main.tsx',
    'container.tsx',
  ]

  const entryName = candidateEntries.find(
    name => fs.existsSync(path.join(absolutePath, name)),
  )

  if (!entryName)
    return null

  return {
    absolutePath: path.join(absolutePath, entryName),
    componentPath: path.join(componentPath, entryName),
  }
}
|
||||
|
||||
/**
 * List analyzable source files in a directory (for user guidance).
 *
 * Returns TypeScript/JavaScript file names (excluding .d.ts declarations),
 * with well-known entry files ordered first and the remainder in locale
 * order.
 *
 * @param {string} dirPath - Directory to inspect.
 * @returns {string[]} Sorted file names; empty when the directory is unreadable.
 */
export function listAnalyzableFiles(dirPath) {
  // Lower rank sorts earlier; names outside this list share the max rank.
  const entryPriority = ['index.tsx', 'index.ts', 'node.tsx', 'panel.tsx', 'component.tsx', 'main.tsx', 'container.tsx']
  const rankOf = (name) => {
    const idx = entryPriority.indexOf(name)
    return idx === -1 ? entryPriority.length : idx
  }

  try {
    const fileNames = fs.readdirSync(dirPath, { withFileTypes: true })
      .filter(entry => !entry.isDirectory())
      .map(entry => entry.name)
      .filter(name => /\.(tsx?|jsx?)$/.test(name) && !name.endsWith('.d.ts'))

    return fileNames.sort((a, b) => rankOf(a) - rankOf(b) || a.localeCompare(b))
  }
  catch {
    return []
  }
}
|
||||
|
||||
/**
 * Extract the copy-to-clipboard section from a generated prompt.
 *
 * The section is the text between the first pair of divider lines that
 * follow the "PROMPT FOR AI ASSISTANT" marker; when only an opening
 * divider exists, everything after it is returned.
 *
 * @param {string} prompt - Full prompt text produced by a prompt builder.
 * @returns {string} Trimmed section content, or '' when absent/malformed.
 */
export function extractCopyContent(prompt) {
  const marker = '📋 PROMPT FOR AI ASSISTANT (COPY THIS TO YOUR AI ASSISTANT):'
  const dividerFragment = '━━━━━━━━'

  const markerIndex = prompt.indexOf(marker)
  if (markerIndex === -1)
    return ''

  const lines = prompt.slice(markerIndex).split('\n')

  const openingDivider = lines.findIndex(line => line.includes(dividerFragment))
  if (openingDivider === -1)
    return ''

  const startIdx = openingDivider + 1
  let endIdx = lines.length
  for (let i = startIdx; i < lines.length; i++) {
    if (lines[i].includes(dividerFragment)) {
      endIdx = i
      break
    }
  }

  return startIdx < endIdx
    ? lines.slice(startIdx, endIdx).join('\n').trim()
    : ''
}
|
||||
|
||||
/**
 * Translate a 0-100 complexity score into a labelled level.
 *
 * @param {number} score - Normalized complexity score.
 * @returns {string} '🟢 Simple' | '🟡 Medium' | '🟠 Complex' | '🔴 Very Complex'.
 */
export function getComplexityLevel(score) {
  // Upper-bound thresholds, checked in ascending order.
  const levels = [
    [25, '🟢 Simple'],
    [50, '🟡 Medium'],
    [75, '🟠 Complex'],
  ]
  for (const [limit, label] of levels) {
    if (score <= limit)
      return label
  }
  return '🔴 Very Complex'
}
|
||||
@ -4,8 +4,8 @@
|
||||
* It is intended to be used as a replacement for `next start`.
|
||||
*/
|
||||
|
||||
import { cp, mkdir, stat } from 'node:fs/promises'
|
||||
import { spawn } from 'node:child_process'
|
||||
import { cp, mkdir, stat } from 'node:fs/promises'
|
||||
import path from 'node:path'
|
||||
|
||||
// Configuration for directories to copy
|
||||
|
||||
@ -1,6 +1,9 @@
|
||||
const sharp = require('sharp');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
import path from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
import sharp from 'sharp'
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url)
|
||||
const __dirname = path.dirname(__filename)
|
||||
|
||||
const sizes = [
|
||||
{ size: 192, name: 'icon-192x192.png' },
|
||||
@ -12,40 +15,41 @@ const sizes = [
|
||||
{ size: 128, name: 'icon-128x128.png' },
|
||||
{ size: 144, name: 'icon-144x144.png' },
|
||||
{ size: 152, name: 'icon-152x152.png' },
|
||||
];
|
||||
]
|
||||
|
||||
const inputPath = path.join(__dirname, '../public/icon.svg');
|
||||
const outputDir = path.join(__dirname, '../public');
|
||||
const inputPath = path.join(__dirname, '../public/icon.svg')
|
||||
const outputDir = path.join(__dirname, '../public')
|
||||
|
||||
// Generate icons
/**
 * Render every configured PWA icon size from the source SVG, plus the
 * 180x180 apple-touch-icon. Exits the process with code 1 on any failure.
 *
 * Relies on file-scope `sharp`, `sizes`, `inputPath`, and `outputDir`.
 */
async function generateIcons() {
  try {
    console.log('Generating PWA icons...')

    for (const { size, name } of sizes) {
      const outputPath = path.join(outputDir, name)

      await sharp(inputPath)
        .resize(size, size)
        .png()
        .toFile(outputPath)

      console.log(`✓ Generated ${name} (${size}x${size})`)
    }

    // Generate apple-touch-icon
    await sharp(inputPath)
      .resize(180, 180)
      .png()
      .toFile(path.join(outputDir, 'apple-touch-icon.png'))

    console.log('✓ Generated apple-touch-icon.png (180x180)')

    console.log('\n✅ All icons generated successfully!')
  }
  catch (error) {
    console.error('Error generating icons:', error)
    process.exit(1)
  }
}
|
||||
|
||||
generateIcons();
|
||||
generateIcons()
|
||||
|
||||
@ -3,17 +3,21 @@
|
||||
* Removes unnecessary files like jest-worker that are bundled with Next.js
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
|
||||
console.log('🔧 Optimizing standalone output...');
|
||||
const __filename = fileURLToPath(import.meta.url)
|
||||
const __dirname = path.dirname(__filename)
|
||||
|
||||
const standaloneDir = path.join(__dirname, '..', '.next', 'standalone');
|
||||
console.log('🔧 Optimizing standalone output...')
|
||||
|
||||
const standaloneDir = path.join(__dirname, '..', '.next', 'standalone')
|
||||
|
||||
// Check if standalone directory exists
|
||||
if (!fs.existsSync(standaloneDir)) {
|
||||
console.error('❌ Standalone directory not found. Please run "next build" first.');
|
||||
process.exit(1);
|
||||
console.error('❌ Standalone directory not found. Please run "next build" first.')
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
// List of paths to remove (relative to standalone directory)
|
||||
@ -24,126 +28,136 @@ const pathsToRemove = [
|
||||
'node_modules/.pnpm/terser-webpack-plugin@*/node_modules/jest-worker',
|
||||
// Remove actual jest-worker packages (directories only, not symlinks)
|
||||
'node_modules/.pnpm/jest-worker@*',
|
||||
];
|
||||
]
|
||||
|
||||
// Function to safely remove a path
/**
 * Remove a file, directory, or symlink under `basePath`, with support for
 * '*' wildcards in any segment of `relativePath`.
 *
 * Wildcard segments are expanded against the directory listing at that
 * depth; each match (with the remaining segments appended) is removed —
 * symlinks via unlink, everything else recursively via rmSync. ENOENT is
 * silently ignored; other failures are logged but never thrown.
 *
 * @param {string} basePath - Root directory the relative path is resolved against.
 * @param {string} relativePath - Slash-separated path, optionally containing '*'.
 */
function removePath(basePath, relativePath) {
  const fullPath = path.join(basePath, relativePath)

  // Handle wildcard patterns
  if (relativePath.includes('*')) {
    const parts = relativePath.split('/')
    let currentPath = basePath

    for (let i = 0; i < parts.length; i++) {
      const part = parts[i]
      if (part.includes('*')) {
        // Expand the wildcard against the current directory listing.
        if (fs.existsSync(currentPath)) {
          const entries = fs.readdirSync(currentPath)

          // replace '*' with '.*'
          const regexPattern = part.replace(/\*/g, '.*')
          const regex = new RegExp(`^${regexPattern}$`)

          for (const entry of entries) {
            if (regex.test(entry)) {
              const remainingPath = parts.slice(i + 1).join('/')
              const matchedPath = path.join(currentPath, entry, remainingPath)

              try {
                // lstatSync works for both files and symlinks (does not follow).
                const stats = fs.lstatSync(matchedPath)

                if (stats.isSymbolicLink()) {
                  // Remove symlink without following it.
                  fs.unlinkSync(matchedPath)
                  console.log(`✅ Removed symlink: ${path.relative(basePath, matchedPath)}`)
                }
                else {
                  // Remove directory/file recursively.
                  fs.rmSync(matchedPath, { recursive: true, force: true })
                  console.log(`✅ Removed: ${path.relative(basePath, matchedPath)}`)
                }
              }
              catch (error) {
                // Silently ignore ENOENT (path not found) errors
                if (error.code !== 'ENOENT') {
                  console.error(`❌ Failed to remove ${matchedPath}: ${error.message}`)
                }
              }
            }
          }
        }
        // Only the first wildcard segment is expanded per call.
        return
      }
      else {
        currentPath = path.join(currentPath, part)
      }
    }
  }
  else {
    // Direct path removal
    if (fs.existsSync(fullPath)) {
      try {
        fs.rmSync(fullPath, { recursive: true, force: true })
        console.log(`✅ Removed: ${relativePath}`)
      }
      catch (error) {
        console.error(`❌ Failed to remove ${fullPath}: ${error.message}`)
      }
    }
  }
}
|
||||
|
||||
// Remove unnecessary paths
|
||||
console.log('🗑️ Removing unnecessary files...');
|
||||
console.log('🗑️ Removing unnecessary files...')
|
||||
for (const pathToRemove of pathsToRemove) {
|
||||
removePath(standaloneDir, pathToRemove);
|
||||
removePath(standaloneDir, pathToRemove)
|
||||
}
|
||||
|
||||
// Calculate size reduction
|
||||
console.log('\n📊 Optimization complete!');
|
||||
console.log('\n📊 Optimization complete!')
|
||||
|
||||
// Optional: Display the size of remaining jest-related files (if any)
|
||||
const checkForJest = (dir) => {
|
||||
const jestFiles = [];
|
||||
const jestFiles = []
|
||||
|
||||
function walk(currentPath) {
|
||||
if (!fs.existsSync(currentPath)) return;
|
||||
if (!fs.existsSync(currentPath))
|
||||
return
|
||||
|
||||
try {
|
||||
const entries = fs.readdirSync(currentPath);
|
||||
const entries = fs.readdirSync(currentPath)
|
||||
for (const entry of entries) {
|
||||
const fullPath = path.join(currentPath, entry);
|
||||
const fullPath = path.join(currentPath, entry)
|
||||
|
||||
try {
|
||||
const stat = fs.lstatSync(fullPath); // Use lstatSync to handle symlinks
|
||||
const stat = fs.lstatSync(fullPath) // Use lstatSync to handle symlinks
|
||||
|
||||
if (stat.isDirectory() && !stat.isSymbolicLink()) {
|
||||
// Skip node_modules subdirectories to avoid deep traversal
|
||||
if (entry === 'node_modules' && currentPath !== standaloneDir) {
|
||||
continue;
|
||||
continue
|
||||
}
|
||||
walk(fullPath);
|
||||
} else if (stat.isFile() && entry.includes('jest')) {
|
||||
jestFiles.push(path.relative(standaloneDir, fullPath));
|
||||
walk(fullPath)
|
||||
}
|
||||
} catch (err) {
|
||||
else if (stat.isFile() && entry.includes('jest')) {
|
||||
jestFiles.push(path.relative(standaloneDir, fullPath))
|
||||
}
|
||||
}
|
||||
catch (err) {
|
||||
// Skip files that can't be accessed
|
||||
continue;
|
||||
continue
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
}
|
||||
catch (err) {
|
||||
// Skip directories that can't be read
|
||||
return;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
walk(dir);
|
||||
return jestFiles;
|
||||
};
|
||||
|
||||
const remainingJestFiles = checkForJest(standaloneDir);
|
||||
if (remainingJestFiles.length > 0) {
|
||||
console.log('\n⚠️ Warning: Some jest-related files still remain:');
|
||||
remainingJestFiles.forEach(file => console.log(` - ${file}`));
|
||||
} else {
|
||||
console.log('\n✨ No jest-related files found in standalone output!');
|
||||
walk(dir)
|
||||
return jestFiles
|
||||
}
|
||||
|
||||
const remainingJestFiles = checkForJest(standaloneDir)
|
||||
if (remainingJestFiles.length > 0) {
|
||||
console.log('\n⚠️ Warning: Some jest-related files still remain:')
|
||||
remainingJestFiles.forEach(file => console.log(` - ${file}`))
|
||||
}
|
||||
else {
|
||||
console.log('\n✨ No jest-related files found in standalone output!')
|
||||
}
|
||||
|
||||
415
web/scripts/refactor-component.js
Normal file
415
web/scripts/refactor-component.js
Normal file
@ -0,0 +1,415 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import { spawnSync } from 'node:child_process'
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import {
|
||||
ComponentAnalyzer,
|
||||
extractCopyContent,
|
||||
getComplexityLevel,
|
||||
listAnalyzableFiles,
|
||||
resolveDirectoryEntry,
|
||||
} from './component-analyzer.js'
|
||||
|
||||
// ============================================================================
|
||||
// Extended Analyzer for Refactoring
|
||||
// ============================================================================
|
||||
|
||||
/**
 * ComponentAnalyzer specialized for refactoring guidance.
 *
 * Extends the base analysis with hook-usage counts, conditional density,
 * and modal-pattern estimates, all computed with lightweight regex
 * heuristics (no AST parsing).
 */
class RefactorAnalyzer extends ComponentAnalyzer {
  /**
   * Analyze a component and append refactoring-specific metrics.
   *
   * @param {string} code - Source text of the component.
   * @param {string} filePath - Project-relative path of the file.
   * @param {string} [absolutePath] - Absolute path of the file.
   * @returns {object} Base analysis merged with hook/conditional/modal counts.
   */
  analyze(code, filePath, absolutePath) {
    // Get base analysis from parent class
    const baseAnalysis = super.analyze(code, filePath, absolutePath)

    // Add refactoring-specific metrics
    // Note: These counts use regex matching which may include import statements.
    // For most components this results in +1 over actual usage, which is acceptable
    // for heuristic analysis. For precise AST-based counting, consider using
    // @typescript-eslint/parser to traverse the AST.
    const stateCount = (code.match(/useState\s*[(<]/g) || []).length
    const effectCount = (code.match(/useEffect\s*\(/g) || []).length
    const callbackCount = (code.match(/useCallback\s*\(/g) || []).length
    const memoCount = (code.match(/useMemo\s*\(/g) || []).length
    const conditionalBlocks = this.countConditionalBlocks(code)
    const nestedTernaries = this.countNestedTernaries(code)
    const hasContext = code.includes('useContext') || code.includes('createContext')
    const hasReducer = code.includes('useReducer')
    const hasModals = this.countModals(code)

    return {
      ...baseAnalysis,
      stateCount,
      effectCount,
      callbackCount,
      memoCount,
      conditionalBlocks,
      nestedTernaries,
      hasContext,
      hasReducer,
      hasModals,
    }
  }

  /**
   * Estimate how many modal-like components the file contains.
   *
   * Sums occurrences of modal-related identifiers, then divides by 3
   * because one real modal typically matches several patterns at once
   * (component name + visibility state + setter).
   *
   * @param {string} code - Source text to scan.
   * @returns {number} Rough count of distinct modals.
   */
  countModals(code) {
    const modalPatterns = [
      /Modal/g,
      /Dialog/g,
      /Drawer/g,
      /Confirm/g,
      /showModal|setShowModal|isShown|isShowing/g,
    ]
    let count = 0
    modalPatterns.forEach((pattern) => {
      const matches = code.match(pattern)
      if (matches)
        count += matches.length
    })
    return Math.floor(count / 3) // Rough estimate of actual modals
  }

  /**
   * Count branching constructs: if statements, ternaries, and switches.
   *
   * NOTE(review): the ternary regex /\?.*:/ is a per-line heuristic — it
   * can also match optional chaining (`?.`) followed by a later colon and
   * TypeScript optional properties, so treat the result as approximate.
   *
   * @param {string} code - Source text to scan.
   * @returns {number} Approximate number of conditional blocks.
   */
  countConditionalBlocks(code) {
    const ifBlocks = (code.match(/\bif\s*\(/g) || []).length
    const ternaries = (code.match(/\?.*:/g) || []).length
    const switchCases = (code.match(/\bswitch\s*\(/g) || []).length
    return ifBlocks + ternaries + switchCases
  }

  /**
   * Count ternary expressions nested inside another ternary's branch.
   *
   * One regex approximates nesting in the true branch, the other in the
   * false branch; like the other metrics this is heuristic rather than
   * AST-accurate.
   *
   * @param {string} code - Source text to scan.
   * @returns {number} Approximate number of nested ternaries.
   */
  countNestedTernaries(code) {
    const nestedInTrueBranch = (code.match(/\?[^:?]*\?[^:]*:/g) || []).length
    const nestedInFalseBranch = (code.match(/\?[^:?]*:[^?]*\?[^:]*:/g) || []).length

    return nestedInTrueBranch + nestedInFalseBranch
  }
}
|
||||
|
||||
// ============================================================================
|
||||
// Refactor Prompt Builder
|
||||
// ============================================================================
|
||||
|
||||
class RefactorPromptBuilder {
|
||||
build(analysis) {
|
||||
const refactorActions = this.identifyRefactorActions(analysis)
|
||||
|
||||
return `
|
||||
╔════════════════════════════════════════════════════════════════════════════╗
|
||||
║ 🔧 REFACTOR DIFY COMPONENT ║
|
||||
╚════════════════════════════════════════════════════════════════════════════╝
|
||||
|
||||
📍 Component: ${analysis.name}
|
||||
📂 Path: ${analysis.path}
|
||||
|
||||
📊 Complexity Analysis:
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
Total Complexity: ${analysis.complexity}/100 ${getComplexityLevel(analysis.complexity)}
|
||||
Max Func Complexity: ${analysis.maxComplexity}/100 ${getComplexityLevel(analysis.maxComplexity)}
|
||||
Lines: ${analysis.lineCount} ${analysis.lineCount > 300 ? '⚠️ TOO LARGE' : ''}
|
||||
Usage: ${analysis.usageCount} reference${analysis.usageCount !== 1 ? 's' : ''}
|
||||
|
||||
📈 Code Metrics:
|
||||
useState calls: ${analysis.stateCount}
|
||||
useEffect calls: ${analysis.effectCount}
|
||||
useCallback calls: ${analysis.callbackCount}
|
||||
useMemo calls: ${analysis.memoCount}
|
||||
Conditional blocks: ${analysis.conditionalBlocks}
|
||||
Nested ternaries: ${analysis.nestedTernaries}
|
||||
Modal components: ${analysis.hasModals}
|
||||
|
||||
🔍 Features Detected:
|
||||
${analysis.hasState ? '✓' : '✗'} Local state (useState/useReducer)
|
||||
${analysis.hasEffects ? '✓' : '✗'} Side effects (useEffect)
|
||||
${analysis.hasCallbacks ? '✓' : '✗'} Callbacks (useCallback)
|
||||
${analysis.hasMemo ? '✓' : '✗'} Memoization (useMemo)
|
||||
${analysis.hasContext ? '✓' : '✗'} Context (useContext/createContext)
|
||||
${analysis.hasEvents ? '✓' : '✗'} Event handlers
|
||||
${analysis.hasRouter ? '✓' : '✗'} Next.js routing
|
||||
${analysis.hasAPI ? '✓' : '✗'} API calls
|
||||
${analysis.hasReactQuery ? '✓' : '✗'} React Query
|
||||
${analysis.hasAhooks ? '✓' : '✗'} ahooks
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
🎯 RECOMMENDED REFACTORING ACTIONS:
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
${refactorActions.map((action, i) => `${i + 1}. ${action}`).join('\n')}
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
📋 PROMPT FOR AI ASSISTANT (COPY THIS TO YOUR AI ASSISTANT):
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
Please refactor the component at @${analysis.path}
|
||||
|
||||
Component metrics:
|
||||
- Complexity: ${analysis.complexity}/100 (target: < 50)
|
||||
- Lines: ${analysis.lineCount} (target: < 300)
|
||||
- useState: ${analysis.stateCount}, useEffect: ${analysis.effectCount}
|
||||
|
||||
Refactoring tasks:
|
||||
${refactorActions.map(action => `- ${action}`).join('\n')}
|
||||
|
||||
Requirements:
|
||||
${this.buildRequirements(analysis)}
|
||||
|
||||
Follow Dify project conventions:
|
||||
- Place extracted hooks in \`hooks/\` subdirectory or as \`use-<feature>.ts\`
|
||||
- Use React Query (\`@tanstack/react-query\`) for data fetching
|
||||
- Follow existing patterns in \`web/service/use-*.ts\` for API hooks
|
||||
- Keep each new file under 300 lines
|
||||
- Maintain TypeScript strict typing
|
||||
|
||||
After refactoring, verify:
|
||||
- \`pnpm lint:fix\` passes
|
||||
- \`pnpm type-check:tsgo\` passes
|
||||
- Re-run \`pnpm refactor-component ${analysis.path}\` to confirm complexity < 50
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
`
|
||||
}
|
||||
|
||||
/**
 * Derive an ordered list of human-readable refactoring recommendations
 * from the component metrics, most impactful first. Always returns at
 * least one entry (a general-guidance fallback when no rule fires).
 *
 * @param {object} analysis - Metrics produced by RefactorAnalyzer.analyze.
 * @returns {string[]} Recommendation strings for the generated prompt.
 */
identifyRefactorActions(analysis) {
  const actions = []

  // Priority 1: Extract hooks for complex state management
  if (analysis.stateCount >= 3 || (analysis.stateCount >= 2 && analysis.effectCount >= 2)) {
    actions.push(`🪝 EXTRACT CUSTOM HOOK: ${analysis.stateCount} useState + ${analysis.effectCount} useEffect detected. Extract related state and effects into a custom hook (e.g., \`use${analysis.name}State.ts\`)`)
  }

  // Priority 2: Extract API/data logic
  if (analysis.hasAPI)
    actions.push('🌐 EXTRACT DATA HOOK: Move API calls and data fetching logic into a dedicated hook using React Query')

  // Priority 3: Split large components
  if (analysis.lineCount > 300) {
    actions.push(`📦 SPLIT COMPONENT: ${analysis.lineCount} lines exceeds limit. Extract UI sections into sub-components`)
  }

  // Priority 4: Extract modal management
  if (analysis.hasModals >= 2) {
    actions.push(`🔲 EXTRACT MODAL MANAGEMENT: ${analysis.hasModals} modal-related patterns detected. Create a useModalState hook or separate modal components`)
  }

  // Priority 5: Simplify conditionals
  if (analysis.conditionalBlocks > 10 || analysis.nestedTernaries >= 2) {
    actions.push('🔀 SIMPLIFY CONDITIONALS: Use lookup tables, early returns, or extract complex conditions into named functions')
  }

  // Priority 6: Extract callbacks
  if (analysis.callbackCount >= 4) {
    actions.push(`⚡ CONSOLIDATE CALLBACKS: ${analysis.callbackCount} useCallback calls. Consider extracting related callbacks into a custom hook`)
  }

  // Priority 7: Context provider extraction
  if (analysis.hasContext && analysis.complexity > 50) {
    actions.push('🎯 EXTRACT CONTEXT LOGIC: Move context provider logic into separate files or split into domain-specific contexts')
  }

  // Priority 8: Memoization review
  if (analysis.memoCount >= 3 && analysis.complexity > 50) {
    actions.push(`📝 REVIEW MEMOIZATION: ${analysis.memoCount} useMemo calls. Extract complex computations into utility functions or hooks`)
  }

  // If no specific issues, provide general guidance
  if (actions.length === 0) {
    if (analysis.complexity > 50) {
      actions.push('🔍 ANALYZE FUNCTIONS: Review individual functions for complexity and extract helper functions')
    }
    else {
      actions.push('✅ Component complexity is acceptable. Consider minor improvements for maintainability')
    }
  }

  return actions
}
|
||||
|
||||
buildRequirements(analysis) {
|
||||
const requirements = []
|
||||
|
||||
if (analysis.stateCount >= 3) {
|
||||
requirements.push('- Group related useState calls into a single custom hook')
|
||||
requirements.push('- Move associated useEffect calls with the state they depend on')
|
||||
}
|
||||
|
||||
if (analysis.hasAPI) {
|
||||
requirements.push('- Create data fetching hook following web/service/use-*.ts patterns')
|
||||
requirements.push('- Use useQuery with proper queryKey and enabled options')
|
||||
requirements.push('- Export invalidation hook (useInvalidXxx) for cache management')
|
||||
}
|
||||
|
||||
if (analysis.lineCount > 300) {
|
||||
requirements.push('- Extract logical UI sections into separate components')
|
||||
requirements.push('- Keep parent component focused on orchestration')
|
||||
requirements.push('- Pass minimal props to child components')
|
||||
}
|
||||
|
||||
if (analysis.hasModals >= 2) {
|
||||
requirements.push('- Create unified modal state management')
|
||||
requirements.push('- Consider extracting modals to separate file')
|
||||
}
|
||||
|
||||
if (analysis.conditionalBlocks > 10) {
|
||||
requirements.push('- Replace switch statements with lookup tables')
|
||||
requirements.push('- Use early returns to reduce nesting')
|
||||
requirements.push('- Extract complex boolean logic to named functions')
|
||||
}
|
||||
|
||||
if (requirements.length === 0) {
|
||||
requirements.push('- Maintain existing code structure')
|
||||
requirements.push('- Focus on readability improvements')
|
||||
}
|
||||
|
||||
return requirements.join('\n')
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Main Function
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Print CLI usage, options, examples, and complexity thresholds to stdout.
 */
function showHelp() {
  const helpText = `
🔧 Component Refactor Tool - Generate refactoring prompts for AI assistants

Usage:
  node refactor-component.js <component-path> [options]
  pnpm refactor-component <component-path> [options]

Options:
  --help    Show this help message
  --json    Output analysis result as JSON (for programmatic use)

Examples:
  # Analyze and generate refactoring prompt
  pnpm refactor-component app/components/app/configuration/index.tsx

  # Output as JSON
  pnpm refactor-component app/components/tools/mcp/modal.tsx --json

Complexity Thresholds:
  🟢 0-25:   Simple (no refactoring needed)
  🟡 26-50:  Medium (consider minor refactoring)
  🟠 51-75:  Complex (should refactor)
  🔴 76-100: Very Complex (must refactor)

For complete refactoring guidelines, see:
  .claude/skills/component-refactoring/SKILL.md
`
  console.log(helpText)
}
|
||||
|
||||
/**
 * CLI entry point.
 *
 * Flow:
 *   1. Parse argv: `--json` toggles machine-readable output, `--help`/`-h`
 *      prints usage and exits 0; everything else is treated as a path arg.
 *   2. Resolve the target file (directories are resolved to an entry file
 *      via resolveDirectoryEntry; failure lists analyzable files and exits 1).
 *   3. Analyze the source. In JSON mode, print the analysis and return.
 *   4. If the component is simple enough (complexity <= 25 and <= 200 lines),
 *      print a "well-structured" banner and return without a prompt.
 *   5. Otherwise print the refactoring prompt and best-effort copy it to the
 *      clipboard via pbcopy (macOS only; silently skipped elsewhere).
 */
function main() {
  const rawArgs = process.argv.slice(2)

  let isJsonMode = false
  const args = []

  // Split flags from positional arguments; --help short-circuits the run.
  rawArgs.forEach((arg) => {
    if (arg === '--json') {
      isJsonMode = true
      return
    }
    if (arg === '--help' || arg === '-h') {
      showHelp()
      process.exit(0)
    }
    args.push(arg)
  })

  if (args.length === 0) {
    showHelp()
    process.exit(1)
  }

  // Only the first positional argument is used as the component path.
  let componentPath = args[0]
  let absolutePath = path.resolve(process.cwd(), componentPath)

  if (!fs.existsSync(absolutePath)) {
    console.error(`❌ Error: Path not found: ${componentPath}`)
    process.exit(1)
  }

  // Directories are resolved to a recognizable entry file (e.g. index.tsx);
  // when none exists, list candidates so the user can pick an exact file.
  if (fs.statSync(absolutePath).isDirectory()) {
    const resolvedFile = resolveDirectoryEntry(absolutePath, componentPath)
    if (resolvedFile) {
      absolutePath = resolvedFile.absolutePath
      componentPath = resolvedFile.componentPath
    }
    else {
      const availableFiles = listAnalyzableFiles(absolutePath)
      console.error(`❌ Error: Directory does not contain a recognizable entry file: ${componentPath}`)
      if (availableFiles.length > 0) {
        console.error(`\n  Available files to analyze:`)
        availableFiles.forEach(f => console.error(`    - ${path.join(componentPath, f)}`))
        console.error(`\n  Please specify the exact file path, e.g.:`)
        console.error(`    pnpm refactor-component ${path.join(componentPath, availableFiles[0])}`)
      }
      process.exit(1)
    }
  }

  const sourceCode = fs.readFileSync(absolutePath, 'utf-8')

  // NOTE(review): RefactorAnalyzer is expected to be defined earlier in this
  // file (not visible in this chunk) — its analyze() returns the metrics
  // object consumed below.
  const analyzer = new RefactorAnalyzer()
  const analysis = analyzer.analyze(sourceCode, componentPath, absolutePath)

  // JSON output mode
  if (isJsonMode) {
    console.log(JSON.stringify(analysis, null, 2))
    return
  }

  // Check if refactoring is needed — simple, small components get a
  // congratulatory banner instead of a refactoring prompt.
  if (analysis.complexity <= 25 && analysis.lineCount <= 200) {
    console.log(`
╔════════════════════════════════════════════════════════════════════════════╗
║ ✅ COMPONENT IS WELL-STRUCTURED                                              ║
╚════════════════════════════════════════════════════════════════════════════╝

📍 Component: ${analysis.name}
📂 Path: ${analysis.path}

📊 Metrics:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Complexity: ${analysis.complexity}/100 🟢 Simple
Lines: ${analysis.lineCount} ✓ Within limits
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

This component has good structure. No immediate refactoring needed.
You can proceed with testing using: pnpm analyze-component ${componentPath}
`)
    return
  }

  // Build refactoring prompt
  const builder = new RefactorPromptBuilder()
  const prompt = builder.build(analysis)

  console.log(prompt)

  // Copy to clipboard (macOS). Best-effort only: any failure here must not
  // affect the exit status, since the prompt was already printed.
  try {
    // Skip silently when pbcopy is unavailable (non-macOS systems).
    const checkPbcopy = spawnSync('which', ['pbcopy'], { stdio: 'pipe' })
    if (checkPbcopy.status !== 0)
      return
    const copyContent = extractCopyContent(prompt)
    if (!copyContent)
      return

    const result = spawnSync('pbcopy', [], {
      input: copyContent,
      encoding: 'utf-8',
    })

    if (result.status === 0) {
      console.log('\n📋 Refactoring prompt copied to clipboard!')
      console.log('   Paste it in your AI assistant:')
      console.log('   - Cursor: Cmd+L (Chat) or Cmd+I (Composer)')
      console.log('   - GitHub Copilot Chat: Cmd+I')
      console.log('   - Or any other AI coding tool\n')
    }
  }
  catch {
    // pbcopy failed, but don't break the script
  }
}
|
||||
|
||||
// ============================================================================
// Run
// ============================================================================

// Script entry point: parse CLI args, analyze the component, emit the prompt.
main()
|
||||
Reference in New Issue
Block a user