Merge branch 'main' into fix/typo-in-js-async-prompt

This commit is contained in:
mohammed ahmed 2026-02-05 13:52:11 +02:00 committed by GitHub
commit 1a7e9bdfc0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 339 additions and 52 deletions

View file

@ -13,21 +13,33 @@ on:
types: [submitted]
jobs:
# Automatic PR review (read-only)
# Automatic PR review (can fix linting issues and push)
pr-review:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
permissions:
contents: read
contents: write
pull-requests: write
issues: read
id-token: write
actions: read
defaults:
run:
working-directory: django/aiservice
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 2
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref }}
- name: Install uv
uses: astral-sh/setup-uv@v6
- name: Install dependencies
run: |
uv venv --seed
uv sync
- name: Run Claude Code
id: claude
@ -40,6 +52,25 @@ jobs:
PR NUMBER: ${{ github.event.pull_request.number }}
EVENT: ${{ github.event.action }}
IMPORTANT: This repo has Python code in `django/aiservice/`. Run uv/prek commands from that directory.
## STEP 1: Run pre-commit checks and fix issues
First, run `cd django/aiservice && uv run prek run --from-ref origin/main` to check for linting/formatting issues on files changed in this PR.
If there are any issues:
- For SAFE auto-fixable issues (formatting, import sorting, trailing whitespace, etc.), run the command again to auto-fix them
- Stage the fixed files with `git add`
- Commit with message "style: auto-fix linting issues"
- Push the changes with `git push`
Do NOT attempt to fix:
- Type errors that require logic changes
- Complex refactoring suggestions
- Anything that could change behavior
## STEP 2: Review the PR
${{ github.event.action == 'synchronize' && 'This is a RE-REVIEW after new commits. First, get the list of changed files in this latest push using `gh pr diff`. Review ONLY the changed files. Check ALL existing review comments and resolve ones that are now fixed.' || 'This is the INITIAL REVIEW.' }}
Review this PR focusing ONLY on:
@ -56,7 +87,42 @@ jobs:
- Use CLAUDE.md for project-specific guidance.
- Use `gh pr comment` for summary-level feedback.
- Use `mcp__github_inline_comment__create_inline_comment` sparingly for critical code issues only.
claude_args: '--allowedTools "mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*),Bash(gh pr list:*),Bash(gh issue view:*),Bash(gh issue list:*),Read,Glob,Grep"'
## STEP 3: Coverage analysis
Analyze test coverage for changed files:
1. Get the list of Python files changed in this PR (excluding tests):
`git diff --name-only origin/main...HEAD -- '*.py' | grep -v test`
2. Run tests with coverage on the PR branch (from django/aiservice):
`cd django/aiservice && uv run coverage run -m pytest -q --tb=no`
`cd django/aiservice && uv run coverage json -o coverage-pr.json`
3. Get coverage for changed files only:
`cd django/aiservice && uv run coverage report --include="<changed_files_comma_separated>"`
4. Compare with main branch coverage:
- Checkout main: `git checkout origin/main`
- Run coverage: `cd django/aiservice && uv run coverage run -m pytest -q --tb=no && uv run coverage json -o coverage-main.json`
- Checkout back: `git checkout -`
5. Analyze the diff to identify:
- NEW FILES: Files that don't exist on main (require good test coverage)
- MODIFIED FILES: Files with changes (changes must be covered by tests)
6. Report in PR comment with a markdown table:
- Coverage % for each changed file (PR vs main)
- Overall coverage change
- For NEW files: Flag if coverage is below 75%
- For MODIFIED files: Flag if the changed lines are not covered by tests
- Flag if overall coverage decreased
Coverage requirements:
- New implementations/files: Must have ≥75% test coverage
- Modified code: Changed lines should be exercised by existing or new tests
- No coverage regressions: Overall coverage should not decrease
claude_args: '--allowedTools "mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*),Bash(gh pr list:*),Bash(gh issue view:*),Bash(gh issue list:*),Bash(gh api:*),Bash(cd django/aiservice*),Bash(uv run prek *),Bash(uv run coverage *),Bash(uv run pytest *),Bash(git status*),Bash(git add *),Bash(git commit *),Bash(git push*),Bash(git diff *),Bash(git checkout *),Read,Glob,Grep"'
additional_permissions: |
actions: read
env:
@ -77,9 +143,13 @@ jobs:
issues: read
id-token: write
actions: read
defaults:
run:
working-directory: django/aiservice
steps:
- name: Get PR head ref
id: pr-ref
working-directory: .
env:
GH_TOKEN: ${{ github.token }}
run: |
@ -97,12 +167,20 @@ jobs:
fetch-depth: 0
ref: ${{ steps.pr-ref.outputs.ref }}
- name: Install uv
uses: astral-sh/setup-uv@v6
- name: Install dependencies
run: |
uv venv --seed
uv sync
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@v1
with:
use_foundry: "true"
claude_args: '--allowedTools "Read,Edit,Write,Glob,Grep,Bash(git status*),Bash(git diff*),Bash(git add *),Bash(git commit *),Bash(git push*),Bash(git log*),Bash(uv run prek *),Bash(prek *),Bash(gh pr comment*),Bash(gh pr view*)"'
claude_args: '--allowedTools "Read,Edit,Write,Glob,Grep,Bash(git status*),Bash(git diff*),Bash(git add *),Bash(git commit *),Bash(git push*),Bash(git log*),Bash(cd django/aiservice*),Bash(uv run prek *),Bash(prek *),Bash(uv run ruff *),Bash(uv run pytest *),Bash(uv run mypy *),Bash(uv run coverage *),Bash(gh pr comment*),Bash(gh pr view*),Bash(gh pr diff*)"'
additional_permissions: |
actions: read
env:

View file

@ -1,4 +1,4 @@
**Role**: You are Codeflash, a world-class JavaScript/TypeScript developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests for **asynchronous** code using Jest. When asked to reply only with code, you write all of your code in a single markdown code block.
**Role**: You are Codeflash, a world-class JavaScript/TypeScript developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests for **asynchronous** code. When asked to reply only with code, you write all of your code in a single markdown code block.
**Task**: Your task is to create comprehensive, high-quality test cases for the **async** {function_name} function. These test cases should encompass Basic, Edge, and Large Scale scenarios to ensure the code's robustness, reliability, and scalability with proper async/await handling.
@ -20,7 +20,7 @@
**Instructions**:
- Implement a comprehensive set of test cases following the guidelines above.
- Use Jest testing framework with `describe`, `test`, and `expect`.
- Use the testing framework specified in the user message with `describe`, `test`/`it`, and `expect`.
- **ALL test functions must be async**: `test('...', async () => {{ ... }})`
- **ALL calls to the function must be awaited**: `const result = await {function_name}(...)`
- Ensure each test case is well-documented with comments explaining the scenario it covers.
@ -31,15 +31,25 @@
- **CRITICAL: TEST REJECTION CASES** - Use `expect(...).rejects.toThrow()` for testing async errors.
**CRITICAL: IMPORT PATH RULES**:
- **NEVER add file extensions (.js, .ts, .tsx) to import paths** - Jest/TypeScript resolves extensions automatically.
- **NEVER add file extensions (.js, .ts, .tsx) to import paths** - The test framework resolves extensions automatically.
- **WRONG**: `import {{ fn }} from '../utils.js'` or `import {{ fn }} from '../utils.ts'`
- **CORRECT**: `import {{ fn }} from '../utils'`
- The user message provides the exact import statement to use - copy it exactly without modification.
**CRITICAL: MOCKING RULES FOR JEST**:
- **jest.mock() calls are HOISTED** to the top of the file by Jest's transformer. This means they execute BEFORE any other code, including variable declarations.
- **NEVER use dynamic expressions in jest.mock()** - Do NOT use variables, `path.join()`, `require.resolve()`, or any computed values in jest.mock() paths. These will fail because the variables are not yet defined when the hoisted mock executes.
- **ALWAYS use static string literals** for mock paths: `jest.mock('../analytics')`
**CRITICAL: VITEST IMPORTS REQUIRED**:
- If test_framework is "vitest", you MUST import test functions from 'vitest' since globals are NOT enabled by default:
```javascript
import {{ describe, test, expect, vi, beforeEach, afterEach }} from 'vitest';
```
- For "jest", globals are typically enabled so no import is needed.
**CRITICAL: MOCKING RULES**:
- **USE THE CORRECT MOCK SYNTAX FOR THE SPECIFIED FRAMEWORK**:
- For **Jest**: Use `jest.mock()` and `jest.fn()`
- For **Vitest**: Use `vi.mock()` and `vi.fn()` - NEVER use jest.mock with Vitest!
- Mock calls are HOISTED to the top of the file. NEVER use dynamic expressions - use static string literals only.
- **ALWAYS use static string literals** for mock paths.
- **IMPORTANT**: Check the test framework specified in the user message and use the matching syntax.
**Output Format Requirements**:

View file

@ -1,4 +1,6 @@
Using the {test_framework} testing framework, write a test suite for the following **ASYNC** JavaScript function.
Using the **{test_framework}** testing framework, write a test suite for the following **ASYNC** JavaScript function.
**IMPORTANT**: You MUST use {test_framework} syntax. If {test_framework} is "vitest", use `vi.mock()` and `vi.fn()`. If {test_framework} is "jest", use `jest.mock()` and `jest.fn()`. Do NOT mix frameworks.
**CRITICAL: This function is ASYNCHRONOUS**
- All test functions MUST be async: `test('...', async () => {{ ... }})`
@ -15,9 +17,18 @@ Using the {test_framework} testing framework, write a test suite for the followi
{import_statement}
```
**CRITICAL: VITEST IMPORTS REQUIRED**
If test_framework is "vitest", you MUST import test functions from 'vitest' since globals are NOT enabled:
```javascript
import {{ describe, test, expect, vi, beforeEach, afterEach }} from 'vitest';
```
For "jest", globals are typically enabled so no import is needed.
**Template to Follow:**
```javascript
// imports
// vitest imports (REQUIRED for vitest - globals are NOT enabled by default)
import {{ describe, test, expect, vi, beforeEach, afterEach }} from 'vitest';
// function import
{import_statement}
// unit tests

View file

@ -1,4 +1,4 @@
**Role**: You are Codeflash, a world-class JavaScript/TypeScript developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests using Jest. When asked to reply only with code, you write all of your code in a single markdown code block.
**Role**: You are Codeflash, a world-class JavaScript/TypeScript developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests. When asked to reply only with code, you write all of your code in a single markdown code block.
**Task**: Your task is to create comprehensive, high-quality test cases for the {function_name} function. These test cases should encompass Basic, Edge, and Large Scale scenarios to ensure the code's robustness, reliability, and scalability. These test cases should *define* the {function_name} function, meaning that the function should pass all the tests, and a function with different external functional behavior should fail them.
@ -13,7 +13,7 @@
**Instructions**:
- Implement a comprehensive set of test cases following the guidelines above.
- Use Jest testing framework with `describe`, `test`, and `expect`.
- Use the testing framework specified in the user message with `describe`, `test`/`it`, and `expect`.
- Ensure each test case is well-documented with comments explaining the scenario it covers.
- Pay special attention to edge cases as they often reveal hidden bugs.
- For large-scale tests, focus on the function's efficiency and performance under heavy loads. Avoid loops exceeding 1000 iterations, and keep data structures under 1000 elements.
@ -22,15 +22,25 @@
- **CRITICAL: HANDLE ASYNC PROPERLY** - If the function is async, use `async/await` in your tests and ensure all promises are properly awaited.
**CRITICAL: IMPORT PATH RULES**:
- **NEVER add file extensions (.js, .ts, .tsx) to import paths** - Jest/TypeScript resolves extensions automatically.
- **NEVER add file extensions (.js, .ts, .tsx) to import paths** - The test framework resolves extensions automatically.
- **WRONG**: `import {{fn}} from '../utils.js'` or `import {{fn}} from '../utils.ts'`
- **CORRECT**: `import {{fn}} from '../utils'`
- The user message provides the exact import statement to use - copy it exactly without modification.
**CRITICAL: MOCKING RULES FOR JEST**:
- **jest.mock() calls are HOISTED** to the top of the file by Jest's transformer. This means they execute BEFORE any other code, including variable declarations.
- **NEVER use dynamic expressions in jest.mock()** - Do NOT use variables, `path.join()`, `require.resolve()`, or any computed values in jest.mock() paths. These will fail because the variables are not yet defined when the hoisted mock executes.
- **ALWAYS use static string literals** for mock paths: `jest.mock('../analytics')`
**CRITICAL: VITEST IMPORTS REQUIRED**:
- If test_framework is "vitest", you MUST import test functions from 'vitest' since globals are NOT enabled by default:
```javascript
import {{ describe, test, expect, vi, beforeEach, afterEach }} from 'vitest';
```
- For "jest", globals are typically enabled so no import is needed.
**CRITICAL: MOCKING RULES**:
- **USE THE CORRECT MOCK SYNTAX FOR THE SPECIFIED FRAMEWORK**:
- For **Jest**: Use `jest.mock()` and `jest.fn()`
- For **Vitest**: Use `vi.mock()` and `vi.fn()` - NEVER use jest.mock with Vitest!
- Mock calls are HOISTED to the top of the file. NEVER use dynamic expressions - use static string literals only.
- **ALWAYS use static string literals** for mock paths.
- **IMPORTANT**: Check the test framework specified in the user message and use the matching syntax.
**Output Format Requirements**:

View file

@ -1,4 +1,6 @@
Using the {test_framework} testing framework, write a test suite for the following JavaScript function.
Using the **{test_framework}** testing framework, write a test suite for the following JavaScript function.
**IMPORTANT**: You MUST use {test_framework} syntax. If {test_framework} is "vitest", use `vi.mock()` and `vi.fn()`. If {test_framework} is "jest", use `jest.mock()` and `jest.fn()`. Do NOT mix frameworks.
**Function to Test:**
```javascript
@ -10,9 +12,18 @@ Using the {test_framework} testing framework, write a test suite for the followi
{import_statement}
```
**CRITICAL: VITEST IMPORTS REQUIRED**
If test_framework is "vitest", you MUST import test functions from 'vitest' since globals are NOT enabled:
```javascript
import {{ describe, test, expect, vi, beforeEach, afterEach }} from 'vitest';
```
For "jest", globals are typically enabled so no import is needed.
**Template to Follow:**
```javascript
// imports
// vitest imports (REQUIRED for vitest - globals are NOT enabled by default)
import {{ describe, test, expect, vi, beforeEach, afterEach }} from 'vitest';
// function import
{import_statement}
// unit tests

View file

@ -124,9 +124,7 @@ def _is_valid_js_identifier(name: str) -> bool:
# Patterns to strip file extensions from import paths
# LLMs sometimes add .js extensions to TypeScript imports, which breaks module resolution
_JS_EXTENSION_PATTERN = re.compile(
r"""(from\s+['"])(\.{0,2}/[^'"]+?)(\.(?:js|ts|tsx|jsx|mjs|mts))(['"])"""
)
_JS_EXTENSION_PATTERN = re.compile(r"""(from\s+['"])(\.{0,2}/[^'"]+?)(\.(?:js|ts|tsx|jsx|mjs|mts))(['"])""")
_REQUIRE_EXTENSION_PATTERN = re.compile(
r"""(require\s*\(\s*['"])(\.{0,2}/[^'"]+?)(\.(?:js|ts|tsx|jsx|mjs|mts))(['"]\s*\))"""
)
@ -259,14 +257,15 @@ def build_javascript_prompt(
return messages, posthog_event_suffix
def parse_and_validate_js_output(response_content: str) -> str:
"""Parse and validate the LLM response for JavaScript code.
def parse_and_validate_js_output(response_content: str, language: str = "javascript") -> str:
"""Parse and validate the LLM response for JavaScript/TypeScript code.
Args:
response_content: Raw LLM response
language: Language to validate against ("javascript" or "typescript")
Returns:
Validated JavaScript code
Validated JavaScript/TypeScript code
Raises:
ValueError: If no valid code block found
@ -284,10 +283,16 @@ def parse_and_validate_js_output(response_content: str) -> str:
code = pattern_res.group(1).strip()
# Validate syntax
is_valid, error = validate_javascript_syntax(code)
# Validate syntax using the appropriate validator based on language
if language == "typescript":
is_valid, error = validate_typescript_syntax(code)
lang_name = "TypeScript"
else:
is_valid, error = validate_javascript_syntax(code)
lang_name = "JavaScript"
if not is_valid:
raise SyntaxError(f"Invalid JavaScript code: {error}")
raise SyntaxError(f"Invalid {lang_name} syntax: {error}")
# Check for test functions
if not _has_test_functions(code):
@ -311,8 +316,9 @@ async def generate_and_validate_js_test_code(
posthog_event_suffix: str,
trace_id: str = "",
call_sequence: int | None = None,
language: str = "javascript",
) -> str:
"""Generate and validate JavaScript test code using LLM.
"""Generate and validate JavaScript/TypeScript test code using LLM.
Args:
messages: Prompt messages
@ -322,9 +328,10 @@ async def generate_and_validate_js_test_code(
posthog_event_suffix: Suffix for PostHog events
trace_id: Trace ID for logging
call_sequence: Call sequence number
language: Language to validate against ("javascript" or "typescript")
Returns:
Validated JavaScript test code
Validated JavaScript/TypeScript test code
Raises:
SyntaxError: If code is invalid
@ -357,8 +364,8 @@ async def generate_and_validate_js_test_code(
properties={"model": model.name, "usage": response.raw_response.usage.model_dump_json()},
)
# Parse and validate
validated_code = parse_and_validate_js_output(response.content)
# Parse and validate using the appropriate language validator
validated_code = parse_and_validate_js_output(response.content, language=language)
return validated_code
@ -415,6 +422,7 @@ async def generate_javascript_tests_from_function(
posthog_event_suffix=posthog_event_suffix,
trace_id=trace_id,
call_sequence=call_sequence,
language=language,
)
total_llm_cost = sum(cost_tracker)
@ -442,8 +450,8 @@ def validate_javascript_testgen_request_data(data: TestGenSchema) -> None:
HttpError: If validation fails
"""
if data.test_framework not in ["jest"]:
raise HttpError(400, "Invalid test framework for JavaScript/TypeScript. We only support jest.")
if data.test_framework not in ["jest", "vitest", "mocha"]:
raise HttpError(400, "Invalid test framework for JavaScript/TypeScript. Supported: jest, vitest, mocha.")
if not data.function_to_optimize:
raise HttpError(400, "Invalid function to optimize. It is empty.")
if not validate_trace_id(data.trace_id):

View file

@ -8,7 +8,7 @@ from pydantic import ValidationError
from aiservice.common.cst_utils import parse_module_to_cst
from aiservice.common.markdown_utils import wrap_code_in_markdown
from aiservice.validators.javascript_validator import validate_javascript_syntax
from aiservice.validators.javascript_validator import validate_javascript_syntax, validate_typescript_syntax
from optimizer.context_utils.context_helpers import (
group_code,
is_markdown_structure_changed,
@ -92,7 +92,10 @@ class BaseRefinerContext:
return False
if self.data.language in ("javascript", "typescript"):
valid, _ = validate_javascript_syntax(stripped_code)
if self.data.language == "typescript":
valid, _ = validate_typescript_syntax(stripped_code)
else:
valid, _ = validate_javascript_syntax(stripped_code)
return bool(valid)
try:
@ -154,9 +157,14 @@ class SingleRefinerContext(BaseRefinerContext):
def validate_code_syntax(self, code: str) -> None:
"""Validate code syntax based on language."""
if self.data.language in ("javascript", "typescript"):
valid, _ = validate_javascript_syntax(code)
if self.data.language == "typescript":
valid, _ = validate_typescript_syntax(code)
lang_name = "TypeScript"
else:
valid, _ = validate_javascript_syntax(code)
lang_name = "JavaScript"
if not valid:
raise ValueError("Invalid JavaScript syntax")
raise ValueError(f"Invalid {lang_name} syntax")
return
# Python validation using libcst
@ -195,11 +203,16 @@ class MultiRefinerContext(BaseRefinerContext):
def validate_code_syntax(self, code: str) -> None:
"""Validate code syntax based on language."""
# For JavaScript/TypeScript, skip Python-specific validation
# For JavaScript/TypeScript, use the appropriate validator
if self.data.language in ("javascript", "typescript"):
valid, _ = validate_javascript_syntax(code)
if self.data.language == "typescript":
valid, _ = validate_typescript_syntax(code)
lang_name = "TypeScript"
else:
valid, _ = validate_javascript_syntax(code)
lang_name = "JavaScript"
if not valid:
raise ValueError("Invalid JavaScript syntax")
raise ValueError(f"Invalid {lang_name} syntax")
return
# Python validation using libcst

View file

@ -204,8 +204,41 @@ function add(a: number, b: number): number {
}
"""
is_valid, error = validate_typescript_syntax(code)
# TypeScript uses the same validator as JavaScript
assert isinstance(is_valid, bool)
assert is_valid is True
assert error is None
def test_typescript_type_assertion_valid_in_ts(self) -> None:
"""Test that TypeScript type assertions are valid in TypeScript."""
code = "const value = 4.9 as unknown as number;"
is_valid, error = validate_typescript_syntax(code)
assert is_valid is True
assert error is None
def test_typescript_type_assertion_invalid_in_js(self) -> None:
"""Test that TypeScript type assertions are INVALID in JavaScript.
This is a critical test - TypeScript-specific syntax like 'as unknown as number'
should fail when validated as JavaScript. This bug caused production issues
where generated TypeScript tests were incorrectly validated with JS parser.
"""
code = "const value = 4.9 as unknown as number;"
is_valid, error = validate_javascript_syntax(code)
assert is_valid is False
assert error is not None
def test_typescript_generic_valid_in_ts(self) -> None:
"""Test that TypeScript generics are valid in TypeScript."""
code = "function identity<T>(arg: T): T { return arg; }"
is_valid, error = validate_typescript_syntax(code)
assert is_valid is True
assert error is None
def test_typescript_generic_invalid_in_js(self) -> None:
"""Test that TypeScript generics are INVALID in JavaScript."""
code = "function identity<T>(arg: T): T { return arg; }"
is_valid, error = validate_javascript_syntax(code)
assert is_valid is False
assert error is not None
def test_typescript_interface(self) -> None:
"""Test that TypeScript interfaces pass validation (if Node available)."""

View file

@ -40,6 +40,7 @@ import {
Star,
} from "lucide-react"
import { ReviewQualityBadge } from "../ui/quality_badge"
import { getMonacoLanguage } from "@/lib/utils"
interface DiffContent {
newContent: string
@ -1126,7 +1127,7 @@ const MonacoDiffEditorGithub: React.FC<MonacoDiffEditorGithubProps> = ({
<Editor
key={testEditorKey}
value={editorContent}
language="python"
language={getMonacoLanguage(selectedFile)}
theme={isDarkMode ? "vs-dark" : "light"}
onMount={handleTestEditorDidMount}
options={{
@ -1185,7 +1186,7 @@ const MonacoDiffEditorGithub: React.FC<MonacoDiffEditorGithubProps> = ({
<Editor
key={editorKey}
value={editorContent}
language="python"
language={getMonacoLanguage(selectedFile)}
theme={isDarkMode ? "vs-dark" : "light"}
onChange={handleEditorChange}
onMount={handleEditorDidMount}
@ -1215,7 +1216,7 @@ const MonacoDiffEditorGithub: React.FC<MonacoDiffEditorGithubProps> = ({
key={diffEditorKey}
original={originalContent}
modified={editorContent}
language="python"
language={getMonacoLanguage(selectedFile)}
theme={isDarkMode ? "vs-dark" : "light"}
onMount={handleDiffEditorDidMount}
keepCurrentOriginalModel={true}

View file

@ -23,6 +23,7 @@ import {
// Ensure you have lucide-react installed as per your package.json
import { Loader2, FileText, AlertTriangle } from "lucide-react"
import type { ExperimentMetadata, DiffContents } from "@/lib/types" // Adjust path if needed
import { getMonacoLanguage } from "@/lib/utils"
import ReactMarkdown from "react-markdown"
import remarkGfm from "remark-gfm"
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter"
@ -612,7 +613,7 @@ const MonacoDiffViewer: React.FC<MonacoDiffViewerProps> = ({
height="100%"
original={currentDiff.oldContent || ""}
modified={currentEdit[activeFileKey] || currentDiff.newContent || ""}
language="python"
language={activeFileKey ? getMonacoLanguage(activeFileKey) : "python"}
theme="codeflash-python-dark"
onMount={handleEditorOnMount}
options={{
@ -659,7 +660,7 @@ const MonacoDiffViewer: React.FC<MonacoDiffViewerProps> = ({
height="100%"
original={currentDiff.oldContent || ""}
modified={currentDiff.newContent || ""}
language="python"
language={activeFileKey ? getMonacoLanguage(activeFileKey) : "python"}
theme="codeflash-python-dark"
onMount={handleEditorOnMount}
options={{

View file

@ -61,6 +61,99 @@ export function parseLineProfilerResults(rawResults: string): LineProfilerReport
return report
}
// Detect format: JS format starts with "Line Profile Results:" or has space-separated columns
const isJsFormat =
rawResults.includes("Line Profile Results:") ||
(rawResults.includes("Line") && rawResults.includes("Hits") && rawResults.includes("Content") && !rawResults.includes("|"))
if (isJsFormat) {
return parseJsLineProfilerResults(rawResults)
}
return parsePythonLineProfilerResults(rawResults)
}
/**
 * Parse the JS/TS line profiler output into a LineProfilerReport.
 *
 * Expected input shape:
 * Line Profile Results:
 * File: /path/to/file.js
 * --------------------------------------------------------------------------------
 * Line         Hits    Time (ms)   % Time  Content
 * --------------------------------------------------------------------------------
 *   12      3442092      397.958   100.0%  if (n <= 1) {
 */
function parseJsLineProfilerResults(rawResults: string): LineProfilerReport {
  const parsed: LineProfilerReport = {
    timerUnit: "1e-03 s", // JS profiler uses milliseconds
    functions: [],
  }

  let active: LineProfilerFunction | null = null
  let parsingRows = false

  for (const rawLine of rawResults.split("\n")) {
    const text = rawLine.trim()

    // A "File:" line starts a new section; flush the previous one first.
    if (text.startsWith("File:")) {
      if (active) {
        parsed.functions.push(active)
      }
      const fullPath = text.replace("File:", "").trim()
      active = {
        functionName: fullPath.split("/").pop() || fullPath,
        totalTime: "",
        entries: [],
      }
      parsingRows = false
      continue
    }

    // Column-header line ("Line ... Hits ...") precedes the data region.
    if (text.startsWith("Line") && text.includes("Hits")) {
      parsingRows = false
      continue
    }

    // A dashed separator marks the start of the data rows.
    if (/^-+$/.test(text)) {
      parsingRows = true
      continue
    }

    if (!parsingRows || !active || text.length === 0) {
      continue
    }

    // Data row: " 12 3442092 397.958 100.0% if (n <= 1) {"
    // Groups: 1=line number (unused), 2=hits, 3=time, 4=percent, 5=content
    const row = text.match(/^\s*(\d+)\s+(\d+)\s+([\d.]+)\s+([\d.]+)%\s+(.*)$/)
    if (!row) {
      continue
    }

    const hitCount = row[2]
    const elapsed = row[3]
    active.entries.push({
      hits: hitCount,
      time: elapsed,
      // Guard against divide-by-zero when a line was never executed.
      perHit: hitCount === "0" ? "0.000" : (parseFloat(elapsed) / parseInt(hitCount)).toFixed(3),
      percentTime: parseFloat(row[4]) || 0,
      lineContents: row[5] || " ",
    })
  }

  // Flush the final section, if any.
  if (active) {
    parsed.functions.push(active)
  }

  return parsed
}
/**
* Parse Python line_profiler markdown table format
*/
function parsePythonLineProfilerResults(rawResults: string): LineProfilerReport {
const report: LineProfilerReport = {
timerUnit: "",
functions: [],
}
const lines = rawResults.split("\n")
let currentFunction: LineProfilerFunction | null = null
let inTable = false

View file

@ -5,6 +5,24 @@ export function cn(...inputs: ClassValue[]): string {
return twMerge(clsx(inputs))
}
/**
 * Get Monaco editor language identifier from file path.
 *
 * The extension is taken from the basename only, so dots in directory
 * names (e.g. "src/v1.2/script") do not produce a bogus extension.
 * Unknown or missing extensions fall back to "python".
 */
export function getMonacoLanguage(filePath: string): string {
  // Use only the basename so dots in directory names are ignored.
  const baseName = filePath.split("/").pop() || filePath
  // dotIndex > 0 also treats dotfiles (".gitignore") as extension-less.
  const dotIndex = baseName.lastIndexOf(".")
  const ext = dotIndex > 0 ? baseName.slice(dotIndex + 1).toLowerCase() : ""
  const languageMap: Record<string, string> = {
    py: "python",
    js: "javascript",
    jsx: "javascript",
    mjs: "javascript",
    cjs: "javascript",
    ts: "typescript",
    tsx: "typescript",
    mts: "typescript",
    cts: "typescript",
    java: "java",
    html: "html",
    css: "css",
    json: "json",
    md: "markdown",
  }
  return languageMap[ext] || "python"
}
/**
* Round optimization attempts to nearest 0.5 by dividing by 100 and rounding to half increments
* (e.g., 4000 -> 40, 450 -> 4.5, 550 -> 5.5)