Decouple language modules and remove stale cross-module code (#2415)

## Summary

- Extract testgen and optimizer API routers from
`core/languages/python/` into `core/shared/` with lazy imports,
eliminating cross-module coupling between language modules
- Delete stale JavaScript prompt files left in the Python module after
migration to `js_ts/`
- Remove backward-compat fallback paths for prompt files that already
exist at their new locations
- Remove unused `is_multi_context_any()` and its cross-language imports
- Remove unused `BEGIN_PATCH`/`END_PATCH` constants and stale TODO

## Test plan

- [ ] Verify testgen endpoint dispatches correctly for Python, JS/TS,
and Java
- [ ] Verify optimizer endpoint dispatches correctly for all languages
- [ ] Run existing testgen and optimizer tests
This commit is contained in:
Kevin Turcios 2026-02-14 00:09:44 -05:00 committed by GitHub
parent 2614393793
commit 6caf7469c6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
20 changed files with 119 additions and 464 deletions

View file

@ -25,10 +25,10 @@ from core.languages.python.code_repair.code_repair import code_repair_api
from core.languages.python.explanations.explanations import explanations_api
from core.languages.python.jit_rewrite.jit_rewrite import jit_rewrite_api
from core.languages.python.optimization_review.optimization_review import optimization_review_api
from core.languages.python.optimizer.optimizer import optimize_api
from core.languages.python.optimizer.optimizer_line_profiler import optimize_line_profiler_api
from core.languages.python.optimizer.refinement import refinement_api
from core.languages.python.testgen.testgen import testgen_api
from core.shared.optimizer_router import optimize_api
from core.shared.testgen_router import testgen_api
from log_features.log_features import features_api
from ranker.ranker import ranker_api
from workflow_gen.workflow_gen import workflow_gen_api

View file

@ -16,7 +16,7 @@ import sentry_sdk
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from aiservice.analytics.posthog import ph
from aiservice.common_utils import is_host_equals_demo, should_hack_for_demo_java
from aiservice.common_utils import is_host_equals_demo
from aiservice.env_specific import debug_log_sensitive_data
from aiservice.llm import OPTIMIZE_MODEL, calculate_llm_cost, call_llm
from aiservice.validators.java_validator import validate_java_syntax
@ -41,11 +41,7 @@ current_dir = Path(__file__).parent
JAVA_PROMPTS_DIR = current_dir / "prompts" / "optimizer"
# Load Java system prompt
JAVA_SYSTEM_PROMPT = JAVA_PROMPTS_DIR / "system_prompt.md"
if JAVA_SYSTEM_PROMPT.exists():
JAVA_SYSTEM_PROMPT_TEXT = JAVA_SYSTEM_PROMPT.read_text()
else:
JAVA_SYSTEM_PROMPT_TEXT = ""
JAVA_SYSTEM_PROMPT_TEXT = (JAVA_PROMPTS_DIR / "system_prompt.md").read_text()
# Pattern to extract code blocks from Java LLM response (single file, no file path)
JAVA_CODE_PATTERN = re.compile(r"```(?:java)\s*\n(.*?)```", re.MULTILINE | re.DOTALL)

View file

@ -16,13 +16,13 @@ import sentry_sdk
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from aiservice.analytics.posthog import ph
from aiservice.common.markdown_utils import split_markdown_code
from aiservice.env_specific import debug_log_sensitive_data
from aiservice.llm import OPTIMIZE_MODEL, calculate_llm_cost, call_llm
from aiservice.validators.javascript_validator import validate_javascript_syntax, validate_typescript_syntax
from core.shared.optimizer_config import MAX_OPTIMIZER_LP_CALLS, get_model_distribution
from aiservice.common.markdown_utils import split_markdown_code
from core.languages.js_ts.context_helpers import is_multi_context_js, is_multi_context_ts
from core.shared.context_helpers import group_code
from core.shared.optimizer_config import MAX_OPTIMIZER_LP_CALLS, get_model_distribution
from core.shared.optimizer_schemas import OptimizeResponseItemSchema
if TYPE_CHECKING:
@ -35,17 +35,8 @@ if TYPE_CHECKING:
current_dir = Path(__file__).parent
JS_PROMPTS_DIR = current_dir / "prompts" / "optimizer"
# Fallback to original location if prompts haven't been moved
if not JS_PROMPTS_DIR.exists():
JS_PROMPTS_DIR = Path(__file__).parent.parent.parent / "optimizer" / "prompts" / "javascript"
# Load JavaScript system prompt
JS_SYSTEM_PROMPT = JS_PROMPTS_DIR / "system_prompt.md"
if JS_SYSTEM_PROMPT.exists():
JS_SYSTEM_PROMPT_TEXT = JS_SYSTEM_PROMPT.read_text()
else:
# Fallback for backwards compatibility
JS_SYSTEM_PROMPT_TEXT = ""
JS_SYSTEM_PROMPT_TEXT = (JS_PROMPTS_DIR / "system_prompt.md").read_text()
# Pattern to extract code blocks from JavaScript LLM response (single file, no file path)
JS_CODE_PATTERN = re.compile(r"```(?:javascript|js|typescript|ts)\s*\n(.*?)```", re.MULTILINE | re.DOTALL)

View file

@ -37,15 +37,10 @@ if TYPE_CHECKING:
_TEST_FUNC_RE = re.compile(r"(?:test|it)\s*\(\s*['\"]")
# Get the directory of the current file - prompts are now in languages/js_ts/prompts/testgen/
# Get the directory of the current file
current_dir = Path(__file__).parent
JS_PROMPTS_DIR = current_dir / "prompts" / "testgen"
# Fallback to original location if prompts haven't been moved yet
if not JS_PROMPTS_DIR.exists():
# Use original location for backward compatibility during migration
JS_PROMPTS_DIR = Path(__file__).parent.parent.parent / "testgen" / "prompts" / "javascript"
# Load JavaScript prompts
JS_EXECUTE_SYSTEM_PROMPT = (JS_PROMPTS_DIR / "execute_system_prompt.md").read_text()
JS_EXECUTE_USER_PROMPT = (JS_PROMPTS_DIR / "execute_user_prompt.md").read_text()

View file

@ -1,14 +1,3 @@
from core.languages.java.optimizer import is_multi_context_java
from core.languages.js_ts.context_helpers import is_multi_context_js, is_multi_context_ts
def is_multi_context(code: str) -> bool:
    """Return True if *code* is a multi-file markdown bundle (Python).

    Multi-file payloads open with a fenced code block tagged with a file
    path, i.e. the first non-whitespace text is ```python:<path>.
    """
    marker = "```python:"
    trimmed = code.strip()
    return trimmed.startswith(marker)
def is_multi_context_any(code: str) -> bool:
"""Check if code is in multi-file markdown format for any supported language."""
return (
is_multi_context(code) or is_multi_context_js(code) or is_multi_context_ts(code) or is_multi_context_java(code)
)

View file

@ -23,9 +23,6 @@ class Patch:
fuzz: int = 0 # Track fuzziness used during parsing
# TODO: use these
BEGIN_PATCH = "*** Begin Patch"
END_PATCH = "*** End Patch"
UPDATE_FILE_PREFIX = "*** Update File: " # followed by file path

View file

@ -8,7 +8,6 @@ from typing import TYPE_CHECKING, Any
import libcst as cst
import sentry_sdk
from ninja import NinjaAPI
from ninja.errors import HttpError
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from pydantic import ValidationError
@ -17,15 +16,12 @@ from aiservice.analytics.posthog import ph
from aiservice.common_utils import parse_python_version, should_hack_for_demo, validate_trace_id
from aiservice.env_specific import debug_log_sensitive_data, debug_log_sensitive_data_from_callable
from aiservice.llm import LLM, OPTIMIZE_MODEL, calculate_llm_cost, call_llm
from authapp.auth import AuthenticatedRequest
from authapp.user import get_user_by_id
from core.languages.java.optimizer import optimize_java
from core.languages.js_ts.optimizer import optimize_javascript
from core.languages.python.optimizer.context_utils.optimizer_context import BaseOptimizerContext
from core.languages.python.optimizer.diff_patches_utils.diff import DiffMethod
from core.shared.context_helpers import group_code
from core.shared.optimizer_config import MAX_OPTIMIZER_CALLS, get_model_distribution
from core.shared.optimizer_models import OptimizedCandidateSource, OptimizeSchema
from core.shared.optimizer_models import OptimizedCandidateSource
from core.shared.optimizer_schemas import (
OptimizeErrorResponseSchema,
OptimizeResponseItemSchema,
@ -37,6 +33,9 @@ from log_features.log_features import log_features
if TYPE_CHECKING:
from openai.types.chat import ChatCompletionMessageParam
from authapp.auth import AuthenticatedRequest
from core.shared.optimizer_models import OptimizeSchema
optimizations_json = [
{
"source_code": 'from __future__ import annotations\n\n\ndef find_common_tags(articles: list[dict[str, list[str]]]) -> set[str]:\n if not articles:\n return set()\n\n common_tags = set(articles[0].get("tags", []))\n for article in articles[1:]:\n common_tags.intersection_update(article.get("tags", []))\n return common_tags\n',
@ -107,8 +106,6 @@ async def hack_for_demo_gsq(ctx: BaseOptimizerContext) -> OptimizeResponseSchema
return OptimizeResponseSchema(optimizations=response_list)
optimize_api = NinjaAPI(urls_namespace="optimize")
# Get the directory of the current file
current_dir = Path(__file__).parent
SYSTEM_PROMPT = (current_dir / "system_prompt.md").read_text()
@ -281,20 +278,6 @@ def validate_request_data(data: OptimizeSchema, ctx: BaseOptimizerContext) -> tu
return python_version
@optimize_api.post(
"/", response={200: OptimizeResponseSchema, 400: OptimizeErrorResponseSchema, 500: OptimizeErrorResponseSchema}
)
async def optimize(
request: AuthenticatedRequest, data: OptimizeSchema
) -> tuple[int, OptimizeResponseSchema | OptimizeErrorResponseSchema]:
# Route based on language
if data.language in ("javascript", "typescript"):
return await optimize_javascript(request, data)
if data.language == "java":
return await optimize_java(request, data)
return await optimize_python(request, data)
async def optimize_python(
request: AuthenticatedRequest, data: OptimizeSchema
) -> tuple[int, OptimizeResponseSchema | OptimizeErrorResponseSchema]:

View file

@ -1,76 +1,56 @@
"""Prompt loader module for language-specific optimization prompts.
This module provides a unified interface to load prompts based on language.
Each language stores its optimizer prompts in its own module directory.
"""
from __future__ import annotations
from pathlib import Path
# NOTE(review): PROMPTS_DIR predates the per-language registry below and is not
# used by the accessor functions in this module — confirm no external caller
# relies on it before removing.
PROMPTS_DIR = Path(__file__).parent

# Root of the language modules, i.e. core/languages/.
_LANGUAGES_DIR = Path(__file__).parent.parent.parent.parent

# Each language keeps its optimizer prompts inside its own module directory.
_LANGUAGE_PROMPT_DIRS: dict[str, Path] = {
    "python": _LANGUAGES_DIR / "python" / "optimizer" / "prompts" / "python",
    "javascript": _LANGUAGES_DIR / "js_ts" / "prompts" / "optimizer",
    "java": _LANGUAGES_DIR / "java" / "prompts" / "optimizer",
}
# TypeScript reuses the JavaScript prompts.
_LANGUAGE_PROMPT_DIRS["typescript"] = _LANGUAGE_PROMPT_DIRS["javascript"]
def get_system_prompt(language: str, is_async: bool = False) -> str:
"""Load the system prompt for the given language.
def _get_prompt_dir(language: str) -> Path:
    """Return the prompt directory registered for *language*.

    Args:
        language: The programming language key (e.g. "python", "javascript").

    Returns:
        The directory holding the language's optimizer prompts.

    Raises:
        ValueError: If the language is unknown or its prompt directory is
            missing on disk.
    """
    prompt_dir = _LANGUAGE_PROMPT_DIRS.get(language)
    if prompt_dir is None or not prompt_dir.exists():
        msg = f"No prompts found for language: {language}"
        raise ValueError(msg)
    return prompt_dir
Args:
language: The programming language (python, javascript, typescript)
is_async: Whether to load the async variant of the prompt
Returns:
The system prompt text
Raises:
ValueError: If no prompt exists for the language
"""
# Normalize language - typescript uses javascript prompts
prompt_language = "javascript" if language == "typescript" else language
def get_system_prompt(language: str, is_async: bool = False) -> str:  # noqa: FBT001, FBT002
    """Load the optimizer system prompt for the given language.

    Args:
        language: The programming language (python, javascript, typescript, java).
        is_async: Whether to load the async variant of the prompt.

    Returns:
        The system prompt text.

    Raises:
        ValueError: If no system prompt exists for the language.
    """
    variant = "async_system_prompt.md" if is_async else "system_prompt.md"
    prompt_file = _get_prompt_dir(language) / variant
    if not prompt_file.exists():
        msg = f"No system prompt found for language: {language}"
        raise ValueError(msg)
    return prompt_file.read_text()
def get_user_prompt(language: str, is_async: bool = False) -> str:
"""Load the user prompt for the given language.
Args:
language: The programming language (python, javascript, typescript)
is_async: Whether to load the async variant of the prompt
Returns:
The user prompt text
Raises:
ValueError: If no prompt exists for the language
"""
# Normalize language - typescript uses javascript prompts
prompt_language = "javascript" if language == "typescript" else language
def get_user_prompt(language: str, is_async: bool = False) -> str:  # noqa: FBT001, FBT002
    """Load the optimizer user prompt for the given language.

    Args:
        language: The programming language (python, javascript, typescript, java).
        is_async: Whether to load the async variant of the prompt.

    Returns:
        The user prompt text.

    Raises:
        ValueError: If no user prompt exists for the language.
    """
    variant = "async_user_prompt.md" if is_async else "user_prompt.md"
    prompt_file = _get_prompt_dir(language) / variant
    if not prompt_file.exists():
        msg = f"No user prompt found for language: {language}"
        raise ValueError(msg)
    return prompt_file.read_text()
def get_available_languages() -> list[str]:
    """Get a sorted list of languages whose prompt directories exist on disk.

    "typescript" is excluded because it merely aliases the JavaScript
    prompt directory and would otherwise appear as a duplicate entry.

    Returns:
        Sorted list of language names with available prompts.
    """
    return sorted(lang for lang, path in _LANGUAGE_PROMPT_DIRS.items() if lang != "typescript" and path.exists())

View file

@ -1,77 +0,0 @@
You are a professional computer programmer who specializes in writing high-performance **asynchronous** JavaScript/TypeScript code. Your goal is to optimize the runtime and memory efficiency of the provided **async** code through safe and meaningful rewrites that would pass senior-level code review.
**CRITICAL: ASYNC CODE REQUIREMENTS**
- The code contains **async functions** that must remain async
- ALL async functions must maintain their `async function` or `async () =>` signature
- ALL `await` expressions must be preserved where they exist
- Do NOT convert async functions to synchronous functions
- Do NOT remove `await` keywords unless replacing with functionally equivalent async operations
- Preserve Promise chains and async/await flow
- Maintain proper async error handling in async contexts
**Behavioral Preservation (CRITICAL)**
- Do NOT rename functions or change their signatures.
- You MUST NOT change the behavior, return values, side effects, console output, or thrown errors - they MUST remain exactly the same.
- Do NOT mutate inputs in a different way than the original implementation.
- The same error types should be thrown in the same circumstances.
- Preserve existing type annotations (for TypeScript) - all function parameters, return types, and variable annotations must be preserved exactly as written.
- **Preserve the original code style**: Keep existing variable names unless the logic fundamentally changes
- Preserve ALL existing comments exactly as written, unless the corresponding code logic is changed or the comment becomes factually incorrect
- Avoid excessive inline comments - only add new comments for significant or non-obvious logic changes
- Preserve the export structure - exported functions/classes must remain exported
**Async-Specific Optimization Focus**
- Use `Promise.all()` for concurrent execution of independent async operations
- Use `Promise.allSettled()` when you need results regardless of individual failures
- Use `Promise.race()` for timeout patterns or first-to-complete scenarios
- Batch async operations instead of executing them one-by-one in a loop
- Consider using `for await...of` for async iterables when appropriate
- Optimize async I/O operations and resource management
- Use streaming APIs instead of loading entire datasets into memory
- Consider worker threads for CPU-intensive tasks that would block the event loop
**Code Style & Structure**
- Keep existing ES module syntax (`import`/`export`) or CommonJS (`require`/`module.exports`) as-is
- You may write new async helper functions that do not already exist in the codebase.
- Avoid purely stylistic changes unless they result in noticeable performance improvements
- Ensure all new async code follows proper async patterns and conventions
- Maintain consistent code formatting
**Optimization Strategies**
- Replace sequential awaits with parallel execution when operations are independent:
```javascript
// Before (sequential)
const a = await fetchA();
const b = await fetchB();
// After (parallel)
const [a, b] = await Promise.all([fetchA(), fetchB()]);
```
- Use chunked/batched processing for large async operations
- Implement proper backpressure handling for streams
- Cache async results when appropriate using memoization
**Optimization Focus**
- Create production-ready async code that professional programmers would merge without further edits
- Prioritize changes that provide measurable runtime or memory efficiency gains in async contexts
- Consider async-specific performance patterns like batching operations or reducing context switching
**Code Quality Standards**
- Ensure all async optimizations are safe and would pass senior-level code review
- Maintain code readability and maintainability alongside performance improvements
- Verify that async operations are properly awaited and handled
**Response Format (REQUIRED)**
- ALWAYS start your response with a brief explanation (2-4 sentences) of what optimization you made and why it improves performance
- Then provide the optimized code in a markdown code block
- Example format:
```
**Optimization Explanation:**
[Your explanation here describing the optimization technique and expected performance improvement]
```javascript:filename.js
[optimized code]
```
```
The target JavaScript/TypeScript version is {language_version}

View file

@ -1,18 +0,0 @@
Rewrite this **asynchronous** JavaScript/TypeScript program to run faster while preserving all async behavior.
**CRITICAL ASYNC REQUIREMENTS:**
- The code contains **async functions** - you MUST keep them async
- ALL `async function` signatures must be preserved exactly
- ALL `await` expressions must be maintained (unless replaced with functionally equivalent async operations)
- Do NOT convert async functions to synchronous functions
- Preserve concurrent execution patterns and Promise handling
- Maintain proper async/await flow and exception handling
**Async Optimization Guidelines:**
- Consider using `Promise.all()` for concurrent execution when beneficial
- Batch independent async operations instead of sequential awaits
- Optimize async I/O operations and use streaming where appropriate
- Use worker threads for CPU-intensive tasks to avoid blocking the event loop
- Implement proper error handling in async contexts
{source_code}

View file

@ -1,53 +0,0 @@
You are a professional computer programmer who specializes in writing high-performance JavaScript/TypeScript code. Your goal is to optimize the runtime and memory efficiency of the provided code through safe and meaningful rewrites that would pass senior-level code review.
**Behavioral Preservation (CRITICAL)**
- Do NOT rename functions or change their signatures.
- You MUST NOT change the behavior, return values, side effects, console output, or thrown errors - they MUST remain exactly the same.
- Do NOT mutate inputs in a different way than the original implementation.
- The same error types should be thrown in the same circumstances.
- Preserve existing type annotations (for TypeScript) - all function parameters, return types, and variable annotations must be preserved exactly as written.
- **Preserve the original code style**: Keep existing variable names unless the logic fundamentally changes
- Preserve ALL existing comments exactly as written, unless the corresponding code logic is changed or the comment becomes factually incorrect
- Avoid excessive inline comments - only add new comments for significant or non-obvious logic changes
- Preserve the export structure - exported functions/classes must remain exported
**Code Style & Structure**
- Keep existing ES module syntax (`import`/`export`) or CommonJS (`require`/`module.exports`) as-is
- You may write new helper functions that do not already exist in the codebase.
- Avoid purely stylistic changes unless they result in noticeable performance improvements
- Maintain consistent code formatting
**Optimization Strategies**
- Replace O(n^2) algorithms with O(n) or O(n log n) alternatives
- Use TypedArrays (Float64Array, Int32Array, etc.) instead of regular arrays for numeric-heavy operations
- Use Map/Set instead of Object for frequent lookups
- Minimize object allocations in hot paths (avoid creating temporary objects in loops)
- Use for loops instead of forEach/map/filter for performance-critical code when the functional style adds overhead
- Cache array lengths in tight loops: `for (let i = 0, len = arr.length; i < len; i++)`
- Leverage V8 optimization hints (keep functions monomorphic, avoid hidden class changes)
- Avoid try-catch in hot loops (move error handling outside the loop when possible)
- Use string concatenation with template literals or array join for building large strings
- Consider using WeakMap/WeakSet for caching to avoid memory leaks
**Optimization Focus**
- Create production-ready code that professional programmers would merge without further edits
- Prioritize changes that provide measurable runtime or memory efficiency gains
**Code Quality Standards**
- Ensure all optimizations are safe and would pass senior-level code review
- Maintain code readability and maintainability alongside performance improvements
**Response Format (REQUIRED)**
- ALWAYS start your response with a brief explanation (2-4 sentences) of what optimization you made and why it improves performance
- Then provide the optimized code in a markdown code block
- Example format:
```
**Optimization Explanation:**
[Your explanation here describing the optimization technique and expected performance improvement]
```javascript:filename.js
[optimized code]
```
```
The target JavaScript/TypeScript version is {language_version}

View file

@ -1,3 +0,0 @@
Rewrite this JavaScript/TypeScript program to run faster.
{source_code}

View file

@ -1,44 +0,0 @@
**Role**: You are Codeflash, a world-class JavaScript/TypeScript developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests for **asynchronous** code using Jest. When asked to reply only with code, you write all of your code in a single markdown code block.
**Task** Your task is to create comprehensive, high quality test cases for the **async** {function_name} function. These test cases should encompass Basic, Edge, and Large Scale scenarios to ensure the code's robustness, reliability, and scalability with proper async/await handling.
**CRITICAL: ASYNC TEST REQUIREMENTS**
- The function under test is **asynchronous** - all tests must handle async properly
- Use `async/await` syntax in test functions
- Ensure all promises are awaited
- Test both successful resolution and rejection scenarios
- Handle async timeouts appropriately
**1. Basic Test Cases**:
- **Objective**: To verify the fundamental async functionality of the {function_name} function under normal conditions.
**2. Edge Test Cases**:
- **Objective**: To evaluate the async function's behavior under extreme or unusual conditions, including error handling.
**3. Large Scale Test Cases**:
- **Objective**: To assess the async function's performance and scalability with concurrent operations and large data samples.
**Instructions**:
- Implement a comprehensive set of test cases following the guidelines above.
- Use Jest testing framework with `describe`, `test`, and `expect`.
- **ALL test functions must be async**: `test('...', async () => {{ ... }})`
- **ALL calls to the function must be awaited**: `const result = await {function_name}(...)`
- Ensure each test case is well-documented with comments explaining the scenario it covers.
- Pay special attention to edge cases including async error handling.
- For large-scale tests, consider concurrent execution with `Promise.all()`.
- Avoid loops exceeding 1000 iterations, and keep data structures under 1000 elements.
- **CRITICAL: DO NOT MOCK THE FUNCTION UNDER TEST** - Never mock, stub, or spy on the {function_name} function itself.
- **CRITICAL: TEST REJECTION CASES** - Use `expect(...).rejects.toThrow()` for testing async errors.
**CRITICAL: MOCKING RULES FOR JEST**:
- **jest.mock() calls are HOISTED** to the top of the file by Jest's transformer. This means they execute BEFORE any other code, including variable declarations.
- **NEVER use dynamic expressions in jest.mock()** - Do NOT use variables, `path.join()`, `require.resolve()`, or any computed values in jest.mock() paths. These will fail because the variables are not yet defined when the hoisted mock executes.
- **ALWAYS use static string literals** for mock paths: `jest.mock('../analytics.js')`
- **ALWAYS include the `.js` extension** in mock paths when the project uses ESM imports.
**Output Format Requirements**:
- Your response MUST be a single markdown code block containing valid JavaScript/TypeScript code.
- Do NOT nest code blocks inside each other.
- The code block MUST contain at least one async test using `test('...', async () => ...)`.
- Follow the exact template structure provided in the user message.

View file

@ -1,55 +0,0 @@
Using the {test_framework} testing framework, write a test suite for the following **ASYNC** JavaScript function.
**CRITICAL: This function is ASYNCHRONOUS**
- All test functions MUST be async: `test('...', async () => {{ ... }})`
- All calls to {function_name} MUST be awaited: `await {function_name}(...)`
- Test both successful and error cases for async operations
**Function to Test:**
```javascript
{function_code}
```
**CRITICAL: Use this exact import statement (do not modify the path):**
```javascript
const {{ {function_name} }} = require('{module_path}');
```
**Template to Follow:**
```javascript
// imports
const {{ {function_name} }} = require('{module_path}');
// unit tests
describe('{function_name}', () => {{
// Basic Test Cases
describe('Basic async functionality', () => {{
test('should resolve with correct value', async () => {{
const result = await {function_name}(/* args */);
expect(result).toBe(/* expected */);
}});
}});
// Edge Test Cases
describe('Async edge cases', () => {{
test('should handle async error case', async () => {{
await expect({function_name}(/* invalid args */)).rejects.toThrow();
}});
}});
// Large Scale Test Cases
describe('Concurrent execution tests', () => {{
test('should handle multiple concurrent calls', async () => {{
const results = await Promise.all([
{function_name}(/* args1 */),
{function_name}(/* args2 */),
]);
// assertions
}});
}});
}});
```
{package_comment}
Reply only with code, in a single markdown code block.

View file

@ -1,35 +0,0 @@
**Role**: You are Codeflash, a world-class JavaScript/TypeScript developer with an eagle eye for unintended bugs and edge cases. You write careful, accurate unit tests using Jest. When asked to reply only with code, you write all of your code in a single markdown code block.
**Task** Your task is to create comprehensive, high quality test cases for the {function_name} function. These test cases should encompass Basic, Edge, and Large Scale scenarios to ensure the code's robustness, reliability, and scalability. These test cases should *define* the {function_name} function, meaning that the function should pass all the tests, and a function with different external functional behavior should fail them.
**1. Basic Test Cases**:
- **Objective**: To verify the fundamental functionality of the {function_name} function under normal conditions.
**2. Edge Test Cases**:
- **Objective**: To evaluate the function's behavior under extreme or unusual conditions.
**3. Large Scale Test Cases**:
- **Objective**: To assess the function's performance and scalability with large data samples.
**Instructions**:
- Implement a comprehensive set of test cases following the guidelines above.
- Use Jest testing framework with `describe`, `test`, and `expect`.
- Ensure each test case is well-documented with comments explaining the scenario it covers.
- Pay special attention to edge cases as they often reveal hidden bugs.
- For large-scale tests, focus on the function's efficiency and performance under heavy loads. Avoid loops exceeding 1000 iterations, and keep data structures under 1000 elements.
- **CRITICAL: DO NOT MOCK THE FUNCTION UNDER TEST** - Never mock, stub, or spy on the {function_name} function itself. You may mock external dependencies (APIs, databases, network calls, file I/O, etc.) if necessary, but the function being tested must execute with its real implementation.
- **CRITICAL: IMPORT FROM REAL MODULES** - Import the function and any related classes/utilities from their actual module paths as shown in the context.
- **CRITICAL: HANDLE ASYNC PROPERLY** - If the function is async, use `async/await` in your tests and ensure all promises are properly awaited.
**CRITICAL: MOCKING RULES FOR JEST**:
- **jest.mock() calls are HOISTED** to the top of the file by Jest's transformer. This means they execute BEFORE any other code, including variable declarations.
- **NEVER use dynamic expressions in jest.mock()** - Do NOT use variables, `path.join()`, `require.resolve()`, or any computed values in jest.mock() paths. These will fail because the variables are not yet defined when the hoisted mock executes.
- **ALWAYS use static string literals** for mock paths: `jest.mock('../analytics.js')`
- **ALWAYS include the `.js` extension** in mock paths when the project uses ESM imports.
**Output Format Requirements**:
- Your response MUST be a single markdown code block containing valid JavaScript/TypeScript code.
- Do NOT nest code blocks inside each other.
- The code block MUST contain at least one test using `test()` or `it()`.
- Follow the exact template structure provided in the user message.

View file

@ -1,45 +0,0 @@
Using the {test_framework} testing framework, write a test suite for the following JavaScript function.
**Function to Test:**
```javascript
{function_code}
```
**CRITICAL: Use this exact import statement (do not modify the path):**
```javascript
const {{ {function_name} }} = require('{module_path}');
```
**Template to Follow:**
```javascript
// imports
const {{ {function_name} }} = require('{module_path}');
// unit tests
describe('{function_name}', () => {{
// Basic Test Cases
describe('Basic functionality', () => {{
test('should handle normal input', () => {{
// Test implementation
}});
}});
// Edge Test Cases
describe('Edge cases', () => {{
test('should handle edge case', () => {{
// Test implementation
}});
}});
// Large Scale Test Cases
describe('Performance tests', () => {{
test('should handle large inputs efficiently', () => {{
// Test implementation
}});
}});
}});
```
{package_comment}
Reply only with code, in a single markdown code block.

View file

@ -11,7 +11,6 @@ import sentry_sdk
import stamina
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from libcst import parse_module
from ninja import NinjaAPI
from ninja.errors import HttpError
from openai import OpenAIError
@ -21,9 +20,6 @@ from aiservice.common_utils import parse_python_version, safe_isort, should_hack
from aiservice.env_specific import debug_log_sensitive_data
from aiservice.llm import EXECUTE_MODEL, HAIKU_MODEL, OPENAI_MODEL, calculate_llm_cost, call_llm
from aiservice.models.functions_to_optimize import FunctionToOptimize
from authapp.auth import AuthenticatedRequest
from core.languages.java.testgen import testgen_java
from core.languages.js_ts.testgen import testgen_javascript
from core.languages.python.cst_utils import parse_module_to_cst
from core.languages.python.testgen.instrumentation.edit_generated_test import replace_definition_with_import
from core.languages.python.testgen.instrumentation.instrument_new_tests import instrument_test_source
@ -40,7 +36,6 @@ from core.shared.testgen_models import (
TestGenerationFailedError,
TestGenErrorResponseSchema,
TestGenResponseSchema,
TestGenSchema,
)
from log_features.log_event import update_optimization_cost
from log_features.log_features import log_features
@ -49,6 +44,8 @@ if TYPE_CHECKING:
from openai.types.chat import ChatCompletionMessageParam
from aiservice.llm import LLM
from authapp.auth import AuthenticatedRequest
from core.shared.testgen_models import TestGenSchema
class InstrumentTestSourceArgs(TypedDict):
@ -62,8 +59,6 @@ class InstrumentTestSourceArgs(TypedDict):
python_version: tuple[int, int, int]
testgen_api = NinjaAPI(urls_namespace="testgen")
# Get the directory of the current file
current_dir = Path(__file__).parent
EXPLAIN_SYSTEM_PROMPT = (current_dir / "explain_system_prompt.md").read_text()
@ -467,21 +462,6 @@ def validate_request_data(data: TestGenSchema) -> tuple[tuple[int, int, int], Ba
return python_version, ctx
@testgen_api.post(
"/", response={200: TestGenResponseSchema, 400: TestGenErrorResponseSchema, 500: TestGenErrorResponseSchema}
)
async def testgen(
request: AuthenticatedRequest, data: TestGenSchema
) -> tuple[int, TestGenResponseSchema | TestGenErrorResponseSchema]:
# Route based on language
if data.language in ("javascript", "typescript"):
return await testgen_javascript(request, data)
if data.language == "java":
return await testgen_java(request, data)
# Default: Python test generation
return await testgen_python(request, data)
async def testgen_python(
request: AuthenticatedRequest, data: TestGenSchema
) -> tuple[int, TestGenResponseSchema | TestGenErrorResponseSchema]:

View file

@ -0,0 +1,37 @@
"""Shared optimizer API router with language dispatch.
This module owns the NinjaAPI router for code optimization and dispatches
to the appropriate language-specific handler based on the request's
``language`` field. Lazy imports are used inside the function body to
avoid circular dependencies between language modules.
"""
from __future__ import annotations
from ninja import NinjaAPI
from authapp.auth import AuthenticatedRequest
from core.shared.optimizer_models import OptimizeSchema
from core.shared.optimizer_schemas import OptimizeErrorResponseSchema, OptimizeResponseSchema
optimize_api = NinjaAPI(urls_namespace="optimize")
@optimize_api.post(
    "/", response={200: OptimizeResponseSchema, 400: OptimizeErrorResponseSchema, 500: OptimizeErrorResponseSchema}
)
async def optimize(
    request: AuthenticatedRequest, data: OptimizeSchema
) -> tuple[int, OptimizeResponseSchema | OptimizeErrorResponseSchema]:
    """Dispatch an optimization request to the handler for ``data.language``.

    Imports are deferred to call time so that language modules never import
    each other at module load, avoiding circular dependencies.
    """
    lang = data.language
    if lang in ("javascript", "typescript"):
        from core.languages.js_ts.optimizer import optimize_javascript  # noqa: PLC0415

        handler = optimize_javascript
    elif lang == "java":
        from core.languages.java.optimizer import optimize_java  # noqa: PLC0415

        handler = optimize_java
    else:
        # Anything else falls through to the Python optimizer (the default).
        from core.languages.python.optimizer.optimizer import optimize_python  # noqa: PLC0415

        handler = optimize_python
    return await handler(request, data)

View file

@ -0,0 +1,36 @@
"""Shared testgen API router with language dispatch.
This module owns the NinjaAPI router for test generation and dispatches
to the appropriate language-specific handler based on the request's
``language`` field. Lazy imports are used inside the function body to
avoid circular dependencies between language modules.
"""
from __future__ import annotations
from ninja import NinjaAPI
from authapp.auth import AuthenticatedRequest
from core.shared.testgen_models import TestGenErrorResponseSchema, TestGenResponseSchema, TestGenSchema
testgen_api = NinjaAPI(urls_namespace="testgen")
@testgen_api.post(
    "/", response={200: TestGenResponseSchema, 400: TestGenErrorResponseSchema, 500: TestGenErrorResponseSchema}
)
async def testgen(
    request: AuthenticatedRequest, data: TestGenSchema
) -> tuple[int, TestGenResponseSchema | TestGenErrorResponseSchema]:
    """Dispatch a test-generation request to the handler for ``data.language``.

    Handlers are imported lazily inside each branch so this shared router
    carries no import-time dependency on any language module, which keeps
    the language packages decoupled from one another.
    """
    lang = data.language
    if lang in ("javascript", "typescript"):
        from core.languages.js_ts.testgen import testgen_javascript  # noqa: PLC0415

        handler = testgen_javascript
    elif lang == "java":
        from core.languages.java.testgen import testgen_java  # noqa: PLC0415

        handler = testgen_java
    else:
        # Anything else falls through to Python test generation (the default).
        from core.languages.python.testgen.testgen import testgen_python  # noqa: PLC0415

        handler = testgen_python
    return await handler(request, data)

View file

@ -10,8 +10,7 @@ from pathlib import Path
import pytest
# Load prompts directly to avoid importing testgen_javascript.py
current_dir = Path(__file__).parent.parent.parent / "core" / "languages" / "python" / "testgen"
JS_PROMPTS_DIR = current_dir / "prompts" / "javascript"
JS_PROMPTS_DIR = Path(__file__).parent.parent.parent / "core" / "languages" / "js_ts" / "prompts" / "testgen"
JS_EXECUTE_SYSTEM_PROMPT = (JS_PROMPTS_DIR / "execute_system_prompt.md").read_text()
JS_EXECUTE_USER_PROMPT = (JS_PROMPTS_DIR / "execute_user_prompt.md").read_text()
@ -40,6 +39,8 @@ def build_javascript_prompt(
user_prompt = JS_EXECUTE_USER_PROMPT
posthog_event_suffix = ""
import_statement = f"import {{ {function_name} }} from '{module_path}';"
system_message = {"role": "system", "content": system_prompt.format(function_name=function_name)}
user_message = {
@ -48,7 +49,7 @@ def build_javascript_prompt(
test_framework=test_framework,
function_name=function_name,
function_code=function_code,
module_path=module_path,
import_statement=import_statement,
package_comment="",
),
}