prompt change for render profile; add to all nodes of pipeline

Sarthak Agarwal 2026-03-11 01:01:09 +05:30
parent ad57aa6bbc
commit 54a123b9eb
4 changed files with 100 additions and 17 deletions

View file

@@ -20,6 +20,24 @@ You are a professional React test engineer. Your goal is to generate comprehensi
- **Child prop stability**: Change parent state in a way that should NOT affect child component props — tests whether parent optimizes prop references.
- **IMPORTANT**: Do NOT assert specific render counts. Both original and optimized code must pass the same tests. Codeflash measures render metrics automatically via React.Profiler instrumentation. Focus on creating realistic interaction scenarios that produce measurable re-render patterns.
4. **Debounce/Throttle Measurement Tests** (a sketch of this pattern follows this list)
- Use `jest.useFakeTimers()` at the start of the test.
- Type multiple characters rapidly using `fireEvent.change(input, { target: { value: 'a' } })` (NOT `userEvent.type` which conflicts with fake timers).
- Advance time with `jest.advanceTimersByTime(300)` between groups of inputs.
- Verify intermediate state (e.g., loading indicator) and final state after debounce settles.
- Call `jest.useRealTimers()` in `afterEach`.
5. **Large List/Table Measurement Tests** (see the combined sketch at the end of this section)
- Render with a large dataset (100+ items) to exercise virtualization and memoization paths.
- Verify initial render shows expected items.
- Sort/filter the list and verify UI updates correctly.
- Scroll simulation: if the component accepts scroll props or has scroll handlers, trigger scroll events via `fireEvent.scroll()`.
6. **Memoization Measurement Tests** (see the combined sketch at the end of this section)
- Render with initial props, then `rerender(<Component {...sameProps} />)` with identical props — measures React.memo / useMemo effectiveness.
- Then `rerender(<Component newProp="value" />)` with changed props — verifies the component still updates correctly.
- Both rerenders must pass — the test verifies behavior while the Profiler measures render count differences.
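
For concreteness, a debounce measurement test in the style of item 4 might look like the following sketch. It is illustrative only: the `SearchBox` component, its `onSearch` prop, and the 300 ms debounce are hypothetical, and the assertions check behavior rather than render counts.

```tsx
import React from 'react';
import { render, screen, fireEvent, act } from '@testing-library/react';
import { SearchBox } from './SearchBox'; // hypothetical component under test

describe('SearchBox debounce measurement', () => {
  afterEach(() => {
    jest.useRealTimers();
  });

  it('only calls onSearch after the debounce window settles', () => {
    jest.useFakeTimers();
    const onSearch = jest.fn();
    render(<SearchBox onSearch={onSearch} />);

    const input = screen.getByRole('textbox');
    // fireEvent.change instead of userEvent.type, which conflicts with fake timers
    fireEvent.change(input, { target: { value: 'a' } });
    fireEvent.change(input, { target: { value: 'ab' } });
    fireEvent.change(input, { target: { value: 'abc' } });

    // Nothing should fire before the debounce interval elapses
    expect(onSearch).not.toHaveBeenCalled();

    // Advance past the assumed 300 ms debounce; act() flushes the resulting updates
    act(() => {
      jest.advanceTimersByTime(300);
    });
    expect(onSearch).toHaveBeenCalledWith('abc');
  });
});
```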
**Test Framework**
- Use `@testing-library/react` for rendering and querying.
- Use `@testing-library/user-event` for simulating user interactions.
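
Items 5 and 6 can be exercised in a single test, as in the sketch below. It is an illustration under stated assumptions: `UserTable`, its props, and the availability of `@testing-library/jest-dom` matchers are hypothetical, and render counts are left entirely to the Profiler instrumentation.

```tsx
import React from 'react';
import { render, screen } from '@testing-library/react';
import { UserTable } from './UserTable'; // hypothetical component under test

const users = Array.from({ length: 150 }, (_, i) => ({ id: i, name: `User ${i}` }));

it('renders a large dataset and stays correct across rerenders', () => {
  const onSelect = jest.fn();
  const { rerender } = render(<UserTable users={users} onSelect={onSelect} />);

  // Initial render shows expected items (a virtualized table may only mount a window of rows)
  expect(screen.getByText('User 0')).toBeInTheDocument();

  // Rerender with identical props: the Profiler measures React.memo / useMemo effectiveness here
  rerender(<UserTable users={users} onSelect={onSelect} />);
  expect(screen.getByText('User 0')).toBeInTheDocument();

  // Rerender with changed props: the component must still update correctly
  const updated = [{ id: 0, name: 'Renamed user' }, ...users.slice(1)];
  rerender(<UserTable users={updated} onSelect={onSelect} />);
  expect(screen.getByText('Renamed user')).toBeInTheDocument();
});
```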

View file

@@ -44,3 +44,6 @@ The following performance issues were detected by static analysis. Generate inte
4. Use `jest.fn()` for callback props.
5. Ensure tests are self-contained and will pass on the original component code.
6. Do NOT assert specific render counts — Codeflash measures render metrics automatically.
7. If the component uses debounce/throttle, use `jest.useFakeTimers()` and `fireEvent` (not `userEvent`) for timing-sensitive tests. Call `jest.useRealTimers()` in `afterEach`.
8. If the component renders lists/tables, include tests with 100+ items to exercise virtualization and memoization.
9. Include at least 2 rerender tests: one with same props (measures memoization), one with changed props (verifies correctness).
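
Condensing the checklist above, a generated test file might take roughly the following shape; the `FilterableList` component and its props are hypothetical.

```tsx
import React from 'react';
import { render, screen } from '@testing-library/react';
import { FilterableList } from './FilterableList'; // hypothetical component under test

describe('FilterableList measurement tests', () => {
  afterEach(() => {
    jest.useRealTimers(); // restore real timers after any fake-timer test (rule 7)
  });

  it('handles same-props and changed-props rerenders (rules 8 and 9)', () => {
    const items = Array.from({ length: 120 }, (_, i) => `Item ${i}`); // 100+ items (rule 8)
    const onPick = jest.fn(); // callback props as jest.fn() (rule 4)
    const { rerender } = render(<FilterableList items={items} onPick={onPick} />);

    rerender(<FilterableList items={items} onPick={onPick} />); // same props: memoization path
    rerender(<FilterableList items={[...items, 'Item 120']} onPick={onPick} />); // changed props
    expect(screen.getByText('Item 120')).toBeTruthy(); // behavior only, no render-count asserts (rule 6)
  });
});
```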

View file

@@ -47,6 +47,11 @@ JS_EXECUTE_USER_PROMPT = (JS_PROMPTS_DIR / "execute_user_prompt.md").read_text()
JS_EXECUTE_ASYNC_SYSTEM_PROMPT = (JS_PROMPTS_DIR / "execute_async_system_prompt.md").read_text()
JS_EXECUTE_ASYNC_USER_PROMPT = (JS_PROMPTS_DIR / "execute_async_user_prompt.md").read_text()
# Load React-specific testgen prompts
REACT_PROMPTS_DIR = current_dir / "prompts" / "react_testgen"
REACT_SYSTEM_PROMPT = (REACT_PROMPTS_DIR / "system_prompt.md").read_text()
REACT_USER_PROMPT_TEMPLATE = (REACT_PROMPTS_DIR / "user_prompt.md").read_text()
# Pattern to extract JavaScript code blocks
JS_PATTERN = re.compile(r"^```(?:javascript|js|typescript|ts)?\s*\n(.*?)\n```", re.MULTILINE | re.DOTALL)
@@ -266,6 +271,40 @@ def build_javascript_prompt(
    return messages, posthog_event_suffix


def build_react_testgen_prompt(
    data: TestGenSchema,
) -> list[ChatCompletionMessageParam]:
    """Build prompt messages for React component test generation using Jinja2 templates."""
    from jinja2 import Template # noqa: PLC0415
    language = "typescript" if data.language == "typescript" else "javascript"
    # Parse react_context JSON if provided
    react_ctx = {}
    if data.react_context:
        try:
            import json # noqa: PLC0415
            react_ctx = json.loads(data.react_context) if isinstance(data.react_context, str) else data.react_context
        except (json.JSONDecodeError, TypeError):
            react_ctx = {}
    template = Template(REACT_USER_PROMPT_TEMPLATE)
    user_content = template.render(
        language=language,
        source_code=data.source_code_being_tested,
        props_interface=react_ctx.get("props_interface"),
        hooks_used=react_ctx.get("hooks_used"),
        child_components=react_ctx.get("child_components"),
        dependency_code=react_ctx.get("dependency_code"),
        optimization_opportunities=react_ctx.get("optimization_opportunities"),
    )
    system_message: ChatCompletionMessageParam = {"role": "system", "content": REACT_SYSTEM_PROMPT}
    user_message: ChatCompletionMessageParam = {"role": "user", "content": user_content}
    return [system_message, user_message]
def parse_and_validate_js_output(response_content: str, language: str = "javascript") -> str:
"""Parse and validate the LLM response for JavaScript/TypeScript code.
@@ -526,23 +565,44 @@ async def testgen_javascript(
logging.info(f"Using {model_source} model ({execute_model.name}) for JavaScript test_index {test_index}")
(
generated_test_source,
instrumented_behavior_tests,
instrumented_perf_tests,
) = await generate_javascript_tests_from_function(
user_id=request.user,
function_name=data.function_to_optimize.qualified_name,
function_code=data.source_code_being_tested,
module_path=data.module_path,
test_framework=data.test_framework,
is_async=data.is_async or False,
trace_id=data.trace_id,
call_sequence=data.call_sequence,
execute_model=execute_model,
language=data.language,
test_index=test_index,
)
if data.is_react_component:
logging.info("Using React-specific testgen prompts for component test generation")
messages = build_react_testgen_prompt(data)
cost_tracker: list[float] = []
validated_code = await generate_and_validate_js_test_code(
messages=messages,
model=execute_model,
cost_tracker=cost_tracker,
user_id=request.user,
posthog_event_suffix="react-",
trace_id=data.trace_id,
call_sequence=data.call_sequence,
language=data.language,
test_index=test_index,
)
total_llm_cost = sum(cost_tracker)
await update_optimization_cost(trace_id=data.trace_id, cost=total_llm_cost, user_id=request.user)
generated_test_source = validated_code
instrumented_behavior_tests = validated_code
instrumented_perf_tests = validated_code
else:
(
generated_test_source,
instrumented_behavior_tests,
instrumented_perf_tests,
) = await generate_javascript_tests_from_function(
user_id=request.user,
function_name=data.function_to_optimize.qualified_name,
function_code=data.source_code_being_tested,
module_path=data.module_path,
test_framework=data.test_framework,
is_async=data.is_async or False,
trace_id=data.trace_id,
call_sequence=data.call_sequence,
execute_model=execute_model,
language=data.language,
test_index=test_index,
)
# Strip incorrect file extensions from import paths (LLMs sometimes add .js to .ts imports)
generated_test_source = strip_js_extensions(generated_test_source)

View file

@@ -26,6 +26,8 @@ class TestGenSchema(Schema):
    is_async: bool | None = False
    call_sequence: int | None = None
    is_numerical_code: bool | None = None
    is_react_component: bool = False
    react_context: str | None = None

    @model_validator(mode="after")
    def helper_function_names_validator(self) -> Self: