Remove instrumented_behavior_tests and instrumented_perf_tests from testgen API

Instrumentation (behavior/perf AST transformations) moves to the client
side. The API now returns raw validated code only via generated_tests.
This commit is contained in:
Kevin Turcios 2026-04-22 23:10:16 -05:00
parent 051317e2dc
commit 92c5fd7c74
5 changed files with 5 additions and 25 deletions

View file

@@ -207,12 +207,13 @@ All 9 steps complete. 13/13 endpoints implemented (12 full, 1 stub: `/ai/log_fea
### Deferred work
- **Testgen instrumentation** — behavior/performance test AST transformations (~1400 lines). Currently all four response fields return the same validated code.
- **Testgen postprocessing** — CST transformation pipeline (remove helpers, unused defs, cap loops/tensors, add imports, remove asserts).
- **DB persistence for log_features** — asyncpg upsert for optimization_features table.
- **JS/TS and Java language layers** — P2, after Python pipeline is production-validated.
- **CI pipeline** — GitHub Actions for lint, test, type check, deploy.
Testgen instrumentation (behavior/perf AST transformations) moved to client side — the API returns raw validated code only.
### Database tables
| Table | Key Fields | Purpose |

View file

@@ -186,11 +186,9 @@ async def generate_tests(
trace_id: str,
user_id: str,
test_index: int,
) -> tuple[str, str, str, str]:
) -> str:
"""
Generate regression tests for a Python function.
Returns (display_tests, behavior_tests, perf_tests, raw_display).
"""
python_version = _parse_python_version(python_version_str)
model, model_type = select_model_for_test(test_index)
@@ -232,9 +230,4 @@ async def generate_tests(
validation_error=("No test functions found after postprocessing"),
)
return (
validated_code,
validated_code,
validated_code,
validated_code,
)
return validated_code

View file

@@ -393,6 +393,4 @@ async def testgen_repair(
return TestRepairResponse(
generated_tests=repaired_code,
instrumented_behavior_tests=repaired_code,
instrumented_perf_tests=repaired_code,
)

View file

@@ -78,12 +78,7 @@ async def testgen(
test_index = data.test_index if data.test_index is not None else 0
try:
(
generated_tests,
instrumented_behavior_tests,
instrumented_perf_tests,
raw_display,
) = await generate_tests(
generated_tests = await generate_tests(
llm_client,
source_code=data.source_code_being_tested,
function_name=data.function_name,
@@ -97,9 +92,6 @@ async def testgen(
return TestGenResponse(
generated_tests=generated_tests,
instrumented_behavior_tests=instrumented_behavior_tests,
instrumented_perf_tests=instrumented_perf_tests,
raw_generated_tests=raw_display,
)
except CodeValidationError as e:

View file

@@ -51,8 +51,6 @@ class TestGenResponse(BaseModel):
"""
generated_tests: str
instrumented_behavior_tests: str
instrumented_perf_tests: str
raw_generated_tests: str | None = None
@@ -202,5 +200,3 @@ class TestRepairResponse(BaseModel):
"""
generated_tests: str
instrumented_behavior_tests: str
instrumented_perf_tests: str