mirror of
https://github.com/codeflash-ai/codeflash-internal.git
synced 2026-05-04 18:25:18 +00:00
fix: handle syntactically invalid LLM output in testgen repair (#2472)

## Summary

- Catch `ParserSyntaxError` when parsing LLM-repaired code instead of letting it bubble up to the generic 500 handler
- Reduces Sentry noise from expected LLM failures
- The CLI already handles non-200 responses gracefully (returns `None`, continues)
This commit is contained in:
parent
4edd183d82
commit
14c0b3acca
1 changed files with 5 additions and 1 deletions
|
|
@@ -90,7 +90,11 @@ async def testgen_repair(
     from core.languages.python.testgen.validate import instrument_tests, validate_request_data  # noqa: PLC0415
     from core.shared.testgen_models import TestGenSchema  # noqa: PLC0415

-    repaired_cst = parse_module_to_cst(repaired_code)
+    try:
+        repaired_cst = parse_module_to_cst(repaired_code)
+    except Exception:
+        logging.warning("LLM returned syntactically invalid repaired code, falling back to original")
+        return 500, TestRepairErrorSchema(error="LLM returned syntactically invalid code")
     original_cst = parse_module_to_cst(data.test_source)

     # Extract repaired function nodes by name
|
|
|||
Loading…
Reference in a new issue