Add Sentry tracking and broader exception handling for API failures

This commit is contained in:
Mohamed Ashraf 2025-10-15 02:31:06 +03:00
parent 9381205363
commit ff6da849a9
6 changed files with 20 additions and 14 deletions

View file

@@ -1,6 +1,7 @@
from __future__ import annotations
import logging
import sentry_sdk
import uuid
from pathlib import Path
from typing import TYPE_CHECKING
@@ -96,7 +97,8 @@ async def optimize_python_code(
model=optimize_model.name, messages=messages, n=n
)
except Exception as e:
logging.exception(f"OpenAI Code Generation error in optimizer: {e}")
logging.exception("OpenAI Code Generation error in optimizer")
sentry_sdk.capture_exception(e)
debug_log_sensitive_data(f"Failed to generate code for source:\n{ctx.source_code}")
return []
llm_cost = calculate_llm_cost(output, optimize_model)

View file

@@ -1,6 +1,7 @@
from __future__ import annotations
import logging
import sentry_sdk
from pathlib import Path
from typing import TYPE_CHECKING
@@ -98,7 +99,8 @@ async def optimize_python_code_line_profiler(
)
await update_optimization_cost(trace_id=trace_id, cost=calculate_llm_cost(output, optimize_model))
except Exception as e:
logging.exception(f"OpenAI Code Generation error in optimizer-line-profiler: {e}")
logging.exception("OpenAI Code Generation error in optimizer-line-profiler")
sentry_sdk.capture_exception(e)
debug_log_sensitive_data(f"Failed to generate code for source:\n{ctx.source_code}")
return []

View file

@@ -222,7 +222,8 @@ async def refinement(
)
llm_cost = calculate_llm_cost(output, optimize_model)
except Exception as e:
logging.exception(f"Claude Code Generation error in refinement: {e}")
logging.exception("Claude Code Generation error in refinement")
sentry_sdk.capture_exception(e)
debug_log_sensitive_data(f"Failed to generate code for source:\n{ctx.data.original_source_code}")
return OptimizeErrorResponseSchema(error=str(e))
debug_log_sensitive_data(f"ClaudeClient optimization response:\n{output.model_dump_json(indent=2)}")

View file

@@ -1,6 +1,7 @@
from __future__ import annotations
import re
import sentry_sdk
from typing import TYPE_CHECKING
from aiservice.analytics.posthog import ph
@@ -108,6 +109,7 @@ async def rank_optimizations(
await update_optimization_cost(trace_id=trace_id, cost=calculate_llm_cost(output, rank_model))
except Exception as e:
debug_log_sensitive_data(f"Failed to generate new explanation, Error message: {e}")
sentry_sdk.capture_exception(e)
return RankErrorResponseSchema(error=str(e))
debug_log_sensitive_data(f"AIClient optimization response:\n{output}")
if output.usage is not None:

View file

@@ -376,6 +376,7 @@ class TestGenErrorResponseSchema(Schema):
from aiservice.analytics.posthog import ph
import sentry_sdk
@testgen_api.post(
@@ -453,7 +454,7 @@ async def testgen(request, data: TestGenSchema) -> tuple[int, TestGenResponseSch
except TestGenerationFailedException as e:
logging.exception("Test generation failed. Skipping test generation.")
logging.exception(e)
ph(request.user, "aiservice-testgen-test-generation-failed", properties={"error": str(e)})
sentry_sdk.capture_exception(e)
return 500, TestGenErrorResponseSchema(error="Error generating tests. Internal server error.")
if hasattr(request, "should_log_features") and request.should_log_features:
await log_features(

View file

@@ -9,6 +9,7 @@ from pathlib import Path
from typing import SupportsIndex
import isort
import sentry_sdk
from aiservice.analytics.posthog import ph
from aiservice.common_utils import is_codeflash_employee, parse_python_version, validate_trace_id
from aiservice.env_specific import create_openai_client, debug_log_sensitive_data
@@ -177,7 +178,8 @@ To help unit test the function above, list diverse scenarios that the function s
total_llm_cost += calculate_llm_cost(execute_response, execute_model) or 0.0
except Exception as e:
logging.exception(f"OpenAI client error in {error_context}execute step")
logging.exception("OpenAI client error in execute step")
sentry_sdk.capture_exception(e)
raise TestGenerationFailedError(e) from e
debug_log_sensitive_data(
f"OpenAIClient {error_context}execute response:\n{execute_response.model_dump_json(indent=2)}"
@@ -426,12 +428,8 @@ async def testgen(
try:
parse_module_to_cst(sorted_imports_test_source)
except Exception as e:
logging.exception(f"Failed to parse generated test code: {e}")
ph(
request.user,
"aiservice-testgen-invalid-isort-code",
properties={"error": str(e), "sorted_imports_test_source": sorted_imports_test_source},
)
logging.exception("Failed to parse generated test code")
sentry_sdk.capture_exception(e)
else:
generated_test_source = sorted_imports_test_source
@@ -439,11 +437,11 @@ async def testgen(
except TestGenerationFailedError as e:
logging.exception("Test generation failed. Skipping test generation.")
logging.exception(e)
ph(request.user, "aiservice-testgen-test-generation-failed", properties={"error": str(e)})
sentry_sdk.capture_exception(e)
return 500, TestGenErrorResponseSchema(error="Error generating tests. Internal server error.")
except Exception as e:
logging.exception(f"Unexpected error in testgen endpoint: {e}")
ph(request.user, "aiservice-testgen-unexpected-error", properties={"error": str(e)})
logging.exception("Unexpected error in testgen endpoint")
sentry_sdk.capture_exception(e)
return 500, TestGenErrorResponseSchema(error="Error generating tests. Internal server error.")
if hasattr(request, "should_log_features") and request.should_log_features:
# TODO: Update log features with perf instrumented tests