add backward compatibility (#2217)

# Pull Request Checklist

## Description
- [ ] **Description of PR**: Clear and concise description of what this
PR accomplishes
- [ ] **Breaking Changes**: Document any breaking changes (if
applicable)
- [ ] **Related Issues**: Link to any related issues or tickets

## Testing
- [ ] **Test Cases Attached**: All relevant test cases have been
added/updated
- [ ] **Manual Testing**: Manual testing completed for the changes

## Monitoring & Debugging
- [ ] **Logging in place**: Appropriate logging has been added for
debugging user issues
- [ ] **Sentry will be able to catch errors**: Error handling ensures
Sentry can capture and report errors
- [ ] **Avoid Dev based/Prisma logging**: No development-only or
Prisma-specific logging in production code

## Configuration
- [ ] **Env variables newly added**: Any new environment variables are
documented in the .env.example file or mentioned in the PR description
---

## Additional Notes
<!-- Add any additional context, screenshots, or notes for reviewers
here -->
This commit is contained in:
Sarthak Agarwal 2026-01-10 00:31:11 +05:30 committed by GitHub
parent ceb2d8bc2f
commit 50fbd275d5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 85 additions and 24 deletions

View file

@ -8,3 +8,11 @@ repos:
# args: [ --fix ]
# Run the formatter.
- id: ruff-format
- repo: local
hooks:
- id: ty
name: ty (type checker)
entry: uv run ty check
language: system
types: [python]
pass_filenames: true

View file

@ -8,20 +8,18 @@ from pathlib import Path
from typing import TYPE_CHECKING
import sentry_sdk
from ninja import NinjaAPI, Schema
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from packaging import version
from aiservice.analytics.posthog import ph
from aiservice.env_specific import debug_log_sensitive_data
from aiservice.llm import OPTIMIZATION_REVIEW_MODEL, calculate_llm_cost, call_llm
from authapp.auth import AuthenticatedRequest
from log_features.log_event import update_optimization_cost, update_optimization_features_review
from ninja import NinjaAPI, Schema
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from packaging import version
if TYPE_CHECKING:
from openai.types.chat import ChatCompletionMessageParam
from aiservice.llm import LLM
from openai.types.chat import ChatCompletionMessageParam
optimization_review_api = NinjaAPI(urls_namespace="optimization_review")
@ -50,8 +48,9 @@ class OptimizationReviewResponseSchema(Schema):
class OptimizationReviewSchema(Schema):
trace_id: str
original_code: str # Complete original function/code
optimized_code: str # Complete optimized function/code
code_diff: str | None = None # LEGACY: unified diff format (for CLI backward compatibility)
original_code: str | None = None
optimized_code: str | None = None
original_runtime: str
optimized_runtime: str
speedup: str
@ -73,23 +72,77 @@ def _build_optimization_review_messages(data: OptimizationReviewSchema) -> list[
data.generated_tests = f"```python\n{data.generated_tests}\n```"
system_message = ChatCompletionSystemMessageParam(role="system", content=SYSTEM_PROMPT)
if data.original_code and data.optimized_code:
user_prompt = USER_PROMPT_TEMPLATE.format(
original_runtime=data.original_runtime,
optimized_runtime=data.optimized_runtime,
speedup=data.speedup,
loop_count=data.loop_count,
coverage_message=data.coverage_message,
python_version=data.python_version or "Not specified",
original_code=data.original_code,
optimized_code=data.optimized_code,
explanation=data.explanation,
generated_tests=data.generated_tests or "Not Available",
existing_tests=data.existing_tests or "Not Available",
replay_tests=data.replay_tests or "Not Available",
benchmark_details=data.benchmark_details or "Not Available",
calling_fn_details=data.calling_fn_details or "Not Available",
)
user_prompt = USER_PROMPT_TEMPLATE.format(
original_runtime=data.original_runtime,
optimized_runtime=data.optimized_runtime,
speedup=data.speedup,
loop_count=data.loop_count,
coverage_message=data.coverage_message,
python_version=data.python_version or "Not specified",
original_code=data.original_code,
optimized_code=data.optimized_code,
explanation=data.explanation,
generated_tests=data.generated_tests or "Not Available",
existing_tests=data.existing_tests or "Not Available",
replay_tests=data.replay_tests or "Not Available",
benchmark_details=data.benchmark_details or "Not Available",
calling_fn_details=data.calling_fn_details or "Not Available",
)
# LEGACY FORMAT: Unified diff (for backward compatibility with old CLI)
elif data.code_diff:
# Old CLI sends unified diff - build prompt with diff instead
user_prompt = f"""
<optimization_review_request>
<performance_metrics>
<original_runtime>{data.original_runtime}</original_runtime>
<optimized_runtime>{data.optimized_runtime}</optimized_runtime>
<speedup_percentage>{data.speedup}</speedup_percentage>
<loop_count>{data.loop_count}</loop_count>
<test_coverage>{data.coverage_message}</test_coverage>
<python_version>{data.python_version or "Not specified"}</python_version>
</performance_metrics>
<code_changes>
{data.code_diff}
</code_changes>
<explanation>
{data.explanation}
</explanation>
<validation_data>
<generated_tests>
{data.generated_tests or "Not Available"}
</generated_tests>
<existing_tests>
{data.existing_tests or "Not Available"}
</existing_tests>
<replay_tests>
{data.replay_tests or "Not Available"}
</replay_tests>
<benchmark_details>
{data.benchmark_details or "Not Available"}
</benchmark_details>
</validation_data>
<codebase_context>
<calling_functions>
{data.calling_fn_details or "Not Available"}
</calling_functions>
</codebase_context>
</optimization_review_request>
Please analyze this optimization following the structured assessment process and provide your rating.
"""
else:
raise ValueError("Must provide either (original_code + optimized_code) or code_diff")
user_message = ChatCompletionUserMessageParam(role="user", content=user_prompt)