## Problem The JS/TS language handler (`core/languages/js_ts/`) was importing models, schemas, config, prompts, and helpers directly from the Python language handler. This created a confusing architectural dependency and risked serving wrong language-specific prompt content. ## What Changed - Created `core/shared/` for genuinely language-agnostic code (optimizer schemas, models, config, testgen models, context helpers) - Moved JS/TS-specific prompts and context helpers into `core/languages/js_ts/` - Updated all consumers (20+ files) to import from the correct locations - Removed backwards-compat re-exports from the Python module ## Result - **Before:** 11 imports from `core.languages.python` in `core/languages/js_ts/` - **After:** 0
58 lines
2.3 KiB
Python
58 lines
2.3 KiB
Python
from dataclasses import dataclass
|
|
from typing import TYPE_CHECKING
|
|
|
|
from ninja import Schema
|
|
|
|
from core.languages.python.optimizer.context_utils.optimizer_context import CodeStrAndExplanation, MultiOptimizerContext
|
|
from core.languages.python.optimizer.diff_patches_utils.diff import DiffMethod
|
|
from core.shared.optimizer_models import OptimizedCandidateSource
|
|
|
|
if TYPE_CHECKING:
|
|
from core.languages.python.optimizer.context_utils.optimizer_context import CodeStrAndExplanation
|
|
|
|
|
|
class AdaptiveOptimizedCandidate(Schema):
    """One previously generated optimization candidate, as received over the API."""

    # Identifier of the optimization attempt this candidate came from.
    optimization_id: str
    # Full candidate source code (a complete rewrite — interpolated verbatim into prompts).
    source_code: str
    # Explanation of the optimization, interpolated verbatim into prompts.
    explanation: str
    # Where the candidate originated (see OptimizedCandidateSource in core.shared.optimizer_models).
    source: OptimizedCandidateSource
    # Speedup as a preformatted display string (rendered as-is by build_attempts_str).
    speedup: str
|
|
|
|
|
|
class AdaptiveOptRequestSchema(Schema):
    """Request body for the adaptive-optimization endpoint."""

    # Trace identifier — presumably correlates this request with an earlier
    # optimization run; verify against the caller.
    trace_id: str
    # The unmodified source code being optimized.
    original_source_code: str
    # Prior optimization candidates the adaptive pass should learn from.
    candidates: list[AdaptiveOptimizedCandidate]
|
|
|
|
|
|
@dataclass()
class AdaptiveOptContextData:
    """Plain data bundle of everything needed to build an AdaptiveOptContext."""

    # The code being optimized, as a full source string.
    original_source_code: str
    # Prior attempts to be rendered into the user prompt.
    attempts: list[AdaptiveOptimizedCandidate]
    # Python version string substituted into the system prompt template.
    python_version_str: str
|
|
|
|
|
|
class AdaptiveOptContext(MultiOptimizerContext):
    """Optimizer context for the adaptive-optimization flow.

    Wraps an ``AdaptiveOptContextData`` payload and renders the system/user
    prompts from the base prompt templates supplied at construction time.
    """

    def __init__(self, ctx_data: AdaptiveOptContextData, base_system_prompt: str, base_user_prompt: str) -> None:
        self.data = ctx_data
        self.base_system_prompt = base_system_prompt
        self.base_user_prompt = base_user_prompt
        # Filled in later, once a candidate's code and explanation are extracted.
        self.extracted_code_and_expl: CodeStrAndExplanation | None = None
        # NO_DIFF: adaptive candidates are full source rewrites, not patches.
        super().__init__(base_system_prompt, base_user_prompt, ctx_data.original_source_code, DiffMethod.NO_DIFF)

    def get_system_prompt(self, python_version_str: str | None = None) -> str:  # noqa: ARG002
        """Render the system prompt.

        ``python_version_str`` exists only to satisfy the base interface; the
        version recorded in ``self.data`` is always the one substituted.
        """
        return self.base_system_prompt.format(python_version_str=self.data.python_version_str)

    def build_attempts_str(self) -> str:
        """Format every prior optimization attempt into a single prompt section."""
        sections: list[str] = []
        for candidate in self.data.attempts:
            sections.append(f"\n\n### Attempt {candidate.optimization_id}\n\n")
            sections.append(f"### Source Code:\n{candidate.source_code}\n\n")
            sections.append(f"### Explanation:\n{candidate.explanation}\n\n")
            sections.append(f"**\n{candidate.speedup}\n**")
        return "".join(sections)

    def get_user_prompt(self, dependency_code: str = "", line_profiler_results: str | None = None) -> str:  # noqa: ARG002
        """Render the user prompt with the original code and all prior attempts.

        ``dependency_code`` and ``line_profiler_results`` are accepted only for
        interface compatibility and are not used.
        """
        prompt_fields = {
            "ORIGINAL_CODE": self.data.original_source_code,
            "OPTIMIZATION_ATTEMPTS": self.build_attempts_str(),
        }
        return self.base_user_prompt.format(**prompt_fields)
|