Mirror of https://github.com/codeflash-ai/codeflash.git, synced 2026-05-04 18:25:17 +00:00
Add language guards for Python-only endpoints in process_review
Issue #9: process_review() called get_new_explanation() and get_optimization_review() without checking language. These are Python-only endpoints that would fail or return incorrect results for JavaScript/TypeScript.

Root Cause:
- process_review() (line 2459) calls two Python-only endpoints:
  - get_new_explanation() (line 2575) - no language check
  - get_optimization_review() (line 2638) - no language check
- Same pattern as Issue #8 (adaptive_optimize), which was already fixed
- Latent bug - only manifests when JS/TS optimization succeeds

Fix:
- Added a language guard before get_new_explanation: if language == "python"
- Added a language guard before get_optimization_review: if language == "python"
- For non-Python languages, use the original explanation from the AI service
- For non-Python languages, skip the optimization review (sets an empty review)

Impact:
- Latent bug (not yet triggered because all JS/TS optimizations fail at baseline)
- Would block JS/TS optimization success once baseline issues are resolved
- Severity: MEDIUM (latent, not blocking current work)

Testing:
- Existing function_optimizer tests pass
- No linting/type errors

Category: Latent bug (will reproduce when JS/TS optimizations succeed)
Type: Missing language guard in CLI

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
parent 8d51e2d310
commit 5106b5040b

1 changed file with 39 additions and 30 deletions
@@ -1263,7 +1263,9 @@ class FunctionOptimizer:
         aiservice_client = self.aiservice_client if exp_type == "EXP0" else self.local_aiservice_client

-        if is_candidate_refined_before:
+        # adaptive_optimize is Python-only (uses libcst for AST parsing)
+        # For JavaScript/TypeScript, continue using optimize_code_refinement
+        if is_candidate_refined_before and self.function_to_optimize.language == "python":
             future_adaptive_optimization = self.call_adaptive_optimize(
                 trace_id=self.get_trace_id(exp_type),
                 original_source_code=code_context.read_writable_code.markdown,
@@ -2570,28 +2572,32 @@ class FunctionOptimizer:
             )
             concurrency_improvement_str = f"{conc_improvement_value * 100:.1f}%"

-        new_explanation_raw_str = self.aiservice_client.get_new_explanation(
-            source_code=code_context.read_writable_code.flat,
-            dependency_code=code_context.read_only_context_code,
-            trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
-            optimized_code=best_optimization.candidate.source_code.flat,
-            original_line_profiler_results=original_code_baseline.line_profile_results["str_out"],
-            optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"],
-            original_code_runtime=humanize_runtime(original_code_baseline.runtime),
-            optimized_code_runtime=humanize_runtime(best_optimization.runtime),
-            speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%",
-            annotated_tests=generated_tests_str,
-            optimization_id=best_optimization.candidate.optimization_id,
-            original_explanation=best_optimization.candidate.explanation,
-            original_throughput=original_throughput_str,
-            optimized_throughput=optimized_throughput_str,
-            throughput_improvement=throughput_improvement_str,
-            function_references=function_references,
-            acceptance_reason=explanation.acceptance_reason.value,
-            original_concurrency_ratio=original_concurrency_ratio_str,
-            optimized_concurrency_ratio=optimized_concurrency_ratio_str,
-            concurrency_improvement=concurrency_improvement_str,
-        )
+        # get_new_explanation is Python-only (uses Python-specific line profiler and tooling)
+        # For JavaScript/TypeScript/Java, use the original explanation from the AI service
+        new_explanation_raw_str = None
+        if self.function_to_optimize.language == "python":
+            new_explanation_raw_str = self.aiservice_client.get_new_explanation(
+                source_code=code_context.read_writable_code.flat,
+                dependency_code=code_context.read_only_context_code,
+                trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
+                optimized_code=best_optimization.candidate.source_code.flat,
+                original_line_profiler_results=original_code_baseline.line_profile_results["str_out"],
+                optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"],
+                original_code_runtime=humanize_runtime(original_code_baseline.runtime),
+                optimized_code_runtime=humanize_runtime(best_optimization.runtime),
+                speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%",
+                annotated_tests=generated_tests_str,
+                optimization_id=best_optimization.candidate.optimization_id,
+                original_explanation=best_optimization.candidate.explanation,
+                original_throughput=original_throughput_str,
+                optimized_throughput=optimized_throughput_str,
+                throughput_improvement=throughput_improvement_str,
+                function_references=function_references,
+                acceptance_reason=explanation.acceptance_reason.value,
+                original_concurrency_ratio=original_concurrency_ratio_str,
+                optimized_concurrency_ratio=optimized_concurrency_ratio_str,
+                concurrency_improvement=concurrency_improvement_str,
+            )
         new_explanation = Explanation(
             raw_explanation_message=new_explanation_raw_str or explanation.raw_explanation_message,
             winning_behavior_test_results=explanation.winning_behavior_test_results,
@@ -2631,13 +2637,16 @@ class FunctionOptimizer:
         raise_pr = not self.args.no_pr
         staging_review = self.args.staging_review
         opt_review_result = OptimizationReviewResult(review="", explanation="")
-        # this will now run regardless of pr, staging review flags
-        try:
-            opt_review_result = self.aiservice_client.get_optimization_review(
-                **data, calling_fn_details=function_references
-            )
-        except Exception as e:
-            logger.debug(f"optimization review response failed, investigate {e}")
+        # get_optimization_review is Python-only (uses Python-specific analysis)
+        # For JavaScript/TypeScript/Java, skip the review
+        if self.function_to_optimize.language == "python":
+            # this will now run regardless of pr, staging review flags
+            try:
+                opt_review_result = self.aiservice_client.get_optimization_review(
+                    **data, calling_fn_details=function_references
+                )
+            except Exception as e:
+                logger.debug(f"optimization review response failed, investigate {e}")
         data["optimization_review"] = opt_review_result.review
         self.optimization_review = opt_review_result.review
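For readers skimming the diff, both guarded call sites above reduce to the same pattern as the earlier adaptive_optimize fix: check the target language, call the Python-only endpoint only for Python, and otherwise fall back to data that already exists. The following is a minimal sketch of that pattern, not actual codeflash code; call_if_python is a hypothetical helper name, while function_to_optimize.language and the fallback values come from the diff above.

# Minimal sketch (hypothetical helper, assuming the attributes shown in the diff):
# gate a Python-only AI-service endpoint on the target language and fall back
# to data that is already available for JavaScript/TypeScript.
def call_if_python(optimizer, endpoint, fallback, **kwargs):
    if optimizer.function_to_optimize.language == "python":
        return endpoint(**kwargs)
    return fallback

# Usage in the spirit of this commit: the new explanation falls back to the
# explanation already returned by the AI service, and the optimization review
# falls back to an empty review for non-Python functions.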