formatting changes

This commit is contained in:
Aseem Saxena 2025-10-22 21:20:20 -07:00
parent 66ba5cefc2
commit 3ef90d0630
24 changed files with 50 additions and 36 deletions

View file

@@ -5,7 +5,7 @@ import uuid
import isort
def safe_isort_code(code: str, **kwargs) -> str: # noqa: ANN003
def safe_isort(code: str, **kwargs) -> str: # noqa: ANN003
"""Wrap isort.code to returns the original code if isort fails.
Args:

View file

@@ -1,12 +1,13 @@
import os
import sentry_sdk
from aiservice.common_utils import is_codeflash_employee
from asgiref.sync import iscoroutinefunction, markcoroutinefunction
from django.core.cache import cache
from django.http import JsonResponse
from django.utils.decorators import async_only_middleware
from django.urls import get_resolver
from django.utils.decorators import async_only_middleware
from aiservice.common_utils import is_codeflash_employee
RATE_LIMIT_WINDOW_MS = int(os.getenv("RATE_LIMIT_WINDOW_MS", "60000"))
RATE_LIMIT_MAX = int(os.getenv("RATE_LIMIT_MAX", "40"))

View file

@@ -19,6 +19,7 @@ Including another URLconf
# from django.contrib import admin
from django.urls import path
from explanations.explanations import explanations_api
from log_features.log_features import features_api
from optimization_review.optimization_review import optimization_review_api

View file

@@ -1,6 +1,7 @@
from django.db import models
import uuid
from django.db import models
# Create your models here.

View file

@@ -4,6 +4,9 @@ import re
from typing import TYPE_CHECKING
import sentry_sdk
from ninja import NinjaAPI, Schema
from openai import OpenAIError
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from aiservice.analytics.posthog import ph
from aiservice.common_utils import validate_trace_id
@@ -11,18 +14,16 @@ from aiservice.env_specific import create_claude_client, debug_log_sensitive_dat
from aiservice.models.aimodels import EXPLAINATIONS_MODEL, calculate_llm_cost
from log_features.log_event import update_optimization_cost
from log_features.log_features import log_features
from ninja import NinjaAPI, Schema
from openai import OpenAIError
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
if TYPE_CHECKING:
from aiservice.models.aimodels import LLM
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionFunctionMessageParam,
ChatCompletionToolMessageParam,
)
from aiservice.models.aimodels import LLM
explanations_api = NinjaAPI(urls_namespace="explanations")
explain_regex_pattern = re.compile(r"<explain>(.*)<\/explain>", re.DOTALL | re.IGNORECASE)

View file

@@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, Any
import sentry_sdk
from asgiref.sync import sync_to_async
from django.db import IntegrityError, transaction
from django.db import transaction
from ninja import NinjaAPI
from aiservice.common_utils import validate_trace_id

View file

@@ -7,11 +7,12 @@ from enum import Enum
from typing import TYPE_CHECKING, cast
import sentry_sdk
from ninja import NinjaAPI, Schema
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from aiservice.env_specific import create_claude_client, debug_log_sensitive_data
from aiservice.models.aimodels import OPTIMIZATION_REVIEW_MODEL, calculate_llm_cost
from log_features.log_event import update_optimization_cost, update_optimization_features_review
from ninja import NinjaAPI, Schema
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
if TYPE_CHECKING:
from aiservice.models.aimodels import LLM

View file

@@ -64,7 +64,7 @@ def parse_diff(diff: str) -> list[SearchReplaceBlock]:
def apply_patches(diff_str: str, content: str) -> str:
try:
patch_blocks = parse_diff(diff_str)
except ValueError as ve:
except ValueError:
return content
for idx, block in enumerate(patch_blocks, 1):

View file

@@ -54,4 +54,4 @@ class OptimizeSchema(Schema):
repo_owner: str | None = None
repo_name: str | None = None
is_async: bool | None = False
n_candidates: int | None = 5
n_candidates: int | None = 5

View file

@@ -1,12 +1,11 @@
from __future__ import annotations
import logging
import sentry_sdk
from pathlib import Path
from typing import TYPE_CHECKING
import sentry_sdk
from ninja import NinjaAPI, Schema
from openai import OpenAIError
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from aiservice.analytics.posthog import ph

View file

@@ -8,11 +8,12 @@ from typing import TYPE_CHECKING
import libcst as cst
import sentry_sdk
from libcst import CSTTransformer, CSTVisitor, Expr, IndentedBlock, SimpleStatementLine, SimpleString
from testgen.instrumentation.edit_generated_test import parse_module_to_cst
from aiservice.common_utils import safe_isort
from optimizer.code_utils.postprocess_constants import profanity_regex
from optimizer.models import CodeExplanationAndID
from optimizer.optimizer_utils import compare_unparsed_ast_to_source, unparse_parse_source
from testgen.instrumentation.edit_generated_test import parse_module_to_cst
if TYPE_CHECKING:
from libcst import FunctionDef

View file

@@ -8,7 +8,6 @@ from typing import TYPE_CHECKING
import libcst as cst
import sentry_sdk
from ninja import NinjaAPI, Schema
from openai import OpenAIError
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from pydantic import ValidationError

View file

@@ -1,27 +1,28 @@
from __future__ import annotations
import re
import sentry_sdk
from typing import TYPE_CHECKING
import sentry_sdk
from ninja import NinjaAPI, Schema
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
from aiservice.analytics.posthog import ph
from aiservice.common_utils import validate_trace_id
from aiservice.env_specific import create_openai_client, debug_log_sensitive_data
from aiservice.models.aimodels import RANKING_MODEL, calculate_llm_cost
from log_features.log_event import update_optimization_cost
from log_features.log_features import log_features
from ninja import NinjaAPI, Schema
from openai import OpenAIError
from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam
if TYPE_CHECKING:
from aiservice.models.aimodels import LLM
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionFunctionMessageParam,
ChatCompletionToolMessageParam,
)
from aiservice.models.aimodels import LLM
# from google import genai
# from pydantic import BaseModel
#

View file

@@ -8,8 +8,9 @@ from dataclasses import dataclass
import black
import isort
import sentry_sdk
from aiservice.models.functions_to_optimize import FunctionParent, FunctionToOptimize
from aiservice.common_utils import safe_isort
from aiservice.models.functions_to_optimize import FunctionParent, FunctionToOptimize
from testgen.models import TestingMode
plat_str = platform.python_version_tuple()

View file

@@ -5,6 +5,8 @@ import logging
import sentry_sdk
from aiservice.common_utils import safe_isort
def validate_testgen_code(code: str, python_version: tuple[int, int, int], max_lines_to_remove: int = 20) -> str:
"""Validate Python testgen code by iteratively truncating invalid code.

View file

@@ -78,7 +78,10 @@ def collect_top_level_definitions(
if isinstance(node, cst.ClassDef):
name = node.name.value
is_test_class = (
name.startswith("Test") or name.endswith(("Test", "TestCase")) or _class_contains_test_methods(node) or _class_inherits_from_test_class(node)
name.startswith("Test")
or name.endswith(("Test", "TestCase"))
or _class_contains_test_methods(node)
or _class_inherits_from_test_class(node)
)
definitions[name] = UsageInfo(name=name, used_by_test=is_test_class)
return definitions

View file

@@ -1,3 +1,5 @@
from math import prod
from libcst import (
Add,
Arg,
@ -16,7 +18,6 @@ from libcst import (
Subtract,
UnaryOperation,
)
from math import prod
class TensorLimit(CSTTransformer):

View file

@@ -7,7 +7,7 @@ import os
from pathlib import Path
from typing import SupportsIndex
from aiservice.common_utils import parse_python_version
from aiservice.common_utils import parse_python_version, safe_isort
from aiservice.env_specific import create_openai_client, debug_log_sensitive_data
from aiservice.models.aimodels import EXECUTE_MODEL, EXPLAIN_MODEL, LLM, PLAN_MODEL, calculate_llm_cost
from aiservice.models.functions_to_optimize import FunctionToOptimize

View file

@@ -10,17 +10,17 @@ from typing import SupportsIndex
import sentry_sdk
import stamina
from ninja import NinjaAPI
from ninja.errors import HttpError
from openai import OpenAIError
from aiservice.analytics.posthog import ph
from aiservice.common_utils import parse_python_version, should_hack_for_demo, validate_trace_id
from aiservice.common_utils import parse_python_version, safe_isort, should_hack_for_demo, validate_trace_id
from aiservice.env_specific import IS_PRODUCTION, debug_log_sensitive_data, open_ai_client
from aiservice.models.aimodels import EXECUTE_MODEL, LLM, calculate_llm_cost
from authapp.auth import AuthBearer
from log_features.log_event import update_optimization_cost
from log_features.log_features import log_features_optimized
from ninja import NinjaAPI
from ninja.errors import HttpError
from openai import OpenAIError
from testgen.instrumentation.edit_generated_test import parse_module_to_cst, replace_definition_with_import
from testgen.instrumentation.instrument_new_tests import instrument_test_source
from testgen.models import (

View file

@@ -1,5 +1,8 @@
from datetime import UTC
import pytest
from django.http import JsonResponse
from aiservice.middleware.track_usage_middleware import TrackUsageMiddleware
@@ -11,14 +14,14 @@ def parse_json(response: JsonResponse):
class FakeSubscription:
def __init__(self, status="active", used=0, limit=100, lifetime=0, plan_type="free", current_period_end=None):
from datetime import datetime, timedelta, timezone
from datetime import datetime, timedelta
self.subscription_status = status
self.optimizations_used = used
self.optimizations_limit = limit
self.total_lifetime_optimizations = lifetime
self.plan_type = plan_type
self.current_period_end = current_period_end or (datetime.now(timezone.utc) + timedelta(days=30))
self.current_period_end = current_period_end or (datetime.now(UTC) + timedelta(days=30))
async def asave(self, *args, **kwargs):
return None

View file

@@ -2,6 +2,7 @@ import ast
import unittest
from django.test import TestCase
from testgen.testgen_context import any_ellipsis_in_ast, ellipsis_in_ast_not_types

View file

@@ -2,7 +2,6 @@ import ast
import os
from aiservice.models.functions_to_optimize import FunctionParent, FunctionToOptimize
from testgen.instrumentation.edit_generated_test import parse_module_to_cst
from testgen.instrumentation.instrument_new_tests import InjectPerfAndLogging
from testgen.models import TestingMode

View file

@@ -1,5 +1,5 @@
from testgen.preprocessing.torch_tensor_limit import OBJECT_MEMORY_LIMIT_MB, detect_torch_usage, get_tensor_size_note
from testgen.preprocessing.preprocess_pipeline import preprocessing_testgen_pipeline
from testgen.preprocessing.torch_tensor_limit import OBJECT_MEMORY_LIMIT_MB, detect_torch_usage, get_tensor_size_note
def test_detect_torch_usage_with_torch():

View file

@@ -1,4 +1,3 @@
import ast
from aiservice.models.functions_to_optimize import FunctionParent, FunctionToOptimize