Mirror of https://github.com/codeflash-ai/codeflash-internal.git
Catch and log Pydantic validation errors on generated code and explanation
parent d0ceaf7968
commit 3c9196e4f9
4 changed files with 28 additions and 15 deletions
@@ -26,7 +26,7 @@
       </ENTRIES>
     </EXTENSION>
     <option name="SCRIPT_NAME" value="$PROJECT_DIR$/cli/codeflash/main.py" />
-    <option name="PARAMETERS" value="--file code_to_optimize/bubble_sort.py --function sorter --test-framework pytest --tests-root code_to_optimize/tests/pytest" />
+    <option name="PARAMETERS" value="--file code_to_optimize/bubble_sort.py --function sorter --module-root $PROJECT_DIR$/cli --test-framework pytest --tests-root code_to_optimize/tests/pytest" />
     <option name="SHOW_COMMAND_LINE" value="false" />
     <option name="EMULATE_TERMINAL" value="false" />
     <option name="MODULE_MODE" value="false" />
@@ -25,7 +25,7 @@
       </ENTRIES>
     </EXTENSION>
     <option name="SCRIPT_NAME" value="$PROJECT_DIR$/cli/codeflash/main.py" />
-    <option name="PARAMETERS" value="--all langchain/evaluation/embedding_distance" />
+    <option name="PARAMETERS" value="--all langchain/chains/retrieval_qa" />
     <option name="SHOW_COMMAND_LINE" value="false" />
     <option name="EMULATE_TERMINAL" value="false" />
     <option name="MODULE_MODE" value="false" />
@@ -8,7 +8,13 @@ from dotenv import load_dotenv
 from ninja import NinjaAPI, Schema
 from openai import OpenAIError, AsyncOpenAI
 from openai.lib.azure import AsyncAzureOpenAI
-from openai.types.chat import ChatCompletionUserMessageParam, ChatCompletionSystemMessageParam
+from openai.types.chat import (
+    ChatCompletionUserMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionFunctionMessageParam,
+)

 from aiservice.analytics.posthog import ph
 from aiservice.models.aimodels import OPTIMIZE_MODEL, LLM
@@ -22,7 +28,7 @@ if not os.environ.get("ENVIRONMENT") == "PRODUCTION":
     load_dotenv()
 if os.environ.get("OPENAI_API_TYPE", default="openai") == "azure":
     print("Using Azure OpenAI service for optimizer.")
-    openai_client = AsyncAzureOpenAI(
+    openai_client: AsyncOpenAI | AsyncAzureOpenAI = AsyncAzureOpenAI(
         # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
         api_version="2023-05-15",
         # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
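Note on the hunk above: annotating the assignment as openai_client: AsyncOpenAI | AsyncAzureOpenAI lets a type checker accept whichever client the environment check ends up constructing. Below is a minimal sketch of that pattern, not the file's exact code; the non-Azure branch and the constructor arguments beyond api_version are not shown in this diff and are illustrative only.

# Minimal sketch of the conditional-client pattern the annotation supports.
# Without an explicit union annotation, a type checker may infer the narrower
# AsyncAzureOpenAI type from the first assignment and then flag a plain
# AsyncOpenAI fallback in the else branch.
import os

from openai import AsyncOpenAI
from openai.lib.azure import AsyncAzureOpenAI

openai_client: AsyncOpenAI | AsyncAzureOpenAI
if os.environ.get("OPENAI_API_TYPE", "openai") == "azure":
    # Azure endpoint and key are expected to come from environment variables here.
    openai_client = AsyncAzureOpenAI(api_version="2023-05-15")
else:
    # OPENAI_API_KEY is read from the environment by default.
    openai_client = AsyncOpenAI()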
@@ -43,12 +49,14 @@ async def optimize_python_code(
     - source_code (str): The python code to optimize.
     - n (int): Number of optimization variants to generate. Default is 1.

-    Returns:
-    - List[Tuple[Union[str, None], Union[str, None]]]: A list of tuples where the first element is the optimized code and the second is the explanation.
+    Returns: - List[Tuple[Union[str, None], Union[str, None]]]: A list of tuples where the first element is the
+    optimized code and the second is the explanation.
     """
     print("/optimize: Optimizing python code.")
-    # TODO: Experiment with iterative approaches to optimization. Take the learnings from the testing phase into the next optimization iteration
-    # TODO: Experiment with iterative chain-of-thought generation. ask what is the function doing and then ask it to describe how to speed it up and then generate optimization
+    # TODO: Experiment with iterative approaches to optimization. Take the learnings from the testing phase into the
+    # next optimization iteration
+    # TODO: Experiment with iterative chain-of-thought generation. ask what is the
+    # function doing and then ask it to describe how to speed it up and then generate optimization
     system_message = ChatCompletionSystemMessageParam(
         role="system",
         content=(
@@ -61,7 +69,13 @@ async def optimize_python_code(
         role="user",
         content=f"Rewrite this python program to run faster.\n```python\n{source_code}\n```",
     )
-    messages = [system_message, user_message]
+    messages: list[
+        ChatCompletionSystemMessageParam
+        | ChatCompletionUserMessageParam
+        | ChatCompletionAssistantMessageParam
+        | ChatCompletionToolMessageParam
+        | ChatCompletionFunctionMessageParam
+    ] = [system_message, user_message]

     # TODO: Verify if the context window length is within the model capability
     try:
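For context on the messages annotation above: the openai 1.x SDK also ships ChatCompletionMessageParam, a ready-made union of these message TypedDicts, which the chat completions call accepts. The sketch below shows how such an annotated list is typically passed to the async client; the function name, model name, and return shape are illustrative assumptions (the real code presumably uses OPTIMIZE_MODEL from aiservice.models.aimodels), not this file's implementation.

# Hedged sketch: building an annotated message list and sending it to the
# openai 1.x async chat completions API.
from openai import AsyncOpenAI
from openai.types.chat import (
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)

async def ask_for_optimizations(client: AsyncOpenAI, source_code: str, n: int = 1) -> list[str | None]:
    # ChatCompletionMessageParam is the SDK's own union of the per-role TypedDicts,
    # so the list accepts system, user, assistant, tool, and function messages alike.
    messages: list[ChatCompletionMessageParam] = [
        ChatCompletionSystemMessageParam(role="system", content="You optimize Python code."),
        ChatCompletionUserMessageParam(
            role="user",
            content=f"Rewrite this python program to run faster.\n```python\n{source_code}\n```",
        ),
    ]
    response = await client.chat.completions.create(model="gpt-4", messages=messages, n=n)
    return [choice.message.content for choice in response.choices]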
@@ -96,7 +110,11 @@ async def optimize_python_code(
             # log exception with sentry
             sentry_sdk.capture_exception(e)
             continue
-        optimized_code_and_explanations.append(CodeAndExplanation(cst_module, explanation))
+        try:
+            optimized_code_and_explanations.append(CodeAndExplanation(cst_module, explanation))
+        except ValueError as exc:
+            # Another one bites the Pydantic validation dust
+            sentry_sdk.capture_exception(exc)
     return optimized_code_and_explanations

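The try/except above is the point of this commit: CodeAndExplanation appears to be a Pydantic-backed model (the file imports Schema from django-ninja), and Pydantic's ValidationError subclasses ValueError, so a malformed generated code/explanation pair fails right at construction and can be reported to Sentry instead of crashing the request. Below is a self-contained sketch of that behaviour; the CodeAndExplanation definition and its field names are stand-ins, since the real schema is not shown in this diff.

# Minimal sketch: a Pydantic validation failure is catchable as ValueError.
import pydantic

class CodeAndExplanation(pydantic.BaseModel):  # stand-in for the real ninja Schema
    code: str
    explanation: str

try:
    # Passing None where a str is required raises pydantic.ValidationError,
    # which subclasses ValueError, so the clause below catches it.
    CodeAndExplanation(code=None, explanation="uses a faster sort")
except ValueError as exc:
    print(f"validation failed: {exc}")  # the real code calls sentry_sdk.capture_exception(exc)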
@@ -10,7 +10,6 @@ readme = "README.md"
 python = "^3.12.1"
 django = "^5.0"
 django-ninja = "^1.1.0"
-django-stubs = "^1.12.0"
 openai = "^1.13.3"
 python-dotenv = "^1.0.0"
 dj-database-url = "^2.1.0"
@@ -47,7 +46,6 @@ strict = true
 verbosity = 3
 warn_unreachable = true
 plugins = [
-    "mypy_django_plugin.main",
     "pydantic.mypy"
 ]

@@ -60,9 +58,6 @@ warn_required_dynamic_aliases = true
 line-length = 100
 target-version = ['py312']

-[tool.django-stubs]
-django_settings_module = "aiservice.settings"
-
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"