Merge pull request #118 from codeflash-ai/azure_openai

CF-66 Move to Azure OpenAI service for real this time
This commit is contained in:
Afik 2024-01-19 12:07:46 -08:00 committed by GitHub
commit 9ea6de6e1a
3 changed files with 5 additions and 48 deletions

View file

@@ -20,7 +20,6 @@ from codeflash.code_utils.config_consts import (
)
from codeflash.code_utils.git_utils import get_repo_owner_and_name, get_github_secrets_page_url
from codeflash.github.PrComment import FileDiffContent, PrComment
from codeflash.verification import EXPLAIN_MODEL
import os
@@ -225,10 +224,7 @@ class Optimizer:
code_to_optimize_with_dependents,
dependent_functions,
) = get_constrained_function_context_and_dependent_functions(
function_to_optimize,
self.args.root,
code_to_optimize,
max_tokens=EXPLAIN_MODEL.max_tokens,
function_to_optimize, self.args.root, code_to_optimize
)
logging.info("CODE TO OPTIMIZE %s", code_to_optimize_with_dependents)
module_path = module_name_from_file_path(path, self.args.root)

View file

@@ -168,11 +168,14 @@ def get_function_variables_definitions(
return deduped_sources
MAX_PROMPT_TOKENS = 4096 # 128000 # gpt-4-128k
def get_constrained_function_context_and_dependent_functions(
function_to_optimize: FunctionToOptimize,
project_root_path: str,
code_to_optimize: str,
max_tokens: int,
max_tokens: int = MAX_PROMPT_TOKENS,
) -> tuple[str, list[Source]]:
# TODO: Not just do static analysis, but also find the datatypes of function arguments by running the existing
# unittests and inspecting the arguments to resolve the real definitions and dependencies.

View file

@@ -1,42 +0,0 @@
from pydantic.dataclasses import dataclass
@dataclass
class LLM:
    """Base descriptor for a language model: API model id and context size.

    Subclasses pin ``name``/``max_tokens`` defaults for a specific model so
    call sites can reference context limits (e.g. ``EXPLAIN_MODEL.max_tokens``)
    without hard-coding numbers.
    """

    # Model identifier exactly as sent to the OpenAI API.
    name: str
    # Maximum context window for this model, in tokens.
    max_tokens: int


@dataclass
class GPT_4_128k(LLM):
    """GPT-4 Turbo preview, 128k-token context window."""

    name: str = "gpt-4-1106-preview"
    max_tokens: int = 128000


@dataclass
class GPT_4_32k(LLM):
    """GPT-4, 32k-token context window."""

    # Fixed: was "gpt4-32k", which is not a valid OpenAI model id and was
    # inconsistent with the hyphenation used by every sibling class.
    name: str = "gpt-4-32k"
    max_tokens: int = 32768


@dataclass
class GPT_4(LLM):
    """Base GPT-4, 8k-token context window."""

    name: str = "gpt-4"
    max_tokens: int = 8192


@dataclass
class GPT_3_5_Turbo_16k(LLM):
    """GPT-3.5 Turbo, 16k-token context window."""

    name: str = "gpt-3.5-turbo-16k"
    max_tokens: int = 16384


@dataclass
class GPT_3_5_Turbo(LLM):
    """GPT-3.5 Turbo, 4k-token context window."""

    name: str = "gpt-3.5-turbo"
    max_tokens: int = 4096


# Models used for the distinct pipeline stages; all default to GPT-4 Turbo.
EXPLAIN_MODEL: LLM = GPT_4_128k()
PLAN_MODEL: LLM = GPT_4_128k()
EXECUTE_MODEL: LLM = GPT_4_128k()