Sort imports for generated tests too. Also tried adding some tests to cover this, but after spending an hour I couldn't get the Django test runner to find the tests, so punting on that for now.
This commit is contained in:
parent
07e24d3d57
commit
aed3f853ae
8 changed files with 99 additions and 3 deletions
|
|
@ -21,6 +21,7 @@
|
|||
<sourceFolder url="file://$MODULE_DIR$/js/cf-webapp" isTestSource="false" />
|
||||
<sourceFolder url="file://$MODULE_DIR$/js/common" isTestSource="false" />
|
||||
<sourceFolder url="file://$MODULE_DIR$/cli" isTestSource="false" />
|
||||
<sourceFolder url="file://$MODULE_DIR$/django/aiservice/tests" isTestSource="true" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/.aider.ident.cache.v1" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/.aider.tags.cache.v1" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/.mypy_cache" />
|
||||
|
|
@ -31,7 +32,7 @@
|
|||
<excludeFolder url="file://$MODULE_DIR$/js/cf-webapp/node_modules" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/js/common/node_modules" />
|
||||
</content>
|
||||
<orderEntry type="jdk" jdkName="$USER_HOME$/miniforge3/envs/codeflash311" jdkType="Python SDK" />
|
||||
<orderEntry type="jdk" jdkName="$USER_HOME$/miniforge3/envs/aiservice" jdkType="Python SDK" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
<orderEntry type="module" module-name="langchain" />
|
||||
</component>
|
||||
|
|
|
|||
20
django/aiservice/runtests.py
Normal file
20
django/aiservice/runtests.py
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
#!/usr/bin/env python
"""Run the Django test suite for the aiservice project.

Loads environment variables from aiservice/.env, points Django at the
test settings module, then delegates to the configured Django test runner.

TODO(arc): get this working — the runner currently doesn't discover any
tests. NOTE(review): tests/test_testgen.py is entirely commented out, so
there may genuinely be zero collectable tests — confirm before debugging
the runner itself.
"""
import os
import sys

import django
import dotenv


def main() -> int:
    """Set up Django, run the ``tests`` package, and return a shell exit code."""
    # Side effects (dotenv load, environment mutation) are kept inside
    # main() so merely importing this module stays harmless.
    dotenv.load_dotenv("aiservice/.env")
    # setdefault (rather than unconditional assignment) lets a caller
    # override the settings module from the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.test_settings")

    from django.conf import settings
    from django.test.utils import get_runner

    django.setup()
    TestRunner = get_runner(settings)
    test_runner = TestRunner()
    failures = test_runner.run_tests(["tests"])
    # Conventional shell semantics: 0 on success, 1 if any test failed.
    return int(bool(failures))


if __name__ == "__main__":
    sys.exit(main())
|
||||
|
|
@ -1,8 +1,10 @@
|
|||
# Derived from https://github.com/openai/openai-cookbook/blob/main/examples/Unit_test_writing_using_a_multi-step_prompt.ipynb
|
||||
|
||||
import ast # used for detecting whether generated Python code is valid
|
||||
import ast
|
||||
import logging
|
||||
import os
|
||||
|
||||
import isort
|
||||
from dotenv import load_dotenv
|
||||
from ninja import NinjaAPI, Schema
|
||||
from openai import AsyncOpenAI
|
||||
|
|
@ -362,6 +364,9 @@ async def testgen(
|
|||
generated_test_source = replace_definition_with_import(
|
||||
generated_test_source, data.function_to_optimize, data.module_path
|
||||
)
|
||||
# Use isort to sort and deduplicate the imports in the generated test code
|
||||
generated_test_source = isort.code(generated_test_source)
|
||||
|
||||
ph(request.user, "aiservice-testgen-tests-generated")
|
||||
except TestGenerationFailedException as e:
|
||||
logging.error("Test generation failed. Skipping test generation.")
|
||||
|
|
|
|||
|
|
@ -1 +0,0 @@
|
|||
# Create your tests here.
|
||||
0
django/aiservice/tests/__init__.py
Normal file
0
django/aiservice/tests/__init__.py
Normal file
18
django/aiservice/tests/test_settings.py
Normal file
18
django/aiservice/tests/test_settings.py
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
"""Minimal Django settings module used only by the aiservice test runner."""
import os

# SECRET_KEY is read from the environment in every deployment mode, so it
# is assigned once here instead of being duplicated in both branches below.
SECRET_KEY = os.environ.get("SECRET_KEY")

if os.environ.get("ENVIRONMENT") == "PRODUCTION":
    # Azure exposes the Postgres connection string under a prefixed name;
    # mirror it to the generic DATABASE_URL the app expects. Guarded so a
    # missing value doesn't raise TypeError — os.environ values must be str,
    # and the original unconditional assignment crashed when the source
    # variable was unset.
    _conn_str = os.environ.get("POSTGRESQLCONNSTR_DATABASE_URL")
    if _conn_str is not None:
        os.environ["DATABASE_URL"] = _conn_str
    DEBUG = False
else:
    DEBUG = True

# App registry for the test environment: the project's own apps plus the
# minimal django.contrib apps they need, plus the "tests" package itself
# so the runner can import it.
INSTALLED_APPS = [
    "authapp.apps.AuthAppConfig",
    "testgen.apps.TestgenConfig",
    "optimizer.apps.OptimizerConfig",
    "django.contrib.contenttypes",
    "django.contrib.staticfiles",
    "tests",
]
|
||||
53
django/aiservice/tests/test_testgen.py
Normal file
53
django/aiservice/tests/test_testgen.py
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
# TODO(arc): These tests are disabled — the Django test runner could not be
# made to discover/run them in a reasonable amount of time (see
# django/aiservice/runtests.py). They are kept commented out as a starting
# point for when the runner is fixed. They mock the OpenAI client and check
# that generated test code comes back with its imports sorted and
# deduplicated by isort.

# from unittest.mock import AsyncMock, patch
#
# from testgen.testgen import generate_regression_tests_from_function


# @patch("testgen.testgen.openai_client", new_callable=AsyncMock)
# async def test_generate_regression_tests_sorts_and_deduplicates_imports(mock_openai_client):
#     # Mock the OpenAI client's response to simulate the test generation process
#     mock_openai_client.with_options.return_value.chat.completions.create.return_value = AsyncMock(
#         choices=[AsyncMock(message=AsyncMock(content="import os\nimport sys\nimport os"))]
#     )
#
#     # Simulate the function code and name
#     function_code = "def foo():\n    pass"
#     function_name = "foo"
#
#     # Generate the regression tests
#     generated_tests = await generate_regression_tests_from_function(
#         user_id="test_user",
#         function_code=function_code,
#         function_name=function_name,
#         unit_test_package="pytest",
#     )
#
#     # Check if the generated tests have sorted and deduplicated imports
#     expected_code = "import os\nimport sys\n\ndef foo():\n    pass\n"
#     assert generated_tests == expected_code
#
#
# @patch("testgen.testgen.openai_client", new_callable=AsyncMock)
# async def test_generate_regression_tests_keeps_sorted_imports(mock_openai_client):
#     # Mock the OpenAI client's response to simulate the test generation process
#     mock_openai_client.with_options.return_value.chat.completions.create.return_value = AsyncMock(
#         choices=[AsyncMock(message=AsyncMock(content="import json\nimport os\nimport sys"))]
#     )
#
#     # Simulate the function code and name
#     function_code = "def foo():\n    pass"
#     function_name = "foo"
#
#     # Generate the regression tests
#     generated_tests = await generate_regression_tests_from_function(
#         user_id="test_user",
#         function_code=function_code,
#         function_name=function_name,
#         unit_test_package="pytest",
#     )
#
#     # Check if the generated tests have kept the sorted imports
#     expected_code = "import json\nimport os\nimport sys\n\ndef foo():\n    pass\n"
#     assert generated_tests == expected_code
|
||||
Loading…
Reference in a new issue