few manual fixes

parent 44b71e7c76
commit ff8d47797d

4 changed files with 45 additions and 36 deletions
@@ -97,10 +97,10 @@ def pytest_addoption(parser: Parser) -> None:
 @pytest.hookimpl(trylast=True)
 def pytest_configure(config: Config) -> None:
     config.addinivalue_line("markers", "loops(n): run the given test function `n` times.")
-    config.pluginmanager.register(PyTest_Loops(config), PyTest_Loops.name)
+    config.pluginmanager.register(PytestLoops(config), PytestLoops.name)
 
 
-class PyTest_Loops:
+class PytestLoops:
     name: str = "pytest-loops"
 
     def __init__(self, config: Config) -> None:
@@ -113,9 +113,8 @@ class PyTest_Loops:
     def pytest_runtestloop(self, session: Session) -> bool:
         """Reimplement the test loop but loop for the user defined amount of time."""
         if session.testsfailed and not session.config.option.continue_on_collection_errors:
-            raise session.Interrupted(
-                "%d error%s during collection" % (session.testsfailed, "s" if session.testsfailed != 1 else "")
-            )
+            msg = "{} error{} during collection".format(session.testsfailed, "s" if session.testsfailed != 1 else "")
+            raise session.Interrupted(msg)
 
         if session.config.option.collectonly:
             return True
@@ -130,11 +129,11 @@ class PyTest_Loops:
         total_time = self._get_total_time(session)
 
         for index, item in enumerate(session.items):
-            item: pytest.Item = item
-            item._report_sections.clear()  # clear reports for new test
+            item: pytest.Item = item  # noqa: PLW0127, PLW2901
+            item._report_sections.clear()  # clear reports for new test # noqa: SLF001
 
             if total_time > SHORTEST_AMOUNT_OF_TIME:
-                item._nodeid = self._set_nodeid(item._nodeid, count)
+                item._nodeid = self._set_nodeid(item._nodeid, count)  # noqa: SLF001
 
             next_item: pytest.Item = session.items[index + 1] if index + 1 < len(session.items) else None
@@ -234,7 +233,8 @@ class PyTest_Loops:
         seconds = session.config.option.codeflash_seconds
         total_time = hours_in_seconds + minutes_in_seconds + seconds
         if total_time < SHORTEST_AMOUNT_OF_TIME:
-            raise InvalidTimeParameterError(f"Total time cannot be less than: {SHORTEST_AMOUNT_OF_TIME}!")
+            msg = f"Total time cannot be less than: {SHORTEST_AMOUNT_OF_TIME}!"
+            raise InvalidTimeParameterError(msg)
         return total_time
 
     def _timed_out(self, session: Session, start_time: float, count: int) -> bool:
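Note on the pattern above: pulling the message into a `msg` variable before raising matches ruff's flake8-errmsg rules (EM101/EM102), which the other `# noqa` codes in this commit suggest the project enforces. A minimal standalone sketch of the pattern, using a stand-in exception class:

class InvalidTimeParameterError(ValueError):
    """Stand-in for the project's exception of the same name."""

def check_total_time(total_time: float, minimum: float) -> float:
    if total_time < minimum:
        # Bind the message first: the raise line stays clean, and ruff's
        # EM101/EM102 (no literal/f-string directly inside raise) are satisfied.
        msg = f"Total time cannot be less than: {minimum}!"
        raise InvalidTimeParameterError(msg)
    return total_time

print(check_total_time(5.0, 0.1))  # 5.0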
@@ -262,11 +262,10 @@ class PyTest_Loops:
             return request.param
         except AttributeError:
             if issubclass(request.cls, TestCase):
-                warnings.warn("Repeating unittest class tests not supported")
+                warnings.warn("Repeating unittest class tests not supported", stacklevel=2)
             else:
-                raise UnexpectedError(
-                    "This call couldn't work with pytest-loops. Please consider raising an issue with your usage."
-                )
+                msg = "This call couldn't work with pytest-loops. Please consider raising an issue with your usage."
+                raise UnexpectedError(msg) from None
         return count
 
     @pytest.hookimpl(trylast=True)
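The `stacklevel=2` added in the hunk above makes the warning report the caller's location rather than the plugin internals; ruff's B028 flags `warnings.warn` calls without an explicit stacklevel, which is likely the motivation here. A runnable sketch:

import warnings

def fallback() -> int:
    # stacklevel=2 attributes the warning to fallback()'s caller, the line
    # a user can actually act on, instead of this warnings.warn call itself.
    warnings.warn("Repeating unittest class tests not supported", stacklevel=2)
    return 1

fallback()  # the reported filename/lineno is this call site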
@@ -1,10 +1,9 @@
 from __future__ import annotations
 
 import sys
-from collections.abc import Iterator
 from enum import Enum
 from pathlib import Path
-from typing import Optional, cast
+from typing import TYPE_CHECKING, Optional, cast
 
 from pydantic import BaseModel
 from pydantic.dataclasses import dataclass
@@ -13,6 +12,9 @@ from rich.tree import Tree
 from codeflash.cli_cmds.console import DEBUG_MODE, logger
 from codeflash.verification.comparator import comparator
 
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
 
 class VerificationType(str, Enum):
     FUNCTION_CALL = (
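The two import hunks above move `Iterator` behind an `if TYPE_CHECKING:` guard: combined with `from __future__ import annotations`, annotations are never evaluated at runtime, so typing-only imports cost nothing when the module loads. The idiom in isolation:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by type checkers only; skipped entirely at runtime.
    from collections.abc import Iterator

def count_up(n: int) -> Iterator[int]:
    # With the future import, the annotation above is a plain string at
    # runtime, so Iterator does not need to be importable here.
    yield from range(n)

print(list(count_up(3)))  # [0, 1, 2]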
@@ -53,7 +55,11 @@ class InvocationId:
 
     # test_module_path:TestSuiteClass.test_function_name:function_tested:iteration_id
     def id(self) -> str:
-        return f"{self.test_module_path}:{(self.test_class_name + '.' if self.test_class_name else '')}{self.test_function_name}:{self.function_getting_tested}:{self.iteration_id}"
+        class_prefix = f"{self.test_class_name}." if self.test_class_name else ""
+        return (
+            f"{self.test_module_path}:{class_prefix}{self.test_function_name}:"
+            f"{self.function_getting_tested}:{self.iteration_id}"
+        )
 
     @staticmethod
     def from_str_id(string_id: str, iteration_id: Optional[str] = None) -> InvocationId:
@@ -167,9 +173,13 @@ class TestResults(BaseModel):
     def usable_runtime_data_by_test_case(self) -> dict[InvocationId, list[int]]:
         for result in self.test_results:
             if result.did_pass and not result.runtime:
-                logger.debug(
-                    f"Ignoring test case that passed but had no runtime -> {result.id}, Loop # {result.loop_index}, Test Type: {result.test_type}, Verification Type: {result.verification_type}"
-                )
+                msg = (
+                    f"Ignoring test case that passed but had no runtime -> {result.id}, "
+                    f"Loop # {result.loop_index}, Test Type: {result.test_type}, "
+                    f"Verification Type: {result.verification_type}"
+                )
+                logger.debug(msg)
 
         usable_runtimes = [
             (result.id, result.runtime) for result in self.test_results if result.did_pass and result.runtime
         ]
@@ -179,16 +189,14 @@ class TestResults(BaseModel):
         }
 
     def total_passed_runtime(self) -> int:
-        """Calculate the sum of runtimes of all test cases that passed, where a testcase runtime
-        is the minimum value of all looped execution runtimes.
+        """Calculate the sum of runtimes of all test cases that passed.
+
+        A testcase runtime is the minimum value of all looped execution runtimes.
 
         :return: The runtime in nanoseconds.
         """
         return sum(
-            [
-                min(usable_runtime_data)
-                for invocation_id, usable_runtime_data in self.usable_runtime_data_by_test_case().items()
-            ]
+            [min(usable_runtime_data) for _, usable_runtime_data in self.usable_runtime_data_by_test_case().items()]
         )
 
     def __iter__(self) -> Iterator[FunctionTestInvocation]:
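The reworked `total_passed_runtime` docstring describes a min-then-sum aggregation: each passed test case contributes the fastest of its looped runs, and those minima are summed. A self-contained illustration with made-up numbers:

# Runtimes in nanoseconds, one list entry per loop of the same test case.
runtimes_by_case: dict[str, list[int]] = {
    "tests/test_a.py::test_fast": [1200, 1100, 1150],
    "tests/test_a.py::test_slow": [9000, 8700],
}

# Each case counts once, at its minimum observed runtime: 1100 + 8700.
total = sum(min(runs) for runs in runtimes_by_case.values())
assert total == 9800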
@@ -60,7 +60,7 @@ def run_behavioral_tests(
         "--codeflash_loops_scope=session",
         "--codeflash_min_loops=1",
         "--codeflash_max_loops=1",
-        f"--codeflash_seconds={pytest_target_runtime_seconds}",  # TODO : This is unnecessary, update the plugin to not ask for this
+        f"--codeflash_seconds={pytest_target_runtime_seconds}",  # TODO : This is unnecessary, update the plugin to not ask for this # noqa: E501
     ]
 
     result_file_path = get_run_tmp_file(Path("pytest_results.xml"))
@@ -87,7 +87,7 @@ def run_behavioral_tests(
         )
         logger.debug(
             f"Result return code: {results.returncode}"
-            f"{', Result stderr:' + str(results.stderr) if results.stderr else ''}"
+            + (f", Result stderr: {results.stderr}" if results.stderr else "")
         )
     else:
         results = execute_test_subprocess(
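The `logger.debug` edits here and in the next two hunks swap a conditional expression embedded in an adjacent f-string for an explicit `+` with a parenthesized conditional; note the rewrite also adds a space after `stderr:`. Both forms side by side with dummy values:

returncode = 1
stderr = b"boom"

# Before: the conditional lives inside an adjacent f-string's braces.
before = f"Result return code: {returncode}" f"{', Result stderr:' + str(stderr) if stderr else ''}"

# After: the optional suffix is an ordinary expression joined with +.
after = f"Result return code: {returncode}" + (f", Result stderr: {stderr}" if stderr else "")

print(before)  # Result return code: 1, Result stderr:b'boom'
print(after)   # Result return code: 1, Result stderr: b'boom'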
@@ -98,7 +98,7 @@ def run_behavioral_tests(
         )
         logger.debug(
             f"Result return code: {results.returncode}"
-            f"{', Result stderr:' + str(results.stderr) if results.stderr else ''}"
+            + (f", Result stderr: {results.stderr}" if results.stderr else "")
         )
     elif test_framework == "unittest":
         if enable_coverage:
@@ -106,10 +106,12 @@ def run_behavioral_tests(
             raise ValueError(msg)
         test_env["CODEFLASH_LOOP_INDEX"] = "1"
         test_files = [file.instrumented_behavior_file_path for file in test_paths.test_files]
-        result_file_path, results = run_unittest_tests(verbose, test_files, test_env, cwd)
+        result_file_path, results = run_unittest_tests(
+            verbose=verbose, test_file_paths=test_files, test_env=test_env, cwd=cwd
+        )
         logger.debug(
             f"Result return code: {results.returncode}"
-            f"{', Result stderr:' + str(results.stderr) if results.stderr else ''}"
+            + (f", Result stderr: {results.stderr}" if results.stderr else "")
         )
     else:
         msg = f"Unsupported test framework: {test_framework}"
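The `run_unittest_tests` call above switches to keyword arguments so the four similar-looking parameters can't be silently transposed. A hypothetical stand-in signature (the real one isn't shown in this diff):

from pathlib import Path

# Hypothetical stand-in mirroring the call in the hunk above.
def run_unittest_tests(*, verbose: bool, test_file_paths: list[Path],
                       test_env: dict[str, str], cwd: Path) -> None:
    # Keyword-only parameters (the leading *) force callers to name every
    # argument, so same-typed values cannot be swapped silently.
    print(verbose, test_file_paths, test_env.get("CODEFLASH_LOOP_INDEX"), cwd)

run_unittest_tests(verbose=True, test_file_paths=[Path("t.py")],
                   test_env={"CODEFLASH_LOOP_INDEX": "1"}, cwd=Path.cwd())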
@@ -4,12 +4,12 @@ from typing import NoReturn
 import pytest
 from _pytest.config import Config
 
-from codeflash.verification.pytest_plugin import PyTest_Loops
+from codeflash.verification.pytest_plugin import PytestLoops
 
 
 @pytest.fixture
-def pytest_loops_instance(pytestconfig: Config) -> PyTest_Loops:
-    return PyTest_Loops(pytestconfig)
+def pytest_loops_instance(pytestconfig: Config) -> PytestLoops:
+    return PytestLoops(pytestconfig)
 
 
 @pytest.fixture
@@ -27,7 +27,7 @@ def create_mock_module(module_name: str, source_code: str) -> types.ModuleType:
     return module
 
 
-def test_clear_lru_caches_function(pytest_loops_instance: PyTest_Loops, mock_item: type) -> None:
+def test_clear_lru_caches_function(pytest_loops_instance: PytestLoops, mock_item: type) -> None:
     source_code = """
 import functools
 
@@ -46,7 +46,7 @@ my_func(10) # hit the cache
     assert mock_module.my_func.cache_info().currsize == 0
 
 
-def test_clear_lru_caches_class_method(pytest_loops_instance: PyTest_Loops, mock_item: type) -> None:
+def test_clear_lru_caches_class_method(pytest_loops_instance: PytestLoops, mock_item: type) -> None:
     source_code = """
 import functools
 
@@ -67,7 +67,7 @@ obj.my_method(5) # Hit the cache
     assert mock_module.MyClass.my_method.cache_info().currsize == 0
 
 
-def test_clear_lru_caches_exception_handling(pytest_loops_instance: PyTest_Loops, mock_item: type) -> None:
+def test_clear_lru_caches_exception_handling(pytest_loops_instance: PytestLoops, mock_item: type) -> None:
     """Test that exceptions during clearing are handled."""
 
     class BrokenCache:
@@ -79,7 +79,7 @@ def test_clear_lru_caches_exception_handling(pytest_loops_instance: PyTest_Loops
     pytest_loops_instance._clear_lru_caches(item)  # noqa: SLF001
 
 
-def test_clear_lru_caches_no_cache(pytest_loops_instance: PyTest_Loops, mock_item: type) -> None:
+def test_clear_lru_caches_no_cache(pytest_loops_instance: PytestLoops, mock_item: type) -> None:
     def no_cache_func(x: int) -> int:
         return x
 