mirror of
https://github.com/codeflash-ai/codeflash-agent.git
synced 2026-05-04 18:25:19 +00:00
* chore: add gitignore entries for local eval repos, e2e fixtures, and env files * fix: restore clean bubble_sort_method.py test fixture The call-site ID commit re-contaminated this file with instrumentation decorators, causing tests to fail with missing CODEFLASH_LOOP_INDEX. * fix: resolve ruff and mypy errors in codeflash-python - Add import-not-found ignores for optional torch/jax imports - Extract magic column index to _STDOUT_COLUMN_INDEX constant - Fix unused variable in _instrument_sync.py - Cast cpu_time_ns to int for mypy arg-type * fix: add skip markers for optional deps and apply ruff formatting to tests Skip torch/jax/tensorflow tests when those packages are not installed. Move has_module helper to conftest.py for reuse across test files. Apply ruff format to all test files that drifted. * fix: resolve remaining ruff format and mypy errors - Add missing blank line in conftest.py (ruff format) - Remove unused import-untyped ignore on jax import (mypy unused-ignore) - Add type: ignore comments for object-typed SQLite row values * chore: bump codeflash-python to 0.1.1.dev0
419 lines
13 KiB
Python
419 lines
13 KiB
Python
"""Unit tests for inject_profiling_into_existing_test with different used_frameworks values.
|
|
|
|
Tests verify that:
|
|
- ``detect_frameworks_from_code`` correctly identifies GPU framework imports
|
|
- The sync instrumentation path produces framework-agnostic output with
|
|
``_codeflash_call_site`` tracking instead of ``codeflash_wrap``
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
from pathlib import Path
|
|
|
|
from codeflash_python._model import FunctionToOptimize, TestingMode
|
|
from codeflash_python.test_discovery.models import CodePosition
|
|
from codeflash_python.testing._instrument_core import (
|
|
detect_frameworks_from_code,
|
|
)
|
|
from codeflash_python.testing._instrumentation import (
|
|
inject_profiling_into_existing_test,
|
|
)
|
|
|
|
|
|
class TestDetectFrameworksFromCode:
    """Unit tests for the detect_frameworks_from_code helper."""

    def test_no_frameworks(self) -> None:
        """A file with no GPU framework imports maps to an empty dict."""
        source = """import os
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {}

    def test_torch_import(self) -> None:
        """A plain ``import torch`` is detected under its canonical name."""
        source = """import torch
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"torch": "torch"}

    def test_torch_aliased_import(self) -> None:
        """``import torch as th`` reports the local alias ``th``."""
        source = """import torch as th
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"torch": "th"}

    def test_torch_submodule_import(self) -> None:
        """``from torch import nn`` is detected under the canonical name."""
        source = """from torch import nn
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"torch": "torch"}

    def test_torch_dotted_import(self) -> None:
        """A dotted import such as ``import torch.cuda`` still maps to torch."""
        source = """import torch.cuda
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"torch": "torch"}

    def test_tensorflow_import(self) -> None:
        """A plain ``import tensorflow`` is detected under its canonical name."""
        source = """import tensorflow
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"tensorflow": "tensorflow"}

    def test_tensorflow_aliased_import(self) -> None:
        """``import tensorflow as tf`` reports the local alias ``tf``."""
        source = """import tensorflow as tf
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"tensorflow": "tf"}

    def test_tensorflow_submodule_import(self) -> None:
        """``from tensorflow import keras`` maps to the canonical name."""
        source = """from tensorflow import keras
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"tensorflow": "tensorflow"}

    def test_jax_import(self) -> None:
        """A plain ``import jax`` is detected under its canonical name."""
        source = """import jax
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"jax": "jax"}

    def test_jax_aliased_import(self) -> None:
        """``import jax as jnp`` reports the local alias ``jnp``."""
        source = """import jax as jnp
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"jax": "jnp"}

    def test_jax_submodule_import(self) -> None:
        """``from jax import numpy as jnp`` maps to the canonical name."""
        source = """from jax import numpy as jnp
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {"jax": "jax"}

    def test_multiple_frameworks(self) -> None:
        """All three frameworks are reported when imported together."""
        source = """import torch
import tensorflow
import jax
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {
            "torch": "torch",
            "tensorflow": "tensorflow",
            "jax": "jax",
        }

    def test_multiple_frameworks_aliased(self) -> None:
        """Each framework's local alias is reported when all are aliased."""
        source = """import torch as th
import tensorflow as tf
import jax as jnp
from mymodule import my_function

def test_something():
    pass
"""
        assert detect_frameworks_from_code(source) == {
            "torch": "th",
            "tensorflow": "tf",
            "jax": "jnp",
        }

    def test_syntax_error_returns_empty(self) -> None:
        """Unparseable input degrades gracefully to an empty dict."""
        source = """this is not valid python code !!!"""
        assert detect_frameworks_from_code(source) == {}
|
|
|
|
|
|
def _make_func() -> FunctionToOptimize:
    """Build the FunctionToOptimize fixture shared by the injection tests."""
    target = FunctionToOptimize(
        function_name="my_function",
        parents=[],
        file_path=Path("mymodule.py"),
    )
    return target
|
|
|
|
|
|
def _assert_sync_call_site_output(instrumented_code: str) -> None:
|
|
"""Assert the sync path output has call-site tracking, not codeflash_wrap."""
|
|
assert (
|
|
"from codeflash_async_wrapper import _codeflash_call_site"
|
|
in instrumented_code
|
|
)
|
|
assert "_codeflash_call_site.set(" in instrumented_code
|
|
assert "codeflash_wrap" not in instrumented_code
|
|
assert "torch.cuda.synchronize" not in instrumented_code
|
|
assert "torch.mps.synchronize" not in instrumented_code
|
|
assert "tensorflow.test.experimental.sync_devices" not in instrumented_code
|
|
assert "jax.block_until_ready" not in instrumented_code
|
|
assert "import gc" not in instrumented_code
|
|
assert "import sqlite3" not in instrumented_code
|
|
assert "import dill" not in instrumented_code
|
|
|
|
|
|
class TestInjectProfilingBehaviorMode:
    """Tests for inject_profiling_into_existing_test in BEHAVIOR mode.

    The sync path produces framework-agnostic output with _codeflash_call_site
    tracking regardless of which GPU frameworks are imported.
    """

    def _instrument_and_check(
        self, tmp_path: Path, source: str, call_line: int
    ) -> None:
        """Write *source* to a temp test file, instrument it in BEHAVIOR mode,
        and verify the framework-agnostic call-site output."""
        test_file = tmp_path / "test_example.py"
        test_file.write_text(source)

        success, instrumented_code = inject_profiling_into_existing_test(
            test_path=test_file,
            call_positions=[CodePosition(call_line, 13)],
            function_to_optimize=_make_func(),
            tests_project_root=tmp_path,
            mode=TestingMode.BEHAVIOR,
        )

        assert success is True
        _assert_sync_call_site_output(instrumented_code)

    def test_no_frameworks(self, tmp_path: Path) -> None:
        """Sync path injects call-site tracking with no frameworks present."""
        source = """from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 4)

    def test_torch_import(self, tmp_path: Path) -> None:
        """Sync path is framework-agnostic even with torch import."""
        source = """import torch
from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 5)

    def test_tensorflow_import(self, tmp_path: Path) -> None:
        """Sync path is framework-agnostic even with tensorflow import."""
        source = """import tensorflow
from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 5)

    def test_all_frameworks(self, tmp_path: Path) -> None:
        """Sync path is framework-agnostic even with all GPU frameworks."""
        source = """import torch
import tensorflow
import jax
from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 7)
|
|
|
|
|
|
class TestInjectProfilingPerformanceMode:
    """Tests for inject_profiling_into_existing_test in PERFORMANCE mode.

    The sync path produces identical framework-agnostic output regardless of
    mode -- both BEHAVIOR and PERFORMANCE use _codeflash_call_site tracking.
    """

    def _instrument_and_check(
        self, tmp_path: Path, source: str, call_line: int
    ) -> None:
        """Write *source* to a temp test file, instrument it in PERFORMANCE
        mode, and verify the framework-agnostic call-site output."""
        test_file = tmp_path / "test_example.py"
        test_file.write_text(source)

        success, instrumented_code = inject_profiling_into_existing_test(
            test_path=test_file,
            call_positions=[CodePosition(call_line, 13)],
            function_to_optimize=_make_func(),
            tests_project_root=tmp_path,
            mode=TestingMode.PERFORMANCE,
        )

        assert success is True
        _assert_sync_call_site_output(instrumented_code)

    def test_no_frameworks(self, tmp_path: Path) -> None:
        """Sync path injects call-site tracking with no frameworks present."""
        source = """from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 4)

    def test_torch_import(self, tmp_path: Path) -> None:
        """Sync path is framework-agnostic even with torch import."""
        source = """import torch
from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 5)

    def test_tensorflow_import(self, tmp_path: Path) -> None:
        """Sync path is framework-agnostic even with tensorflow import."""
        source = """import tensorflow
from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 5)

    def test_all_frameworks(self, tmp_path: Path) -> None:
        """Sync path is framework-agnostic even with all GPU frameworks."""
        source = """import torch
import tensorflow
import jax
from mymodule import my_function

def test_my_function():
    result = my_function(1, 2)
    assert result == 3
"""
        self._instrument_and_check(tmp_path, source, 7)
|