fix tests

parent 99f77da4eb
commit 6ee61fd383

22 changed files with 206 additions and 190 deletions
@@ -715,7 +715,12 @@ def inject_profiling_into_existing_test(
         from codeflash.languages.javascript.instrument import inject_profiling_into_existing_js_test

         return inject_profiling_into_existing_js_test(
-            test_string=test_string, call_positions=call_positions, function_to_optimize=function_to_optimize, tests_project_root=tests_project_root, mode= mode.value, test_path=test_path
+            test_string=test_string,
+            call_positions=call_positions,
+            function_to_optimize=function_to_optimize,
+            tests_project_root=tests_project_root,
+            mode=mode.value,
+            test_path=test_path,
         )

     if is_java():
@@ -725,11 +730,14 @@ def inject_profiling_into_existing_test(

     if function_to_optimize.is_async:
         return inject_async_profiling_into_existing_test(
-            test_string=test_string, call_positions=call_positions, function_to_optimize=function_to_optimize, tests_project_root=tests_project_root, mode=mode.value, test_path=test_path
+            test_string=test_string,
+            call_positions=call_positions,
+            function_to_optimize=function_to_optimize,
+            tests_project_root=tests_project_root,
+            mode=mode.value,
+            test_path=test_path,
         )

     used_frameworks = detect_frameworks_from_code(test_string)
     try:
         tree = ast.parse(test_string)
@@ -572,7 +572,7 @@ class LanguageSupport(Protocol):
         function_to_optimize: Any,
         tests_project_root: Path,
         mode: str,
-        test_path: Path | None
+        test_path: Path | None,
     ) -> tuple[bool, str | None]:
         """Inject profiling code into an existing test file.

@ -13,10 +13,7 @@ import subprocess
|
|||
import xml.etree.ElementTree as ET
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -184,7 +181,6 @@ def _get_maven_project_info(project_root: Path) -> JavaProjectInfo | None:
|
|||
if test_src.exists():
|
||||
test_roots.append(test_src)
|
||||
|
||||
|
||||
# Check for custom source directories in pom.xml <build> section
|
||||
for build in [root.find("m:build", ns), root.find("build")]:
|
||||
if build is not None:
|
||||
|
|
@@ -312,9 +308,9 @@ def find_maven_executable(project_root: Path | None = None) -> str | None:
             return str(mvnw_cmd_path)

     # Check for Maven wrapper in current directory
-    if os.path.exists("mvnw"):
+    if Path("mvnw").exists():
         return "./mvnw"
-    if os.path.exists("mvnw.cmd"):
+    if Path("mvnw.cmd").exists():
         return "mvnw.cmd"

     # Check system Maven
@@ -348,9 +344,9 @@ def find_gradle_executable(project_root: Path | None = None) -> str | None:
             return str(gradlew_bat_path)

     # Check for Gradle wrapper in current directory
-    if os.path.exists("gradlew"):
+    if Path("gradlew").exists():
         return "./gradlew"
-    if os.path.exists("gradlew.bat"):
+    if Path("gradlew.bat").exists():
         return "gradlew.bat"

     # Check system Gradle
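(Editor's note, not part of the diff.) The two wrapper-lookup hunks above replace os.path.exists(...) with Path(...).exists(), matching the pathlib style used elsewhere in the file. A minimal sketch of the equivalence, with a hypothetical helper name — both forms check the same path relative to the current working directory:

from pathlib import Path

def find_wrapper(name: str) -> str | None:
    # Path(name).exists() checks the same relative path that
    # os.path.exists(name) would, but stays inside the pathlib API.
    if Path(name).exists():
        # Prefix with ./ so the shell resolves it from the current directory.
        return f"./{name}"
    return None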
@@ -660,7 +656,7 @@ def add_codeflash_dependency_to_pom(pom_path: Path) -> bool:

     new_content = content[:idx] + CODEFLASH_DEPENDENCY_SNIPPET
     # Skip the original </dependencies> tag since our snippet includes it
-    new_content += content[idx + len(closing_tag):]
+    new_content += content[idx + len(closing_tag) :]

     pom_path.write_text(new_content, encoding="utf-8")
     logger.info("Added codeflash-runtime dependency to pom.xml")
@@ -90,7 +90,7 @@ def _find_java_executable() -> str | None:
     if platform.system() == "Darwin":
         # Try to extract Java home from Maven (which always finds it)
         try:
-            result = subprocess.run(["mvn", "--version"], capture_output=True, text=True, timeout=10)
+            result = subprocess.run(["mvn", "--version"], capture_output=True, text=True, timeout=10, check=False)
             for line in result.stdout.split("\n"):
                 if "runtime:" in line:
                     runtime_path = line.split("runtime:")[-1].strip()
@@ -116,7 +116,7 @@ def _find_java_executable() -> str | None:
     if java_path:
         # Verify it's a real Java, not a macOS stub
        try:
-            result = subprocess.run([java_path, "--version"], capture_output=True, text=True, timeout=5)
+            result = subprocess.run([java_path, "--version"], capture_output=True, text=True, timeout=5, check=False)
            if result.returncode == 0:
                return java_path
        except (subprocess.TimeoutExpired, FileNotFoundError):
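(Editor's note, not part of the diff.) Both probes in _find_java_executable now pass check=False explicitly. This looks like a lint-driven change (e.g. ruff's PLW1510 "subprocess-run-without-check" rule) that keeps the original behaviour: the return code is inspected manually instead of letting subprocess.run raise. A minimal sketch, assuming only a java binary on PATH:

import subprocess

def java_version_ok(java_path: str = "java") -> bool:
    try:
        # check=False: a non-zero exit status is handled below rather than
        # raising CalledProcessError, matching the behaviour before this commit.
        result = subprocess.run(
            [java_path, "--version"], capture_output=True, text=True, timeout=5, check=False
        )
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return False
    return result.returncode == 0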
@@ -179,13 +179,20 @@ def compare_test_results(
         [
             java_exe,
             # Java 16+ module system: Kryo needs reflective access to internal JDK classes
-            "--add-opens", "java.base/java.util=ALL-UNNAMED",
-            "--add-opens", "java.base/java.lang=ALL-UNNAMED",
-            "--add-opens", "java.base/java.lang.reflect=ALL-UNNAMED",
-            "--add-opens", "java.base/java.io=ALL-UNNAMED",
-            "--add-opens", "java.base/java.math=ALL-UNNAMED",
-            "--add-opens", "java.base/java.net=ALL-UNNAMED",
-            "--add-opens", "java.base/java.util.zip=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.util=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.lang=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.lang.reflect=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.io=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.math=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.net=ALL-UNNAMED",
+            "--add-opens",
+            "java.base/java.util.zip=ALL-UNNAMED",
             "-cp",
             str(jar_path),
             "com.codeflash.Comparator",
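(Editor's note, not part of the diff.) Splitting each "--add-opens", "java.base/...=ALL-UNNAMED" pair across two list elements is purely a formatting change: the flag and its value were already separate argv entries in the command list. A hedged sketch of how these JVM flags end up in an argv list — names such as build_comparator_cmd and jar_path are placeholders, not the project's real API:

import subprocess
from pathlib import Path

ADD_OPENS_TARGETS = [
    "java.base/java.util", "java.base/java.lang", "java.base/java.lang.reflect",
    "java.base/java.io", "java.base/java.math", "java.base/java.net", "java.base/java.util.zip",
]

def build_comparator_cmd(java_exe: str, jar_path: Path, *args: str) -> list[str]:
    cmd = [java_exe]
    for target in ADD_OPENS_TARGETS:
        # Each flag and its value are separate argv entries; the JVM accepts
        # the two-token form "--add-opens <module>/<package>=ALL-UNNAMED".
        cmd += ["--add-opens", f"{target}=ALL-UNNAMED"]
    cmd += ["-cp", str(jar_path), "com.codeflash.Comparator", *args]
    return cmd

# Illustrative usage: subprocess.run(build_comparator_cmd("java", Path("runtime.jar")), check=False)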
@ -14,11 +14,10 @@ from __future__ import annotations
|
|||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, ClassVar
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from tree_sitter import Node
|
||||
from pathlib import Path
|
||||
|
||||
from codeflash.languages.base import FunctionInfo
|
||||
|
||||
|
|
@@ -59,7 +58,7 @@ class ConcurrencyInfo:
     async_method_calls: list[str] = None
     """List of async/concurrent method calls."""

-    def __post_init__(self):
+    def __post_init__(self) -> None:
         if self.async_method_calls is None:
             self.async_method_calls = []

@@ -68,7 +67,7 @@ class JavaConcurrencyAnalyzer:
     """Analyzes Java code for concurrency patterns."""

     # Concurrent patterns to detect
-    COMPLETABLE_FUTURE_PATTERNS = {
+    COMPLETABLE_FUTURE_PATTERNS: ClassVar[set[str]] = {
         "CompletableFuture",
         "supplyAsync",
         "runAsync",
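(Editor's note, not part of the diff.) The pattern sets on JavaConcurrencyAnalyzer are now annotated as ClassVar[set[str]], marking them as class-level constants rather than per-instance attributes with mutable defaults; this is the usual fix for ruff's RUF012-style warning. A minimal sketch of the idiom, using a made-up class rather than the project's real one:

from typing import ClassVar

class PatternRegistry:
    # ClassVar tells type checkers this set is shared class state,
    # not an instance attribute initialised from a mutable default.
    EXECUTOR_PATTERNS: ClassVar[set[str]] = {"ExecutorService", "Executors"}

    def matches(self, name: str) -> bool:
        return name in self.EXECUTOR_PATTERNS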
@ -80,7 +79,7 @@ class JavaConcurrencyAnalyzer:
|
|||
"anyOf",
|
||||
}
|
||||
|
||||
EXECUTOR_PATTERNS = {
|
||||
EXECUTOR_PATTERNS: ClassVar[set[str]] = {
|
||||
"ExecutorService",
|
||||
"Executors",
|
||||
"ThreadPoolExecutor",
|
||||
|
|
@ -93,14 +92,14 @@ class JavaConcurrencyAnalyzer:
|
|||
"newWorkStealingPool",
|
||||
}
|
||||
|
||||
VIRTUAL_THREAD_PATTERNS = {
|
||||
VIRTUAL_THREAD_PATTERNS: ClassVar[set[str]] = {
|
||||
"newVirtualThreadPerTaskExecutor",
|
||||
"Thread.startVirtualThread",
|
||||
"Thread.ofVirtual",
|
||||
"VirtualThreads",
|
||||
}
|
||||
|
||||
CONCURRENT_COLLECTION_PATTERNS = {
|
||||
CONCURRENT_COLLECTION_PATTERNS: ClassVar[set[str]] = {
|
||||
"ConcurrentHashMap",
|
||||
"ConcurrentLinkedQueue",
|
||||
"ConcurrentLinkedDeque",
|
||||
|
|
@ -113,7 +112,7 @@ class JavaConcurrencyAnalyzer:
|
|||
"ArrayBlockingQueue",
|
||||
}
|
||||
|
||||
ATOMIC_PATTERNS = {
|
||||
ATOMIC_PATTERNS: ClassVar[set[str]] = {
|
||||
"AtomicInteger",
|
||||
"AtomicLong",
|
||||
"AtomicBoolean",
|
||||
|
|
@ -123,7 +122,7 @@ class JavaConcurrencyAnalyzer:
|
|||
"AtomicReferenceArray",
|
||||
}
|
||||
|
||||
def __init__(self, analyzer=None):
|
||||
def __init__(self, analyzer=None) -> None:
|
||||
"""Initialize concurrency analyzer.
|
||||
|
||||
Args:
|
||||
|
|
@ -306,9 +305,7 @@ class JavaConcurrencyAnalyzer:
|
|||
return suggestions
|
||||
|
||||
|
||||
def analyze_function_concurrency(
|
||||
func: FunctionInfo, source: str | None = None, analyzer=None
|
||||
) -> ConcurrencyInfo:
|
||||
def analyze_function_concurrency(func: FunctionInfo, source: str | None = None, analyzer=None) -> ConcurrencyInfo:
|
||||
"""Analyze a function for concurrency patterns.
|
||||
|
||||
Convenience function that creates a JavaConcurrencyAnalyzer and analyzes the function.
|
||||
|
|
|
|||
|
|
@ -10,10 +10,11 @@ from __future__ import annotations
|
|||
import json
|
||||
import logging
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
from tree_sitter import Node
|
||||
|
||||
from codeflash.languages.base import FunctionInfo
|
||||
|
|
@ -34,6 +35,7 @@ class JavaLineProfiler:
|
|||
instrumented = profiler.instrument_source(source, file_path, functions)
|
||||
# Run instrumented code
|
||||
results = JavaLineProfiler.parse_results(Path("profile.json"))
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, output_file: Path) -> None:
|
||||
|
|
@ -48,13 +50,7 @@ class JavaLineProfiler:
|
|||
self.profiler_var = "__codeflashProfiler__"
|
||||
self.line_contents: dict[str, str] = {}
|
||||
|
||||
def instrument_source(
|
||||
self,
|
||||
source: str,
|
||||
file_path: Path,
|
||||
functions: list[FunctionInfo],
|
||||
analyzer=None,
|
||||
) -> str:
|
||||
def instrument_source(self, source: str, file_path: Path, functions: list[FunctionInfo], analyzer=None) -> str:
|
||||
"""Instrument Java source code with line profiling.
|
||||
|
||||
Adds profiling instrumentation to track line-level execution for the
|
||||
|
|
@ -106,9 +102,7 @@ class JavaLineProfiler:
|
|||
import_end_idx = i
|
||||
break
|
||||
|
||||
lines_with_profiler = (
|
||||
lines[:import_end_idx] + [profiler_class_code + "\n"] + lines[import_end_idx:]
|
||||
)
|
||||
lines_with_profiler = [*lines[:import_end_idx], profiler_class_code + "\n", *lines[import_end_idx:]]
|
||||
|
||||
result = "".join(lines_with_profiler)
|
||||
if not analyzer.validate_syntax(result):
|
||||
|
|
@ -121,7 +115,7 @@ class JavaLineProfiler:
|
|||
# Store line contents as a simple map (embedded directly in code)
|
||||
line_contents_code = self._generate_line_contents_map()
|
||||
|
||||
return f'''
|
||||
return f"""
|
||||
/**
|
||||
* Codeflash line profiler - tracks per-line execution statistics.
|
||||
* Auto-generated - do not modify.
|
||||
|
|
@ -132,7 +126,7 @@ class {self.profiler_class} {{
|
|||
private static final ThreadLocal<Long> lastLineTime = new ThreadLocal<>();
|
||||
private static final ThreadLocal<String> lastKey = new ThreadLocal<>();
|
||||
private static final java.util.concurrent.atomic.AtomicInteger totalHits = new java.util.concurrent.atomic.AtomicInteger(0);
|
||||
private static final String OUTPUT_FILE = "{str(self.output_file)}";
|
||||
private static final String OUTPUT_FILE = "{self.output_file!s}";
|
||||
|
||||
static class LineStats {{
|
||||
public final java.util.concurrent.atomic.AtomicLong hits = new java.util.concurrent.atomic.AtomicLong(0);
|
||||
|
|
@ -247,15 +241,9 @@ class {self.profiler_class} {{
|
|||
Runtime.getRuntime().addShutdownHook(new Thread(() -> save()));
|
||||
}}
|
||||
}}
|
||||
'''
|
||||
"""
|
||||
|
||||
def _instrument_function(
|
||||
self,
|
||||
func: FunctionInfo,
|
||||
lines: list[str],
|
||||
file_path: Path,
|
||||
analyzer,
|
||||
) -> list[str]:
|
||||
def _instrument_function(self, func: FunctionInfo, lines: list[str], file_path: Path, analyzer) -> list[str]:
|
||||
"""Instrument a single function with line profiling.
|
||||
|
||||
Args:
|
||||
|
|
@ -300,9 +288,7 @@ class {self.profiler_class} {{
|
|||
|
||||
# Add the line with enterFunction() call after it
|
||||
instrumented_lines.append(line)
|
||||
instrumented_lines.append(
|
||||
f"{body_indent}{self.profiler_class}.enterFunction();\n"
|
||||
)
|
||||
instrumented_lines.append(f"{body_indent}{self.profiler_class}.enterFunction();\n")
|
||||
function_entry_added = True
|
||||
continue
|
||||
|
||||
|
|
@ -313,8 +299,7 @@ class {self.profiler_class} {{
|
|||
and not stripped.startswith("//")
|
||||
and not stripped.startswith("/*")
|
||||
and not stripped.startswith("*")
|
||||
and stripped != "}"
|
||||
and stripped != "};"
|
||||
and stripped not in ("}", "};")
|
||||
):
|
||||
# Get indentation
|
||||
indent = len(line) - len(line.lstrip())
|
||||
|
|
@ -326,8 +311,7 @@ class {self.profiler_class} {{
|
|||
|
||||
# Add hit() call before the line
|
||||
profiled_line = (
|
||||
f"{indent_str}{self.profiler_class}.hit("
|
||||
f'"{file_path.as_posix()}", {global_line_num});\n{line}'
|
||||
f'{indent_str}{self.profiler_class}.hit("{file_path.as_posix()}", {global_line_num});\n{line}'
|
||||
)
|
||||
instrumented_lines.append(profiled_line)
|
||||
else:
|
||||
|
|
@@ -450,8 +434,8 @@ class {self.profiler_class} {{
             result["str_out"] = format_line_profile_results(result)
             return result

-        except Exception as e:
-            logger.error("Failed to parse line profile results: %s", e)
+        except Exception:
+            logger.exception("Failed to parse line profile results")
             return {"timings": {}, "unit": 1e-9, "raw_data": {}, "str_out": ""}

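(Editor's note, not part of the diff.) Several handlers in this commit switch from logger.error(...) inside "except Exception as e:" to a bare "except Exception:" with logger.exception(...). The traceback is then attached automatically and the unused exception binding disappears. A minimal self-contained sketch of the pattern:

import json
import logging

logger = logging.getLogger(__name__)

def parse_profile(path: str) -> dict:
    try:
        with open(path, encoding="utf-8") as f:
            return json.load(f)
    except Exception:
        # logger.exception logs at ERROR level and appends the active
        # traceback, so the exception object itself is no longer needed.
        logger.exception("Failed to parse line profile results")
        return {"timings": {}, "unit": 1e-9, "raw_data": {}, "str_out": ""}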
@ -497,8 +481,6 @@ def format_line_profile_results(results: dict, file_path: Path | None = None) ->
|
|||
avg_ms = time_ms / hits if hits > 0 else 0
|
||||
content = stats.get("content", "")[:50] # Truncate long lines
|
||||
|
||||
output.append(
|
||||
f"{line_num:6d} | {hits:10d} | {time_ms:12.3f} | {avg_ms:12.6f} | {content}"
|
||||
)
|
||||
output.append(f"{line_num:6d} | {hits:10d} | {time_ms:12.3f} | {avg_ms:12.6f} | {content}")
|
||||
|
||||
return "\n".join(output)
|
||||
|
|
|
|||
|
|
@ -308,9 +308,7 @@ class JavaAssertTransformer:
|
|||
# - Assertions.assertEquals (JUnit 5)
|
||||
# - org.junit.jupiter.api.Assertions.assertEquals (fully qualified)
|
||||
all_assertions = "|".join(JUNIT5_ALL_ASSERTIONS)
|
||||
pattern = re.compile(
|
||||
rf"(\s*)((?:(?:\w+\.)*Assert(?:ions)?\.)?({all_assertions}))\s*\(", re.MULTILINE
|
||||
)
|
||||
pattern = re.compile(rf"(\s*)((?:(?:\w+\.)*Assert(?:ions)?\.)?({all_assertions}))\s*\(", re.MULTILINE)
|
||||
|
||||
for match in pattern.finditer(source):
|
||||
leading_ws = match.group(1)
|
||||
|
|
@ -559,8 +557,7 @@ class JavaAssertTransformer:
|
|||
return results
|
||||
|
||||
def _collect_target_invocations(
|
||||
self, node, wrapper_bytes: bytes, content_bytes: bytes,
|
||||
base_offset: int, out: list[TargetCall],
|
||||
self, node, wrapper_bytes: bytes, content_bytes: bytes, base_offset: int, out: list[TargetCall]
|
||||
) -> None:
|
||||
"""Recursively walk the AST and collect method_invocation nodes that match self.func_name."""
|
||||
prefix_len = len(self._TS_WRAPPER_PREFIX_BYTES)
|
||||
|
|
@ -570,15 +567,14 @@ class JavaAssertTransformer:
|
|||
if name_node and self.analyzer.get_node_text(name_node, wrapper_bytes) == self.func_name:
|
||||
start = node.start_byte - prefix_len
|
||||
end = node.end_byte - prefix_len
|
||||
if 0 <= start and end <= len(content_bytes):
|
||||
if start >= 0 and end <= len(content_bytes):
|
||||
out.append(self._build_target_call(node, wrapper_bytes, content_bytes, start, end, base_offset))
|
||||
|
||||
for child in node.children:
|
||||
self._collect_target_invocations(child, wrapper_bytes, content_bytes, base_offset, out)
|
||||
|
||||
def _build_target_call(
|
||||
self, node, wrapper_bytes: bytes, content_bytes: bytes,
|
||||
start_byte: int, end_byte: int, base_offset: int,
|
||||
self, node, wrapper_bytes: bytes, content_bytes: bytes, start_byte: int, end_byte: int, base_offset: int
|
||||
) -> TargetCall:
|
||||
"""Build a TargetCall from a tree-sitter method_invocation node."""
|
||||
get_text = self.analyzer.get_node_text
|
||||
|
|
@ -629,7 +625,6 @@ class JavaAssertTransformer:
|
|||
# Handle generic types: Type<Generic> varName = ...
|
||||
match = self._assign_re.search(source, line_start, assertion_start)
|
||||
|
||||
|
||||
if match:
|
||||
var_type = match.group(1).strip()
|
||||
var_name = match.group(2).strip()
|
||||
|
|
@ -885,18 +880,12 @@ class JavaAssertTransformer:
|
|||
f"catch (Exception _cf_ignored{counter}) {{}}"
|
||||
)
|
||||
|
||||
return (
|
||||
f"{ws}try {{ {code_to_run} }} "
|
||||
f"catch (Exception _cf_ignored{counter}) {{}}"
|
||||
)
|
||||
return f"{ws}try {{ {code_to_run} }} catch (Exception _cf_ignored{counter}) {{}}"
|
||||
|
||||
# If no lambda body found, try to extract from target calls
|
||||
if assertion.target_calls:
|
||||
call = assertion.target_calls[0]
|
||||
return (
|
||||
f"{ws}try {{ {call.full_call}; }} "
|
||||
f"catch (Exception _cf_ignored{counter}) {{}}"
|
||||
)
|
||||
return f"{ws}try {{ {call.full_call}; }} catch (Exception _cf_ignored{counter}) {{}}"
|
||||
|
||||
# Fallback: comment out the assertion
|
||||
return f"{ws}// Removed assertThrows: could not extract callable"
|
||||
|
|
|
|||
|
|
@ -12,11 +12,8 @@ from typing import TYPE_CHECKING, Any
|
|||
from codeflash.languages.base import Language, LanguageSupport
|
||||
from codeflash.languages.java.build_tools import find_test_root
|
||||
from codeflash.languages.java.comparator import compare_test_results as _compare_test_results
|
||||
from codeflash.languages.java.concurrency_analyzer import analyze_function_concurrency
|
||||
from codeflash.languages.java.config import detect_java_project
|
||||
from codeflash.languages.java.concurrency_analyzer import (
|
||||
JavaConcurrencyAnalyzer,
|
||||
analyze_function_concurrency,
|
||||
)
|
||||
from codeflash.languages.java.context import extract_code_context, find_helper_functions
|
||||
from codeflash.languages.java.discovery import discover_functions, discover_functions_from_source
|
||||
from codeflash.languages.java.formatter import format_java_code, normalize_java_code
|
||||
|
|
@ -42,6 +39,7 @@ if TYPE_CHECKING:
|
|||
|
||||
from codeflash.discovery.functions_to_optimize import FunctionToOptimize
|
||||
from codeflash.languages.base import CodeContext, FunctionFilterCriteria, HelperFunction, TestInfo, TestResult
|
||||
from codeflash.languages.java.concurrency_analyzer import ConcurrencyInfo
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -114,7 +112,7 @@ class JavaSupport(LanguageSupport):
|
|||
"""Find helper functions called by the target function."""
|
||||
return find_helper_functions(function, project_root, analyzer=self._analyzer)
|
||||
|
||||
def analyze_concurrency(self, function: FunctionInfo, source: str | None = None):
|
||||
def analyze_concurrency(self, function: FunctionToOptimize, source: str | None = None) -> ConcurrencyInfo:
|
||||
"""Analyze a function for concurrency patterns.
|
||||
|
||||
Args:
|
||||
|
|
@ -288,14 +286,11 @@ class JavaSupport(LanguageSupport):
|
|||
function_to_optimize: Any,
|
||||
tests_project_root: Path,
|
||||
mode: str,
|
||||
test_path: Path | None
|
||||
test_path: Path | None,
|
||||
) -> tuple[bool, str | None]:
|
||||
"""Inject profiling code into an existing test file."""
|
||||
return instrument_existing_test(
|
||||
test_string=test_string,
|
||||
function_to_optimize=function_to_optimize,
|
||||
mode=mode,
|
||||
test_path=test_path
|
||||
test_string=test_string, function_to_optimize=function_to_optimize, mode=mode, test_path=test_path
|
||||
)
|
||||
|
||||
def instrument_source_for_line_profiler(
|
||||
|
|
@ -325,8 +320,8 @@ class JavaSupport(LanguageSupport):
|
|||
func_info.file_path.write_text(instrumented, encoding="utf-8")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error("Failed to instrument %s for line profiling: %s", func_info.function_name, e)
|
||||
except Exception:
|
||||
logger.exception("Failed to instrument %s for line profiling", func_info.function_name)
|
||||
return False
|
||||
|
||||
def parse_line_profile_results(self, line_profiler_output_file: Path) -> dict:
|
||||
|
|
|
|||
|
|
@@ -432,27 +432,27 @@ def run_behavioral_tests(

     # Debug: Log Maven result and coverage file status
     if enable_coverage:
-        logger.info(f"Maven verify completed with return code: {result.returncode}")
+        logger.info("Maven verify completed with return code: %s", result.returncode)
         if result.returncode != 0:
             logger.warning(
-                f"Maven verify had non-zero return code: {result.returncode}. Coverage data may be incomplete."
+                "Maven verify had non-zero return code: %s. Coverage data may be incomplete.", result.returncode
             )

     # Log coverage file status after Maven verify
     if enable_coverage and coverage_xml_path:
         jacoco_exec_path = target_dir / "jacoco.exec"
-        logger.info(f"Coverage paths - target_dir: {target_dir}, coverage_xml_path: {coverage_xml_path}")
+        logger.info("Coverage paths - target_dir: %s, coverage_xml_path: %s", target_dir, coverage_xml_path)
         if jacoco_exec_path.exists():
-            logger.info(f"JaCoCo exec file exists: {jacoco_exec_path} ({jacoco_exec_path.stat().st_size} bytes)")
+            logger.info("JaCoCo exec file exists: %s (%s bytes)", jacoco_exec_path, jacoco_exec_path.stat().st_size)
         else:
-            logger.warning(f"JaCoCo exec file not found: {jacoco_exec_path} - JaCoCo agent may not have run")
+            logger.warning("JaCoCo exec file not found: %s - JaCoCo agent may not have run", jacoco_exec_path)
         if coverage_xml_path.exists():
             file_size = coverage_xml_path.stat().st_size
-            logger.info(f"JaCoCo XML report exists: {coverage_xml_path} ({file_size} bytes)")
+            logger.info("JaCoCo XML report exists: %s (%s bytes)", coverage_xml_path, file_size)
             if file_size == 0:
                 logger.warning("JaCoCo XML report is empty - report generation may have failed")
         else:
-            logger.warning(f"JaCoCo XML report not found: {coverage_xml_path} - verify phase may not have completed")
+            logger.warning("JaCoCo XML report not found: %s - verify phase may not have completed", coverage_xml_path)

     # Return tuple matching the expected signature:
     # (result_xml_path, run_result, coverage_database_file, coverage_config_file)
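(Editor's note, not part of the diff.) The logging hunks in this file convert f-strings to printf-style placeholders with the values passed as extra arguments. With %s placeholders the message is only interpolated if the record is actually emitted, and log aggregators see one message template instead of many unique strings. A short sketch of the two forms, standard library only:

import logging

logger = logging.getLogger(__name__)
returncode = 1

# Eager: the f-string is built even when INFO is disabled.
logger.info(f"Maven verify completed with return code: {returncode}")

# Lazy: formatting is deferred until a handler actually emits the record.
logger.info("Maven verify completed with return code: %s", returncode)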
@ -610,13 +610,20 @@ def _run_tests_direct(
|
|||
cmd = [
|
||||
str(java),
|
||||
# Java 16+ module system: Kryo needs reflective access to internal JDK classes
|
||||
"--add-opens", "java.base/java.util=ALL-UNNAMED",
|
||||
"--add-opens", "java.base/java.lang=ALL-UNNAMED",
|
||||
"--add-opens", "java.base/java.lang.reflect=ALL-UNNAMED",
|
||||
"--add-opens", "java.base/java.io=ALL-UNNAMED",
|
||||
"--add-opens", "java.base/java.math=ALL-UNNAMED",
|
||||
"--add-opens", "java.base/java.net=ALL-UNNAMED",
|
||||
"--add-opens", "java.base/java.util.zip=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.util=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.lang=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.lang.reflect=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.io=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.math=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.net=ALL-UNNAMED",
|
||||
"--add-opens",
|
||||
"java.base/java.util.zip=ALL-UNNAMED",
|
||||
"-cp",
|
||||
classpath,
|
||||
"org.junit.platform.console.ConsoleLauncher",
|
||||
|
|
@ -1219,16 +1226,14 @@ def _run_maven_tests(
|
|||
# These flags are safe no-ops on older Java versions.
|
||||
# Note: This overrides JaCoCo's argLine for the forked JVM, but JaCoCo coverage
|
||||
# is handled separately via enable_coverage and the verify phase.
|
||||
add_opens_flags = " ".join(
|
||||
[
|
||||
"--add-opens java.base/java.util=ALL-UNNAMED",
|
||||
"--add-opens java.base/java.lang=ALL-UNNAMED",
|
||||
"--add-opens java.base/java.lang.reflect=ALL-UNNAMED",
|
||||
"--add-opens java.base/java.io=ALL-UNNAMED",
|
||||
"--add-opens java.base/java.math=ALL-UNNAMED",
|
||||
"--add-opens java.base/java.net=ALL-UNNAMED",
|
||||
"--add-opens java.base/java.util.zip=ALL-UNNAMED",
|
||||
]
|
||||
add_opens_flags = (
|
||||
"--add-opens java.base/java.util=ALL-UNNAMED"
|
||||
" --add-opens java.base/java.lang=ALL-UNNAMED"
|
||||
" --add-opens java.base/java.lang.reflect=ALL-UNNAMED"
|
||||
" --add-opens java.base/java.io=ALL-UNNAMED"
|
||||
" --add-opens java.base/java.math=ALL-UNNAMED"
|
||||
" --add-opens java.base/java.net=ALL-UNNAMED"
|
||||
" --add-opens java.base/java.util.zip=ALL-UNNAMED"
|
||||
)
|
||||
cmd.append(f"-DargLine={add_opens_flags}")
|
||||
|
||||
|
|
@ -1292,14 +1297,16 @@ def _run_maven_tests(
|
|||
|
||||
if has_compilation_error:
|
||||
logger.error(
|
||||
f"Maven compilation failed for {mode} tests. "
|
||||
f"Check that generated test code is syntactically valid Java. "
|
||||
f"Return code: {result.returncode}"
|
||||
"Maven compilation failed for %s tests. "
|
||||
"Check that generated test code is syntactically valid Java. "
|
||||
"Return code: %s",
|
||||
mode,
|
||||
result.returncode,
|
||||
)
|
||||
# Log first 50 lines of output to help diagnose compilation errors
|
||||
output_lines = combined_output.split("\n")
|
||||
error_context = "\n".join(output_lines[:50]) if len(output_lines) > 50 else combined_output
|
||||
logger.error(f"Maven compilation error output:\n{error_context}")
|
||||
logger.error("Maven compilation error output:\n%s", error_context)
|
||||
|
||||
return result
|
||||
|
||||
|
|
@ -1435,8 +1442,7 @@ def _path_to_class_name(path: Path, source_dirs: list[str] | None = None) -> str
|
|||
idx = path_str.index(normalized) + len(normalized)
|
||||
remainder = path_str[idx:].lstrip("/")
|
||||
if remainder:
|
||||
class_name = remainder.replace("/", ".").removesuffix(".java")
|
||||
return class_name
|
||||
return remainder.replace("/", ".").removesuffix(".java")
|
||||
|
||||
# Look for standard Maven/Gradle source directories
|
||||
# Find 'java' that comes after 'main' or 'test'
|
||||
|
|
|
|||
|
|
@@ -1941,7 +1941,7 @@ class JavaScriptSupport:
         function_to_optimize: Any,
         tests_project_root: Path,
         mode: str,
-        test_path: Path|None,
+        test_path: Path | None,
     ) -> tuple[bool, str | None]:
         """Inject profiling code into an existing JavaScript test file.

@ -800,7 +800,9 @@ class FunctionOptimizer:
|
|||
logger.debug(
|
||||
f"[JAVA] Detected Java sources root: {java_sources_root} (from tests_root: {tests_root})"
|
||||
)
|
||||
logger.debug(f"[JAVA-ROOT] Returning Java sources root: {java_sources_root}, tests_root was: {tests_root}")
|
||||
logger.debug(
|
||||
f"[JAVA-ROOT] Returning Java sources root: {java_sources_root}, tests_root was: {tests_root}"
|
||||
)
|
||||
return java_sources_root
|
||||
|
||||
# If no standard package prefix found, check if there's a 'java' directory
|
||||
|
|
@ -810,7 +812,9 @@ class FunctionOptimizer:
|
|||
# Return up to and including 'java'
|
||||
java_sources_root = Path(*parts[: i + 1])
|
||||
logger.debug(f"[JAVA] Detected Maven-style Java sources root: {java_sources_root}")
|
||||
logger.debug(f"[JAVA-ROOT] Returning Java sources root: {java_sources_root}, tests_root was: {tests_root}")
|
||||
logger.debug(
|
||||
f"[JAVA-ROOT] Returning Java sources root: {java_sources_root}, tests_root was: {tests_root}"
|
||||
)
|
||||
return java_sources_root
|
||||
|
||||
# Default: return tests_root as-is (original behavior)
|
||||
|
|
@ -862,7 +866,7 @@ class FunctionOptimizer:
|
|||
if main_match:
|
||||
main_module_name = main_match.group(1)
|
||||
if package_name.startswith(main_module_name):
|
||||
suffix = package_name[len(main_module_name):]
|
||||
suffix = package_name[len(main_module_name) :]
|
||||
new_package = test_module_name + suffix
|
||||
old_decl = f"package {package_name};"
|
||||
new_decl = f"package {new_package};"
|
||||
|
|
|
|||
|
|
@@ -164,7 +164,15 @@ def _find_project_root(start_path: Path) -> Path | None:

     while current != current.parent:
         # Check for project markers
-        markers = [".git", "pyproject.toml", "package.json", "Cargo.toml", "pom.xml", "build.gradle", "build.gradle.kts"]
+        markers = [
+            ".git",
+            "pyproject.toml",
+            "package.json",
+            "Cargo.toml",
+            "pom.xml",
+            "build.gradle",
+            "build.gradle.kts",
+        ]
         for marker in markers:
             if (current / marker).exists():
                 return current
@ -489,10 +497,17 @@ def _detect_tests_root(project_root: Path, language: str) -> tuple[Path | None,
|
|||
for elem in [build.find("m:testSourceDirectory", ns), build.find("testSourceDirectory")]:
|
||||
if elem is not None and elem.text:
|
||||
# Resolve ${project.basedir}/src -> test_module_dir/src
|
||||
dir_text = elem.text.strip().replace("${project.basedir}/", "").replace("${project.basedir}", ".")
|
||||
dir_text = (
|
||||
elem.text.strip()
|
||||
.replace("${project.basedir}/", "")
|
||||
.replace("${project.basedir}", ".")
|
||||
)
|
||||
resolved = test_module_dir / dir_text
|
||||
if resolved.is_dir():
|
||||
return resolved, f"{test_module_name}/{dir_text} (from {test_module_name}/pom.xml testSourceDirectory)"
|
||||
return (
|
||||
resolved,
|
||||
f"{test_module_name}/{dir_text} (from {test_module_name}/pom.xml testSourceDirectory)",
|
||||
)
|
||||
except ET.ParseError:
|
||||
pass
|
||||
# Test module exists but no custom testSourceDirectory - use the module root
|
||||
|
|
@ -548,8 +563,6 @@ def _detect_test_runner(project_root: Path, language: str) -> tuple[str, str]:
|
|||
|
||||
def _detect_java_test_runner(project_root: Path) -> tuple[str, str]:
|
||||
"""Detect Java test framework."""
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
pom_path = project_root / "pom.xml"
|
||||
if pom_path.exists():
|
||||
try:
|
||||
|
|
|
|||
|
|
@ -231,7 +231,9 @@ class JacocoCoverageUtils:
|
|||
f"File preview: {content_preview!r}"
|
||||
)
|
||||
except Exception as read_err:
|
||||
logger.warning(f"Failed to parse JaCoCo XML file at '{jacoco_xml_path}': {e}. Could not read file: {read_err}")
|
||||
logger.warning(
|
||||
f"Failed to parse JaCoCo XML file at '{jacoco_xml_path}': {e}. Could not read file: {read_err}"
|
||||
)
|
||||
return CoverageData.create_empty(source_code_path, function_name, code_context)
|
||||
|
||||
# Determine expected source file name from path
|
||||
|
|
|
|||
|
|
@@ -27,9 +27,7 @@ def safe_repr(obj: object) -> str:
         return f"<repr failed: {type(e).__name__}: {e}>"


-def compare_test_results(
-    original_results: TestResults, candidate_results: TestResults
-) -> tuple[bool, list[TestDiff]]:
+def compare_test_results(original_results: TestResults, candidate_results: TestResults) -> tuple[bool, list[TestDiff]]:
     # This is meant to be only called with test results for the first loop index
     if len(original_results) == 0 or len(candidate_results) == 0:
         return False, []  # empty test results are not equal
@ -102,9 +100,7 @@ def compare_test_results(
|
|||
)
|
||||
)
|
||||
|
||||
elif not comparator(
|
||||
original_test_result.return_value, cdd_test_result.return_value, superset_obj=superset_obj
|
||||
):
|
||||
elif not comparator(original_test_result.return_value, cdd_test_result.return_value, superset_obj=superset_obj):
|
||||
test_diffs.append(
|
||||
TestDiff(
|
||||
scope=TestDiffScope.RETURN_VALUE,
|
||||
|
|
@ -129,9 +125,8 @@ def compare_test_results(
|
|||
)
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
elif (
|
||||
(original_test_result.stdout and cdd_test_result.stdout)
|
||||
and not comparator(original_test_result.stdout, cdd_test_result.stdout)
|
||||
elif (original_test_result.stdout and cdd_test_result.stdout) and not comparator(
|
||||
original_test_result.stdout, cdd_test_result.stdout
|
||||
):
|
||||
test_diffs.append(
|
||||
TestDiff(
|
||||
|
|
|
|||
|
|
@ -1002,7 +1002,9 @@ def parse_test_xml(
|
|||
# Always use tests_project_rootdir since pytest is now the test runner for all frameworks
|
||||
base_dir = test_config.tests_project_rootdir
|
||||
logger.debug(f"[PARSE-XML] base_dir for resolution: {base_dir}")
|
||||
logger.debug(f"[PARSE-XML] Registered test files: {[str(tf.instrumented_behavior_file_path) for tf in test_files.test_files]}")
|
||||
logger.debug(
|
||||
f"[PARSE-XML] Registered test files: {[str(tf.instrumented_behavior_file_path) for tf in test_files.test_files]}"
|
||||
)
|
||||
|
||||
# For Java: pre-parse fallback stdout once (not per testcase) to avoid O(n²) complexity
|
||||
java_fallback_stdout = None
|
||||
|
|
@ -1067,7 +1069,9 @@ def parse_test_xml(
|
|||
test_file_path = resolve_test_file_from_class_path(test_class_path, base_dir)
|
||||
|
||||
if test_file_path is None:
|
||||
logger.error(f"[PARSE-XML] ERROR: Could not resolve test_class_path={test_class_path}, base_dir={base_dir}")
|
||||
logger.error(
|
||||
f"[PARSE-XML] ERROR: Could not resolve test_class_path={test_class_path}, base_dir={base_dir}"
|
||||
)
|
||||
logger.warning(f"Could not find the test for file name - {test_class_path} ")
|
||||
continue
|
||||
else:
|
||||
|
|
@ -1271,9 +1275,7 @@ def parse_test_xml(
|
|||
str(test_file.instrumented_behavior_file_path or test_file.original_file_path)
|
||||
for test_file in test_files.test_files
|
||||
]
|
||||
logger.info(
|
||||
f"Tests {test_paths_display} failed to run, skipping"
|
||||
)
|
||||
logger.info(f"Tests {test_paths_display} failed to run, skipping")
|
||||
if run_result is not None:
|
||||
stdout, stderr = "", ""
|
||||
try:
|
||||
|
|
|
|||
|
|
@ -109,7 +109,11 @@ def generate_tests(
|
|||
|
||||
# Instrument for behavior verification (renames class)
|
||||
instrumented_behavior_test_source = instrument_generated_java_test(
|
||||
test_code=generated_test_source, function_name=func_name, qualified_name=qualified_name, mode="behavior", function_to_optimize=function_to_optimize
|
||||
test_code=generated_test_source,
|
||||
function_name=func_name,
|
||||
qualified_name=qualified_name,
|
||||
mode="behavior",
|
||||
function_to_optimize=function_to_optimize,
|
||||
)
|
||||
|
||||
# Instrument for performance measurement (adds timing markers)
|
||||
|
|
@ -118,7 +122,7 @@ def generate_tests(
|
|||
function_name=func_name,
|
||||
qualified_name=qualified_name,
|
||||
mode="performance",
|
||||
function_to_optimize=function_to_optimize
|
||||
function_to_optimize=function_to_optimize,
|
||||
)
|
||||
|
||||
logger.debug(f"Instrumented Java tests locally for {func_name}")
|
||||
|
|
|
|||
|
|
@ -809,6 +809,7 @@ def test_sync_sort():
|
|||
os.chdir(run_cwd)
|
||||
|
||||
success, instrumented_test = inject_profiling_into_existing_test(
|
||||
test_code,
|
||||
test_path,
|
||||
[CodePosition(6, 13), CodePosition(10, 13)], # Lines where sync_sorter is called
|
||||
func,
|
||||
|
|
|
|||
|
|
@ -1105,6 +1105,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(4, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1131,6 +1132,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1157,6 +1159,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1183,6 +1186,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1209,6 +1213,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1235,6 +1240,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1261,6 +1267,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1287,6 +1294,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1314,6 +1322,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(6, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1342,6 +1351,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(7, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1376,6 +1386,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(4, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1402,6 +1413,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1428,6 +1440,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1454,6 +1467,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(5, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
@ -1482,6 +1496,7 @@ def test_my_function():
|
|||
func = FunctionToOptimize(function_name="my_function", parents=[], file_path=Path("mymodule.py"))
|
||||
|
||||
success, instrumented_code = inject_profiling_into_existing_test(
|
||||
test_string=code,
|
||||
test_path=test_file,
|
||||
call_positions=[CodePosition(7, 13)],
|
||||
function_to_optimize=func,
|
||||
|
|
|
|||
|
|
@@ -116,7 +116,7 @@ def test_sort():
     func = FunctionToOptimize(function_name="sorter", parents=[], file_path=Path(fto_path))
     os.chdir(run_cwd)
     success, new_test = inject_profiling_into_existing_test(
-        test_path, [CodePosition(6, 13), CodePosition(10, 13)], func, project_root_path, mode=TestingMode.BEHAVIOR
+        code, test_path, [CodePosition(6, 13), CodePosition(10, 13)], func, project_root_path, mode=TestingMode.BEHAVIOR
     )
     os.chdir(original_cwd)
     assert success
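(Editor's note, not part of the diff.) From here on, the test updates all follow one pattern: inject_profiling_into_existing_test now takes the test source as its first positional argument (code), ahead of the test path. A hedged sketch of the updated call shape as it appears in these tests — this is not a standalone script; CodePosition, FunctionToOptimize, TestingMode, inject_profiling_into_existing_test, func and project_root_path come from the surrounding test file's imports and setup, and the path and positions are illustrative:

code = test_path.read_text(encoding="utf-8")
success, new_test = inject_profiling_into_existing_test(
    code,                                         # test source now passed explicitly
    test_path,                                    # path to the test file
    [CodePosition(6, 13), CodePosition(10, 13)],  # call sites of the function under test
    func,                                         # FunctionToOptimize instance
    project_root_path,
    mode=TestingMode.BEHAVIOR,
)
assert success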
@ -287,7 +287,7 @@ def test_sort():
|
|||
tmp_test_path.write_text(code, encoding="utf-8")
|
||||
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
tmp_test_path, [CodePosition(7, 13), CodePosition(12, 13)], fto, tmp_test_path.parent
|
||||
code, tmp_test_path, [CodePosition(7, 13), CodePosition(12, 13)], fto, tmp_test_path.parent
|
||||
)
|
||||
assert success
|
||||
assert new_test.replace('"', "'") == expected.format(
|
||||
|
|
@ -557,7 +557,7 @@ def test_sort():
|
|||
tmp_test_path.write_text(code, encoding="utf-8")
|
||||
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
tmp_test_path, [CodePosition(6, 13), CodePosition(10, 13)], fto, tmp_test_path.parent
|
||||
code, tmp_test_path, [CodePosition(6, 13), CodePosition(10, 13)], fto, tmp_test_path.parent
|
||||
)
|
||||
assert success
|
||||
assert new_test.replace('"', "'") == expected.format(
|
||||
|
|
@ -728,7 +728,7 @@ def test_sort():
|
|||
tmp_test_path.write_text(code, encoding="utf-8")
|
||||
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
tmp_test_path, [CodePosition(6, 13), CodePosition(10, 13)], fto, tmp_test_path.parent
|
||||
code, tmp_test_path, [CodePosition(6, 13), CodePosition(10, 13)], fto, tmp_test_path.parent
|
||||
)
|
||||
assert success
|
||||
assert new_test.replace('"', "'") == expected.format(
|
||||
|
|
|
|||
|
|
@ -292,7 +292,7 @@ async def test_async_function():
|
|||
assert "codeflash_behavior_async" in instrumented_source
|
||||
|
||||
success, instrumented_test_code = inject_profiling_into_existing_test(
|
||||
test_file, [CodePosition(8, 18), CodePosition(11, 19)], func, temp_dir, mode=TestingMode.BEHAVIOR
|
||||
async_test_code, test_file, [CodePosition(8, 18), CodePosition(11, 19)], func, temp_dir, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
|
||||
# For async functions, once source is decorated, test injection should fail
|
||||
|
|
@ -349,7 +349,7 @@ async def test_async_function():
|
|||
|
||||
# Now test the full pipeline with source module path
|
||||
success, instrumented_test_code = inject_profiling_into_existing_test(
|
||||
test_file, [CodePosition(8, 18)], func, temp_dir, mode=TestingMode.PERFORMANCE
|
||||
async_test_code, test_file, [CodePosition(8, 18)], func, temp_dir, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
|
||||
# For async functions, once source is decorated, test injection should fail
|
||||
|
|
@ -413,7 +413,7 @@ async def test_mixed_functions():
|
|||
assert "def sync_function(x: int, y: int) -> int:" in instrumented_source
|
||||
|
||||
success, instrumented_test_code = inject_profiling_into_existing_test(
|
||||
test_file, [CodePosition(8, 18), CodePosition(11, 19)], async_func, temp_dir, mode=TestingMode.BEHAVIOR
|
||||
mixed_test_code, test_file, [CodePosition(8, 18), CodePosition(11, 19)], async_func, temp_dir, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
|
||||
# Async functions should not be instrumented at the test level
|
||||
|
|
@ -592,7 +592,7 @@ async def test_multiple_calls():
|
|||
assert len(call_positions) == 4
|
||||
|
||||
success, instrumented_test_code = inject_profiling_into_existing_test(
|
||||
test_file, call_positions, func, temp_dir, mode=TestingMode.BEHAVIOR
|
||||
test_code_multiple_calls, test_file, call_positions, func, temp_dir, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
|
||||
assert success
|
||||
|
|
|
|||
|
|
@ -194,7 +194,7 @@ import dill as pickle"""
|
|||
run_cwd = Path(__file__).parent.parent.resolve()
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
Path(f.name), [CodePosition(9, 17), CodePosition(13, 17), CodePosition(17, 17)], func, Path(f.name).parent
|
||||
code, Path(f.name), [CodePosition(9, 17), CodePosition(13, 17), CodePosition(17, 17)], func, Path(f.name).parent
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -293,7 +293,7 @@ def test_prepare_image_for_yolo():
|
|||
run_cwd = Path(__file__).parent.parent.resolve()
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
Path(f.name), [CodePosition(10, 14)], func, Path(f.name).parent
|
||||
code, Path(f.name), [CodePosition(10, 14)], func, Path(f.name).parent
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -398,7 +398,7 @@ def test_sort():
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(8, 14), CodePosition(12, 14)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(8, 14), CodePosition(12, 14)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -409,7 +409,7 @@ def test_sort():
|
|||
).replace('"', "'")
|
||||
|
||||
success, new_perf_test = inject_profiling_into_existing_test(
|
||||
test_path,
|
||||
code, test_path,
|
||||
[CodePosition(8, 14), CodePosition(12, 14)],
|
||||
func,
|
||||
project_root_path,
|
||||
|
|
@ -650,11 +650,11 @@ def test_sort_parametrized(input, expected_output):
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(14, 13)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(14, 13)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
assert success
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(14, 13)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(14, 13)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
|
||||
os.chdir(original_cwd)
|
||||
|
|
@ -927,11 +927,11 @@ def test_sort_parametrized_loop(input, expected_output):
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(15, 17)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(15, 17)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
assert success
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(15, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(15, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
|
||||
os.chdir(original_cwd)
|
||||
|
|
@ -1287,11 +1287,11 @@ def test_sort():
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(str(run_cwd))
|
||||
success, new_test_behavior = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(11, 17)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(11, 17)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
assert success
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(11, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(11, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -1661,7 +1661,7 @@ class TestPigLatin(unittest.TestCase):
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test_behavior = inject_profiling_into_existing_test(
|
||||
test_path,
|
||||
code, test_path,
|
||||
[CodePosition(9, 17), CodePosition(13, 17), CodePosition(17, 17)],
|
||||
func,
|
||||
project_root_path,
|
||||
|
|
@ -1669,7 +1669,7 @@ class TestPigLatin(unittest.TestCase):
|
|||
)
|
||||
assert success
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path,
|
||||
code, test_path,
|
||||
[CodePosition(9, 17), CodePosition(13, 17), CodePosition(17, 17)],
|
||||
func,
|
||||
project_root_path,
|
||||
|
|
@ -1917,11 +1917,11 @@ import unittest
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test_behavior = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(16, 17)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(16, 17)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
assert success
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(16, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(16, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
|
||||
os.chdir(original_cwd)
|
||||
|
|
@ -2177,11 +2177,11 @@ import unittest
|
|||
func = FunctionToOptimize(function_name="sorter", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test_behavior = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(14, 21)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(14, 21)], func, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
assert success
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(14, 21)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(14, 21)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -2428,10 +2428,10 @@ import unittest
|
|||
f = FunctionToOptimize(function_name="sorter", file_path=code_path, parents=[])
|
||||
os.chdir(run_cwd)
|
||||
success, new_test_behavior = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(17, 21)], f, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
code, test_path, [CodePosition(17, 21)], f, project_root_path, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
success, new_test_perf = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(17, 21)], f, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(17, 21)], f, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -2734,7 +2734,7 @@ def test_class_name_A_function_name():
|
|||
)
|
||||
os.chdir(str(run_cwd))
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(4, 23)], func, project_root_path
|
||||
code, test_path, [CodePosition(4, 23)], func, project_root_path
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
finally:
|
||||
|
|
@ -2811,7 +2811,7 @@ def test_common_tags_1():
|
|||
|
||||
os.chdir(str(run_cwd))
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(7, 11), CodePosition(11, 11)], func, project_root_path
|
||||
code, test_path, [CodePosition(7, 11), CodePosition(11, 11)], func, project_root_path
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -2877,7 +2877,7 @@ def test_sort():
|
|||
|
||||
os.chdir(str(run_cwd))
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(7, 15)], func, project_root_path
|
||||
code, test_path, [CodePosition(7, 15)], func, project_root_path
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -2960,7 +2960,7 @@ def test_sort():
|
|||
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(6, 26), CodePosition(10, 26)], function_to_optimize, project_root_path
|
||||
code, test_path, [CodePosition(6, 26), CodePosition(10, 26)], function_to_optimize, project_root_path
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -3061,7 +3061,7 @@ def test_code_replacement10() -> None:
|
|||
run_cwd = Path(__file__).parent.parent.resolve()
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_file_path, [CodePosition(22, 28), CodePosition(28, 28)], func, test_file_path.parent
|
||||
code, test_file_path, [CodePosition(22, 28), CodePosition(28, 28)], func, test_file_path.parent
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -3119,7 +3119,7 @@ def test_sleepfunc_sequence_short(n, expected_total_sleep_time):
|
|||
func = FunctionToOptimize(function_name="accurate_sleepfunc", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(8, 13)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(8, 13)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
|
||||
|
|
@ -3236,7 +3236,7 @@ import unittest
|
|||
func = FunctionToOptimize(function_name="accurate_sleepfunc", parents=[], file_path=code_path)
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
test_path, [CodePosition(12, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
code, test_path, [CodePosition(12, 17)], func, project_root_path, mode=TestingMode.PERFORMANCE
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
|
||||
|
|
|
|||
|
|
@ -349,7 +349,7 @@ def test_run_and_parse_picklepatch() -> None:
|
|||
run_cwd = project_root
|
||||
os.chdir(run_cwd)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
replay_test_path, [CodePosition(17, 15)], func, project_root, mode=TestingMode.BEHAVIOR
|
||||
original_replay_test_code, replay_test_path, [CodePosition(17, 15)], func, project_root, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success
|
||||
|
|
@ -443,7 +443,7 @@ def bubble_sort_with_unused_socket(data_container):
|
|||
function_name="bubble_sort_with_used_socket", parents=[], file_path=Path(fto_used_socket_path)
|
||||
)
|
||||
success, new_test = inject_profiling_into_existing_test(
|
||||
replay_test_path, [CodePosition(23, 15)], func, project_root, mode=TestingMode.BEHAVIOR
|
||||
original_replay_test_code, replay_test_path, [CodePosition(23, 15)], func, project_root, mode=TestingMode.BEHAVIOR
|
||||
)
|
||||
os.chdir(original_cwd)
|
||||
assert success