Merge branch 'main' into feat/show-logo-on-help

This commit is contained in:
Aseem Saxena 2026-04-23 04:10:15 -07:00 committed by GitHub
commit db5b96a80c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
159 changed files with 12044 additions and 25425 deletions

45
.claude/hooks/bash-guard.sh Executable file
View file

@ -0,0 +1,45 @@
#!/usr/bin/env bash
# PreToolUse hook: Block Bash calls that should use dedicated tools.
# Exit 0 = allow, Exit 2 = block (message on stderr).
# A missing or unparsable stdin payload fails open: the command is allowed.
INPUT=$(cat 2>/dev/null || true)
COMMAND=$(echo "$INPUT" | jq -r '.tool_input.command // empty' 2>/dev/null || true)
[ -z "$COMMAND" ] && exit 0
# Strip leading env vars (FOO=bar cmd ...) and whitespace to get the actual command
# NOTE(review): only unquoted assignment values are handled — `FOO="a b" cmd`
# would not strip cleanly; confirm quoted env prefixes are out of scope.
STRIPPED=$(echo "$COMMAND" | sed 's/^[[:space:]]*\([A-Za-z_][A-Za-z0-9_]*=[^[:space:]]*[[:space:]]*\)*//')
# Only the first word is classified; commands after |, && or ; are not inspected.
FIRST_CMD=$(echo "$STRIPPED" | awk '{print $1}')
case "$FIRST_CMD" in
grep|egrep|fgrep|rg)
  echo "BLOCKED: Use the Grep tool instead of \`$FIRST_CMD\`. It provides better output and permissions handling." >&2
  exit 2
  ;;
find)
  echo "BLOCKED: Use the Glob tool instead of \`find\`. Glob is faster and returns results sorted by modification time." >&2
  exit 2
  ;;
cat|head|tail)
  echo "BLOCKED: Use the Read tool instead of \`$FIRST_CMD\`. Read provides line numbers and supports images/PDFs." >&2
  exit 2
  ;;
awk)
  echo "BLOCKED: Use the Grep tool or Read tool instead of \`awk\`." >&2
  exit 2
  ;;
sed)
  # sed is allowed read-only; only in-place editing is blocked. The pattern
  # scans the full command string, so `sed -i` after a pipe is also caught.
  if echo "$COMMAND" | grep -qE '(^|[[:space:]])sed[[:space:]]+-i'; then
    echo "BLOCKED: Use the Edit tool instead of \`sed -i\`. Edit tracks changes properly." >&2
    exit 2
  fi
  ;;
esac
# echo with file redirection (echo "..." > file)
# NOTE(review): requires whitespace before `>`, so `echo hi>file` slips through — confirm acceptable.
if echo "$STRIPPED" | grep -qE '^echo\b.*[[:space:]]>'; then
  echo "BLOCKED: Use the Write tool instead of \`echo >\`. Write provides proper file creation." >&2
  exit 2
fi
exit 0

47
.claude/hooks/post-compact.sh Executable file
View file

@ -0,0 +1,47 @@
#!/usr/bin/env bash
# PreCompact hook: Inject state preservation guidance before context compaction.
# Emits a {"systemMessage": ...} JSON object on stdout; exits 0 silently when
# the project directory is unavailable.
cd "$CLAUDE_PROJECT_DIR" 2>/dev/null || exit 0
STATE=""
BRANCH=$(git branch --show-current 2>/dev/null)
[ -n "$BRANCH" ] && STATE="${STATE}Branch: ${BRANCH}\n"
DIRTY=$(git status --porcelain 2>/dev/null)
if [ -n "$DIRTY" ]; then
  COUNT=$(echo "$DIRTY" | wc -l | tr -d ' ')
  STATE="${STATE}Uncommitted files (${COUNT}):\n${DIRTY}\n"
fi
UPSTREAM=$(git rev-parse --abbrev-ref '@{upstream}' 2>/dev/null)
if [ -n "$UPSTREAM" ]; then
  AHEAD=$(git rev-list --count "${UPSTREAM}..HEAD" 2>/dev/null)
  # The -gt test errors out (silenced) when AHEAD is empty or non-numeric.
  [ "$AHEAD" -gt 0 ] 2>/dev/null && STATE="${STATE}Unpushed commits: ${AHEAD}\n"
fi
RECENT=$(git log --oneline -5 2>/dev/null)
[ -n "$RECENT" ] && STATE="${STATE}Recent commits:\n${RECENT}\n"
# Most recent handoff note, if any.
# NOTE(review): `ls -t | head -1` misparses filenames containing whitespace —
# confirm handoff files are always machine-named without spaces.
LATEST_HANDOFF=$(ls -t "$CLAUDE_PROJECT_DIR/.claude/handoffs/"*.md 2>/dev/null | head -1)
if [ -n "$LATEST_HANDOFF" ] && [ -f "$LATEST_HANDOFF" ]; then
  HANDOFF_CONTENT=$(head -40 "$LATEST_HANDOFF" 2>/dev/null)
  [ -n "$HANDOFF_CONTENT" ] && STATE="${STATE}\nHandoff context:\n${HANDOFF_CONTENT}\n"
fi
# Conventions are appended unconditionally, so STATE is never empty past this
# point (a former `[ -z "$STATE" ] && exit 0` guard was dead code and removed).
STATE="${STATE}\nProject conventions to preserve:\n"
STATE="${STATE}- Python 3.9+, uv for all tooling, ruff + mypy via prek\n"
STATE="${STATE}- Verification: uv run prek (single command for lint/format/types)\n"
STATE="${STATE}- Pre-push: uv run prek run --from-ref origin/<base>\n"
STATE="${STATE}- Conventional commits: fix:, feat:, refactor:, test:, chore:\n"
STATE="${STATE}- Result type: Success(value) / Failure(error), check with is_successful()\n"
STATE="${STATE}- Language singleton: set_current_language() / current_language()\n"
STATE="${STATE}- libcst for code transforms, ast for read-only analysis\n"
# %b expands the literal \n sequences accumulated above into real newlines.
EXPANDED=$(printf '%b' "$STATE")
jq -n --arg msg "PRESERVE the following session state through compaction:
$EXPANDED" '{"systemMessage": $msg}'
exit 0

View file

@ -1,5 +1,4 @@
#!/usr/bin/env bash
# Everyone is on macOS so this should be fine, we don't account for Windows
set -euo pipefail
input=$(cat)
@ -10,6 +9,5 @@ if [[ -z "$file_path" || ! -f "$file_path" ]]; then
fi
if [[ "$file_path" == *.py ]]; then
# First run auto-fixes formatting; second run catches real lint errors
uv run prek --files "$file_path" 2>/dev/null || uv run prek --files "$file_path"
fi

25
.claude/hooks/require-read.sh Executable file
View file

@ -0,0 +1,25 @@
#!/usr/bin/env bash
# PreToolUse hook: Block Write/Edit on existing files that haven't been Read first.
# Exit 0 = allow, Exit 2 = block (message on stderr).
INPUT=$(cat 2>/dev/null || true)
FILE_PATH=$(echo "$INPUT" | jq -r '.tool_input.file_path // empty' 2>/dev/null || true)
# Nothing to check without a file path in the payload.
[ -z "$FILE_PATH" ] && exit 0
# New files don't need prior reads
[ ! -f "$FILE_PATH" ] && exit 0
TRACKER="$CLAUDE_PROJECT_DIR/.claude/.read-tracker"
# Allow only when the tracker exists and records this exact path
# (-x whole-line, -F literal: paths may contain regex metacharacters).
if [ -f "$TRACKER" ] && grep -qxF "$FILE_PATH" "$TRACKER"; then
  exit 0
fi
echo "BLOCKED: Read \`$(basename "$FILE_PATH")\` first before modifying it." >&2
exit 2

50
.claude/hooks/status-line.sh Executable file
View file

@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Status line: derive context from git state.
# Output shape: "<user> | codeflash[ | <context>][ | <branch>[ *]]"
input=$(cat)
project_dir=$(echo "$input" | jq -r '.workspace.project_dir')
user=$(whoami)
branch=$(git -C "$project_dir" branch --show-current 2>/dev/null)
# Changed files: working tree vs HEAD first, then unstaged, then staged.
changed=$(git -C "$project_dir" diff --name-only HEAD 2>/dev/null)
[ -z "$changed" ] && changed=$(git -C "$project_dir" diff --name-only 2>/dev/null)
[ -z "$changed" ] && changed=$(git -C "$project_dir" diff --name-only --cached 2>/dev/null)
if [ -n "$changed" ]; then
  # Most frequently touched top-level directory among the changed files.
  area=$(echo "$changed" | sed 's|/.*||' | sort | uniq -c | sort -rn | head -1 | awk '{print $2}')
else
  area=""
fi
context=""
case "$area" in
codeflash)
  # Most frequently touched immediate subdirectory under codeflash/.
  subsystem=$(echo "$changed" | grep '^codeflash/' | sed 's|^codeflash/||; s|/.*||' | sort | uniq -c | sort -rn | head -1 | awk '{print $2}')
  [ -n "$subsystem" ] && context="editing $subsystem" ;;
tests)
  # First changed test target alphabetically (not frequency-ranked like above).
  target=$(echo "$changed" | grep '^tests/' | sed 's|^tests/||; s|/.*||' | sort -u | head -1)
  [ -n "$target" ] && context="testing $target" ;;
.claude)
  context="configuring claude" ;;
esac
# Fall back to branch-name prefixes when no directory-based context was found.
# Note: for cf-* branches ${branch#feat/} strips nothing, leaving the full name.
if [ -z "$context" ] && [ -n "$branch" ]; then
  case "$branch" in
  feat/*|cf-*) context="building: ${branch#feat/}" ;;
  fix/*) context="fixing: ${branch#fix/}" ;;
  refactor/*) context="refactoring: ${branch#refactor/}" ;;
  test/*) context="testing: ${branch#test/}" ;;
  chore/*) context="chore: ${branch#chore/}" ;;
  esac
fi
# Trailing " *" marks a dirty working tree.
dirty=""
if [ -n "$(git -C "$project_dir" status --porcelain 2>/dev/null)" ]; then
  dirty=" *"
fi
status="$user | codeflash"
[ -n "$context" ] && status="$status | $context"
[ -n "$branch" ] && status="$status | $branch$dirty"
echo "$status"

11
.claude/hooks/track-read.sh Executable file
View file

@ -0,0 +1,11 @@
#!/usr/bin/env bash
# PostToolUse hook: Track Read calls for the require-read guard.
# Appends each newly-read file path (one per line) to .claude/.read-tracker.
INPUT=$(cat 2>/dev/null || true)
FILE_PATH=$(echo "$INPUT" | jq -r '.tool_input.file_path // empty' 2>/dev/null || true)
[ -z "$FILE_PATH" ] && exit 0
# Guard: with CLAUDE_PROJECT_DIR unset the tracker path would resolve to
# /.claude/.read-tracker — skip tracking rather than write outside the project.
[ -z "$CLAUDE_PROJECT_DIR" ] && exit 0
TRACKER="$CLAUDE_PROJECT_DIR/.claude/.read-tracker"
# Record once per path (-x whole-line, -F literal match); create on first append.
grep -qxF "$FILE_PATH" "$TRACKER" 2>/dev/null || echo "$FILE_PATH" >> "$TRACKER"
exit 0

View file

@ -4,10 +4,11 @@
- **Python**: 3.9+ syntax
- **Package management**: Always use `uv`, never `pip`
- **Tooling**: Ruff for linting/formatting, mypy strict mode, prek for pre-commit checks
- **Comments**: Minimal - only explain "why", not "what"
- **Docstrings**: Do not add docstrings to new or changed code unless the user explicitly asks for them — not even one-liners. The codebase intentionally keeps functions self-documenting through clear naming and type annotations
- **Types**: Match the type annotation style of surrounding code — the codebase uses annotations, so add them in new code
- **Naming**: NEVER use leading underscores (`_function_name`) - Python has no true private functions, use public names
- **Comments**: Minimal — only explain "why", not "what"
- **Docstrings**: Do not add docstrings unless the user explicitly asks
- **Types**: Match the type annotation style of surrounding code
- **Naming**: No leading underscores (`_function_name`) — Python has no true private functions
- **Paths**: Always use absolute paths
- **Encoding**: Always pass `encoding="utf-8"` to `open()`, `read_text()`, `write_text()`, etc. in new or changed code — Windows defaults to `cp1252` which breaks on non-ASCII content. Don't flag pre-existing code that lacks it unless you're already modifying that line.
- **Verification**: Use `uv run prek` to verify code — it handles ruff, ty, mypy in one pass. Don't run `ruff`, `mypy`, or `python -c "import ..."` separately; `prek` is the single verification command
- **Encoding**: Always pass `encoding="utf-8"` to `open()`, `read_text()`, `write_text()` in new or changed code
- **Verification**: Use `uv run prek` — it handles ruff, ty, mypy in one pass. Don't run them separately
- **Code transforms**: Use `libcst` for code modification/transformation. `ast` is acceptable for read-only analysis

View file

@ -0,0 +1,19 @@
# Debugging
## Root cause first
When encountering a bug, investigate the root cause. Don't patch symptoms. If you're about to add a try/except, a fallback default, or a defensive check — ask whether the real fix is upstream.
## Isolated testing
Prefer running individual test functions over full suites. Only run the full suite when explicitly asked or before pushing.
- Single function: `uv run pytest tests/test_foo.py::TestBar::test_baz -v`
- Single module: `uv run pytest tests/test_foo.py -v`
- Full suite: only when asked, or before `git push`
When debugging a specific endpoint or integration, test it directly instead of running the entire pipeline end-to-end.
## Subprocess failures
When a subprocess fails, always log stdout and stderr. "Exit code 1" with no output is useless.

View file

@ -1,19 +1,35 @@
# Git Commits & Pull Requests
# Git
## Commits
- Never commit, amend, or push without explicit permission
- Don't commit intermediate states — wait until the full implementation is complete, reviewed, and explicitly approved before committing. If the user corrects direction mid-implementation, incorporate the correction before any commit
- Always create a new branch from `main` before starting any new work — never commit directly to `main` or reuse an existing feature branch for unrelated changes
- Use conventional commit format: `fix:`, `feat:`, `refactor:`, `docs:`, `test:`, `chore:`
- Keep commits atomic - one logical change per commit
- Commit message body should be concise (1-2 sentences max)
- Merge for simple syncs, rebase when branches have diverged significantly
- When committing to an external/third-party repo, follow that repo's own conventions for versioning, changelog, and CI
- Pre-commit: Run `uv run prek` before committing — fix any issues before creating the commit
- Pre-push: Run `uv run prek run --from-ref origin/<base>` to check all changed files against the PR base — this matches CI behavior and catches issues that per-commit prek misses. To detect the base branch: `gh pr view --json baseRefName -q .baseRefName 2>/dev/null || echo main`
- Don't commit intermediate states — wait until the full implementation is complete and approved
- Always create a new branch from `main` — never commit directly to `main`
- Conventional format: `fix:`, `feat:`, `refactor:`, `docs:`, `test:`, `chore:`
- First line: imperative verb + what changed, under 72 characters
- Body for *why*, not *what* — the diff shows what changed
- One purpose per commit: a bug fix, a new function, a refactor — not all three
- A commit that adds a function also adds its tests and exports — that's one logical change
## Sizing
- Too small: renaming a variable in one commit, updating its references in another
- Right size: adding a function with its tests, `__init__` export, and usage update
- Too large: implementing an entire subsystem in one commit
## Pre-commit / Pre-push
- Pre-commit: Run `uv run prek` before committing
- Pre-push: Run `uv run prek run --from-ref origin/<base>` to check all changed files against the PR base
## Pull Requests
- PR titles should use conventional format
- Keep the PR body short and straight to the point
- PR titles use conventional format
- Keep the PR body short and to the point
- If related to a Linear issue, include `CF-#` in the body
- Branch naming: `cf-#-title` (lowercase, hyphenated), no other prefixes/suffixes
- Branch naming: `cf-#-title` (lowercase, hyphenated)
## Branch Hygiene
- Delete feature branches locally after merging (`git branch -d <branch>`)
- Use `/clean_gone` to prune local branches whose remote tracking branch has been deleted

5
.claude/rules/github.md Normal file
View file

@ -0,0 +1,5 @@
# GitHub Interactions
ALWAYS use MCP GitHub tools (`mcp__github__*`) for GitHub operations. Check for a matching MCP tool first — only fall back to `gh` via Bash when no MCP tool exists for the operation.
This also applies to other MCP-connected services (Linear, Granola). MCP first, CLI second.

View file

@ -6,8 +6,8 @@ paths:
# Language Support Patterns
- Current language is a module-level singleton in `languages/current.py` — use `set_current_language()` / `current_language()`, never pass language as a parameter through call chains
- Use `get_language_support(identifier)` from `languages/registry.py` to get a `LanguageSupport` instance — never import language classes directly
- New language support classes must use the `@register_language` decorator to register with the extension and language registries
- `languages/__init__.py` uses `__getattr__` for lazy imports to avoid circular dependencies — follow this pattern when adding new exports
- Prefer `LanguageSupport` protocol dispatch over `is_python()`/`is_javascript()` guards — remaining guards are being migrated to protocol methods
- Use `get_language_support(identifier)` from `languages/registry.py` — never import language classes directly
- New language support classes must use the `@register_language` decorator
- `languages/__init__.py` uses `__getattr__` for lazy imports to avoid circular dependencies
- Prefer `LanguageSupport` protocol dispatch over `is_python()`/`is_javascript()` guards
- `is_javascript()` returns `True` for both JavaScript and TypeScript (still used in ~15 call sites pending migration)

27
.claude/rules/sessions.md Normal file
View file

@ -0,0 +1,27 @@
# Session Discipline
## Scope
One task per session. Don't mix implementation with communication drafting, transcript search, or strategic planning.
## Duration
Cap sessions at 2-3 hours. Use `/handoff` at natural breakpoints rather than letting auto-compaction degrade context.
- After 1 compaction: consider wrapping up the current task and handing off
- After 3 compactions: stop, and tell the user to start a fresh session
- Never continue past 5 compactions — context is too degraded
## Context preservation
When compacting, preserve: modified files list, current branch, test commands used, key decisions made. Use subagents for exploration to keep main context clean.
## No polling
Never poll background tasks. No `wc -l`, no `tail -f`, no `sleep` loops. Use `run_in_background` and wait for the completion notification.
## File read budget
If you've read the same file 3+ times in a session, either:
- The session is too long and compaction destroyed your context — write a handoff
- You're not retaining key information — write it down in your response before it compacts away

View file

@ -1,8 +0,0 @@
---
paths:
- "codeflash/**/*.py"
---
# Source Code Rules
- Use `libcst` for code modification/transformation to preserve formatting. `ast` is acceptable for read-only analysis and parsing.

View file

@ -4,13 +4,14 @@ paths:
- "codeflash/**/*test*.py"
---
# Testing Conventions
# Testing
- Code context extraction and replacement tests must always assert for full string equality, no substring matching.
- Use pytest's `tmp_path` fixture for temp directories — do not use `tempfile.mkdtemp()`, `tempfile.TemporaryDirectory()`, or `NamedTemporaryFile`. Some existing tests still use `tempfile` but new tests must use `tmp_path`.
- Always call `.resolve()` on Path objects before passing them to functions under test — this ensures absolute paths and resolves symlinks. Example: `source_file = (tmp_path / "example.py").resolve()`
- Use `.as_posix()` when converting resolved paths to strings (normalizes to forward slashes).
- Any new feature or bug fix that can be tested automatically must have test cases.
- If changes affect existing test expectations, update the tests accordingly. Tests must always pass after changes.
- The pytest plugin patches `time`, `random`, `uuid`, and `datetime` for deterministic test execution — never assume real randomness or real time in verification tests.
- `conftest.py` uses an autouse fixture that calls `reset_current_language()` — tests always start with Python as the default language.
- Full string equality for context extraction/replacement tests — no substring matching
- Use pytest's `tmp_path` fixture — not `tempfile.mkdtemp()` or `NamedTemporaryFile`
- Always call `.resolve()` on Path objects before passing to functions under test
- Use `.as_posix()` when converting resolved paths to strings
- New features and bug fixes must have test cases
- The pytest plugin patches `time`, `random`, `uuid`, `datetime` for deterministic execution
- `conftest.py` autouse fixture calls `reset_current_language()` — tests start with Python as default
- Prefer running individual tests over full suites: `uv run pytest tests/test_foo.py::TestBar::test_baz -v`
- Only run the full suite when explicitly asked or before pushing

View file

@ -1,13 +1,17 @@
# Workflow
## Code Changes
- Before making any changes, outline your approach in 3-5 numbered steps. Include which repo/branch you'll work in, what commands you'll run, and what success looks like. Wait for approval before starting
Before making any changes, outline your approach in 3-5 numbered steps. Include which branch you'll work on, what commands you'll run, and what success looks like. Wait for approval before starting.
## Response Style
- When listing items (PRs, functions, optimization targets), always provide the complete list ordered by priority on the first attempt. Do not give partial lists
When listing items (PRs, functions, optimization targets), provide the complete list ordered by priority on the first attempt. No partial lists.
## Commands
- When running long-running commands (benchmarks, profiling, optimizers like codeflash), always run them in the foreground. Do not use background processes
Long-running commands (benchmarks, profiling, optimizers) always run in the foreground. Do not use background processes.
## Debugging
- When claiming something is a pre-existing issue (e.g., test failures on main), verify by checking out main and running the tests before making that claim
When claiming something is a pre-existing issue (e.g., test failures on main), verify by checking out main and running the tests before making that claim.

View file

@ -1,16 +1,89 @@
{
"attribution": {
"commit": "",
"pr": ""
},
"includeCoAuthoredBy": false,
"permissions": {
"allow": [
"Bash(git status*)",
"Bash(git diff*)",
"Bash(git log*)",
"Bash(git branch*)",
"Bash(git show*)",
"Bash(git fetch*)",
"Bash(git checkout*)",
"Bash(uv run*)",
"Bash(uv sync*)",
"Bash(uv pip*)",
"Bash(prek*)",
"Bash(make*)",
"Bash(gh *)"
]
},
"hooks": {
"PreToolUse": [
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "$CLAUDE_PROJECT_DIR/.claude/hooks/bash-guard.sh",
"timeout": 5
}
]
},
{
"matcher": "Write",
"hooks": [
{
"type": "command",
"command": "$CLAUDE_PROJECT_DIR/.claude/hooks/require-read.sh",
"timeout": 5
}
]
}
],
"PostToolUse": [
{
"matcher": "Read",
"hooks": [
{
"type": "command",
"command": "$CLAUDE_PROJECT_DIR/.claude/hooks/track-read.sh",
"timeout": 5
}
]
},
{
"matcher": "Edit|Write",
"hooks": [
{
"type": "command",
"command": ".claude/hooks/post-edit-lint.sh",
"command": "$CLAUDE_PROJECT_DIR/.claude/hooks/post-edit-lint.sh",
"timeout": 30
}
]
}
],
"PreCompact": [
{
"hooks": [
{
"type": "command",
"command": "$CLAUDE_PROJECT_DIR/.claude/hooks/post-compact.sh",
"timeout": 10
}
]
}
]
},
"statusLine": {
"type": "command",
"command": "$CLAUDE_PROJECT_DIR/.claude/hooks/status-line.sh"
},
"enableAllProjectMcpServers": true,
"env": {
"ENABLE_LSP_TOOL": "1"
}
}

View file

@ -7,6 +7,6 @@ uv run mypy --non-interactive --config-file pyproject.toml <changed_files>
```
- Fix type annotation issues: missing return types, incorrect types, Optional/None unions, import errors for type hints
- Do NOT add `# type: ignore` comments always fix the root cause
- Do NOT add `# type: ignore` comments -- always fix the root cause
- Do NOT fix type errors that require logic changes, complex generic type rework, or anything that could change runtime behavior
- Files in `mypy_allowlist.txt` are checked in CI ensure they remain error-free
- Files in `mypy_allowlist.txt` are checked in CI -- ensure they remain error-free

View file

@ -5,5 +5,5 @@ When prek (pre-commit) checks fail:
1. Run `uv run prek run` to see failures (local, checks staged files)
2. In CI, the equivalent is `uv run prek run --from-ref origin/main`
3. prek runs ruff format, ruff check, and mypy on changed files
4. Fix issues in order: formatting → lint → type errors
4. Fix issues in order: formatting -> lint -> type errors
5. Re-run `uv run prek run` to verify all checks pass

View file

View file

@ -1,31 +1,18 @@
from argparse import Namespace
from pathlib import Path
from codeflash.discovery.functions_to_optimize import FunctionToOptimize
from codeflash.languages.python.context.code_context_extractor import get_code_optimization_context
from codeflash.models.models import FunctionParent
from codeflash.optimization.optimizer import Optimizer
def test_benchmark_extract(benchmark) -> None:
file_path = Path(__file__).parent.parent.parent.resolve() / "codeflash"
opt = Optimizer(
Namespace(
project_root=file_path.resolve(),
disable_telemetry=True,
tests_root=(file_path / "tests").resolve(),
test_framework="pytest",
pytest_cmd="pytest",
experiment_id=None,
test_project_root=Path.cwd(),
)
)
project_root = Path(__file__).parent.parent.parent.resolve() / "codeflash"
function_to_optimize = FunctionToOptimize(
function_name="replace_function_and_helpers_with_optimized_code",
file_path=file_path / "languages" / "function_optimizer.py",
file_path=project_root / "languages" / "function_optimizer.py",
parents=[FunctionParent(name="FunctionOptimizer", type="ClassDef")],
starting_line=None,
ending_line=None,
)
benchmark(get_code_optimization_context, function_to_optimize, opt.args.project_root)
benchmark(get_code_optimization_context, function_to_optimize, project_root)

View file

@ -0,0 +1,133 @@
"""Benchmark comparator type dispatch performance.
Exercises the fast-path frozenset lookup vs isinstance MRO traversal
across realistic return value shapes: primitives, nested containers,
and mixed-type structures typical of real optimization verification.
"""
from __future__ import annotations
from collections import OrderedDict
from decimal import Decimal
from codeflash.verification.comparator import comparator
# --- Test data: realistic return value shapes ---
# 1. Flat primitives (int, bool, None, str, float, bytes) — the fast-path sweet spot
_PRIMITIVES_A = [
    42,
    True,
    None,
    3.14,
    "hello",
    b"bytes",
    0,
    False,
    "",
    1.0,
    -1,
    None,
    True,
    99,
    "world",
    b"\x00\x01",
    2**31,
    0.0,
    False,
    None,
]
# Shallow copy: elements are the very same objects as in _PRIMITIVES_A, so
# element comparisons may hit identity as well as equality paths.
_PRIMITIVES_B = list(_PRIMITIVES_A)
# 2. Nested dict of lists (common return value shape: API responses, parsed configs)
_NESTED_DICT_A = {
    "users": [{"id": i, "name": f"user_{i}", "active": i % 2 == 0, "score": i * 1.5} for i in range(50)],
    "metadata": {"total": 50, "page": 1, "has_next": True},
    "tags": [f"tag_{i}" for i in range(20)],
    "config": {"timeout": 30, "retries": 3, "debug": False, "threshold": Decimal("0.95")},
}
# Built independently (not copied) so the comparator must do full structural equality.
_NESTED_DICT_B = {
    "users": [{"id": i, "name": f"user_{i}", "active": i % 2 == 0, "score": i * 1.5} for i in range(50)],
    "metadata": {"total": 50, "page": 1, "has_next": True},
    "tags": [f"tag_{i}" for i in range(20)],
    "config": {"timeout": 30, "retries": 3, "debug": False, "threshold": Decimal("0.95")},
}
# 3. List of tuples (common: database rows, CSV data) — also built twice, not copied.
_ROWS_A = [(i, f"row_{i}", i * 0.1, i % 3 == 0, None if i % 5 == 0 else i) for i in range(200)]
_ROWS_B = [(i, f"row_{i}", i * 0.1, i % 3 == 0, None if i % 5 == 0 else i) for i in range(200)]
# 4. Deeply nested structure (worst case for recursive comparator)
def _make_deep(depth: int) -> dict:
if depth == 0:
return {"leaf": True, "value": 42, "items": [1, 2, 3], "label": "end"}
return {"level": depth, "child": _make_deep(depth - 1), "siblings": list(range(depth))}
_DEEP_A = _make_deep(15)
# Built by a second call (not copied), forcing full recursive comparison.
_DEEP_B = _make_deep(15)
# 5. Mixed identity types (frozenset, range, OrderedDict, bytes, complex, ...)
# NOTE(review): the original comment also promised a `slice`, but none appears
# below — confirm whether one was intended.
_IDENTITY_TYPES_A = [
    frozenset({1, 2, 3}),
    range(100),
    complex(1, 2),
    Decimal("3.14"),
    OrderedDict(a=1, b=2),
    b"binary",
    bytearray(b"mutable"),
    memoryview(b"view"),
    type(None),
    True,
    42,
    None,
] * 10  # 12 base values repeated -> 120 elements
# Shallow copy: identical element objects in both lists.
_IDENTITY_TYPES_B = list(_IDENTITY_TYPES_A)
def _compare_all_primitives() -> None:
    """Compare each primitive pair element-wise (flat fast-path workload)."""
    for left, right in zip(_PRIMITIVES_A, _PRIMITIVES_B):
        comparator(left, right)


def _compare_nested_dict() -> None:
    """One structural comparison of the nested-dict fixtures."""
    comparator(_NESTED_DICT_A, _NESTED_DICT_B)


def _compare_rows() -> None:
    """One comparison of the 200-row tuple fixtures."""
    comparator(_ROWS_A, _ROWS_B)


def _compare_deep() -> None:
    """One comparison of the 15-level nested fixtures."""
    comparator(_DEEP_A, _DEEP_B)


def _compare_identity_types() -> None:
    """Compare each mixed-identity-type pair element-wise."""
    for left, right in zip(_IDENTITY_TYPES_A, _IDENTITY_TYPES_B):
        comparator(left, right)
# pytest-benchmark entry points: `benchmark` times the given callable.
def test_benchmark_comparator_primitives(benchmark) -> None:
    """20 flat primitive comparisons (int, bool, None, str, float, bytes)."""
    benchmark(_compare_all_primitives)


def test_benchmark_comparator_nested_dict(benchmark) -> None:
    """Nested dict with 50-element user list, metadata, tags, config."""
    benchmark(_compare_nested_dict)


def test_benchmark_comparator_rows(benchmark) -> None:
    """200 tuples of (int, str, float, bool, Optional[int])."""
    benchmark(_compare_rows)


def test_benchmark_comparator_deep(benchmark) -> None:
    """15-level deep nested dict structure."""
    benchmark(_compare_deep)


def test_benchmark_comparator_identity_types(benchmark) -> None:
    """120 frozenset/range/complex/Decimal/OrderedDict/bytes comparisons."""
    benchmark(_compare_identity_types)

View file

@ -0,0 +1,75 @@
"""Benchmark libcst visitor performance across many files.
Exercises the visitor-heavy codepaths that benefit from the libcst dispatch
table cache: discover_functions + get_code_optimization_context on multiple
real source files.
"""
from __future__ import annotations
from pathlib import Path
from codeflash.discovery.functions_to_optimize import FunctionToOptimize
from codeflash.languages.python.context.code_context_extractor import get_code_optimization_context
from codeflash.languages.python.support import PythonSupport
from codeflash.models.models import FunctionParent
# Real source files from the codeflash codebase, chosen for size and visitor diversity.
_CODEFLASH_ROOT = Path(__file__).parent.parent.parent.resolve() / "codeflash"
_SOURCE_FILES: list[Path] = [
    _CODEFLASH_ROOT / "languages" / "function_optimizer.py",
    _CODEFLASH_ROOT / "languages" / "python" / "context" / "code_context_extractor.py",
    _CODEFLASH_ROOT / "languages" / "python" / "support.py",
    _CODEFLASH_ROOT / "languages" / "python" / "static_analysis" / "code_extractor.py",
    _CODEFLASH_ROOT / "languages" / "python" / "static_analysis" / "code_replacer.py",
    _CODEFLASH_ROOT / "code_utils" / "instrument_existing_tests.py",
    _CODEFLASH_ROOT / "benchmarking" / "compare.py",
    _CODEFLASH_ROOT / "models" / "models.py",
    _CODEFLASH_ROOT / "discovery" / "discover_unit_tests.py",
    _CODEFLASH_ROOT / "languages" / "base.py",
]
# For each file, pick one top-level function to extract context for.
# Tuple order is (file, class, function_name) — class=None means module-level.
_TARGETS: list[tuple[Path, str | None, str]] = [
    (_SOURCE_FILES[0], "FunctionOptimizer", "replace_function_and_helpers_with_optimized_code"),
    (_SOURCE_FILES[1], None, "get_code_optimization_context"),
    (_SOURCE_FILES[2], "PythonSupport", "discover_functions"),
    (_SOURCE_FILES[3], None, "add_global_assignments"),
    (_SOURCE_FILES[4], None, "replace_functions_in_file"),
    (_SOURCE_FILES[5], None, "inject_profiling_into_existing_test"),
    (_SOURCE_FILES[6], None, "compare_branches"),
    (_SOURCE_FILES[7], None, "get_comment_prefix"),
    (_SOURCE_FILES[8], None, "discover_unit_tests"),
    (_SOURCE_FILES[9], None, "convert_parents_to_tuple"),
]
def _discover_all() -> None:
    """Run discover_functions over every benchmark source file."""
    support = PythonSupport()
    for source_path in _SOURCE_FILES:
        text = source_path.read_text(encoding="utf-8")
        support.discover_functions(source=text, file_path=source_path)
def _extract_all_contexts() -> None:
    """Run get_code_optimization_context on every target function."""
    root = _CODEFLASH_ROOT.parent
    for target_file, owner_class, func_name in _TARGETS:
        # Module-level targets (owner_class falsy) get an empty parent chain.
        parents = [FunctionParent(name=owner_class, type="ClassDef")] if owner_class else []
        target = FunctionToOptimize(
            function_name=func_name,
            file_path=target_file,
            parents=parents,
            starting_line=None,
            ending_line=None,
        )
        get_code_optimization_context(target, root)
# pytest-benchmark entry points: `benchmark` times the given callable.
def test_benchmark_discover_functions_multi_file(benchmark) -> None:
    """Discover functions across 10 source files."""
    benchmark(_discover_all)


def test_benchmark_extract_context_multi_file(benchmark) -> None:
    """Extract code optimization context for 10 functions across 10 files."""
    benchmark(_extract_all_contexts)

View file

@ -0,0 +1,56 @@
"""Benchmark the full libcst-heavy pipeline on a single file.
Runs discover → extract context → replace functions → add global assignments
in sequence, exercising ~15 distinct visitor/transformer classes in one pass.
"""
from __future__ import annotations
from pathlib import Path
from codeflash.discovery.functions_to_optimize import FunctionToOptimize
from codeflash.languages.python.context.code_context_extractor import get_code_optimization_context
from codeflash.languages.python.static_analysis.code_extractor import add_global_assignments
from codeflash.languages.python.static_analysis.code_replacer import replace_functions_in_file
from codeflash.languages.python.support import PythonSupport
_CODEFLASH_ROOT = Path(__file__).parent.parent.parent.resolve() / "codeflash"
_PROJECT_ROOT = _CODEFLASH_ROOT.parent
# Target: a real, non-trivial file with classes and module-level functions.
_TARGET_FILE = _CODEFLASH_ROOT / "languages" / "python" / "static_analysis" / "code_extractor.py"
_TARGET_FUNC = "add_global_assignments"
# A second file to serve as "optimized" source for replace/merge steps.
_SECOND_FILE = _CODEFLASH_ROOT / "languages" / "python" / "static_analysis" / "code_replacer.py"
def _run_pipeline() -> None:
    """Simulate a single-file optimization pass through the full visitor pipeline."""
    source = _TARGET_FILE.read_text(encoding="utf-8")
    source2 = _SECOND_FILE.read_text(encoding="utf-8")
    # 1. Discover functions (FunctionVisitor + MetadataWrapper)
    ps = PythonSupport()
    # NOTE(review): `functions` is never used below — the step-3 comment suggests
    # picking a discovered method, but the code always uses the module-level
    # target. Confirm whether the unused binding is intentional.
    functions = ps.discover_functions(source=source, file_path=_TARGET_FILE)
    # 2. Extract code optimization context (multiple collectors + dependency resolver)
    fto = FunctionToOptimize(
        function_name=_TARGET_FUNC, file_path=_TARGET_FILE, parents=[], starting_line=None, ending_line=None
    )
    get_code_optimization_context(fto, _PROJECT_ROOT)
    # 3. Replace functions (GlobalFunctionCollector + GlobalFunctionTransformer)
    # Use a class method from discovered functions if available, else module-level.
    func_names = [_TARGET_FUNC]
    replace_functions_in_file(
        source_code=source, original_function_names=func_names, optimized_code=source2, preexisting_objects=set()
    )
    # 4. Add global assignments (6 visitors/transformers)
    add_global_assignments(source2, source)
# pytest-benchmark entry point: times one full pass of _run_pipeline.
def test_benchmark_full_pipeline(benchmark) -> None:
    """Full discover → extract → replace → merge pipeline on one file."""
    benchmark(_run_pipeline)

View file

@ -2,7 +2,7 @@ from codeflash.models.models import FunctionTestInvocation, InvocationId, TestRe
from codeflash.verification.parse_test_output import merge_test_results
def generate_test_invocations(count=100):
def generate_test_invocations(count: int = 100) -> tuple[TestResults, TestResults]:
"""Generate a set number of test invocations for benchmarking."""
test_results_xml = TestResults()
test_results_bin = TestResults()
@ -21,7 +21,7 @@ def generate_test_invocations(count=100):
function_getting_tested="sorter",
iteration_id=iteration_id,
),
file_name="/tmp/tests/unittest/test_bubble_sort__perfinstrumented.py",
file_name="/tmp/tests/unittest/test_bubble_sort__perfinstrumented.py", # noqa: S108
did_pass=True,
runtime=None if i % 3 == 0 else i * 100, # Vary runtime values
test_framework="unittest",
@ -42,7 +42,7 @@ def generate_test_invocations(count=100):
function_getting_tested="sorter",
iteration_id=iteration_id,
),
file_name="/tmp/tests/unittest/test_bubble_sort__perfinstrumented.py",
file_name="/tmp/tests/unittest/test_bubble_sort__perfinstrumented.py", # noqa: S108
did_pass=True,
runtime=500 + i * 20, # Generate varying runtime values
test_framework="unittest",
@ -56,12 +56,12 @@ def generate_test_invocations(count=100):
return test_results_xml, test_results_bin
def run_merge_benchmark(count=100):
def run_merge_benchmark(count: int = 100) -> None:
test_results_xml, test_results_bin = generate_test_invocations(count)
# Perform the merge operation that will be benchmarked
merge_test_results(xml_test_results=test_results_xml, bin_test_results=test_results_bin, test_framework="unittest")
def test_benchmark_merge_test_results(benchmark):
def test_benchmark_merge_test_results(benchmark) -> None:
    benchmark(run_merge_benchmark, 1000)  # Benchmark with 1000 test invocations

17
.coveragerc Normal file
View file

@ -0,0 +1,17 @@
[run]
branch = true
source = codeflash
omit =
codeflash/version.py
[report]
sort = cover
show_missing = true
fail_under = 58
exclude_lines =
pragma: no cover
if TYPE_CHECKING:
if __name__ == .__main__.:
[html]
directory = htmlcov

38
.github/CODEOWNERS vendored Normal file
View file

@ -0,0 +1,38 @@
# Default fallback
* @KRRT7
# Java
/codeflash/languages/java/ @mashraf-222 @HeshamHM28 @misrasaurabh1
# JavaScript / TypeScript
/codeflash/languages/javascript/ @Saga4 @mohammedahmed18 @KRRT7
# Python language support
/codeflash/languages/python/ @KRRT7
# Core pipeline
/codeflash/optimization/ @KRRT7 @aseembits93 @misrasaurabh1
/codeflash/verification/ @KRRT7 @misrasaurabh1
/codeflash/benchmarking/ @KRRT7
/codeflash/discovery/ @KRRT7 @misrasaurabh1
# CLI & setup
/codeflash/cli_cmds/ @KRRT7 @misrasaurabh1
# LSP
/codeflash/lsp/ @mohammedahmed18
# API
/codeflash/api/ @KRRT7 @aseembits93
# Tracing & entry points
/codeflash/tracing/ @misrasaurabh1 @KRRT7
/codeflash/main.py @misrasaurabh1 @KRRT7
/codeflash/tracer.py @misrasaurabh1 @KRRT7
# Shared utilities
/codeflash/code_utils/ @KRRT7 @aseembits93 @misrasaurabh1
/codeflash/models/ @KRRT7
# CI / workflows
/.github/ @KRRT7

18
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View file

@ -0,0 +1,18 @@
## Linked issue or discussion
<!-- Every PR must link to an issue or discussion — this ensures the approach has been discussed with maintainers before implementation begins, so your work fits the project's direction and doesn't need to be reworked. -->
<!-- Replace the line below with one of: -->
<!-- Closes #<number> -->
<!-- Fixes #<number> -->
<!-- Relates to #<number> -->
<!-- Discussion: <url> -->
**Required:** <!-- CI will fail if no linked issue or discussion is found. -->
## What changed
<!-- Brief description of the changes. -->
## Test plan
<!-- How was this tested? Link to passing CI, new tests, or manual verification steps. -->

35
.github/actions/validate-pr/action.yml vendored Normal file
View file

@ -0,0 +1,35 @@
name: Validate PR
description: Ensure only authorized users can modify workflow files in PRs
inputs:
base_sha:
description: Base commit SHA of the pull request
required: true
head_sha:
description: Head commit SHA of the pull request
required: true
author:
description: Login of the PR author
required: true
pr_state:
description: State of the pull request (open/closed)
required: true
runs:
using: composite
steps:
- name: Check workflow file changes
shell: bash
run: |
if git diff --name-only "${{ inputs.base_sha }}" "${{ inputs.head_sha }}" | grep -q "^.github/workflows/"; then
echo "Workflow changes detected."
AUTHOR="${{ inputs.author }}"
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ inputs.pr_state }}" == "open" ]]; then
echo "PR is open. Protection rules in place. Proceeding."
else
echo "Unauthorized user ($AUTHOR). Exiting."
exit 1
fi
else
echo "No workflow file changes. Proceeding."
fi

25
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,25 @@
version: 2
updates:
# Python (root pyproject.toml)
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
# JavaScript (codeflash npm package)
- package-ecosystem: "npm"
directory: "/packages/codeflash"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
# GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 5
# code_to_optimize/ directories are test fixtures — do NOT update them.
# Their package-lock.json files are gitignored to prevent Dependabot alerts.

537
.github/workflows/ci.yaml vendored Normal file
View file

@ -0,0 +1,537 @@
name: CI
on:
push:
branches: [main]
paths:
- 'codeflash/**'
- 'codeflash-benchmark/**'
- 'codeflash-java-runtime/**'
- 'tests/**'
- 'packages/**'
- 'pyproject.toml'
- 'uv.lock'
- 'mypy_allowlist.txt'
- '.github/workflows/ci.yaml'
- '.github/actions/**'
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
# ---------------------------------------------------------------------------
# Linked issue check — every PR must reference an issue or discussion.
# Skipped on push to main and workflow_dispatch.
# ---------------------------------------------------------------------------
check-linked-issue:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
permissions:
pull-requests: read
steps:
- name: Check PR body for linked issue or discussion
env:
PR_BODY: ${{ github.event.pull_request.body }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
AUTHOR_ASSOCIATION: ${{ github.event.pull_request.author_association }}
run: |
# Skip for bots (dependabot, renovate, github-actions)
if [[ "$PR_AUTHOR" == *"[bot]"* || "$PR_AUTHOR" == "dependabot" ]]; then
echo "Bot PR — skipping linked issue check."
exit 0
fi
# Skip for org members
if [[ "$AUTHOR_ASSOCIATION" == "MEMBER" ]]; then
echo "Org member ($PR_AUTHOR) — skipping linked issue check."
exit 0
fi
if [ -z "$PR_BODY" ]; then
echo "::error::PR body is empty. Every PR must link an issue or discussion."
echo "Use 'Closes #<number>', 'Fixes #<number>', 'Relates to #<number>', or include a discussion URL."
exit 1
fi
# Match: #123, GH-123, org/repo#123, Closes/Fixes/Relates/Resolves #123,
# or a github.com URL to an issue or discussion
if echo "$PR_BODY" | grep -qiP '(close[sd]?|fix(e[sd])?|relate[sd]?\s+to|resolve[sd]?)\s+#\d+'; then
echo "Found linked issue keyword."
exit 0
fi
if echo "$PR_BODY" | grep -qP '#\d+'; then
echo "Found issue reference."
exit 0
fi
if echo "$PR_BODY" | grep -qiP 'github\.com/[^\s]+/(issues|discussions)/\d+'; then
echo "Found GitHub issue/discussion URL."
exit 0
fi
if echo "$PR_BODY" | grep -qiP 'CF-#?\d+'; then
echo "Found Linear ticket reference."
exit 0
fi
echo "::error::No linked issue or discussion found in PR body."
echo "Every PR must reference an issue or discussion. See CONTRIBUTING.md for details."
echo "Use 'Closes #<number>', 'Fixes #<number>', 'Relates to #<number>', or include a discussion URL."
exit 1
# ---------------------------------------------------------------------------
# Change detection — decides which downstream jobs actually run.
# On push/workflow_dispatch every flag is true so all jobs execute.
# On pull_request we diff against the merge base.
# ---------------------------------------------------------------------------
determine-changes:
uses: codeflash-ai/github-workflows/.github/workflows/determine-changes.yml@main
with:
path-filters: |
{
"unit_tests": ["codeflash/", "codeflash-benchmark/", "tests/", "packages/", "pyproject.toml", "uv.lock"],
"type_check": ["codeflash/", "pyproject.toml", "uv.lock", "mypy_allowlist.txt"],
"e2e": ["codeflash/*.py", "codeflash/api/", "codeflash/benchmarking/", "codeflash/cli_cmds/", "codeflash/code_utils/", "codeflash/discovery/", "codeflash/github/", "codeflash/languages/python/", "codeflash/languages/*.py", "codeflash/lsp/", "codeflash/models/", "codeflash/optimization/", "codeflash/picklepatch/", "codeflash/result/", "codeflash/setup/", "codeflash/telemetry/", "codeflash/tracing/", "codeflash/verification/", "tests/", "pyproject.toml", "uv.lock"],
"e2e_js": ["codeflash/languages/javascript/", "codeflash/languages/base.py", "codeflash/languages/registry.py", "codeflash/optimization/", "codeflash/verification/", "packages/", "code_to_optimize/js/", "tests/scripts/end_to_end_test_js*"],
"e2e_java": ["codeflash/languages/java/", "codeflash/languages/base.py", "codeflash/languages/registry.py", "codeflash/optimization/", "codeflash/verification/", "codeflash-java-runtime/", "code_to_optimize/java/", "tests/scripts/end_to_end_test_java*", "tests/test_languages/fixtures/java_tracer_e2e/"]
}
# ---------------------------------------------------------------------------
# Unit tests — 6 Linux + 1 Windows matrix
# ---------------------------------------------------------------------------
unit-tests:
needs: determine-changes
if: fromJSON(needs.determine-changes.outputs.flags).unit_tests == 'true'
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
python-version: "3.9"
- os: ubuntu-latest
python-version: "3.10"
- os: ubuntu-latest
python-version: "3.11"
- os: ubuntu-latest
python-version: "3.12"
- os: ubuntu-latest
python-version: "3.13"
- os: ubuntu-latest
python-version: "3.14"
- os: windows-latest
python-version: "3.13"
runs-on: ${{ matrix.os }}
env:
PYTHONIOENCODING: utf-8
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 1
token: ${{ secrets.GITHUB_TOKEN }}
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: ${{ matrix.python-version }}
enable-cache: true
- name: Install dependencies
shell: bash
run: |
if [[ "${{ matrix.python-version }}" == "3.9" || "${{ matrix.python-version }}" == "3.13" ]]; then
uv sync --group tests
else
uv sync
fi
- name: Unit tests
run: uv run pytest tests/
# ---------------------------------------------------------------------------
# Coverage — single run on ubuntu/py3.13 to enforce the coverage floor.
# ---------------------------------------------------------------------------
coverage:
needs: determine-changes
if: fromJSON(needs.determine-changes.outputs.flags).unit_tests == 'true'
runs-on: ubuntu-latest
env:
PYTHONIOENCODING: utf-8
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 1
token: ${{ secrets.GITHUB_TOKEN }}
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: "3.13"
enable-cache: true
- name: Install dependencies
run: uv sync
- name: Run tests with coverage
run: uv run pytest tests/ --ignore=tests/test_tracer.py --cov=codeflash --cov-report=xml:coverage.xml --cov-report=term-missing --cov-config=.coveragerc
- name: Check coverage floor
run: uv run coverage report --fail-under=58
- name: Upload coverage report
if: always()
uses: actions/upload-artifact@v7
with:
name: coverage-report
path: coverage.xml
retention-days: 30
# ---------------------------------------------------------------------------
# Mypy type checking
# ---------------------------------------------------------------------------
type-check:
needs: determine-changes
if: fromJSON(needs.determine-changes.outputs.flags).type_check == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 1
token: ${{ secrets.GITHUB_TOKEN }}
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
enable-cache: true
- name: Install dependencies
run: uv sync
- name: Run mypy
run: uv run mypy --non-interactive --config-file pyproject.toml @mypy_allowlist.txt
# ---------------------------------------------------------------------------
# Lint (prek) — pull_request only
# ---------------------------------------------------------------------------
prek:
needs: determine-changes
if: >-
github.event_name == 'pull_request'
&& (fromJSON(needs.determine-changes.outputs.flags).e2e == 'true'
|| fromJSON(needs.determine-changes.outputs.flags).e2e_js == 'true')
uses: codeflash-ai/github-workflows/.github/workflows/prek-lint.yml@main
permissions:
contents: write
with:
auto-fix: true
checkout-ref: ${{ github.head_ref }}
restore-paths: "codeflash/version.py codeflash-benchmark/codeflash_benchmark/version.py"
# ---------------------------------------------------------------------------
# E2E tests — only on pull_request and workflow_dispatch (not push to main)
# ---------------------------------------------------------------------------
# --- Standard Python E2Es (9 tests) ---
e2e-python:
needs: determine-changes
if: >-
fromJSON(needs.determine-changes.outputs.flags).e2e == 'true'
&& github.event_name != 'push'
&& github.actor != 'dependabot[bot]'
strategy:
fail-fast: false
matrix:
include:
- name: tracer-replay
script: end_to_end_test_tracer_replay.py
expected_improvement: 10
- name: bubble-sort-pytest-nogit
script: end_to_end_test_bubblesort_pytest.py
expected_improvement: 70
remove_git: true
- name: bubble-sort-unittest
script: end_to_end_test_bubblesort_unittest.py
expected_improvement: 40
- name: futurehouse-structure
script: end_to_end_test_futurehouse.py
expected_improvement: 5
- name: topological-sort
script: end_to_end_test_topological_sort_worktree.py
expected_improvement: 5
- name: async-optimization
script: end_to_end_test_async.py
expected_improvement: 10
- name: benchmark-bubble-sort
script: end_to_end_test_benchmark_sort.py
expected_improvement: 5
- name: coverage-e2e
script: end_to_end_test_coverage.py
extra_deps: black
- name: init-optimization
script: end_to_end_test_init_optimization.py
expected_improvement: 10
environment: ${{ ((github.event_name == 'workflow_dispatch' && github.actor != 'misrasaurabh1' && github.actor != 'KRRT7') || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
MAX_RETRIES: 3
RETRY_DELAY: 5
CODEFLASH_END_TO_END: 1
steps:
- uses: actions/checkout@v6
with:
ref: ${{ github.event.pull_request.head.ref || '' }}
repository: ${{ github.event.pull_request.head.repo.full_name || '' }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
if: github.event_name == 'pull_request'
uses: ./.github/actions/validate-pr
with:
base_sha: ${{ github.event.pull_request.base.sha }}
head_sha: ${{ github.event.pull_request.head.sha }}
author: ${{ github.event.pull_request.user.login }}
pr_state: ${{ github.event.pull_request.state }}
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: 3.11.6
enable-cache: true
- name: Install dependencies
run: uv sync
- name: Install extra dependencies
if: matrix.extra_deps
run: uv add ${{ matrix.extra_deps }}
- name: Set test configuration
if: matrix.expected_improvement
run: |
echo "COLUMNS=110" >> "$GITHUB_ENV"
echo "EXPECTED_IMPROVEMENT_PCT=${{ matrix.expected_improvement }}" >> "$GITHUB_ENV"
- name: Remove .git
if: matrix.remove_git
run: |
if [ -d ".git" ]; then
echo ".git directory exists!"
sudo rm -rf .git
if [ -d ".git" ]; then
echo ".git directory still exists after removal attempt!"
exit 1
else
echo ".git directory successfully removed."
fi
else
echo ".git directory does not exist. Nothing to remove."
exit 1
fi
- name: Run E2E test
run: uv run python tests/scripts/${{ matrix.script }}
# --- JS E2Es (3 tests, need Node.js + packages/) ---
e2e-js:
needs: determine-changes
if: >-
fromJSON(needs.determine-changes.outputs.flags).e2e_js == 'true'
&& github.event_name != 'push'
&& github.actor != 'dependabot[bot]'
strategy:
fail-fast: false
matrix:
include:
- name: js-cjs-function
script: end_to_end_test_js_cjs_function.py
js_project_dir: code_to_optimize/js/code_to_optimize_js
expected_improvement: 50
- name: js-esm-async
script: end_to_end_test_js_esm_async.py
js_project_dir: code_to_optimize/js/code_to_optimize_js_esm
expected_improvement: 10
allow_failure: true
- name: js-ts-class
script: end_to_end_test_js_ts_class.py
js_project_dir: code_to_optimize/js/code_to_optimize_ts
expected_improvement: 30
continue-on-error: ${{ matrix.allow_failure || false }}
environment: ${{ ((github.event_name == 'workflow_dispatch' && github.actor != 'misrasaurabh1' && github.actor != 'KRRT7') || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: ${{ matrix.expected_improvement }}
CODEFLASH_END_TO_END: 1
steps:
- uses: actions/checkout@v6
with:
ref: ${{ github.event.pull_request.head.ref || '' }}
repository: ${{ github.event.pull_request.head.repo.full_name || '' }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
if: github.event_name == 'pull_request'
uses: ./.github/actions/validate-pr
with:
base_sha: ${{ github.event.pull_request.base.sha }}
head_sha: ${{ github.event.pull_request.head.sha }}
author: ${{ github.event.pull_request.user.login }}
pr_state: ${{ github.event.pull_request.state }}
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: |
packages/codeflash/package-lock.json
code_to_optimize/js/*/package-lock.json
- name: Install codeflash npm package dependencies
run: |
cd packages/codeflash
npm install
- name: Install JS test project dependencies
run: |
cd ${{ matrix.js_project_dir }}
npm install
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: 3.11.6
enable-cache: true
- name: Install dependencies
run: uv sync
- name: Run E2E test
run: uv run python tests/scripts/${{ matrix.script }}
# --- Java E2Es (3 tests, need JDK + Maven) ---
e2e-java:
needs: determine-changes
if: >-
fromJSON(needs.determine-changes.outputs.flags).e2e_java == 'true'
&& github.event_name != 'push'
&& github.actor != 'dependabot[bot]'
strategy:
fail-fast: false
matrix:
include:
- name: java-fibonacci-nogit
script: end_to_end_test_java_fibonacci.py
expected_improvement: 70
remove_git: true
- name: java-tracer
script: end_to_end_test_java_tracer.py
expected_improvement: 10
- name: java-void-optimization-nogit
script: end_to_end_test_java_void_optimization.py
expected_improvement: 70
remove_git: true
environment: ${{ ((github.event_name == 'workflow_dispatch' && github.actor != 'misrasaurabh1' && github.actor != 'KRRT7') || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: ${{ matrix.expected_improvement }}
CODEFLASH_END_TO_END: 1
CODEFLASH_LOOPING_TIME: 5
steps:
- uses: actions/checkout@v6
with:
ref: ${{ github.event.pull_request.head.ref || '' }}
repository: ${{ github.event.pull_request.head.repo.full_name || '' }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
if: github.event_name == 'pull_request'
uses: ./.github/actions/validate-pr
with:
base_sha: ${{ github.event.pull_request.base.sha }}
head_sha: ${{ github.event.pull_request.head.sha }}
author: ${{ github.event.pull_request.user.login }}
pr_state: ${{ github.event.pull_request.state }}
- name: Set up JDK 11
uses: actions/setup-java@v5
with:
java-version: '11'
distribution: 'temurin'
cache: maven
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: 3.11.6
enable-cache: true
- name: Install dependencies
run: uv sync
- name: Cache codeflash-runtime JAR
id: runtime-jar-cache
uses: actions/cache@v5
with:
path: ~/.m2/repository/io/codeflash
key: codeflash-runtime-${{ hashFiles('codeflash-java-runtime/pom.xml', 'codeflash-java-runtime/src/**') }}
- name: Build and install codeflash-runtime JAR
if: steps.runtime-jar-cache.outputs.cache-hit != 'true'
run: |
cd codeflash-java-runtime
mvn install -q -DskipTests
- name: Remove .git
if: matrix.remove_git
run: |
if [ -d ".git" ]; then
sudo rm -rf .git
echo ".git directory removed."
else
echo ".git directory does not exist."
exit 1
fi
- name: Run E2E test
run: uv run python tests/scripts/${{ matrix.script }}
# ---------------------------------------------------------------------------
# Gate job — the ONLY required check in the GitHub ruleset.
# Accepts "success" and "skipped" (job skipped by change detection).
# Rejects "failure" and "cancelled".
# ---------------------------------------------------------------------------
required-checks-passed:
name: required checks passed
if: always()
needs:
- check-linked-issue
- unit-tests
- coverage
- type-check
- prek
- e2e-python
- e2e-js
- e2e-java
runs-on: ubuntu-latest
steps:
- uses: codeflash-ai/github-workflows/.github/actions/required-checks-gate@main
with:
needs-json: ${{ toJSON(needs) }}

View file

@ -27,17 +27,21 @@ on:
jobs:
# Automatic PR review (can fix linting issues and push)
# Blocked for fork PRs to prevent malicious code execution
# TEMPORARILY DISABLED — re-enable by removing `false &&` below
pr-review:
concurrency:
group: pr-review-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
if: |
false &&
(
(
github.event_name == 'pull_request' &&
github.event.sender.login != 'claude[bot]' &&
github.event.pull_request.head.repo.full_name == github.repository
) ||
github.event_name == 'workflow_dispatch'
)
runs-on: ubuntu-latest
permissions:
contents: write
@ -47,13 +51,15 @@ jobs:
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.ref || github.ref }}
- name: Install uv
uses: astral-sh/setup-uv@v6
uses: astral-sh/setup-uv@v8.1.0
with:
enable-cache: true
- name: Install dependencies
run: |
@ -61,7 +67,7 @@ jobs:
uv sync
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
uses: aws-actions/configure-aws-credentials@v6
with:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
aws-region: ${{ secrets.AWS_REGION }}
@ -307,13 +313,15 @@ jobs:
fi
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 0
ref: ${{ steps.pr-ref.outputs.ref }}
- name: Install uv
uses: astral-sh/setup-uv@v6
uses: astral-sh/setup-uv@v8.1.0
with:
enable-cache: true
- name: Install dependencies
run: |
@ -321,7 +329,7 @@ jobs:
uv sync
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
uses: aws-actions/configure-aws-credentials@v6
with:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
aws-region: ${{ secrets.AWS_REGION }}

View file

@ -3,7 +3,10 @@ name: CodeFlash
on:
pull_request:
paths:
- '**' # Trigger for all paths
- 'codeflash/**'
- 'tests/**'
- 'pyproject.toml'
- 'uv.lock'
workflow_dispatch:
@ -23,14 +26,15 @@ jobs:
COLUMNS: 110
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: 🐍 Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: 3.11.6
enable-cache: true
- name: 📦 Install dependencies (CLI)
run: |
@ -39,4 +43,4 @@ jobs:
- name: Codeflash Optimization
id: optimize_code
run: |
uv run codeflash --benchmark --testgen-review
uv run codeflash --benchmark --testgen-review --no-pr

View file

@ -1,41 +0,0 @@
name: Codeflash
on:
pull_request:
paths:
# So that this workflow only runs when code within the target module is modified
- 'code_to_optimize_js_esm/**'
workflow_dispatch:
concurrency:
# Any new push to the PR will cancel the previous run, so that only the latest code is optimized
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
optimize:
name: Optimize new code
# Don't run codeflash on codeflash-ai[bot] commits, prevent duplicate optimizations
if: ${{ github.actor != 'codeflash-ai[bot]' }}
runs-on: ubuntu-latest
env:
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
defaults:
run:
working-directory: ./code_to_optimize_js_esm
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: 🟢 Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '22'
cache: 'npm'
- name: 📦 Install Dependencies
run: npm ci
- name: ⚡️ Codeflash Optimization
run: npx codeflash

View file

@ -1,73 +0,0 @@
name: E2E - Async
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
async-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 10
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize async code
id: optimize_async_code
run: |
uv run python tests/scripts/end_to_end_test_async.py

View file

@ -1,72 +0,0 @@
name: E2E - Bubble Sort Benchmark
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
benchmark-bubble-sort-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 5
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize code
id: optimize_code_with_benchmarks
run: |
uv run python tests/scripts/end_to_end_test_benchmark_sort.py

View file

@ -1,88 +0,0 @@
name: E2E - Bubble Sort Pytest (No Git)
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
bubble-sort-optimization-pytest-no-git:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 70
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Remove .git
run: |
if [ -d ".git" ]; then
echo ".git directory exists!"
sudo rm -rf .git
if [ -d ".git" ]; then
echo ".git directory still exists after removal attempt!"
exit 1
else
echo ".git directory successfully removed."
fi
else
echo ".git directory does not exist. Nothing to remove."
exit 1
fi
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_bubblesort_pytest.py

View file

@ -1,72 +0,0 @@
name: E2E - Bubble Sort Unittest
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
bubble-sort-optimization-unittest:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 40
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_bubblesort_unittest.py

View file

@ -1,71 +0,0 @@
name: Coverage E2E
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
end-to-end-test-coverage:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
MAX_RETRIES: 3
RETRY_DELAY: 5
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
uv add black # my-best-repo in end_to_end_test_coverage.py is configured to use black
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_coverage.py

View file

@ -1,72 +0,0 @@
name: E2E - Futurehouse Structure
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
futurehouse-structure:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 5
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_futurehouse.py

View file

@ -1,71 +0,0 @@
name: E2E - Init Optimization
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
init-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 10
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_init_optimization.py

View file

@ -1,105 +0,0 @@
name: E2E - Java Fibonacci (No Git)
on:
pull_request:
paths:
- 'codeflash/languages/java/**'
- 'codeflash/languages/base.py'
- 'codeflash/languages/registry.py'
- 'codeflash/optimization/**'
- 'codeflash/verification/**'
- 'code_to_optimize/java/**'
- 'codeflash-java-runtime/**'
- 'tests/scripts/end_to_end_test_java_fibonacci.py'
- '.github/workflows/e2e-java-fibonacci-nogit.yaml'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
java-fibonacci-optimization-no-git:
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 70
CODEFLASH_END_TO_END: 1
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
env:
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
PR_STATE: ${{ github.event.pull_request.state }}
BASE_SHA: ${{ github.event.pull_request.base.sha }}
HEAD_SHA: ${{ github.event.pull_request.head.sha }}
run: |
if git diff --name-only "$BASE_SHA" "$HEAD_SHA" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
echo "PR Author: $PR_AUTHOR"
if [[ "$PR_AUTHOR" == "misrasaurabh1" || "$PR_AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($PR_AUTHOR). Proceeding."
elif [[ "$PR_STATE" == "open" ]]; then
echo "✅ PR is open. Proceeding."
else
echo "⛔ Unauthorized user ($PR_AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up JDK 11
uses: actions/setup-java@v4
with:
java-version: '11'
distribution: 'temurin'
cache: maven
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: uv sync
- name: Build codeflash-runtime JAR
run: |
cd codeflash-java-runtime
mvn clean package -q -DskipTests
mvn install -q -DskipTests
- name: Verify Java installation
run: |
java -version
mvn --version
- name: Remove .git
run: |
if [ -d ".git" ]; then
sudo rm -rf .git
echo ".git directory removed."
else
echo ".git directory does not exist."
exit 1
fi
- name: Run Codeflash to optimize Fibonacci
run: |
uv run python tests/scripts/end_to_end_test_java_fibonacci.py

View file

@ -1,90 +0,0 @@
name: E2E - Java Tracer
on:
pull_request:
paths:
- 'codeflash/**'
- 'codeflash-java-runtime/**'
- 'tests/**'
- '.github/workflows/e2e-java-tracer.yaml'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
java-tracer-e2e:
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 10
CODEFLASH_END_TO_END: 1
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
env:
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
PR_STATE: ${{ github.event.pull_request.state }}
BASE_SHA: ${{ github.event.pull_request.base.sha }}
HEAD_SHA: ${{ github.event.pull_request.head.sha }}
run: |
if git diff --name-only "$BASE_SHA" "$HEAD_SHA" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
echo "PR Author: $PR_AUTHOR"
if [[ "$PR_AUTHOR" == "misrasaurabh1" || "$PR_AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($PR_AUTHOR). Proceeding."
elif [[ "$PR_STATE" == "open" ]]; then
echo "✅ PR is open. Proceeding."
else
echo "⛔ Unauthorized user ($PR_AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up JDK 11
uses: actions/setup-java@v4
with:
java-version: '11'
distribution: 'temurin'
cache: maven
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: uv sync
- name: Build codeflash-runtime JAR
run: |
cd codeflash-java-runtime
mvn clean package -q -DskipTests
mvn install -q -DskipTests
- name: Verify Java installation
run: |
java -version
mvn --version
- name: Run Java tracer e2e test
run: |
uv run python tests/scripts/end_to_end_test_java_tracer.py

View file

@ -1,88 +0,0 @@
name: E2E - JS CommonJS Function
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
js-cjs-function-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 50
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install codeflash npm package dependencies
run: |
cd packages/codeflash
npm install
- name: Install JS test project dependencies
run: |
cd code_to_optimize/js/code_to_optimize_js
npm install
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize JS CommonJS function
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_js_cjs_function.py

View file

@ -1,88 +0,0 @@
name: E2E - JS ESM Async
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
js-esm-async-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 10
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install codeflash npm package dependencies
run: |
cd packages/codeflash
npm install
- name: Install JS test project dependencies
run: |
cd code_to_optimize/js/code_to_optimize_js_esm
npm install
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize ESM async function
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_js_esm_async.py

View file

@ -1,88 +0,0 @@
name: E2E - JS TypeScript Class
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
js-ts-class-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 30
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install codeflash npm package dependencies
run: |
cd packages/codeflash
npm install
- name: Install JS test project dependencies
run: |
cd code_to_optimize/js/code_to_optimize_ts
npm install
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize TypeScript class method
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_js_ts_class.py

View file

@ -1,97 +0,0 @@
name: E2E - Topological Sort (Worktree)
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
topological-sort-worktree-optimization:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 5
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Debug Environment Decision
run: |
# Construct the condition result manually for debugging
EVENT_NAME="${{ github.event_name }}"
FILES_CHANGED="${{ toJSON(github.event.pull_request.files.*.filename) }}"
PR_AUTHOR="${{ github.event.pull_request.user.login }}"
echo "Event Name: $EVENT_NAME"
echo "Files Changed: $FILES_CHANGED"
echo "PR Author: $PR_AUTHOR"
# Check workflow file changes
if [[ "$FILES_CHANGED" == *".github/workflows/"* ]]; then
echo "Workflow files changed: YES"
else
echo "Workflow files changed: NO"
fi
# Check author conditions
if [[ "$PR_AUTHOR" != "misrasaurabh1" && "$PR_AUTHOR" != "KRRT7" ]]; then
echo "Author needs approval: YES"
else
echo "Author needs approval: NO"
fi
# Selected environment
echo "Selected Environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}"
- name: Validate PR for workflow changes
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_topological_sort_worktree.py

View file

@ -1,72 +0,0 @@
name: E2E - Tracer Replay
on:
pull_request:
paths:
- '**' # Trigger for all paths
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
tracer-replay:
# Dynamically determine if environment is needed only when workflow files change and contributor is external
environment: ${{ (github.event_name == 'workflow_dispatch' || (contains(toJSON(github.event.pull_request.files.*.filename), '.github/workflows/') && github.event.pull_request.user.login != 'misrasaurabh1' && github.event.pull_request.user.login != 'KRRT7')) && 'external-trusted-contributors' || '' }}
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: 10
CODEFLASH_END_TO_END: 1
steps:
- name: 🛎️ Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Validate PR
run: |
# Check for any workflow changes
if git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}" | grep -q "^.github/workflows/"; then
echo "⚠️ Workflow changes detected."
# Get the PR author
AUTHOR="${{ github.event.pull_request.user.login }}"
echo "PR Author: $AUTHOR"
# Allowlist check
if [[ "$AUTHOR" == "misrasaurabh1" || "$AUTHOR" == "KRRT7" ]]; then
echo "✅ Authorized user ($AUTHOR). Proceeding."
elif [[ "${{ github.event.pull_request.state }}" == "open" ]]; then
echo "✅ PR triggered by 'pull_request_target' and is open. Assuming protection rules are in place. Proceeding."
else
echo "⛔ Unauthorized user ($AUTHOR) attempting to modify workflows. Exiting."
exit 1
fi
else
echo "✅ No workflow file changes detected. Proceeding."
fi
- name: Set up Python 3.11 for CLI
uses: astral-sh/setup-uv@v6
with:
python-version: 3.11.6
- name: Install dependencies (CLI)
run: |
uv sync
- name: Run Codeflash to optimize code
id: optimize_code
run: |
uv run python tests/scripts/end_to_end_test_tracer_replay.py

View file

@ -1,76 +0,0 @@
name: Java E2E Tests
on:
push:
branches:
- main
- omni-java
paths:
- 'codeflash/languages/java/**'
- 'tests/test_languages/test_java*.py'
- 'code_to_optimize/java/**'
- '.github/workflows/java-e2e-tests.yml'
pull_request:
paths:
- 'codeflash/languages/java/**'
- 'tests/test_languages/test_java*.py'
- 'code_to_optimize/java/**'
- '.github/workflows/java-e2e-tests.yml'
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
java-e2e:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK 11
uses: actions/setup-java@v4
with:
java-version: '11'
distribution: 'temurin'
cache: maven
- name: Install uv
uses: astral-sh/setup-uv@v6
- name: Set up Python environment
run: |
uv venv --seed
uv sync
- name: Verify Java installation
run: |
java -version
mvn --version
- name: Build codeflash-runtime JAR
run: |
cd codeflash-java-runtime
mvn clean package -q -DskipTests
mvn install -q -DskipTests
- name: Build Java sample project
run: |
cd code_to_optimize/java
mvn compile -q
- name: Run Java sample project tests
run: |
cd code_to_optimize/java
mvn test -q
- name: Run Java E2E tests
run: |
uv run pytest tests/test_languages/test_java_e2e.py -v --tb=short
- name: Run Java unit tests
run: |
uv run pytest tests/test_languages/test_java/ -v --tb=short -x

77
.github/workflows/java-e2e.yaml vendored Normal file
View file

@ -0,0 +1,77 @@
name: Java E2E Tests
on:
workflow_dispatch:
jobs:
e2e-java:
strategy:
fail-fast: false
matrix:
include:
- name: java-fibonacci-nogit
script: end_to_end_test_java_fibonacci.py
expected_improvement: 70
remove_git: true
- name: java-tracer
script: end_to_end_test_java_tracer.py
expected_improvement: 10
- name: java-void-optimization-nogit
script: end_to_end_test_java_void_optimization.py
expected_improvement: 70
remove_git: true
runs-on: ubuntu-latest
env:
CODEFLASH_AIS_SERVER: prod
POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }}
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
COLUMNS: 110
MAX_RETRIES: 3
RETRY_DELAY: 5
EXPECTED_IMPROVEMENT_PCT: ${{ matrix.expected_improvement }}
CODEFLASH_END_TO_END: 1
CODEFLASH_LOOPING_TIME: 5
steps:
- uses: actions/checkout@v6
- name: Set up JDK 11
uses: actions/setup-java@v5
with:
java-version: '11'
distribution: 'temurin'
cache: maven
- name: Install uv
uses: astral-sh/setup-uv@v8.1.0
with:
python-version: 3.11.6
enable-cache: true
- name: Install dependencies
run: uv sync
- name: Cache codeflash-runtime JAR
id: runtime-jar-cache
uses: actions/cache@v5
with:
path: ~/.m2/repository/io/codeflash
key: codeflash-runtime-${{ hashFiles('codeflash-java-runtime/pom.xml', 'codeflash-java-runtime/src/**') }}
- name: Build and install codeflash-runtime JAR
if: steps.runtime-jar-cache.outputs.cache-hit != 'true'
run: |
cd codeflash-java-runtime
mvn install -q -DskipTests
- name: Remove .git
if: matrix.remove_git
run: |
if [ -d ".git" ]; then
sudo rm -rf .git
echo ".git directory removed."
else
echo ".git directory does not exist."
exit 1
fi
- name: Run E2E test
run: uv run python tests/scripts/${{ matrix.script }}

View file

@ -13,7 +13,7 @@ jobs:
pull-requests: write
steps:
- name: Label PR with workflow changes
uses: actions/github-script@v6
uses: actions/github-script@v9
with:
script: |
const labelName = 'workflow-modified';

View file

@ -1,33 +0,0 @@
name: Mypy Type Checking for CLI
on:
push:
branches:
- main
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
type-check-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Install uv
uses: astral-sh/setup-uv@v6
- name: sync uv
run: |
uv venv --seed
uv sync
- name: Run mypy on allowlist
run: uv run mypy --non-interactive --config-file pyproject.toml @mypy_allowlist.txt

View file

@ -1,18 +0,0 @@
name: Lint
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
prek:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: astral-sh/setup-uv@v6
- uses: j178/prek-action@v1
with:
extra-args: '--from-ref origin/${{ github.base_ref }} --to-ref ${{ github.sha }}'

View file

@ -16,7 +16,7 @@ jobs:
benchmark: ${{ steps.filter.outputs.benchmark }}
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 2
@ -34,9 +34,27 @@ jobs:
echo "benchmark=false" >> $GITHUB_OUTPUT
fi
publish-codeflash:
publish:
needs: detect-changes
if: needs.detect-changes.outputs.codeflash == 'true'
if: >-
needs.detect-changes.outputs.codeflash == 'true'
|| needs.detect-changes.outputs.benchmark == 'true'
strategy:
fail-fast: false
matrix:
include:
- package: codeflash
version_file: codeflash/version.py
tag_prefix: v
build_cmd: uv build
flag: codeflash
release_name_prefix: "Release"
- package: codeflash-benchmark
version_file: codeflash-benchmark/codeflash_benchmark/version.py
tag_prefix: benchmark-v
build_cmd: uv build --package codeflash-benchmark
flag: benchmark
release_name_prefix: "codeflash-benchmark"
runs-on: ubuntu-latest
environment:
name: pypi
@ -44,92 +62,34 @@ jobs:
id-token: write
contents: write
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Extract version from version.py
id: extract_version
- name: Check if this matrix leg should run
id: should_run
run: |
VERSION=$(grep -oP '__version__ = "\K[^"]+' codeflash/version.py)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "tag=v$VERSION" >> $GITHUB_OUTPUT
echo "Extracted version: $VERSION"
- name: Check if tag already exists
id: check_tag
run: |
if git rev-parse "v${{ steps.extract_version.outputs.version }}" >/dev/null 2>&1; then
echo "exists=true" >> $GITHUB_OUTPUT
echo "Tag v${{ steps.extract_version.outputs.version }} already exists, skipping release"
if [[ "${{ matrix.flag }}" == "codeflash" && "${{ needs.detect-changes.outputs.codeflash }}" == "true" ]]; then
echo "run=true" >> $GITHUB_OUTPUT
elif [[ "${{ matrix.flag }}" == "benchmark" && "${{ needs.detect-changes.outputs.benchmark }}" == "true" ]]; then
echo "run=true" >> $GITHUB_OUTPUT
else
echo "exists=false" >> $GITHUB_OUTPUT
echo "Tag v${{ steps.extract_version.outputs.version }} does not exist, proceeding with release"
echo "run=false" >> $GITHUB_OUTPUT
fi
- name: Create and push git tag
if: steps.check_tag.outputs.exists == 'false'
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git tag -a "${{ steps.extract_version.outputs.tag }}" -m "Release ${{ steps.extract_version.outputs.tag }}"
git push origin "${{ steps.extract_version.outputs.tag }}"
- name: Install uv
if: steps.check_tag.outputs.exists == 'false'
uses: astral-sh/setup-uv@v6
- name: Build
if: steps.check_tag.outputs.exists == 'false'
run: uv build
- name: Publish to PyPI
if: steps.check_tag.outputs.exists == 'false'
run: uv publish
- name: Create GitHub Release
if: steps.check_tag.outputs.exists == 'false'
uses: softprops/action-gh-release@v2
with:
tag_name: ${{ steps.extract_version.outputs.tag }}
name: Release ${{ steps.extract_version.outputs.tag }}
body: |
## What's Changed
Release ${{ steps.extract_version.outputs.version }} of codeflash.
**Full Changelog**: https://github.com/${{ github.repository }}/commits/${{ steps.extract_version.outputs.tag }}
draft: false
prerelease: false
generate_release_notes: true
files: |
dist/*
publish-benchmark:
needs: detect-changes
if: needs.detect-changes.outputs.benchmark == 'true'
runs-on: ubuntu-latest
environment:
name: pypi
permissions:
id-token: write
contents: write
steps:
- name: Checkout
uses: actions/checkout@v5
if: steps.should_run.outputs.run == 'true'
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Extract version from version.py
if: steps.should_run.outputs.run == 'true'
id: extract_version
run: |
VERSION=$(grep -oP '__version__ = "\K[^"]+' codeflash-benchmark/codeflash_benchmark/version.py)
VERSION=$(grep -oP '__version__ = "\K[^"]+' ${{ matrix.version_file }})
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "tag=benchmark-v$VERSION" >> $GITHUB_OUTPUT
echo "tag=${{ matrix.tag_prefix }}$VERSION" >> $GITHUB_OUTPUT
echo "Extracted version: $VERSION"
- name: Check if tag already exists
if: steps.should_run.outputs.run == 'true'
id: check_tag
run: |
if git rev-parse "${{ steps.extract_version.outputs.tag }}" >/dev/null 2>&1; then
@ -141,7 +101,7 @@ jobs:
fi
- name: Create and push git tag
if: steps.check_tag.outputs.exists == 'false'
if: steps.should_run.outputs.run == 'true' && steps.check_tag.outputs.exists == 'false'
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
@ -149,31 +109,32 @@ jobs:
git push origin "${{ steps.extract_version.outputs.tag }}"
- name: Install uv
if: steps.check_tag.outputs.exists == 'false'
uses: astral-sh/setup-uv@v6
if: steps.should_run.outputs.run == 'true' && steps.check_tag.outputs.exists == 'false'
uses: astral-sh/setup-uv@v8.1.0
with:
enable-cache: true
- name: Build
if: steps.check_tag.outputs.exists == 'false'
run: uv build --package codeflash-benchmark
if: steps.should_run.outputs.run == 'true' && steps.check_tag.outputs.exists == 'false'
run: ${{ matrix.build_cmd }}
- name: Publish to PyPI
if: steps.check_tag.outputs.exists == 'false'
if: steps.should_run.outputs.run == 'true' && steps.check_tag.outputs.exists == 'false'
run: uv publish
- name: Create GitHub Release
if: steps.check_tag.outputs.exists == 'false'
uses: softprops/action-gh-release@v2
if: steps.should_run.outputs.run == 'true' && steps.check_tag.outputs.exists == 'false'
uses: softprops/action-gh-release@v3
with:
tag_name: ${{ steps.extract_version.outputs.tag }}
name: codeflash-benchmark ${{ steps.extract_version.outputs.tag }}
name: ${{ matrix.release_name_prefix }} ${{ steps.extract_version.outputs.tag }}
body: |
## What's Changed
Release ${{ steps.extract_version.outputs.version }} of codeflash-benchmark.
Release ${{ steps.extract_version.outputs.version }} of ${{ matrix.package }}.
**Full Changelog**: https://github.com/${{ github.repository }}/commits/${{ steps.extract_version.outputs.tag }}
draft: false
prerelease: false
generate_release_notes: true
files: |
dist/*

View file

@ -1,69 +0,0 @@
name: unit-tests
on:
push:
branches: [main]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
jobs:
unit-tests:
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
python-version: "3.9"
- os: ubuntu-latest
python-version: "3.10"
- os: ubuntu-latest
python-version: "3.11"
- os: ubuntu-latest
python-version: "3.12"
- os: ubuntu-latest
python-version: "3.13"
- os: ubuntu-latest
python-version: "3.14"
- os: windows-latest
python-version: "3.13"
continue-on-error: true
runs-on: ${{ matrix.os }}
env:
PYTHONIOENCODING: utf-8
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up JDK 11
uses: actions/setup-java@v4
with:
java-version: '11'
distribution: 'temurin'
cache: maven
- name: Build codeflash-runtime JAR
run: |
cd codeflash-java-runtime
mvn clean package -q -DskipTests
mvn install -q -DskipTests
- name: Install uv
uses: astral-sh/setup-uv@v6
with:
python-version: ${{ matrix.python-version }}
- name: install dependencies
run: uv sync
- name: Install test-only dependencies (Python 3.9 and 3.13)
if: matrix.python-version == '3.9' || matrix.python-version == '3.13'
run: uv sync --group tests
- name: Unit tests
run: uv run pytest tests/

8
.gitignore vendored
View file

@ -266,15 +266,19 @@ WARP.MD
.tessl/
tessl.json
# Claude Code - track shared rules, ignore local config
# Claude Code - track shared config, ignore local state
.claude/*
!.claude/rules/
!.claude/settings.json
!.claude/hooks/
!.claude/skills/
!.claude/settings.json
**/node_modules/**
**/dist-nuitka/**
**/.npmrc
# Test fixture lockfiles — prevents Dependabot from scanning them
code_to_optimize/**/package-lock.json
# Tessl auto-generates AGENTS.md on install; ignore to avoid cluttering git status
AGENTS.md
.serena/

View file

@ -2,7 +2,14 @@ repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.15.8
hooks:
# Run the linter.
- id: ruff-check
# Run the formatter.
- id: ruff-format
- repo: local
hooks:
- id: mypy
name: mypy
entry: uv run mypy --non-interactive --config-file pyproject.toml
language: system
types: [python]
require_serial: true

View file

@ -7,19 +7,29 @@ CodeFlash is an AI-powered code optimizer that automatically improves performanc
## Optimization Pipeline
```
Discovery → Ranking → Context Extraction → Test Gen + Optimization → Baseline → Candidate Evaluation → PR
Discovery -> Ranking -> Context Extraction -> Test Gen + Optimization -> Baseline -> Candidate Evaluation -> PR
```
See `.claude/rules/architecture.md` for directory mapping and entry points.
# Instructions
- **Bug fix workflow** — follow these steps in order, do not skip ahead:
## Setup
```bash
uv sync # Install all dependencies
uv run prek install # Install git pre-commit hooks (ruff + mypy)
```
## Bug Fix Workflow
Follow these steps in order, do not skip ahead:
1. Read the relevant code to understand the bug
2. Write a test that reproduces the bug (run it to confirm it fails)
3. Spawn subagents (using the Agent tool) to attempt the fix — each subagent should apply a fix and run the test to prove it passes
4. Review the subagent results, pick the best fix, and apply it
5. Never jump straight to writing a fix yourself — always go through steps 1-4
- Everything that can be tested should have tests.
Everything that can be tested should have tests.
<!-- Section below is auto-generated by `tessl install` - do not edit manually -->

157
CONTRIBUTING.md Normal file
View file

@ -0,0 +1,157 @@
# Contributing to Codeflash
Thanks for your interest in contributing. This guide covers both contributing changes back to Codeflash itself and running Codeflash from this repository in editable mode to optimize your own projects.
## Table of contents
- [Quick links](#quick-links)
- [Ways to contribute](#ways-to-contribute)
- [Development environment](#development-environment)
- [Running tests and checks](#running-tests-and-checks)
- [Code style](#code-style)
- [Branches, commits, and pull requests](#branches-commits-and-pull-requests)
- [Using Codeflash in editable mode](#using-codeflash-in-editable-mode)
- [Reporting bugs and requesting features](#reporting-bugs-and-requesting-features)
- [Security issues](#security-issues)
## Quick links
- Issues: https://github.com/codeflash-ai/codeflash/issues
- Discussions and support: [Discord](https://www.codeflash.ai/discord)
- Documentation: https://docs.codeflash.ai
- Security policy: [`SECURITY.md`](SECURITY.md)
- Project conventions for AI agents and humans alike: [`CLAUDE.md`](CLAUDE.md)
## Ways to contribute
- **Bug reports**: open an issue with a minimal reproducer that fails on `main`.
- **Bug fixes**: follow the bug-fix workflow in [`CLAUDE.md`](CLAUDE.md) - read the code, write a failing test, apply the fix, confirm the test now passes.
- **Features**: open an issue first for anything non-trivial so the approach can be agreed before implementation.
- **Documentation**: the full documentation lives at https://docs.codeflash.ai. Fixes to README, docstrings, and this guide can be submitted as PRs here.
- **Language support**: Codeflash supports Python, JavaScript / TypeScript, and Java today. New language support is a significant effort - please start with an issue.
## Development environment
### Prerequisites
- Python 3.9 or newer
- [`uv`](https://github.com/astral-sh/uv) for dependency management (required - do not use `pip` directly)
- `git`
- For JavaScript end-to-end tests: Node.js and `npm`
- For Java end-to-end tests: a JDK (see `.github/workflows/java-e2e.yaml` for the tested version)
### Setup
Fork the repository, clone your fork, and install the dev dependencies with `uv`:
```bash
git clone https://github.com/<your-username>/codeflash.git
cd codeflash
uv sync
```
`uv sync` installs Codeflash plus the `dev` dependency group (ruff, mypy, ipython, type stubs). The `codeflash` CLI is installed into the virtualenv and can be invoked via `uv run codeflash ...`.
### Optional: point at your fork's upstream
```bash
git remote add upstream https://github.com/codeflash-ai/codeflash.git
git fetch upstream
```
## Running tests and checks
Use `uv run prek` as the single verification command. It runs ruff (lint + format), mypy (strict), and related checks in one pass, matching what CI runs.
```bash
# Check every changed file against the pre-commit hooks locally
uv run prek
# Match CI behavior: check everything changed against the PR base branch
BASE=$(gh pr view --json baseRefName -q .baseRefName 2>/dev/null || echo main)
uv run prek run --from-ref origin/$BASE
```
Run the test suite with pytest via `uv`:
```bash
uv run pytest tests/
```
To run a subset:
```bash
uv run pytest tests/code_utils/ -k "test_something"
```
End-to-end tests live under `code_to_optimize/` and are exercised by CI (`.github/workflows/ci.yaml`, `java-e2e.yaml`). They can be run locally by invoking the scripts referenced from those workflows if you have the relevant runtime installed.
## Code style
The full ruleset is in [`.claude/rules/code-style.md`](.claude/rules/code-style.md). Highlights:
- **Line length**: 120 characters.
- **Python**: 3.9+ syntax. Use type annotations consistent with surrounding code.
- **Package management**: `uv` only. Do not add dependencies with `pip install`.
- **Docstrings**: do not add docstrings to new or changed code unless explicitly requested. The codebase intentionally keeps functions self-documenting through clear naming and type annotations.
- **Naming**: no leading underscores on Python names (`_private` style). Python has no true private functions; use public names.
- **File I/O**: always pass `encoding="utf-8"` to `open()`, `Path.read_text()`, `Path.write_text()`, and similar calls in new or changed code. Windows defaults to `cp1252`, which breaks on non-ASCII content.
- **Paths**: prefer absolute paths internally.
- **Verification**: `uv run prek` is the canonical check. Don't run `ruff`, `mypy`, or `python -c "import ..."` separately; `prek` handles them together.
## Branches, commits, and pull requests
- **Every PR must link an issue or discussion.** Use `Closes #<number>`, `Fixes #<number>`, or `Relates to #<number>` in the PR body. CI will fail if no linked issue or discussion is found. For trivial fixes (typos, formatting), open a lightweight issue first — it only takes a moment and keeps the history traceable. The goal is to have a conversation before the code — discussing the approach on an issue or discussion helps maintainers point you in the right direction early, so your implementation fits the project's needs and you don't spend time on work that gets reworked.
- Create a feature branch off an up-to-date `main`. Never commit directly to `main`.
- Use conventional-commit prefixes: `fix:`, `feat:`, `refactor:`, `docs:`, `test:`, `chore:`. Keep commit messages concise (1-2 sentence body max).
- Keep commits atomic - one logical change per commit.
- PR titles also use the conventional format. The PR body should be short and link the related issue.
- If the change corresponds to a Linear ticket, include `CF-#<number>` in the PR body.
- Run `uv run prek` (or `uv run prek run --from-ref origin/main`) before pushing. CI will block merge if hooks fail.
## Using Codeflash in editable mode
If you want to use Codeflash itself to optimize your own Python projects while developing or testing changes to Codeflash, you can install it in editable mode from this repository.
### Install as an editable dependency
From your target project's directory:
```bash
# Using uv (recommended)
uv add --editable /absolute/path/to/your/codeflash/checkout
# Or, if you use pip inside a virtualenv
pip install -e /absolute/path/to/your/codeflash/checkout
```
From the Codeflash repository root you can also run the CLI directly without installing into the target project:
```bash
cd /absolute/path/to/your/codeflash/checkout
uv run codeflash init # in the target project, cwd matters
uv run codeflash --all # optimize the entire target codebase
uv run codeflash optimize script.py
```
You will still need a Codeflash API key - `uv run codeflash init` walks through key generation and GitHub app setup. See the [Quick Start in the README](README.md#quick-start) for the full flow.
### When to use editable mode
- You are iterating on a Codeflash change and want to dogfood it against a real codebase.
- You need to reproduce a bug your target project hits, with your local patches applied.
- You are developing a new optimization rule, heuristic, or language integration and want end-to-end coverage beyond `tests/`.
For day-to-day optimization of a project you are not hacking on Codeflash itself, install the released package from PyPI (`pip install codeflash` or `uv add codeflash`) instead.
## Reporting bugs and requesting features
Before filing a new issue, please:
1. Search existing [open and closed issues](https://github.com/codeflash-ai/codeflash/issues?q=is%3Aissue) to avoid duplicates.
2. Include the Codeflash version (`codeflash --version`) and Python / uv versions.
3. Include the smallest reproducer you can. For bugs, a failing test that exercises the behavior is ideal.
## Security issues
Do not report suspected security issues in public GitHub issues. See [`SECURITY.md`](SECURITY.md) for the reporting process.

0
benchmarks/__init__.py Normal file
View file

View file

@ -0,0 +1,72 @@
"""Benchmark CLI startup latency for codeflash compare --script mode.
Run from a worktree root. Installs deps via uv sync, then times several
CLI entry points and writes a JSON file mapping command names to median
wall-clock seconds.
Usage:
codeflash compare main codeflash/optimize \
--script "python benchmarks/bench_cli_startup.py" \
--script-output benchmarks/results.json
"""
from __future__ import annotations

import json
import os
import statistics
import subprocess
import time
from pathlib import Path
WARMUP = 3
RUNS = 30
OUTPUT = os.environ.get("BENCH_OUTPUT", "benchmarks/results.json")
COMMANDS: dict[str, list[str]] = {
"version": ["uv", "run", "codeflash", "--version"],
"help": ["uv", "run", "codeflash", "--help"],
"auth_status": ["uv", "run", "codeflash", "auth", "status"],
"compare_help": ["uv", "run", "codeflash", "compare", "--help"],
}
def measure(cmd: list[str], warmup: int = WARMUP, runs: int = RUNS) -> float:
"""Return median wall-clock seconds for *cmd* over *runs* iterations."""
env = {**os.environ, "CODEFLASH_API_KEY": "bench_dummy_key"}
for _ in range(warmup):
subprocess.run(cmd, capture_output=True, check=False, env=env)
times: list[float] = []
for _ in range(runs):
t0 = time.perf_counter()
subprocess.run(cmd, capture_output=True, check=False, env=env)
times.append(time.perf_counter() - t0)
times.sort()
mid = len(times) // 2
return times[mid] if len(times) % 2 else (times[mid - 1] + times[mid]) / 2
def main() -> None:
# Ensure deps are installed in the worktree
subprocess.run(["uv", "sync"], check=True, capture_output=True)
results: dict[str, float] = {}
for name, cmd in COMMANDS.items():
print(f" {name}: ", end="", flush=True)
median = measure(cmd)
results[name] = round(median, 4)
print(f"{median * 1000:.0f} ms")
# Total = sum of medians (useful for a single summary number)
results["__total__"] = round(sum(results.values()), 4)
output_path = Path(OUTPUT)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as f:
json.dump(results, f, indent=2)
print(f"\nResults written to {OUTPUT}")
if __name__ == "__main__":
main()

View file

@ -0,0 +1,21 @@
package com.example;
public class InPlaceSorter {
public static void bubbleSortInPlace(int[] arr) {
if (arr == null || arr.length <= 1) {
return;
}
int n = arr.length;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n - 1; j++) {
if (arr[j] > arr[j + 1]) {
int temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
}
}
}
}
}

View file

@ -0,0 +1,21 @@
package com.example;
public class InstanceSorter {
public void bubbleSortInPlace(int[] arr) {
if (arr == null || arr.length <= 1) {
return;
}
int n = arr.length;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n - 1; j++) {
if (arr[j] > arr[j + 1]) {
int temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
}
}
}
}
}

View file

@ -0,0 +1,62 @@
package com.example;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
class InPlaceSorterTest {
@Test
void testBubbleSortInPlace() {
int[] arr = {5, 3, 1, 4, 2};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, arr);
}
@Test
void testBubbleSortInPlaceAlreadySorted() {
int[] arr = {1, 2, 3, 4, 5};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, arr);
}
@Test
void testBubbleSortInPlaceReversed() {
int[] arr = {5, 4, 3, 2, 1};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, arr);
}
@Test
void testBubbleSortInPlaceWithDuplicates() {
int[] arr = {3, 2, 4, 1, 3, 2};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 2, 3, 3, 4}, arr);
}
@Test
void testBubbleSortInPlaceWithNegatives() {
int[] arr = {3, -2, 7, 0, -5};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{-5, -2, 0, 3, 7}, arr);
}
@Test
void testBubbleSortInPlaceSingleElement() {
int[] arr = {42};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{42}, arr);
}
@Test
void testBubbleSortInPlaceEmpty() {
int[] arr = {};
InPlaceSorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{}, arr);
}
@Test
void testBubbleSortInPlaceNull() {
InPlaceSorter.bubbleSortInPlace(null);
}
}

View file

@ -0,0 +1,69 @@
package com.example;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
class InstanceSorterTest {
@Test
void testBubbleSortInPlace() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {5, 3, 1, 4, 2};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, arr);
}
@Test
void testBubbleSortInPlaceAlreadySorted() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {1, 2, 3, 4, 5};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, arr);
}
@Test
void testBubbleSortInPlaceReversed() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {5, 4, 3, 2, 1};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 3, 4, 5}, arr);
}
@Test
void testBubbleSortInPlaceWithDuplicates() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {3, 2, 4, 1, 3, 2};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{1, 2, 2, 3, 3, 4}, arr);
}
@Test
void testBubbleSortInPlaceWithNegatives() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {3, -2, 7, 0, -5};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{-5, -2, 0, 3, 7}, arr);
}
@Test
void testBubbleSortInPlaceSingleElement() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {42};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{42}, arr);
}
@Test
void testBubbleSortInPlaceEmpty() {
InstanceSorter sorter = new InstanceSorter();
int[] arr = {};
sorter.bubbleSortInPlace(arr);
assertArrayEquals(new int[]{}, arr);
}
@Test
void testBubbleSortInPlaceNull() {
InstanceSorter sorter = new InstanceSorter();
sorter.bubbleSortInPlace(null);
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,966 +0,0 @@
{
"name": "code-to-optimize-mocha",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "code-to-optimize-mocha",
"version": "1.0.0",
"devDependencies": {
"codeflash": "file:../../../packages/codeflash",
"mocha": "^10.8.2"
}
},
"../../../packages/codeflash": {
"version": "0.10.1",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
"@msgpack/msgpack": "^3.0.0",
"better-sqlite3": "^12.0.0"
},
"bin": {
"codeflash": "bin/codeflash.js",
"codeflash-setup": "bin/codeflash-setup.js"
},
"engines": {
"node": ">=18.0.0"
},
"peerDependencies": {
"jest": ">=27.0.0",
"jest-runner": ">=27.0.0",
"vitest": ">=1.0.0"
},
"peerDependenciesMeta": {
"jest": {
"optional": true
},
"jest-runner": {
"optional": true
},
"vitest": {
"optional": true
}
}
},
"node_modules/ansi-colors": {
"version": "4.1.3",
"resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
"integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"license": "MIT",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
"integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
"dev": true,
"license": "ISC",
"dependencies": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true,
"license": "Python-2.0"
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true,
"license": "MIT"
},
"node_modules/binary-extensions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
"integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/brace-expansion": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"license": "MIT",
"dependencies": {
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browser-stdout": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz",
"integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==",
"dev": true,
"license": "ISC"
},
"node_modules/camelcase": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
"integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/chalk/node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/chokidar": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
"integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
"dev": true,
"license": "MIT",
"dependencies": {
"anymatch": "~3.1.2",
"braces": "~3.0.2",
"glob-parent": "~5.1.2",
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.6.0"
},
"engines": {
"node": ">= 8.10.0"
},
"funding": {
"url": "https://paulmillr.com/funding/"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/cliui": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz",
"integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==",
"dev": true,
"license": "ISC",
"dependencies": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.0",
"wrap-ansi": "^7.0.0"
}
},
"node_modules/codeflash": {
"resolved": "../../../packages/codeflash",
"link": true
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"license": "MIT"
},
"node_modules/debug": {
"version": "4.4.3",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
"integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decamelize": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz",
"integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/diff": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/diff/-/diff-5.2.2.tgz",
"integrity": "sha512-vtcDfH3TOjP8UekytvnHH1o1P4FcUdt4eQ1Y+Abap1tk/OB2MWQvcwS2ClCd1zuIhc3JKOx6p3kod8Vfys3E+A==",
"dev": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true,
"license": "MIT"
},
"node_modules/escalade": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
"integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"license": "MIT",
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
"integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
"dev": true,
"license": "MIT",
"dependencies": {
"locate-path": "^6.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/flat": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz",
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==",
"dev": true,
"license": "BSD-3-Clause",
"bin": {
"flat": "cli.js"
}
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true,
"license": "ISC"
},
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/get-caller-file": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true,
"license": "ISC",
"engines": {
"node": "6.* || 8.* || >= 10.*"
}
},
"node_modules/glob": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
"integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
"deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me",
"dev": true,
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^5.0.1",
"once": "^1.3.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"license": "ISC",
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/he": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
"integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
"dev": true,
"license": "MIT",
"bin": {
"he": "bin/he"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
"dev": true,
"license": "ISC",
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true,
"license": "ISC"
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
"integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
"dev": true,
"license": "MIT",
"dependencies": {
"binary-extensions": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-extglob": "^2.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-plain-obj": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
"integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/is-unicode-supported": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz",
"integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/js-yaml": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
"integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
"dev": true,
"license": "MIT",
"dependencies": {
"p-locate": "^5.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/log-symbols": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz",
"integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.1.0",
"is-unicode-supported": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/minimatch": {
"version": "5.1.9",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.9.tgz",
"integrity": "sha512-7o1wEA2RyMP7Iu7GNba9vc0RWWGACJOCZBJX2GJWip0ikV+wcOsgVuY9uE8CPiyQhkGFSlhuSkZPavN7u1c2Fw==",
"dev": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/mocha": {
"version": "10.8.2",
"resolved": "https://registry.npmjs.org/mocha/-/mocha-10.8.2.tgz",
"integrity": "sha512-VZlYo/WE8t1tstuRmqgeyBgCbJc/lEdopaa+axcKzTBJ+UIdlAB9XnmvTCAH4pwR4ElNInaedhEBmZD8iCSVEg==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-colors": "^4.1.3",
"browser-stdout": "^1.3.1",
"chokidar": "^3.5.3",
"debug": "^4.3.5",
"diff": "^5.2.0",
"escape-string-regexp": "^4.0.0",
"find-up": "^5.0.0",
"glob": "^8.1.0",
"he": "^1.2.0",
"js-yaml": "^4.1.0",
"log-symbols": "^4.1.0",
"minimatch": "^5.1.6",
"ms": "^2.1.3",
"serialize-javascript": "^6.0.2",
"strip-json-comments": "^3.1.1",
"supports-color": "^8.1.1",
"workerpool": "^6.5.1",
"yargs": "^16.2.0",
"yargs-parser": "^20.2.9",
"yargs-unparser": "^2.0.0"
},
"bin": {
"_mocha": "bin/_mocha",
"mocha": "bin/mocha.js"
},
"engines": {
"node": ">= 14.0.0"
}
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"dev": true,
"license": "MIT"
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"license": "ISC",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-locate": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
"integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
"dev": true,
"license": "MIT",
"dependencies": {
"p-limit": "^3.0.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/randombytes": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
"integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"safe-buffer": "^5.1.0"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"dev": true,
"license": "MIT",
"dependencies": {
"picomatch": "^2.2.1"
},
"engines": {
"node": ">=8.10.0"
}
},
"node_modules/require-directory": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/serialize-javascript": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz",
"integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"randombytes": "^2.1.0"
}
},
"node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/supports-color": {
"version": "8.1.1",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
"integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/workerpool": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz",
"integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==",
"dev": true,
"license": "Apache-2.0"
},
"node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true,
"license": "ISC"
},
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/yargs": {
"version": "16.2.0",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz",
"integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^7.0.2",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.0",
"y18n": "^5.0.5",
"yargs-parser": "^20.2.2"
},
"engines": {
"node": ">=10"
}
},
"node_modules/yargs-parser": {
"version": "20.2.9",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
"integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/yargs-unparser": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz",
"integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==",
"dev": true,
"license": "MIT",
"dependencies": {
"camelcase": "^6.0.0",
"decamelize": "^4.0.0",
"flat": "^5.0.2",
"is-plain-obj": "^2.1.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
}
}
}

File diff suppressed because it is too large Load diff

View file

@ -24,7 +24,7 @@ describe('DataProcessor', () => {
test('handles larger arrays with duplicates', () => {
const data: number[] = [];
for (let i = 0; i < 100; i++) {
for (let i = 0; i < 10000; i++) {
data.push(i % 20);
}
const processor = new DataProcessor(data);

File diff suppressed because it is too large Load diff

View file

@ -1,2 +1,2 @@
# These version placeholders will be replaced by uv-dynamic-versioning during build.
__version__ = "0.20.1.post242.dev0+7c7eeb5b"
__version__ = "0.20.5.post151.dev0+95b62113"

View file

@ -6,7 +6,6 @@ import com.esotericsoftware.kryo.io.Output;
import com.esotericsoftware.kryo.util.DefaultInstantiatorStrategy;
import org.objenesis.strategy.StdInstantiatorStrategy;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Field;
@ -36,7 +35,11 @@ public final class Serializer {
private static final int MAX_COLLECTION_SIZE = 1000;
private static final int BUFFER_SIZE = 4096;
// Thread-local Kryo instances (Kryo is not thread-safe)
// Thread-local Kryo, Output, and IdentityHashMap instances for reuse
private static final ThreadLocal<Output> OUTPUT = ThreadLocal.withInitial(() -> new Output(BUFFER_SIZE, -1));
private static final ThreadLocal<IdentityHashMap<Object, Object>> SEEN =
ThreadLocal.withInitial(IdentityHashMap::new);
private static final ThreadLocal<Kryo> KRYO = ThreadLocal.withInitial(() -> {
Kryo kryo = new Kryo();
kryo.setRegistrationRequired(false);
@ -89,10 +92,78 @@ public final class Serializer {
* @return Serialized bytes (may contain KryoPlaceholder for unserializable parts)
*/
public static byte[] serialize(Object obj) {
Object processed = recursiveProcess(obj, new IdentityHashMap<>(), 0, "");
// Fast path: if args are all safe types, skip recursive processing entirely
if (obj instanceof Object[] && isSafeArgs((Object[]) obj)) {
return directSerialize(obj);
}
IdentityHashMap<Object, Object> seen = SEEN.get();
seen.clear();
Object processed = recursiveProcess(obj, seen, 0, "");
return directSerialize(processed);
}
/**
* Attempt fast-path serialization for args that are all known-safe types.
* Returns serialized bytes if all args are safe, or null if the slow path is needed.
* Callers can use this to avoid executor submission overhead for simple arguments.
*/
public static byte[] serializeFast(Object obj) {
if (obj instanceof Object[] && isSafeArgs((Object[]) obj)) {
return directSerialize(obj);
}
return null;
}
/**
* Check if all elements of an args array can be serialized directly without recursive processing.
*/
private static boolean isSafeArgs(Object[] args) {
for (Object arg : args) {
if (!isSafeForDirectSerialization(arg)) {
return false;
}
}
return true;
}
/**
* Check if an object is safe to serialize directly without recursive processing.
* Covers: null, simple types, primitive arrays, and safe containers (up to 3 levels deep).
*/
private static boolean isSafeForDirectSerialization(Object obj) {
return isSafeForDirectSerialization(obj, 3);
}
private static boolean isSafeForDirectSerialization(Object obj, int depthLeft) {
if (obj == null || isSimpleType(obj)) {
return true;
}
if (depthLeft <= 0) {
return false;
}
Class<?> clazz = obj.getClass();
if (clazz.isArray() && clazz.getComponentType().isPrimitive()) {
return true;
}
if (isSafeContainerType(clazz)) {
if (obj instanceof Collection) {
for (Object item : (Collection<?>) obj) {
if (!isSafeForDirectSerialization(item, depthLeft - 1)) return false;
}
return true;
}
if (obj instanceof Map) {
for (Map.Entry<?, ?> e : ((Map<?, ?>) obj).entrySet()) {
if (!isSafeForDirectSerialization(e.getKey(), depthLeft - 1) ||
!isSafeForDirectSerialization(e.getValue(), depthLeft - 1)) return false;
}
return true;
}
}
return false;
}
/**
* Deserialize bytes back to an object.
* The returned object may contain KryoPlaceholder instances for parts
@ -141,14 +212,15 @@ public final class Serializer {
/**
* Direct serialization without recursive processing.
* Reuses a ThreadLocal Output buffer to avoid per-call allocation.
*/
private static byte[] directSerialize(Object obj) {
Kryo kryo = KRYO.get();
ByteArrayOutputStream baos = new ByteArrayOutputStream(BUFFER_SIZE);
try (Output output = new Output(baos)) {
Output output = OUTPUT.get();
output.reset();
kryo.writeClassAndObject(output, obj);
}
return baos.toByteArray();
output.flush();
return output.toBytes();
}
/**
@ -201,37 +273,23 @@ public final class Serializer {
// unserializable types, recursively process to catch and replace unserializable objects.
if (obj instanceof Map) {
Map<?, ?> map = (Map<?, ?>) obj;
if (containsOnlySimpleTypes(map)) {
// Simple map - try direct serialization to preserve full size
byte[] serialized = tryDirectSerialize(obj);
if (serialized != null) {
try {
deserialize(serialized);
return obj; // Success - return original
} catch (Exception e) {
// Fall through to recursive handling
}
}
if (isSafeContainerType(clazz) && containsOnlySimpleTypes(map)) {
return obj;
}
return handleMap(map, seen, depth, path);
}
if (obj instanceof Collection) {
Collection<?> collection = (Collection<?>) obj;
if (containsOnlySimpleTypes(collection)) {
// Simple collection - try direct serialization to preserve full size
byte[] serialized = tryDirectSerialize(obj);
if (serialized != null) {
try {
deserialize(serialized);
return obj; // Success - return original
} catch (Exception e) {
// Fall through to recursive handling
}
}
if (isSafeContainerType(clazz) && containsOnlySimpleTypes(collection)) {
return obj;
}
return handleCollection(collection, seen, depth, path);
}
if (clazz.isArray()) {
// Primitive arrays (int[], double[], etc.) are directly serializable by Kryo
if (clazz.getComponentType().isPrimitive()) {
return obj;
}
return handleArray(obj, seen, depth, path);
}
@ -255,6 +313,19 @@ public final class Serializer {
}
}
/**
* Check if a container type is known to round-trip safely through Kryo without verification.
* Only includes types registered with Kryo that are known to serialize/deserialize correctly.
*/
private static boolean isSafeContainerType(Class<?> clazz) {
return clazz == ArrayList.class ||
clazz == LinkedList.class ||
clazz == HashMap.class ||
clazz == LinkedHashMap.class ||
clazz == HashSet.class ||
clazz == LinkedHashSet.class;
}
/**
* Check if a class is known to be unserializable.
*/

View file

@ -31,7 +31,7 @@ public final class TraceRecorder {
private TraceRecorder(TracerConfig config) {
this.config = config;
this.writer = new TraceWriter(config.getDbPath());
this.writer = new TraceWriter(config.getDbPath(), config.isInMemoryDb());
this.maxFunctionCount = config.getMaxFunctionCount();
this.serializerExecutor = Executors.newCachedThreadPool(r -> {
Thread t = new Thread(r, "codeflash-serializer");
@ -76,8 +76,11 @@ public final class TraceRecorder {
return;
}
// Serialize args with timeout to prevent deep object graph traversal from blocking
// Serialize args try inline fast path first, fall back to async with timeout
byte[] argsBlob;
argsBlob = Serializer.serializeFast(args);
if (argsBlob == null) {
// Slow path: async serialization with timeout for complex/unknown types
Future<byte[]> future = serializerExecutor.submit(() -> Serializer.serialize(args));
try {
argsBlob = future.get(SERIALIZATION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
@ -94,6 +97,7 @@ public final class TraceRecorder {
+ methodName + ": " + cause.getClass().getSimpleName() + ": " + cause.getMessage());
return;
}
}
long timeNs = System.nanoTime();
count.incrementAndGet();

View file

@ -7,15 +7,22 @@ import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
public final class TraceWriter {
private static final int BATCH_SIZE = 256;
private static final int QUEUE_CAPACITY = 65536;
private final Connection connection;
private final Path diskPath;
private final boolean inMemory;
private final BlockingQueue<WriteTask> writeQueue;
private final Thread writerThread;
private final AtomicBoolean running;
@ -23,14 +30,20 @@ public final class TraceWriter {
private PreparedStatement insertFunctionCall;
private PreparedStatement insertMetadata;
public TraceWriter(String dbPath) {
this.writeQueue = new LinkedBlockingQueue<>();
public TraceWriter(String dbPath, boolean inMemory) {
this.diskPath = Paths.get(dbPath).toAbsolutePath();
this.diskPath.getParent().toFile().mkdirs();
this.inMemory = inMemory;
this.writeQueue = new ArrayBlockingQueue<>(QUEUE_CAPACITY);
this.running = new AtomicBoolean(true);
try {
Path path = Paths.get(dbPath).toAbsolutePath();
path.getParent().toFile().mkdirs();
this.connection = DriverManager.getConnection("jdbc:sqlite:" + path);
if (inMemory) {
// In-memory database for maximum write performance; flushed to disk via VACUUM INTO at close()
this.connection = DriverManager.getConnection("jdbc:sqlite::memory:");
} else {
this.connection = DriverManager.getConnection("jdbc:sqlite:" + this.diskPath);
}
initializeSchema();
prepareStatements();
@ -45,8 +58,12 @@ public final class TraceWriter {
private void initializeSchema() throws SQLException {
try (Statement stmt = connection.createStatement()) {
if (!inMemory) {
stmt.execute("PRAGMA journal_mode=WAL");
stmt.execute("PRAGMA synchronous=NORMAL");
stmt.execute("PRAGMA cache_size=-16000");
stmt.execute("PRAGMA temp_store=MEMORY");
}
stmt.execute(
"CREATE TABLE IF NOT EXISTS function_calls(" +
@ -69,6 +86,8 @@ public final class TraceWriter {
stmt.execute("CREATE INDEX IF NOT EXISTS idx_fc_class_func ON function_calls(classname, function)");
}
// Keep autocommit off for writer performance commit explicitly per batch
connection.setAutoCommit(false);
}
private void prepareStatements() throws SQLException {
@ -95,27 +114,59 @@ public final class TraceWriter {
}
private void writerLoop() {
List<WriteTask> batch = new ArrayList<>(BATCH_SIZE);
while (running.get() || !writeQueue.isEmpty()) {
try {
WriteTask task = writeQueue.poll(100, TimeUnit.MILLISECONDS);
if (task != null) {
task.execute(this);
if (task == null) {
continue;
}
batch.add(task);
writeQueue.drainTo(batch, BATCH_SIZE - 1);
executeBatch(batch);
batch.clear();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
} catch (SQLException e) {
System.err.println("[codeflash-tracer] Write error: " + e.getMessage());
}
}
// Drain remaining
WriteTask task;
while ((task = writeQueue.poll()) != null) {
writeQueue.drainTo(batch);
if (!batch.isEmpty()) {
executeBatch(batch);
}
}
private void executeBatch(List<WriteTask> batch) {
if (batch.isEmpty()) {
return;
}
boolean hasFunctionCalls = false;
try {
for (WriteTask task : batch) {
if (task instanceof FunctionCallTask) {
((FunctionCallTask) task).bindParameters(this);
insertFunctionCall.addBatch();
hasFunctionCalls = true;
} else {
task.execute(this);
}
}
if (hasFunctionCalls) {
insertFunctionCall.executeBatch();
}
connection.commit();
} catch (SQLException e) {
System.err.println("[codeflash-tracer] Write error: " + e.getMessage());
System.err.println("[codeflash-tracer] Batch write error (" + batch.size() + " tasks): " + e.getMessage());
try {
connection.rollback();
} catch (SQLException re) {
System.err.println("[codeflash-tracer] Rollback failed: " + re.getMessage());
}
}
}
@ -139,9 +190,27 @@ public final class TraceWriter {
Thread.currentThread().interrupt();
}
// Close prepared statements first required before VACUUM
try {
if (insertFunctionCall != null) insertFunctionCall.close();
if (insertMetadata != null) insertMetadata.close();
} catch (SQLException e) {
System.err.println("[codeflash-tracer] Error closing statements: " + e.getMessage());
}
if (inMemory) {
try {
connection.commit();
connection.setAutoCommit(true);
try (Statement stmt = connection.createStatement()) {
stmt.execute("VACUUM INTO '" + diskPath.toString().replace("'", "''") + "'");
}
} catch (SQLException e) {
System.err.println("[codeflash-tracer] Failed to write trace DB to disk: " + e.getMessage());
}
}
try {
if (connection != null) connection.close();
} catch (SQLException e) {
System.err.println("[codeflash-tracer] Error closing TraceWriter: " + e.getMessage());
@ -177,8 +246,7 @@ public final class TraceWriter {
this.argsBlob = argsBlob;
}
@Override
public void execute(TraceWriter writer) throws SQLException {
void bindParameters(TraceWriter writer) throws SQLException {
writer.insertFunctionCall.setString(1, type);
writer.insertFunctionCall.setString(2, function);
writer.insertFunctionCall.setString(3, classname);
@ -187,6 +255,11 @@ public final class TraceWriter {
writer.insertFunctionCall.setString(6, descriptor);
writer.insertFunctionCall.setLong(7, timeNs);
writer.insertFunctionCall.setBytes(8, argsBlob);
}
@Override
public void execute(TraceWriter writer) throws SQLException {
bindParameters(writer);
writer.insertFunctionCall.executeUpdate();
}
}

View file

@ -30,6 +30,9 @@ public final class TracerConfig {
@SerializedName("projectRoot")
private String projectRoot = "";
@SerializedName("inMemoryDb")
private boolean inMemoryDb = false;
private static final Gson GSON = new Gson();
public static TracerConfig parse(String agentArgs) {
@ -89,6 +92,10 @@ public final class TracerConfig {
return projectRoot;
}
public boolean isInMemoryDb() {
return inMemoryDb;
}
public boolean shouldInstrumentClass(String internalClassName) {
String dotName = internalClassName.replace('/', '.');

View file

@ -209,6 +209,87 @@ class ComparatorCorrectnessTest {
assertFalse(Comparator.isDeserializationError(42));
}
// ============================================================
// VOID METHOD STATE COMPARISON proves we actually compare
// post-call state for void methods, not just skip them
// ============================================================
@Test
@DisplayName("void state: both sides sorted identically → equivalent")
void testVoidState_identicalMutation_equivalent() throws Exception {
createTestDb(originalDb);
createTestDb(candidateDb);
// Simulate: bubbleSortInPlace(arr) both original and candidate sort correctly
// Post-call state: Object[]{sorted_array}
int[] sortedArr = {1, 2, 3, 4, 5};
byte[] origState = Serializer.serialize(new Object[]{sortedArr});
byte[] candState = Serializer.serialize(new Object[]{new int[]{1, 2, 3, 4, 5}});
insertRow(originalDb, "L1_1", 1, origState);
insertRow(candidateDb, "L1_1", 1, candState);
String json = Comparator.compareDatabases(originalDb.toString(), candidateDb.toString());
Map<String, Object> result = parseJson(json);
assertTrue((Boolean) result.get("equivalent"),
"Both sides produce same sorted array — should be equivalent");
assertEquals(1, ((Number) result.get("actualComparisons")).intValue());
}
@Test
@DisplayName("void state: candidate mutates array differently → NOT equivalent")
void testVoidState_differentMutation_rejected() throws Exception {
createTestDb(originalDb);
createTestDb(candidateDb);
// Simulate: original sorts [3,1,2] [1,2,3]
// Bad optimization doesn't sort correctly [3,1,2] unchanged
byte[] origState = Serializer.serialize(new Object[]{new int[]{1, 2, 3}});
byte[] candState = Serializer.serialize(new Object[]{new int[]{3, 1, 2}});
insertRow(originalDb, "L1_1", 1, origState);
insertRow(candidateDb, "L1_1", 1, candState);
String json = Comparator.compareDatabases(originalDb.toString(), candidateDb.toString());
Map<String, Object> result = parseJson(json);
assertFalse((Boolean) result.get("equivalent"),
"Candidate produced wrong array — must be rejected");
assertEquals(1, ((Number) result.get("actualComparisons")).intValue());
}
@Test
@DisplayName("void state: receiver + args both compared — wrong receiver state rejected")
void testVoidState_receiverAndArgs_wrongReceiverRejected() throws Exception {
createTestDb(originalDb);
createTestDb(candidateDb);
// Simulate: instance method sorter.sort(data)
// Post-call state is Object[]{receiver_fields_map, mutated_data}
// Original: receiver has size=3, data is [1,2,3]
// Candidate: receiver has size=0 (wrong), data is [1,2,3]
Map<String, Object> origReceiver = new HashMap<>();
origReceiver.put("size", 3);
origReceiver.put("sorted", true);
Map<String, Object> candReceiver = new HashMap<>();
candReceiver.put("size", 0);
candReceiver.put("sorted", true);
byte[] origState = Serializer.serialize(new Object[]{origReceiver, new int[]{1, 2, 3}});
byte[] candState = Serializer.serialize(new Object[]{candReceiver, new int[]{1, 2, 3}});
insertRow(originalDb, "L1_1", 1, origState);
insertRow(candidateDb, "L1_1", 1, candState);
String json = Comparator.compareDatabases(originalDb.toString(), candidateDb.toString());
Map<String, Object> result = parseJson(json);
assertFalse((Boolean) result.get("equivalent"),
"Receiver state differs (size 3 vs 0) — must be rejected even though args match");
assertEquals(1, ((Number) result.get("actualComparisons")).intValue());
}
// --- Helpers ---
private void createTestDb(Path dbPath) throws Exception {

View file

@ -16,7 +16,7 @@
"tests/",
"-vv",
"--ignore",
"tests/benchmarks/"
".codeflash/benchmarks/"
],
},
"launch": {

View file

@ -47,7 +47,9 @@ class AiServiceClient:
self.headers = {"Authorization": f"Bearer {get_codeflash_api_key()}", "Connection": "close"}
self.llm_call_counter = count(1)
self.is_local = self.base_url == "http://localhost:8000"
self.timeout: float | None = 300 if self.is_local else 90
# (connect_timeout, read_timeout) — connect should be fast; read
# can be slow because the server runs LLM inference.
self.timeout: float | tuple[float, float] | None = (10, 300)
def get_next_sequence(self) -> int:
"""Get the next LLM call sequence number."""
@ -88,7 +90,7 @@ class AiServiceClient:
endpoint: str,
method: str = "POST",
payload: dict[str, Any] | list[dict[str, Any]] | None = None,
timeout: float | None = None,
timeout: float | tuple[float, float] | None = None,
) -> requests.Response:
"""Make an API request to the given endpoint on the AI service.

View file

@ -4,6 +4,7 @@ from typing import TYPE_CHECKING, Optional, Union
import libcst as cst
import codeflash.code_utils._libcst_cache # noqa: F401
from codeflash.code_utils.formatter import sort_imports
if TYPE_CHECKING:

View file

@ -5,15 +5,6 @@ from argparse import SUPPRESS, ArgumentParser, Namespace
from functools import lru_cache
from pathlib import Path
from codeflash.cli_cmds import logging_config
from codeflash.cli_cmds.console import apologize_and_exit, logger
from codeflash.code_utils import env_utils
from codeflash.code_utils.code_utils import exit_with_message, normalize_ignore_paths
from codeflash.code_utils.config_parser import parse_config_file
from codeflash.languages.test_framework import set_current_test_framework
from codeflash.lsp.helpers import is_LSP_enabled
from codeflash.version import __version__ as version
def parse_args() -> Namespace:
parser = _build_parser()
@ -30,12 +21,17 @@ def parse_args() -> Namespace:
def process_and_validate_cmd_args(args: Namespace) -> Namespace:
from codeflash.cli_cmds import logging_config
from codeflash.cli_cmds.console import logger
from codeflash.code_utils import env_utils
from codeflash.code_utils.code_utils import exit_with_message
from codeflash.code_utils.git_utils import (
check_running_in_git_repo,
confirm_proceeding_with_no_git_repo,
get_repo_owner_and_name,
)
from codeflash.code_utils.github_utils import require_github_app_or_exit
from codeflash.version import __version__ as version
if args.server:
os.environ["CODEFLASH_AIS_SERVER"] = args.server
@ -85,6 +81,12 @@ def process_and_validate_cmd_args(args: Namespace) -> Namespace:
def process_pyproject_config(args: Namespace) -> Namespace:
from codeflash.code_utils import env_utils
from codeflash.code_utils.code_utils import exit_with_message, normalize_ignore_paths
from codeflash.code_utils.config_parser import parse_config_file
from codeflash.languages.test_framework import set_current_test_framework
from codeflash.lsp.helpers import is_LSP_enabled
try:
pyproject_config, pyproject_file_path = parse_config_file(args.config_file)
except ValueError as e:
@ -154,7 +156,14 @@ def process_pyproject_config(args: Namespace) -> Namespace:
raise AssertionError("--tests-root must be specified")
assert Path(args.tests_root).is_dir(), f"--tests-root {args.tests_root} must be a valid directory"
if args.benchmark:
assert args.benchmarks_root is not None, "--benchmarks-root must be specified when running with --benchmark"
if args.benchmarks_root is None:
# Auto-discover .codeflash/benchmarks/ convention
candidate = Path.cwd() / ".codeflash" / "benchmarks"
if candidate.is_dir():
args.benchmarks_root = str(candidate)
else:
msg = "--benchmarks-root must be specified when running with --benchmark, or .codeflash/benchmarks/ must exist"
raise AssertionError(msg)
assert Path(args.benchmarks_root).is_dir(), (
f"--benchmarks-root {args.benchmarks_root} must be a valid directory"
)
@ -222,6 +231,9 @@ def project_root_from_module_root(module_root: Path, pyproject_file_path: Path)
def handle_optimize_all_arg_parsing(args: Namespace) -> Namespace:
from codeflash.cli_cmds.console import apologize_and_exit, logger
from codeflash.code_utils.code_utils import exit_with_message
if hasattr(args, "all") or (hasattr(args, "file") and args.file):
no_pr = getattr(args, "no_pr", False)

View file

@ -2,6 +2,9 @@ from __future__ import annotations
import os
def auth_login() -> None:
"""Perform OAuth login and save the API key."""
import click
from codeflash.cli_cmds.console import console
@ -10,9 +13,6 @@ from codeflash.code_utils.env_utils import get_codeflash_api_key
from codeflash.code_utils.shell_utils import save_api_key_to_rc
from codeflash.either import is_successful
def auth_login() -> None:
"""Perform OAuth login and save the API key."""
try:
existing_api_key = get_codeflash_api_key()
except OSError:
@ -41,6 +41,9 @@ def auth_login() -> None:
def auth_status() -> None:
"""Check and display current authentication status."""
from codeflash.cli_cmds.console import console
from codeflash.code_utils.env_utils import get_codeflash_api_key
try:
api_key = get_codeflash_api_key()
except OSError:

View file

@ -87,7 +87,13 @@ def run_compare(args: Namespace) -> None:
benchmarks_root_str = pyproject_config.get("benchmarks_root")
if not benchmarks_root_str:
logger.error("benchmarks-root must be configured in [tool.codeflash] to use compare")
# Auto-discover .codeflash/benchmarks/ if it exists
candidate = project_root / ".codeflash" / "benchmarks"
if candidate.is_dir():
benchmarks_root_str = str(candidate)
logger.info(f"Auto-discovered benchmarks at {candidate}")
else:
logger.error("benchmarks-root must be configured in [tool.codeflash] or .codeflash/benchmarks/ must exist")
sys.exit(1)
benchmarks_root = Path(benchmarks_root_str).resolve()

View file

@ -0,0 +1,64 @@
"""Cache libcst visitor dispatch table construction.
libcst's ``MatcherDecoratableTransformer`` and
``MatcherDecoratableVisitor`` rebuild visitor dispatch tables on
every instantiation by iterating ``dir(self)`` (~600 attributes)
and calling ``getattr`` + ``inspect.ismethod`` on each. The
results depend only on the class, not the instance, so caching
by ``type(obj)`` is safe.
Import this module before any libcst visitors are instantiated
to install the cache.
"""
from __future__ import annotations
from typing import Any
import libcst.matchers._visitors as _mv
_visit_cache: dict[type, Any] = {}
_leave_cache: dict[type, Any] = {}
_matchers_cache: dict[type, Any] = {}
_original_visit = _mv._gather_constructed_visit_funcs # noqa: SLF001
_original_leave = _mv._gather_constructed_leave_funcs # noqa: SLF001
_original_matchers = _mv._gather_matchers # noqa: SLF001
def _cached_visit(obj: object) -> Any:
"""Return cached visit-function dispatch table for the object's class."""
cls = type(obj)
try:
return _visit_cache[cls]
except KeyError:
result = _original_visit(obj)
_visit_cache[cls] = result
return result
def _cached_leave(obj: object) -> Any:
"""Return cached leave-function dispatch table for the object's class."""
cls = type(obj)
try:
return _leave_cache[cls]
except KeyError:
result = _original_leave(obj)
_leave_cache[cls] = result
return result
def _cached_matchers(obj: object) -> Any:
"""Return cached matcher dispatch table for the object's class."""
cls = type(obj)
try:
return dict(_matchers_cache[cls])
except KeyError:
result = _original_matchers(obj)
_matchers_cache[cls] = result
return dict(result)
_mv._gather_constructed_visit_funcs = _cached_visit # noqa: SLF001
_mv._gather_constructed_leave_funcs = _cached_leave # noqa: SLF001
_mv._gather_matchers = _cached_matchers # noqa: SLF001

View file

@ -17,7 +17,7 @@ import tomlkit
from codeflash.cli_cmds.console import logger, paneled_text
from codeflash.code_utils.config_parser import find_pyproject_toml, get_all_closest_config_files
from codeflash.lsp.helpers import is_LSP_enabled
from codeflash.lsp.helpers import is_LSP_enabled, is_subagent_mode
_INVALID_CHARS_NT = {"<", ">", ":", '"', "|", "?", "*"}
@ -423,7 +423,7 @@ def get_run_tmp_file(file_path: Path | str) -> Path:
file_path = Path(file_path)
if not hasattr(get_run_tmp_file, "tmpdir_path"):
get_run_tmp_file.tmpdir = TemporaryDirectory(prefix="codeflash_")
get_run_tmp_file.tmpdir_path = Path(get_run_tmp_file.tmpdir.name)
get_run_tmp_file.tmpdir_path = Path(get_run_tmp_file.tmpdir.name).resolve()
return get_run_tmp_file.tmpdir_path / file_path
@ -471,6 +471,11 @@ def exit_with_message(message: str, *, error_on_exit: bool = False) -> None:
if is_LSP_enabled():
logger.error(message)
return
if is_subagent_mode():
from xml.sax.saxutils import escape
sys.stdout.write(f"<codeflash-error>{escape(message)}</codeflash-error>\n")
sys.exit(1 if error_on_exit else 0)
paneled_text(message, panel_args={"style": "red"})
sys.exit(1 if error_on_exit else 0)

View file

@ -1,5 +1,6 @@
from __future__ import annotations
import os
from enum import Enum
from typing import Any, Union
@ -17,7 +18,7 @@ MIN_CONCURRENCY_IMPROVEMENT_THRESHOLD = 0.20 # 20% concurrency ratio improvemen
CONCURRENCY_FACTOR = 10 # Number of concurrent executions for concurrency benchmark
MAX_TEST_FUNCTION_RUNS = 50
MAX_CUMULATIVE_TEST_RUNTIME_NANOSECONDS = 100e6 # 100ms
TOTAL_LOOPING_TIME = 10.0 # 10 second candidate benchmarking budget
TOTAL_LOOPING_TIME = float(os.getenv("CODEFLASH_LOOPING_TIME", "10.0")) # candidate benchmarking budget (seconds)
COVERAGE_THRESHOLD = 60.0
MIN_TESTCASE_PASSED_THRESHOLD = 6
REPEAT_OPTIMIZATION_PROBABILITY = 0.1

View file

@ -9,17 +9,16 @@ from functools import lru_cache
from pathlib import Path
from typing import Any, Optional
from codeflash.cli_cmds.console import logger
from codeflash.code_utils.code_utils import exit_with_message
from codeflash.code_utils.formatter import format_code
from codeflash.code_utils.shell_utils import read_api_key_from_shell_config, save_api_key_to_rc
from codeflash.languages.registry import get_language_support_by_common_formatters
from codeflash.lsp.helpers import is_LSP_enabled
def check_formatter_installed(
formatter_cmds: list[str], exit_on_failure: bool = True, language: str = "python"
) -> bool:
from codeflash.cli_cmds.console import logger
from codeflash.code_utils.formatter import format_code
from codeflash.languages.registry import get_language_support_by_common_formatters
if not formatter_cmds or formatter_cmds[0] == "disabled":
return True
first_cmd = formatter_cmds[0]
@ -69,6 +68,8 @@ def check_formatter_installed(
@lru_cache(maxsize=1)
def get_codeflash_api_key() -> str:
from codeflash.cli_cmds.console import logger
# Check environment variable first
env_api_key = os.environ.get("CODEFLASH_API_KEY")
shell_api_key = read_api_key_from_shell_config()
@ -96,7 +97,8 @@ def get_codeflash_api_key() -> str:
# Prefer the shell configuration over environment variables for lsp,
# as the API key may change in the RC file during lsp runtime. Since the LSP client (extension) can restart
# within the same process, the environment variable could become outdated.
api_key = shell_api_key or env_api_key if is_LSP_enabled() else env_api_key or shell_api_key
is_lsp = os.getenv("CODEFLASH_LSP", default="false").lower() == "true"
api_key = shell_api_key or env_api_key if is_lsp else env_api_key or shell_api_key
api_secret_docs_message = "For more information, refer to the documentation at [https://docs.codeflash.ai/optimizing-with-codeflash/codeflash-github-actions#manual-setup]." # noqa
if not api_key:
@ -106,6 +108,8 @@ def get_codeflash_api_key() -> str:
f"{api_secret_docs_message}"
)
if is_repo_a_fork():
from codeflash.code_utils.code_utils import exit_with_message
msg = (
"Codeflash API key not detected in your environment. It appears you're running Codeflash from a GitHub fork.\n"
"For external contributors, please ensure you've added your own API key to your fork's repository secrets and set it as the CODEFLASH_API_KEY environment variable.\n"
@ -124,6 +128,8 @@ def get_codeflash_api_key() -> str:
def ensure_codeflash_api_key() -> bool:
from codeflash.cli_cmds.console import logger
try:
get_codeflash_api_key()
except OSError:

View file

@ -7,6 +7,7 @@ from typing import TYPE_CHECKING
import libcst as cst
import codeflash.code_utils._libcst_cache # noqa: F401
from codeflash.cli_cmds.console import logger
from codeflash.code_utils.code_utils import get_run_tmp_file, module_name_from_file_path
from codeflash.code_utils.formatter import sort_imports

View file

@ -8,7 +8,6 @@ import sys
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from codeflash.cli_cmds.console import logger
from codeflash.code_utils.compat import LF
from codeflash.either import Failure, Success
@ -41,6 +40,8 @@ def is_powershell() -> bool:
2. COMSPEC pointing to powershell.exe
3. TERM_PROGRAM indicating Windows Terminal (often uses PowerShell)
"""
from codeflash.cli_cmds.console import logger
if os.name != "nt":
return False
@ -72,6 +73,8 @@ def is_powershell() -> bool:
def read_api_key_from_shell_config() -> Optional[str]:
"""Read API key from shell configuration file."""
from codeflash.cli_cmds.console import logger
shell_rc_path = get_shell_rc_path()
# Ensure shell_rc_path is a Path object for consistent handling
if not isinstance(shell_rc_path, Path):
@ -127,6 +130,8 @@ def get_api_key_export_line(api_key: str) -> str:
def save_api_key_to_rc(api_key: str) -> Result[str, str]:
"""Save API key to the appropriate shell configuration file."""
from codeflash.cli_cmds.console import logger
shell_rc_path = get_shell_rc_path()
# Ensure shell_rc_path is a Path object for consistent handling
if not isinstance(shell_rc_path, Path):

View file

@ -195,7 +195,8 @@ def _find_all_functions_via_language_support(file_path: Path) -> dict[Path, list
try:
lang_support = get_language_support(file_path)
criteria = FunctionFilterCriteria(require_return=True)
require_return = lang_support.language != Language.JAVA
criteria = FunctionFilterCriteria(require_return=require_return)
functions[file_path] = lang_support.discover_functions(file_path, criteria)
except Exception as e:
logger.debug(f"Failed to discover functions in {file_path}: {e}")
@ -454,7 +455,8 @@ def find_all_functions_in_file(file_path: Path) -> dict[Path, list[FunctionToOpt
from codeflash.languages.base import FunctionFilterCriteria
lang_support = get_language_support(file_path)
criteria = FunctionFilterCriteria(require_return=True)
require_return = lang_support.language != Language.JAVA
criteria = FunctionFilterCriteria(require_return=require_return)
source = file_path.read_text(encoding="utf-8")
return {file_path: lang_support.discover_functions(source, file_path, criteria)}
except Exception as e:

View file

@ -897,7 +897,7 @@ class LanguageSupport(Protocol):
...
def instrument_source_for_line_profiler(
self, func_info: FunctionToOptimize, line_profiler_output_file: Path
self, func_info: FunctionToOptimize, line_profiler_output_file: Path, project_classpath: str | None = None
) -> bool:
"""Instrument source code before line profiling."""
...

View file

@ -20,6 +20,7 @@ from rich.syntax import Syntax
from rich.text import Text
from rich.tree import Tree
import codeflash.code_utils._libcst_cache # noqa: F401
from codeflash.api.aiservice import AiServiceClient, AIServiceRefinerRequest, LocalAiServiceClient
from codeflash.api.cfapi import add_code_context_hash, create_staging, get_cfapi_base_urls, mark_optimization_success
from codeflash.benchmarking.utils import process_benchmark_data
@ -488,6 +489,7 @@ class FunctionOptimizer:
else function_to_optimize.file_path.read_text(encoding="utf8")
)
self.language_support = current_language_support()
self.language_support.ensure_runtime_environment(self.project_root)
if not function_to_optimize_ast:
self.function_to_optimize_ast = self._resolve_function_ast(
self.function_to_optimize_source_code, function_to_optimize.function_name, function_to_optimize.parents
@ -3252,6 +3254,11 @@ class FunctionOptimizer:
test_env["CODEFLASH_TEST_ITERATION"] = str(codeflash_test_iteration)
test_env["CODEFLASH_TRACER_DISABLE"] = str(codeflash_tracer_disable)
test_env["CODEFLASH_LOOP_INDEX"] = str(codeflash_loop_index)
# Pin PYTHONHASHSEED so original and candidate test processes use the same hash seed.
# Without this, each subprocess gets a random seed, which can cause non-deterministic
# iteration order in sets/dicts and lead to flaky return-value comparisons.
if "PYTHONHASHSEED" not in test_env:
test_env["PYTHONHASHSEED"] = "0"
return test_env
def line_profiler_step(

View file

@ -404,7 +404,9 @@ class JavaFunctionOptimizer(FunctionOptimizer):
line_profiler_output_path = get_run_tmp_file(Path("line_profiler_output.json"))
success = self.language_support.instrument_source_for_line_profiler(
func_info=self.function_to_optimize, line_profiler_output_file=line_profiler_output_path
func_info=self.function_to_optimize,
line_profiler_output_file=line_profiler_output_path,
project_classpath=self._get_project_classpath(),
)
if not success:
return {"timings": {}, "unit": 0, "str_out": ""}

View file

@ -9,7 +9,6 @@ from __future__ import annotations
import logging
import os
import re
import shutil
import subprocess
import tempfile
import xml.etree.ElementTree as ET
@ -17,7 +16,7 @@ from pathlib import Path
from typing import Any
from codeflash.languages.java.build_tool_strategy import BuildToolStrategy, module_to_dir
from codeflash.languages.java.build_tools import BuildTool, JavaProjectInfo
from codeflash.languages.java.build_tools import CODEFLASH_RUNTIME_VERSION, BuildTool, JavaProjectInfo
_RE_INCLUDE = re.compile(r"""include\s*\(?([^)\n]+)\)?""")
@ -45,7 +44,8 @@ gradle.projectsEvaluated {
'spotbugsMain', 'spotbugsTest',
'pmdMain', 'pmdTest',
'rat', 'japicmp',
'jarHell', 'thirdPartyAudit'
'jarHell', 'thirdPartyAudit',
'spotlessCheck', 'spotlessApply', 'spotlessJava', 'spotlessKotlin', 'spotlessScala'
]
}.configureEach {
enabled = false
@ -103,22 +103,6 @@ gradle.projectsEvaluated {
}
"""
# Gradle init script that applies JaCoCo plugin for coverage collection.
# Uses projectsEvaluated to avoid triggering configuration of unrelated subprojects.
_JACOCO_INIT_SCRIPT = """\
gradle.projectsEvaluated {
allprojects {
apply plugin: 'jacoco'
jacocoTestReport {
reports {
xml.required = true
html.required = false
}
}
}
}
"""
def find_gradle_build_file(project_root: Path) -> Path | None:
kts = project_root / "build.gradle.kts"
@ -130,12 +114,12 @@ def find_gradle_build_file(project_root: Path) -> Path | None:
return None
def _find_top_level_dependencies_block(build_file: Path, content: str) -> int | None:
"""Find the insert position (before closing }) of the top-level dependencies block using tree-sitter.
def _find_top_level_block(build_file: Path, content: str, block_name: str) -> int | None:
"""Find the insert position (before closing }) of a top-level block using tree-sitter.
Returns the byte offset of the closing brace, or None if no top-level dependencies block exists.
Only matches `dependencies { }` at the root level ignores blocks nested inside
`buildscript`, `subprojects`, `allprojects`, etc.
Returns the byte offset of the closing brace, or None if no top-level block with the
given name exists. Only matches blocks at the root level ignores blocks nested inside
``buildscript``, ``subprojects``, ``allprojects``, etc.
"""
import tree_sitter as ts
@ -153,10 +137,10 @@ def _find_top_level_dependencies_block(build_file: Path, content: str) -> int |
tree = parser.parse(source_bytes)
# Walk only direct children of root to find top-level `dependencies { }`
# Walk only direct children of root to find top-level `<block_name> { }`
for child in tree.root_node.children:
# Groovy: expression_statement > method_invocation(identifier="dependencies", closure)
# Kotlin: call_expression(identifier="dependencies", annotated_lambda)
# Groovy: expression_statement > method_invocation(identifier, closure)
# Kotlin: call_expression(identifier, annotated_lambda)
node = child
if node.type == "expression_statement" and node.child_count > 0:
node = node.children[0]
@ -176,7 +160,7 @@ def _find_top_level_dependencies_block(build_file: Path, content: str) -> int |
continue
name = source_bytes[name_node.start_byte : name_node.end_byte].decode("utf-8")
if name != "dependencies":
if name != block_name:
continue
# Find the closing brace of this block
@ -191,6 +175,11 @@ def _find_top_level_dependencies_block(build_file: Path, content: str) -> int |
return None
def _find_top_level_dependencies_block(build_file: Path, content: str) -> int | None:
"""Find the insert position (before closing }) of the top-level dependencies block."""
return _find_top_level_block(build_file, content, "dependencies")
def _is_multimodule_project(build_root: Path) -> bool:
"""Check if this is a multi-module Gradle project by looking for include directives in settings files."""
for settings_name in ("settings.gradle", "settings.gradle.kts"):
@ -205,8 +194,66 @@ def _is_multimodule_project(build_root: Path) -> bool:
return False
def add_codeflash_dependency_multimodule(build_file: Path, runtime_jar_path: Path) -> bool:
"""Add codeflash-runtime dependency wrapped in a subprojects block for multi-module projects.
_CODEFLASH_MAVEN_COORD = f"com.codeflash:codeflash-runtime:{CODEFLASH_RUNTIME_VERSION}"
def _update_existing_codeflash_dependency(build_file: Path, content: str) -> str | None:
"""If the codeflash-runtime dependency exists but is outdated or uses the old files() format, update it.
Returns the updated content, or None if no update was needed (already current).
"""
is_kts = build_file.name.endswith(".kts")
if is_kts:
current_dep = f'testImplementation("{_CODEFLASH_MAVEN_COORD}")'
else:
current_dep = f"testImplementation '{_CODEFLASH_MAVEN_COORD}'"
if current_dep in content:
return None
# Replace the line containing "codeflash-runtime" with the current Maven Central coordinate.
# This handles both old versions (e.g. 1.0.0) and old files() format.
updated_lines: list[str] = []
replaced = False
for line in content.splitlines(keepends=True):
if "codeflash-runtime" in line:
indent = len(line) - len(line.lstrip())
spaces = " " * indent
if is_kts:
updated_lines.append(f'{spaces}testImplementation("{_CODEFLASH_MAVEN_COORD}") // codeflash-runtime\n')
else:
updated_lines.append(f"{spaces}testImplementation '{_CODEFLASH_MAVEN_COORD}' // codeflash-runtime\n")
replaced = True
else:
updated_lines.append(line)
if replaced:
return "".join(updated_lines)
return None
def _ensure_maven_central_repo(build_file: Path, content: str) -> str:
"""Ensure mavenCentral() is present in the top-level repositories block. Returns updated content.
Uses tree-sitter to find the correct top-level ``repositories {}`` block, avoiding
false matches inside ``buildscript {}``, ``subprojects {}``, etc.
"""
if "mavenCentral()" in content:
return content
# Use tree-sitter to find the top-level repositories block
insert_pos = _find_top_level_block(build_file, content, "repositories")
if insert_pos is not None:
return content[:insert_pos] + " mavenCentral()\n" + content[insert_pos:]
# No top-level repositories block — append one
content += "\nrepositories {\n mavenCentral()\n}\n"
return content
def add_codeflash_dependency_multimodule(build_file: Path) -> bool:
"""Add codeflash-runtime dependency from Maven Central in a subprojects block for multi-module projects.
This avoids adding testImplementation to the root build file directly, which would fail
if the root project doesn't apply the java plugin.
@ -218,18 +265,29 @@ def add_codeflash_dependency_multimodule(build_file: Path, runtime_jar_path: Pat
content = build_file.read_text(encoding="utf-8")
if "codeflash-runtime" in content:
logger.info("codeflash-runtime dependency already present in %s", build_file.name)
updated = _update_existing_codeflash_dependency(build_file, content)
if updated is not None:
build_file.write_text(updated, encoding="utf-8")
logger.info(
"Updated codeflash-runtime dependency in %s to version %s",
build_file.name,
CODEFLASH_RUNTIME_VERSION,
)
else:
logger.info("codeflash-runtime dependency already up-to-date in %s", build_file.name)
return True
is_kts = build_file.name.endswith(".kts")
jar_str = str(runtime_jar_path).replace("\\", "/")
if is_kts:
block = (
f"\nsubprojects {{\n"
f' plugins.withId("java") {{\n'
f" repositories {{\n"
f" mavenCentral()\n"
f" }}\n"
f" dependencies {{\n"
f' testImplementation(files("{jar_str}")) // codeflash-runtime\n'
f' testImplementation("{_CODEFLASH_MAVEN_COORD}") // codeflash-runtime\n'
f" }}\n"
f" }}\n"
f"}}\n"
@ -238,8 +296,11 @@ def add_codeflash_dependency_multimodule(build_file: Path, runtime_jar_path: Pat
block = (
f"\nsubprojects {{\n"
f" plugins.withId('java') {{\n"
f" repositories {{\n"
f" mavenCentral()\n"
f" }}\n"
f" dependencies {{\n"
f" testImplementation files('{jar_str}') // codeflash-runtime\n"
f" testImplementation '{_CODEFLASH_MAVEN_COORD}' // codeflash-runtime\n"
f" }}\n"
f" }}\n"
f"}}\n"
@ -255,7 +316,7 @@ def add_codeflash_dependency_multimodule(build_file: Path, runtime_jar_path: Pat
return False
def add_codeflash_dependency(build_file: Path, runtime_jar_path: Path) -> bool:
def add_codeflash_dependency(build_file: Path) -> bool:
if not build_file.exists():
return False
@ -263,16 +324,28 @@ def add_codeflash_dependency(build_file: Path, runtime_jar_path: Path) -> bool:
content = build_file.read_text(encoding="utf-8")
if "codeflash-runtime" in content:
logger.info("codeflash-runtime dependency already present in %s", build_file.name)
updated = _update_existing_codeflash_dependency(build_file, content)
if updated is not None:
# Also ensure mavenCentral() is present (old files() format won't have it)
updated = _ensure_maven_central_repo(build_file, updated)
build_file.write_text(updated, encoding="utf-8")
logger.info(
"Updated codeflash-runtime dependency in %s to version %s",
build_file.name,
CODEFLASH_RUNTIME_VERSION,
)
else:
logger.info("codeflash-runtime dependency already up-to-date in %s", build_file.name)
return True
content = _ensure_maven_central_repo(build_file, content)
is_kts = build_file.name.endswith(".kts")
jar_str = str(runtime_jar_path).replace("\\", "/")
if is_kts:
dep_line = f' testImplementation(files("{jar_str}")) // codeflash-runtime\n'
dep_line = f' testImplementation("{_CODEFLASH_MAVEN_COORD}") // codeflash-runtime\n'
else:
dep_line = f" testImplementation files('{jar_str}') // codeflash-runtime\n"
dep_line = f" testImplementation '{_CODEFLASH_MAVEN_COORD}' // codeflash-runtime\n"
# Use tree-sitter to find the top-level dependencies block
insert_pos = _find_top_level_dependencies_block(build_file, content)
@ -284,9 +357,13 @@ def add_codeflash_dependency(build_file: Path, runtime_jar_path: Path) -> bool:
# No existing dependencies block — append one
if is_kts:
content += f'\ndependencies {{\n testImplementation(files("{jar_str}")) // codeflash-runtime\n}}\n'
content += (
f'\ndependencies {{\n testImplementation("{_CODEFLASH_MAVEN_COORD}") // codeflash-runtime\n}}\n'
)
else:
content += f"\ndependencies {{\n testImplementation files('{jar_str}') // codeflash-runtime\n}}\n"
content += (
f"\ndependencies {{\n testImplementation '{_CODEFLASH_MAVEN_COORD}' // codeflash-runtime\n}}\n"
)
build_file.write_text(content, encoding="utf-8")
logger.info("Added codeflash-runtime dependency to %s (new block)", build_file.name)
return True
@ -420,34 +497,21 @@ class GradleStrategy(BuildToolStrategy):
return self.find_wrapper_executable(build_root, ("gradlew", "gradlew.bat"), "gradle")
def ensure_runtime(self, build_root: Path, test_module: str | None) -> bool:
runtime_jar = self.find_runtime_jar()
if runtime_jar is None:
logger.error("codeflash-runtime JAR not found. Generated tests will fail to compile.")
return False
if test_module:
module_root = build_root / module_to_dir(test_module)
else:
module_root = build_root
libs_dir = module_root / "libs"
libs_dir.mkdir(parents=True, exist_ok=True)
dest_jar = libs_dir / "codeflash-runtime-1.0.1.jar"
if not dest_jar.exists():
logger.info("Copying codeflash-runtime JAR to %s", dest_jar)
shutil.copy2(runtime_jar, dest_jar)
build_file = find_gradle_build_file(module_root)
if build_file is None:
logger.warning("No build.gradle(.kts) found at %s, cannot add codeflash-runtime dependency", module_root)
return False
if not test_module and _is_multimodule_project(build_root):
if not add_codeflash_dependency_multimodule(build_file, dest_jar):
if not add_codeflash_dependency_multimodule(build_file):
logger.error("Failed to add codeflash-runtime dependency to %s", build_file)
return False
elif not add_codeflash_dependency(build_file, dest_jar):
elif not add_codeflash_dependency(build_file):
logger.error("Failed to add codeflash-runtime dependency to %s", build_file)
return False
@ -469,14 +533,22 @@ class GradleStrategy(BuildToolStrategy):
logger.error("Gradle not found — cannot pre-install multi-module dependencies")
return False
cmd = [gradle, f":{test_module}:classes", "-x", "test", "--build-cache", "--no-daemon"]
cmd = [
gradle,
f":{test_module}:testClasses",
"-x",
"test",
"--build-cache",
"--no-daemon",
"--configure-on-demand",
]
cmd.extend(["--init-script", _get_skip_validation_init_script()])
logger.info("Pre-installing multi-module dependencies: %s (module: %s)", build_root, test_module)
logger.debug("Running: %s", " ".join(cmd))
try:
result = _run_cmd_kill_pg_on_timeout(cmd, cwd=build_root, env=env, timeout=300)
result = _run_cmd_kill_pg_on_timeout(cmd, cwd=build_root, env=env, timeout=900)
if result.returncode != 0:
logger.error(
"Failed to pre-install multi-module deps (exit %d).\nstdout: %s\nstderr: %s",
@ -504,9 +576,9 @@ class GradleStrategy(BuildToolStrategy):
return subprocess.CompletedProcess(args=["gradle"], returncode=-1, stdout="", stderr="Gradle not found")
if test_module:
cmd = [gradle, f":{test_module}:testClasses", "--no-daemon"]
cmd = [gradle, f":{test_module}:testClasses", "--no-daemon", "--configure-on-demand"]
else:
cmd = [gradle, "testClasses", "--no-daemon"]
cmd = [gradle, "testClasses", "--no-daemon", "--configure-on-demand"]
cmd.extend(["--init-script", _get_skip_validation_init_script()])
logger.debug("Compiling tests: %s in %s", " ".join(cmd), build_root)
@ -528,9 +600,9 @@ class GradleStrategy(BuildToolStrategy):
return subprocess.CompletedProcess(args=["gradle"], returncode=-1, stdout="", stderr="Gradle not found")
if test_module:
cmd = [gradle, f":{test_module}:classes", "--no-daemon"]
cmd = [gradle, f":{test_module}:classes", "--no-daemon", "--configure-on-demand"]
else:
cmd = [gradle, "classes", "--no-daemon"]
cmd = [gradle, "classes", "--no-daemon", "--configure-on-demand"]
cmd.extend(["--init-script", _get_skip_validation_init_script()])
logger.debug("Compiling source only: %s in %s", " ".join(cmd), build_root)
@ -574,7 +646,7 @@ class GradleStrategy(BuildToolStrategy):
else:
task = "codeflashPrintClasspath"
cmd = [gradle, "--init-script", init_script_path, task, "-q", "--no-daemon"]
cmd = [gradle, "--init-script", init_script_path, task, "-q", "--no-daemon", "--configure-on-demand"]
logger.debug("Getting classpath: %s", " ".join(cmd))
@ -657,7 +729,7 @@ class GradleStrategy(BuildToolStrategy):
mode: str,
test_module: str | None,
javaagent_arg: str | None = None,
enable_coverage: bool = False,
enable_coverage: bool = False, # kept for interface compatibility; coverage now uses JAVA_TOOL_OPTIONS
) -> subprocess.CompletedProcess[str]:
from codeflash.languages.java.test_runner import _build_test_filter, _run_cmd_kill_pg_on_timeout
@ -725,28 +797,15 @@ class GradleStrategy(BuildToolStrategy):
with os.fdopen(init_fd, "w", encoding="utf-8") as f:
f.write(init_script_content)
cmd = [gradle, task, "--no-daemon", "--rerun", "--init-script", init_path]
cmd = [gradle, task, "--no-daemon", "--rerun", "--configure-on-demand", "--init-script", init_path]
cmd.extend(["--init-script", _get_skip_validation_init_script()])
# --continue ensures Gradle keeps going even if some tests fail.
# For coverage: needed so jacocoTestReport runs even after test failures
# (matches Maven's -Dmaven.test.failure.ignore=true).
# Note: multi-module --tests filtering is handled by
# filter.failOnNoMatchingTests = false in the init script above
# (matches Maven's -DfailIfNoTests=false).
if enable_coverage:
cmd.append("--continue")
for class_filter in test_filter.split(","):
class_filter = class_filter.strip()
if class_filter:
cmd.extend(["--tests", class_filter])
logger.debug("Added --tests filters to Gradle command")
# Append jacocoTestReport AFTER --tests so Gradle doesn't try to apply --tests to it
if enable_coverage:
cmd.append("jacocoTestReport")
logger.debug("Running Gradle command: %s in %s", " ".join(cmd), build_root)
result = _run_cmd_kill_pg_on_timeout(cmd, cwd=build_root, env=env, timeout=timeout)
@ -883,64 +942,105 @@ class GradleStrategy(BuildToolStrategy):
timeout: int,
candidate_index: int,
) -> tuple[subprocess.CompletedProcess[str], Path, Path | None]:
from codeflash.languages.java.line_profiler import find_agent_jar
from codeflash.languages.java.test_runner import _get_combined_junit_xml
coverage_xml_path = self.setup_coverage(build_root, test_module, build_root)
if test_module:
module_path = build_root / module_to_dir(test_module)
else:
module_path = build_root
# Locate the runtime JAR (contains shaded JaCoCo agent + CLI)
classpath = self.get_classpath(build_root, run_env, test_module)
runtime_jar = find_agent_jar(classpath=classpath)
if runtime_jar is None:
logger.warning("codeflash-runtime JAR not found, cannot collect coverage")
result = self.run_tests_via_build_tool(
build_root,
test_paths,
run_env,
timeout=timeout,
mode="behavior",
enable_coverage=True,
test_module=test_module,
build_root, test_paths, run_env, timeout=timeout, mode="behavior", test_module=test_module
)
reports_dir = self.get_reports_dir(build_root, test_module)
result_xml_path = _get_combined_junit_xml(reports_dir, candidate_index)
return result, result_xml_path, None
# Use the runtime JAR's built-in JaCoCo agent via AgentDispatcher
exec_path = module_path / "build" / "jacoco" / "test.exec"
exec_path.parent.mkdir(parents=True, exist_ok=True)
jacoco_agent_arg = f"-javaagent:{runtime_jar}=destfile={exec_path}"
run_env = run_env.copy()
existing_opts = run_env.get("JAVA_TOOL_OPTIONS", "")
run_env["JAVA_TOOL_OPTIONS"] = f"{existing_opts} {jacoco_agent_arg}".strip()
# Run tests WITHOUT enable_coverage (no jacocoTestReport task needed)
result = self.run_tests_via_build_tool(
build_root, test_paths, run_env, timeout=timeout, mode="behavior", test_module=test_module
)
reports_dir = self.get_reports_dir(build_root, test_module)
result_xml_path = _get_combined_junit_xml(reports_dir, candidate_index)
# Convert .exec → .xml via the shaded JaCoCo CLI in the runtime JAR
coverage_xml_path = self._convert_jacoco_exec_to_xml(runtime_jar, exec_path, module_path)
return result, result_xml_path, coverage_xml_path
def _convert_jacoco_exec_to_xml(self, runtime_jar: Path, exec_path: Path, module_path: Path) -> Path | None:
    """Convert a JaCoCo ``.exec`` binary dump into an XML coverage report.

    Runs the JaCoCo CLI that is shaded inside the codeflash-runtime JAR
    (``com.codeflash.shaded.org.jacoco.cli.internal.Main report``) as a
    subprocess.

    Args:
        runtime_jar: codeflash-runtime JAR containing the shaded JaCoCo CLI.
        exec_path: ``.exec`` file produced by the JaCoCo agent during the run.
        module_path: Gradle module directory; compiled classes are looked up
            under ``build/classes/java/{main,test}``.

    Returns:
        Path to the generated XML report, or ``None`` on any failure
        (missing exec file, no class directories, CLI error, or timeout).
    """
    if not exec_path.exists():
        logger.warning("JaCoCo exec file not found: %s", exec_path)
        return None

    report_xml = exec_path.with_suffix(".xml")

    # The CLI needs the compiled class files to map execution data to source lines.
    candidate_dirs = (
        module_path / "build" / "classes" / "java" / "main",
        module_path / "build" / "classes" / "java" / "test",
    )
    class_dirs = [str(d) for d in candidate_dirs if d.exists()]
    if not class_dirs:
        logger.warning("No classfiles directories found under %s/build/classes", module_path)
        return None

    report_cmd = [
        "java",
        "-cp",
        str(runtime_jar),
        "com.codeflash.shaded.org.jacoco.cli.internal.Main",
        "report",
        str(exec_path),
    ]
    for class_dir in class_dirs:
        report_cmd += ["--classfiles", class_dir]
    report_cmd += ["--xml", str(report_xml)]

    logger.debug("Converting JaCoCo exec to XML: %s", " ".join(report_cmd))
    try:
        proc = subprocess.run(report_cmd, capture_output=True, text=True, timeout=30, check=False)
        if proc.returncode != 0:
            logger.warning(
                "JaCoCo exec→XML conversion failed (exit %d): %s", proc.returncode, proc.stderr
            )
            return None
    except Exception:
        # Covers subprocess.TimeoutExpired and unexpected launch failures.
        logger.exception("JaCoCo exec→XML conversion error")
        return None

    if report_xml.exists():
        logger.info("JaCoCo coverage XML generated: %s", report_xml)
        return report_xml
    logger.warning("JaCoCo XML not created at %s", report_xml)
    return None
def setup_coverage(self, build_root: Path, test_module: str | None, project_root: Path) -> Path | None:
    """Ensure the JaCoCo plugin is applied to the target module's Gradle build file.

    If the build file does not already mention JaCoCo, the plugin is injected:
    inside an existing ``plugins { }`` block when one is present, otherwise by
    prepending an ``apply plugin`` / ``plugins`` stanza.

    Args:
        build_root: Root directory of the Gradle build.
        test_module: Optional Gradle module name; when set, the module
            directory under ``build_root`` is patched instead of ``project_root``.
        project_root: Module directory used when no test module is given.

    Returns:
        Expected path of the JaCoCo XML report (``build/jacoco/test.xml``, the
        location written by the exec-to-XML conversion step), or ``None`` when
        no build.gradle(.kts) could be found.
    """
    if test_module:
        module_root = build_root / module_to_dir(test_module)
    else:
        module_root = project_root
    build_file = find_gradle_build_file(module_root)
    if build_file is None:
        logger.warning("No build.gradle(.kts) found at %s, cannot setup JaCoCo", module_root)
        return None
    content = build_file.read_text(encoding="utf-8")
    if "jacoco" not in content.lower():
        logger.info("Adding JaCoCo plugin to %s for coverage collection", build_file.name)
        is_kts = build_file.name.endswith(".kts")
        if is_kts:
            plugin_line = "plugins {\n jacoco\n}\n"
        else:
            plugin_line = "apply plugin: 'jacoco'\n"
        if "plugins {" in content or "plugins{" in content:
            # Insert jacoco just before the closing brace of the existing
            # plugins block, tracked via brace depth.
            plugins_idx = content.find("plugins")
            brace_depth = 0
            for i in range(plugins_idx, len(content)):
                if content[i] == "{":
                    brace_depth += 1
                elif content[i] == "}":
                    brace_depth -= 1
                    if brace_depth == 0:
                        insert = " jacoco\n" if is_kts else " id 'jacoco'\n"
                        content = content[:i] + insert + content[i:]
                        break
            else:
                # Robustness fix: unbalanced braces meant the original wrote the
                # file back without ever adding the plugin. Fall back to
                # prepending the plugin stanza instead.
                content = plugin_line + content
        else:
            content = plugin_line + content
        build_file.write_text(content, encoding="utf-8")
    # Merge-residue fix: a stale duplicate return (the old jacocoTestReport
    # path) preceded this line and made it unreachable. The exec→XML converter
    # writes its report to build/jacoco/test.xml, so that is the path to return.
    return module_root / "build" / "jacoco" / "test.xml"
def get_test_run_command(self, project_root: Path, test_classes: list[str] | None = None) -> list[str]:
from codeflash.languages.java.test_runner import _validate_java_class_name
@ -952,7 +1052,7 @@ class GradleStrategy(BuildToolStrategy):
raise ValueError(msg)
gradle = self.find_executable(project_root) or "gradle"
cmd = [gradle, "test", "--no-daemon"]
cmd = [gradle, "test", "--no-daemon", "--configure-on-demand"]
if test_classes:
for cls in test_classes:
cmd.extend(["--tests", cls])

View file

@ -203,6 +203,7 @@ def _generate_sqlite_write_code(
func_name: str,
test_method_name: str,
invocation_id: str = "",
verification_type: str = "function_call",
) -> list[str]:
"""Generate SQLite write code for a single function call.
@ -249,7 +250,7 @@ def _generate_sqlite_write_code(
f'{inner_indent} _cf_pstmt{id_pair}.setString(6, "{inv_id_str}");',
f"{inner_indent} _cf_pstmt{id_pair}.setLong(7, _cf_dur{id_pair});",
f"{inner_indent} _cf_pstmt{id_pair}.setBytes(8, _cf_serializedResult{id_pair});",
f'{inner_indent} _cf_pstmt{id_pair}.setString(9, "function_call");',
f'{inner_indent} _cf_pstmt{id_pair}.setString(9, "{verification_type}");',
f"{inner_indent} _cf_pstmt{id_pair}.executeUpdate();",
f"{inner_indent} }}",
f"{inner_indent} }}",
@ -337,12 +338,41 @@ def wrap_target_calls_with_treesitter(
orig_line = body_lines[line_idx]
line_indent_str = " " * (len(orig_line) - len(orig_line.lstrip()))
is_void = target_return_type == "void"
var_name = f"_cf_result{iter_id}_{call_counter}"
receiver = call.get("receiver", "this")
arg_texts: list[str] = call.get("arg_texts", [])
cast_type = _infer_array_cast_type(orig_line)
if not cast_type and target_return_type and target_return_type != "void":
if not cast_type and target_return_type and not is_void:
cast_type = target_return_type
var_with_cast = f"({cast_type}){var_name}" if cast_type else var_name
if is_void:
bare_call_stmt = f"{call['full_call']};"
# For void methods, serialize the post-call state to capture side effects.
# We always serialize the arguments (which are mutated in place).
# For instance methods, we also include the receiver to capture object state changes.
# For static methods, the receiver is a class name (not a value), so args only.
is_static_call = receiver != "this" and receiver[:1].isupper()
parts: list[str] = []
if not is_static_call:
parts.append(receiver)
parts.extend(arg_texts)
if parts:
serialize_target = f"new Object[]{{{', '.join(parts)}}}"
else:
serialize_target = "new Object[]{}"
if precise_call_timing:
serialize_stmt = f"_cf_serializedResult{iter_id}_{call_counter} = com.codeflash.Serializer.serialize({serialize_target});"
start_stmt = f"_cf_start{iter_id}_{call_counter} = System.nanoTime();"
end_stmt = f"_cf_end{iter_id}_{call_counter} = System.nanoTime();"
else:
serialize_stmt = (
f"_cf_serializedResult{iter_id} = com.codeflash.Serializer.serialize({serialize_target});"
)
start_stmt = f"_cf_start{iter_id} = System.nanoTime();"
end_stmt = f"_cf_end{iter_id} = System.nanoTime();"
else:
capture_stmt_with_decl = f"var {var_name} = {call['full_call']};"
capture_stmt_assign = f"{var_name} = {call['full_call']};"
if precise_call_timing:
@ -350,7 +380,9 @@ def wrap_target_calls_with_treesitter(
start_stmt = f"_cf_start{iter_id}_{call_counter} = System.nanoTime();"
end_stmt = f"_cf_end{iter_id}_{call_counter} = System.nanoTime();"
else:
serialize_stmt = f"_cf_serializedResult{iter_id} = com.codeflash.Serializer.serialize((Object) {var_name});"
serialize_stmt = (
f"_cf_serializedResult{iter_id} = com.codeflash.Serializer.serialize((Object) {var_name});"
)
start_stmt = f"_cf_start{iter_id} = System.nanoTime();"
end_stmt = f"_cf_end{iter_id} = System.nanoTime();"
@ -360,6 +392,13 @@ def wrap_target_calls_with_treesitter(
if precise_call_timing:
# No indent on first line — body_text[:es_start] already has leading whitespace.
# Subsequent lines get line_indent_str.
if is_void:
var_decls = [
f"long _cf_end{iter_id}_{call_counter} = -1;",
f"long _cf_start{iter_id}_{call_counter} = 0;",
f"byte[] _cf_serializedResult{iter_id}_{call_counter} = null;",
]
else:
var_decls = [
f"Object {var_name} = null;",
f"long _cf_end{iter_id}_{call_counter} = -1;",
@ -367,6 +406,15 @@ def wrap_target_calls_with_treesitter(
f"byte[] _cf_serializedResult{iter_id}_{call_counter} = null;",
]
start_marker = f'System.out.println("!$######" + _cf_mod{iter_id} + ":" + _cf_cls{iter_id} + "." + _cf_test{iter_id} + ":" + _cf_fn{iter_id} + ":" + _cf_loop{iter_id} + ":{inv_id}" + "######$!");'
if is_void:
try_block = [
"try {",
f" {start_stmt}",
f" {bare_call_stmt}",
f" {end_stmt}",
f" {serialize_stmt}",
]
else:
try_block = [
"try {",
f" {start_stmt}",
@ -375,16 +423,30 @@ def wrap_target_calls_with_treesitter(
f" {serialize_stmt}",
]
finally_block = _generate_sqlite_write_code(
iter_id, call_counter, "", class_name, func_name, test_method_name, invocation_id=inv_id
iter_id,
call_counter,
"",
class_name,
func_name,
test_method_name,
invocation_id=inv_id,
verification_type="void_state" if is_void else "function_call",
)
all_lines = [*var_decls, start_marker, *try_block, *finally_block]
replacement = (
all_lines[0] + "\n" + "\n".join(f"{line_indent_str}{repl_line}" for repl_line in all_lines[1:])
)
elif is_void:
replacement = f"{bare_call_stmt} {serialize_stmt}"
else:
replacement = f"{capture_stmt_with_decl} {serialize_stmt}"
body_text = body_text[:es_start] + replacement + body_text[es_end:]
else:
if is_void:
# Void calls cannot be embedded in expressions in valid Java — skip instrumentation
logger.warning("Skipping instrumentation of embedded void call: %s", call["full_call"])
continue
# Embedded call: replace call with variable, then insert capture lines before the line
call_start = call["_call_start_char"]
call_end = call["_call_end_char"]
@ -451,6 +513,15 @@ def _collect_calls(
if parent_type == "expression_statement":
es_start = parent.start_byte - prefix_len
es_end = parent.end_byte - prefix_len
object_node = node.child_by_field_name("object")
receiver = analyzer.get_node_text(object_node, wrapper_bytes) if object_node else "this"
# Extract argument texts for void method serialization
args_node = node.child_by_field_name("arguments")
arg_texts: list[str] = []
if args_node:
for child in args_node.children:
if child.type not in ("(", ")", ","):
arg_texts.append(analyzer.get_node_text(child, wrapper_bytes))
out.append(
{
"start_byte": start,
@ -461,6 +532,8 @@ def _collect_calls(
"in_complex": _is_inside_complex_expression(node),
"es_start_byte": es_start,
"es_end_byte": es_end,
"receiver": receiver,
"arg_texts": arg_texts,
}
)
for child in node.children:

View file

@ -13,6 +13,7 @@ from __future__ import annotations
import json
import logging
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, Any
@ -130,9 +131,9 @@ class JavaLineProfiler:
config_output_path.write_text(json.dumps(config, indent=2), encoding="utf-8")
return config_output_path
def build_javaagent_arg(self, config_path: Path) -> str:
def build_javaagent_arg(self, config_path: Path, classpath: str | None = None) -> str:
"""Return the -javaagent JVM argument string."""
agent_jar = find_agent_jar()
agent_jar = find_agent_jar(classpath=classpath)
if agent_jar is None:
msg = f"{AGENT_JAR_NAME} not found in resources or dev build directory"
raise FileNotFoundError(msg)
@ -565,12 +566,20 @@ def find_method_for_line(
return Path(file_path).name, line_num
def find_agent_jar() -> Path | None:
def find_agent_jar(classpath: str | None = None) -> Path | None:
"""Locate the profiler agent JAR file (now bundled in codeflash-runtime).
Checks local Maven repo, package resources, and development build directory.
Checks the resolved classpath (if provided), local Maven repo, package resources,
and development build directory.
"""
# Check local Maven repository first (fastest)
# Check resolved classpath first (Gradle projects resolve here, not ~/.m2)
if classpath:
for entry in classpath.split(os.pathsep):
jar_path = Path(entry)
if "codeflash-runtime" in jar_path.name and jar_path.suffix == ".jar" and jar_path.exists():
return jar_path
# Check local Maven repository (Maven projects resolve here)
m2_jar = (
Path.home()
/ ".m2"

View file

@ -43,6 +43,8 @@ _MAVEN_VALIDATION_SKIP_FLAGS = [
"-Denforcer.skip=true",
"-Djapicmp.skip=true",
"-Derrorprone.skip=true",
"-Dspotless.check.skip=true",
"-Dspotless.apply.skip=true",
"-Dmaven.compiler.failOnWarning=false",
"-Dmaven.compiler.showWarnings=false",
]

View file

@ -189,6 +189,7 @@ class JavaAssertTransformer:
qualified_name: str | None = None,
analyzer: JavaAnalyzer | None = None,
mode: str = "capture",
target_return_type: str = "",
) -> None:
self.analyzer = analyzer or get_java_analyzer()
self.func_name = function_name
@ -196,6 +197,7 @@ class JavaAssertTransformer:
self.invocation_counter = 0
self._detected_framework: str | None = None
self.mode = mode # "capture" (default, instrumentation) or "strip" (clean display)
self.target_return_type = target_return_type
# Precompile the assignment-detection regex to avoid recompiling on each call.
self._assign_re = re.compile(r"(\w+(?:<[^>]+>)?)\s+(\w+)\s*=\s*$")
@ -1062,7 +1064,7 @@ class JavaAssertTransformer:
if not assertion.target_calls:
return ""
if self.mode == "strip":
if self.mode == "strip" or self.target_return_type == "void":
return self._generate_strip_replacement(assertion)
# Infer the return type from assertion context to avoid Object→primitive cast errors
@ -1244,7 +1246,9 @@ class JavaAssertTransformer:
return "".join(cur).rstrip()
def transform_java_assertions(source: str, function_name: str, qualified_name: str | None = None) -> str:
def transform_java_assertions(
source: str, function_name: str, qualified_name: str | None = None, target_return_type: str = ""
) -> str:
"""Transform Java test code by removing assertions and capturing function calls.
This is the main entry point for Java assertion transformation.
@ -1253,12 +1257,15 @@ def transform_java_assertions(source: str, function_name: str, qualified_name: s
source: The Java test source code.
function_name: Name of the function being tested.
qualified_name: Optional fully qualified name of the function.
target_return_type: Return type of the target function (e.g., "void", "int").
Returns:
Transformed source code with assertions replaced by capture statements.
"""
transformer = JavaAssertTransformer(function_name=function_name, qualified_name=qualified_name)
transformer = JavaAssertTransformer(
function_name=function_name, qualified_name=qualified_name, target_return_type=target_return_type
)
return transformer.transform(source)

View file

@ -590,7 +590,7 @@ class JavaSupport(LanguageSupport):
)
def instrument_source_for_line_profiler(
self, func_info: FunctionToOptimize, line_profiler_output_file: Path
self, func_info: FunctionToOptimize, line_profiler_output_file: Path, project_classpath: str | None = None
) -> bool:
"""Prepare line profiling via the bytecode-instrumentation agent.
@ -602,6 +602,7 @@ class JavaSupport(LanguageSupport):
Args:
func_info: Function to profile.
line_profiler_output_file: Path where profiling results will be written by the agent.
project_classpath: Resolved classpath from the build tool, used to locate the agent JAR.
Returns:
True if preparation succeeded, False otherwise.
@ -619,7 +620,7 @@ class JavaSupport(LanguageSupport):
source=source, file_path=func_info.file_path, functions=[func_info], config_output_path=config_path
)
self.line_profiler_agent_arg = profiler.build_javaagent_arg(config_path)
self.line_profiler_agent_arg = profiler.build_javaagent_arg(config_path, classpath=project_classpath)
self.line_profiler_warmup_iterations = profiler.warmup_iterations
return True
except Exception:

Some files were not shown because too many files have changed in this diff Show more