fix: resolve all ruff lint errors across repo (#38)

* fix: resolve all ruff lint errors across repo

Auto-fixed 31 errors (unused imports, import sorting, simplifications).
Manually fixed the remaining 14:
- EXE001: removed shebangs from non-executable bench scripts
- C417: replaced map(lambda) with generator expression
- C901/PLR0915: extracted _write_and_instrument_tests from generate_ai_tests
- C901/PLR0912: extracted _parse_toml_addopts and _ini_section_name from modify_addopts
- RUF001/RUF002: replaced ambiguous Unicode chars (en dash, multiplication sign)
- FBT002: made boolean params keyword-only in report functions (sketched below)
- E402: moved `import re` to top of file in security reports
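
A minimal sketch of the C417 and FBT002 patterns above (names here are illustrative, not the actual report APIs):

    rows = ["a,b", "c,d"]

    # C417: generator expression instead of map(lambda ...).
    split_rows = (row.split(",") for row in rows)  # was: map(lambda row: row.split(","), rows)

    # FBT002: boolean defaults become keyword-only, so call sites must name them.
    def render_report(findings: list[dict], *, include_resolved: bool = False) -> list[dict]:
        return [f for f in findings if include_resolved or not f["resolved"]]

    render_report([{"id": 1, "resolved": True}], include_resolved=True)  # passing True positionally now raises TypeError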

* fix: resolve pre-existing mypy errors across packages

- _testgen.py: annotate `generated` as `str` to avoid no-any-return
- _test_runner.py: coerce TimeoutExpired stdout/stderr (bytes | str) with str()
  (sketched below); remove the unused type: ignore on proc.kill()
- _candidate_eval.py: annotate `speedup` as `float` to avoid no-any-return
  from lazy-loaded performance_gain
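
The TimeoutExpired change reduces to this shape (a sketch mirroring the _test_runner.py hunk further down; the helper name is hypothetical):

    import subprocess

    def _timeout_to_completed(cmd_list: list[str], exc: subprocess.TimeoutExpired) -> subprocess.CompletedProcess:
        # TimeoutExpired.stdout/.stderr may be bytes, str, or None depending on how
        # the process was launched, so coerce before filling the str-typed fields.
        return subprocess.CompletedProcess(
            args=cmd_list,
            returncode=-1,
            stdout=str(exc.stdout) if exc.stdout else "",
            stderr=str(exc.stderr) if exc.stderr else "",
        )
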
Kevin Turcios 2026-04-23 10:22:42 -05:00 committed by GitHub
parent c249bcd0ce
commit 3ee9c22c8e
24 changed files with 1706 additions and 1078 deletions

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python
"""Benchmark for split_multichar optimization (Target 6)"""
import sys
@ -12,7 +11,7 @@ def split_multichar_original(ss, chars):
if len(chars) == 0:
return ss
c = chars.pop()
ss = reduce(lambda x, y: x + y, map(lambda x: x.split(c), ss))
ss = reduce(lambda x, y: x + y, (x.split(c) for x in ss))
return split_multichar_original(ss, chars)
@ -70,7 +69,7 @@ def main():
# Verify results match
if orig_result != opt_result:
print(f" ERROR: Results don't match!")
print(" ERROR: Results don't match!")
print(f" Original: {orig_result}")
print(f" Optimized: {opt_result}")
sys.exit(1)

View file

@ -1,8 +1,8 @@
#!/usr/bin/env python
"""Benchmark for _vals_equal optimization (Target 5)"""
import sys
import time
import numpy as np
@ -13,19 +13,18 @@ def vals_equal_original(v1, v2):
isinstance(v1, np.ndarray) or isinstance(v2, np.ndarray)
):
return np.array_equal(v1, v2)
elif isinstance(v1, (list, tuple)):
if isinstance(v1, (list, tuple)):
return (
isinstance(v2, (list, tuple))
and len(v1) == len(v2)
and all(vals_equal_original(e1, e2) for e1, e2 in zip(v1, v2))
)
elif isinstance(v1, dict):
if isinstance(v1, dict):
return (
isinstance(v2, dict)
and set(v1.keys()) == set(v2.keys())
and all(vals_equal_original(v1[k], v2[k]) for k in v1)
)
else:
return v1 == v2
@ -43,19 +42,18 @@ def vals_equal_optimized(v1, v2):
return False
# Now do element-wise comparison (np.array_equal handles dtype conversion)
return np.array_equal(v1, v2)
elif isinstance(v1, (list, tuple)):
if isinstance(v1, (list, tuple)):
return (
isinstance(v2, (list, tuple))
and len(v1) == len(v2)
and all(vals_equal_optimized(e1, e2) for e1, e2 in zip(v1, v2))
)
elif isinstance(v1, dict):
if isinstance(v1, dict):
return (
isinstance(v2, dict)
and set(v1.keys()) == set(v2.keys())
and all(vals_equal_optimized(v1[k], v2[k]) for k in v1)
)
else:
return v1 == v2
@ -77,29 +75,28 @@ def main():
# Test cases
test_cases = [
("Equal arrays (same data)",
(
"Equal arrays (same data)",
np.array([1, 2, 3, 4, 5] * 100),
np.array([1, 2, 3, 4, 5] * 100)),
("Different shapes (early exit)",
np.array([1, 2, 3, 4, 5] * 100),
np.array([1, 2, 3, 4] * 100)),
("Different dtypes (but same values)",
),
(
"Different shapes (early exit)",
np.array([1, 2, 3, 4, 5] * 100),
np.array([1, 2, 3, 4] * 100),
),
(
"Different dtypes (but same values)",
np.array([1, 2, 3, 4, 5] * 100, dtype=np.int32),
np.array([1, 2, 3, 4, 5] * 100, dtype=np.float64)),
("Different values (late difference)",
np.array([1, 2, 3, 4, 5] * 100, dtype=np.float64),
),
(
"Different values (late difference)",
np.array([1, 2, 3, 4, 5] * 100),
np.array([1, 2, 3, 4, 6] * 100)),
("Large equal arrays",
np.arange(10000),
np.arange(10000)),
("Array vs scalar (type mismatch)",
np.array([1, 2, 3]),
[1, 2, 3]),
np.array([1, 2, 3, 4, 6] * 100),
),
("Large equal arrays", np.arange(10000), np.arange(10000)),
("Array vs scalar (type mismatch)", np.array([1, 2, 3]), [1, 2, 3]),
]
for name, v1, v2 in test_cases:
@ -113,13 +110,13 @@ def main():
# Verify results match
if orig_result != opt_result:
print(f" ERROR: Results don't match!")
print(" ERROR: Results don't match!")
print(f" Original: {orig_result}")
print(f" Optimized: {opt_result}")
sys.exit(1)
# Report results
speedup = orig_time / opt_time if opt_time > 0 else float('inf')
speedup = orig_time / opt_time if opt_time > 0 else float("inf")
print(f" Result: {orig_result}")
print(f" Original: {orig_time * 1e6:.2f} µs")
print(f" Optimized: {opt_time * 1e6:.2f} µs")

View file

@ -11,9 +11,9 @@ from codeflash_api.repair._context import (
is_valid_repair,
)
from codeflash_api.repair.schemas import (
CodeRepairRequest,
BehaviorDiff,
BehaviorDiffScope,
CodeRepairRequest,
)
# -------------------------------------------------------------------

View file

@ -57,7 +57,9 @@ class PytestCollectionPlugin:
global pytest_rootdir, collected_tests
collected_tests.extend(session.items)
pytest_rootdir = getattr(session.config, "rootdir", None) or getattr(session.config, "rootpath", None)
pytest_rootdir = getattr(session.config, "rootdir", None) or getattr(
session.config, "rootpath", None
)
# Write results immediately since pytest.main() will exit after
# this callback, not always with a success code.
@ -87,7 +89,8 @@ if __name__ == "__main__":
tests_root,
"-p",
"no:logging",
"-o", "addopts=",
"-o",
"addopts=",
"--collect-only",
"-m",
"not skip",

View file

@ -168,7 +168,7 @@ async def run_tests_and_benchmark( # noqa: PLR0913
optimized_runtime,
)
speedup = performance_gain(
speedup: float = performance_gain(
original_runtime_ns=baseline.runtime,
optimized_runtime_ns=optimized_runtime,
)

View file

@ -362,9 +362,7 @@ def generate_ai_tests( # noqa: PLR0913
AIServiceError,
)
from ..test_discovery.models import TestType # noqa: PLC0415
from ..testing._testgen import generate_tests # noqa: PLC0415
from ..testing.models import TestFile # noqa: PLC0415
n_tests = 2 # matches original effort default
testgen_source = code_context.testgen_context.markdown
@ -444,10 +442,7 @@ def generate_ai_tests( # noqa: PLR0913
pending: list[PendingTest] = []
with ThreadPoolExecutor(max_workers=n_tests) as pool:
futures = {
pool.submit(_generate_one, i): i
for i in range(n_tests)
}
futures = {pool.submit(_generate_one, i): i for i in range(n_tests)}
for future in as_completed(futures):
p = future.result()
if p is not None:
@ -467,19 +462,24 @@ def generate_ai_tests( # noqa: PLR0913
fn_input=fn_input,
)
# Phase 4: write files, instrument client-side, create TestFile objects.
return _write_and_instrument_tests(pending, func, tests_rootdir)
def _write_and_instrument_tests(
pending: list[PendingTest],
func: FunctionToOptimize,
tests_rootdir: Path,
) -> list[TestFile]:
"""Write generated tests to disk and instrument for behavior/perf capture."""
from .._model import TestingMode # noqa: PLC0415
from ..test_discovery.models import TestType # noqa: PLC0415
from ..testing._instrumentation import ( # noqa: PLC0415
inject_profiling_into_existing_test,
)
from ..testing.models import TestFile # noqa: PLC0415
test_file_objects: list[TestFile] = []
for (
_idx,
generated_source,
test_path,
test_perf_path,
) in pending:
for _idx, generated_source, test_path, test_perf_path in pending:
test_path.write_text(generated_source, encoding="utf-8")
ok_beh, beh_src = inject_profiling_into_existing_test(
@ -497,10 +497,10 @@ def generate_ai_tests( # noqa: PLR0913
mode=TestingMode.PERFORMANCE,
)
beh_path: _Path | None = test_path.parent / (
beh_path: Path | None = test_path.parent / (
test_path.stem + "__perfinstrumented" + test_path.suffix
)
perf_path: _Path | None = test_perf_path
perf_path: Path | None = test_perf_path
if ok_beh and beh_src is not None:
beh_path.write_text(beh_src, encoding="utf-8") # type: ignore[union-attr]
@ -694,9 +694,7 @@ def build_test_env(
module_root = str(_Path(test_cfg.module_root))
existing = env.get("PYTHONPATH", "")
env["PYTHONPATH"] = (
f"{module_root}{os.pathsep}{existing}"
if existing
else module_root
f"{module_root}{os.pathsep}{existing}" if existing else module_root
)
return env

View file

@ -93,7 +93,9 @@ def discover_unit_tests(
for func in funcs
]
return discover_tests_pytest(
cfg, discover_only_these_tests, functions_to_optimize,
cfg,
discover_only_these_tests,
functions_to_optimize,
)
@ -229,5 +231,3 @@ def discover_tests_pytest( # noqa: C901, PLR0912, PLR0915
functions_to_optimize,
)
return _count_results(function_to_tests)

View file

@ -62,7 +62,6 @@ def module_name_from_file_path(
return relative_path.with_suffix("").as_posix().replace("/", ".")
def add_test_entries( # noqa: PLR0913
function_to_test_map: dict[str, set[FunctionCalledInTest]],
qualified_name: str,

View file

@ -85,85 +85,75 @@ def filter_args(addopts_args: list[str]) -> list[str]:
return filtered_args
def _parse_toml_addopts(
content: str,
) -> tuple[list[str], tomlkit.TOMLDocument, bool] | None:
"""Parse addopts from pyproject.toml, return (args, doc, uses_ini_options)."""
data = tomlkit.parse(content)
pytest_section = data.get("tool", {}).get("pytest", {})
original_addopts = pytest_section.get("ini_options", {}).get(
"addopts", ""
) or pytest_section.get("addopts", "")
if original_addopts == "":
return None
uses_ini_options = (
"ini_options" in pytest_section
and "addopts" in pytest_section.get("ini_options", {})
)
if isinstance(original_addopts, list):
original_addopts = " ".join(original_addopts)
addopts_args = original_addopts.replace("=", " ").split()
return addopts_args, data, uses_ini_options
def _ini_section_name(filename: str) -> str:
"""Return the config section name that holds pytest addopts."""
if filename in {"pytest.ini", ".pytest.ini", "tox.ini"}:
return "pytest"
return "tool:pytest"
def modify_addopts(
config_file: Path,
) -> tuple[str, bool]:
"""Modify addopts in *config_file*, return (original_content, was_modified)."""
file_type = config_file.suffix.lower()
filename = config_file.name
config = None
if file_type not in {".toml", ".ini", ".cfg"} or not config_file.exists():
return "", False
with config_file.open(encoding="utf-8") as f:
content = f.read()
try:
if filename == "pyproject.toml":
data = tomlkit.parse(content)
pytest_section = data.get("tool", {}).get("pytest", {})
original_addopts = (
pytest_section.get("ini_options", {}).get("addopts", "")
or pytest_section.get("addopts", "")
)
if original_addopts == "":
parsed = _parse_toml_addopts(content)
if parsed is None:
return content, False
uses_ini_options = "ini_options" in pytest_section and "addopts" in pytest_section.get("ini_options", {})
if isinstance(original_addopts, list):
original_addopts = " ".join(original_addopts)
original_addopts = original_addopts.replace("=", " ")
addopts_args = original_addopts.split()
addopts_args, data, uses_ini_options = parsed
else:
config = configparser.ConfigParser()
config.read_string(content)
section = _ini_section_name(filename)
cfg_data: dict[str, dict[str, str]] = {
section: dict(config[section]) for section in config.sections()
s: dict(config[s]) for s in config.sections()
}
if filename in {
"pytest.ini",
".pytest.ini",
"tox.ini",
}:
original_addopts = cfg_data.get(
"pytest",
{},
).get("addopts", "")
else:
original_addopts = cfg_data.get(
"tool:pytest",
{},
).get("addopts", "")
original_addopts = original_addopts.replace("=", " ")
addopts_args = original_addopts.split()
original = cfg_data.get(section, {}).get("addopts", "")
addopts_args = original.replace("=", " ").split()
new_addopts_args = filter_args(addopts_args)
if new_addopts_args == addopts_args:
return content, False
new_value = " ".join(new_addopts_args)
if file_type == ".toml":
if uses_ini_options:
data["tool"]["pytest"]["ini_options"]["addopts"] = ( # type: ignore[index]
" ".join(new_addopts_args)
)
data["tool"]["pytest"]["ini_options"]["addopts"] = new_value # type: ignore[index]
else:
data["tool"]["pytest"]["addopts"] = ( # type: ignore[index]
" ".join(new_addopts_args)
)
data["tool"]["pytest"]["addopts"] = new_value # type: ignore[index]
with config_file.open("w", encoding="utf-8") as f:
f.write(tomlkit.dumps(data))
return content, True
if filename in {"pytest.ini", ".pytest.ini", "tox.ini"}:
config.set( # type: ignore[union-attr]
"pytest",
"addopts",
" ".join(new_addopts_args),
)
section = _ini_section_name(filename)
config.set(section, "addopts", new_value)
with config_file.open("w", encoding="utf-8") as f:
config.write(f) # type: ignore[union-attr]
return content, True
config.set( # type: ignore[union-attr]
"tool:pytest",
"addopts",
" ".join(new_addopts_args),
)
with config_file.open("w", encoding="utf-8") as f:
config.write(f) # type: ignore[union-attr]
config.write(f)
return content, True
except Exception: # noqa: BLE001

View file

@ -71,8 +71,8 @@ def execute_test_subprocess(
return subprocess.CompletedProcess(
args=cmd_list,
returncode=-1,
stdout=exc.stdout or "",
stderr=exc.stderr or "",
stdout=str(exc.stdout) if exc.stdout else "",
stderr=str(exc.stderr) if exc.stderr else "",
)
@ -383,7 +383,7 @@ async def async_execute_test_subprocess(
timeout,
" ".join(cmd_list),
)
proc.kill() # type: ignore[union-attr]
proc.kill()
return subprocess.CompletedProcess(
args=cmd_list,
returncode=-1,

View file

@ -204,7 +204,7 @@ def repair_generated_tests(
data = client.post("/testgen_repair", payload)
except (AIServiceError, AIServiceConnectionError):
return None
generated = data.get("generated_tests", "")
generated: str = data.get("generated_tests", "")
if not generated:
return None
return generated
@ -272,9 +272,7 @@ def merge_unit_tests(
)
for node in ast.iter_child_nodes(modified_ast):
if isinstance(node, ast.FunctionDef) and node.name.startswith(
"test_"
):
if isinstance(node, ast.FunctionDef) and node.name.startswith("test_"):
node.name = node.name + "__inspired"
unit_test_source_ast.body.extend(modified_ast.body)

View file

@ -95,7 +95,9 @@ def parse_test_xml( # noqa: C901, PLR0912, PLR0915
)
log.debug(
"XML parse: suite_file=%r, classname=%r, testcase=%r, tc_attribs=%s",
test_file_name, class_name, testcase.name,
test_file_name,
class_name,
testcase.name,
dict(testcase._elem.attrib), # noqa: SLF001
)
@ -194,8 +196,14 @@ def parse_test_xml( # noqa: C901, PLR0912, PLR0915
"Known instrumented: %s, known original: %s",
test_file_path,
class_name,
[str(tf.instrumented_behavior_file_path) for tf in test_files.test_files],
[str(tf.original_file_path) for tf in test_files.test_files],
[
str(tf.instrumented_behavior_file_path)
for tf in test_files.test_files
],
[
str(tf.original_file_path)
for tf in test_files.test_files
],
)
log.warning(
"Test type not found for %s, skipping.",

View file

@ -494,8 +494,12 @@ class TestEstablishOriginalCodeBaseline:
return test_files, test_config, test_env
@patch("codeflash_python.testing._parse_results.parse_test_results")
@patch("codeflash_python.testing._test_runner.async_run_line_profile_tests")
@patch("codeflash_python.testing._test_runner.async_run_benchmarking_tests")
@patch(
"codeflash_python.testing._test_runner.async_run_line_profile_tests"
)
@patch(
"codeflash_python.testing._test_runner.async_run_benchmarking_tests"
)
@patch("codeflash_python.testing._test_runner.async_run_behavioral_tests")
async def test_successful_baseline(
self,
@ -551,8 +555,12 @@ class TestEstablishOriginalCodeBaseline:
assert result.runtime > 0
@patch("codeflash_python.testing._parse_results.parse_test_results")
@patch("codeflash_python.testing._test_runner.async_run_line_profile_tests")
@patch("codeflash_python.testing._test_runner.async_run_benchmarking_tests")
@patch(
"codeflash_python.testing._test_runner.async_run_line_profile_tests"
)
@patch(
"codeflash_python.testing._test_runner.async_run_benchmarking_tests"
)
@patch("codeflash_python.testing._test_runner.async_run_behavioral_tests")
async def test_empty_behavioral_returns_none(
self,
@ -585,8 +593,12 @@ class TestEstablishOriginalCodeBaseline:
assert result is None
@patch("codeflash_python.testing._parse_results.parse_test_results")
@patch("codeflash_python.testing._test_runner.async_run_line_profile_tests")
@patch("codeflash_python.testing._test_runner.async_run_benchmarking_tests")
@patch(
"codeflash_python.testing._test_runner.async_run_line_profile_tests"
)
@patch(
"codeflash_python.testing._test_runner.async_run_benchmarking_tests"
)
@patch("codeflash_python.testing._test_runner.async_run_behavioral_tests")
async def test_zero_benchmark_runtime_returns_none(
self,
@ -641,8 +653,12 @@ class TestEstablishOriginalCodeBaseline:
assert result is None
@patch("codeflash_python.testing._parse_results.parse_test_results")
@patch("codeflash_python.testing._test_runner.async_run_line_profile_tests")
@patch("codeflash_python.testing._test_runner.async_run_benchmarking_tests")
@patch(
"codeflash_python.testing._test_runner.async_run_line_profile_tests"
)
@patch(
"codeflash_python.testing._test_runner.async_run_benchmarking_tests"
)
@patch("codeflash_python.testing._test_runner.async_run_behavioral_tests")
async def test_precomputed_behavioral_skips_behavioral_run(
self,
@ -689,8 +705,12 @@ class TestEstablishOriginalCodeBaseline:
assert precomputed is result.behavior_test_results
@patch("codeflash_python.testing._parse_results.parse_test_results")
@patch("codeflash_python.testing._test_runner.async_run_line_profile_tests")
@patch("codeflash_python.testing._test_runner.async_run_benchmarking_tests")
@patch(
"codeflash_python.testing._test_runner.async_run_line_profile_tests"
)
@patch(
"codeflash_python.testing._test_runner.async_run_benchmarking_tests"
)
@patch("codeflash_python.testing._test_runner.async_run_behavioral_tests")
async def test_failed_regression_in_functions_to_remove(
self,

View file

@ -33,9 +33,7 @@ def _make_project(tmp_path: Path) -> tuple[Path, Path]:
class TestCreateProjectOverlay:
"""create_project_overlay directory structure."""
def test_overlay_contains_candidate_code(
self, tmp_path: Path
) -> None:
def test_overlay_contains_candidate_code(self, tmp_path: Path) -> None:
"""The target module file has the candidate code."""
root, mod = _make_project(tmp_path)
overlay = create_project_overlay(mod, root, "def fast(): ...")
@ -61,9 +59,7 @@ class TestCreateProjectOverlay:
finally:
cleanup_overlay(overlay)
def test_project_root_siblings_are_symlinked(
self, tmp_path: Path
) -> None:
def test_project_root_siblings_are_symlinked(self, tmp_path: Path) -> None:
"""Files at project root level are symlinked."""
root, mod = _make_project(tmp_path)
overlay = create_project_overlay(mod, root, "code")
@ -95,9 +91,7 @@ class TestCreateProjectOverlay:
root, mod = _make_project(tmp_path)
overlay = create_project_overlay(mod, root, "def fast(): ...")
try:
assert not (
overlay / "src" / "mypkg" / "core.py"
).is_symlink()
assert not (overlay / "src" / "mypkg" / "core.py").is_symlink()
finally:
cleanup_overlay(overlay)
@ -145,9 +139,7 @@ class TestCreateProjectOverlay:
import sys
root, mod = _make_project(tmp_path)
overlay = create_project_overlay(
mod, root, "VALUE = 42\n"
)
overlay = create_project_overlay(mod, root, "VALUE = 42\n")
try:
result = subprocess.run(
[

View file

@ -156,16 +156,29 @@ class TestNoGenTests:
fn_input.function.is_async = False
with (
patch("codeflash_python.pipeline._test_orchestrator.generate_ai_tests") as mock_gen,
patch("codeflash_python.pipeline._test_orchestrator.instrument_tests_for_function", return_value=None),
patch("codeflash_python.pipeline._test_orchestrator.generate_concolic_tests", return_value=({}, "", None)),
patch(
"codeflash_python.pipeline._test_orchestrator.generate_ai_tests"
) as mock_gen,
patch(
"codeflash_python.pipeline._test_orchestrator.instrument_tests_for_function",
return_value=None,
),
patch(
"codeflash_python.pipeline._test_orchestrator.generate_concolic_tests",
return_value=({}, "", None),
),
patch(
"codeflash_python.context.pipeline.get_code_optimization_context",
return_value=MagicMock(),
),
patch("codeflash_python.pipeline._module_prep.resolve_python_function_ast", return_value=None),
patch(
"codeflash_python.pipeline._module_prep.resolve_python_function_ast",
return_value=None,
),
patch(f"{_mod}.is_numerical_code", return_value=False),
patch("codeflash_python.verification._baseline.establish_original_code_baseline"),
patch(
"codeflash_python.verification._baseline.establish_original_code_baseline"
),
):
ctx = OptimizationContext(
plugin=MagicMock(),

View file

@ -235,7 +235,8 @@ def test_trace_ranking_keeps_addressable_time_primary_over_test_count(
return addressable_times[function.function_name]
with patch(
"codeflash_python.analysis._function_ranking.FunctionRanker", FakeRanker
"codeflash_python.analysis._function_ranking.FunctionRanker",
FakeRanker,
):
ranked = rank_functions_globally(
{project_root / "mod.py": funcs},
@ -288,7 +289,8 @@ def test_trace_ranking_uses_test_count_as_tiebreaker(
return addressable_times[function.function_name]
with patch(
"codeflash_python.analysis._function_ranking.FunctionRanker", FakeRanker
"codeflash_python.analysis._function_ranking.FunctionRanker",
FakeRanker,
):
ranked = rank_functions_globally(
{project_root / "mod.py": funcs},

View file

@ -239,6 +239,7 @@ class TestModifyInspiredTests:
assert 2 == len(import_list)
assert all(isinstance(n, ast.ImportFrom) for n in import_list)
class TestMergeUnitTests:
"""merge_unit_tests test merging."""

View file

@ -17,9 +17,9 @@ from codeflash_python.verification._verification import (
performance_gain,
)
from codeflash_python.verification.models import (
OptimizedCandidateResult,
BehaviorDiff,
BehaviorDiffScope,
OptimizedCandidateResult,
)

View file

@ -5,13 +5,13 @@ from __future__ import annotations
import httpx
import jwt as pyjwt
import respx
from helpers import WEBHOOK_SECRET
from github_app.auth import (
generate_jwt,
get_installation_token,
verify_signature,
)
from helpers import WEBHOOK_SECRET
def test_generate_jwt_structure(mock_config):

View file

@ -7,9 +7,9 @@ from pathlib import Path
from unittest.mock import patch
import pytest
from helpers import FAKE_RSA_PEM
from github_app.config import Config, default_plugin_dir, load_private_key
from helpers import FAKE_RSA_PEM
def test_load_private_key_from_env():

View file

@ -10,7 +10,15 @@ import os
from pathlib import Path
import plotly.graph_objects as go
from dash import Dash, Input, Output, clientside_callback, dash_table, dcc, html
from dash import (
Dash,
Input,
Output,
clientside_callback,
dash_table,
dcc,
html,
)
from theme import (
ACCENT,
AMBER,
@ -25,8 +33,6 @@ from theme import (
GREEN,
GRID_OVERLAY,
LIGHT_GRAY,
LIGHT_GREEN,
LIGHT_RED,
MONO,
PURPLE,
RED,
@ -204,7 +210,11 @@ def make_fork_chart():
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)",
font={"family": FONT, "size": 13, "color": SLATE},
xaxis={"title": "Workflow Runs (audit period)", "gridcolor": CARD_BORDER, "zeroline": False},
xaxis={
"title": "Workflow Runs (audit period)",
"gridcolor": CARD_BORDER,
"zeroline": False,
},
yaxis={"title": "", "automargin": True},
margin={"t": 10, "b": 50, "l": 10, "r": 60},
height=320,
@ -222,7 +232,9 @@ def make_fork_cost_chart():
labels.append("All others")
values.append(other)
colors = [ACCENT, BLUE, GREEN, PURPLE, AMBER, GRAY, LIGHT_GRAY, RED][:len(labels)]
colors = [ACCENT, BLUE, GREEN, PURPLE, AMBER, GRAY, LIGHT_GRAY, RED][
: len(labels)
]
fig = go.Figure()
fig.add_trace(
@ -259,8 +271,16 @@ def make_fork_cost_chart():
def make_before_after_chart():
"""Grouped bar: operational metrics before vs after."""
cats = ["Workflow Files", "Required Checks", "Failing Fork Runs/mo"]
before = [OPS["workflow_files"][0], OPS["required_checks"][0], OPS["fork_failing_runs_monthly"][0]]
after = [OPS["workflow_files"][1], OPS["required_checks"][1], OPS["fork_failing_runs_monthly"][1]]
before = [
OPS["workflow_files"][0],
OPS["required_checks"][0],
OPS["fork_failing_runs_monthly"][0],
]
after = [
OPS["workflow_files"][1],
OPS["required_checks"][1],
OPS["fork_failing_runs_monthly"][1],
]
fig = go.Figure()
fig.add_trace(
@ -312,7 +332,13 @@ def make_before_after_chart():
def make_run_volume_chart():
"""Bar chart: monthly workflow runs before vs after."""
months = ["Dec '25", "Jan '26", "Feb '26", "Mar '26", "Apr '26\n(projected)"]
months = [
"Dec '25",
"Jan '26",
"Feb '26",
"Mar '26",
"Apr '26\n(projected)",
]
runs = [4150, 9391, 21307, 14753, RUN_VOL["codeflash_apr_projected"]]
fig = go.Figure()
@ -348,7 +374,11 @@ def make_run_volume_chart():
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)",
font={"family": FONT, "size": 13, "color": SLATE},
yaxis={"title": "Workflow Runs", "gridcolor": CARD_BORDER, "zeroline": False},
yaxis={
"title": "Workflow Runs",
"gridcolor": CARD_BORDER,
"zeroline": False,
},
xaxis={"title": ""},
margin={"t": 20, "b": 60, "l": 70, "r": 20},
height=360,
@ -360,7 +390,10 @@ def make_run_volume_chart():
def make_billing_chart():
"""Stacked bar: Enterprise minutes allotment vs overage."""
cats = ["Before (Feb)", "After (Apr)"]
included = [BILLING["enterprise_included_min"], BILLING["enterprise_included_min"]]
included = [
BILLING["enterprise_included_min"],
BILLING["enterprise_included_min"],
]
overage = [BILLING["overage_before_min"], BILLING["overage_after_min"]]
fig = go.Figure()
@ -390,7 +423,11 @@ def make_billing_chart():
plot_bgcolor="rgba(0,0,0,0)",
paper_bgcolor="rgba(0,0,0,0)",
font={"family": FONT, "size": 13, "color": SLATE},
yaxis={"title": "Billed Minutes/month", "gridcolor": CARD_BORDER, "zeroline": False},
yaxis={
"title": "Billed Minutes/month",
"gridcolor": CARD_BORDER,
"zeroline": False,
},
xaxis={"title": ""},
margin={"t": 40, "b": 60, "l": 70, "r": 20},
legend={
@ -415,114 +452,288 @@ def _build_summary_tab():
children=[
# Hero metrics
html.Div(
style={"display": "flex", "gap": "16px", "marginTop": "32px", "flexWrap": "wrap"},
style={
"display": "flex",
"gap": "16px",
"marginTop": "32px",
"flexWrap": "wrap",
},
children=[
hero_metric(f"~${BILLING['overage_saved_annual_usd']:,}/yr", "Overage Savings", "Enterprise minutes overage reduced 58%", GREEN),
hero_metric(f"{RUN_VOL['codeflash_reduction_pct']}%", "Fewer Runs", f"{RUN_VOL['codeflash_feb']:,}{RUN_VOL['codeflash_apr_projected']:,}/mo", ACCENT),
hero_metric("200+", "Forks Disabled", "GitHub Actions turned off org-wide", BLUE),
hero_metric("22 → 7", "Workflows Consolidated", "Single ci.yaml with gate job", PURPLE),
hero_metric(
f"~${BILLING['overage_saved_annual_usd']:,}/yr",
"Overage Savings",
"Enterprise minutes overage reduced 58%",
GREEN,
),
hero_metric(
f"{RUN_VOL['codeflash_reduction_pct']}%",
"Fewer Runs",
f"{RUN_VOL['codeflash_feb']:,}{RUN_VOL['codeflash_apr_projected']:,}/mo",
ACCENT,
),
hero_metric(
"200+",
"Forks Disabled",
"GitHub Actions turned off org-wide",
BLUE,
),
hero_metric(
"22 → 7",
"Workflows Consolidated",
"Single ci.yaml with gate job",
PURPLE,
),
],
),
section(
"What We Found",
"Full CI/CD audit of the codeflash-ai GitHub org: 200+ forks and 2 main repos.",
),
# Key findings grid
html.Div(
style={"display": "grid", "gridTemplateColumns": "1fr 1fr", "gap": "16px"},
style={
"display": "grid",
"gridTemplateColumns": "1fr 1fr",
"gap": "16px",
},
children=[
card([
html.Div("Fork CI Waste", style={"fontWeight": "700", "color": ACCENT, "fontSize": "16px", "marginBottom": "12px"}),
card(
[
html.Div(
"Fork CI Waste",
style={
"fontWeight": "700",
"color": ACCENT,
"fontSize": "16px",
"marginBottom": "12px",
},
),
html.P(
"26 of 200+ forks were running GitHub Actions — Dependabot updates, upstream scheduled CI, "
"and failing workflows creating ~960 noise runs/month. kornia alone was 91% of fork CI cost "
"due to a daily macOS + Windows test matrix.",
style={"color": GRAY, "fontSize": "14px", "lineHeight": "1.6", "margin": "0"},
style={
"color": GRAY,
"fontSize": "14px",
"lineHeight": "1.6",
"margin": "0",
},
),
]
),
card(
[
html.Div(
"Wildcard Path Triggers",
style={
"fontWeight": "700",
"color": RED,
"fontSize": "16px",
"marginBottom": "12px",
},
),
]),
card([
html.Div("Wildcard Path Triggers", style={"fontWeight": "700", "color": RED, "fontSize": "16px", "marginBottom": "12px"}),
html.P(
"All 12 E2E workflows used paths: ['**'] — any file change (README, docs) "
"triggered the full E2E suite. A single docs-only PR burned ~2 hours of compute.",
style={"color": GRAY, "fontSize": "14px", "lineHeight": "1.6", "margin": "0"},
style={
"color": GRAY,
"fontSize": "14px",
"lineHeight": "1.6",
"margin": "0",
},
),
]
),
card(
[
html.Div(
"Ghost Workflows",
style={
"fontWeight": "700",
"color": AMBER,
"fontSize": "16px",
"marginBottom": "12px",
},
),
]),
card([
html.Div("Ghost Workflows", style={"fontWeight": "700", "color": AMBER, "fontSize": "16px", "marginBottom": "12px"}),
html.P(
"13 workflow files had been deleted from the repo but their entries remained active in "
"GitHub Actions. These cluttered the Actions UI and created confusing status signals.",
style={"color": GRAY, "fontSize": "14px", "lineHeight": "1.6", "margin": "0"},
style={
"color": GRAY,
"fontSize": "14px",
"lineHeight": "1.6",
"margin": "0",
},
),
]
),
card(
[
html.Div(
"Broken claude-code-action",
style={
"fontWeight": "700",
"color": RED,
"fontSize": "16px",
"marginBottom": "12px",
},
),
]),
card([
html.Div("Broken claude-code-action", style={"fontWeight": "700", "color": RED, "fontSize": "16px", "marginBottom": "12px"}),
html.P(
"v1.0.90 broke Bedrock OIDC auth. Every Claude Code run was failing with 403s. "
"60-100% failure rate on codeflash, 85% on codeflash-internal.",
style={"color": GRAY, "fontSize": "14px", "lineHeight": "1.6", "margin": "0"},
style={
"color": GRAY,
"fontSize": "14px",
"lineHeight": "1.6",
"margin": "0",
},
),
]
),
]),
],
),
section(
"Run Volume & Cost Impact",
"Workflow runs dropped 71%. Enterprise minutes overage cut from ~215K to ~90K/month.",
),
# Run volume + billing side by side
html.Div(
style={"display": "grid", "gridTemplateColumns": "1fr 1fr", "gap": "16px"},
style={
"display": "grid",
"gridTemplateColumns": "1fr 1fr",
"gap": "16px",
},
children=[
card([
html.Div("Monthly Workflow Runs (codeflash)", style={"fontWeight": "700", "color": SLATE, "fontSize": "15px", "marginBottom": "12px"}),
dcc.Graph(figure=make_run_volume_chart(), config={"displayModeBar": False}),
]),
card([
html.Div("Enterprise Minutes Billing", style={"fontWeight": "700", "color": SLATE, "fontSize": "15px", "marginBottom": "12px"}),
card(
[
html.Div(
"Monthly Workflow Runs (codeflash)",
style={
"fontWeight": "700",
"color": SLATE,
"fontSize": "15px",
"marginBottom": "12px",
},
),
dcc.Graph(
figure=make_run_volume_chart(),
config={"displayModeBar": False},
),
]
),
card(
[
html.Div(
"Enterprise Minutes Billing",
style={
"fontWeight": "700",
"color": SLATE,
"fontSize": "15px",
"marginBottom": "12px",
},
),
html.P(
f"50K included minutes/month. Overage dropped from ~{BILLING['overage_before_min']:,} to ~{BILLING['overage_after_min']:,} min "
f"(${BILLING['overage_saved_monthly_usd']:,}/month saved).",
style={"color": GRAY, "fontSize": "13px", "lineHeight": "1.5", "margin": "0 0 12px"},
style={
"color": GRAY,
"fontSize": "13px",
"lineHeight": "1.5",
"margin": "0 0 12px",
},
),
dcc.Graph(
figure=make_billing_chart(),
config={"displayModeBar": False},
),
]
),
dcc.Graph(figure=make_billing_chart(), config={"displayModeBar": False}),
]),
],
),
section("Before vs After"),
card([dcc.Graph(figure=make_before_after_chart(), config={"displayModeBar": False})]),
card(
[
dcc.Graph(
figure=make_before_after_chart(),
config={"displayModeBar": False},
)
]
),
section(
"Operational Improvements",
"The audit transformed CI from a maintenance burden to a self-service system.",
),
# Before/after comparison table
card([
card(
[
html.Div(
style={"display": "grid", "gridTemplateColumns": "1fr auto auto", "gap": "0"},
style={
"display": "grid",
"gridTemplateColumns": "1fr auto auto",
"gap": "0",
},
children=[
# Header
html.Div("", style={"padding": "12px 16px"}),
html.Div("Before", style={"padding": "12px 16px", "fontWeight": "700", "color": RED, "fontSize": "13px", "textAlign": "right", "width": "200px"}),
html.Div("After", style={"padding": "12px 16px", "fontWeight": "700", "color": GREEN, "fontSize": "13px", "textAlign": "right", "width": "200px"}),
html.Div(
"Before",
style={
"padding": "12px 16px",
"fontWeight": "700",
"color": RED,
"fontSize": "13px",
"textAlign": "right",
"width": "200px",
},
),
html.Div(
"After",
style={
"padding": "12px 16px",
"fontWeight": "700",
"color": GREEN,
"fontSize": "13px",
"textAlign": "right",
"width": "200px",
},
),
# Rows
*_comparison_row("Workflow files in repo", "22", "7"),
*_comparison_row("Required checks in branch protection", "13 individual", "1 gate job"),
*_comparison_row("Workflow-only PR merge", "Admin override", "Self-service"),
*_comparison_row("Non-code PR compute cost", "$1.85", "$0.001"),
*_comparison_row("Fork failing runs/month", "~960", "0"),
*_comparison_row("Ghost workflows in Actions UI", "13", "0"),
*_comparison_row("Branch protection model", "Legacy rules", "Repository rulesets"),
*_comparison_row("Dependabot test fixture noise", "70% failure rate", "Excluded"),
*_comparison_row(
"Workflow files in repo", "22", "7"
),
*_comparison_row(
"Required checks in branch protection",
"13 individual",
"1 gate job",
),
*_comparison_row(
"Workflow-only PR merge",
"Admin override",
"Self-service",
),
*_comparison_row(
"Non-code PR compute cost", "$1.85", "$0.001"
),
*_comparison_row(
"Fork failing runs/month", "~960", "0"
),
*_comparison_row(
"Ghost workflows in Actions UI", "13", "0"
),
*_comparison_row(
"Branch protection model",
"Legacy rules",
"Repository rulesets",
),
*_comparison_row(
"Dependabot test fixture noise",
"70% failure rate",
"Excluded",
),
],
),
]),
]
),
],
)
@ -530,9 +741,41 @@ def _build_summary_tab():
def _comparison_row(label, before, after):
border = f"1px solid {CARD_BORDER}"
return [
html.Div(label, style={"padding": "12px 16px", "color": SLATE, "fontSize": "14px", "fontWeight": "600", "borderTop": border}),
html.Div(before, style={"padding": "12px 16px", "color": LIGHT_GRAY, "fontSize": "14px", "fontFamily": MONO, "textAlign": "right", "borderTop": border, "width": "200px"}),
html.Div(after, style={"padding": "12px 16px", "color": GREEN, "fontSize": "14px", "fontFamily": MONO, "fontWeight": "600", "textAlign": "right", "borderTop": border, "width": "200px"}),
html.Div(
label,
style={
"padding": "12px 16px",
"color": SLATE,
"fontSize": "14px",
"fontWeight": "600",
"borderTop": border,
},
),
html.Div(
before,
style={
"padding": "12px 16px",
"color": LIGHT_GRAY,
"fontSize": "14px",
"fontFamily": MONO,
"textAlign": "right",
"borderTop": border,
"width": "200px",
},
),
html.Div(
after,
style={
"padding": "12px 16px",
"color": GREEN,
"fontSize": "14px",
"fontFamily": MONO,
"fontWeight": "600",
"textAlign": "right",
"borderTop": border,
"width": "200px",
},
),
]
@ -543,12 +786,14 @@ def _build_detail_tab():
pr_rows = []
for p in PRS_MERGED:
base = REPO_BASES[p["repo"]]
pr_rows.append({
pr_rows.append(
{
"PR": f"[#{p['pr']}]({base}/{p['pr']})",
"Repo": p["repo"],
"Date": p["date"],
"Description": p["title"],
})
}
)
action_rows = [
{"Action": a["action"], "Date": a["date"], "Repo": a["repo"]}
@ -556,49 +801,99 @@ def _build_detail_tab():
]
fork_rows = [
{"Repo": f["repo"], "Runs": f"{f['runs']:,}", "Cost/yr": f"${f['cost_yr']}", "Runners": f["runners"], "Pattern": f["pattern"]}
{
"Repo": f["repo"],
"Runs": f"{f['runs']:,}",
"Cost/yr": f"${f['cost_yr']}",
"Runners": f["runners"],
"Pattern": f["pattern"],
}
for f in FORK_CI
]
finding_rows = []
for repo, items in FINDINGS.items():
for f in items:
pr_link = f"[#{f['pr']}]({REPO_BASES[repo]}/{f['pr']})" if f["pr"] else "Direct action"
finding_rows.append({
pr_link = (
f"[#{f['pr']}]({REPO_BASES[repo]}/{f['pr']})"
if f["pr"]
else "Direct action"
)
finding_rows.append(
{
"Repo": repo,
"Finding": f["finding"],
"Impact": f["impact"],
"Fix": f["fix"],
"PR": pr_link,
})
}
)
return html.Div(
id="detail-view",
style={"display": "none"},
children=[
section("Fork CI Activity", "26 of 200+ forks had active GitHub Actions. Actions disabled on all forks 2026-04-23."),
section(
"Fork CI Activity",
"26 of 200+ forks had active GitHub Actions. Actions disabled on all forks 2026-04-23.",
),
# Fork charts side by side
html.Div(
style={"display": "grid", "gridTemplateColumns": "1fr 1fr", "gap": "16px"},
style={
"display": "grid",
"gridTemplateColumns": "1fr 1fr",
"gap": "16px",
},
children=[
card([
html.Div("Runs by Repository", style={"fontWeight": "700", "color": SLATE, "fontSize": "15px", "marginBottom": "12px"}),
dcc.Graph(figure=make_fork_chart(), config={"displayModeBar": False}),
]),
card([
html.Div("Cost Breakdown", style={"fontWeight": "700", "color": SLATE, "fontSize": "15px", "marginBottom": "12px"}),
card(
[
html.Div(
"Runs by Repository",
style={
"fontWeight": "700",
"color": SLATE,
"fontSize": "15px",
"marginBottom": "12px",
},
),
dcc.Graph(
figure=make_fork_chart(),
config={"displayModeBar": False},
),
]
),
card(
[
html.Div(
"Cost Breakdown",
style={
"fontWeight": "700",
"color": SLATE,
"fontSize": "15px",
"marginBottom": "12px",
},
),
html.P(
"kornia is 91% of fork CI cost: daily scheduled test matrix across macOS ($0.08/min), Windows ($0.016/min), and Linux.",
style={"color": GRAY, "fontSize": "13px", "lineHeight": "1.5", "margin": "0 0 12px"},
style={
"color": GRAY,
"fontSize": "13px",
"lineHeight": "1.5",
"margin": "0 0 12px",
},
),
dcc.Graph(
figure=make_fork_cost_chart(),
config={"displayModeBar": False},
),
]
),
dcc.Graph(figure=make_fork_cost_chart(), config={"displayModeBar": False}),
]),
],
),
# Fork table
html.Div(style=TABLE_WRAP, children=[
html.Div(
style=TABLE_WRAP,
children=[
dash_table.DataTable(
data=fork_rows,
columns=[
@ -615,11 +910,12 @@ def _build_detail_tab():
style_as_list_view=True,
page_size=20,
),
]),
],
),
section("All Findings", "Categorized by repository."),
html.Div(style=TABLE_WRAP, children=[
html.Div(
style=TABLE_WRAP,
children=[
dash_table.DataTable(
data=finding_rows,
columns=[
@ -627,41 +923,64 @@ def _build_detail_tab():
{"name": "Finding", "id": "Finding"},
{"name": "Impact", "id": "Impact"},
{"name": "Fix", "id": "Fix"},
{"name": "PR", "id": "PR", "presentation": "markdown"},
{
"name": "PR",
"id": "PR",
"presentation": "markdown",
},
],
style_header=TABLE_HEADER,
style_cell={**TABLE_CELL, "whiteSpace": "normal", "height": "auto"},
style_cell={
**TABLE_CELL,
"whiteSpace": "normal",
"height": "auto",
},
style_data=TABLE_DATA,
style_data_conditional=TABLE_DATA_CONDITIONAL,
style_as_list_view=True,
page_size=20,
css=[{"selector": "p", "rule": "margin: 0"}],
),
]),
section("PRs Merged", f"{len(PRS_MERGED)} pull requests across 2 repositories."),
html.Div(style=TABLE_WRAP, children=[
],
),
section(
"PRs Merged",
f"{len(PRS_MERGED)} pull requests across 2 repositories.",
),
html.Div(
style=TABLE_WRAP,
children=[
dash_table.DataTable(
data=pr_rows,
columns=[
{"name": "PR", "id": "PR", "presentation": "markdown"},
{
"name": "PR",
"id": "PR",
"presentation": "markdown",
},
{"name": "Repo", "id": "Repo"},
{"name": "Date", "id": "Date"},
{"name": "Description", "id": "Description"},
],
style_header=TABLE_HEADER,
style_cell={**TABLE_CELL, "whiteSpace": "normal", "height": "auto"},
style_cell={
**TABLE_CELL,
"whiteSpace": "normal",
"height": "auto",
},
style_data=TABLE_DATA,
style_data_conditional=TABLE_DATA_CONDITIONAL,
style_as_list_view=True,
css=[{"selector": "p", "rule": "margin: 0"}],
),
]),
section("Direct Actions", "Non-PR changes applied during the audit."),
html.Div(style=TABLE_WRAP, children=[
],
),
section(
"Direct Actions", "Non-PR changes applied during the audit."
),
html.Div(
style=TABLE_WRAP,
children=[
dash_table.DataTable(
data=action_rows,
columns=[
@ -670,45 +989,125 @@ def _build_detail_tab():
{"name": "Repo", "id": "Repo"},
],
style_header=TABLE_HEADER,
style_cell={**TABLE_CELL, "whiteSpace": "normal", "height": "auto"},
style_cell={
**TABLE_CELL,
"whiteSpace": "normal",
"height": "auto",
},
style_data=TABLE_DATA,
style_data_conditional=TABLE_DATA_CONDITIONAL,
style_as_list_view=True,
css=[{"selector": "p", "rule": "margin: 0"}],
),
]),
],
),
section("Methodology"),
card([
card(
[
html.Ol(
[
html.Li([html.Strong("Inventory"), "", html.Span("gh repo list codeflash-ai", style={"fontFamily": MONO, "fontSize": "13px"}), " to enumerate all 200+ repos, classify as fork vs primary"], style=_li_style()),
html.Li([html.Strong("Fork scan"), " — Query Actions run counts per fork since Apr 2025, identify 26 active forks"], style=_li_style()),
html.Li([html.Strong("Compute cost"), " — Sample job-level data (duration, runner type), calculate at GitHub rates: $0.008/min Linux, $0.016/min Windows, $0.08/min macOS"], style=_li_style()),
html.Li([html.Strong("Main repo audit"), " — List all workflows, check run history, failure rates, ghost detection, trigger configuration"], style=_li_style()),
html.Li([html.Strong("Root cause analysis"), " — Compare working vs broken runs by commit SHA and timestamp to pinpoint regressions"], style=_li_style()),
html.Li(
[
html.Strong("Inventory"),
"",
html.Span(
"gh repo list codeflash-ai",
style={
"fontFamily": MONO,
"fontSize": "13px",
},
),
" to enumerate all 200+ repos, classify as fork vs primary",
],
style=_li_style(),
),
html.Li(
[
html.Strong("Fork scan"),
" — Query Actions run counts per fork since Apr 2025, identify 26 active forks",
],
style=_li_style(),
),
html.Li(
[
html.Strong("Compute cost"),
" — Sample job-level data (duration, runner type), calculate at GitHub rates: $0.008/min Linux, $0.016/min Windows, $0.08/min macOS",
],
style=_li_style(),
),
html.Li(
[
html.Strong("Main repo audit"),
" — List all workflows, check run history, failure rates, ghost detection, trigger configuration",
],
style=_li_style(),
),
html.Li(
[
html.Strong("Root cause analysis"),
" — Compare working vs broken runs by commit SHA and timestamp to pinpoint regressions",
],
style=_li_style(),
),
],
style={"paddingLeft": "20px", "margin": "0"},
),
]),
]
),
section("Monitoring"),
card([
card(
[
html.Ul(
[
html.Li([html.Strong("claude-code-action"), " — unpin from v1.0.89 once anthropics/claude-code-action#1196 lands upstream"], style=_li_style()),
html.Li([html.Strong("Dependabot alerts"), " — 24 known vulnerabilities at audit time; new dependabot.yml targets real deps only"], style=_li_style()),
html.Li([html.Strong("Fork re-enable"), " — if a fork is needed: ", html.Code("echo '{\"enabled\":true}' | gh api --method PUT repos/codeflash-ai/<repo>/actions/permissions --input -", style={"fontFamily": MONO, "fontSize": "12px", "color": ACCENT})], style=_li_style()),
html.Li(
[
html.Strong("claude-code-action"),
" — unpin from v1.0.89 once anthropics/claude-code-action#1196 lands upstream",
],
style={"paddingLeft": "20px", "margin": "0", "listStyleType": "'\\2022 '"},
style=_li_style(),
),
html.Li(
[
html.Strong("Dependabot alerts"),
" — 24 known vulnerabilities at audit time; new dependabot.yml targets real deps only",
],
style=_li_style(),
),
html.Li(
[
html.Strong("Fork re-enable"),
" — if a fork is needed: ",
html.Code(
"echo '{\"enabled\":true}' | gh api --method PUT repos/codeflash-ai/<repo>/actions/permissions --input -",
style={
"fontFamily": MONO,
"fontSize": "12px",
"color": ACCENT,
},
),
],
style=_li_style(),
),
],
style={
"paddingLeft": "20px",
"margin": "0",
"listStyleType": "'\\2022 '",
},
),
]
),
]),
],
)
def _li_style():
return {"color": GRAY, "fontSize": "14px", "lineHeight": "1.7", "marginBottom": "8px"}
return {
"color": GRAY,
"fontSize": "14px",
"lineHeight": "1.7",
"marginBottom": "8px",
}
# ── Main layout ──────────────────────────────────────────────────────────────
@ -774,7 +1173,7 @@ def _main_layout():
},
),
html.P(
"April 923, 2026",
"April 9-23, 2026",
style={
"fontSize": "14px",
"color": LIGHT_GRAY,
@ -784,10 +1183,13 @@ def _main_layout():
),
],
),
# Tab buttons
html.Div(
style={"display": "flex", "justifyContent": "center", "margin": "40px 0 8px"},
style={
"display": "flex",
"justifyContent": "center",
"margin": "40px 0 8px",
},
children=[
html.Div(
style={
@ -798,16 +1200,24 @@ def _main_layout():
"border": f"1px solid {CARD_BORDER}",
},
children=[
html.Button("Executive Summary", id="btn-summary", n_clicks=1, style=_TAB_BTN_ACTIVE),
html.Button("Full Detail", id="btn-detail", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button(
"Executive Summary",
id="btn-summary",
n_clicks=1,
style=_TAB_BTN_ACTIVE,
),
html.Button(
"Full Detail",
id="btn-detail",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
],
),
],
),
_build_summary_tab(),
_build_detail_tab(),
# Footer
html.Div(
style={
@ -819,11 +1229,19 @@ def _main_layout():
children=[
html.Div(
_logo_lockup("16px", "20px", "10px", "3px"),
style={"display": "flex", "justifyContent": "center", "marginBottom": "4px"},
style={
"display": "flex",
"justifyContent": "center",
"marginBottom": "4px",
},
),
html.P(
"CI/CD Audit Report — April 2026",
style={"color": LIGHT_GRAY, "fontSize": "13px", "margin": "0"},
style={
"color": LIGHT_GRAY,
"fontSize": "13px",
"margin": "0",
},
),
],
),

View file

@ -9,9 +9,10 @@ findings identified during the performance engagement:
"""
import json
import re
from pathlib import Path
from dash import Dash, clientside_callback, html, Input, Output
from dash import Dash, Input, Output, clientside_callback, html
from theme import (
ACCENT,
AMBER,
@ -20,15 +21,12 @@ from theme import (
CARD,
CARD_BG,
CARD_BORDER,
DARK,
FONT,
GRAY,
GREEN,
LIGHT_GRAY,
LIGHT_GREEN,
LIGHT_RED,
MONO,
PURPLE,
RED,
SLATE,
WHITE,
@ -170,8 +168,6 @@ def status_badge(status):
)
import re
_REPO_URLS = {
"core-product": "https://github.com/Unstructured-IO/core-product/pull",
"github-workflows": "https://github.com/Unstructured-IO/github-workflows/pull",
@ -213,7 +209,9 @@ def _linkify_fixed_by(text, repo):
)
else:
result.append(
html.Span(part, style={"color": GREEN, "fontWeight": "600"})
html.Span(
part, style={"color": GREEN, "fontWeight": "600"}
)
)
else:
result.append(
@ -387,7 +385,11 @@ def finding_card(f):
severity_badge(sev),
status_badge(f["status"]),
],
style={"display": "flex", "gap": "8px", "alignItems": "center"},
style={
"display": "flex",
"gap": "8px",
"alignItems": "center",
},
),
],
style={
@ -592,7 +594,10 @@ _SEV_PRIORITY = {"critical": 0, "high": 1, "medium": 2, "low": 3, "info": 4}
def _finding_sort_key(f):
return (_SEV_PRIORITY.get(f["severity"], 9), _CATEGORY_PRIORITY.get(f["category"], 9))
return (
_SEV_PRIORITY.get(f["severity"], 9),
_CATEGORY_PRIORITY.get(f["category"], 9),
)
_CRITICAL_HIGH = sorted(
@ -654,7 +659,10 @@ def _build_summary_tab():
" and ",
html.Span(
f"{SUMMARY['high']} high",
style={"fontWeight": "700", "color": "#f97316"},
style={
"fontWeight": "700",
"color": "#f97316",
},
),
" severity issues. The lockfile-bypass pattern you fixed in core-product "
"(PR #1465) still persists in CI steps and Makefiles across the org. "
@ -738,7 +746,7 @@ def _build_summary_tab():
},
),
html.Div(
f"of findings remain unresolved",
"of findings remain unresolved",
style={
"fontSize": "20px",
"fontWeight": "600",
@ -781,7 +789,7 @@ def _build_critical_high_tab():
style={"display": "none"},
children=[
section(
f"Critical & High Findings",
"Critical & High Findings",
f"{n} findings \u2014 supply chain, container, CI/CD, and secrets",
),
*[finding_card(f) for f in _CRITICAL_HIGH],
@ -797,7 +805,7 @@ def _build_medium_low_tab():
style={"display": "none"},
children=[
section(
f"Medium & Low Findings",
"Medium & Low Findings",
f"{n} findings for planned remediation",
),
*[finding_card(f) for f in _MEDIUM_LOW],
@ -999,7 +1007,9 @@ app.layout = html.Div(
"zIndex": "1",
},
children=[
_hero_metric(str(SUMMARY["critical"]), "Critical", RED),
_hero_metric(
str(SUMMARY["critical"]), "Critical", RED
),
_hero_metric(str(SUMMARY["high"]), "High", "#f97316"),
_hero_metric(str(SUMMARY["medium"]), "Medium", AMBER),
_hero_metric(str(SUMMARY["low"]), "Low", BLUE),
@ -1027,10 +1037,30 @@ app.layout = html.Div(
"border": f"1px solid {CARD_BORDER}",
},
children=[
html.Button("Summary", id="btn-summary", n_clicks=1, style=_TAB_BTN_ACTIVE),
html.Button(f"Critical & High ({len(_CRITICAL_HIGH)})", id="btn-crit-high", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button(f"Medium & Low ({len(_MEDIUM_LOW)})", id="btn-med-low", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button("By Category", id="btn-category", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button(
"Summary",
id="btn-summary",
n_clicks=1,
style=_TAB_BTN_ACTIVE,
),
html.Button(
f"Critical & High ({len(_CRITICAL_HIGH)})",
id="btn-crit-high",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
html.Button(
f"Medium & Low ({len(_MEDIUM_LOW)})",
id="btn-med-low",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
html.Button(
"By Category",
id="btn-category",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
],
),
],

View file

@ -228,7 +228,7 @@ def _next_card(number, title, description, notes=None):
style={
"paddingLeft": "16px",
"margin": "0",
"listStyleType": "' '",
"listStyleType": "'- '",
},
),
],
@ -450,8 +450,10 @@ _TAB_BTN_STYLE = {
_TAB_BTN_ACTIVE = {**_TAB_BTN_STYLE, "background": ACCENT, "color": DARK}
def _logo_lockup(codeflash_h="24px", unstructured_h="28px", gap="16px", radius="4px"):
"""Codeflash × Unstructured logo pair, reused in headers and footers."""
def _logo_lockup(
codeflash_h="24px", unstructured_h="28px", gap="16px", radius="4px"
):
"""Codeflash x Unstructured logo pair, reused in headers and footers."""
return html.Div(
style={
"display": "flex",
@ -459,7 +461,9 @@ def _logo_lockup(codeflash_h="24px", unstructured_h="28px", gap="16px", radius="
"gap": gap,
},
children=[
html.Img(src="/assets/codeflash.svg", style={"height": codeflash_h}),
html.Img(
src="/assets/codeflash.svg", style={"height": codeflash_h}
),
html.Span(
"\u00d7",
style={
@ -479,7 +483,6 @@ def _logo_lockup(codeflash_h="24px", unstructured_h="28px", gap="16px", radius="
# ── View builders ────────────────────────────────────────────────────────────
def build_team_view():
return html.Div(
id="team-view",
@ -1388,10 +1391,6 @@ def build_team_view():
)
def _method_row(label, text):
"""A single labeled row for the methodology cards."""
return html.Div(
@ -1423,7 +1422,7 @@ def _method_row(label, text):
)
def _above_fold_content(negative_margin=False):
def _above_fold_content(*, negative_margin=False):
"""Hero Metrics + Infrastructure Cost Impact + Broader Context.
Used above the tab toggle on the main page and at the top of /jpc.
@ -2151,9 +2150,25 @@ def _jpc_content():
notes=[
[
"POC live in ",
html.A("ci-unified-workflows", href="https://github.com/Unstructured-IO/github-workflows/tree/ci-unified-workflows", target="_blank", style={"color": BLUE, "textDecoration": "none"}),
html.A(
"ci-unified-workflows",
href="https://github.com/Unstructured-IO/github-workflows/tree/ci-unified-workflows",
target="_blank",
style={
"color": BLUE,
"textDecoration": "none",
},
),
" branch and ",
html.A("platform-libs#667", href="https://github.com/Unstructured-IO/platform-libs/pull/667", target="_blank", style={"color": BLUE, "textDecoration": "none"}),
html.A(
"platform-libs#667",
href="https://github.com/Unstructured-IO/platform-libs/pull/667",
target="_blank",
style={
"color": BLUE,
"textDecoration": "none",
},
),
],
"Eliminates per-package workflow permutations \u2014 one matrix, one lockfile",
"No migration off GitHub Actions \u2014 same CI/CD platform, simplified configuration",
@ -2730,10 +2745,26 @@ app.index_string = """<!DOCTYPE html>
</body>
</html>"""
def _tl_node(number, title, dates, duration, status, deliverables, color,
dependencies=None, is_last=False, concurrent_with=None):
def _tl_node(
number,
title,
dates,
duration,
status,
deliverables,
color,
*,
dependencies=None,
is_last=False,
concurrent_with=None,
):
"""Single node in the vertical timeline."""
status_colors = {"Completed": GREEN, "Ready to Start": AMBER, "Proposed": ACCENT}
status_colors = {
"Completed": GREEN,
"Ready to Start": AMBER,
"Proposed": ACCENT,
}
sc = status_colors.get(status, ACCENT)
filled = status == "Completed"
@ -2754,7 +2785,9 @@ def _tl_node(number, title, dates, duration, status, deliverables, color,
style={
"width": "2px",
"flexGrow": "1",
"background": f"linear-gradient({color}, {CARD_BORDER})" if not is_last else "transparent",
"background": f"linear-gradient({color}, {CARD_BORDER})"
if not is_last
else "transparent",
"margin": "4px auto 0",
"minHeight": "0" if is_last else "20px",
},
@ -2832,26 +2865,42 @@ def _tl_node(number, title, dates, duration, status, deliverables, color,
],
),
*(
[html.Div(
[
html.Div(
[
html.Span("\u21b3 ", style={"color": AMBER}),
html.Span(dependencies, style={
"color": AMBER, "fontSize": "12px",
}),
html.Span(
dependencies,
style={
"color": AMBER,
"fontSize": "12px",
},
),
],
style={"marginBottom": "12px"},
)] if dependencies else []
)
]
if dependencies
else []
),
*(
[html.Div(
[
html.Div(
[
html.Span("\u2194 ", style={"color": LIGHT_GRAY}),
html.Span(f"Runs parallel with Phase {concurrent_with}", style={
"color": LIGHT_GRAY, "fontSize": "12px",
}),
html.Span(
f"Runs parallel with Phase {concurrent_with}",
style={
"color": LIGHT_GRAY,
"fontSize": "12px",
},
),
],
style={"marginBottom": "12px"},
)] if concurrent_with else []
)
]
if concurrent_with
else []
),
html.Div(
style={
@ -2860,10 +2909,18 @@ def _tl_node(number, title, dates, duration, status, deliverables, color,
},
children=[
html.Ul(
[html.Li(d, style={
"fontSize": "13px", "color": GRAY, "lineHeight": "1.7",
[
html.Li(
d,
style={
"fontSize": "13px",
"color": GRAY,
"lineHeight": "1.7",
"paddingLeft": "4px",
}) for d in deliverables],
},
)
for d in deliverables
],
style={"paddingLeft": "16px", "margin": "0"},
),
],
@ -2897,7 +2954,10 @@ def _tl_node(number, title, dates, duration, status, deliverables, color,
),
html.Div(
phase_card,
style={"flex": "1 1 0%", "paddingBottom": "0" if is_last else "20px"},
style={
"flex": "1 1 0%",
"paddingBottom": "0" if is_last else "20px",
},
),
],
)
@ -2921,24 +2981,29 @@ def _tl_gap(label):
"flexShrink": "0",
},
children=[
html.Div(style={
html.Div(
style={
"width": "2px",
"height": "100%",
"background": CARD_BORDER,
"margin": "0 auto",
"borderLeft": f"2px dashed {CARD_BORDER}",
"minHeight": "40px",
}),
}
),
],
),
html.Div(
html.Span(label, style={
html.Span(
label,
style={
"fontSize": "11px",
"fontWeight": "600",
"color": LIGHT_GRAY,
"fontFamily": MONO,
"letterSpacing": "0.05em",
}),
},
),
style={
"marginLeft": "20px",
"display": "flex",
@ -2957,8 +3022,11 @@ def _timeline_content():
style={"position": "relative"},
children=[
_tl_node(
"1", "Core-Product Optimization",
"Feb 27 \u2192 Apr 14", "7 weeks", "Completed",
"1",
"Core-Product Optimization",
"Feb 27 \u2192 Apr 14",
"7 weeks",
"Completed",
deliverables=[
"24 PRs merged across 5 repos, 354 tests passing",
"Memory: 32 GB \u2192 4 GB K8s pod allocation (\u221287.5%)",
@ -2969,14 +3037,33 @@ def _timeline_content():
color=GREEN,
),
_tl_node(
"1b", "Platform-Libs CI/CD Migration",
"Apr 9 \u2192 Apr 14", "1 week", "Ready to Start",
"1b",
"Platform-Libs CI/CD Migration",
"Apr 9 \u2192 Apr 14",
"1 week",
"Ready to Start",
deliverables=[
[
"POC live in ",
html.A("ci-unified-workflows", href="https://github.com/Unstructured-IO/github-workflows/tree/ci-unified-workflows", target="_blank", style={"color": BLUE, "textDecoration": "none"}),
html.A(
"ci-unified-workflows",
href="https://github.com/Unstructured-IO/github-workflows/tree/ci-unified-workflows",
target="_blank",
style={
"color": BLUE,
"textDecoration": "none",
},
),
" branch and ",
html.A("platform-libs#667", href="https://github.com/Unstructured-IO/platform-libs/pull/667", target="_blank", style={"color": BLUE, "textDecoration": "none"}),
html.A(
"platform-libs#667",
href="https://github.com/Unstructured-IO/platform-libs/pull/667",
target="_blank",
style={
"color": BLUE,
"textDecoration": "none",
},
),
],
"CI runners: ~189 \u2192 ~27 per PR (\u221285% billed minutes)",
"Same GitHub Actions \u2014 fewer workflow permutations, not a platform migration",
@ -2985,8 +3072,11 @@ def _timeline_content():
),
_tl_gap("1 week buffer"),
_tl_node(
"2", "Developer Experience & CI/CD",
"Apr 21 \u2192 May 2", "2 weeks", "Proposed",
"2",
"Developer Experience & CI/CD",
"Apr 21 \u2192 May 2",
"2 weeks",
"Proposed",
deliverables=[
"uv workspace migration for core-product (building on platform-libs POC)",
"Single lockfile replacing fragmented dependency install steps",
@ -2997,8 +3087,11 @@ def _timeline_content():
color=BLUE,
),
_tl_node(
"3", "Platform API Speed & Stability",
"May 5 \u2192 May 16", "2 weeks", "Proposed",
"3",
"Platform API Speed & Stability",
"May 5 \u2192 May 16",
"2 weeks",
"Proposed",
deliverables=[
"Pod cold start profiling and reduction (image snapshotting, pre-warming)",
"Import time audit for each pipeline step",
@ -3010,8 +3103,11 @@ def _timeline_content():
color=ACCENT,
),
_tl_node(
"4", "Security Hardening",
"Apr 21 \u2192 May 2", "2 weeks", "Proposed",
"4",
"Security Hardening",
"Apr 21 \u2192 May 2",
"2 weeks",
"Proposed",
deliverables=[
"Lockfile bypass remediation (eliminate uv pip install vectors)",
"Dependency confusion audit on internal package names",
@ -3022,8 +3118,11 @@ def _timeline_content():
color=PURPLE,
),
_tl_node(
"5", "Infrastructure Cost Discovery",
"May 19 \u2192 Jun 27", "6 weeks", "Proposed",
"5",
"Infrastructure Cost Discovery",
"May 19 \u2192 Jun 27",
"6 weeks",
"Proposed",
deliverables=[
"Full Azure spend audit ($100K/mo staging + production + development)",
"Dedicated instance cost mapping and optimization targets",
@ -3319,17 +3418,26 @@ def _main_layout():
children=[
html.Span(
"March - April 2026",
style={"color": LIGHT_GRAY, "fontSize": "13px"},
style={
"color": LIGHT_GRAY,
"fontSize": "13px",
},
),
html.Span("|", style={"color": LIGHT_GRAY}),
html.Span(
"24 PRs merged",
style={"color": LIGHT_GRAY, "fontSize": "13px"},
style={
"color": LIGHT_GRAY,
"fontSize": "13px",
},
),
html.Span("|", style={"color": LIGHT_GRAY}),
html.Span(
"5 PRs in progress",
style={"color": LIGHT_GRAY, "fontSize": "13px"},
style={
"color": LIGHT_GRAY,
"fontSize": "13px",
},
),
],
),
@ -3363,10 +3471,30 @@ def _main_layout():
"border": f"1px solid {CARD_BORDER}",
},
children=[
html.Button("Executive Summary", id="btn-jpc", n_clicks=1, style=_TAB_BTN_ACTIVE),
html.Button("Engineering Details", id="btn-team", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button("Full Detail", id="btn-detail", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button("Timeline", id="btn-timeline", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button(
"Executive Summary",
id="btn-jpc",
n_clicks=1,
style=_TAB_BTN_ACTIVE,
),
html.Button(
"Engineering Details",
id="btn-team",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
html.Button(
"Full Detail",
id="btn-detail",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
html.Button(
"Timeline",
id="btn-timeline",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
],
),
],
@ -3426,10 +3554,12 @@ def _main_layout():
def _serve_layout():
"""Return fresh layout on each page load (Dash best practice)."""
return html.Div([
return html.Div(
[
dcc.Location(id="url", refresh=False),
html.Div(id="page-content"),
])
]
)
app.layout = _serve_layout

View file

@ -9,9 +9,10 @@ findings identified during the performance engagement:
"""
import json
import re
from pathlib import Path
from dash import Dash, clientside_callback, html, Input, Output
from dash import Dash, Input, Output, clientside_callback, html
from theme import (
ACCENT,
AMBER,
@ -20,15 +21,12 @@ from theme import (
CARD,
CARD_BG,
CARD_BORDER,
DARK,
FONT,
GRAY,
GREEN,
LIGHT_GRAY,
LIGHT_GREEN,
LIGHT_RED,
MONO,
PURPLE,
RED,
SLATE,
WHITE,
@ -170,8 +168,6 @@ def status_badge(status):
)
import re
_REPO_URLS = {
"core-product": "https://github.com/Unstructured-IO/core-product/pull",
"github-workflows": "https://github.com/Unstructured-IO/github-workflows/pull",
@ -213,7 +209,9 @@ def _linkify_fixed_by(text, repo):
)
else:
result.append(
html.Span(part, style={"color": GREEN, "fontWeight": "600"})
html.Span(
part, style={"color": GREEN, "fontWeight": "600"}
)
)
else:
result.append(
@ -387,7 +385,11 @@ def finding_card(f):
severity_badge(sev),
status_badge(f["status"]),
],
style={"display": "flex", "gap": "8px", "alignItems": "center"},
style={
"display": "flex",
"gap": "8px",
"alignItems": "center",
},
),
],
style={
@ -592,7 +594,10 @@ _SEV_PRIORITY = {"critical": 0, "high": 1, "medium": 2, "low": 3, "info": 4}
def _finding_sort_key(f):
return (_SEV_PRIORITY.get(f["severity"], 9), _CATEGORY_PRIORITY.get(f["category"], 9))
return (
_SEV_PRIORITY.get(f["severity"], 9),
_CATEGORY_PRIORITY.get(f["category"], 9),
)
_CRITICAL_HIGH = sorted(
@ -654,7 +659,10 @@ def _build_summary_tab():
" and ",
html.Span(
f"{SUMMARY['high']} high",
style={"fontWeight": "700", "color": "#f97316"},
style={
"fontWeight": "700",
"color": "#f97316",
},
),
" severity issues. The lockfile-bypass pattern you fixed in core-product "
"(PR #1465) still persists in CI steps and Makefiles across the org. "
@ -738,7 +746,7 @@ def _build_summary_tab():
},
),
html.Div(
f"of findings remain unresolved",
"of findings remain unresolved",
style={
"fontSize": "20px",
"fontWeight": "600",
@ -781,7 +789,7 @@ def _build_critical_high_tab():
style={"display": "none"},
children=[
section(
f"Critical & High Findings",
"Critical & High Findings",
f"{n} findings \u2014 supply chain, container, CI/CD, and secrets",
),
*[finding_card(f) for f in _CRITICAL_HIGH],
@ -797,7 +805,7 @@ def _build_medium_low_tab():
style={"display": "none"},
children=[
section(
f"Medium & Low Findings",
"Medium & Low Findings",
f"{n} findings for planned remediation",
),
*[finding_card(f) for f in _MEDIUM_LOW],
@ -999,7 +1007,9 @@ app.layout = html.Div(
"zIndex": "1",
},
children=[
_hero_metric(str(SUMMARY["critical"]), "Critical", RED),
_hero_metric(
str(SUMMARY["critical"]), "Critical", RED
),
_hero_metric(str(SUMMARY["high"]), "High", "#f97316"),
_hero_metric(str(SUMMARY["medium"]), "Medium", AMBER),
_hero_metric(str(SUMMARY["low"]), "Low", BLUE),
@ -1027,10 +1037,30 @@ app.layout = html.Div(
"border": f"1px solid {CARD_BORDER}",
},
children=[
html.Button("Summary", id="btn-summary", n_clicks=1, style=_TAB_BTN_ACTIVE),
html.Button(f"Critical & High ({len(_CRITICAL_HIGH)})", id="btn-crit-high", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button(f"Medium & Low ({len(_MEDIUM_LOW)})", id="btn-med-low", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button("By Category", id="btn-category", n_clicks=0, style=_TAB_BTN_STYLE),
html.Button(
"Summary",
id="btn-summary",
n_clicks=1,
style=_TAB_BTN_ACTIVE,
),
html.Button(
f"Critical & High ({len(_CRITICAL_HIGH)})",
id="btn-crit-high",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
html.Button(
f"Medium & Low ({len(_MEDIUM_LOW)})",
id="btn-med-low",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
html.Button(
"By Category",
id="btn-category",
n_clicks=0,
style=_TAB_BTN_STYLE,
),
],
),
],