Lint and format entire repo, not just packages (#23)

Remove .codeflash/ from ruff's extend-exclude and add per-file ignores
for .codeflash/, scripts/, evals/, and plugin/ to allow common
benchmark/script patterns (print, eval, magic values). Remove shebangs
from the scripts. Widen the pre-commit hooks to check the full repo.
Kevin Turcios 2026-04-15 03:16:15 -05:00 committed by GitHub
parent 33faedf427
commit 20f6c59f05
20 changed files with 594 additions and 298 deletions
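
The pyproject.toml hunk near the end carries the bulk of the change. For orientation, the new per-file ignores take the following shape, shown here as an abridged sketch with a few rule codes from the actual diff (ruff reads these from the [tool.ruff.lint.per-file-ignores] table, which the hunk's context omits):

[tool.ruff.lint.per-file-ignores]
".codeflash/**" = [
    "T201",     # print is the output mechanism
    "S307",     # eval used for benchmark code generation
    "PLR2004",  # magic values in benchmarks
]
"scripts/*" = [
    "T201",  # print is the output mechanism
    "S603",  # subprocess calls in scripts
]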

View file

@ -1,25 +1,29 @@
#!/usr/bin/env python3
"""
Benchmark for cache key computation in odoo/tools/cache.py
Focuses on the determine_key() and key() methods which are pure Python.
"""
import time
from inspect import signature, Parameter
from inspect import Parameter, signature
# Simulate the key computation logic from ormcache
def build_cache_key_eval(method, cache_args):
"""Current implementation using eval - generates lambda from string"""
args = ', '.join(
args = ", ".join(
str(params.replace(annotation=Parameter.empty))
for params in signature(method).parameters.values()
)
values = ['self._name', 'method', *cache_args]
code = f"lambda {args}: ({''.join(a for arg in values for a in (arg, ','))})"
return eval(code, {'method': method})
values = ["self._name", "method", *cache_args]
code = (
f"lambda {args}: ({''.join(a for arg in values for a in (arg, ','))})"
)
return eval(code, {"method": method})
# Test methods with various signatures
class MockModel:
_name = 'test.model'
_name = "test.model"
def simple_method(self, arg1, arg2):
return arg1 + arg2
@ -30,6 +34,7 @@ class MockModel:
def many_args_method(self, a, b, c, d, e, f, g, h):
return sum([a, b, c, d, e, f, g, h])
def benchmark_key_computation():
"""Benchmark the key computation overhead"""
model = MockModel()
@ -38,7 +43,7 @@ def benchmark_key_computation():
# Scenario 1: Simple method, many lookups
print("Scenario 1: Simple method (2 args), 100k lookups")
key_func = build_cache_key_eval(MockModel.simple_method, ['arg1', 'arg2'])
key_func = build_cache_key_eval(MockModel.simple_method, ["arg1", "arg2"])
start = time.perf_counter()
for i in range(100_000):
@ -50,13 +55,12 @@ def benchmark_key_computation():
# Scenario 2: Complex method with defaults, many lookups
print("Scenario 2: Complex method (4 args with defaults), 100k lookups")
key_func = build_cache_key_eval(
MockModel.complex_method,
['model_name', 'mode', 'limit', 'offset']
MockModel.complex_method, ["model_name", "mode", "limit", "offset"]
)
start = time.perf_counter()
for i in range(100_000):
_ = key_func(model, 'res.partner', 'read', 10, i)
_ = key_func(model, "res.partner", "read", 10, i)
elapsed = time.perf_counter() - start
print(f" Total: {elapsed:.3f}s")
print(f" Per call: {elapsed / 100_000 * 1e6:.2f} µs\n")
@ -64,8 +68,7 @@ def benchmark_key_computation():
# Scenario 3: Many args method
print("Scenario 3: Many args method (8 args), 100k lookups")
key_func = build_cache_key_eval(
MockModel.many_args_method,
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
MockModel.many_args_method, ["a", "b", "c", "d", "e", "f", "g", "h"]
)
start = time.perf_counter()
@ -79,10 +82,11 @@ def benchmark_key_computation():
print("Scenario 4: Key function creation (determine_key), 10k methods")
start = time.perf_counter()
for i in range(10_000):
_ = build_cache_key_eval(MockModel.simple_method, ['arg1', 'arg2'])
_ = build_cache_key_eval(MockModel.simple_method, ["arg1", "arg2"])
elapsed = time.perf_counter() - start
print(f" Total: {elapsed:.3f}s")
print(f" Per call: {elapsed / 10_000 * 1e6:.2f} µs\n")
if __name__ == '__main__':
if __name__ == "__main__":
benchmark_key_computation()
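
To make the eval trick concrete: for simple_method and cache_args=["arg1", "arg2"], build_cache_key_eval assembles and evaluates source roughly like this (a sketch derived from the code above, not captured from a run):

# String assembled for simple_method(self, arg1, arg2):
code = "lambda self, arg1, arg2: (self._name,method,arg1,arg2,)"
key_func = eval(code, {"method": MockModel.simple_method})
# The key is a plain tuple, cheap to build and hash on every cache lookup:
key_func(MockModel(), 1, 2)  # -> ("test.model", <function simple_method>, 1, 2)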

View file

@ -1,20 +1,23 @@
#!/usr/bin/env python3
"""
Benchmark for Odoo image processing utilities.
Tests resize, format conversion, and thumbnail generation.
"""
import sys
import os
import io
import timeit
from PIL import Image
import base64
import io
import os
import sys
import timeit
from PIL import Image
# Add odoo to path
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo')
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo")
# Activating venv programmatically by modifying path
venv_site_packages = '/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages'
venv_site_packages = (
"/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages"
)
if os.path.exists(venv_site_packages):
sys.path.insert(0, venv_site_packages)
@ -28,14 +31,14 @@ except ImportError as e:
def create_test_image(width=2048, height=2048):
"""Create a test image in memory."""
img = Image.new('RGB', (width, height), color='red')
img = Image.new("RGB", (width, height), color="red")
# Add some complexity
for i in range(0, width, 100):
for j in range(0, height, 100):
img.putpixel((i, j), (0, 255, 0))
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img.save(buffer, format="PNG")
return buffer.getvalue()
@ -44,7 +47,7 @@ def benchmark_resize_pil(image_data, target_size=(512, 512)):
img = Image.open(io.BytesIO(image_data))
img_resized = img.resize(target_size, Image.Resampling.LANCZOS)
buffer = io.BytesIO()
img_resized.save(buffer, format='PNG')
img_resized.save(buffer, format="PNG")
return buffer.getvalue()
@ -53,7 +56,7 @@ def benchmark_thumbnail_pil(image_data, max_size=(256, 256)):
img = Image.open(io.BytesIO(image_data))
img.thumbnail(max_size, Image.Resampling.LANCZOS)
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img.save(buffer, format="PNG")
return buffer.getvalue()
@ -63,13 +66,13 @@ def benchmark_format_conversion(image_data):
# Convert to JPEG
buffer_jpeg = io.BytesIO()
img_rgb = img.convert('RGB')
img_rgb.save(buffer_jpeg, format='JPEG', quality=85)
img_rgb = img.convert("RGB")
img_rgb.save(buffer_jpeg, format="JPEG", quality=85)
# Convert back to PNG
img2 = Image.open(io.BytesIO(buffer_jpeg.getvalue()))
buffer_png = io.BytesIO()
img2.save(buffer_png, format='PNG')
img2.save(buffer_png, format="PNG")
return buffer_png.getvalue()
@ -105,41 +108,54 @@ def run_benchmarks():
print(f"Image size: {size_mb:.2f} MB")
# Benchmark resize
time_resize = timeit.timeit(
lambda: benchmark_resize_pil(image_data, (512, 512)),
number=10
) / 10
time_resize = (
timeit.timeit(
lambda: benchmark_resize_pil(image_data, (512, 512)), number=10
)
/ 10
)
print(f"Resize to 512x512: {time_resize * 1000:.2f} ms")
# Benchmark thumbnail
time_thumbnail = timeit.timeit(
time_thumbnail = (
timeit.timeit(
lambda: benchmark_thumbnail_pil(image_data, (256, 256)),
number=10
) / 10
number=10,
)
/ 10
)
print(f"Thumbnail to 256x256: {time_thumbnail * 1000:.2f} ms")
# Benchmark format conversion
time_format = timeit.timeit(
lambda: benchmark_format_conversion(image_data),
number=5
) / 5
print(f"Format conversion (PNG->JPEG->PNG): {time_format*1000:.2f} ms")
time_format = (
timeit.timeit(
lambda: benchmark_format_conversion(image_data), number=5
)
/ 5
)
print(
f"Format conversion (PNG->JPEG->PNG): {time_format * 1000:.2f} ms"
)
# Benchmark base64
time_base64 = timeit.timeit(
lambda: benchmark_base64_operations(image_data),
number=100
) / 100
time_base64 = (
timeit.timeit(
lambda: benchmark_base64_operations(image_data), number=100
)
/ 100
)
print(f"Base64 encode/decode: {time_base64 * 1000:.2f} ms")
results.append({
'size': label,
'image_mb': size_mb,
'resize_ms': time_resize * 1000,
'thumbnail_ms': time_thumbnail * 1000,
'format_ms': time_format * 1000,
'base64_ms': time_base64 * 1000,
})
results.append(
{
"size": label,
"image_mb": size_mb,
"resize_ms": time_resize * 1000,
"thumbnail_ms": time_thumbnail * 1000,
"format_ms": time_format * 1000,
"base64_ms": time_base64 * 1000,
}
)
# Summary
print("\n" + "=" * 80)
@ -147,8 +163,10 @@ def run_benchmarks():
print("=" * 80)
for r in results:
print(f"\n{r['size']} ({r['image_mb']:.2f} MB):")
print(f" Total processing time: {r['resize_ms'] + r['thumbnail_ms'] + r['format_ms']:.2f} ms")
print(
f" Total processing time: {r['resize_ms'] + r['thumbnail_ms'] + r['format_ms']:.2f} ms"
)
if __name__ == '__main__':
if __name__ == "__main__":
run_benchmarks()
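
A side note on why these scripts needed their own lint profile: the lambda closures passed to timeit above are exactly what trips ruff's B023 (function-uses-loop-variable), one of the rules the pyproject hunk below silences for .codeflash/. A minimal sketch of the pattern, with a hypothetical loop shape:

for image_data, label in datasets:  # hypothetical iteration over test images
    # B023 flags the closure over the loop variable, but timeit runs the lambda
    # before the next iteration rebinds image_data, so late binding is harmless here.
    per_call = timeit.timeit(lambda: benchmark_resize_pil(image_data, (512, 512)), number=10) / 10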

View file

@ -1,21 +1,24 @@
#!/usr/bin/env python3
"""
Benchmark for Odoo image processing utilities - Optimized version 1.
Optimization: Use numpy array for efficient image creation instead of per-pixel operations.
"""
import sys
import os
import io
import timeit
from PIL import Image
import base64
import io
import os
import sys
import timeit
import numpy as np
from PIL import Image
# Add odoo to path
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo')
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo")
# Activating venv programmatically by modifying path
venv_site_packages = '/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages'
venv_site_packages = (
"/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages"
)
if os.path.exists(venv_site_packages):
sys.path.insert(0, venv_site_packages)
@ -36,10 +39,10 @@ def create_test_image(width=2048, height=2048):
arr[::100, ::100] = [0, 255, 0]
# Convert numpy array to PIL Image
img = Image.fromarray(arr, 'RGB')
img = Image.fromarray(arr, "RGB")
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img.save(buffer, format="PNG")
return buffer.getvalue()
@ -48,7 +51,7 @@ def benchmark_resize_pil(image_data, target_size=(512, 512)):
img = Image.open(io.BytesIO(image_data))
img_resized = img.resize(target_size, Image.Resampling.LANCZOS)
buffer = io.BytesIO()
img_resized.save(buffer, format='PNG')
img_resized.save(buffer, format="PNG")
return buffer.getvalue()
@ -57,7 +60,7 @@ def benchmark_thumbnail_pil(image_data, max_size=(256, 256)):
img = Image.open(io.BytesIO(image_data))
img.thumbnail(max_size, Image.Resampling.LANCZOS)
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img.save(buffer, format="PNG")
return buffer.getvalue()
@ -67,13 +70,13 @@ def benchmark_format_conversion(image_data):
# Convert to JPEG
buffer_jpeg = io.BytesIO()
img_rgb = img.convert('RGB')
img_rgb.save(buffer_jpeg, format='JPEG', quality=85)
img_rgb = img.convert("RGB")
img_rgb.save(buffer_jpeg, format="JPEG", quality=85)
# Convert back to PNG
img2 = Image.open(io.BytesIO(buffer_jpeg.getvalue()))
buffer_png = io.BytesIO()
img2.save(buffer_png, format='PNG')
img2.save(buffer_png, format="PNG")
return buffer_png.getvalue()
@ -109,41 +112,54 @@ def run_benchmarks():
print(f"Image size: {size_mb:.2f} MB")
# Benchmark resize
time_resize = timeit.timeit(
lambda: benchmark_resize_pil(image_data, (512, 512)),
number=10
) / 10
time_resize = (
timeit.timeit(
lambda: benchmark_resize_pil(image_data, (512, 512)), number=10
)
/ 10
)
print(f"Resize to 512x512: {time_resize * 1000:.2f} ms")
# Benchmark thumbnail
time_thumbnail = timeit.timeit(
time_thumbnail = (
timeit.timeit(
lambda: benchmark_thumbnail_pil(image_data, (256, 256)),
number=10
) / 10
number=10,
)
/ 10
)
print(f"Thumbnail to 256x256: {time_thumbnail * 1000:.2f} ms")
# Benchmark format conversion
time_format = timeit.timeit(
lambda: benchmark_format_conversion(image_data),
number=5
) / 5
print(f"Format conversion (PNG->JPEG->PNG): {time_format*1000:.2f} ms")
time_format = (
timeit.timeit(
lambda: benchmark_format_conversion(image_data), number=5
)
/ 5
)
print(
f"Format conversion (PNG->JPEG->PNG): {time_format * 1000:.2f} ms"
)
# Benchmark base64
time_base64 = timeit.timeit(
lambda: benchmark_base64_operations(image_data),
number=100
) / 100
time_base64 = (
timeit.timeit(
lambda: benchmark_base64_operations(image_data), number=100
)
/ 100
)
print(f"Base64 encode/decode: {time_base64 * 1000:.2f} ms")
results.append({
'size': label,
'image_mb': size_mb,
'resize_ms': time_resize * 1000,
'thumbnail_ms': time_thumbnail * 1000,
'format_ms': time_format * 1000,
'base64_ms': time_base64 * 1000,
})
results.append(
{
"size": label,
"image_mb": size_mb,
"resize_ms": time_resize * 1000,
"thumbnail_ms": time_thumbnail * 1000,
"format_ms": time_format * 1000,
"base64_ms": time_base64 * 1000,
}
)
# Summary
print("\n" + "=" * 80)
@ -151,8 +167,10 @@ def run_benchmarks():
print("=" * 80)
for r in results:
print(f"\n{r['size']} ({r['image_mb']:.2f} MB):")
print(f" Total processing time: {r['resize_ms'] + r['thumbnail_ms'] + r['format_ms']:.2f} ms")
print(
f" Total processing time: {r['resize_ms'] + r['thumbnail_ms'] + r['format_ms']:.2f} ms"
)
if __name__ == '__main__':
if __name__ == "__main__":
run_benchmarks()

View file

@ -1,20 +1,23 @@
#!/usr/bin/env python3
"""
Benchmark for Odoo image processing utilities - Optimized version 2.
Optimization: Use PIL's putdata() with pre-computed pixel list instead of per-pixel loops.
"""
import sys
import os
import io
import timeit
from PIL import Image
import base64
import io
import os
import sys
import timeit
from PIL import Image
# Add odoo to path
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo')
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo")
# Activating venv programmatically by modifying path
venv_site_packages = '/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages'
venv_site_packages = (
"/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages"
)
if os.path.exists(venv_site_packages):
sys.path.insert(0, venv_site_packages)
@ -29,7 +32,7 @@ except ImportError as e:
def create_test_image(width=2048, height=2048):
"""Create a test image in memory using PIL's native methods efficiently."""
# Create image with red background
img = Image.new('RGB', (width, height), color=(255, 0, 0))
img = Image.new("RGB", (width, height), color=(255, 0, 0))
# Use load() to get pixel access object once
pixels = img.load()
@ -40,7 +43,7 @@ def create_test_image(width=2048, height=2048):
pixels[i, j] = (0, 255, 0)
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img.save(buffer, format="PNG")
return buffer.getvalue()
@ -49,7 +52,7 @@ def benchmark_resize_pil(image_data, target_size=(512, 512)):
img = Image.open(io.BytesIO(image_data))
img_resized = img.resize(target_size, Image.Resampling.LANCZOS)
buffer = io.BytesIO()
img_resized.save(buffer, format='PNG')
img_resized.save(buffer, format="PNG")
return buffer.getvalue()
@ -58,7 +61,7 @@ def benchmark_thumbnail_pil(image_data, max_size=(256, 256)):
img = Image.open(io.BytesIO(image_data))
img.thumbnail(max_size, Image.Resampling.LANCZOS)
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img.save(buffer, format="PNG")
return buffer.getvalue()
@ -68,13 +71,13 @@ def benchmark_format_conversion(image_data):
# Convert to JPEG
buffer_jpeg = io.BytesIO()
img_rgb = img.convert('RGB')
img_rgb.save(buffer_jpeg, format='JPEG', quality=85)
img_rgb = img.convert("RGB")
img_rgb.save(buffer_jpeg, format="JPEG", quality=85)
# Convert back to PNG
img2 = Image.open(io.BytesIO(buffer_jpeg.getvalue()))
buffer_png = io.BytesIO()
img2.save(buffer_png, format='PNG')
img2.save(buffer_png, format="PNG")
return buffer_png.getvalue()
@ -110,41 +113,54 @@ def run_benchmarks():
print(f"Image size: {size_mb:.2f} MB")
# Benchmark resize
time_resize = timeit.timeit(
lambda: benchmark_resize_pil(image_data, (512, 512)),
number=10
) / 10
time_resize = (
timeit.timeit(
lambda: benchmark_resize_pil(image_data, (512, 512)), number=10
)
/ 10
)
print(f"Resize to 512x512: {time_resize * 1000:.2f} ms")
# Benchmark thumbnail
time_thumbnail = timeit.timeit(
time_thumbnail = (
timeit.timeit(
lambda: benchmark_thumbnail_pil(image_data, (256, 256)),
number=10
) / 10
number=10,
)
/ 10
)
print(f"Thumbnail to 256x256: {time_thumbnail * 1000:.2f} ms")
# Benchmark format conversion
time_format = timeit.timeit(
lambda: benchmark_format_conversion(image_data),
number=5
) / 5
print(f"Format conversion (PNG->JPEG->PNG): {time_format*1000:.2f} ms")
time_format = (
timeit.timeit(
lambda: benchmark_format_conversion(image_data), number=5
)
/ 5
)
print(
f"Format conversion (PNG->JPEG->PNG): {time_format * 1000:.2f} ms"
)
# Benchmark base64
time_base64 = timeit.timeit(
lambda: benchmark_base64_operations(image_data),
number=100
) / 100
time_base64 = (
timeit.timeit(
lambda: benchmark_base64_operations(image_data), number=100
)
/ 100
)
print(f"Base64 encode/decode: {time_base64 * 1000:.2f} ms")
results.append({
'size': label,
'image_mb': size_mb,
'resize_ms': time_resize * 1000,
'thumbnail_ms': time_thumbnail * 1000,
'format_ms': time_format * 1000,
'base64_ms': time_base64 * 1000,
})
results.append(
{
"size": label,
"image_mb": size_mb,
"resize_ms": time_resize * 1000,
"thumbnail_ms": time_thumbnail * 1000,
"format_ms": time_format * 1000,
"base64_ms": time_base64 * 1000,
}
)
# Summary
print("\n" + "=" * 80)
@ -152,8 +168,10 @@ def run_benchmarks():
print("=" * 80)
for r in results:
print(f"\n{r['size']} ({r['image_mb']:.2f} MB):")
print(f" Total processing time: {r['resize_ms'] + r['thumbnail_ms'] + r['format_ms']:.2f} ms")
print(
f" Total processing time: {r['resize_ms'] + r['thumbnail_ms'] + r['format_ms']:.2f} ms"
)
if __name__ == '__main__':
if __name__ == "__main__":
run_benchmarks()

View file

@ -1,35 +1,41 @@
#!/usr/bin/env python3
"""Profile the cache key generation to find hotspots"""
import cProfile
import pstats
from inspect import signature, Parameter
from inspect import Parameter, signature
def build_cache_key_eval(method, cache_args):
"""Current implementation using eval"""
args = ', '.join(
args = ", ".join(
str(params.replace(annotation=Parameter.empty))
for params in signature(method).parameters.values()
)
values = ['self._name', 'method', *cache_args]
code = f"lambda {args}: ({''.join(a for arg in values for a in (arg, ','))})"
return eval(code, {'method': method})
values = ["self._name", "method", *cache_args]
code = (
f"lambda {args}: ({''.join(a for arg in values for a in (arg, ','))})"
)
return eval(code, {"method": method})
class MockModel:
_name = 'test.model'
_name = "test.model"
def simple_method(self, arg1, arg2):
return arg1 + arg2
# Profile the creation overhead
profiler = cProfile.Profile()
profiler.enable()
for i in range(10_000):
_ = build_cache_key_eval(MockModel.simple_method, ['arg1', 'arg2'])
_ = build_cache_key_eval(MockModel.simple_method, ["arg1", "arg2"])
profiler.disable()
# Print stats
stats = pstats.Stats(profiler)
stats.sort_stats('cumulative')
stats.sort_stats("cumulative")
print("=== Top 20 functions by cumulative time ===")
stats.print_stats(20)
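
For readers skimming the profiler output, print_stats emits the standard pstats columns:

# ncalls  tottime  percall  cumtime  percall  filename:lineno(function)
# tottime excludes time spent in sub-calls, cumtime includes it; sorting by
# "cumulative" (as above) surfaces call-tree roots, "tottime" ranks raw hotspots.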

View file

@ -1,29 +1,31 @@
#!/usr/bin/env python3
"""
Unified profiler for image processing benchmark.
Profiles both CPU (cProfile) and memory (tracemalloc).
"""
import cProfile
import pstats
import io
import tracemalloc
import sys
import os
import pstats
import sys
import tracemalloc
# Add odoo to path
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo')
venv_site_packages = '/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages'
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo")
venv_site_packages = (
"/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages"
)
if os.path.exists(venv_site_packages):
sys.path.insert(0, venv_site_packages)
# Import benchmark module
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash')
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash")
from benchmark_image import (
create_test_image,
benchmark_base64_operations,
benchmark_format_conversion,
benchmark_resize_pil,
benchmark_thumbnail_pil,
benchmark_format_conversion,
benchmark_base64_operations,
create_test_image,
)
@ -64,7 +66,7 @@ def profile_image_operations():
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s)
ps.strip_dirs()
ps.sort_stats('cumulative')
ps.sort_stats("cumulative")
ps.print_stats(40)
print(s.getvalue())
@ -74,7 +76,7 @@ def profile_image_operations():
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s)
ps.strip_dirs()
ps.sort_stats('tottime')
ps.sort_stats("tottime")
ps.print_stats(40)
print(s.getvalue())
@ -82,19 +84,21 @@ def profile_image_operations():
print("\n" + "=" * 80)
print("MEMORY PROFILE (top 40 allocations)")
print("=" * 80)
top_stats = snapshot.statistics('lineno')
top_stats = snapshot.statistics("lineno")
for index, stat in enumerate(top_stats[:40], 1):
print(f"{index:2}. {stat}")
print("\n" + "=" * 80)
print("MEMORY PROFILE (grouped by file)")
print("=" * 80)
top_stats = snapshot.statistics('filename')
top_stats = snapshot.statistics("filename")
for index, stat in enumerate(top_stats[:30], 1):
print(f"{index:2}. {stat}")
# Save detailed profiles
profiler.dump_stats('/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/cpu_profile.prof')
profiler.dump_stats(
"/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/cpu_profile.prof"
)
# Summary
current, peak = tracemalloc.get_traced_memory()
@ -107,13 +111,16 @@ def profile_image_operations():
tracemalloc.stop()
# Save memory data
with open('/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/memory_baseline.txt', 'w') as f:
with open(
"/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/memory_baseline.txt",
"w",
) as f:
f.write(f"Peak memory: {peak / 1024 / 1024:.1f} MiB\n")
f.write(f"Current memory: {current / 1024 / 1024:.1f} MiB\n")
return True
if __name__ == '__main__':
if __name__ == "__main__":
success = profile_image_operations()
sys.exit(0 if success else 1)

View file

@ -1,29 +1,31 @@
#!/usr/bin/env python3
"""
Unified profiler for optimized image processing benchmark.
Profiles both CPU (cProfile) and memory (tracemalloc).
"""
import cProfile
import pstats
import io
import tracemalloc
import sys
import os
import pstats
import sys
import tracemalloc
# Add odoo to path
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo')
venv_site_packages = '/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages'
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo")
venv_site_packages = (
"/Users/krrt7/Desktop/work/odoo_org/odoo/venv/lib/python3.14/site-packages"
)
if os.path.exists(venv_site_packages):
sys.path.insert(0, venv_site_packages)
# Import benchmark module
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash')
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash")
from benchmark_image_opt1 import (
create_test_image,
benchmark_base64_operations,
benchmark_format_conversion,
benchmark_resize_pil,
benchmark_thumbnail_pil,
benchmark_format_conversion,
benchmark_base64_operations,
create_test_image,
)
@ -64,7 +66,7 @@ def profile_image_operations():
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s)
ps.strip_dirs()
ps.sort_stats('cumulative')
ps.sort_stats("cumulative")
ps.print_stats(40)
print(s.getvalue())
@ -74,12 +76,14 @@ def profile_image_operations():
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s)
ps.strip_dirs()
ps.sort_stats('tottime')
ps.sort_stats("tottime")
ps.print_stats(40)
print(s.getvalue())
# Save detailed profiles
profiler.dump_stats('/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/cpu_profile_opt1.prof')
profiler.dump_stats(
"/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/cpu_profile_opt1.prof"
)
# Summary
current, peak = tracemalloc.get_traced_memory()
@ -94,6 +98,6 @@ def profile_image_operations():
return True
if __name__ == '__main__':
if __name__ == "__main__":
success = profile_image_operations()
sys.exit(0 if success else 1)

View file

@ -1,18 +1,18 @@
#!/usr/bin/env python3
"""
Unified profiler for Odoo test_orm performance tests.
Profiles both CPU (cProfile) and memory (tracemalloc) in a single run.
"""
import cProfile
import pstats
import io
import tracemalloc
import pstats
import sys
import os
import tracemalloc
import unittest
# Add odoo to path
sys.path.insert(0, '/Users/krrt7/Desktop/work/odoo_org/odoo')
sys.path.insert(0, "/Users/krrt7/Desktop/work/odoo_org/odoo")
def profile_tests():
"""Run test_performance with CPU and memory profiling."""
@ -27,8 +27,8 @@ def profile_tests():
# Discover and run tests
loader = unittest.TestLoader()
suite = loader.discover(
start_dir='/Users/krrt7/Desktop/work/odoo_org/odoo/odoo/addons/test_orm/tests',
pattern='test_performance.py'
start_dir="/Users/krrt7/Desktop/work/odoo_org/odoo/odoo/addons/test_orm/tests",
pattern="test_performance.py",
)
runner = unittest.TextTestRunner(verbosity=2)
@ -47,7 +47,7 @@ def profile_tests():
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s)
ps.strip_dirs()
ps.sort_stats('cumulative')
ps.sort_stats("cumulative")
ps.print_stats(30)
print(s.getvalue())
@ -57,7 +57,7 @@ def profile_tests():
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s)
ps.strip_dirs()
ps.sort_stats('tottime')
ps.sort_stats("tottime")
ps.print_stats(30)
print(s.getvalue())
@ -65,19 +65,21 @@ def profile_tests():
print("\n" + "=" * 80)
print("MEMORY PROFILE (top 30 allocations)")
print("=" * 80)
top_stats = snapshot.statistics('lineno')
top_stats = snapshot.statistics("lineno")
for index, stat in enumerate(top_stats[:30], 1):
print(f"{index:2}. {stat}")
print("\n" + "=" * 80)
print("MEMORY PROFILE (grouped by file)")
print("=" * 80)
top_stats = snapshot.statistics('filename')
top_stats = snapshot.statistics("filename")
for index, stat in enumerate(top_stats[:20], 1):
print(f"{index:2}. {stat}")
# Save detailed profiles
profiler.dump_stats('/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/cpu_profile.prof')
profiler.dump_stats(
"/Users/krrt7/Desktop/work/odoo_org/odoo/.codeflash/cpu_profile.prof"
)
# Summary
print("\n" + "=" * 80)
@ -95,6 +97,7 @@ def profile_tests():
return result.wasSuccessful()
if __name__ == '__main__':
if __name__ == "__main__":
success = profile_tests()
sys.exit(0 if success else 1)

View file

@ -1,9 +1,10 @@
#!/usr/bin/env python3
"""Parse python -X importtime output and produce a sorted breakdown."""
import os
import re
import subprocess
import sys
import re
import os
def parse_importtime(stderr_lines):
pattern = re.compile(
@ -20,13 +21,15 @@ def parse_importtime(stderr_lines):
results.append((module, self_us, cumul_us, indent))
return results
def main():
target = sys.argv[1] if len(sys.argv) > 1 else "import rich"
venv_python = os.path.expanduser("~/rich/.venv/bin/python")
proc = subprocess.run(
[venv_python, "-X", "importtime", "-c", target],
capture_output=True, text=True
capture_output=True,
text=True,
)
entries = parse_importtime(proc.stderr.splitlines())
entries.sort(key=lambda e: e[1], reverse=True)
@ -43,5 +46,6 @@ def main():
f.write(f"{mod}\t{self_us}\t{cumul_us}\t{depth}\n")
print(f"\nTSV written to {sys.argv[2]}")
if __name__ == "__main__":
main()
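
For context, the regex in parse_importtime targets the stderr format that `python -X importtime` produces, which looks like this (timings illustrative):

# import time: self [us] | cumulative | imported package
# import time:       280 |        280 |   _io
# import time:       154 |        434 | io

The leading spaces before each module name encode import depth, which is what the indent field in the parsed tuples captures.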

View file

@ -8,19 +8,24 @@ Compares:
Usage:
python3.13 bench_runtime.py
"""
import timeit
import sys
import os
import sys
import timeit
sys.path.insert(0, os.path.expanduser("~/rich"))
def bench(label, stmt, setup, number=500_000):
times = timeit.repeat(stmt, setup, number=number, repeat=5)
best = min(times)
per_call_ns = best / number * 1e9
print(f" {label}: {best*1000:.1f}ms total, {per_call_ns:.0f}ns/call ({number:,} iterations, best of 5)")
print(
f" {label}: {best * 1000:.1f}ms total, {per_call_ns:.0f}ns/call ({number:,} iterations, best of 5)"
)
return best
print(f"Python {sys.version}")
print(f"Rich path: {os.path.expanduser('~/rich')}")
print()
@ -38,11 +43,14 @@ opts_b = c.options.copy()
bench("__eq__ (equal objects)", "opts_a == opts_b", eq_setup)
bench("__eq__ (same object)", "opts_a == opts_a", eq_setup)
eq_setup_diff = eq_setup + """\
eq_setup_diff = (
eq_setup
+ """\
from rich.console import ConsoleDimensions
opts_c = opts_b.copy()
opts_c.size = ConsoleDimensions(999, 999)
"""
)
bench("__eq__ (differ at size)", "opts_a == opts_c", eq_setup_diff)
print()
@ -57,8 +65,11 @@ opts = c.options
"""
bench("update(width=80)", "opts.update(width=80)", update_setup)
bench("update() no changes", "opts.update()", update_setup)
bench("update(width=80, no_wrap=True, highlight=False)",
"opts.update(width=80, no_wrap=True, highlight=False)", update_setup)
bench(
"update(width=80, no_wrap=True, highlight=False)",
"opts.update(width=80, no_wrap=True, highlight=False)",
update_setup,
)
print()
# --- 3. _emoji_replace ---
@ -68,8 +79,18 @@ import sys, os
sys.path.insert(0, os.path.expanduser("~/rich"))
from rich._emoji_replace import _emoji_replace
"""
bench("_emoji_replace (with emoji)", '_emoji_replace("Hello :wave: world :smile:")', emoji_setup, number=200_000)
bench("_emoji_replace (no emoji)", '_emoji_replace("Hello world, no emojis here")', emoji_setup, number=200_000)
bench(
"_emoji_replace (with emoji)",
'_emoji_replace("Hello :wave: world :smile:")',
emoji_setup,
number=200_000,
)
bench(
"_emoji_replace (no emoji)",
'_emoji_replace("Hello world, no emojis here")',
emoji_setup,
number=200_000,
)
print()
print("Done.")

View file

@ -8,19 +8,24 @@ Targets:
Usage:
cd ~/rich && ~/venv313/bin/python ~/bench/bench_runtime2.py
"""
import timeit
import sys
import os
import sys
import timeit
sys.path.insert(0, os.path.expanduser("~/rich"))
def bench(label, stmt, setup, number=500_000, repeat=7):
times = timeit.repeat(stmt, setup, number=number, repeat=repeat)
best = min(times)
per_call_ns = best / number * 1e9
print(f" {label}: {best*1000:.1f}ms/{number//1000}K calls, {per_call_ns:.0f}ns/call")
print(
f" {label}: {best * 1000:.1f}ms/{number // 1000}K calls, {per_call_ns:.0f}ns/call"
)
return best
print(f"Python {sys.version}")
print()
@ -32,43 +37,71 @@ from rich.style import Style
# --- 1. Style.__eq__ ---
print("=== Style.__eq__ ===")
eq_setup = common_setup + """\
eq_setup = (
common_setup
+ """\
s1 = Style(bold=True, color="red")
s2 = Style(bold=True, color="red")
# Force hash caching
hash(s1); hash(s2)
"""
)
bench("identity (s1 == s1)", "s1 == s1", eq_setup, number=1_000_000)
bench("equal (s1 == s2)", "s1 == s2", eq_setup, number=1_000_000)
bench("not-equal (s1 != Style())", "s1 != Style()", eq_setup + "s3 = Style(); hash(s3)\n", number=1_000_000)
bench(
"not-equal (s1 != Style())",
"s1 != Style()",
eq_setup + "s3 = Style(); hash(s3)\n",
number=1_000_000,
)
print()
# --- 2. Style.combine ---
print("=== Style.combine ===")
combine_setup = common_setup + """\
combine_setup = (
common_setup
+ """\
styles = [Style(bold=True), Style(color="red"), Style(italic=True)]
"""
bench("combine(3 styles)", "Style.combine(styles)", combine_setup, number=200_000)
)
bench(
"combine(3 styles)", "Style.combine(styles)", combine_setup, number=200_000
)
combine_setup_2 = common_setup + """\
combine_setup_2 = (
common_setup
+ """\
styles = [Style(bold=True), Style(color="red")]
"""
bench("combine(2 styles)", "Style.combine(styles)", combine_setup_2, number=200_000)
)
bench(
"combine(2 styles)",
"Style.combine(styles)",
combine_setup_2,
number=200_000,
)
print()
# --- 3. Style.chain ---
print("=== Style.chain ===")
chain_setup = common_setup + """\
chain_setup = (
common_setup
+ """\
s1 = Style(bold=True)
s2 = Style(color="red")
s3 = Style(italic=True)
"""
bench("chain(3 styles)", "Style.chain(s1, s2, s3)", chain_setup, number=200_000)
)
bench(
"chain(3 styles)", "Style.chain(s1, s2, s3)", chain_setup, number=200_000
)
print()
# --- 4. Segment.simplify ---
print("=== Segment.simplify ===")
simplify_setup = common_setup + """\
simplify_setup = (
common_setup
+ """\
from rich.segment import Segment
style_a = Style(bold=True, color="red")
# Same object reference (common case)
@ -80,20 +113,45 @@ segs_equal = [Segment("hello ", style_a), Segment("world", style_b), Segment("! ", style_a)]
style_c = Style(italic=True)
segs_diff = [Segment("hello ", style_a), Segment("world", style_c), Segment("! ", style_a)]
"""
bench("simplify (identity styles)", "list(Segment.simplify(segs_identity))", simplify_setup, number=200_000)
bench("simplify (equal styles)", "list(Segment.simplify(segs_equal))", simplify_setup, number=200_000)
bench("simplify (diff styles)", "list(Segment.simplify(segs_diff))", simplify_setup, number=200_000)
)
bench(
"simplify (identity styles)",
"list(Segment.simplify(segs_identity))",
simplify_setup,
number=200_000,
)
bench(
"simplify (equal styles)",
"list(Segment.simplify(segs_equal))",
simplify_setup,
number=200_000,
)
bench(
"simplify (diff styles)",
"list(Segment.simplify(segs_diff))",
simplify_setup,
number=200_000,
)
print()
# --- 5. E2E Console.print ---
print("=== E2E Console.print ===")
e2e_setup = common_setup + """\
e2e_setup = (
common_setup
+ """\
from rich.console import Console
from rich.text import Text
c = Console(file=open(os.devnull, "w"), color_system="truecolor")
markup = "[bold red]Error:[/bold red] Something [italic]went wrong[/italic] in [blue underline]module.py[/blue underline]:42"
"""
bench("Console.print(markup)", "c.print(markup)", e2e_setup, number=5_000, repeat=5)
)
bench(
"Console.print(markup)",
"c.print(markup)",
e2e_setup,
number=5_000,
repeat=5,
)
print()
print("Done.")

View file

@ -1,17 +1,22 @@
"""Benchmark Text hot paths: construction, copy, divide, render."""
import timeit
import sys
import os
import sys
import timeit
sys.path.insert(0, os.path.expanduser("~/rich"))
def bench(label, stmt, setup, number=200_000):
times = timeit.repeat(stmt, setup, number=number, repeat=5)
best = min(times)
per_call_ns = best / number * 1e9
print(f" {label}: {best*1000:.1f}ms total, {per_call_ns:.0f}ns/call ({number:,} iters, best of 5)")
print(
f" {label}: {best * 1000:.1f}ms total, {per_call_ns:.0f}ns/call ({number:,} iters, best of 5)"
)
return best
print(f"Python {sys.version}")
print()
@ -26,12 +31,18 @@ from rich.console import Console
# --- Text construction ---
print("=== Text() construction ===")
bench("Text('hello world')", "Text('hello world')", common)
bench("Text('hello world', style='bold')", "Text('hello world', style='bold')", common)
bench(
"Text('hello world', style='bold')",
"Text('hello world', style='bold')",
common,
)
print()
# --- Text.copy ---
print("=== Text.copy() ===")
copy_setup = common + "t = Text('hello world', style='bold')\nt.stylize('red', 0, 5)\n"
copy_setup = (
common + "t = Text('hello world', style='bold')\nt.stylize('red', 0, 5)\n"
)
bench("copy()", "t.copy()", copy_setup)
print()
@ -43,13 +54,20 @@ print()
# --- Text.divide ---
print("=== Text.divide() ===")
div_setup = common + "t = Text('hello world, this is a longer text for divide testing')\nt.stylize('bold', 0, 5)\nt.stylize('red', 6, 11)\n"
bench("divide([10, 20, 30])", "t.divide([10, 20, 30])", div_setup, number=100_000)
div_setup = (
common
+ "t = Text('hello world, this is a longer text for divide testing')\nt.stylize('bold', 0, 5)\nt.stylize('red', 6, 11)\n"
)
bench(
"divide([10, 20, 30])", "t.divide([10, 20, 30])", div_setup, number=100_000
)
print()
# --- Text.render ---
print("=== Text.render() ===")
render_setup = common + """\
render_setup = (
common
+ """\
c = Console(width=80)
t0 = Text('hello world')
t1 = Text('hello world')
@ -58,6 +76,7 @@ t2 = Text('hello world')
t2.stylize('bold', 0, 5)
t2.stylize('red', 6, 11)
"""
)
bench("render (no spans)", "list(t0.render(c))", render_setup, number=100_000)
bench("render (1 span)", "list(t1.render(c))", render_setup, number=100_000)
bench("render (2 spans)", "list(t2.render(c))", render_setup, number=100_000)
@ -65,11 +84,24 @@ print()
# --- E2E Console.print ---
print("=== Console.print() E2E ===")
print_setup = common + """\
print_setup = (
common
+ """\
import io
c = Console(file=io.StringIO(), width=80)
"""
bench("print('hello')", "c.file.seek(0); c.print('hello')", print_setup, number=50_000)
bench("print('[bold]hello[/bold]')", "c.file.seek(0); c.print('[bold]hello[/bold]')", print_setup, number=50_000)
)
bench(
"print('hello')",
"c.file.seek(0); c.print('hello')",
print_setup,
number=50_000,
)
bench(
"print('[bold]hello[/bold]')",
"c.file.seek(0); c.print('[bold]hello[/bold]')",
print_setup,
number=50_000,
)
print("\nDone.")

View file

@ -40,9 +40,15 @@ REPO_ROOT = Path(__file__).resolve().parent.parent
# Single small doc to measure the hot path we're optimizing.
# ---------------------------------------------------------------------------
FAST_FIXTURES = [
("unstructured-api/sample-docs/embedded-images-tables.pdf", "img-tables-1p"),
(
"unstructured-api/sample-docs/embedded-images-tables.pdf",
"img-tables-1p",
),
("unstructured_prop/tests/test_files/multi-column-2p.pdf", "multicol-2p"),
("unstructured-api/sample-docs/layout-parser-paper-with-table.pdf", "table-1p"),
(
"unstructured-api/sample-docs/layout-parser-paper-with-table.pdf",
"table-1p",
),
]
HIRES_FIXTURE = (
@ -80,7 +86,9 @@ async def batch_serial(filepaths: list[Path], strategy: str) -> list[int]:
return results
async def batch_concurrent(filepaths: list[Path], strategy: str, concurrency: int) -> list[int]:
async def batch_concurrent(
filepaths: list[Path], strategy: str, concurrency: int
) -> list[int]:
sem = asyncio.Semaphore(concurrency)
async def worker(fp: Path) -> int:

View file

@ -3,14 +3,14 @@ repos:
hooks:
- id: ruff-check
name: ruff check
entry: uv run ruff check packages/
entry: uv run ruff check
language: system
pass_filenames: false
types: [python]
- id: ruff-format
name: ruff format
entry: uv run ruff format --check packages/
entry: uv run ruff format --check
language: system
pass_filenames: false
types: [python]
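
With the hard-coded packages/ argument gone, both hooks lint whatever ruff discovers from the repo root, minus the extend-exclude entries. The standard invocations exercise the widened hooks locally:

pre-commit run --all-files
# or run the underlying tools directly:
uv run ruff check
uv run ruff format --check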

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python3
"""LLM-graded eval scorer.
Feeds the manifest rubric and full conversation to Claude, which scores

View file

@ -1,7 +1,6 @@
"""Batch processing module with async interface."""
async def async_batch_process(items: list[dict]) -> list[dict]:
"""Process a batch of items, deduplicating by ID.

View file

@ -50,7 +50,6 @@ src = [
"packages/github-app",
]
extend-exclude = [
".codeflash/",
"packages/codeflash-python/tests/code_to_optimize",
"packages/codeflash-python/src/codeflash_python/ai/_tabulate.py",
]
@ -235,6 +234,107 @@ ignore = [
"W", # whitespace issues in test data strings
]
".codeflash/**" = [
"B007", # unused loop variable in benchmarks
"B023", # function binding loop variable in timeit lambdas
"BLE001", # broad Exception catches in scripts
"C901", # complex benchmark functions
"E402", # imports after sys.path manipulation
"F841", # unused locals in benchmark setup
"PERF102", # dict values iteration fine in scripts
"PERF401", # list comprehension not always clearer
"PLC0415", # imports in functions fine in scripts
"PLR0912", # many branches in benchmark harnesses
"PLR0913", # many args in benchmark functions
"PLR0915", # many statements in benchmark functions
"PLR2004", # magic values in benchmarks
"PLW1510", # subprocess.run without check
"PTH110", # os.path.exists in scripts
"PTH111", # os.path.expanduser in scripts
"PT006", # parametrize types fine in benchmarks
"PTH123", # open() in scripts
"S101", # assert in scripts
"S108", # temp paths in scripts
"S307", # eval used for benchmark code generation
"S311", # random in benchmarks
"S324", # hashlib in benchmarks
"S603", # subprocess calls in scripts
"S607", # partial executable path in scripts
"SIM105", # contextlib.suppress suggestion
"SIM110", # any() suggestion
"SLF001", # private member access in benchmarks
"T201", # print is the output mechanism
]
"scripts/*" = [
"BLE001", # broad Exception catches in scripts
"C901", # complex analysis functions
"FA102", # future annotations not needed in scripts
"PERF102", # dict keys iteration fine in scripts
"PERF203", # try-except in loop fine in scripts
"PERF401", # list comprehension not always clearer
"PLC0415", # imports in functions fine in scripts
"PLR0911", # many return statements in analysis
"PLR0912", # many branches in analysis scripts
"PLR0913", # many args in script functions
"PLR0915", # many statements in analysis scripts
"PLR2004", # magic values in scripts
"PLW1510", # subprocess.run without check
"PTH111", # os.path.expanduser in scripts
"PTH117", # os.path.isabs in scripts
"PTH118", # os.path.join in scripts
"PTH123", # open() in scripts
"PTH208", # os.listdir in scripts
"S112", # try-except-continue in scripts
"S311", # random in scripts
"S324", # hashlib in scripts
"S603", # subprocess calls in scripts
"S607", # partial executable path in scripts
"SIM102", # collapsible if fine in scripts
"SIM105", # contextlib.suppress suggestion
"SIM110", # any() suggestion
"T201", # print is the output mechanism
"TC003", # type-checking imports fine in scripts
"TRY300", # try-except-return fine in scripts
]
"evals/**" = [
"B007", # unused loop variable
"C401", # generator to set fine in evals
"C901", # complex test/eval functions
"F841", # unused locals fine in eval setup
"FA102", # future annotations not needed in evals
"PERF102", # dict values iteration fine in evals
"PERF203", # try-except in loop fine in evals
"PERF401", # list comprehension not always clearer
"PLC0206", # dict keys iteration fine in evals
"PLC0415", # imports in functions fine in evals
"PLR0911", # many return statements
"PLR0912", # many branches
"PLR0913", # many args in eval functions
"PLR0915", # many statements
"PLR2004", # magic values in eval scenarios
"PLW1510", # subprocess.run without check
"PT006", # parametrize types fine in evals
"PTH123", # open() fine in evals
"RUF003", # ambiguous unicode fine in test data
"S101", # assert in evals
"S311", # random fine in evals
"S324", # hashlib fine in evals
"S603", # subprocess calls in evals
"S607", # partial executable path in evals
"SIM105", # contextlib.suppress suggestion
"SIM110", # any() suggestion
"SLF001", # private member access in evals
"T201", # print in eval harnesses
]
"plugin/**" = [
"B007", # unused loop variable in reference scripts
"PTH100", # os.path.abspath in scripts
"PTH110", # os.path.exists in scripts
"PTH123", # open() in scripts
"S108", # temp paths in scripts
"SLF001", # private member access in plugin scripts
"T201", # print is the output mechanism
]
"reports/*" = [
"C901", # complex layout builders are expected in Dash apps
"E501", # long strings in inline HTML

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# ///

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# ///

View file

@ -1,4 +1,3 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# ///