# Tests for analytics.core transaction generation and processing.
from analytics.core import generate_transactions, process_transactions


def test_basic():
    """Smoke test: a small batch flows through the full pipeline."""
    transactions = generate_transactions(10)
    summary = process_transactions(transactions)

    assert summary["total_processed"] == 10
    assert summary["group_count"] > 0
    assert "TRANSACTION ANALYTICS REPORT" in summary["report"]

    # Check analytics structure: every group carries consistent stats.
    for stats in summary["analytics"].values():
        assert stats["count"] > 0
        assert stats["revenue"] > 0
        assert stats["avg_order"] > 0
        assert stats["min_order"] <= stats["max_order"]


def test_large_batch():
    """Production-scale batch — process_transactions uses too much memory.

    With 50k transactions, peak memory is far higher than the input data size.
    The goal is to reduce memory overhead while preserving correctness.
    """
    transactions = generate_transactions(50_000)
    summary = process_transactions(transactions)

    # Correctness checks
    assert summary["total_processed"] == 50_000
    assert summary["group_count"] == 50  # 5 regions × 10 categories
    assert "TRANSACTION ANALYTICS REPORT" in summary["report"]

    # Verify analytics integrity: group counts must partition the batch,
    # and revenue must be strictly positive overall.
    analytics = summary["analytics"]
    assert sum(group["count"] for group in analytics.values()) == 50_000
    assert sum(group["revenue"] for group in analytics.values()) > 0

    for group in analytics.values():
        assert group["count"] > 0
        assert group["revenue"] > 0
        assert group["avg_order"] > 0
        assert group["min_order"] <= group["max_order"]