Merge branch 'main' into fix/llm-client-toctou-race

This commit is contained in:
Kevin Turcios 2026-04-05 22:13:48 -05:00 committed by GitHub
commit 0d2a76e535
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
56 changed files with 5918 additions and 447 deletions

View file

@ -0,0 +1,145 @@
# CI quality gates for the cf-webapp package: type-check, unit tests, and a
# production build run on PRs touching js/cf-webapp; results are posted back
# to the PR as a summary comment, and the job fails if any check failed.
name: cf-webapp Quality Gates
on:
  pull_request:
    paths:
      - "js/cf-webapp/**"
# Least-privilege token: read code/packages, write PR comments only.
permissions:
  contents: read
  packages: read
  pull-requests: write
# Cancel superseded runs on the same ref to save CI minutes.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  check-changes:
    runs-on: ubuntu-latest
    outputs:
      should-run: ${{ steps.filter.outputs.webapp }}
    steps:
      - uses: actions/checkout@v4
        with:
          # paths-filter needs history to diff against the base branch
          fetch-depth: 0
      - uses: dorny/paths-filter@v3
        id: filter
        with:
          filters: |
            webapp:
              - 'js/cf-webapp/**'
  # Placeholder job so required-status checks stay green when nothing relevant changed.
  skip:
    needs: check-changes
    if: needs.check-changes.outputs.should-run != 'true'
    runs-on: ubuntu-latest
    steps:
      - run: echo "No cf-webapp changes, skipping."
  benchmark:
    needs: check-changes
    if: needs.check-changes.outputs.should-run == 'true'
    runs-on: ubuntu-latest
    env:
      # Auth for the GitHub Packages npm registry (@codeflash-ai scope).
      NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: npm
          cache-dependency-path: js/cf-webapp/package-lock.json
          registry-url: https://npm.pkg.github.com
          scope: "@codeflash-ai"
      - name: Install dependencies
        working-directory: js/cf-webapp
        run: npm ci --ignore-scripts
      - name: Generate Prisma client
        working-directory: js/cf-webapp
        run: npx prisma generate
      - name: Type-check
        id: typecheck
        working-directory: js/cf-webapp
        run: npx tsc --noEmit
        continue-on-error: true
      - name: Tests
        id: tests
        working-directory: js/cf-webapp
        # shell: bash gives `-o pipefail`; the default run shell (`bash -e`)
        # does NOT, so without it `vitest | tee` always exits 0 via tee and a
        # test failure would be reported as step outcome "success".
        shell: bash
        run: npx vitest run --reporter=verbose 2>&1 | tee test-output.txt
        continue-on-error: true
      - name: Build
        id: build
        working-directory: js/cf-webapp
        # Same pipefail rationale as the Tests step.
        shell: bash
        run: npx next build 2>&1 | tee build-output.txt
        continue-on-error: true
      - name: Extract results
        id: results
        working-directory: js/cf-webapp
        run: |
          # Type-check status
          if [ "${{ steps.typecheck.outcome }}" = "success" ]; then
            echo "typecheck_status=✅ Pass" >> "$GITHUB_OUTPUT"
          else
            echo "typecheck_status=❌ Fail" >> "$GITHUB_OUTPUT"
          fi
          # Test summary (POSIX character class: \s is a GNU-grep extension)
          if [ "${{ steps.tests.outcome }}" = "success" ]; then
            TESTS_SUMMARY=$(grep -E "Tests[[:space:]]+[0-9]+" test-output.txt | tail -1 || echo "passed")
            echo "tests_status=✅ ${TESTS_SUMMARY}" >> "$GITHUB_OUTPUT"
          else
            echo "tests_status=❌ Tests failed" >> "$GITHUB_OUTPUT"
          fi
          # Build status
          if [ "${{ steps.build.outcome }}" = "success" ]; then
            echo "build_status=✅ Success" >> "$GITHUB_OUTPUT"
          else
            echo "build_status=❌ Fail" >> "$GITHUB_OUTPUT"
          fi
          # Extract route sizes from the Next.js build output table
          ROUTES=$(sed -n '/Route.*Size.*First Load/,/^$/p' build-output.txt | head -30 || echo "No route data")
          {
            echo "routes<<ROUTES_EOF"
            echo "$ROUTES"
            echo "ROUTES_EOF"
          } >> "$GITHUB_OUTPUT"
      - name: Post PR comment
        if: github.event_name == 'pull_request'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Pass step outputs through env instead of interpolating ${{ }}
          # into the script: multi-line build output could otherwise contain
          # the heredoc delimiter, quotes, or backticks and break/inject into
          # the shell command (GitHub script-injection hardening guidance).
          TYPECHECK_STATUS: ${{ steps.results.outputs.typecheck_status }}
          TESTS_STATUS: ${{ steps.results.outputs.tests_status }}
          BUILD_STATUS: ${{ steps.results.outputs.build_status }}
          ROUTES: ${{ steps.results.outputs.routes }}
        run: |
          gh pr comment "${{ github.event.pull_request.number }}" \
            --repo "${{ github.repository }}" \
            --body "$(cat <<COMMENT_EOF
          ## cf-webapp Quality Report
          | Check | Result |
          |-------|--------|
          | Type-check | ${TYPECHECK_STATUS} |
          | Tests | ${TESTS_STATUS} |
          | Build | ${BUILD_STATUS} |
          <details>
          <summary>Route Sizes</summary>
          \`\`\`
          ${ROUTES}
          \`\`\`
          </details>
          COMMENT_EOF
          )"
      # The three checks above use continue-on-error so all results can be
      # collected and reported; this final step enforces the gate.
      - name: Fail if any check failed
        if: steps.typecheck.outcome == 'failure' || steps.tests.outcome == 'failure' || steps.build.outcome == 'failure'
        run: exit 1

View file

@ -605,7 +605,10 @@ async def testgen_javascript(
)
# Strip incorrect file extensions from import paths (LLMs sometimes add .js to .ts imports)
# Must strip from ALL three test outputs since CLI uses instrumented versions
generated_test_source = strip_js_extensions(generated_test_source)
instrumented_behavior_tests = strip_js_extensions(instrumented_behavior_tests)
instrumented_perf_tests = strip_js_extensions(instrumented_perf_tests)
ph(request.user, "aiservice-testgen-tests-generated", properties={"language": language})

View file

@ -433,3 +433,41 @@ import { resolveCredentialsDir } from '../config/paths.js';"""
# No .js should remain
assert ".js" not in result
class TestInstrumentedTestsExtensionStripping:
    """Regression tests: .js extensions must be stripped from every test output.

    Guards against the bug where strip_js_extensions() was applied only to
    generated_test_source and not to instrumented_behavior_tests /
    instrumented_perf_tests, which produced "Cannot find module" errors in
    the CLI when the instrumented variants were executed.
    """

    def test_strip_extensions_on_all_outputs(self) -> None:
        """strip_js_extensions() removes LLM-added .js suffixes from imports."""
        # Raw LLM output: import paths incorrectly carry a .js extension.
        raw_test_source = """import { buildVerifyFn } from '../../google.js';
import { authenticate } from '../../sso.js';
test('should create verify function', () => {
const fn = buildVerifyFn(mockSave);
expect(fn).toBeDefined();
});"""
        # Same source with extensions removed — what every one of the three
        # test outputs must look like after stripping. (Real instrumented
        # tests also contain capture() calls; only stripping matters here.)
        stripped_source = """import { buildVerifyFn } from '../../google';
import { authenticate } from '../../sso';
test('should create verify function', () => {
const fn = buildVerifyFn(mockSave);
expect(fn).toBeDefined();
});"""
        actual = strip_js_extensions(raw_test_source)
        assert actual == stripped_source, "strip_js_extensions should remove .js extensions"
        # NOTE(review): this verifies the helper itself; an end-to-end test
        # calling testgen_javascript() and asserting on all three returned
        # outputs would give full regression coverage.

View file

@ -0,0 +1,34 @@
- generic [ref=e2]:
- generic [ref=e4]:
- img [ref=e6]
- generic [ref=e12]:
- heading "Get started with Codeflash" [level=1] [ref=e13]
- paragraph [ref=e14]: Make all your code optimal
- button "Continue with GitHub" [ref=e15] [cursor=pointer]:
- img [ref=e16]
- generic [ref=e18]: Continue with GitHub
- generic [ref=e20]:
- link "Terms" [ref=e21] [cursor=pointer]:
- /url: https://www.codeflash.ai/terms-of-service
- link "Privacy" [ref=e22] [cursor=pointer]:
- /url: https://www.codeflash.ai/privacy-policy
- link "Documentation" [ref=e23] [cursor=pointer]:
- /url: https://docs.codeflash.ai
- generic [ref=e25]:
- heading "Always Ship Optimal Code" [level=2] [ref=e27]
- generic [ref=e28]:
- generic [ref=e29]:
- img [ref=e31]
- paragraph [ref=e34]: VS Code/Cursor Extension to optimize all code locally
- generic [ref=e35]:
- img [ref=e37]
- paragraph [ref=e40]: Set it as a GitHub action to automate optimization
- generic [ref=e41]:
- img [ref=e43]
- paragraph [ref=e46]: Codeflash finds 2-55x performance improvements automatically
- generic [ref=e47]:
- img [ref=e49]
- paragraph [ref=e52]: Confidently merge the tested and proven optimizations
- generic [ref=e53]:
- img [ref=e55]
- paragraph [ref=e58]: Start free. No credit card, no lock-in

View file

@ -1,6 +1,11 @@
import bundleAnalyzer from "@next/bundle-analyzer"
import { dirname } from "path"
import { fileURLToPath } from "url"
const withBundleAnalyzer = bundleAnalyzer({
enabled: process.env.ANALYZE === "true",
})
const __dirname = dirname(fileURLToPath(import.meta.url))
/** @type {import("next").NextConfig} */
@ -71,7 +76,7 @@ const nextConfig = {
import { withSentryConfig } from "@sentry/nextjs"
export default withSentryConfig(
export default withBundleAnalyzer(withSentryConfig(
nextConfig,
{
// For all available options, see:
@ -101,4 +106,4 @@ export default withSentryConfig(
// Disable automatic instrumentation that might cause issues
automaticVercelMonitors: false,
},
)
))

File diff suppressed because it is too large Load diff

View file

@ -11,6 +11,7 @@
"lint:check": "eslint .",
"test": "vitest",
"type-check": "tsc --noEmit",
"analyze": "ANALYZE=true next build",
"prisma:generate": "npx prisma generate",
"prisma:migrate": "npx prisma migrate dev",
"prepare": "simple-git-hooks",
@ -21,11 +22,13 @@
"dependencies": {
"@anthropic-ai/sdk": "^0.74.0",
"@auth0/nextjs-auth0": "^4",
"@azure/msal-node": "^3.7.3",
"@codeflash-ai/common": "^1.0.30",
"@hookform/resolvers": "^3.3.2",
"@monaco-editor/react": "^4.7.0",
"@opentelemetry/auto-instrumentations-node": "^0.72.0",
"@opentelemetry/sdk-node": "^0.214.0",
"@prisma/client": "^6.7.0",
"@prisma/instrumentation": "^7.6.0",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-dropdown-menu": "^2.0.6",
"@radix-ui/react-label": "^2.0.2",
@ -38,25 +41,27 @@
"@radix-ui/react-toast": "^1.1.5",
"@radix-ui/react-tooltip": "^1.1.4",
"@sentry/nextjs": "^10.38.0",
"@sentry/opentelemetry": "^10.47.0",
"@types/node": "^24.3.0",
"@types/pg": "^8.10.9",
"@types/react": "19.2.13",
"@types/react-dom": "19.2.3",
"@types/react-syntax-highlighter": "^15.5.13",
"chart.js": "^4.4.9",
"chartjs-plugin-datalabels": "^2.2.0",
"class-variance-authority": "^0.7.0",
"clsx": "^2.0.0",
"date-fns": "^4.1.0",
"diff": "^8.0.2",
"framer-motion": "^12.12.1",
"github-markdown-css": "^5.4.0",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.563.0",
"marked": "^16.1.1",
"motion": "^12.38.0",
"next": "16.1.6",
"next-themes": "^0.4.6",
"node-ts-cache": "^4.4.0",
"node-ts-cache-storage-memory": "^4.4.0",
"papaparse": "^5.5.3",
"pg": "^8.11.3",
"postcss": "^8",
"posthog-js": "1.127.0",
@ -67,7 +72,6 @@
"react-dom": "19.2.4",
"react-hook-form": "^7.48.2",
"react-markdown": "^9.0.1",
"react-papaparse": "^4.4.0",
"react-resizable-panels": "^4.6.4",
"react-syntax-highlighter": "^16.1.0",
"remark-gfm": "^4.0.0",
@ -80,8 +84,10 @@
"zod": "^3.22.4"
},
"devDependencies": {
"@next/bundle-analyzer": "^16.2.2",
"@testing-library/react": "^16.0.0",
"@types/jsonwebtoken": "^9.0.10",
"@types/papaparse": "^5.5.2",
"@vitejs/plugin-react": "^4.3.1",
"autoprefixer": "^10.0.1",
"baseline-browser-mapping": "^2.9.11",

BIN
js/cf-webapp/roadmap.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 511 KiB

View file

@ -11,8 +11,10 @@ Sentry.init({
? "https://0fa0f40b2d709e4f1eb9aac76ff9e6be@o4506833230561280.ingest.us.sentry.io/4506833279582208"
: undefined,
// Adjust this value in production, or use tracesSampler for greater control
tracesSampleRate: 1,
tracesSampleRate: isProduction ? 0.1 : 1,
// Let the custom OTel setup in src/instrumentation.ts manage OpenTelemetry
skipOpenTelemetrySetup: true,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,

View file

@ -39,7 +39,7 @@ export async function SubmitFirstOnboardingPage(
custom_pain_point: customOptionInput,
},
})
await posthog?.shutdown()
await posthog?.flush()
await submitOnboardingQuestions(user_id, email)
// Check for saved redirect URL after onboarding completion
@ -81,7 +81,7 @@ export async function SubmitSkipOnboardingPage(): Promise<void> {
username: nickname,
},
})
await posthog?.shutdown()
await posthog?.flush()
await markUserCompletedOnboarding(user_id)
// Checking for saved redirect URL after onboarding completion

View file

@ -31,5 +31,5 @@ export async function SubmitSecondOnboardingPage(
...(colleagueInviteEmail && { colleague_invite_email: colleagueInviteEmail }),
},
})
await posthog?.shutdown()
await posthog?.flush()
}

View file

@ -2,7 +2,7 @@
import { useMemo, useState, useEffect, type ReactNode } from "react"
import { useRouter } from "next/navigation"
import { AnimatePresence, motion } from "framer-motion"
import { AnimatePresence, motion } from "motion/react"
import {
ArrowRight,
ArrowRightCircle,

View file

@ -4,11 +4,10 @@ import { auth0 } from "@/lib/auth0"
import { CreateApiKeyDialog } from "./dialog-create-api-key"
import { Separator } from "@/components/ui/separator"
import { ApiKeyTable } from "./api-key-table"
import { type cf_api_keys, PrismaClient } from "@prisma/client"
import { type cf_api_keys } from "@prisma/client"
import PostHogClient from "@/lib/posthog"
import { VS_CODE_KEY_NAME } from "@codeflash-ai/common"
const prisma = new PrismaClient()
import { prisma } from "@/lib/prisma"
interface ApiKeyWithOrg extends cf_api_keys {
organization?: {
@ -41,10 +40,7 @@ export default async function APIKeyGenerator(): Promise<JSX.Element> {
// Fetch personal keys (no organization) and keys from user's organizations
const apiKeys: ApiKeyWithOrg[] = await prisma.cf_api_keys.findMany({
where: {
OR: [
{ user_id: userId, organization_id: null },
{ organization_id: { in: userOrgIds } },
],
OR: [{ user_id: userId, organization_id: null }, { organization_id: { in: userOrgIds } }],
},
include: {
organization: {
@ -69,7 +65,7 @@ export default async function APIKeyGenerator(): Promise<JSX.Element> {
event: "webapp-loaded-api-keys",
})
await posthog?.shutdown()
await posthog?.flush()
return (
<div>

View file

@ -8,9 +8,8 @@ import {
VS_CODE_KEY_NAME,
} from "@codeflash-ai/common"
import { TokenLimitExceededError } from "./token-error"
import { PrismaClient } from "@prisma/client"
const prisma = new PrismaClient()
import { prisma } from "@/lib/prisma"
import { trackApiKeyCreated } from "@/lib/analytics/tracking"
export async function generateToken(
keyName: string,
@ -24,12 +23,16 @@ export async function generateToken(
try {
const token: string = await safeGenAndStoreAPITokenHash(keyName, userId, organizationId)
await trackApiKeyCreated(userId, { keyName, organizationId })
return { success: true, token, err: undefined }
} catch (error) {
if (error instanceof Error && error.message === "Token limit exceeded") {
return { success: false, err: new TokenLimitExceededError().message, token: undefined }
}
if (error instanceof Error && error.message === "User is not a member of the specified organization") {
if (
error instanceof Error &&
error.message === "User is not a member of the specified organization"
) {
return { success: false, err: error.message, token: undefined }
}
return {

View file

@ -1,7 +1,7 @@
"use server"
import { auth0 } from "@/lib/auth0"
import { BillingView } from "./billing-view"
import PostHogClient from "@/lib/posthog"
import { trackBillingPageViewed } from "@/lib/analytics/tracking"
import { SUBSCRIPTION_PLANS, checkAndResetSubscriptionPeriod } from "@codeflash-ai/common"
export default async function BillingPage() {
@ -10,13 +10,7 @@ export default async function BillingPage() {
const userId = session.user.sub
try {
// Track page view
const posthog = PostHogClient()
posthog?.capture({
distinctId: userId,
properties: { username: session.user.nickname },
event: "webapp-loaded-billing-page",
})
await posthog?.shutdown()
await trackBillingPageViewed(userId, { username: session.user.nickname })
// Get subscription info from database with lazy reset
const subscription = (await checkAndResetSubscriptionPeriod(userId)) || {

View file

@ -14,7 +14,7 @@ export default async function GettingStarted() {
event: "webapp-loaded-getting-started",
})
await posthog?.shutdown()
await posthog?.flush()
return <GettingStartedClient />
}

View file

@ -0,0 +1,118 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import { prisma } from "@codeflash-ai/common"
vi.mock("@/lib/server-action-timing", () => ({
withTiming: vi.fn((_name: string, fn: Function) => fn),
}))
vi.mock("@/lib/analytics/tracking", () => ({
trackMemberInvited: vi.fn(),
}))
const mockOrg = {
id: "org-1",
organization_members: [
{
id: "member-1",
user_id: "user-1",
role: "admin",
added_at: new Date("2024-01-15"),
user: {
github_username: "alice",
name: "Alice Smith",
email: "alice@example.com",
},
},
{
id: "member-2",
user_id: "user-2",
role: "member",
added_at: new Date("2024-02-01"),
user: {
github_username: "bob",
name: "Bob Jones",
email: "bob@example.com",
},
},
],
}
describe("getOrganizationMembers", () => {
let getOrganizationMembers: typeof import("../action").getOrganizationMembers
beforeEach(async () => {
const mod = await import("../action")
getOrganizationMembers = mod.getOrganizationMembers
})
describe("successful retrieval", () => {
it("returns members when user has access", async () => {
vi.mocked(prisma.organizations.findFirst).mockResolvedValue(mockOrg as any)
const result = await getOrganizationMembers("user-1", "org-1")
expect(result.success).toBe(true)
expect(result.data).toHaveLength(2)
})
it("maps nested organization_members to flat Member structure", async () => {
vi.mocked(prisma.organizations.findFirst).mockResolvedValue(mockOrg as any)
const result = await getOrganizationMembers("user-1", "org-1")
const member = result.data![0]
expect(member).toEqual({
id: "member-1",
user_id: "user-1",
username: "alice",
name: "Alice Smith",
email: "alice@example.com",
role: "admin",
added_at: new Date("2024-01-15"),
avatarUrl: "https://github.com/alice.png",
})
})
})
describe("access control", () => {
it("returns error when organization not found", async () => {
vi.mocked(prisma.organizations.findFirst).mockResolvedValue(null)
const result = await getOrganizationMembers("user-1", "org-1")
expect(result.success).toBe(false)
expect(result.error).toBe("Organization not found")
})
it("returns error when user is not in organization members", async () => {
vi.mocked(prisma.organizations.findFirst).mockResolvedValue(mockOrg as any)
const result = await getOrganizationMembers("unknown-user", "org-1")
expect(result.success).toBe(false)
expect(result.error).toBe("You don't have access to this organization")
})
})
describe("error handling", () => {
it("returns error response when Prisma throws", async () => {
vi.mocked(prisma.organizations.findFirst).mockRejectedValue(
new Error("Connection failed"),
)
const result = await getOrganizationMembers("user-1", "org-1")
expect(result.success).toBe(false)
expect(result.error).toBe("Connection failed")
})
it("uses fallback message for non-Error exceptions", async () => {
vi.mocked(prisma.organizations.findFirst).mockRejectedValue("string error")
const result = await getOrganizationMembers("user-1", "org-1")
expect(result.success).toBe(false)
expect(result.error).toBe("Failed to get members")
})
})
})

View file

@ -8,14 +8,18 @@ import {
organizationMemberRepository,
prisma,
} from "@codeflash-ai/common"
import { withTiming } from "@/lib/server-action-timing"
import { trackMemberInvited } from "@/lib/analytics/tracking"
/**
* Get organization members
*/
export async function getOrganizationMembers(
currentUserId: string,
organizationId: string,
): Promise<ActionResponse<Member[]>> {
export const getOrganizationMembers = withTiming(
"getOrganizationMembers",
async (
currentUserId: string,
organizationId: string,
): Promise<ActionResponse<Member[]>> => {
try {
const org = await prisma.organizations.findFirst({
where: { id: organizationId },
@ -58,7 +62,8 @@ export async function getOrganizationMembers(
console.error("Failed to get organization members:", error)
return createErrorResponse(error instanceof Error ? error.message : "Failed to get members")
}
}
},
)
/**
* Add a member to organization
@ -121,6 +126,14 @@ export async function addOrganizationMember(
added_by: currentUserId,
},
})
trackMemberInvited(currentUserId, {
invitedUsername: invitedUser.username,
role,
scope: "organization",
targetId: organizationId,
})
return createSuccessResponse({
id: newMember.id,
user_id: newMember.user_id,

View file

@ -59,13 +59,14 @@ function OrganizationMembers() {
setCurrentUserId(data.userId)
const roleResult = await getCurrentUserRole(data.userId, currentOrg?.id)
const [roleResult, result] = await Promise.all([
getCurrentUserRole(data.userId, currentOrg?.id),
getOrganizationMembers(data.userId, currentOrg?.id),
])
if (roleResult.success && roleResult.data) {
setCurrentUserRole(roleResult.data.role)
}
const result = await getOrganizationMembers(data.userId, currentOrg?.id)
if (result.success && result.data) {
setMembers(result.data)
} else {
@ -103,10 +104,7 @@ function OrganizationMembers() {
setSuccess("Member added successfully!")
}
const handleUserAdd = async (
user: GitHubUserSearchResult,
role: "admin" | "member",
) => {
const handleUserAdd = async (user: GitHubUserSearchResult, role: "admin" | "member") => {
if (!currentOrg?.id) {
return { success: false, error: "No organization selected" }
}

View file

@ -0,0 +1,163 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import { prisma } from "@codeflash-ai/common"
import { getRepositoriesForAccountCached } from "@/lib/services/repository-utils"
import { trackRepositoryConnected } from "@/lib/analytics/tracking"
vi.mock("@/lib/server-action-timing", () => ({
withTiming: vi.fn((_name: string, fn: Function) => fn),
}))
vi.mock("@/lib/services/repository-utils", () => ({
getRepositoriesForAccountCached: vi.fn(),
}))
vi.mock("@/lib/analytics/tracking", () => ({
trackMemberInvited: vi.fn(),
trackRepositoryConnected: vi.fn(),
}))
const mockRepo = {
id: "repo-1",
github_repo_id: "12345",
name: "my-repo",
full_name: "myorg/my-repo",
is_private: false,
has_github_action: true,
created_at: new Date("2024-01-01"),
last_optimized: new Date("2024-06-01"),
optimizations_limit: 100,
optimizations_used: 50,
repository_members: [{ id: "rm-1" }, { id: "rm-2" }],
}
const mockPayload = { userId: "user-1", username: "testuser" }
describe("getRepositoryById", () => {
let getRepositoryById: typeof import("../action").getRepositoryById
beforeEach(async () => {
const mod = await import("../action")
getRepositoryById = mod.getRepositoryById
})
describe("parallel fetch", () => {
it("fetches repo and authorized repoIds concurrently", async () => {
vi.mocked(prisma.repositories.findFirst).mockResolvedValue(mockRepo as any)
vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
repoIds: ["repo-1"],
repos: [],
} as any)
vi.mocked(prisma.optimization_events.count).mockResolvedValue(5)
await getRepositoryById(mockPayload as any, "repo-1")
expect(prisma.repositories.findFirst).toHaveBeenCalledTimes(1)
expect(getRepositoriesForAccountCached).toHaveBeenCalledWith(mockPayload)
})
it("returns null when repo is not found", async () => {
vi.mocked(prisma.repositories.findFirst).mockResolvedValue(null)
vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
repoIds: ["repo-1"],
repos: [],
} as any)
const result = await getRepositoryById(mockPayload as any, "repo-1")
expect(result).toBeNull()
})
it("returns null when repo is not in authorized list", async () => {
vi.mocked(prisma.repositories.findFirst).mockResolvedValue(mockRepo as any)
vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
repoIds: ["other-repo"],
repos: [],
} as any)
const result = await getRepositoryById(mockPayload as any, "repo-1")
expect(result).toBeNull()
})
})
describe("successful retrieval", () => {
beforeEach(() => {
vi.mocked(prisma.repositories.findFirst).mockResolvedValue(mockRepo as any)
vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
repoIds: ["repo-1"],
repos: [],
} as any)
})
it("returns RepositoryWithUsage with all required fields", async () => {
vi.mocked(prisma.optimization_events.count).mockResolvedValue(3)
const result = await getRepositoryById(mockPayload as any, "repo-1")
expect(result).toEqual({
id: "repo-1",
github_repo_id: "12345",
name: "my-repo",
full_name: "myorg/my-repo",
is_private: false,
is_active: true,
has_github_action: true,
created_at: new Date("2024-01-01"),
last_optimized: new Date("2024-06-01"),
optimizations_limit: 100,
optimizations_used: 50,
organization: "myorg",
avatarUrl: "https://github.com/myorg.png",
membersCount: 2,
})
})
it("sets is_active to false when no recent events", async () => {
vi.mocked(prisma.optimization_events.count).mockResolvedValue(0)
const result = await getRepositoryById(mockPayload as any, "repo-1")
expect(result!.is_active).toBe(false)
})
it("sets is_active to true when recent events exist", async () => {
vi.mocked(prisma.optimization_events.count).mockResolvedValue(10)
const result = await getRepositoryById(mockPayload as any, "repo-1")
expect(result!.is_active).toBe(true)
})
})
describe("analytics tracking", () => {
beforeEach(() => {
vi.mocked(prisma.repositories.findFirst).mockResolvedValue(mockRepo as any)
vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
repoIds: ["repo-1"],
repos: [],
} as any)
vi.mocked(prisma.optimization_events.count).mockResolvedValue(1)
})
it("calls trackRepositoryConnected for user payloads", async () => {
await getRepositoryById(mockPayload as any, "repo-1")
expect(trackRepositoryConnected).toHaveBeenCalledWith("user-1", {
repositoryId: "repo-1",
repositoryName: "myorg/my-repo",
})
})
})
describe("error handling", () => {
it("returns null and logs when Prisma throws", async () => {
vi.spyOn(console, "error").mockImplementation(() => {})
vi.mocked(prisma.repositories.findFirst).mockRejectedValue(
new Error("timeout"),
)
vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
repoIds: ["repo-1"],
repos: [],
} as any)
const result = await getRepositoryById(mockPayload as any, "repo-1")
expect(result).toBeNull()
})
})
})

View file

@ -1,12 +1,14 @@
"use server"
import * as Sentry from "@sentry/node"
import * as Sentry from "@sentry/nextjs"
import { AccountPayload, createOrUpdateUser, getUserById, prisma } from "@codeflash-ai/common"
import { eachDayOfInterval, startOfDay } from "date-fns"
import { GitHubUserSearchResult, Member, UserRole } from "@/lib/types"
import { ActionResponse, createErrorResponse, createSuccessResponse } from "@/lib/action-response"
import { RepositoryWithUsage } from "@/app/dashboard/action"
import { getRepositoriesForAccountCached } from "@/lib/services/repository-utils"
import { withTiming } from "@/lib/server-action-timing"
import { trackMemberInvited, trackRepositoryConnected } from "@/lib/analytics/tracking"
export async function getOptimizationsTimeSeriesData(repoId: string, onlySuccessful?: boolean) {
try {
@ -158,53 +160,61 @@ export async function getActiveUserLeaderboardLast30DaysForRepo(
}))
}
export async function getRepositoryById(
payload: AccountPayload,
repoId: string,
): Promise<RepositoryWithUsage | null> {
try {
const repo = await prisma.repositories.findFirst({
where: {
id: repoId,
},
include: {
repository_members: true,
},
})
const repoIds = await (await getRepositoriesForAccountCached(payload)).repoIds
export const getRepositoryById = withTiming(
"getRepositoryById",
async (payload: AccountPayload, repoId: string): Promise<RepositoryWithUsage | null> => {
try {
// Fetch repo and authorized repoIds in parallel
const [repo, { repoIds }] = await Promise.all([
prisma.repositories.findFirst({
where: { id: repoId },
include: { repository_members: true },
}),
getRepositoriesForAccountCached(payload),
])
if (!repo || !repoIds.includes(repo.id)) return null
if (!repo || !repoIds.includes(repo.id)) return null
const recentEventCount = await prisma.optimization_events.count({
where: {
repository_id: repo.id,
created_at: {
gte: new Date(Date.now() - 30 * 24 * 60 * 60 * 1000),
const recentEventCount = await prisma.optimization_events.count({
where: {
repository_id: repo.id,
created_at: {
gte: new Date(Date.now() - 30 * 24 * 60 * 60 * 1000),
},
},
},
})
})
return {
id: repo.id,
github_repo_id: repo.github_repo_id,
name: repo.name,
full_name: repo.full_name,
is_private: repo.is_private,
is_active: recentEventCount > 0,
has_github_action: repo.has_github_action,
created_at: repo.created_at,
last_optimized: repo.last_optimized,
optimizations_limit: repo.optimizations_limit,
optimizations_used: repo.optimizations_used,
organization: repo.full_name.split("/")[0],
avatarUrl: `https://github.com/${repo.full_name.split("/")[0]}.png`,
membersCount: repo.repository_members.length,
// Track repository view as a connection/engagement signal
const userId = "userId" in payload ? payload.userId : undefined
if (userId) {
trackRepositoryConnected(userId, {
repositoryId: repo.id,
repositoryName: repo.full_name,
})
}
return {
id: repo.id,
github_repo_id: repo.github_repo_id,
name: repo.name,
full_name: repo.full_name,
is_private: repo.is_private,
is_active: recentEventCount > 0,
has_github_action: repo.has_github_action,
created_at: repo.created_at,
last_optimized: repo.last_optimized,
optimizations_limit: repo.optimizations_limit,
optimizations_used: repo.optimizations_used,
organization: repo.full_name.split("/")[0],
avatarUrl: `https://github.com/${repo.full_name.split("/")[0]}.png`,
membersCount: repo.repository_members.length,
}
} catch (error) {
console.error("Failed to fetch repository by ID:", error)
return null
}
} catch (error) {
console.error("Failed to fetch repository by ID:", error)
return null
}
}
},
)
export async function addRepositoryMemberById(
currentUserId: string,
@ -265,6 +275,13 @@ export async function addRepositoryMemberById(
},
})
trackMemberInvited(currentUserId, {
invitedUsername: invitedUser.username,
role,
scope: "repository",
targetId: repoId,
})
return createSuccessResponse({
id: newMember.id,
user_id: newMember.user_id,

View file

@ -576,9 +576,22 @@ function RepositoryDetail() {
setRepository(currentRepo)
const totalAttempts = await getUserOptimizationCountByRepo(repositoryId)
const successfulAttempts = await getUserOptimizationSuccessfulCountByRepo(repositoryId)
const optimizationsOverTime = await getOptimizationsTimeSeriesData(repositoryId, false)
// Fetch all statistics in parallel - these are all independent queries
const [
totalAttempts,
successfulAttempts,
optimizationsOverTime,
successfulOptimizationsOverTime,
prData,
leaderboardData,
] = await Promise.all([
getUserOptimizationCountByRepo(repositoryId),
getUserOptimizationSuccessfulCountByRepo(repositoryId),
getOptimizationsTimeSeriesData(repositoryId, false),
getOptimizationsTimeSeriesData(repositoryId, true),
getPullRequestEventTimeSeriesData(selectedPrYear, repositoryId),
getActiveUserLeaderboardLast30DaysForRepo(repositoryId),
])
if (Array.isArray(optimizationsOverTime) && optimizationsOverTime.length > 0) {
const optimizationValues = optimizationsOverTime.map(item => item?.count || 0)
@ -590,11 +603,6 @@ function RepositoryDetail() {
setOptimizationsTrendDates([])
}
const successfulOptimizationsOverTime = await getOptimizationsTimeSeriesData(
repositoryId,
true,
)
if (
Array.isArray(successfulOptimizationsOverTime) &&
successfulOptimizationsOverTime.length > 0
@ -608,16 +616,12 @@ function RepositoryDetail() {
setSuccessfulOptimizationsTrendDates([])
}
const prData = await getPullRequestEventTimeSeriesData(selectedPrYear, repositoryId)
if (Array.isArray(prData)) {
setPrActivityData(prData)
} else {
setPrActivityData([])
}
const leaderboardData = await getActiveUserLeaderboardLast30DaysForRepo(repositoryId)
if (Array.isArray(leaderboardData)) {
setActiveUsersData(leaderboardData)
} else {

View file

@ -6,6 +6,7 @@ import { getRepositoriesForAccountCached } from "@/lib/services/repository-utils
import { auth0 } from "@/lib/auth0"
import { AccountPayload, buildOptimizationOrCondition, prisma } from "@codeflash-ai/common"
import * as Sentry from "@sentry/nextjs"
import { trackOptimizationReviewed } from "@/lib/analytics/tracking"
export interface DiffContent {
oldContent: string
@ -156,31 +157,44 @@ export async function getOptimizationEventById({
trace_id,
...buildOptimizationOrCondition(payload, repoIds),
}
const event = await prisma.optimization_events.findFirst({
where,
include: {
repository: true,
},
})
if (event) {
// Fetch review_quality and review_explanation from optimization_features
const features = await prisma.optimization_features.findUnique({
where: { trace_id: event.trace_id },
// Fire both queries in parallel — features only needs trace_id, not the event result
const [event, features] = await Promise.all([
prisma.optimization_events.findFirst({
where,
include: {
repository: true,
},
}),
prisma.optimization_features.findUnique({
where: { trace_id },
select: {
review_quality: true,
review_explanation: true,
},
})
}),
])
return {
...event,
review_quality: features?.review_quality || null,
review_explanation: features?.review_explanation || null,
}
if (!event) {
return null
}
return event
// Track that this optimization was reviewed
const userId = "userId" in payload ? payload.userId : undefined
if (userId) {
trackOptimizationReviewed(userId, {
traceId: event.trace_id,
functionName: event.function_name,
repositoryName: event.repository?.full_name ?? null,
status: event.status,
})
}
return {
...event,
review_quality: features?.review_quality || null,
review_explanation: features?.review_explanation || null,
}
}
export async function saveOptimizationChanges({
eventId,

View file

@ -5,7 +5,16 @@ import { useParams, useRouter } from "next/navigation"
import { ArrowLeft, Zap, Loader2, AlertTriangle } from "lucide-react"
import { getOptimizationEventById } from "../action"
import { getUserIdAndUsername } from "@/app/utils/auth"
import { LineProfilerView } from "@/components/LineProfiler"
import dynamic from "next/dynamic"
import { Skeleton } from "@/components/ui/skeleton"
const LineProfilerView = dynamic(
() => import("@/components/LineProfiler").then(mod => mod.LineProfilerView),
{
ssr: false,
loading: () => <Skeleton className="h-full w-full" />,
},
)
import { useViewMode } from "@/app/app/ViewModeContext"
import { toast } from "sonner"

View file

@ -0,0 +1,312 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import { prisma, buildOptimizationOrCondition } from "@codeflash-ai/common"
import { getRepositoriesForAccountCached } from "@/lib/services/repository-utils"
// Stub the timing wrapper so withTiming(name, fn) returns fn unchanged —
// the tests below exercise the wrapped server action directly.
vi.mock("@/lib/server-action-timing", () => ({
  withTiming: vi.fn((_name: string, fn: Function) => fn),
}))
// Mocked so each test controls which repo IDs the account can access.
vi.mock("@/lib/services/repository-utils", () => ({
  getRepositoriesForAccountCached: vi.fn(),
}))

// Fixture: the authenticated account payload passed to the action under test.
const mockPayload = { userId: "user-1", username: "testuser" }
// Fixture: repo IDs the mocked getRepositoriesForAccountCached reports as accessible.
const mockRepoIds = ["repo-1", "repo-2"]
// Fixture: two optimization_events rows with their joined repository records.
// trace-1 has a matching entry in mockFeatures below; trace-2 does not,
// which lets tests assert the null fallback for missing review data.
const mockEvents = [
  {
    id: "evt-1",
    trace_id: "trace-1",
    function_name: "calculate",
    file_path: "src/utils.py",
    repository_id: "repo-1",
    status: "approved",
    is_staging: true,
    created_at: new Date("2024-06-01"),
    repository: { id: "repo-1", full_name: "org/repo", name: "repo" },
  },
  {
    id: "evt-2",
    trace_id: "trace-2",
    function_name: "process",
    file_path: "src/main.py",
    repository_id: "repo-2",
    status: "pending",
    is_staging: true,
    created_at: new Date("2024-06-02"),
    repository: { id: "repo-2", full_name: "org/repo2", name: "repo2" },
  },
]
// Fixture: optimization_features row for trace-1 only (see note above).
const mockFeatures = [
  {
    trace_id: "trace-1",
    review_quality: "high",
    review_explanation: "Great optimization",
  },
]
/**
 * Unit tests for the getAllOptimizationEvents server action.
 *
 * The action has two query paths (names taken from the nested describes):
 *  - Path A: raw SQL via prisma.$queryRawUnsafe, used when sort or filter
 *    touches review_quality (stored in optimization_features, not events).
 *  - Path B: standard Prisma findMany/count, with a single batched
 *    optimization_features lookup (the anti-N+1 assertion below).
 *
 * NOTE(review): prisma and buildOptimizationOrCondition are driven via
 * vi.mocked() without a vi.mock() call in this file — presumably
 * "@codeflash-ai/common" is auto-mocked in a shared setup file; confirm
 * against the vitest config.
 */
describe("getAllOptimizationEvents", () => {
  let getAllOptimizationEvents: typeof import("../action").getAllOptimizationEvents

  beforeEach(async () => {
    // Default: the account has access to both fixture repos.
    vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
      repoIds: mockRepoIds,
      repos: [],
    } as any)
    vi.mocked(buildOptimizationOrCondition).mockReturnValue({})
    // Import after mocks are registered so withTiming is already stubbed
    // when the module's top-level wrapping runs.
    const mod = await import("../action")
    getAllOptimizationEvents = mod.getAllOptimizationEvents
  })

  describe("Path B: standard Prisma query", () => {
    it("calls findMany and count in parallel", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue(mockEvents as any)
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(2)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      await getAllOptimizationEvents({ payload: mockPayload as any })
      expect(prisma.optimization_events.findMany).toHaveBeenCalledTimes(1)
      expect(prisma.optimization_events.count).toHaveBeenCalledTimes(1)
    })

    it("batch-fetches optimization_features by trace_id array (not N+1)", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue(mockEvents as any)
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(2)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue(mockFeatures as any)
      await getAllOptimizationEvents({ payload: mockPayload as any })
      // Single batch query with all trace IDs — NOT one per event
      expect(prisma.optimization_features.findMany).toHaveBeenCalledTimes(1)
      expect(prisma.optimization_features.findMany).toHaveBeenCalledWith({
        where: { trace_id: { in: ["trace-1", "trace-2"] } },
        select: {
          trace_id: true,
          review_quality: true,
          review_explanation: true,
        },
      })
    })

    it("merges review_quality into events", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue(mockEvents as any)
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(2)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue(mockFeatures as any)
      const result = await getAllOptimizationEvents({ payload: mockPayload as any })
      // trace-1 has features; trace-2 does not, so its fields fall back to null.
      expect(result.events[0].review_quality).toBe("high")
      expect(result.events[0].review_explanation).toBe("Great optimization")
      expect(result.events[1].review_quality).toBeNull()
    })

    it("returns totalCount from count query", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue([])
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(42)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      const result = await getAllOptimizationEvents({ payload: mockPayload as any })
      expect(result.totalCount).toBe(42)
    })

    it("applies pagination with skip and take", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue([])
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(0)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      await getAllOptimizationEvents({
        payload: mockPayload as any,
        page: 3,
        pageSize: 25,
      })
      expect(prisma.optimization_events.findMany).toHaveBeenCalledWith(
        expect.objectContaining({
          skip: 50, // (3 - 1) * 25
          take: 25,
        }),
      )
    })

    it("uses default sort (created_at desc) when no sort provided", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue([])
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(0)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      await getAllOptimizationEvents({ payload: mockPayload as any })
      expect(prisma.optimization_events.findMany).toHaveBeenCalledWith(
        expect.objectContaining({
          orderBy: { created_at: "desc" },
        }),
      )
    })

    it("applies search filter", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue([])
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(0)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      await getAllOptimizationEvents({
        payload: mockPayload as any,
        search: "calc",
      })
      // Inspect the where clause handed to Prisma rather than the result,
      // since the mock ignores filtering.
      const callArgs = vi.mocked(prisma.optimization_events.findMany).mock.calls[0][0] as any
      const andClause = callArgs.where.AND
      expect(andClause).toBeDefined()
      expect(andClause.length).toBeGreaterThan(0)
      // Search should include OR across function_name, file_path, repository.full_name
      const orClause = andClause.find((c: any) => c.OR)?.OR
      expect(orClause).toHaveLength(3)
      expect(orClause[0]).toEqual({
        function_name: { contains: "calc", mode: "insensitive" },
      })
    })

    it("applies repository_id filter", async () => {
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue([])
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(0)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      await getAllOptimizationEvents({
        payload: mockPayload as any,
        filter: { repository_id: "repo-1" },
      })
      const callArgs = vi.mocked(prisma.optimization_events.findMany).mock.calls[0][0] as any
      const andClause = callArgs.where.AND
      expect(andClause).toBeDefined()
      expect(andClause).toContainEqual({ repository_id: "repo-1" })
    })
  })

  describe("Path A: raw SQL query (review_quality sort/filter)", () => {
    it("triggers when sort includes review_quality", async () => {
      // First $queryRawUnsafe call returns events, second returns the count.
      vi.mocked(prisma.$queryRawUnsafe)
        .mockResolvedValueOnce([]) // events
        .mockResolvedValueOnce([{ count: BigInt(0) }]) // count
      await getAllOptimizationEvents({
        payload: mockPayload as any,
        sort: { review_quality: "desc" },
      })
      expect(prisma.$queryRawUnsafe).toHaveBeenCalledTimes(2)
      // Should NOT use standard Prisma findMany
      expect(prisma.optimization_events.findMany).not.toHaveBeenCalled()
    })

    it("triggers when filter includes review_quality", async () => {
      vi.mocked(prisma.$queryRawUnsafe)
        .mockResolvedValueOnce([])
        .mockResolvedValueOnce([{ count: BigInt(0) }])
      await getAllOptimizationEvents({
        payload: mockPayload as any,
        filter: { review_quality: "high" },
      })
      expect(prisma.$queryRawUnsafe).toHaveBeenCalledTimes(2)
    })

    it("returns correct totalCount from BigInt conversion", async () => {
      // Postgres COUNT(*) comes back as bigint; the action must convert to number.
      vi.mocked(prisma.$queryRawUnsafe)
        .mockResolvedValueOnce([])
        .mockResolvedValueOnce([{ count: BigInt(99) }])
      const result = await getAllOptimizationEvents({
        payload: mockPayload as any,
        sort: { review_quality: "asc" },
      })
      expect(result.totalCount).toBe(99)
    })

    it("maps JOIN results to include repository object", async () => {
      // Raw rows carry repo_* columns from the LEFT JOIN; the action must
      // reshape them into a nested repository object.
      const rawEvents = [
        {
          id: "evt-1",
          trace_id: "trace-1",
          review_quality: "high",
          review_explanation: "Good",
          repo_full_name: "org/repo",
          repo_name: "repo",
          repo_id: "repo-1",
        },
      ]
      vi.mocked(prisma.$queryRawUnsafe)
        .mockResolvedValueOnce(rawEvents)
        .mockResolvedValueOnce([{ count: BigInt(1) }])
      const result = await getAllOptimizationEvents({
        payload: mockPayload as any,
        sort: { review_quality: "desc" },
      })
      expect(result.events[0].repository).toEqual({
        id: "repo-1",
        full_name: "org/repo",
        name: "repo",
      })
    })

    it("sets repository to null when repo_id is missing", async () => {
      const rawEvents = [
        {
          id: "evt-1",
          trace_id: "trace-1",
          review_quality: null,
          review_explanation: null,
          repo_full_name: null,
          repo_name: null,
          repo_id: null,
        },
      ]
      vi.mocked(prisma.$queryRawUnsafe)
        .mockResolvedValueOnce(rawEvents)
        .mockResolvedValueOnce([{ count: BigInt(1) }])
      const result = await getAllOptimizationEvents({
        payload: mockPayload as any,
        sort: { review_quality: "desc" },
      })
      expect(result.events[0].repository).toBeNull()
    })

    it("includes LEFT JOIN in raw SQL queries", async () => {
      vi.mocked(prisma.$queryRawUnsafe)
        .mockResolvedValueOnce([])
        .mockResolvedValueOnce([{ count: BigInt(0) }])
      await getAllOptimizationEvents({
        payload: mockPayload as any,
        sort: { review_quality: "desc" },
      })
      const sql = vi.mocked(prisma.$queryRawUnsafe).mock.calls[0][0] as string
      expect(sql).toContain("LEFT JOIN optimization_features")
      expect(sql).toContain("LEFT JOIN repositories")
    })
  })

  describe("edge cases", () => {
    it("handles empty repoIds", async () => {
      // Account with no accessible repos should yield an empty result,
      // not an error.
      vi.mocked(getRepositoriesForAccountCached).mockResolvedValue({
        repoIds: [],
        repos: [],
      } as any)
      vi.mocked(prisma.optimization_events.findMany).mockResolvedValue([])
      vi.mocked(prisma.optimization_events.count).mockResolvedValue(0)
      vi.mocked(prisma.optimization_features.findMany).mockResolvedValue([])
      const result = await getAllOptimizationEvents({ payload: mockPayload as any })
      expect(result.events).toEqual([])
    })
  })
})

View file

@ -1,11 +1,12 @@
"use server"
import { getRepositoriesForAccountCached } from "@/lib/services/repository-utils"
import { withTiming } from "@/lib/server-action-timing"
import { AccountPayload, buildOptimizationOrCondition, prisma } from "@codeflash-ai/common"
export async function getRepositoriesWithStagingEvents(
payload: AccountPayload,
): Promise<Array<{ id: string; full_name: string }>> {
const { repoIds, repos: allRepos } = await getRepositoriesForAccountCached(payload)
export const getRepositoriesWithStagingEvents = withTiming(
"getRepositoriesWithStagingEvents",
async (payload: AccountPayload): Promise<Array<{ id: string; full_name: string }>> => {
const { repoIds, repos: allRepos } = await getRepositoriesForAccountCached(payload)
if (repoIds.length === 0) {
return []
@ -29,23 +30,26 @@ export async function getRepositoriesWithStagingEvents(
full_name: repo.full_name,
}))
.sort((a, b) => a.full_name.localeCompare(b.full_name))
}
},
)
export async function getAllOptimizationEvents({
payload,
search,
filter,
sort,
page = 1,
pageSize = 10,
}: {
payload: AccountPayload
search?: string
filter?: Record<string, any>
sort?: { [key: string]: "asc" | "desc" }
page?: number
pageSize?: number
}) {
export const getAllOptimizationEvents = withTiming(
"getAllOptimizationEvents",
async ({
payload,
search,
filter,
sort,
page = 1,
pageSize = 10,
}: {
payload: AccountPayload
search?: string
filter?: Record<string, any>
sort?: { [key: string]: "asc" | "desc" }
page?: number
pageSize?: number
}) => {
const repoIds = (await getRepositoriesForAccountCached(payload)).repoIds
const where: any = {
@ -168,83 +172,84 @@ export async function getAllOptimizationEvents({
orderByClauses.push("oe.created_at DESC")
}
const orderByClause = orderByClauses.join(", ")
const events = await prisma.$queryRawUnsafe<any[]>(
`
SELECT
oe.*,
of.review_quality,
of.review_explanation
FROM optimization_events oe
LEFT JOIN optimization_features of ON oe.trace_id = of.trace_id
LEFT JOIN repositories r ON oe.repository_id = r.id
WHERE ${whereClause}
ORDER BY ${orderByClause}
LIMIT $${paramIndex} OFFSET $${paramIndex + 1}
`,
...params,
pageSize,
(page - 1) * pageSize,
)
// Get total count
const countResult = await prisma.$queryRawUnsafe<[{ count: bigint }]>(
`
SELECT COUNT(*) as count
FROM optimization_events oe
LEFT JOIN optimization_features of ON oe.trace_id = of.trace_id
LEFT JOIN repositories r ON oe.repository_id = r.id
WHERE ${whereClause}
`,
...params,
)
const [events, countResult] = await Promise.all([
prisma.$queryRawUnsafe<any[]>(
`
SELECT
oe.*,
of.review_quality,
of.review_explanation,
r.full_name as repo_full_name,
r.name as repo_name,
r.id as repo_id
FROM optimization_events oe
LEFT JOIN optimization_features of ON oe.trace_id = of.trace_id
LEFT JOIN repositories r ON oe.repository_id = r.id
WHERE ${whereClause}
ORDER BY ${orderByClause}
LIMIT $${paramIndex} OFFSET $${paramIndex + 1}
`,
...params,
pageSize,
(page - 1) * pageSize,
),
prisma.$queryRawUnsafe<[{ count: bigint }]>(
`
SELECT COUNT(*) as count
FROM optimization_events oe
LEFT JOIN optimization_features of ON oe.trace_id = of.trace_id
LEFT JOIN repositories r ON oe.repository_id = r.id
WHERE ${whereClause}
`,
...params,
),
])
const totalCount = Number(countResult[0].count)
// Fetch repository data for the events
const eventsWithRepo = await Promise.all(
events.map(async event => {
if (event.repository_id) {
const repository = await prisma.repositories.findUnique({
where: { id: event.repository_id },
})
return { ...event, repository }
}
return { ...event, repository: null }
}),
)
// Repository data is already included from the JOIN
const eventsWithRepo = events.map(event => ({
...event,
repository: event.repo_id ? { id: event.repo_id, full_name: event.repo_full_name, name: event.repo_name } : null,
}))
return { events: eventsWithRepo, totalCount }
} else {
// Standard Prisma query with native orderBy
const orderBy = sort || { created_at: "desc" }
const events = await prisma.optimization_events.findMany({
where,
orderBy,
skip: (page - 1) * pageSize,
take: pageSize,
include: {
repository: true,
const [events, totalCount] = await Promise.all([
prisma.optimization_events.findMany({
where,
orderBy,
skip: (page - 1) * pageSize,
take: pageSize,
include: {
repository: true,
},
}),
prisma.optimization_events.count({ where }),
])
// Batch-fetch review data for all events in a single query
const traceIds = events.map(e => e.trace_id)
const features = await prisma.optimization_features.findMany({
where: { trace_id: { in: traceIds } },
select: {
trace_id: true,
review_quality: true,
review_explanation: true,
},
})
const featuresMap = new Map(features.map(f => [f.trace_id, f]))
// Fetch review_quality and review_explanation for each event
const eventsWithReviewData = await Promise.all(
events.map(async event => {
const features = await prisma.optimization_features.findUnique({
where: { trace_id: event.trace_id },
select: {
review_quality: true,
review_explanation: true,
},
})
return {
...event,
review_quality: features?.review_quality || null,
review_explanation: features?.review_explanation || null,
}
}),
)
const totalCount = await prisma.optimization_events.count({ where })
const eventsWithReviewData = events.map(event => {
const f = featuresMap.get(event.trace_id)
return {
...event,
review_quality: f?.review_quality || null,
review_explanation: f?.review_explanation || null,
}
})
return { events: eventsWithReviewData, totalCount }
}
}
},
)

View file

@ -1,10 +1,8 @@
import { NextRequest, NextResponse } from "next/server"
import { PrismaClient } from "@prisma/client"
const prisma = new PrismaClient()
import { prisma } from "@/lib/prisma"
export async function POST(request: NextRequest, props: { params: Promise<{ trace_id: string }> }) {
const params = await props.params;
const params = await props.params
try {
const { trace_id } = params
const body = await request.json()

View file

@ -0,0 +1,757 @@
import { canAccessMembench } from "@/app/utils/auth"
import { redirect } from "next/navigation"
import { MembenchToggle } from "@/components/membench/membench-toggle"
import {
PeakMemoryChart,
AllocatorChart,
HeadroomChart,
MaxAllocChart,
} from "@/components/membench/membench-charts"
export const metadata = { title: "Memory Benchmark — Unstructured" }

/* ── static data ────────────────────────────────────────────────────── */

// Hard-coded benchmark results for the 18-test common partition suite,
// one entry per environment (baseline vs. current).
const SUITE = {
  baseline: {
    peak_gb: 1.66, // peak RSS high-water mark, GB
    total_gb: 16.398, // total bytes allocated over the run, GB
    allocs: 5_585_979, // allocation count
    wall_s: 76.0, // wall-clock time, seconds
    max_alloc_mb: 268, // largest single allocation, MB
    tests: 18,
    passed: 13,
    failed: 5,
  },
  current: {
    peak_gb: 1.473,
    total_gb: 20.239,
    allocs: 6_210_809,
    wall_s: 86.0,
    max_alloc_mb: 134,
    tests: 18,
    passed: 13,
    failed: 5,
  },
}

// Percent change in peak memory (negative = improvement).
const PEAK_DELTA_PCT =
  ((SUITE.current.peak_gb - SUITE.baseline.peak_gb) / SUITE.baseline.peak_gb) * 100
// Absolute peak reduction converted from GB to MB.
const PEAK_DELTA_MB = Math.abs((SUITE.current.peak_gb - SUITE.baseline.peak_gb) * 1024)
// Percent change in the largest single allocation.
const MAX_ALLOC_DELTA_PCT =
  ((SUITE.current.max_alloc_mb - SUITE.baseline.max_alloc_mb) / SUITE.baseline.max_alloc_mb) * 100
// Knative production pod RAM limit used for headroom calculations, GB.
const POD_RAM_LIMIT_GB = 32

// Top allocating functions per environment: [function name, total GB allocated].
const TOP_ALLOC_BASELINE: [string, number][] = [
  ["_create_inference_session", 1.386],
  ["PIL Image.tobytes", 1.188],
  ["PIL Image.new", 1.001],
  ["load_prepare", 0.751],
  ["render", 0.649],
]
const TOP_ALLOC_CURRENT: [string, number][] = [
  ["PIL Image.tobytes", 1.802],
  ["PIL Image.new", 1.556],
  ["_create_inference_session", 1.328],
  ["load_prepare", 1.172],
  ["PIL Image.tobytes (2)", 0.889],
]

// Per-scenario peak/time comparison rows (pre-formatted display strings).
const SCENARIO_TABLE = [
  {
    scenario: "Full Suite (18 common tests)",
    bl_peak: "1.660 GB",
    cu_peak: "1.473 GB",
    delta: "-11.3%",
    bl_time: "76.0s",
    cu_time: "86.0s",
  },
  {
    scenario: "API hi_res (layout-parser-paper, 16p)",
    bl_peak: "1.515 GB",
    cu_peak: "1.419 GB",
    delta: "-6.3%",
    bl_time: "53.6s",
    cu_time: "60.2s",
  },
  {
    scenario: "od_only (Seeda Case Study)",
    bl_peak: "1.127 GB",
    cu_peak: "1.046 GB",
    delta: "-7.2%",
    bl_time: "1.96s",
    cu_time: "2.13s",
  },
]

// [parameter, value] rows describing the benchmark environment.
const ENV_TABLE = [
  ["VM", "Azure Standard_D8s_v5 (8 vCPU, 32 GB RAM)"],
  ["OS", "Ubuntu 20.04"],
  ["Python", "3.12"],
  ["Profiler", "memray --native (captures C/C++ malloc, mmap)"],
  ["Test Runner", "memray run --native -o {out}.bin --force -m pytest -v"],
  ["Baseline Env", "/home/krrt7/bench/baseline-core + baseline-env (pre-Feb 2026)"],
  ["Current Env", "/home/krrt7/bench/current-core + current-env (main)"],
  ["Pre-run Protocol", "VM reboot + 5-min idle wait (clean Azure telemetry window)"],
  ["Production Target", "Knative pods, 1 CPU / 32 GB RAM, Standard_D48s_v5 nodes"],
  ["Test Scope", "18 common partition tests (od_only, hi_res, pptx, docx)"],
]
/* ── page ───────────────────────────────────────────────────────────── */

/**
 * Server-rendered memory-benchmark report page.
 *
 * Gated by canAccessMembench(): unauthorized visitors are redirected to "/".
 * Renders a hero header, four headline metric cards, a toggle between the
 * executive and engineering views, and a footer. All figures come from the
 * static SUITE data above.
 */
export default async function MembenchPage() {
  // Access gate — redirect() throws, so nothing below runs for outsiders.
  const allowed = await canAccessMembench()
  if (!allowed) redirect("/")

  const b = SUITE.baseline
  const c = SUITE.current
  return (
    <div className="min-h-screen bg-white dark:bg-zinc-950 font-sans text-zinc-900 dark:text-zinc-200">
      {/* ── Hero ── */}
      <div className="border-b border-zinc-200 dark:border-zinc-800 bg-gradient-to-br from-zinc-100 via-green-50/40 to-zinc-100 dark:from-zinc-950 dark:via-[#0c1a0f] dark:to-zinc-950 px-6 pb-14 pt-16 text-center">
        <div
          className="text-[13px] font-bold text-green-600 dark:text-green-400 mb-3"
          style={{ letterSpacing: "0.15em" }}
        >
          UNSTRUCTURED
        </div>
        <h1
          className="text-[36px] font-extrabold tracking-tight text-zinc-900 dark:text-zinc-50"
          style={{ letterSpacing: "-0.02em" }}
        >
          Core Product Memory Benchmark
        </h1>
        <p className="mx-auto mt-3 max-w-[640px] text-[17px] text-zinc-500 dark:text-zinc-400">
          Peak RAM reduction measured with memray --native across the partition test suite
        </p>
        <div className="mt-6 flex flex-wrap items-center justify-center gap-6 text-[13px] text-zinc-400 dark:text-zinc-500">
          <span>April 2026</span>
          <span>|</span>
          <span>Baseline: pre-Feb 2026</span>
          <span>|</span>
          <span>18 common partition tests</span>
          <span>|</span>
          <span>Azure Standard_D8s_v5 VM</span>
        </div>
      </div>
      {/* ── Hero Metrics ── */}
      {/* Negative margin pulls the metric cards up over the hero gradient. */}
      <div
        className="mx-auto -mt-10 grid max-w-4xl grid-cols-2 gap-5 px-6 lg:grid-cols-4"
        style={{ position: "relative", zIndex: 1 }}
      >
        {[
          {
            value: `${PEAK_DELTA_PCT.toFixed(1)}%`,
            label: "Peak RAM",
            detail: `${b.peak_gb.toFixed(2)} GB → ${c.peak_gb.toFixed(2)} GB`,
          },
          {
            value: `${Math.round(PEAK_DELTA_MB)} MB`,
            label: "Absolute Reduction",
            detail: "Peak high-water mark savings",
          },
          {
            value: `${MAX_ALLOC_DELTA_PCT.toFixed(0)}%`,
            label: "Max Single Allocation",
            detail: `${b.max_alloc_mb} MB → ${c.max_alloc_mb} MB`,
          },
          {
            value: "0",
            label: "New Regressions",
            detail: `Same ${c.passed}/${c.tests} pass rate on both`,
          },
        ].map(m => (
          <div
            key={m.label}
            className="rounded-xl border border-zinc-200 dark:border-zinc-800 bg-white dark:bg-zinc-900 px-6 py-8 text-center shadow-sm dark:shadow-none"
          >
            <div
              className="font-mono leading-none text-green-600 dark:text-green-400"
              style={{ fontSize: "42px", fontWeight: 800, letterSpacing: "-0.02em" }}
            >
              {m.value}
            </div>
            <div className="mt-2 text-[15px] font-semibold text-zinc-800 dark:text-zinc-200">
              {m.label}
            </div>
            <div className="mt-1 text-[13px] text-zinc-500 dark:text-zinc-400">{m.detail}</div>
          </div>
        ))}
      </div>
      {/* ── Toggle + Views ── */}
      <div className="mx-auto max-w-4xl px-6 pb-20">
        <MembenchToggle execView={<ExecView />} engView={<EngView />} />
      </div>
      {/* ── Footer ── */}
      <div className="border-t border-zinc-200 dark:border-zinc-800 py-8 text-center">
        <div
          className="text-[11px] font-bold text-zinc-400 dark:text-zinc-500 mb-1"
          style={{ letterSpacing: "0.15em" }}
        >
          UNSTRUCTURED
        </div>
        {/* NOTE(review): "Benchmark April 2026" reads like a separator
            ("·" or "—") was lost between the title and the date — confirm
            against the design. */}
        <p className="text-xs text-zinc-400 dark:text-zinc-500">
          Core Product Memory Benchmark April 2026
        </p>
      </div>
    </div>
  )
}
/*
EXECUTIVE VIEW
*/

/**
 * Executive-audience view of the benchmark: scenario chart, plain-language
 * interpretation cards, pod headroom vs. the 32 GB limit, the largest-
 * allocation chart, a suite-level stats table, and next-step action items.
 * Reads only the static SUITE data and derived constants.
 *
 * NOTE(review): several prose strings below read "… GB a … reduction" /
 * "… in the baseline not regressions" — an em dash appears to have been
 * lost between the clauses (other strings in this file keep theirs).
 * Confirm against the original copy before editing.
 */
function ExecView() {
  const b = SUITE.baseline
  const c = SUITE.current
  return (
    <div className="space-y-14">
      {/* ── Peak Memory by Scenario ── */}
      <Section
        title="Peak Memory by Scenario"
        subtitle="High-water mark during document processing — the metric that determines OOM risk."
      >
        <Card>
          <PeakMemoryChart />
        </Card>
      </Section>
      {/* ── What Does This Mean? ── */}
      <Section title="What Does This Mean?" subtitle="How these numbers affect production pods.">
        <div className="flex flex-wrap gap-5">
          <Card className="flex-1 min-w-[260px]">
            <h4 className="text-base font-bold text-zinc-900 dark:text-zinc-200">Lower OOM risk</h4>
            <p className="mt-2 text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">
              Peak memory during the full partition suite dropped from {b.peak_gb.toFixed(2)} GB to{" "}
              {c.peak_gb.toFixed(2)} GB a {Math.round(PEAK_DELTA_MB)} MB reduction. For Knative
              pods with a 32 GB RAM limit, this means more headroom before the OOM killer terminates
              the container.
            </p>
          </Card>
          <Card className="flex-1 min-w-[260px]">
            <h4 className="text-base font-bold text-zinc-900 dark:text-zinc-200">
              Halved largest allocation
            </h4>
            <p className="mt-2 text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">
              The single largest memory allocation dropped from {b.max_alloc_mb} MB to{" "}
              {c.max_alloc_mb} MB a 50% reduction. Large contiguous allocations are the primary
              cause of memory fragmentation and allocation failures even when total free memory
              appears sufficient.
            </p>
          </Card>
          <Card className="flex-1 min-w-[260px]">
            <h4 className="text-base font-bold text-zinc-900 dark:text-zinc-200">
              Zero regressions
            </h4>
            <p className="mt-2 text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">
              Both environments pass the same {c.passed} of {c.tests} partition tests. The{" "}
              {c.failed} failures are pre-existing docx edge cases present in the baseline not
              regressions from the optimization work.
            </p>
          </Card>
        </div>
      </Section>
      {/* ── Pod Headroom ── */}
      <Section
        title="Production Pod Headroom"
        subtitle="Current peak usage vs. the Knative pod RAM limit of 32 GB."
      >
        <Card>
          <HeadroomChart />
          <div className="flex flex-wrap gap-6 mt-4">
            <div>
              {/* Percent of the 32 GB pod limit consumed at peak. */}
              <div
                className="font-mono leading-none text-green-600 dark:text-green-400"
                style={{ fontSize: "32px", fontWeight: 800 }}
              >
                {((c.peak_gb / POD_RAM_LIMIT_GB) * 100).toFixed(1)}%
              </div>
              <div className="mt-1 text-[13px] text-zinc-500 dark:text-zinc-400">
                of pod limit used (current)
              </div>
            </div>
            <div>
              <div
                className="font-mono leading-none text-zinc-800 dark:text-zinc-200"
                style={{ fontSize: "32px", fontWeight: 800 }}
              >
                {(POD_RAM_LIMIT_GB - c.peak_gb).toFixed(1)} GB
              </div>
              <div className="mt-1 text-[13px] text-zinc-500 dark:text-zinc-400">
                headroom remaining
              </div>
            </div>
          </div>
          <p className="mt-4 rounded-lg bg-zinc-50 dark:bg-white/[0.03] p-3 text-xs text-zinc-400 dark:text-zinc-500">
            Note: Peak memory is measured per-process during document processing. Actual pod usage
            includes OS overhead, model weights in shared memory, and other sidecar containers.
            These figures represent the process-level high-water mark.
          </p>
        </Card>
      </Section>
      {/* ── Largest Single Allocation ── */}
      <Section
        title="Largest Single Allocation"
        subtitle="The biggest contiguous block requested in a single malloc/mmap call."
      >
        <Card>
          <MaxAllocChart />
        </Card>
      </Section>
      {/* ── Suite-Level Comparison ── */}
      <Section
        title="Suite-Level Comparison"
        subtitle="Aggregate metrics from 18 common partition tests."
      >
        <Card>
          <div className="overflow-x-auto">
            {/* header */}
            <div className="flex gap-4 border-b-2 border-zinc-200 dark:border-zinc-800 pb-2.5 text-[11px] font-bold uppercase tracking-wider text-zinc-600 dark:text-zinc-300">
              <div className="flex-1">Metric</div>
              <div className="w-36 text-right">Baseline</div>
              <div className="w-36 text-right">Current</div>
              <div className="w-20 text-center">Delta</div>
            </div>
            <StatRow
              label="Peak Memory"
              baseline={b.peak_gb}
              current={c.peak_gb}
              unit="GB"
              format={v => v.toFixed(3)}
              better="lower"
            />
            <StatRow
              label="Total Allocated"
              baseline={b.total_gb}
              current={c.total_gb}
              unit="GB"
              format={v => v.toFixed(1)}
              better="lower"
            />
            <StatRow
              label="Allocation Count"
              baseline={b.allocs}
              current={c.allocs}
              unit=""
              format={v => v.toLocaleString()}
              better="lower"
            />
            <StatRow
              label="Max Single Alloc"
              baseline={b.max_alloc_mb}
              current={c.max_alloc_mb}
              unit="MB"
              format={v => v.toFixed(0)}
              better="lower"
            />
            <StatRow
              label="Wall Time"
              baseline={b.wall_s}
              current={c.wall_s}
              unit="s"
              format={v => v.toFixed(1)}
              better="lower"
            />
            <StatRow
              label="Tests Passed"
              baseline={b.passed}
              current={c.passed}
              unit={`/ ${b.tests}`}
              format={v => v.toFixed(0)}
              better="higher"
            />
          </div>
          <p className="mt-4 text-xs text-zinc-400 dark:text-zinc-500">
            Total allocated increased because current uses more frequent smaller allocations peak
            (the OOM-risk metric) still decreased. This pattern indicates better memory recycling.
          </p>
        </Card>
      </Section>
      {/* ── Implications & Next Steps ── */}
      <Section title="Implications & Next Steps">
        <Card>
          <ActionItem
            text="Peak RAM reduced 11.3% — same workload fits in a smaller memory footprint"
            done
          />
          <ActionItem
            text="Max single allocation halved — lower fragmentation risk under memory pressure"
            done
          />
          <ActionItem text="Zero test regressions — safe to deploy without functional risk" done />
          <ActionItem text="Run full E2E suite benchmarks (all tests, not just common set) for comprehensive coverage" />
          <ActionItem text="Profile top allocators (PIL Image ops, ONNX sessions) for further reduction opportunities" />
          <ActionItem text="Evaluate reducing pod memory request from 32 GB based on production telemetry" />
        </Card>
      </Section>
    </div>
  )
}
/*
ENGINEERING VIEW
*/

/**
 * Engineering-audience view: per-scenario results table, top-allocator
 * charts and lists, key observations, environment table, methodology
 * write-up, and engineering action items. All content is the static
 * SCENARIO_TABLE / TOP_ALLOC_* / ENV_TABLE data.
 *
 * NOTE(review): ObservationCard and DataTable are referenced here but
 * defined elsewhere in this file (outside this block).
 */
function EngView() {
  return (
    <div className="space-y-14">
      {/* ── Per-Scenario Results ── */}
      <Section
        title="Per-Scenario Results"
        subtitle="Individual test scenarios measured with memray --native. VM rebooted + 5-min idle before each run."
      >
        <Card>
          <div className="overflow-x-auto">
            <table className="w-full text-sm">
              <thead>
                <tr className="border-b-2 border-zinc-200 dark:border-zinc-800 text-left text-[11px] font-bold uppercase tracking-wider text-green-600 dark:text-green-400">
                  <th className="py-2.5 pr-4">Scenario</th>
                  <th className="py-2.5 w-24 text-right">Baseline Peak</th>
                  <th className="py-2.5 w-24 text-right">Current Peak</th>
                  <th className="py-2.5 w-20 text-center">Delta</th>
                  <th className="py-2.5 w-20 text-right">BL Time</th>
                  <th className="py-2.5 w-20 text-right">CU Time</th>
                </tr>
              </thead>
              <tbody>
                {/* Zebra-striping: odd rows get the tinted background. */}
                {SCENARIO_TABLE.map((r, i) => (
                  <tr
                    key={r.scenario}
                    className={`border-b border-zinc-100 dark:border-zinc-800 ${i % 2 ? "bg-zinc-50/50 dark:bg-white/[0.02]" : ""}`}
                  >
                    <td className="py-2.5 pr-4 text-zinc-800 dark:text-zinc-200">{r.scenario}</td>
                    <td className="py-2.5 w-24 text-right font-mono text-zinc-500 dark:text-zinc-400">
                      {r.bl_peak}
                    </td>
                    <td className="py-2.5 w-24 text-right font-mono font-semibold text-zinc-800 dark:text-zinc-200">
                      {r.cu_peak}
                    </td>
                    <td className="py-2.5 w-20 text-center font-bold text-green-600 dark:text-green-400">
                      {r.delta}
                    </td>
                    <td className="py-2.5 w-20 text-right font-mono text-zinc-500 dark:text-zinc-400">
                      {r.bl_time}
                    </td>
                    <td className="py-2.5 w-20 text-right font-mono text-zinc-500 dark:text-zinc-400">
                      {r.cu_time}
                    </td>
                  </tr>
                ))}
              </tbody>
            </table>
          </div>
        </Card>
      </Section>
      {/* ── Top Memory Allocators ── */}
      <Section
        title="Top Memory Allocators"
        subtitle="Functions with highest total allocated bytes over the 18-test suite lifetime (memray --native)."
      >
        <Card>
          <AllocatorChart />
        </Card>
        <div className="flex flex-wrap gap-5 mt-5">
          <Card className="flex-1 min-w-[300px]">
            <div
              className="text-[11px] font-bold text-amber-600 dark:text-amber-400 mb-4"
              style={{ letterSpacing: "0.1em" }}
            >
              BASELINE TOP 5
            </div>
            {TOP_ALLOC_BASELINE.map(([name, size], i) => (
              <div
                key={name}
                className="flex items-center gap-2 border-b border-zinc-100 dark:border-zinc-800 py-2 last:border-b-0"
              >
                <span className="font-mono text-[13px] text-zinc-400 dark:text-zinc-500">
                  {i + 1}.
                </span>
                <span className="text-sm font-semibold text-zinc-800 dark:text-zinc-200">
                  {name}
                </span>
                <span className="font-mono text-[13px] text-zinc-500 dark:text-zinc-400 ml-auto">
                  {size.toFixed(3)} GB
                </span>
              </div>
            ))}
          </Card>
          <Card className="flex-1 min-w-[300px]">
            <div
              className="text-[11px] font-bold text-green-600 dark:text-green-400 mb-4"
              style={{ letterSpacing: "0.1em" }}
            >
              CURRENT TOP 5
            </div>
            {TOP_ALLOC_CURRENT.map(([name, size], i) => (
              <div
                key={name}
                className="flex items-center gap-2 border-b border-zinc-100 dark:border-zinc-800 py-2 last:border-b-0"
              >
                <span className="font-mono text-[13px] text-zinc-400 dark:text-zinc-500">
                  {i + 1}.
                </span>
                <span className="text-sm font-semibold text-zinc-800 dark:text-zinc-200">
                  {name}
                </span>
                <span className="font-mono text-[13px] text-zinc-500 dark:text-zinc-400 ml-auto">
                  {size.toFixed(3)} GB
                </span>
              </div>
            ))}
          </Card>
        </div>
      </Section>
      {/* ── Key Observations ── */}
      <Section title="Key Observations">
        <ObservationCard
          title="ONNX Session Overhead Down"
          badge="Improved"
          badgeColor="bg-green-500 text-zinc-950"
          borderColor="border-l-green-400"
          body="_create_inference_session dropped from #1 allocator (1.386 GB) in baseline to #3 (1.328 GB) in current. The ONNX Runtime session creation path allocates less overall, contributing to the peak reduction."
        />
        <ObservationCard
          title="PIL Image Operations Increased"
          badge="Expected"
          badgeColor="bg-amber-400 text-zinc-950"
          borderColor="border-l-amber-400"
          body="PIL Image.tobytes and Image.new increased in total allocation (e.g. tobytes: 1.188 → 1.802 GB). This reflects more frequent smaller image operations rather than fewer large ones — the pattern that reduces peak memory while increasing total throughput."
        />
        <ObservationCard
          title="Pre-Existing Test Failures (5 docx)"
          badge="Not a regression"
          badgeColor="bg-zinc-400 text-zinc-950"
          borderColor="border-l-zinc-400"
          body="5 of 18 tests fail on both baseline and current — all docx/pptx edge cases (but_not_when_the_partitioning_strategy_is_fast, PIL cannot recognize, Pillow can_only_read_the_image_on_Windows). These were excluded from the common test set count but still run; they are not related to the memory optimization work."
        />
      </Section>
      {/* ── Benchmark Environment ── */}
      <Section title="Benchmark Environment">
        <Card>
          <DataTable columns={["Parameter", "Value"]} rows={ENV_TABLE} />
        </Card>
      </Section>
      {/* ── Methodology ── */}
      <Section title="Methodology">
        <Card>
          <ol className="list-decimal pl-5 space-y-3 text-sm leading-relaxed text-zinc-700 dark:text-zinc-200">
            <li>
              VM rebooted before each environment&apos;s run to ensure clean memory state and enable
              Azure telemetry correlation
            </li>
            <li>
              5-minute idle wait after reboot for OS caches, Azure agents, and background processes
              to stabilize
            </li>
            <li>
              Each test suite runs under memray run --native, which instruments both Python
              allocations and native C/C++ allocations (malloc, calloc, realloc, mmap) via
              LD_PRELOAD
            </li>
            <li>
              memray stats extracts peak memory (high-water mark), total allocated, allocation
              count, wall time, and top allocating functions from the binary trace
            </li>
            <li>
              Common test set: 18 partition tests that exist in both baseline and current codebases,
              ensuring apples-to-apples comparison
            </li>
            <li>
              Identical test deselection applied to both: 6 baseline-only tests (docx/pptx edge
              cases not present in current) excluded via pytest -k filters
            </li>
          </ol>
        </Card>
      </Section>
      {/* ── Engineering Action Items ── */}
      <Section title="Engineering Action Items">
        <Card>
          <ActionItem text="Run full E2E benchmarks with all tests (not just common set) for both environments" />
          <ActionItem text="Add API pipeline tests (test_api_hi_res, test_api_od_only) to the standard benchmark suite" />
          <ActionItem text="Profile PIL Image.tobytes hot path — largest allocator in current (1.802 GB total)" />
          <ActionItem text="Investigate load_prepare growth (0.751 → 1.172 GB) for optimization opportunities" />
          <ActionItem text="Measure with production-scale documents (100+ page PDFs) to validate scaling behavior" />
          <ActionItem text="Correlate memray peaks with Azure pod-level metrics for production memory modeling" />
          <ActionItem text="Evaluate reducing Knative pod memory request from 32 GB based on observed peaks" />
        </Card>
      </Section>
    </div>
  )
}
/* ── shared components ──────────────────────────────────────────────── */

/**
 * Titled page section: an h2 heading, an optional subtitle paragraph,
 * and the section content spaced below.
 */
function Section(props: { title: string; subtitle?: string; children: React.ReactNode }) {
  const { title, subtitle, children } = props
  const headingStyle = { letterSpacing: "-0.01em" }
  return (
    <div>
      <h2 className="text-[22px] font-bold text-zinc-900 dark:text-zinc-200" style={headingStyle}>
        {title}
      </h2>
      {subtitle ? (
        <p className="mt-1.5 text-sm leading-relaxed text-zinc-500 dark:text-zinc-400">
          {subtitle}
        </p>
      ) : null}
      <div className="mt-6">{children}</div>
    </div>
  )
}
function Card({ children, className = "" }: { children: React.ReactNode; className?: string }) {
return (
<div
className={`rounded-xl border border-zinc-200 dark:border-zinc-800 bg-white dark:bg-zinc-900 shadow-sm dark:shadow-none ${className}`}
style={{ padding: "28px 32px" }}
>
{children}
</div>
)
}
/** ActionItem: one checklist row — filled green dot when done, hollow grey dot otherwise. */
function ActionItem(props: { text: string; done?: boolean }) {
  const isDone = props.done ?? false
  const dotClass = isDone
    ? "text-green-500 dark:text-green-400"
    : "text-zinc-400 dark:text-zinc-500"
  return (
    <div className="flex items-center gap-3 border-b border-zinc-100 dark:border-zinc-800 py-2.5 last:border-b-0">
      <span className={`text-sm ${dotClass}`}>{isDone ? "●" : "○"}</span>
      <span className="text-sm text-zinc-800 dark:text-zinc-200">{props.text}</span>
    </div>
  )
}
/**
 * StatRow: comparison row showing a label, baseline and current values, and a
 * percentage-delta badge. `better` selects which direction counts as an
 * improvement (green badge); defaults to "lower is better".
 */
function StatRow({
  label,
  baseline,
  current,
  unit,
  format,
  better = "lower",
}: {
  label: string
  baseline: number
  current: number
  unit: string
  format: (v: number) => string
  better?: "lower" | "higher"
}) {
  // Guard the division: a zero baseline would otherwise produce NaN/Infinity
  // and render a meaningless "NaN%" / "+Infinity%" badge.
  const delta = baseline === 0 ? 0 : ((current - baseline) / baseline) * 100
  const improved = better === "lower" ? delta < 0 : delta > 0
  // "—" mirrors the file's convention for unavailable values (see fmt()).
  const deltaText = baseline === 0 ? "—" : `${delta > 0 ? "+" : ""}${delta.toFixed(1)}%`
  return (
    <div className="flex items-center gap-4 border-b border-zinc-100 dark:border-zinc-800 py-3 last:border-b-0">
      <div className="flex-1 text-sm font-semibold text-zinc-800 dark:text-zinc-200">{label}</div>
      <div className="w-36 text-right font-mono text-sm text-zinc-500 dark:text-zinc-400">
        {format(baseline)} {unit}
      </div>
      <div className="w-36 text-right font-mono text-sm font-semibold text-zinc-800 dark:text-zinc-200">
        {format(current)} {unit}
      </div>
      <div className="w-20 text-center">
        <span
          className={`inline-block rounded-md px-2 py-0.5 text-xs font-bold ${
            delta === 0
              ? "bg-zinc-100 text-zinc-500 dark:bg-zinc-800 dark:text-zinc-400"
              : improved
                ? "bg-green-100 text-green-700 dark:bg-green-400/10 dark:text-green-400"
                : "bg-red-100 text-red-700 dark:bg-red-400/10 dark:text-red-400"
          }`}
        >
          {deltaText}
        </span>
      </div>
    </div>
  )
}
/** ObservationCard: callout card with a colored left border, a bold title, and a pill badge. */
function ObservationCard(props: {
  title: string
  badge: string
  badgeColor: string
  borderColor: string
  body: string
}) {
  const { title, badge, badgeColor, borderColor, body } = props
  const shell = `mb-4 rounded-xl border border-zinc-200 dark:border-zinc-800 border-l-4 ${borderColor} bg-white dark:bg-zinc-900 p-5 shadow-sm dark:shadow-none`
  const pill = `inline-block rounded-full ${badgeColor} px-2.5 py-0.5 text-xs font-semibold`
  return (
    <div className={shell}>
      <div className="mb-3 flex flex-wrap items-center gap-3">
        <span className="text-base font-bold text-zinc-900 dark:text-zinc-200">{title}</span>
        <span className={pill}>{badge}</span>
      </div>
      <p className="text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">{body}</p>
    </div>
  )
}
/** DataTable: minimal striped table; header cells keyed by label, body cells by position. */
function DataTable(props: { columns: string[]; rows: string[][] }) {
  const headerCells = props.columns.map(label => (
    <th key={label} className="py-2.5 pr-4">
      {label}
    </th>
  ))
  return (
    <div className="overflow-x-auto">
      <table className="w-full text-sm">
        <thead>
          <tr className="border-b-2 border-zinc-200 dark:border-zinc-800 text-left text-[11px] font-bold uppercase tracking-wider text-green-600 dark:text-green-400">
            {headerCells}
          </tr>
        </thead>
        <tbody>
          {props.rows.map((cells, rowIdx) => {
            // Zebra-stripe odd rows; even rows keep the plain background.
            const stripe = rowIdx % 2 ? "bg-zinc-50/50 dark:bg-white/[0.02]" : ""
            return (
              <tr
                key={rowIdx}
                className={`border-b border-zinc-100 dark:border-zinc-800 ${stripe}`}
              >
                {cells.map((cell, cellIdx) => (
                  <td key={cellIdx} className="py-2.5 pr-4 text-zinc-700 dark:text-zinc-200">
                    {cell}
                  </td>
                ))}
              </tr>
            )
          })}
        </tbody>
      </table>
    </div>
  )
}

View file

@ -1,98 +1,84 @@
"use client"
import dynamic from "next/dynamic"
import { SyntaxHighlighter } from "@/lib/syntax-highlighter"
import { memo } from "react"
const SyntaxHighlighter = dynamic(
() => import("react-syntax-highlighter").then(m => m.Prism),
{
ssr: false,
loading: () => (
<div className="animate-pulse bg-zinc-800 rounded p-4 min-h-[100px]">
<div className="h-4 bg-zinc-700 rounded w-3/4 mb-2" />
<div className="h-4 bg-zinc-700 rounded w-1/2 mb-2" />
<div className="h-4 bg-zinc-700 rounded w-2/3" />
</div>
),
}
)
export const zincDarkTheme = {
'code[class*="language-"]': {
color: 'rgb(250, 250, 250)',
background: 'none',
fontFamily: 'var(--font-mono)',
fontSize: '1em',
textAlign: 'left',
whiteSpace: 'pre',
wordSpacing: 'normal',
wordBreak: 'normal',
wordWrap: 'normal',
lineHeight: '1.5',
color: "rgb(250, 250, 250)",
background: "none",
fontFamily: "var(--font-mono)",
fontSize: "1em",
textAlign: "left",
whiteSpace: "pre",
wordSpacing: "normal",
wordBreak: "normal",
wordWrap: "normal",
lineHeight: "1.5",
tabSize: 4,
hyphens: 'none',
hyphens: "none",
},
'pre[class*="language-"]': {
color: 'rgb(250, 250, 250)',
background: 'rgb(24, 24, 27)',
fontFamily: 'var(--font-mono)',
fontSize: '1em',
textAlign: 'left',
whiteSpace: 'pre',
wordSpacing: 'normal',
wordBreak: 'normal',
wordWrap: 'normal',
lineHeight: '1.5',
color: "rgb(250, 250, 250)",
background: "rgb(24, 24, 27)",
fontFamily: "var(--font-mono)",
fontSize: "1em",
textAlign: "left",
whiteSpace: "pre",
wordSpacing: "normal",
wordBreak: "normal",
wordWrap: "normal",
lineHeight: "1.5",
tabSize: 4,
hyphens: 'none',
padding: '1em',
margin: '0',
overflow: 'auto',
hyphens: "none",
padding: "1em",
margin: "0",
overflow: "auto",
},
comment: {
color: 'rgb(113, 113, 122)',
fontStyle: 'italic',
color: "rgb(113, 113, 122)",
fontStyle: "italic",
},
prolog: { color: 'rgb(113, 113, 122)' },
doctype: { color: 'rgb(113, 113, 122)' },
cdata: { color: 'rgb(113, 113, 122)' },
keyword: { color: 'rgb(96, 165, 250)' },
'control-flow': { color: 'rgb(96, 165, 250)' },
string: { color: 'rgb(134, 239, 172)' },
'attr-value': { color: 'rgb(134, 239, 172)' },
function: { color: 'rgb(253, 224, 71)' },
'class-name': { color: 'rgb(253, 224, 71)' },
number: { color: 'rgb(251, 146, 60)' },
boolean: { color: 'rgb(251, 146, 60)' },
operator: { color: 'rgb(161, 161, 170)' },
punctuation: { color: 'rgb(161, 161, 170)' },
variable: { color: 'rgb(250, 250, 250)' },
property: { color: 'rgb(250, 250, 250)' },
tag: { color: 'rgb(96, 165, 250)' },
'attr-name': { color: 'rgb(250, 250, 250)' },
prolog: { color: "rgb(113, 113, 122)" },
doctype: { color: "rgb(113, 113, 122)" },
cdata: { color: "rgb(113, 113, 122)" },
keyword: { color: "rgb(96, 165, 250)" },
"control-flow": { color: "rgb(96, 165, 250)" },
string: { color: "rgb(134, 239, 172)" },
"attr-value": { color: "rgb(134, 239, 172)" },
function: { color: "rgb(253, 224, 71)" },
"class-name": { color: "rgb(253, 224, 71)" },
number: { color: "rgb(251, 146, 60)" },
boolean: { color: "rgb(251, 146, 60)" },
operator: { color: "rgb(161, 161, 170)" },
punctuation: { color: "rgb(161, 161, 170)" },
variable: { color: "rgb(250, 250, 250)" },
property: { color: "rgb(250, 250, 250)" },
tag: { color: "rgb(96, 165, 250)" },
"attr-name": { color: "rgb(250, 250, 250)" },
namespace: { opacity: 0.7 },
selector: { color: 'rgb(253, 224, 71)' },
selector: { color: "rgb(253, 224, 71)" },
important: {
color: 'rgb(251, 146, 60)',
fontWeight: 'bold',
color: "rgb(251, 146, 60)",
fontWeight: "bold",
},
atrule: { color: 'rgb(96, 165, 250)' },
builtin: { color: 'rgb(253, 224, 71)' },
atrule: { color: "rgb(96, 165, 250)" },
builtin: { color: "rgb(253, 224, 71)" },
entity: {
color: 'rgb(250, 250, 250)',
cursor: 'help',
color: "rgb(250, 250, 250)",
cursor: "help",
},
url: {
color: 'rgb(96, 165, 250)',
textDecoration: 'underline',
color: "rgb(96, 165, 250)",
textDecoration: "underline",
},
inserted: {
color: 'rgb(134, 239, 172)',
background: 'rgba(134, 239, 172, 0.1)',
color: "rgb(134, 239, 172)",
background: "rgba(134, 239, 172, 0.1)",
},
deleted: {
color: 'rgb(248, 113, 113)',
background: 'rgba(248, 113, 113, 0.1)',
color: "rgb(248, 113, 113)",
background: "rgba(248, 113, 113, 0.1)",
},
} as const
@ -101,7 +87,7 @@ export const CODE_STYLE = {
padding: "1rem",
fontSize: "0.875rem",
lineHeight: 1.5,
background: 'rgb(24, 24, 27)',
background: "rgb(24, 24, 27)",
} as const
export const CODE_STYLE_RELAXED = {
@ -109,7 +95,7 @@ export const CODE_STYLE_RELAXED = {
padding: "1rem",
fontSize: "0.875rem",
lineHeight: 1.6,
background: 'rgb(24, 24, 27)',
background: "rgb(24, 24, 27)",
} as const
export const CODE_STYLE_SMALL = {
@ -117,7 +103,7 @@ export const CODE_STYLE_SMALL = {
padding: "1rem",
fontSize: "0.8125rem",
lineHeight: 1.5,
background: 'rgb(24, 24, 27)',
background: "rgb(24, 24, 27)",
} as const
interface CodeHighlighterProps {
@ -129,13 +115,13 @@ interface CodeHighlighterProps {
}
const highlightStyle = {
backgroundColor: 'rgba(250, 204, 21, 0.15)',
display: 'block',
marginLeft: '-1rem',
marginRight: '-1rem',
paddingLeft: '1rem',
paddingRight: '1rem',
borderLeft: '3px solid rgb(250, 204, 21)',
backgroundColor: "rgba(250, 204, 21, 0.15)",
display: "block",
marginLeft: "-1rem",
marginRight: "-1rem",
paddingLeft: "1rem",
paddingRight: "1rem",
borderLeft: "3px solid rgb(250, 204, 21)",
}
export const CodeHighlighter = memo(function CodeHighlighter({
@ -152,8 +138,8 @@ export const CodeHighlighter = memo(function CodeHighlighter({
return (lineNumber: number) => {
const isHighlighted = highlightSet.has(lineNumber)
return {
style: isHighlighted ? highlightStyle : { display: 'block' },
'data-highlighted': isHighlighted ? 'true' : undefined,
style: isHighlighted ? highlightStyle : { display: "block" },
"data-highlighted": isHighlighted ? "true" : undefined,
}
}
}
@ -173,4 +159,4 @@ export const CodeHighlighter = memo(function CodeHighlighter({
{code}
</SyntaxHighlighter>
)
})
})

View file

@ -31,21 +31,21 @@ export async function generateMetadata(props: LLMCallDetailPageProps): Promise<M
export default async function LLMCallDetailPage(props: LLMCallDetailPageProps) {
const params = await props.params;
// Fetch LLM call details
const llmCall = await prisma.llm_calls.findUnique({
where: { id: params.id },
})
// Fetch LLM call details and related errors in parallel
const [llmCall, relatedErrors] = await Promise.all([
prisma.llm_calls.findUnique({
where: { id: params.id },
}),
prisma.optimization_errors.findMany({
where: { llm_call_id: params.id },
orderBy: { created_at: "desc" },
}),
])
if (!llmCall) {
notFound()
}
// Fetch related errors
const relatedErrors = await prisma.optimization_errors.findMany({
where: { llm_call_id: params.id },
orderBy: { created_at: "desc" },
})
return (
<div className="container mx-auto px-4 py-8">
{/* Header */}

View file

@ -0,0 +1,993 @@
import { isTeamMember } from "@/app/utils/auth"
import { redirect } from "next/navigation"
import Image from "next/image"
import { ReportToggle } from "@/components/report/report-toggle"
import {
LatencyChart,
BundleChart,
OnboardingChart,
CategoryPie,
} from "@/components/report/report-charts"
// Browser-tab title for this route (Next.js metadata export).
export const metadata = { title: "Performance Report — Codeflash" }
/* ── static data ────────────────────────────────────────────────────── */
// Headline per-page load times in ms; `note` flags rows that need a caveat
// (timeouts, tiny sample counts). null = no data for that side.
const PAGE_RESULTS: { page: string; before: number | null; after: number | null; note?: string }[] =
  [
    { page: "Onboarding", before: 10275, after: 8108 },
    { page: "API Keys", before: 6415, after: 5989 },
    { page: "Billing", before: 4108, after: 4383 },
    { page: "Traces", before: 3353, after: 3888 },
    { page: "Getting Started", before: 4513, after: 5350 },
    { page: "Dashboard", before: 16816, after: 30000, note: "timeout — bug fixed separately" },
    {
      page: "Review Optimizations",
      before: 4376,
      after: 16735,
      note: "only 3 samples, low confidence",
    },
  ]
// Full route-level table (avg + p95, ms). null = no post-merge samples yet.
const FULL_PAGE_TABLE: {
  route: string
  avgBefore: number | null
  avgAfter: number | null
  p95Before: number | null
  p95After: number | null
}[] = [
  { route: "/dashboard", avgBefore: 16816, avgAfter: 30000, p95Before: 30000, p95After: 30000 },
  { route: "/onboarding", avgBefore: 10275, avgAfter: 8108, p95Before: 20092, p95After: 8108 },
  { route: "/apikeys", avgBefore: 6415, avgAfter: 5989, p95Before: 11120, p95After: 10291 },
  { route: "/trace/:trace_id", avgBefore: 6909, avgAfter: null, p95Before: 9013, p95After: null },
  { route: "/login", avgBefore: 4958, avgAfter: null, p95Before: 7647, p95After: null },
  { route: "/repositories", avgBefore: 4306, avgAfter: null, p95Before: 7110, p95After: null },
  {
    route: "/review-optimizations",
    avgBefore: 4376,
    avgAfter: 16735,
    p95Before: 4985,
    p95After: 16735,
  },
  { route: "/getting-started", avgBefore: 4513, avgAfter: 5350, p95Before: 4513, p95After: 5588 },
  { route: "/repositories/:id", avgBefore: 5292, avgAfter: null, p95Before: 5292, p95After: null },
  { route: "/billing", avgBefore: 4108, avgAfter: 4383, p95Before: 4108, p95After: 4383 },
  {
    route: "/observability/traces",
    avgBefore: 3353,
    avgAfter: 3888,
    p95Before: 3981,
    p95After: 3888,
  },
  { route: "/codeflash/auth", avgBefore: 1093, avgAfter: 3465, p95Before: 1673, p95After: 5079 },
  { route: "/members", avgBefore: 1658, avgAfter: null, p95Before: 1658, p95After: null },
]
// Bundle-size optimizations shipped, with the PR that landed each one.
const BUNDLE_TABLE = [
  { opt: "Sentry Replay lazy-load", savings: "~600 KB deferred", pr: "#2554" },
  { opt: "prism-react-renderer dynamic import", savings: "~132 KB", pr: "#2557" },
  { opt: "framer-motion → motion/react", savings: "~70 KB", pr: "#2556" },
  { opt: "PrismLight switch", savings: "~50 KB", pr: "#2539" },
  {
    opt: "Unused dep removal (github-markdown-css, react-papaparse, @azure/msal-node)",
    savings: "variable",
    pr: "#2562",
  },
  { opt: "@sentry/node → @sentry/nextjs", savings: "duplicate SDK eliminated", pr: "#2555" },
]
// Server-side/runtime optimizations shipped.
const RUNTIME_TABLE = [
  { opt: "N+1 query elimination", impact: "1 query vs N for optimization_features", pr: "#2544" },
  {
    opt: "Promise.all parallelization",
    impact: "Concurrent DB fetches on 5 pages",
    pr: "#2545, #2546, #2559, #2561, #2560",
  },
  { opt: "React cache() dedup", impact: "2 identical Prisma calls → 1 per request", pr: "#2560" },
  { opt: "PostHog singleton", impact: "Eliminated repeated HTTP client creation", pr: "#2558" },
  {
    opt: "PrismaClient singleton",
    impact: "Consolidated per-file PrismaClient instances",
    pr: "#2543",
  },
]
// Observability improvements shipped.
const OBSERVABILITY_TABLE = [
  {
    name: "OTel + Sentry bridge",
    detail: "SentrySpanProcessor, SentryPropagator, SentrySampler",
    pr: "#2547",
  },
  { name: "Server action timing", detail: "withTiming() wrapper with Sentry spans", pr: "#2552" },
  { name: "Slow query logging", detail: ">500ms queries forwarded to Sentry", pr: "#2547" },
  {
    name: "PostHog analytics",
    detail: "trackOptimizationReviewed + key user actions",
    pr: "#2552",
  },
  { name: "Bundle analysis", detail: "Route size analysis scripts", pr: "#2553" },
]
// Category tag for each PR in the inventory; drives the color legend below.
type PRCategory =
  | "Bundle size"
  | "Runtime"
  | "Observability"
  | "Testing"
  | "Cleanup"
  | "CI"
  | "Bugfix"
// Tailwind text-color classes per category (light + dark variants).
const CATEGORY_COLORS: Record<PRCategory, string> = {
  "Bundle size": "text-amber-600 dark:text-amber-400",
  Runtime: "text-green-600 dark:text-green-400",
  Observability: "text-amber-700 dark:text-amber-500",
  Testing: "text-purple-600 dark:text-purple-400",
  Cleanup: "text-zinc-500 dark:text-zinc-400",
  CI: "text-pink-600 dark:text-pink-400",
  Bugfix: "text-red-600 dark:text-red-400",
}
// Every PR in the optimization effort, in merge order ("—" = out-of-band fix).
const PR_INVENTORY: { num: string; pr: string; title: string; cat: PRCategory }[] = [
  { num: "1", pr: "#2539", title: "PrismLight switch", cat: "Bundle size" },
  { num: "2", pr: "#2540", title: "Named diff + Sentry import", cat: "Bundle size" },
  { num: "3", pr: "#2543", title: "PrismaClient singleton", cat: "Runtime" },
  { num: "4", pr: "#2544", title: "N+1 query elimination", cat: "Runtime" },
  { num: "5", pr: "#2545", title: "Members page parallel fetch", cat: "Runtime" },
  { num: "6", pr: "#2546", title: "Repository page parallel fetch", cat: "Runtime" },
  { num: "7", pr: "#2547", title: "Observability stack (OTel + Sentry)", cat: "Observability" },
  { num: "8", pr: "#2552", title: "Server action timing + analytics", cat: "Observability" },
  { num: "9", pr: "#2553", title: "Test coverage + bundle analysis", cat: "Testing" },
  { num: "10", pr: "#2554", title: "Sentry Replay lazy-load", cat: "Bundle size" },
  { num: "11", pr: "#2555", title: "@sentry/nextjs consistency", cat: "Bundle size" },
  { num: "12", pr: "#2556", title: "framer-motion → motion/react", cat: "Bundle size" },
  { num: "13", pr: "#2557", title: "LineProfilerView dynamic import", cat: "Bundle size" },
  { num: "14", pr: "#2558", title: "PostHog singleton", cat: "Runtime" },
  { num: "15", pr: "#2559", title: "Optimization event parallel fetch", cat: "Runtime" },
  { num: "16", pr: "#2560", title: "React cache() dedup", cat: "Runtime" },
  { num: "17", pr: "#2561", title: "LLM call detail parallel fetch", cat: "Runtime" },
  { num: "18", pr: "#2562", title: "Unused dep removal", cat: "Cleanup" },
  { num: "19", pr: "#2563", title: "Quality gates CI workflow", cat: "CI" },
  { num: "—", pr: "#2564", title: "Sidebar refetch loop fix", cat: "Bugfix" },
]
/* ── helpers ────────────────────────────────────────────────────────── */
/** Render a nullable metric: em dash for missing data, locale-grouped digits otherwise. */
function fmt(v: number | null) {
  return v === null ? "—" : v.toLocaleString()
}
/**
 * Percentage-change badge comparing `after` to `before`.
 * Green when the value dropped (an improvement for latency), red when it rose.
 * Renders the neutral placeholder when either side is missing — and now also
 * when `before` is zero, which previously divided by zero and rendered
 * "Infinity%"/"NaN%".
 */
function pctBadge(before: number | null, after: number | null) {
  // NOTE(review): this placeholder span renders no text — confirm an em dash
  // ("—") wasn't intended as its content.
  if (before === null || after === null || before === 0)
    return <span className="text-zinc-400 dark:text-zinc-500"></span>
  const pct = ((after - before) / before) * 100
  const improved = pct < 0
  return (
    <span
      className={`inline-block rounded-md px-2 py-0.5 text-xs font-bold ${improved ? "bg-green-100 text-green-700 dark:bg-green-400/10 dark:text-green-400" : "bg-red-100 text-red-700 dark:bg-red-400/10 dark:text-red-400"}`}
    >
      {pct > 0 ? "+" : ""}
      {pct.toFixed(0)}%
    </span>
  )
}
/* ── page ───────────────────────────────────────────────────────────── */
/**
 * Server component for the internal performance report.
 * Gated to team members via isTeamMember(); everyone else is redirected to "/".
 * Renders a hero, four headline metric cards, the exec/engineering toggle,
 * and a footer — all content comes from the static tables above.
 */
export default async function ReportPage() {
  // Authorization gate — isTeamMember() is async (reads the session server-side).
  const teamMember = await isTeamMember()
  if (!teamMember) redirect("/")
  return (
    <div className="min-h-screen bg-white dark:bg-zinc-950 font-sans text-zinc-900 dark:text-zinc-200">
      {/* ── Hero ── */}
      <div className="border-b border-zinc-200 dark:border-zinc-800 bg-gradient-to-br from-zinc-100 via-amber-50/40 to-zinc-100 dark:from-zinc-950 dark:via-stone-900/60 dark:to-zinc-950 px-6 pb-14 pt-16 text-center">
        {/* Two logo variants: one shown only in dark mode, one only in light mode. */}
        <Image
          src="/images/codeflash_darkmode.svg"
          alt="Codeflash"
          width={120}
          height={28}
          className="mx-auto mb-4 hidden dark:block"
          priority
        />
        <Image
          src="/images/codeflash_light.svg"
          alt="Codeflash"
          width={120}
          height={28}
          className="mx-auto mb-4 dark:hidden"
          priority
        />
        <h1
          className="text-[36px] font-extrabold tracking-tight text-zinc-900 dark:text-zinc-50"
          style={{ letterSpacing: "-0.02em" }}
        >
          Web App Performance Optimization
        </h1>
        <p className="mx-auto mt-3 max-w-[600px] text-[17px] text-zinc-500 dark:text-zinc-400">
          How we made the Codeflash dashboard faster, lighter, and more reliable
        </p>
        <div className="mt-6 flex flex-wrap items-center justify-center gap-6 text-[13px] text-zinc-400 dark:text-zinc-500">
          <span>April 4, 2026</span>
          <span>·</span>
          <span>20 changes shipped</span>
          <span>·</span>
          <span>Zero downtime</span>
        </div>
      </div>
      {/* ── Hero Metrics ── */}
      {/* Negative top margin overlaps the cards onto the hero gradient. */}
      <div
        className="mx-auto -mt-10 grid max-w-4xl grid-cols-2 gap-5 px-6 lg:grid-cols-4"
        style={{ position: "relative", zIndex: 1 }}
      >
        {[
          {
            value: "-55%",
            label: "Slowest Responses",
            detail: "p95 latency: 1.2s → 0.6s",
            color: "text-green-600 dark:text-green-400",
          },
          {
            value: "-25%",
            label: "Typical Responses",
            detail: "p75 latency: 289ms → 216ms",
            color: "text-green-600 dark:text-green-400",
          },
          {
            value: "850 KB",
            label: "Smaller Downloads",
            detail: "Removed from every page load",
            color: "text-amber-500 dark:text-[#ffd227]",
          },
          {
            value: "0",
            label: "New Bugs",
            detail: "All changes verified, zero errors",
            color: "text-amber-500 dark:text-[#ffd227]",
          },
        ].map(m => (
          <div
            key={m.label}
            className="rounded-xl border border-zinc-200 dark:border-zinc-800 bg-white dark:bg-zinc-900 px-6 py-8 text-center shadow-sm dark:shadow-none"
          >
            <div
              className={`font-mono leading-none ${m.color}`}
              style={{ fontSize: "42px", fontWeight: 800, letterSpacing: "-0.02em" }}
            >
              {m.value}
            </div>
            <div className="mt-2 text-[15px] font-semibold text-zinc-800 dark:text-zinc-200">
              {m.label}
            </div>
            <div className="mt-1 text-[13px] text-zinc-500 dark:text-zinc-400">{m.detail}</div>
          </div>
        ))}
      </div>
      {/* ── Toggle + Views ── */}
      {/* Client-side toggle switches between the exec and engineering views. */}
      <div className="mx-auto max-w-4xl px-6 pb-20">
        <ReportToggle execView={<ExecView />} engView={<EngView />} />
      </div>
      {/* ── Footer ── */}
      <div className="border-t border-zinc-200 dark:border-zinc-800 py-8 text-center">
        <Image
          src="/images/codeflash_darkmode.svg"
          alt="Codeflash"
          width={80}
          height={20}
          className="mx-auto mb-2 opacity-40 hidden dark:block"
        />
        <Image
          src="/images/codeflash_light.svg"
          alt="Codeflash"
          width={80}
          height={20}
          className="mx-auto mb-2 opacity-40 dark:hidden"
        />
        <p className="text-xs text-zinc-400 dark:text-zinc-500">April 2026</p>
      </div>
    </div>
  )
}
/*
EXECUTIVE VIEW
*/
/**
 * Executive-facing view: cost impact, plain-language outcomes, charts,
 * per-page results, and leadership action items. Purely presentational —
 * all numbers come from the static tables defined above.
 *
 * NOTE(review): several prose strings in this block appear to be missing a
 * connective glyph (likely an em dash or arrow lost in an encoding pass),
 * e.g. "optimizations parallel queries" and "transactions/day minimal
 * compute needs" — confirm against the original copy.
 */
function ExecView() {
  return (
    <div className="space-y-14">
      {/* ── Cost Impact ── */}
      <Section
        title="Cost Impact"
        subtitle="Performance improvements unlock infrastructure savings."
      >
        <Card>
          <div className="flex flex-wrap items-center gap-8">
            <div className="min-w-[200px]">
              <div
                className="font-mono leading-none text-green-600 dark:text-green-400"
                style={{ fontSize: "48px", fontWeight: 800 }}
              >
                $660
              </div>
              <div className="mt-1 text-sm text-zinc-500 dark:text-zinc-400">
                estimated annual savings
              </div>
            </div>
            <div className="flex-1 border-l-2 border-zinc-200 dark:border-zinc-800 pl-6">
              <p className="text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">
                The webapp currently runs on an Azure Premium V2 (P1v2) instance at $81/month. With
                the performance optimizations parallel queries, eliminated N+1 fetches, fixed
                infinite request loop, and reduced CPU load per request the app can be downscaled
                to a Basic B2 instance.
              </p>
            </div>
          </div>
        </Card>
        {/* Infra comparison */}
        <div className="mt-5 flex flex-wrap items-center gap-5">
          <div className="flex-1 min-w-[260px] rounded-xl border border-zinc-200 dark:border-zinc-800 border-t-4 border-t-red-400 bg-white dark:bg-zinc-900 p-6 shadow-sm dark:shadow-none">
            <div className="text-[11px] font-bold uppercase tracking-widest text-red-500 dark:text-red-400">
              Current
            </div>
            <div className="mt-3 text-lg font-bold text-zinc-900 dark:text-zinc-200">
              P1v2 Premium V2
            </div>
            <div className="mt-1 text-sm text-zinc-500 dark:text-zinc-400">1 vCPU · 3.5 GB RAM</div>
            <div className="mt-4 border-t border-zinc-200 dark:border-zinc-800 pt-4 space-y-2">
              <div className="flex justify-between text-sm">
                <span className="text-zinc-500 dark:text-zinc-400">Monthly</span>
                <span className="font-bold text-zinc-900 dark:text-zinc-200">$81</span>
              </div>
              <div className="flex justify-between text-sm">
                <span className="text-zinc-500 dark:text-zinc-400">Annual</span>
                <span className="font-bold text-zinc-900 dark:text-zinc-200">$972</span>
              </div>
            </div>
          </div>
          {/* NOTE(review): separator div renders empty — an arrow glyph may have been lost; confirm. */}
          <div className="text-3xl text-zinc-300 dark:text-zinc-500"></div>
          <div className="flex-1 min-w-[260px] rounded-xl border border-zinc-200 dark:border-zinc-800 border-t-4 border-t-green-400 bg-white dark:bg-zinc-900 p-6 shadow-sm dark:shadow-none">
            <div className="text-[11px] font-bold uppercase tracking-widest text-green-600 dark:text-green-400">
              Recommended
            </div>
            <div className="mt-3 text-lg font-bold text-zinc-900 dark:text-zinc-200">
              B2 Basic
            </div>
            <div className="mt-1 text-sm text-zinc-500 dark:text-zinc-400">2 vCPU · 3.5 GB RAM</div>
            <div className="mt-4 border-t border-zinc-200 dark:border-zinc-800 pt-4 space-y-2">
              <div className="flex justify-between text-sm">
                <span className="text-zinc-500 dark:text-zinc-400">Monthly</span>
                <span className="font-bold text-green-600 dark:text-green-400">$26</span>
              </div>
              <div className="flex justify-between text-sm">
                <span className="text-zinc-500 dark:text-zinc-400">Annual</span>
                <span className="font-bold text-green-600 dark:text-green-400">$312</span>
              </div>
            </div>
          </div>
        </div>
        {/* Why + monitoring */}
        <div className="mt-5 flex flex-wrap gap-5">
          <Card className="flex-1 min-w-[280px]">
            <h4 className="text-base font-bold text-zinc-900 dark:text-zinc-200">
              Why downscaling is safe
            </h4>
            <ul className="mt-3 space-y-1.5 pl-5 text-sm leading-relaxed text-zinc-600 dark:text-zinc-400 list-disc">
              <li>The webapp handles only ~14 transactions/day minimal compute needs</li>
              <li>The infinite request loop (hundreds of requests/sec) has been fixed</li>
              <li>
                Database queries now run in parallel, completing faster and releasing threads sooner
              </li>
              <li>N+1 query pattern eliminated fewer round-trips to the database</li>
              <li>B2 actually gives more CPU cores (2 vs 1) at a third of the price</li>
            </ul>
            <p className="mt-4 rounded-lg bg-zinc-50 dark:bg-white/[0.04] p-3 text-xs text-zinc-500">
              Note: B-tier does not include deployment slots. If staging/production slot swapping is
              required, Standard S1 ($73/mo, saving $96/year) is the minimum tier that supports it.
            </p>
          </Card>
          <Card className="flex-1 min-w-[280px]">
            <h4 className="text-base font-bold text-zinc-900 dark:text-zinc-200">
              Monitoring cost reduction
            </h4>
            <div
              className="mt-3 font-mono leading-none text-green-600 dark:text-green-400"
              style={{ fontSize: "42px", fontWeight: 800 }}
            >
              90%
            </div>
            <div className="mt-1 text-sm font-semibold text-zinc-800 dark:text-zinc-200">
              less Sentry event volume
            </div>
            <p className="mt-3 text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">
              Trace sampling reduced from 100% to 10% in production. Same error visibility, same
              alerting just 90% fewer billable events. Exact dollar savings depend on your Sentry
              plan tier.
            </p>
            <div className="mt-4 border-t border-zinc-200 dark:border-zinc-800 pt-4">
              <div className="text-sm font-bold text-red-600 dark:text-red-400">
                1 pre-existing bug found &amp; fixed
              </div>
              <p className="mt-1 text-xs leading-relaxed text-zinc-600 dark:text-zinc-400">
                Dashboard sidebar was generating hundreds of server requests per second. This silent
                resource drain is now eliminated.
              </p>
            </div>
          </Card>
        </div>
      </Section>
      {/* ── What Does This Mean? ── */}
      <Section
        title="What Does This Mean?"
        subtitle="These numbers translate directly to user experience."
      >
        <div className="flex flex-wrap gap-5">
          {[
            {
              title: "Users wait less",
              body: "The slowest page loads improved by over half a second. Users who previously waited 1.2 seconds for the app to respond now wait 0.6 seconds.",
            },
            {
              title: "Pages load faster",
              body: "We removed 850 KB of code that was being downloaded on every page visit. That's the equivalent of a high-resolution photo — gone from every load.",
            },
            {
              title: "Nothing broke",
              body: "All 20 changes were individually tested and verified. Error monitoring confirms zero new issues were introduced.",
            },
          ].map(c => (
            <Card key={c.title} className="flex-1 min-w-[260px]">
              <h4 className="text-base font-bold text-zinc-900 dark:text-zinc-200">{c.title}</h4>
              <p className="mt-2 text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">
                {c.body}
              </p>
            </Card>
          ))}
        </div>
      </Section>
      {/* ── Response Time ── */}
      <Section
        title="Response Time Improvement"
        subtitle="How long users wait for the app to respond, before and after optimization."
      >
        <Card>
          <LatencyChart />
        </Card>
      </Section>
      {/* ── Onboarding spotlight ── */}
      <Section
        title="Spotlight: Onboarding Page"
        subtitle="The clearest per-page improvement — new users now get a 60% faster experience."
      >
        <Card>
          <OnboardingChart />
        </Card>
      </Section>
      {/* ── Bundle Size ── */}
      <Section
        title="Download Size Reduction"
        subtitle="Code removed from what every user downloads on their first visit."
      >
        <Card>
          <BundleChart />
        </Card>
      </Section>
      {/* ── What We Changed ── */}
      <Section title="What We Changed" subtitle="20 targeted improvements across 5 categories.">
        <div className="flex flex-wrap items-start gap-6">
          <div className="flex-1 min-w-[300px]">
            <Card>
              {[
                {
                  icon: "⚡",
                  title: "Faster Database Queries",
                  desc: "Eliminated redundant queries and ran independent queries in parallel instead of one-by-one. 5 pages now load data concurrently.",
                },
                {
                  icon: "📦",
                  title: "Smaller Page Downloads",
                  desc: "Removed unused code, deferred non-critical libraries, and switched to lighter alternatives. 850 KB+ removed from every initial page load.",
                },
                {
                  icon: "📊",
                  title: "Better Monitoring",
                  desc: "Added performance tracking for every server action, slow query alerts, and user behavior analytics. We can now see exactly what's slow and why.",
                },
                {
                  icon: "🧪",
                  title: "Automated Testing",
                  desc: "Added 39 tests covering critical paths. Set up a CI pipeline that checks code quality, runs tests, and reports page sizes on every change.",
                },
                {
                  icon: "🔧",
                  title: "Bug Fix",
                  desc: "Found and fixed a dashboard bug where the sidebar was making hundreds of server requests per second due to an infinite loop. This was wasting resources silently.",
                },
              ].map(item => (
                <div
                  key={item.title}
                  className="flex gap-4 border-b border-zinc-100 dark:border-zinc-800 py-3.5 last:border-b-0"
                >
                  <span className="w-10 flex-shrink-0 text-center text-2xl">{item.icon}</span>
                  <div>
                    <div className="text-sm font-semibold text-zinc-900 dark:text-zinc-200">
                      {item.title}
                    </div>
                    <div className="mt-0.5 text-xs leading-relaxed text-zinc-500 dark:text-zinc-400">
                      {item.desc}
                    </div>
                  </div>
                </div>
              ))}
            </Card>
          </div>
          <div className="w-80 flex-shrink-0">
            <Card>
              <div className="mb-3 text-center text-sm font-semibold text-zinc-800 dark:text-zinc-200">
                Changes by Category
              </div>
              <CategoryPie />
            </Card>
          </div>
        </div>
      </Section>
      {/* ── Page-by-Page ── */}
      <Section
        title="Page-by-Page Results"
        subtitle="Average load times before and after. Results will be more definitive after 7 days of data collection."
      >
        <Card>
          <div className="overflow-x-auto">
            <div className="flex gap-4 border-b-2 border-zinc-200 dark:border-zinc-800 pb-2.5 text-[11px] font-bold uppercase tracking-wider text-zinc-600 dark:text-zinc-300">
              <div className="flex-1">Page</div>
              <div className="w-28 text-right">Before</div>
              <div className="w-28 text-right">After</div>
              <div className="w-20 text-center">Change</div>
            </div>
            {PAGE_RESULTS.map(r => (
              <div
                key={r.page}
                className="flex items-center gap-4 border-b border-zinc-100 dark:border-zinc-800 py-3"
              >
                <div className="flex-1">
                  <span className="text-sm font-semibold text-zinc-800 dark:text-zinc-200">
                    {r.page}
                  </span>
                  {r.note && (
                    <span className="ml-2 text-xs text-zinc-400 dark:text-zinc-500">{r.note}</span>
                  )}
                </div>
                <div className="w-28 text-right font-mono text-sm text-zinc-500 dark:text-zinc-400">
                  {fmt(r.before)} ms
                </div>
                <div className="w-28 text-right font-mono text-sm font-semibold text-zinc-800 dark:text-zinc-200">
                  {fmt(r.after)} ms
                </div>
                <div className="w-20 text-center">{pctBadge(r.before, r.after)}</div>
              </div>
            ))}
          </div>
          <p className="mt-4 text-xs leading-relaxed text-zinc-400 dark:text-zinc-500">
            Note: Many pages have limited post-optimization data because we reduced monitoring from
            100% to 10% sampling, and only ~2 hours had passed at measurement time. Full results
            expected within 7 days.
          </p>
        </Card>
      </Section>
      {/* ── Recommended Actions ── */}
      <Section
        title="Recommended Actions"
        subtitle="Decisions and approvals needed from leadership."
      >
        <Card>
          <ActionItem text="Approve downscaling Azure App Service from P1v2 ($81/mo) to B2 ($26/mo) — saves $660/year" />
          <ActionItem text="Decide whether deployment slot swapping is needed (if yes, S1 at $73/mo instead)" />
          <ActionItem text="Review Sentry plan tier — 90% fewer events may allow a plan downgrade" />
          <ActionItem text="Schedule follow-up review in 7 days once full post-optimization data is available" />
        </Card>
      </Section>
    </div>
  )
}
/*
ENGINEERING VIEW
*/
/**
 * Engineering-detail view of the optimization report.
 *
 * Purely presentational: every section renders module-level constant data
 * (FULL_PAGE_TABLE, BUNDLE_TABLE, RUNTIME_TABLE, OBSERVABILITY_TABLE,
 * PR_INVENTORY, CATEGORY_COLORS) through the shared Section/Card/DataTable/
 * IssueCard/ActionItem components plus the LatencyChart — no fetching,
 * no state, no props.
 *
 * NOTE(review): several literal strings appear to have lost glyphs in
 * transit (e.g. "107 41", "sampling reduced 100% 10%", "errors zero
 * regressions" — presumably arrows/separators between the tokens).
 * Confirm against the original copy before shipping.
 */
function EngView() {
  return (
    <div className="space-y-14">
      {/* ── Span Latency ── */}
      <Section
        title="Span Latency — Before vs. After"
        subtitle="Cutoff: first PR merged at 2026-04-04T16:14:09 UTC. Before = 14 days prior. After = post-merge with 10% sampling."
      >
        <Card>
          <LatencyChart />
        </Card>
      </Section>
      {/* ── Error Count ── */}
      <Section title="Error Count">
        <div className="flex flex-wrap gap-5">
          {/* Three stat cards: error counts before/after and transaction volume. */}
          <Card className="flex-1 min-w-[200px]">
            <div className="text-[13px] font-semibold uppercase tracking-widest text-zinc-500 dark:text-zinc-400">
              Before (14d)
            </div>
            <div
              className="mt-1 font-mono text-zinc-800 dark:text-zinc-200"
              style={{ fontSize: "36px", fontWeight: 800 }}
            >
              2
            </div>
            <div className="text-sm text-zinc-500 dark:text-zinc-400">errors</div>
          </Card>
          <Card className="flex-1 min-w-[200px]">
            <div className="text-[13px] font-semibold uppercase tracking-widest text-zinc-500 dark:text-zinc-400">
              After (post-merge)
            </div>
            <div
              className="mt-1 font-mono text-green-600 dark:text-green-400"
              style={{ fontSize: "36px", fontWeight: 800 }}
            >
              0
            </div>
            <div className="text-sm text-green-600 dark:text-green-400">
              errors zero regressions
            </div>
          </Card>
          <Card className="flex-1 min-w-[200px]">
            <div className="text-[13px] font-semibold uppercase tracking-widest text-zinc-500 dark:text-zinc-400">
              Transactions (14d)
            </div>
            <div
              className="mt-1 font-mono text-zinc-800 dark:text-zinc-200"
              style={{ fontSize: "36px", fontWeight: 800 }}
            >
              107 41
            </div>
            <div className="text-sm text-zinc-500 dark:text-zinc-400">
              sampling reduced 100% 10%
            </div>
          </Card>
        </div>
      </Section>
      {/* ── Full Page Load Table ── */}
      <Section
        title="Page Load Durations (All Pages)"
        subtitle="Sentry pageload transaction data. Before = 14d window, After = post-merge."
      >
        <Card>
          <div className="overflow-x-auto">
            <table className="w-full text-sm">
              <thead>
                <tr className="border-b-2 border-zinc-200 dark:border-zinc-800 text-left text-[11px] font-bold uppercase tracking-wider text-zinc-600 dark:text-zinc-300">
                  <th className="py-2.5 pr-4">Route</th>
                  <th className="py-2.5 w-24 text-right">Avg Before</th>
                  <th className="py-2.5 w-24 text-right">Avg After</th>
                  <th className="py-2.5 w-24 text-right">p95 Before</th>
                  <th className="py-2.5 w-24 text-right">p95 After</th>
                </tr>
              </thead>
              <tbody>
                {/* Zebra striping on odd rows; fmt() formats the millisecond values. */}
                {FULL_PAGE_TABLE.map((r, i) => (
                  <tr
                    key={r.route}
                    className={`border-b border-zinc-100 dark:border-zinc-800 ${i % 2 ? "bg-zinc-50/50 dark:bg-white/[0.02]" : ""}`}
                  >
                    <td className="py-2.5 pr-4 font-mono text-xs font-medium text-zinc-800 dark:text-zinc-200">
                      {r.route}
                    </td>
                    <td className="py-2.5 w-24 text-right font-mono text-zinc-500 dark:text-zinc-400">
                      {fmt(r.avgBefore)}
                    </td>
                    <td className="py-2.5 w-24 text-right font-mono font-semibold text-zinc-800 dark:text-zinc-200">
                      {fmt(r.avgAfter)}
                    </td>
                    <td className="py-2.5 w-24 text-right font-mono text-zinc-500 dark:text-zinc-400">
                      {fmt(r.p95Before)}
                    </td>
                    <td className="py-2.5 w-24 text-right font-mono font-semibold text-zinc-800 dark:text-zinc-200">
                      {fmt(r.p95After)}
                    </td>
                  </tr>
                ))}
              </tbody>
            </table>
          </div>
          <p className="mt-3 text-xs text-zinc-400 dark:text-zinc-500">
            = no post-merge samples (10% sampling + short window). Dashboard 30s = sidebar refetch
            loop timeout (fixed PR #2564).
          </p>
        </Card>
      </Section>
      {/* ── Bundle Size Details ── */}
      <Section title="Bundle Size Reductions (~850 KB+)">
        <Card>
          <DataTable
            columns={["Optimization", "Est. Savings", "PR"]}
            rows={BUNDLE_TABLE.map(r => [r.opt, r.savings, r.pr])}
          />
        </Card>
      </Section>
      {/* ── Runtime Details ── */}
      <Section title="Runtime & Latency Improvements">
        <Card>
          <DataTable
            columns={["Optimization", "Impact", "PR"]}
            rows={RUNTIME_TABLE.map(r => [r.opt, r.impact, r.pr])}
          />
        </Card>
      </Section>
      {/* ── Observability Stack ── */}
      <Section title="Observability Stack">
        <Card>
          <DataTable
            columns={["Addition", "Detail", "PR"]}
            rows={OBSERVABILITY_TABLE.map(r => [r.name, r.detail, r.pr])}
          />
        </Card>
      </Section>
      {/* ── Testing & CI ── */}
      <Section title="Testing & CI">
        <Card>
          <ul className="space-y-1.5 pl-5 text-sm leading-relaxed text-zinc-700 dark:text-zinc-200 list-disc">
            <li>
              39 tests across 4 files: withTiming wrapper, N+1 fix verification, parallel fetches,
              access control, error handling (PR #2553)
            </li>
            <li>
              Quality gates CI workflow: type-check tests build route size PR comment (PR
              #2563)
            </li>
            <li>Trace sampling: 100% 10% in production (PR #2547)</li>
          </ul>
        </Card>
      </Section>
      {/* ── Full PR Inventory ── */}
      <Section
        title="Full PR Inventory"
        subtitle="All 19 optimization PRs + 1 bugfix, merged sequentially to main."
      >
        <Card>
          <div className="overflow-x-auto">
            <table className="w-full text-sm">
              <thead>
                <tr className="border-b-2 border-zinc-200 dark:border-zinc-800 text-left text-[11px] font-bold uppercase tracking-wider text-amber-600 dark:text-amber-400">
                  <th className="py-2.5 w-10">#</th>
                  <th className="py-2.5 w-20">PR</th>
                  <th className="py-2.5">Title</th>
                  <th className="py-2.5 w-32">Category</th>
                </tr>
              </thead>
              <tbody>
                {/* Category cell color comes from the CATEGORY_COLORS lookup. */}
                {PR_INVENTORY.map((r, i) => (
                  <tr
                    key={r.pr}
                    className={`border-b border-zinc-100 dark:border-zinc-800 ${i % 2 ? "bg-zinc-50/50 dark:bg-white/[0.02]" : ""}`}
                  >
                    <td className="py-2.5 text-zinc-500 dark:text-zinc-400">{r.num}</td>
                    <td className="py-2.5 text-zinc-800 dark:text-zinc-200">{r.pr}</td>
                    <td className="py-2.5 text-zinc-800 dark:text-zinc-200">{r.title}</td>
                    <td className={`py-2.5 font-semibold ${CATEGORY_COLORS[r.cat]}`}>{r.cat}</td>
                  </tr>
                ))}
              </tbody>
            </table>
          </div>
        </Card>
      </Section>
      {/* ── Pre-Existing Issues ── */}
      <Section title="Pre-Existing Issues Discovered">
        <IssueCard
          title="Dashboard Sidebar Infinite Refetch Loop"
          severity="High — Fixed in PR #2564"
          borderColor="border-l-red-400"
          badgeColor="bg-red-100 text-red-800 dark:bg-red-400 dark:text-zinc-950"
          body="sidebar.tsx:101-124 — subscriptionFetchRef reset in finally() caused re-entrancy. Each fetch → setSubscription → re-render → ref false → fetch. Hundreds of POST+GET pairs/sec."
          fix="Moved ref reset to effect cleanup. Added cancelled flag for unmount safety."
        />
        <IssueCard
          title="Auth0 Favicon 404"
          severity="Low"
          borderColor="border-l-zinc-400"
          badgeColor="bg-zinc-200 text-zinc-700 dark:bg-zinc-500 dark:text-zinc-950"
          body="Auth0 hosted login (codeflash-ai.us.auth0.com) returns 404 for /favicon.ico. Fix: Auth0 Dashboard > Branding > Universal Login > upload favicon."
        />
        <IssueCard
          title="Image Cache ENOENT"
          severity="Low"
          borderColor="border-l-zinc-400"
          badgeColor="bg-zinc-200 text-zinc-700 dark:bg-zinc-500 dark:text-zinc-950"
          body="mkdir '/home/site/wwwroot/.next/cache/images' — Azure Run-From-Zip mounts app as read-only. Image optimization caching only, not functional."
        />
      </Section>
      {/* ── Infrastructure ── */}
      <Section
        title="Infrastructure"
        subtitle="Current Azure App Service plan and recommended downscale after optimizations."
      >
        <Card>
          <div className="flex flex-wrap items-center gap-6">
            <div className="flex-1">
              <div className="text-[11px] font-bold uppercase tracking-wider text-zinc-500 dark:text-zinc-400">
                Current
              </div>
              <div className="mt-1 text-base font-bold text-zinc-900 dark:text-zinc-200">
                P1v2 (PremiumV2)
              </div>
              <div className="mt-0.5 text-xs text-zinc-500 dark:text-zinc-400">
                1 vCPU · 3.5 GB · 1 instance · $81/mo
              </div>
            </div>
            {/* Separator between the two plan panels (glyph appears stripped — confirm). */}
            <div className="text-2xl text-zinc-300 dark:text-zinc-500"></div>
            <div className="flex-1">
              <div className="text-[11px] font-bold uppercase tracking-wider text-green-600 dark:text-green-400">
                Recommended
              </div>
              <div className="mt-1 text-base font-bold text-green-600 dark:text-green-400">
                B2 (Basic)
              </div>
              <div className="mt-0.5 text-xs text-zinc-500 dark:text-zinc-400">
                2 vCPU · 3.5 GB · $26/mo (saves $660/yr)
              </div>
            </div>
          </div>
          <p className="mt-4 text-xs text-zinc-400 dark:text-zinc-500">
            B-tier lacks deployment slots. If slot swapping is required: S1 ($73/mo, saves $96/yr).
          </p>
        </Card>
      </Section>
      {/* ── Engineering Action Items ── */}
      <Section title="Engineering Action Items" subtitle="Technical tasks to complete post-merge.">
        <Card>
          <ActionItem text="Re-measure page load durations after 7 days with clean post-fix Sentry data" />
          <ActionItem text="Verify db.query spans from @prisma/instrumentation are appearing in Sentry" />
          <ActionItem text="Upload Codeflash favicon in Auth0 Dashboard > Branding > Universal Login" />
          <ActionItem text="Investigate /dashboard 30s timeout — root cause may extend beyond sidebar fix" />
          <ActionItem text="Execute App Service downscale: az appservice plan update --sku B2 (or S1 if slots needed)" />
          <ActionItem text="Audit remaining bundle: run next build --profile and check for further lazy-load candidates" />
          <ActionItem text="Add Playwright smoke tests to CI for critical page renders" />
        </Card>
      </Section>
    </div>
  )
}
/* ── shared components ──────────────────────────────────────────────── */
/**
 * Page-section wrapper: bold heading, optional muted subtitle, and the
 * section body spaced below. Layout-only — renders no data of its own.
 */
function Section({
  title,
  subtitle,
  children,
}: {
  title: string
  subtitle?: string
  children: React.ReactNode
}) {
  const heading = (
    <h2
      className="text-[22px] font-bold text-zinc-900 dark:text-zinc-200"
      style={{ letterSpacing: "-0.01em" }}
    >
      {title}
    </h2>
  )
  const sub = subtitle ? (
    <p className="mt-1.5 text-sm leading-relaxed text-zinc-500 dark:text-zinc-400">{subtitle}</p>
  ) : null
  return (
    <div>
      {heading}
      {sub}
      <div className="mt-6">{children}</div>
    </div>
  )
}
/** Rounded content card with fixed padding; callers append classes via `className`. */
function Card({ children, className = "" }: { children: React.ReactNode; className?: string }) {
  const baseClass =
    "rounded-xl border border-zinc-200 dark:border-zinc-800 bg-white dark:bg-zinc-900 shadow-sm dark:shadow-none"
  return (
    <div className={`${baseClass} ${className}`} style={{ padding: "28px 32px" }}>
      {children}
    </div>
  )
}
/** One row in an action checklist: a bullet slot plus the action text. */
function ActionItem({ text }: { text: string }) {
  const rowClass =
    "flex items-center gap-3 border-b border-zinc-100 dark:border-zinc-800 py-2.5 last:border-b-0"
  return (
    <div className={rowClass}>
      {/* Bullet slot (glyph appears stripped in this copy — confirm against original). */}
      <span className="text-sm text-zinc-400 dark:text-zinc-500"></span>
      <span className="text-sm text-zinc-800 dark:text-zinc-200">{text}</span>
    </div>
  )
}
/**
 * Card describing a discovered issue: colored left border, severity badge,
 * body text, and an optional "Fix:" line when a remediation exists.
 * `borderColor`/`badgeColor` are Tailwind class fragments supplied by callers.
 */
function IssueCard({
  title,
  severity,
  borderColor,
  badgeColor,
  body,
  fix,
}: {
  title: string
  severity: string
  borderColor: string
  badgeColor: string
  body: string
  fix?: string
}) {
  const shellClass = `mb-4 rounded-xl border border-zinc-200 dark:border-zinc-800 border-l-4 ${borderColor} bg-white dark:bg-zinc-900 p-5 shadow-sm dark:shadow-none`
  const badgeClass = `inline-block rounded-full ${badgeColor} px-2.5 py-0.5 text-xs font-semibold`
  return (
    <div className={shellClass}>
      <div className="mb-3 flex flex-wrap items-center gap-3">
        <span className="text-base font-bold text-zinc-900 dark:text-zinc-200">{title}</span>
        <span className={badgeClass}>{severity}</span>
      </div>
      <p className="text-sm leading-relaxed text-zinc-600 dark:text-zinc-400">{body}</p>
      {fix ? (
        <p className="mt-2 text-sm text-zinc-700 dark:text-zinc-200">
          Fix: <span className="font-semibold">{fix}</span>
        </p>
      ) : null}
    </div>
  )
}
/**
 * Minimal presentational table: header row from `columns`, zebra-striped
 * body rows from `rows` (one string array per row, positional cells).
 */
function DataTable({ columns, rows }: { columns: string[]; rows: string[][] }) {
  const headRowClass =
    "border-b-2 border-zinc-200 dark:border-zinc-800 text-left text-[11px] font-bold uppercase tracking-wider text-amber-600 dark:text-amber-400"
  // Odd rows get a faint background for readability.
  const bodyRowClass = (rowIdx: number) =>
    `border-b border-zinc-100 dark:border-zinc-800 ${rowIdx % 2 ? "bg-zinc-50/50 dark:bg-white/[0.02]" : ""}`
  return (
    <div className="overflow-x-auto">
      <table className="w-full text-sm">
        <thead>
          <tr className={headRowClass}>
            {columns.map(heading => (
              <th key={heading} className="py-2.5 pr-4">
                {heading}
              </th>
            ))}
          </tr>
        </thead>
        <tbody>
          {rows.map((cells, rowIdx) => (
            <tr key={rowIdx} className={bodyRowClass(rowIdx)}>
              {cells.map((cell, cellIdx) => (
                <td key={cellIdx} className="py-2.5 pr-4 text-zinc-700 dark:text-zinc-200">
                  {cell}
                </td>
              ))}
            </tr>
          ))}
        </tbody>
      </table>
    </div>
  )
}

View file

@ -1,4 +1,4 @@
import { PrismaClient } from "@prisma/client"
import { cache } from "react"
import { notFound } from "next/navigation"
import Link from "next/link"
import { ExperimentMetadata } from "@/lib/types" // Your defined types
@ -6,13 +6,29 @@ import MonacoDiffViewer from "@/components/trace/monaco-diff-viewer"
import { Metadata } from "next" // For Next.js metadata API
import { auth0 } from "@/lib/auth0"
import { isTeamMember } from "@/app/utils/auth"
import { prisma } from "@/lib/prisma"
interface TraceDetailsPageProps {
params: Promise<{
trace_id: string
}>
}
const prisma = new PrismaClient()
// Deduplicate the Prisma query between generateMetadata and the page component.
// React cache() ensures the same trace_id only hits the DB once per request.
const getOptimizationFeature = cache(async (trace_id: string) => {
return prisma.optimization_features.findUnique({
where: { trace_id },
select: {
experiment_metadata: true,
metadata: true,
organization: true,
repository: true,
review_quality: true,
review_explanation: true,
},
})
})
// Function to generate dynamic metadata (e.g., page title)
export async function generateMetadata(props: TraceDetailsPageProps): Promise<Metadata> {
const params = await props.params
@ -22,16 +38,7 @@ export async function generateMetadata(props: TraceDetailsPageProps): Promise<Me
// For simplicity, we'll use a generic title or one derived if data is fetched quickly
// A more optimized approach might involve a separate lightweight query or using default values.
const optimizationFeature = await prisma.optimization_features.findUnique({
where: { trace_id },
select: {
experiment_metadata: true,
organization: true,
repository: true,
review_quality: true,
review_explanation: true,
},
})
const optimizationFeature = await getOptimizationFeature(trace_id)
let title = `Python Diff Trace: ${trace_id.substring(0, 8)}`
if (optimizationFeature?.experiment_metadata) {
@ -116,33 +123,12 @@ export default async function TraceDetailsPage(props: TraceDetailsPageProps) {
)
}
let optimizationFeature: {
experiment_metadata: unknown
metadata: unknown
organization: string | null
repository: string | null
review_quality: string | null
review_explanation: string | null
} | null = null
// Uses the same cached function as generateMetadata — one DB round-trip per request
let optimizationFeature: Awaited<ReturnType<typeof getOptimizationFeature>> = null
try {
optimizationFeature = await prisma.optimization_features.findUnique({
where: { trace_id: trace_id },
select: {
experiment_metadata: true, // Prisma handles JSONB parsing
metadata: true, // Include metadata field which stores modified code
organization: true,
repository: true,
review_quality: true,
review_explanation: true,
// Select other fields if needed by MonacoDiffViewer for its header/display
},
})
optimizationFeature = await getOptimizationFeature(trace_id)
} catch (error) {
console.error(`[TracePage] Failed to fetch data for trace_id ${trace_id}:`, error)
// Optionally, render a specific error UI component here instead of notFound()
// For now, notFound() will trigger the 404 page, which is reasonable if data fetch fails badly.
// Or you could pass an error state to MonacoDiffViewer to display.
// For this detailed guide, we assume MonacoDiffViewer will handle 'null' metadata.
}
// If feature is not found, or metadata is explicitly null (and you expect it for valid traces)

View file

@ -2,7 +2,7 @@
import { auth0 } from "@/lib/auth0"
import { cache } from "react"
import { isTeamMemberCheck } from "@/lib/team-members"
import { isTeamMemberCheck, isMembenchAllowed } from "@/lib/team-members"
const getCachedSession = cache(async () => {
return auth0.getSession()
@ -55,3 +55,9 @@ export const getAuthenticatedTeamSession = cache(async () => {
return isTeamMemberCheck(session.user) ? session : null
})
export async function canAccessMembench(): Promise<boolean> {
const session = await getCachedSession()
if (!session?.user) return false
return isMembenchAllowed(session.user)
}

View file

@ -6,7 +6,7 @@ import { Editor, DiffEditor } from "@monaco-editor/react"
import type { editor } from "monaco-editor"
import ReactMarkdown from "react-markdown"
import remarkGfm from "remark-gfm"
import * as Diff from "diff"
import { createPatch } from "diff"
import {
ChevronRight,
FileText,
@ -383,7 +383,7 @@ const MonacoDiffEditorGithub: React.FC<MonacoDiffEditorGithubProps> = ({
diff += `\n`
Object.entries(modifiedContents).forEach(([filePath, content]) => {
const patch = Diff.createPatch(
const patch = createPatch(
filePath,
content.oldContent,
content.newContent,

View file

@ -22,12 +22,14 @@ export function ConditionalLayout({
const [isAnnouncementVisible, setIsAnnouncementVisible] = useState(true)
const shouldHideLayout =
pathname !== null && (
HIDDEN_PAGES.includes(pathname) ||
pathname !== null &&
(HIDDEN_PAGES.includes(pathname) ||
pathname.startsWith("/trace/") ||
pathname.startsWith("/observability") ||
!user
)
pathname.startsWith("/roadmap") ||
pathname.startsWith("/report") ||
pathname.startsWith("/membench") ||
!user)
// Auto-collapse announcement after 4 seconds
useEffect(() => {

View file

@ -106,8 +106,10 @@ export function Sidebar({ className, user, isLoading, error }: SidebarProps): JS
if (subscriptionFetchRef.current) return
subscriptionFetchRef.current = true
let cancelled = false
getCurrentUserSubscriptionData()
.then(data => {
if (cancelled) return
if (data) {
setSubscription({
optimizations_used: data.optimizations_used || 0,
@ -117,10 +119,14 @@ export function Sidebar({ className, user, isLoading, error }: SidebarProps): JS
setSubscription(null)
}
})
.catch(() => setSubscription(null))
.finally(() => {
subscriptionFetchRef.current = false
.catch(() => {
if (!cancelled) setSubscription(null)
})
return () => {
cancelled = true
subscriptionFetchRef.current = false
}
}, [mode])
const toggleTheme = () => {

View file

@ -0,0 +1,372 @@
"use client"
import { useMemo } from "react"
import { Bar } from "react-chartjs-2"
import { Chart as ChartJS, CategoryScale, LinearScale, BarElement, Tooltip, Legend } from "chart.js"
import ChartDataLabels from "chartjs-plugin-datalabels"
// Register only the Chart.js pieces these bar charts use (keeps the bundle lean),
// plus the datalabels plugin for on-bar value labels.
ChartJS.register(CategoryScale, LinearScale, BarElement, Tooltip, Legend, ChartDataLabels)
// Shared palette for the membench charts (zinc grays + brand green;
// DARK is used for label text drawn on top of colored bars).
const GREEN = "#4ade80"
const LIGHT_GRAY = "#71717a"
const GRAY = "#a1a1aa"
const DARK = "#09090b"
const ZINC_700 = "#3f3f46"
const ZINC_400 = "#a1a1aa"
// Reads the OS color-scheme preference; defaults to dark during SSR.
// NOTE(review): despite the use* name this is not a real hook — it neither
// subscribes to the media query nor holds state, so components won't
// re-render when the theme changes, and the SSR default of `true` can
// mismatch a light-preferring client on hydration. Confirm this is acceptable
// (useSyncExternalStore over matchMedia would make it reactive).
function useIsDark() {
  if (typeof window === "undefined") return true
  return window.matchMedia("(prefers-color-scheme: dark)").matches
}
/**
 * Memoized base Chart.js options shared by the membench charts:
 * top legend, datalabels disabled by default (each dataset opts in),
 * and theme-aware tick/grid colors. Recomputed only when `isDark` flips.
 */
function useChartOptions(isDark: boolean) {
  return useMemo(
    () => ({
      responsive: true,
      maintainAspectRatio: false,
      plugins: {
        legend: {
          position: "top" as const,
          labels: {
            color: isDark ? ZINC_400 : "#52525b",
            font: { family: "Inter, system-ui, sans-serif", size: 13 },
          },
        },
        datalabels: {
          // Off globally; charts enable labels per dataset.
          display: false as const,
        },
      },
      scales: {
        x: {
          ticks: { color: isDark ? ZINC_400 : "#71717a", font: { size: 12 } },
          grid: { display: false },
          border: { display: false },
        },
        y: {
          ticks: { color: isDark ? ZINC_400 : "#71717a", font: { size: 12 } },
          grid: { color: isDark ? ZINC_700 : "#e4e4e7" },
          border: { display: false },
        },
      },
    }),
    [isDark],
  )
}
/**
 * Grouped vertical bar chart: peak memory (GB) for three benchmark
 * scenarios, baseline vs. current. Values are hard-coded measurements;
 * per-dataset `datalabels` draw the GB figures above each bar while the
 * plugin-level default stays off.
 */
export function PeakMemoryChart() {
  const isDark = useIsDark()
  const baseOptions = useChartOptions(isDark)
  // Data memoized on theme so label colors track dark mode.
  const data = useMemo(
    () => ({
      labels: [
        "Full Test Suite (18 tests)",
        "API hi_res Pipeline (16p PDF)",
        "od_only (Seeda Case Study)",
      ],
      datasets: [
        {
          label: "Baseline (pre-Feb 2026)",
          data: [1.66, 1.515, 1.127],
          backgroundColor: LIGHT_GRAY,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${v.toFixed(3)} GB`,
            color: isDark ? GRAY : "#71717a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "Current (main)",
          data: [1.473, 1.419, 1.046],
          backgroundColor: GREEN,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${v.toFixed(3)} GB`,
            color: isDark ? GREEN : "#16a34a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[380px]">
      <Bar
        data={data}
        options={{
          ...baseOptions,
          // Top padding keeps the above-bar labels inside the canvas.
          layout: { padding: { top: 24 } },
          plugins: {
            ...baseOptions.plugins,
            datalabels: { display: false },
          },
          scales: {
            ...baseOptions.scales,
            y: {
              ...baseOptions.scales.y,
              max: 2.0,
              title: {
                display: true,
                text: "Peak Memory (GB)",
                color: isDark ? ZINC_400 : "#71717a",
              },
            },
          },
        }}
      />
    </div>
  )
}
/**
 * Horizontal grouped bar chart: total bytes allocated (GB) per allocation
 * site, baseline vs. current. Zero entries appear to mark sites present in
 * only one of the two runs ("render" baseline-only, "PIL Image.tobytes (2)"
 * current-only) — confirm against the profiler output.
 */
export function AllocatorChart() {
  const isDark = useIsDark()
  const baseOptions = useChartOptions(isDark)
  const data = useMemo(
    () => ({
      labels: [
        "_create_inference_session",
        "PIL Image.tobytes",
        "PIL Image.new",
        "load_prepare",
        "render",
        "PIL Image.tobytes (2)",
      ],
      datasets: [
        {
          label: "Baseline",
          data: [1.386, 1.188, 1.001, 0.751, 0.649, 0],
          backgroundColor: LIGHT_GRAY,
          borderRadius: 4,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "right" as const,
            formatter: (v: number) => `${v.toFixed(2)} GB`,
            color: isDark ? GRAY : "#71717a",
            font: { size: 12, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "Current",
          data: [1.328, 1.802, 1.556, 1.172, 0, 0.889],
          backgroundColor: GREEN,
          borderRadius: 4,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "right" as const,
            formatter: (v: number) => `${v.toFixed(2)} GB`,
            color: isDark ? GREEN : "#16a34a",
            font: { size: 12, family: "Inter, system-ui, sans-serif" },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[320px]">
      <Bar
        data={data}
        options={{
          ...baseOptions,
          // Horizontal bars; right padding leaves room for end-of-bar labels.
          indexAxis: "y",
          layout: { padding: { right: 60 } },
          plugins: {
            ...baseOptions.plugins,
            datalabels: { display: false },
          },
          scales: {
            x: {
              ...baseOptions.scales.x,
              title: {
                display: true,
                text: "Total Allocated (GB)",
                color: isDark ? ZINC_400 : "#71717a",
              },
              grid: { color: isDark ? ZINC_700 : "#e4e4e7" },
            },
            y: {
              ...baseOptions.scales.y,
              grid: { display: false },
            },
          },
        }}
      />
    </div>
  )
}
/**
 * Horizontal "headroom" chart: the 32 GB pod limit drawn as a muted
 * background bar with the baseline and current peak bars overlaid on the
 * same row (x axis deliberately NOT stacked, so the bars overlap instead
 * of summing). All values are hard-coded measurements.
 *
 * Fix: the two peak-bar label colors were written as the dead ternary
 * `isDark ? DARK : DARK` (both branches identical); replaced with `DARK`.
 */
export function HeadroomChart() {
  const isDark = useIsDark()
  const data = useMemo(
    () => ({
      labels: ["Pod RAM"],
      datasets: [
        {
          label: "Pod Limit (32 GB)",
          data: [32],
          backgroundColor: isDark ? "rgba(39,39,42,0.5)" : "rgba(228,228,231,0.8)",
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "center" as const,
            align: "center" as const,
            formatter: (): string => "32 GB limit",
            color: isDark ? GRAY : "#71717a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "Baseline Peak (1.66 GB)",
          data: [1.66],
          backgroundColor: "rgba(251,191,36,0.7)",
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "right" as const,
            formatter: (): string => "1.66 GB",
            // Label sits on an amber bar, so it is dark in both themes
            // (was a redundant `isDark ? DARK : DARK`).
            color: DARK,
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "Current Peak (1.47 GB)",
          data: [1.473],
          backgroundColor: GREEN,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "right" as const,
            formatter: (): string => "1.47 GB peak",
            // Same fix: dark text on the green bar in both themes.
            color: DARK,
            font: { size: 14, family: "Inter, system-ui, sans-serif", weight: "bold" as const },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[180px]">
      <Bar
        data={data}
        options={{
          indexAxis: "y",
          responsive: true,
          maintainAspectRatio: false,
          scales: {
            // y stacked keeps all three datasets on one hidden category row;
            // x unstacked makes the bars overlay rather than accumulate.
            y: { stacked: true, display: false },
            x: {
              stacked: false,
              max: 35,
              grid: { color: isDark ? ZINC_700 : "#e4e4e7" },
              title: {
                display: true,
                text: "Memory (GB)",
                color: isDark ? ZINC_400 : "#71717a",
              },
              ticks: { color: isDark ? ZINC_400 : "#71717a" },
              border: { display: false },
            },
          },
          plugins: {
            legend: {
              position: "top" as const,
              labels: {
                color: isDark ? ZINC_400 : "#52525b",
                font: { size: 12, family: "Inter, system-ui, sans-serif" },
              },
            },
            datalabels: { display: false },
          },
        }}
      />
    </div>
  )
}
/**
 * Single-category bar chart comparing the largest single allocation (MB),
 * baseline (268 MB) vs. current (134 MB). Labels are drawn above the bars
 * via per-dataset datalabels.
 */
export function MaxAllocChart() {
  const isDark = useIsDark()
  const baseOptions = useChartOptions(isDark)
  const data = useMemo(
    () => ({
      labels: ["Largest Single Allocation"],
      datasets: [
        {
          label: "Baseline",
          data: [268],
          backgroundColor: LIGHT_GRAY,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${v} MB`,
            color: isDark ? GRAY : "#71717a",
            font: { size: 14, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "Current",
          data: [134],
          backgroundColor: GREEN,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${v} MB`,
            color: isDark ? GREEN : "#16a34a",
            font: { size: 14, family: "Inter, system-ui, sans-serif" },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[280px]">
      <Bar
        data={data}
        options={{
          ...baseOptions,
          // Headroom for the above-bar labels.
          layout: { padding: { top: 24 } },
          plugins: {
            ...baseOptions.plugins,
            datalabels: { display: false },
          },
          scales: {
            ...baseOptions.scales,
            y: {
              ...baseOptions.scales.y,
              max: 320,
              title: {
                display: true,
                text: "Allocation Size (MB)",
                color: isDark ? ZINC_400 : "#71717a",
              },
            },
          },
        }}
      />
    </div>
  )
}

View file

@ -0,0 +1,46 @@
"use client"
import { useState } from "react"
import { cn } from "@/lib/utils"
/** Props for MembenchToggle: the two pre-rendered report views to switch between. */
interface MembenchToggleProps {
  // Executive-summary view, shown by default.
  execView: React.ReactNode
  // Engineering-details view.
  engView: React.ReactNode
}
/**
 * Client-side tab switch between the executive and engineering report
 * views. Both views arrive pre-rendered; only which one is shown changes.
 * Defaults to the executive summary.
 */
export function MembenchToggle({ execView, engView }: MembenchToggleProps) {
  const [view, setView] = useState<"exec" | "eng">("exec")
  const tabs = [
    { id: "exec", label: "Executive Summary" },
    { id: "eng", label: "Engineering Details" },
  ] as const
  return (
    <>
      <div className="flex justify-center mt-10 mb-2">
        <div className="inline-flex rounded-xl border border-zinc-200 dark:border-zinc-800 bg-zinc-100 dark:bg-zinc-900 p-1">
          {tabs.map(tab => (
            <button
              key={tab.id}
              onClick={() => setView(tab.id)}
              className={cn(
                "rounded-lg px-6 py-2.5 text-sm font-semibold transition-all",
                view === tab.id
                  ? "bg-green-400 text-zinc-950 shadow-sm"
                  : "text-zinc-500 dark:text-zinc-400 hover:text-zinc-800 dark:hover:text-zinc-200",
              )}
            >
              {tab.label}
            </button>
          ))}
        </div>
      </div>
      {view === "exec" ? execView : engView}
    </>
  )
}

View file

@ -8,7 +8,7 @@ import {
type ResponseSegment,
} from "@/lib/observability-response-parse"
import { CopyButton } from "@/components/observability/copy-button"
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter"
import { SyntaxHighlighter } from "@/lib/syntax-highlighter"
import { oneDark } from "react-syntax-highlighter/dist/esm/styles/prism"
import { useState } from "react"
@ -89,13 +89,7 @@ export function ParsedResponseView({ rawResponse, callType }: ParsedResponseView
.map((id, i) => {
const pos = i + 1
const label =
pos === 1
? "1st"
: pos === 2
? "2nd"
: pos === 3
? "3rd"
: `${pos}th`
pos === 1 ? "1st" : pos === 2 ? "2nd" : pos === 3 ? "3rd" : `${pos}th`
return (
<li
key={`${id}-${i}`}
@ -104,9 +98,7 @@ export function ParsedResponseView({ rawResponse, callType }: ParsedResponseView
<span className="text-gray-500 dark:text-gray-400 font-medium">
{label}
</span>
<span className="text-gray-900 dark:text-gray-100">
{id.trim()}
</span>
<span className="text-gray-900 dark:text-gray-100">{id.trim()}</span>
</li>
)
})}
@ -139,7 +131,10 @@ export function ParsedResponseView({ rawResponse, callType }: ParsedResponseView
{segments.map((seg, i) => {
const textJSON = seg.kind === "text" ? tryFormatJSON(seg.content) : null
return seg.kind === "text" ? (
<div key={i} className="rounded-lg border border-gray-200 dark:border-gray-700 overflow-hidden">
<div
key={i}
className="rounded-lg border border-gray-200 dark:border-gray-700 overflow-hidden"
>
{textJSON ? (
<SyntaxHighlighter
language="json"
@ -163,7 +158,10 @@ export function ParsedResponseView({ rawResponse, callType }: ParsedResponseView
)}
</div>
) : (
<div key={i} className="rounded-lg overflow-hidden border border-gray-200 dark:border-gray-700">
<div
key={i}
className="rounded-lg overflow-hidden border border-gray-200 dark:border-gray-700"
>
<div className="flex items-center justify-between px-3 py-1.5 bg-gray-100 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700">
<span className="text-xs font-medium text-gray-600 dark:text-gray-400">
{seg.language || "code"}

View file

@ -0,0 +1,369 @@
"use client"
import { useMemo } from "react"
import { Bar, Doughnut } from "react-chartjs-2"
import {
Chart as ChartJS,
CategoryScale,
LinearScale,
BarElement,
ArcElement,
Tooltip,
Legend,
} from "chart.js"
import ChartDataLabels from "chartjs-plugin-datalabels"
ChartJS.register(
CategoryScale,
LinearScale,
BarElement,
ArcElement,
Tooltip,
Legend,
ChartDataLabels,
)
/* ── colors matching the Dash app exactly ─────────────────────────── */
const ACCENT = "#ffd227" // Codeflash brand yellow (Dash app primary)
const ACCENT_DIM = "#d08e0d" // amber-600 muted
const AMBER = "#fbbf24" // amber-400
const AMBER_DARK = "#92400e" // amber-900
const GREEN = "#4ade80" // green-400
const BLUE = "#60a5fa" // blue-400
const PURPLE = "#8B5CF6" // violet-500
const GRAY = "#a1a1aa" // zinc-400
const LIGHT_GRAY = "#71717a" // zinc-500
const ZINC_700 = "#3f3f46"
const ZINC_400 = "#a1a1aa"
const SLATE = "#e4e4e7" // zinc-200 (Dash app primary text)
// Reads the OS color-scheme preference; defaults to dark during SSR.
// NOTE(review): not a real hook — no subscription and no state, so charts
// won't re-render if the theme changes, and the SSR default of `true` can
// mismatch a light-preferring client on hydration. Same pattern as the
// membench charts file; confirm this is intentional.
function useIsDark() {
  if (typeof window === "undefined") return true
  return window.matchMedia("(prefers-color-scheme: dark)").matches
}
/**
 * Memoized base Chart.js options shared by the observability charts:
 * top legend, datalabels off by default (each chart opts in per dataset),
 * theme-aware tick/grid colors. Recomputed only when `isDark` flips.
 */
function useChartOptions(isDark: boolean) {
  return useMemo(
    () => ({
      responsive: true,
      maintainAspectRatio: false,
      plugins: {
        legend: {
          position: "top" as const,
          labels: {
            color: isDark ? ZINC_400 : "#52525b",
            font: { family: "Inter, system-ui, sans-serif", size: 13 },
          },
        },
        datalabels: {
          display: false as const, // off by default, each chart opts in
        },
      },
      scales: {
        x: {
          ticks: { color: isDark ? ZINC_400 : "#71717a", font: { size: 12 } },
          grid: { display: false },
          border: { display: false },
        },
        y: {
          ticks: { color: isDark ? ZINC_400 : "#71717a", font: { size: 12 } },
          grid: { color: isDark ? ZINC_700 : "#e4e4e7" },
          border: { display: false },
        },
      },
    }),
    [isDark],
  )
}
/**
 * Grouped bar chart: p75/p95 span latency (ms) before vs. after the
 * optimization PRs. Hard-coded measurements; "After" bars use the brand
 * accent color, with value labels drawn above each bar.
 */
export function LatencyChart() {
  const isDark = useIsDark()
  const baseOptions = useChartOptions(isDark)
  const data = useMemo(
    () => ({
      labels: ["Typical Response (p75)", "Worst Case (p95)"],
      datasets: [
        {
          label: "Before",
          data: [289, 1242],
          backgroundColor: LIGHT_GRAY,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${v.toLocaleString()} ms`,
            color: isDark ? GRAY : "#71717a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "After",
          data: [216, 562],
          backgroundColor: ACCENT,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${v.toLocaleString()} ms`,
            color: isDark ? ACCENT : "#b45309",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[340px]">
      <Bar
        data={data}
        options={{
          ...baseOptions,
          // Top padding keeps the above-bar labels inside the canvas.
          layout: { padding: { top: 24 } },
          plugins: {
            ...baseOptions.plugins,
            datalabels: { display: false },
          },
          scales: {
            ...baseOptions.scales,
            y: {
              ...baseOptions.scales.y,
              title: {
                display: true,
                text: "Response Time (ms)",
                color: isDark ? ZINC_400 : "#71717a",
              },
            },
          },
        }}
      />
    </div>
  )
}
/**
 * Horizontal bar chart: KB removed from the initial bundle per
 * optimization, single dataset with one accent-palette color per bar
 * (legend hidden since there is only one series).
 *
 * NOTE(review): labels embed "\n" for two-line display — verify the
 * chart renders these as line breaks in this Chart.js version.
 */
export function BundleChart() {
  const isDark = useIsDark()
  const baseOptions = useChartOptions(isDark)
  const data = useMemo(
    () => ({
      labels: [
        "Error Monitoring\n(Sentry Replay)",
        "Code Viewer\n(Syntax Highlighter)",
        "Animations Library\n(framer-motion)",
        "Syntax Theme\n(PrismLight)",
      ],
      datasets: [
        {
          data: [600, 132, 70, 50],
          backgroundColor: [ACCENT, ACCENT_DIM, AMBER, AMBER_DARK],
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "right" as const,
            formatter: (v: number) => `${v} KB`,
            color: isDark ? SLATE : "#27272a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[250px]">
      <Bar
        data={data}
        options={{
          ...baseOptions,
          // Horizontal bars; right padding leaves room for end-of-bar labels.
          indexAxis: "y",
          layout: { padding: { right: 60 } },
          plugins: {
            ...baseOptions.plugins,
            legend: { display: false },
            datalabels: { display: false },
          },
          scales: {
            x: {
              ...baseOptions.scales.x,
              title: {
                display: true,
                text: "Kilobytes Removed from Initial Page Load",
                color: isDark ? ZINC_400 : "#71717a",
              },
              grid: { color: isDark ? ZINC_700 : "#e4e4e7" },
            },
            y: {
              ...baseOptions.scales.y,
              grid: { display: false },
            },
          },
        }}
      />
    </div>
  )
}
/**
 * Grouped bar chart: onboarding load time (ms, labeled in seconds)
 * average vs. p95, before and after.
 *
 * NOTE(review): the "After" dataset uses the same value (8108) for both
 * average and p95 — presumably only a single post-merge sample existed
 * at measurement time; confirm against the Sentry data.
 */
export function OnboardingChart() {
  const isDark = useIsDark()
  const baseOptions = useChartOptions(isDark)
  const data = useMemo(
    () => ({
      labels: ["Average", "Worst Case (p95)"],
      datasets: [
        {
          label: "Before",
          data: [10275, 20092],
          backgroundColor: LIGHT_GRAY,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            // Values are stored in ms but labeled in seconds.
            formatter: (v: number) => `${(v / 1000).toFixed(1)}s`,
            color: isDark ? GRAY : "#71717a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
        {
          label: "After",
          data: [8108, 8108],
          backgroundColor: GREEN,
          borderRadius: 6,
          datalabels: {
            display: true,
            anchor: "end" as const,
            align: "top" as const,
            formatter: (v: number) => `${(v / 1000).toFixed(1)}s`,
            color: isDark ? GREEN : "#16a34a",
            font: { size: 13, family: "Inter, system-ui, sans-serif" },
          },
        },
      ],
    }),
    [isDark],
  )
  return (
    <div className="h-[300px]">
      <Bar
        data={data}
        options={{
          ...baseOptions,
          layout: { padding: { top: 24 } },
          plugins: {
            ...baseOptions.plugins,
            datalabels: { display: false },
          },
          scales: {
            ...baseOptions.scales,
            y: {
              ...baseOptions.scales.y,
              title: {
                display: true,
                text: "Load Time (ms)",
                color: isDark ? ZINC_400 : "#71717a",
              },
            },
          },
        }}
      />
    </div>
  )
}
export function CategoryPie() {
  const isDark = useIsDark()

  // Distribution of shipped changes across improvement categories.
  const data = useMemo(
    () => ({
      labels: [
        "Faster Load Times",
        "Smaller Downloads",
        "Better Monitoring",
        "Testing & CI",
        "Cleanup",
      ],
      datasets: [
        {
          data: [7, 6, 4, 2, 1],
          backgroundColor: [GREEN, BLUE, AMBER, PURPLE, GRAY],
          borderWidth: 0,
        },
      ],
    }),
    [],
  )

  // Total shown in the doughnut's hole. Derived from the dataset (previously
  // hard-coded as "20") so the center label can never drift out of sync with
  // the per-category numbers if the data changes.
  const totalChanges = useMemo(
    () => data.datasets[0].data.reduce((sum, n) => sum + n, 0),
    [data],
  )

  // Inline Chart.js plugin that paints "<total>" over "changes" at the
  // geometric center of the chart area after each draw pass.
  const centerTextPlugin = useMemo(
    () => ({
      id: "centerText",
      afterDraw(chart: ChartJS) {
        const { ctx, chartArea } = chart
        const centerX = (chartArea.left + chartArea.right) / 2
        const centerY = (chartArea.top + chartArea.bottom) / 2
        ctx.save()
        ctx.textAlign = "center"
        ctx.textBaseline = "middle"
        // Total count (bold, upper line)
        ctx.font = "bold 20px Inter, system-ui, sans-serif"
        ctx.fillStyle = isDark ? SLATE : "#27272a"
        ctx.fillText(String(totalChanges), centerX, centerY - 8)
        // Unit label (lower line)
        ctx.font = "13px Inter, system-ui, sans-serif"
        ctx.fillStyle = isDark ? GRAY : "#71717a"
        ctx.fillText("changes", centerX, centerY + 12)
        ctx.restore()
      },
    }),
    [isDark, totalChanges],
  )
  return (
    <div className="h-[280px] flex items-center justify-center">
      <Doughnut
        data={data}
        plugins={[centerTextPlugin]}
        options={{
          responsive: true,
          maintainAspectRatio: false,
          cutout: "55%",
          plugins: {
            legend: { display: false },
            tooltip: {
              callbacks: {
                label: ctx => `${ctx.label}: ${ctx.parsed} changes`,
              },
            },
            // Slice captions: "<category>\n<count>" outside each slice.
            datalabels: {
              display: true,
              color: isDark ? SLATE : "#27272a",
              font: { size: 12, family: "Inter, system-ui, sans-serif" },
              formatter: (_value: number, ctx) => {
                const label = ctx.chart.data.labels?.[ctx.dataIndex] ?? ""
                const value = ctx.chart.data.datasets[0].data[ctx.dataIndex]
                return `${label}\n${value}`
              },
              textAlign: "center" as const,
              anchor: "end" as const,
              align: "end" as const,
              offset: 4,
            },
          },
        }}
      />
    </div>
  )
}

View file

@ -0,0 +1,46 @@
"use client"
import { useState } from "react"
import { cn } from "@/lib/utils"
interface ReportToggleProps {
  /** Content rendered while the "Executive Summary" tab is selected. */
  execView: React.ReactNode
  /** Content rendered while the "Engineering Details" tab is selected. */
  engView: React.ReactNode
}
export function ReportToggle({ execView, engView }: ReportToggleProps) {
  // Which of the two report variants is currently visible.
  const [view, setView] = useState<"exec" | "eng">("exec")

  // Shared button styling; the active tab gets the amber pill treatment.
  const tabClass = (active: boolean) =>
    cn(
      "rounded-lg px-6 py-2.5 text-sm font-semibold transition-all",
      active
        ? "bg-amber-400 text-zinc-950 shadow-sm"
        : "text-zinc-500 dark:text-zinc-400 hover:text-zinc-800 dark:hover:text-zinc-200",
    )

  return (
    <>
      <div className="flex justify-center mt-10 mb-2">
        <div className="inline-flex rounded-xl border border-zinc-200 dark:border-zinc-800 bg-zinc-100 dark:bg-zinc-900 p-1">
          <button
            onClick={() => setView("exec")}
            className={tabClass(view === "exec")}
          >
            Executive Summary
          </button>
          <button
            onClick={() => setView("eng")}
            className={tabClass(view === "eng")}
          >
            Engineering Details
          </button>
        </div>
      </div>
      {view === "exec" ? execView : engView}
    </>
  )
}

View file

@ -3,30 +3,31 @@
import React, { useState, useEffect, useMemo, useCallback, useRef } from "react"
import { DiffEditor, useMonaco, DiffOnMount } from "@monaco-editor/react"
import {
AlertTriangle,
BarChart3,
CheckCircle2,
XCircle,
GitPullRequest,
Zap,
TestTube,
ChevronDown,
ChevronUp,
Edit3,
ExternalLink,
FileCode,
Edit3,
Save,
X,
FileText,
GitPullRequest,
Loader2,
Lock,
Monitor,
Save,
Smartphone,
BarChart3,
TestTube,
X,
XCircle,
Zap,
} from "lucide-react"
// Ensure you have lucide-react installed as per your package.json
import { Loader2, FileText, AlertTriangle } from "lucide-react"
import type { ExperimentMetadata, DiffContents } from "@/lib/types" // Adjust path if needed
import { getMonacoLanguage } from "@/lib/utils"
import ReactMarkdown from "react-markdown"
import remarkGfm from "remark-gfm"
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter"
import { SyntaxHighlighter } from "@/lib/syntax-highlighter"
import { vscDarkPlus } from "react-syntax-highlighter/dist/esm/styles/prism"
interface MonacoDiffViewerProps {

View file

@ -14,8 +14,8 @@ Sentry.init({
? "https://0fa0f40b2d709e4f1eb9aac76ff9e6be@o4506833230561280.ingest.us.sentry.io/4506833279582208"
: undefined,
// Adjust this value in production, or use tracesSampler for greater control
tracesSampleRate: 1,
// Sample 10% in production, 100% in dev
tracesSampleRate: isProduction ? 0.1 : 1,
// Setting this option to true will print useful information to the console while you're setting up Sentry.
debug: false,
@ -26,12 +26,18 @@ Sentry.init({
// in development and sample at a lower rate in production
replaysSessionSampleRate: 0.1,
// You can remove this option if you're not planning to use the Sentry Session Replay feature:
integrations: [
Sentry.replayIntegration({
// Additional Replay configuration goes in here, for example:
// Replay is NOT included here — it's lazy-loaded below to keep it out of
// the initial bundle (~300 KB per copy, two copies were previously shipped).
integrations: [Sentry.browserTracingIntegration({ enableLongAnimationFrame: true })],
})
// Lazy-load Session Replay so the ~300 KB replay bundle is fetched after
// the page is interactive rather than blocking initial load.
Sentry.lazyLoadIntegration("replayIntegration").then((replayIntegration) => {
Sentry.addIntegration(
replayIntegration({
maskAllText: true,
blockAllMedia: true,
}),
],
)
})

View file

@ -1,10 +1,43 @@
import * as Sentry from "@sentry/nextjs"
export function register() {
// Sentry initialization is now handled by dedicated config files:
// - sentry.server.config.ts for server-side
// - sentry.client.config.ts for client-side
// This prevents duplicate initialization issues with Sentry v9
const otelEnabled =
process.env.NODE_ENV === "production" ||
process.env.OTEL_ENABLED === "true"
export async function register() {
if (!otelEnabled) return
if (process.env.NEXT_RUNTIME !== "nodejs") return
// Dynamic imports so OTel packages are only loaded when tracing is active
const { NodeSDK } = await import("@opentelemetry/sdk-node")
const { getNodeAutoInstrumentations } = await import(
"@opentelemetry/auto-instrumentations-node"
)
const { PrismaInstrumentation } = await import("@prisma/instrumentation")
const {
SentrySpanProcessor,
SentryPropagator,
SentrySampler,
} = await import("@sentry/opentelemetry")
const sentryClient = Sentry.getClient()
const sdk = new NodeSDK({
sampler: sentryClient ? new SentrySampler(sentryClient) : undefined,
spanProcessors: [new SentrySpanProcessor()],
textMapPropagator: new SentryPropagator(),
instrumentations: [
getNodeAutoInstrumentations({
// Disable noisy/low-value instrumentations
"@opentelemetry/instrumentation-fs": { enabled: false },
"@opentelemetry/instrumentation-dns": { enabled: false },
"@opentelemetry/instrumentation-net": { enabled: false },
}),
new PrismaInstrumentation(),
],
})
sdk.start()
}
export const onRequestError = Sentry.captureRequestError

View file

@ -0,0 +1,148 @@
import { describe, it, expect, vi, beforeEach } from "vitest"
import * as Sentry from "@sentry/nextjs"
import { withTiming } from "../server-action-timing"
// Unit tests for the withTiming server-action wrapper. Sentry is mocked
// globally (see test setup); performance.now is stubbed per-test so durations
// are deterministic.
describe("withTiming", () => {
  let mockSetAttribute: ReturnType<typeof vi.fn>
  beforeEach(() => {
    // Re-stub startSpan each test so the span's setAttribute is observable.
    // The callback is invoked immediately with a minimal fake span.
    mockSetAttribute = vi.fn()
    vi.mocked(Sentry.startSpan).mockImplementation((_opts, callback) =>
      callback({ setAttribute: mockSetAttribute } as any),
    )
  })
  describe("successful execution", () => {
    it("returns the wrapped function's result", async () => {
      const inner = vi.fn().mockResolvedValue({ data: "hello" })
      const wrapped = withTiming("test-action", inner)
      const result = await wrapped("arg1")
      expect(result).toEqual({ data: "hello" })
    })
    it("passes arguments through to the wrapped function", async () => {
      const inner = vi.fn().mockResolvedValue(null)
      const wrapped = withTiming("test-action", inner)
      await wrapped("a", 42, true)
      expect(inner).toHaveBeenCalledWith("a", 42, true)
    })
    it("creates a Sentry span with correct name and op", async () => {
      const wrapped = withTiming("myAction", vi.fn().mockResolvedValue(null))
      await wrapped()
      expect(Sentry.startSpan).toHaveBeenCalledWith(
        expect.objectContaining({
          name: "myAction",
          op: "server.action",
          attributes: { "server_action.name": "myAction" },
        }),
        expect.any(Function),
      )
    })
    it("sets duration_ms attribute on span", async () => {
      // First now() call is the start timestamp, second is the end: 250ms.
      const nowSpy = vi.spyOn(performance, "now")
      nowSpy.mockReturnValueOnce(0).mockReturnValueOnce(250)
      const wrapped = withTiming("test", vi.fn().mockResolvedValue(null))
      await wrapped()
      expect(mockSetAttribute).toHaveBeenCalledWith(
        "server_action.duration_ms",
        250,
      )
      nowSpy.mockRestore()
    })
  })
  describe("slow action detection", () => {
    // The wrapper's slow threshold is 1000ms; these tests pin the behavior
    // on either side of it.
    it("logs console.warn when duration exceeds 1000ms", async () => {
      const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
      const nowSpy = vi.spyOn(performance, "now")
      nowSpy.mockReturnValueOnce(0).mockReturnValueOnce(1500)
      const wrapped = withTiming("slowAction", vi.fn().mockResolvedValue(null))
      await wrapped()
      expect(warnSpy).toHaveBeenCalledWith(
        expect.stringContaining("slowAction"),
      )
      expect(warnSpy).toHaveBeenCalledWith(
        expect.stringContaining("1500"),
      )
      warnSpy.mockRestore()
      nowSpy.mockRestore()
    })
    it("sets server_action.slow attribute when slow", async () => {
      vi.spyOn(console, "warn").mockImplementation(() => {})
      const nowSpy = vi.spyOn(performance, "now")
      nowSpy.mockReturnValueOnce(0).mockReturnValueOnce(2000)
      const wrapped = withTiming("slow", vi.fn().mockResolvedValue(null))
      await wrapped()
      expect(mockSetAttribute).toHaveBeenCalledWith("server_action.slow", true)
      nowSpy.mockRestore()
    })
    it("does not warn for fast actions", async () => {
      const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
      const nowSpy = vi.spyOn(performance, "now")
      nowSpy.mockReturnValueOnce(0).mockReturnValueOnce(500)
      const wrapped = withTiming("fast", vi.fn().mockResolvedValue(null))
      await wrapped()
      expect(warnSpy).not.toHaveBeenCalled()
      warnSpy.mockRestore()
      nowSpy.mockRestore()
    })
  })
  describe("error handling", () => {
    it("re-throws the original error", async () => {
      const error = new Error("boom")
      // Silence the wrapper's console.error so test output stays clean.
      vi.spyOn(console, "error").mockImplementation(() => {})
      const wrapped = withTiming("fail", vi.fn().mockRejectedValue(error))
      await expect(wrapped()).rejects.toThrow("boom")
    })
    it("calls Sentry.captureException with error and tags", async () => {
      const error = new Error("db failed")
      vi.spyOn(console, "error").mockImplementation(() => {})
      const wrapped = withTiming("dbAction", vi.fn().mockRejectedValue(error))
      await expect(wrapped()).rejects.toThrow()
      expect(Sentry.captureException).toHaveBeenCalledWith(error, {
        tags: { server_action: "dbAction" },
        extra: { duration_ms: expect.any(Number) },
      })
    })
    it("logs error with duration via console.error", async () => {
      const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
      const nowSpy = vi.spyOn(performance, "now")
      nowSpy.mockReturnValueOnce(0).mockReturnValueOnce(300)
      const wrapped = withTiming("errAction", vi.fn().mockRejectedValue(new Error("oops")))
      await expect(wrapped()).rejects.toThrow()
      expect(errorSpy).toHaveBeenCalledWith(
        expect.stringContaining("errAction"),
        expect.any(Error),
      )
      errorSpy.mockRestore()
      nowSpy.mockRestore()
    })
  })
})

View file

@ -37,7 +37,7 @@ export async function trackUserLogin(userData: {
})
// Ensure events are sent
await posthog?.shutdown()
await posthog?.flush()
console.log(`[Analytics] Tracked login for user ${userData.userId}`)
} catch (error) {
@ -45,3 +45,87 @@ export async function trackUserLogin(userData: {
console.error("[Analytics] Failed to track login:", error)
}
}
// --- New tracking events for key user journeys ---
/**
 * Captures a single analytics event and flushes it to PostHog. All tracking
 * helpers funnel through this so error handling lives in one place.
 *
 * @param distinctId - PostHog distinct id (the user's id).
 * @param event - Event name, e.g. "repository_connected".
 * @param properties - Optional extra event properties; an ISO timestamp is
 *   always appended.
 */
async function captureEvent(
  distinctId: string,
  event: string,
  properties?: Record<string, unknown>,
) {
  try {
    const posthog = getPostHogClient()
    if (!posthog) return
    posthog.capture({
      distinctId,
      event,
      properties: {
        ...properties,
        timestamp: new Date().toISOString(),
      },
    })
    // flush() rather than shutdown(): getPostHogClient() returns a memoized
    // singleton, and shutdown() would permanently close it after the first
    // event, silently dropping all later captures. (trackUserLogin made the
    // same shutdown -> flush switch.)
    await posthog.flush()
  } catch (error) {
    // Never let tracking errors break the calling flow
    console.error(`[Analytics] Failed to track ${event}:`, error)
  }
}
/** Records that a user reviewed an optimization result. */
export async function trackOptimizationReviewed(
  userId: string,
  properties: {
    traceId: string
    functionName?: string | null
    repositoryName?: string | null
    status?: string | null
  },
) {
  return captureEvent(userId, "optimization_reviewed", properties)
}
/** Records that a user connected a repository to Codeflash. */
export async function trackRepositoryConnected(
  userId: string,
  properties: {
    repositoryId: string
    repositoryName: string
  },
) {
  return captureEvent(userId, "repository_connected", properties)
}
/** Records that a user created an API key. */
export async function trackApiKeyCreated(
  userId: string,
  properties: {
    keyName: string
    organizationId?: string
  },
) {
  return captureEvent(userId, "api_key_created", properties)
}
/** Records that a user invited a member to an organization or repository. */
export async function trackMemberInvited(
  userId: string,
  properties: {
    invitedUsername: string
    role: string
    scope: "organization" | "repository"
    targetId: string
  },
) {
  return captureEvent(userId, "member_invited", properties)
}
/** Records that a user viewed the billing page. */
export async function trackBillingPageViewed(
  userId: string,
  properties?: {
    username?: string
  },
) {
  return captureEvent(userId, "billing_page_viewed", properties)
}

View file

@ -1,7 +1,5 @@
import { PrismaClient } from "@prisma/client"
import { ExperimentMetadata } from "./types"
const prisma = new PrismaClient()
import { prisma } from "@/lib/prisma"
/**
* Get the modified code for a trace, falling back to original optimized code if no modifications exist

View file

@ -1,12 +1,17 @@
import { PostHog } from "posthog-node"
let client: PostHog | undefined
export default function PostHogClient(): PostHog | undefined {
if (process.env.NODE_ENV !== "production") {
return undefined
}
return new PostHog("phc_aUO790jHd7z1SXwsYCz8dRApxueplZlZWeDSpKc5hol", {
host: "https://app.posthog.com",
flushAt: 1,
flushInterval: 0,
})
if (!client) {
client = new PostHog("phc_aUO790jHd7z1SXwsYCz8dRApxueplZlZWeDSpKc5hol", {
host: "https://app.posthog.com",
flushAt: 1,
flushInterval: 0,
})
}
return client
}

View file

@ -1,9 +1,13 @@
import { PrismaClient } from "@prisma/client"
import * as Sentry from "@sentry/nextjs"
const globalForPrisma = globalThis as unknown as {
prisma: PrismaClient | undefined
}
const isProduction = process.env.NODE_ENV === "production"
const SLOW_QUERY_THRESHOLD_MS = 500
function buildDatabaseUrl() {
const baseUrl = process.env.DATABASE_URL ?? ""
if (baseUrl.includes("connection_limit")) return baseUrl
@ -14,9 +18,37 @@ function buildDatabaseUrl() {
export const prisma =
globalForPrisma.prisma ??
new PrismaClient({
log: isProduction
? [
{ emit: "event", level: "warn" },
{ emit: "event", level: "error" },
]
: [
{ emit: "event", level: "query" },
{ emit: "event", level: "warn" },
{ emit: "event", level: "error" },
],
datasources: {
db: { url: buildDatabaseUrl() },
},
})
if (process.env.NODE_ENV !== "production") globalForPrisma.prisma = prisma
// Log slow queries in development
if (!isProduction) {
;(prisma as any).$on("query", (e: any) => {
if (e.duration > SLOW_QUERY_THRESHOLD_MS) {
console.warn(`[Prisma] Slow query (${e.duration}ms): ${e.query}`)
}
})
}
// Forward Prisma warnings and errors to Sentry
;(prisma as any).$on("warn", (e: any) => {
console.warn("[Prisma] Warning:", e.message)
})
;(prisma as any).$on("error", (e: any) => {
console.error("[Prisma] Error:", e.message)
Sentry.captureException(new Error(`Prisma error: ${e.message}`))
})
if (!isProduction) globalForPrisma.prisma = prisma

View file

@ -0,0 +1,57 @@
import * as Sentry from "@sentry/nextjs"
const SLOW_ACTION_THRESHOLD_MS = 1000
/**
 * Wraps a server action with performance-timing instrumentation.
 *
 * The wrapper records the action's duration on a Sentry span
 * (`op: "server.action"`), warns on the console when an action exceeds the
 * slow threshold (1s), and on failure logs the error with its duration and
 * forwards it to Sentry before re-throwing.
 *
 * @param actionName - Name used for the span, log lines, and Sentry tags.
 * @param fn - The async server action to instrument.
 * @returns A function with the same signature as `fn`.
 */
export function withTiming<TArgs extends unknown[], TReturn>(
  actionName: string,
  fn: (...args: TArgs) => Promise<TReturn>,
): (...args: TArgs) => Promise<TReturn> {
  return async (...args: TArgs): Promise<TReturn> => {
    const startedAt = performance.now()
    const elapsed = () => performance.now() - startedAt
    try {
      return await Sentry.startSpan(
        {
          name: actionName,
          op: "server.action",
          attributes: {
            "server_action.name": actionName,
          },
        },
        async (span) => {
          const value = await fn(...args)
          const durationMs = elapsed()
          span.setAttribute("server_action.duration_ms", durationMs)
          // Flag actions that blow past the slow threshold.
          if (durationMs > SLOW_ACTION_THRESHOLD_MS) {
            console.warn(
              `[ServerAction] Slow action: ${actionName} took ${durationMs.toFixed(0)}ms`,
            )
            span.setAttribute("server_action.slow", true)
          }
          return value
        },
      )
    } catch (error) {
      // Report failures with how long the action ran before throwing.
      const durationMs = elapsed()
      console.error(
        `[ServerAction] ${actionName} failed after ${durationMs.toFixed(0)}ms:`,
        error,
      )
      Sentry.captureException(error, {
        tags: { server_action: actionName },
        extra: { duration_ms: durationMs },
      })
      throw error
    }
  }
}

View file

@ -1,4 +1,4 @@
import * as Sentry from "@sentry/browser"
import * as Sentry from "@sentry/nextjs"
import { ActionResponse, createErrorResponse, createSuccessResponse } from "../action-response"
import { GitHubUserSearchResult } from "../types"

View file

@ -0,0 +1,36 @@
/**
* Lightweight syntax highlighter using PrismLight with only the languages
* this app needs, instead of the full Prism build that ships all 300 grammars.
*
* Usage:
* import { SyntaxHighlighter } from "@/lib/syntax-highlighter"
*
* Saves ~850 KB of client JS by not shipping unused language grammars.
*/
import SyntaxHighlighter from "react-syntax-highlighter/dist/esm/prism-light"
import python from "react-syntax-highlighter/dist/esm/languages/prism/python"
import javascript from "react-syntax-highlighter/dist/esm/languages/prism/javascript"
import typescript from "react-syntax-highlighter/dist/esm/languages/prism/typescript"
import java from "react-syntax-highlighter/dist/esm/languages/prism/java"
import json from "react-syntax-highlighter/dist/esm/languages/prism/json"
import css from "react-syntax-highlighter/dist/esm/languages/prism/css"
import markup from "react-syntax-highlighter/dist/esm/languages/prism/markup"
import bash from "react-syntax-highlighter/dist/esm/languages/prism/bash"
import jsx from "react-syntax-highlighter/dist/esm/languages/prism/jsx"
import tsx from "react-syntax-highlighter/dist/esm/languages/prism/tsx"
SyntaxHighlighter.registerLanguage("python", python)
SyntaxHighlighter.registerLanguage("javascript", javascript)
SyntaxHighlighter.registerLanguage("typescript", typescript)
SyntaxHighlighter.registerLanguage("java", java)
SyntaxHighlighter.registerLanguage("json", json)
SyntaxHighlighter.registerLanguage("css", css)
SyntaxHighlighter.registerLanguage("html", markup)
SyntaxHighlighter.registerLanguage("markup", markup)
SyntaxHighlighter.registerLanguage("bash", bash)
SyntaxHighlighter.registerLanguage("jsx", jsx)
SyntaxHighlighter.registerLanguage("tsx", tsx)
SyntaxHighlighter.registerLanguage("plaintext", () => ({}))
SyntaxHighlighter.registerLanguage("text", () => ({}))
export { SyntaxHighlighter }

View file

@ -29,3 +29,10 @@ export function isTeamMemberCheck(user: { email?: string; nickname?: string }):
(nickname !== undefined && TEAM_MEMBERS.has(nickname))
)
}
/** Codeflash team members OR anyone with an @unstructured.io email */
export function isMembenchAllowed(user: { email?: string; nickname?: string }): boolean {
  // Team members are always allowed regardless of email domain.
  if (isTeamMemberCheck(user)) return true
  // Otherwise gate on the company email domain (case-insensitive).
  return user.email?.toLowerCase().endsWith("@unstructured.io") ?? false
}

View file

@ -1,6 +1,6 @@
import { type NextRequest, NextResponse } from "next/server"
import { auth0 } from "@/lib/auth0"
import { isTeamMemberCheck } from "@/lib/team-members"
import { isTeamMemberCheck, isMembenchAllowed } from "@/lib/team-members"
export async function proxy(req: NextRequest) {
// Let Auth0 handle auth routes (/auth/login, /auth/callback, /auth/logout, etc.)
@ -46,12 +46,22 @@ export async function proxy(req: NextRequest) {
return NextResponse.redirect(loginUrl)
}
if (pathname.startsWith("/observability")) {
if (
pathname.startsWith("/observability") ||
pathname.startsWith("/roadmap") ||
pathname.startsWith("/report")
) {
if (!isTeamMemberCheck(session.user)) {
return NextResponse.redirect(new URL("/", origin))
}
}
if (pathname.startsWith("/membench")) {
if (!isMembenchAllowed(session.user)) {
return NextResponse.redirect(new URL("/", origin))
}
}
return authRes
}

View file

@ -0,0 +1,75 @@
import { vi, afterEach } from "vitest"
// ---------------------------------------------------------------------------
// Mock: @codeflash-ai/common (Prisma + utility functions)
// ---------------------------------------------------------------------------
// NOTE: vi.mock calls are hoisted by vitest to the top of the module, and
// the factory must be self-contained — do not reference outer variables here.
vi.mock("@codeflash-ai/common", () => {
  // Minimal Prisma surface: each model exposes only the methods the code
  // under test calls. All are bare vi.fn()s; individual tests configure
  // return values as needed.
  const mockPrisma = {
    organizations: {
      findFirst: vi.fn(),
    },
    optimization_events: {
      findMany: vi.fn(),
      findFirst: vi.fn(),
      count: vi.fn(),
      groupBy: vi.fn(),
    },
    optimization_features: {
      findMany: vi.fn(),
      findUnique: vi.fn(),
    },
    repositories: {
      findFirst: vi.fn(),
    },
    organization_members: {
      findFirst: vi.fn(),
      create: vi.fn(),
      update: vi.fn(),
    },
    repository_members: {
      create: vi.fn(),
      delete: vi.fn(),
    },
    users: {
      create: vi.fn(),
    },
    $queryRawUnsafe: vi.fn(),
    $disconnect: vi.fn(),
  }
  return {
    prisma: mockPrisma,
    buildOptimizationOrCondition: vi.fn().mockReturnValue({}),
    getUserById: vi.fn(),
    createOrUpdateUser: vi.fn(),
    deleteOrganizationMemberApiKeys: vi.fn(),
    organizationMemberRepository: {
      delete: vi.fn(),
    },
  }
})
// ---------------------------------------------------------------------------
// Mock: @sentry/nextjs
// ---------------------------------------------------------------------------
vi.mock("@sentry/nextjs", () => ({
  // startSpan invokes its callback immediately with a minimal fake span, so
  // wrapped code runs synchronously inside tests.
  startSpan: vi.fn((_opts: any, callback: any) =>
    callback({ setAttribute: vi.fn() }),
  ),
  captureException: vi.fn(),
}))
// ---------------------------------------------------------------------------
// Mock: @sentry/node
// ---------------------------------------------------------------------------
vi.mock("@sentry/node", () => ({
  captureException: vi.fn(),
  captureMessage: vi.fn(),
}))
// ---------------------------------------------------------------------------
// Clear all mocks between tests
// ---------------------------------------------------------------------------
afterEach(() => {
  vi.clearAllMocks()
})

View file

@ -1,9 +1,16 @@
import { defineConfig } from "vitest/config"
import react from "@vitejs/plugin-react"
import path from "path"
export default defineConfig({
  // NOTE(review): the `as any` presumably papers over a Plugin type mismatch
  // between @vitejs/plugin-react and this vitest version — confirm before
  // removing.
  plugins: [react()] as any,
  resolve: {
    alias: {
      // Mirror the "@/..." path alias from tsconfig so imports resolve in tests.
      "@": path.resolve(__dirname, "./src"),
    },
  },
  test: {
    // jsdom so component tests have a DOM; shared mocks load from setup.ts.
    environment: "jsdom",
    setupFiles: ["./src/test/setup.ts"],
  },
})