{
  "package_name": "codeflash-internal-docs",
  "total_capabilities": 14,
  "capabilities": [
    {
      "id": 0,
      "name": "optimize-schema-structure",
      "description": "Understands OptimizeSchema and OptimizeSchemaLP request schemas, their fields, defaults, and the difference between them (e.g., line_profiler_results, different n_candidates defaults).",
      "complexity": "basic",
      "api_elements": ["OptimizeSchema", "OptimizeSchemaLP", "n_candidates", "line_profiler_results", "source_code", "trace_id"]
    },
    {
      "id": 1,
      "name": "domain-models-relationships",
      "description": "Understands OptimizationFeatures, OptimizationEvents, and Repositories Django models, their fields and purposes (trace_id PK, approval workflow, PR events, usage tracking).",
      "complexity": "basic",
      "api_elements": ["OptimizationFeatures", "OptimizationEvents", "Repositories", "trace_id", "approval_status", "llm_cost", "optimizations_limit"]
    },
    {
      "id": 2,
      "name": "optimization-pipeline-flow",
      "description": "Understands the 6-step optimization pipeline: request entry, language dispatch, context extraction, LLM calls, postprocessing, response assembly.",
      "complexity": "intermediate",
      "api_elements": ["optimize_api", "optimize_python", "optimize_python_code", "optimize_python_code_single", "asyncio.TaskGroup"]
    },
    {
      "id": 3,
      "name": "language-dispatch-routing",
      "description": "Understands how both optimize and testgen routers dispatch by data.language to JavaScript/TypeScript, Java, or Python handlers, and that imports are lazy.",
      "complexity": "basic",
      "api_elements": ["optimize_api", "testgen_api", "data.language", "optimize_javascript", "optimize_java", "optimize_python"]
    },
    {
      "id": 4,
      "name": "context-extraction-types",
      "description": "Understands SingleOptimizerContext vs MultiOptimizerContext, the factory method get_dynamic_context(), prompt construction methods, and markdown code block format with file path annotations.",
      "complexity": "intermediate",
      "api_elements": ["SingleOptimizerContext", "MultiOptimizerContext", "BaseOptimizerContext", "get_dynamic_context", "get_system_prompt", "get_user_prompt", "extract_code_and_explanation_from_llm_res"]
    },
    {
      "id": 5,
      "name": "llm-response-parsing",
      "description": "Understands how LLM responses are parsed: extract_code_and_explanation_from_llm_res for markdown code blocks, parse_and_generate_candidate_schema for schema conversion, and is_valid_code for syntax validation.",
      "complexity": "intermediate",
      "api_elements": ["extract_code_and_explanation_from_llm_res", "parse_and_generate_candidate_schema", "is_valid_code", "OptimizeResponseItemSchema"]
    },
    {
      "id": 6,
      "name": "model-distribution-formula",
      "description": "Knows the model distribution formula: claude_calls = (total - 1) // 2, gpt_calls = total - claude_calls. Knows MAX_OPTIMIZER_CALLS=6, MAX_OPTIMIZER_LP_CALLS=7, and concrete example outputs.",
      "complexity": "intermediate",
      "api_elements": ["get_model_distribution", "MAX_OPTIMIZER_CALLS", "MAX_OPTIMIZER_LP_CALLS", "claude_calls", "gpt_calls"]
    },
    {
      "id": 7,
      "name": "postprocessing-dedup",
      "description": "Understands AST-based deduplication using ast.parse() + ast.dump(), equality_check() for filtering identical candidates, and libcst for code transformations.",
      "complexity": "intermediate",
      "api_elements": ["deduplicate_optimizations", "equality_check", "ast.parse", "ast.dump", "libcst"]
    },
    {
      "id": 8,
      "name": "aiservice-endpoint-map",
      "description": "Knows all 12 Django-Ninja endpoints, their paths, API names, and modules. Understands common patterns: async def, AuthenticatedRequest, response schemas with status codes.",
      "complexity": "basic",
      "api_elements": ["/ai/optimize", "/ai/testgen", "/ai/refinement", "/ai/code_repair", "/ai/adaptive_optimize", "/ai/rewrite_jit", "AuthenticatedRequest"]
    },
    {
      "id": 9,
      "name": "cf-api-route-ordering",
      "description": "Understands that webhook routes must be registered before the body parser for raw body signature verification, the 4-phase registration order, and the middleware stack (checkForValidAPIKey, trackEndpointCalls, idLimiter, trackUsage).",
      "complexity": "intermediate",
      "api_elements": ["webhook.routes.ts", "express.json", "checkForValidAPIKey", "trackEndpointCalls", "idLimiter", "trackUsage"]
    },
    {
      "id": 10,
      "name": "llm-dataclass-and-call",
      "description": "Understands the LLM pydantic dataclass (name, max_tokens, model_type, cost fields), call_llm() function signature and its parameters, and the LLMResponse/LLMUsage types.",
      "complexity": "intermediate",
      "api_elements": ["LLM", "call_llm", "LLMResponse", "LLMUsage", "model_type", "input_cost", "output_cost"]
    },
    {
      "id": 11,
      "name": "llm-client-setup",
      "description": "Understands how LLM clients are created: AsyncAzureOpenAI for OpenAI (AZURE_OPENAI_* env vars), AsyncAnthropicFoundry for Anthropic (ANTHROPIC_FOUNDRY_* env vars), and that fresh clients are created per request.",
      "complexity": "advanced",
      "api_elements": ["_create_openai_client", "_create_anthropic_client", "get_llm_client", "AsyncAzureOpenAI", "AsyncAnthropicFoundry"]
    },
    {
      "id": 12,
      "name": "llm-cost-calculation",
      "description": "Understands calculate_llm_cost() and the difference between OpenAI and Anthropic cached token accounting: Anthropic's cache tokens are additive to input_tokens, OpenAI's cached_tokens is a subset of prompt_tokens.",
      "complexity": "advanced",
      "api_elements": ["calculate_llm_cost", "cache_read_input_tokens", "cache_creation_input_tokens", "cached_tokens", "prompt_tokens"]
    },
    {
      "id": 13,
      "name": "testgen-instrumentation",
      "description": "Understands the test generation pipeline: build_prompt (Jinja2), instrument_tests (behavior + performance), framework detection for GPU sync, device sync precompute, and LLMOutputParseError.",
      "complexity": "advanced",
      "api_elements": ["build_prompt", "instrument_tests", "detect_frameworks_from_code", "_create_device_sync_precompute_statements", "LLMOutputParseError"]
    }
  ]
}