style: auto-fix linting issues

- ruff-format: reformat test file
- fix ty type error: cast mock clients to MagicMock for assert_called_once

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
claude[bot] 2026-04-03 07:23:09 +00:00
parent 322d8736c9
commit 20b0b01994

View file

@@ -3,6 +3,7 @@
from __future__ import annotations
import asyncio
from typing import cast
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
@@ -56,24 +57,15 @@ class TestLLMClientEventLoopHandling:
mock_anthropic_class.side_effect = create_anthropic_mock
# Make first call in event loop 1
test_llm = LLM(
name="gpt-4.1",
model_type="openai",
input_cost=2.0,
output_cost=8.0,
cached_input_cost=None,
)
test_llm = LLM(name="gpt-4.1", model_type="openai", input_cost=2.0, output_cost=8.0, cached_input_cost=None)
await client.call(
llm=test_llm,
messages=[{"role": "user", "content": "test"}],
call_type="test",
trace_id="test-trace-1",
llm=test_llm, messages=[{"role": "user", "content": "test"}], call_type="test", trace_id="test-trace-1"
)
# Save reference to first clients
first_openai_client = client.openai_client
first_anthropic_client = client.anthropic_client
first_openai_client = cast(MagicMock, client.openai_client)
first_anthropic_client = cast(MagicMock, client.anthropic_client)
first_loop = client.client_loop
assert first_openai_client is not None
@@ -86,11 +78,7 @@ class TestLLMClientEventLoopHandling:
async def inner():
# Create a fresh LLM object in the new loop
new_llm = LLM(
name="gpt-4.1",
model_type="openai",
input_cost=2.0,
output_cost=8.0,
cached_input_cost=None,
name="gpt-4.1", model_type="openai", input_cost=2.0, output_cost=8.0, cached_input_cost=None
)
await client.call(
llm=new_llm,
@@ -144,20 +132,11 @@ class TestLLMClientEventLoopHandling:
mock_openai_class.return_value = mock_openai_instance
mock_anthropic_class.return_value = MagicMock()
test_llm = LLM(
name="gpt-4.1",
model_type="openai",
input_cost=2.0,
output_cost=8.0,
cached_input_cost=None,
)
test_llm = LLM(name="gpt-4.1", model_type="openai", input_cost=2.0, output_cost=8.0, cached_input_cost=None)
# Make first call
await client.call(
llm=test_llm,
messages=[{"role": "user", "content": "test1"}],
call_type="test",
trace_id="test-trace-1",
llm=test_llm, messages=[{"role": "user", "content": "test1"}], call_type="test", trace_id="test-trace-1"
)
first_openai_client = client.openai_client
@@ -165,10 +144,7 @@ class TestLLMClientEventLoopHandling:
# Make second call in same event loop
await client.call(
llm=test_llm,
messages=[{"role": "user", "content": "test2"}],
call_type="test",
trace_id="test-trace-2",
llm=test_llm, messages=[{"role": "user", "content": "test2"}], call_type="test", trace_id="test-trace-2"
)
# Clients should be the same instances (not recreated)