Improve exception logging
@@ -4,3 +4,14 @@ class ProviderConfigurationError(RuntimeError):


+class ProviderAPICallError(RuntimeError):
+    """Raised when the upstream provider responds with an error."""
+
+    def __init__(
+        self,
+        message: str,
+        *,
+        status_code: int | None = None,
+        response_text: str | None = None,
+    ) -> None:
+        super().__init__(message)
+        self.status_code = status_code
+        self.response_text = response_text

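Note: with status_code and response_text attached, callers can recover the upstream failure without parsing the message string. A minimal consumption sketch (the values are illustrative, not from this commit):

    try:
        raise ProviderAPICallError(
            "provider request failed with status 429",
            status_code=429,
            response_text='{"error": "rate_limit_exceeded"}',
        )
    except ProviderAPICallError as exc:
        status = exc.status_code or 502          # same 502 fallback the gateway uses
        detail = exc.response_text or str(exc)   # prefer the raw upstream body for logs
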
app/main.py
@@ -1,6 +1,7 @@
from __future__ import annotations

import asyncio
+import logging
from contextlib import asynccontextmanager

import httpx
@@ -17,6 +18,9 @@ from app.services import LLMGateway
from app.services.import_analysis import process_import_analysis_job


+logger = logging.getLogger(__name__)


@asynccontextmanager
async def lifespan(app: FastAPI):
    client = httpx.AsyncClient(timeout=httpx.Timeout(30.0))
@@ -49,9 +53,18 @@ def create_app() -> FastAPI:
        try:
            return await gateway.chat(payload, client)
        except ProviderConfigurationError as exc:
+            logger.error("Provider configuration error: %s", exc, exc_info=True)
            raise HTTPException(status_code=422, detail=str(exc)) from exc
        except ProviderAPICallError as exc:
-            raise HTTPException(status_code=502, detail=str(exc)) from exc
+            status_code = exc.status_code or 502
+            log_detail = exc.response_text or str(exc)
+            logger.error(
+                "Provider API call error (status %s): %s",
+                status_code,
+                log_detail,
+                exc_info=True,
+            )
+            raise HTTPException(status_code=status_code, detail=str(exc)) from exc

    @application.post(
        "/v1/import/analyze",

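Note: the chat handler no longer flattens every provider failure to 502; when the upstream reported a status it is forwarded, so e.g. a provider 429 reaches the client as HTTP 429. A small sketch of the mapping rule, assuming ProviderAPICallError is in scope as it is in app/main.py:

    from fastapi import HTTPException

    def to_http_exception(exc: ProviderAPICallError) -> HTTPException:
        # Upstream status wins when known; otherwise answer 502 Bad Gateway.
        return HTTPException(status_code=exc.status_code or 502, detail=str(exc))

    assert to_http_exception(ProviderAPICallError("boom", status_code=429)).status_code == 429
    assert to_http_exception(ProviderAPICallError("boom")).status_code == 502
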
@@ -1,5 +1,6 @@
from __future__ import annotations

+import logging
from typing import Any, Dict, List, Tuple

import httpx
@@ -16,6 +17,9 @@ from app.models import (
from app.providers.base import LLMProviderClient


+logger = logging.getLogger(__name__)


class AnthropicProvider(LLMProviderClient):
    name = LLMProvider.ANTHROPIC.value
    api_key_env = "ANTHROPIC_API_KEY"
@@ -52,7 +56,19 @@ class AnthropicProvider(LLMProviderClient):
        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
+        except httpx.HTTPStatusError as exc:
+            status_code = exc.response.status_code
+            body = exc.response.text
+            logger.error(
+                "Anthropic upstream returned %s: %s", status_code, body, exc_info=True
+            )
+            raise ProviderAPICallError(
+                f"Anthropic request failed with status {status_code}",
+                status_code=status_code,
+                response_text=body,
+            ) from exc
        except httpx.HTTPError as exc:
+            logger.error("Anthropic transport error: %s", exc, exc_info=True)
            raise ProviderAPICallError(f"Anthropic request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()

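Note: the same try/except shape repeats in every provider below (DeepSeek, Gemini, OpenAI, OpenRouter, Qwen), differing only in the provider name baked into the log and error messages. Not part of this commit, but a hedged sketch of how it could be factored out, assuming logger and ProviderAPICallError are available in a shared module:

    import httpx

    async def post_or_raise(
        client: httpx.AsyncClient,
        url: str,
        *,
        provider: str,
        json: dict,
        headers: dict,
    ) -> httpx.Response:
        try:
            response = await client.post(url, json=json, headers=headers)
            response.raise_for_status()
        # HTTPStatusError subclasses HTTPError, so it must be caught first.
        except httpx.HTTPStatusError as exc:
            status_code = exc.response.status_code
            body = exc.response.text
            logger.error("%s upstream returned %s: %s", provider, status_code, body, exc_info=True)
            raise ProviderAPICallError(
                f"{provider} request failed with status {status_code}",
                status_code=status_code,
                response_text=body,
            ) from exc
        except httpx.HTTPError as exc:
            logger.error("%s transport error: %s", provider, exc, exc_info=True)
            raise ProviderAPICallError(f"{provider} request failed: {exc}") from exc
        return response
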
@@ -1,5 +1,6 @@
from __future__ import annotations

+import logging
from typing import Any, Dict, List

import httpx
@@ -9,6 +10,9 @@ from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMRespon
from app.providers.base import LLMProviderClient


+logger = logging.getLogger(__name__)


class DeepSeekProvider(LLMProviderClient):
    name = LLMProvider.DEEPSEEK.value
    api_key_env = "DEEPSEEK_API_KEY"
@@ -40,7 +44,17 @@ class DeepSeekProvider(LLMProviderClient):
        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
+        except httpx.HTTPStatusError as exc:
+            status_code = exc.response.status_code
+            body = exc.response.text
+            logger.error("DeepSeek upstream returned %s: %s", status_code, body, exc_info=True)
+            raise ProviderAPICallError(
+                f"DeepSeek request failed with status {status_code}",
+                status_code=status_code,
+                response_text=body,
+            ) from exc
        except httpx.HTTPError as exc:
+            logger.error("DeepSeek transport error: %s", exc, exc_info=True)
            raise ProviderAPICallError(f"DeepSeek request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()

@@ -1,5 +1,6 @@
from __future__ import annotations

+import logging
from typing import Any, Dict, List, Tuple

import httpx
@@ -16,6 +17,9 @@ from app.models import (
from app.providers.base import LLMProviderClient


+logger = logging.getLogger(__name__)


class GeminiProvider(LLMProviderClient):
    name = LLMProvider.GEMINI.value
    api_key_env = "GEMINI_API_KEY"
@@ -53,7 +57,19 @@ class GeminiProvider(LLMProviderClient):
        try:
            response = await client.post(endpoint, json=payload, headers=headers)
            response.raise_for_status()
+        except httpx.HTTPStatusError as exc:
+            status_code = exc.response.status_code
+            body = exc.response.text
+            logger.error(
+                "Gemini upstream returned %s: %s", status_code, body, exc_info=True
+            )
+            raise ProviderAPICallError(
+                f"Gemini request failed with status {status_code}",
+                status_code=status_code,
+                response_text=body,
+            ) from exc
        except httpx.HTTPError as exc:
+            logger.error("Gemini transport error: %s", exc, exc_info=True)
            raise ProviderAPICallError(f"Gemini request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()

@@ -1,5 +1,6 @@
from __future__ import annotations

+import logging
from typing import Any, Dict, List

import httpx
@@ -9,6 +10,9 @@ from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMRespon
from app.providers.base import LLMProviderClient


+logger = logging.getLogger(__name__)


class OpenAIProvider(LLMProviderClient):
    name = LLMProvider.OPENAI.value
    api_key_env = "OPENAI_API_KEY"
@@ -40,7 +44,19 @@ class OpenAIProvider(LLMProviderClient):
        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
+        except httpx.HTTPStatusError as exc:
+            status_code = exc.response.status_code
+            body = exc.response.text
+            logger.error(
+                "OpenAI upstream returned %s: %s", status_code, body, exc_info=True
+            )
+            raise ProviderAPICallError(
+                f"OpenAI request failed with status {status_code}",
+                status_code=status_code,
+                response_text=body,
+            ) from exc
        except httpx.HTTPError as exc:
+            logger.error("OpenAI transport error: %s", exc, exc_info=True)
            raise ProviderAPICallError(f"OpenAI request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()

@@ -1,5 +1,6 @@
from __future__ import annotations

+import logging
import os
from typing import Any, Dict, List

@@ -10,6 +11,9 @@ from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMRespon
from app.providers.base import LLMProviderClient


+logger = logging.getLogger(__name__)


class OpenRouterProvider(LLMProviderClient):
    name = LLMProvider.OPENROUTER.value
    api_key_env = "OPENROUTER_API_KEY"
@@ -51,7 +55,19 @@ class OpenRouterProvider(LLMProviderClient):
        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
+        except httpx.HTTPStatusError as exc:
+            status_code = exc.response.status_code
+            body = exc.response.text
+            logger.error(
+                "OpenRouter upstream returned %s: %s", status_code, body, exc_info=True
+            )
+            raise ProviderAPICallError(
+                f"OpenRouter request failed with status {status_code}",
+                status_code=status_code,
+                response_text=body,
+            ) from exc
        except httpx.HTTPError as exc:
+            logger.error("OpenRouter transport error: %s", exc, exc_info=True)
            raise ProviderAPICallError(f"OpenRouter request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()

@@ -1,5 +1,6 @@
from __future__ import annotations

+import logging
from typing import Any, Dict, List

import httpx
@@ -9,6 +10,9 @@ from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMRespon
from app.providers.base import LLMProviderClient


+logger = logging.getLogger(__name__)


class QwenProvider(LLMProviderClient):
    name = LLMProvider.QWEN.value
    api_key_env = "QWEN_API_KEY"
@@ -48,7 +52,17 @@ class QwenProvider(LLMProviderClient):
        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
+        except httpx.HTTPStatusError as exc:
+            status_code = exc.response.status_code
+            body = exc.response.text
+            logger.error("Qwen upstream returned %s: %s", status_code, body, exc_info=True)
+            raise ProviderAPICallError(
+                f"Qwen request failed with status {status_code}",
+                status_code=status_code,
+                response_text=body,
+            ) from exc
        except httpx.HTTPError as exc:
+            logger.error("Qwen transport error: %s", exc, exc_info=True)
            raise ProviderAPICallError(f"Qwen request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()

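Note: the new error paths can be exercised without hitting a real provider. A minimal, self-contained sketch using httpx.MockTransport (the URL and handler are made up for illustration):

    import asyncio
    import httpx

    async def main() -> None:
        def handler(request: httpx.Request) -> httpx.Response:
            return httpx.Response(429, text="rate limited")   # simulate an upstream 429

        client = httpx.AsyncClient(transport=httpx.MockTransport(handler))
        try:
            response = await client.post("https://provider.invalid/v1/chat", json={})
            response.raise_for_status()
        except httpx.HTTPStatusError as exc:
            # The same fields the providers above capture into ProviderAPICallError.
            print(exc.response.status_code, exc.response.text)   # -> 429 rate limited
        finally:
            await client.aclose()

    asyncio.run(main())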