# data-ge/app/providers/openai.py
from __future__ import annotations

from typing import Any, Dict, List

import httpx

from app.exceptions import ProviderAPICallError
from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMResponse
from app.providers.base import LLMProviderClient
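

# NOTE: the attributes and helpers used below come from LLMProviderClient
# (app/providers/base.py, not shown here). The contract sketched in these
# comments is an assumption inferred from the call sites in this file:
#
#   self.api_key: str                  -- resolved from the api_key_env variable
#   self.ensure_stream_supported(bool) -- rejects stream=True when unsupported
#   self.merge_payload(payload, extra) -- overlays request.extra_params onto payload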
class OpenAIProvider(LLMProviderClient):
    """Provider client for the OpenAI chat-completions endpoint."""

    name = LLMProvider.OPENAI.value
    api_key_env = "OPENAI_API_KEY"
    supports_stream = True
    base_url = "https://api.openai.com/v1/chat/completions"
    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        """Send a chat-completions request and normalize the response."""
        self.ensure_stream_supported(request.stream)
        # Build the JSON body; caller-supplied extra_params can override defaults.
        payload = self.merge_payload(
            {
                "model": request.model,
                "messages": [msg.model_dump() for msg in request.messages],
                "temperature": request.temperature,
                "top_p": request.top_p,
                "max_tokens": request.max_tokens,
                "stream": request.stream,
            },
            request.extra_params,
        )
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            # Collapse transport failures and non-2xx statuses into one domain error.
            raise ProviderAPICallError(f"OpenAI request failed: {exc}") from exc
        data: Dict[str, Any] = response.json()
        choices = self._build_choices(data.get("choices", []))
        return LLMResponse(
            provider=LLMProvider.OPENAI,
            model=data.get("model", request.model),
            choices=choices,
            raw=data,
        )
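
    # For reference, a non-streaming chat-completions response has the shape
    # that _build_choices consumes (fields beyond these are preserved in `raw`):
    #
    #   {
    #     "model": "gpt-4o-mini-2024-07-18",
    #     "choices": [
    #       {"index": 0,
    #        "message": {"role": "assistant", "content": "..."},
    #        "finish_reason": "stop"}
    #     ]
    #   }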
    @staticmethod
    def _build_choices(choices: List[Dict[str, Any]]) -> List[LLMChoice]:
        """Convert raw API choice dicts into LLMChoice models."""
        built: List[LLMChoice] = []
        for choice in choices:
            message_data = choice.get("message") or {}
            message = LLMMessage(
                role=message_data.get("role", "assistant"),  # fall back to assistant
                content=message_data.get("content", ""),
            )
            # Fall back to the list position when the API omits an index.
            built.append(
                LLMChoice(index=choice.get("index", len(built)), message=message)
            )
        return built
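

if __name__ == "__main__":
    # Usage sketch (not part of the original module). Assumptions: OpenAIProvider
    # can be constructed without arguments, OPENAI_API_KEY is set in the
    # environment, LLMRequest/LLMMessage accept the fields shown above, and the
    # model name "gpt-4o-mini" is purely illustrative.
    import asyncio

    async def _demo() -> None:
        request = LLMRequest(
            model="gpt-4o-mini",
            messages=[LLMMessage(role="user", content="Say hello in one line.")],
            temperature=0.7,
            top_p=1.0,
            max_tokens=64,
            stream=False,
            extra_params={},
        )
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await OpenAIProvider().chat(request, client)
            print(response.choices[0].message.content)

    asyncio.run(_demo())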