init: llm gateway & import_analyse
17  app/providers/__init__.py  Normal file
@@ -0,0 +1,17 @@
from .anthropic import AnthropicProvider
from .base import LLMProviderClient
from .deepseek import DeepSeekProvider
from .gemini import GeminiProvider
from .openai import OpenAIProvider
from .openrouter import OpenRouterProvider
from .qwen import QwenProvider

__all__ = [
    "LLMProviderClient",
    "OpenAIProvider",
    "AnthropicProvider",
    "OpenRouterProvider",
    "GeminiProvider",
    "QwenProvider",
    "DeepSeekProvider",
]
BIN  app/providers/__pycache__/__init__.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/__init__.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/anthropic.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/anthropic.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/base.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/base.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/deepseek.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/deepseek.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/gemini.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/gemini.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/openai.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/openai.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/openrouter.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/openrouter.cpython-312.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/qwen.cpython-311.pyc  Normal file
Binary file not shown.
BIN  app/providers/__pycache__/qwen.cpython-312.pyc  Normal file
Binary file not shown.
97  app/providers/anthropic.py  Normal file
@@ -0,0 +1,97 @@
from __future__ import annotations

from typing import Any, Dict, List, Tuple

import httpx

from app.exceptions import ProviderAPICallError
from app.models import (
    LLMChoice,
    LLMMessage,
    LLMProvider,
    LLMRequest,
    LLMResponse,
    LLMRole,
)
from app.providers.base import LLMProviderClient


class AnthropicProvider(LLMProviderClient):
    name = LLMProvider.ANTHROPIC.value
    api_key_env = "ANTHROPIC_API_KEY"
    base_url = "https://api.anthropic.com/v1/messages"
    anthropic_version = "2023-06-01"

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)

        system_prompt, chat_messages = self._convert_messages(request.messages)

        payload = self.merge_payload(
            {
                "model": request.model,
                "messages": chat_messages,
                "max_tokens": request.max_tokens or 1024,
                "temperature": request.temperature,
                "top_p": request.top_p,
            },
            request.extra_params,
        )

        if system_prompt:
            payload["system"] = system_prompt

        headers = {
            "x-api-key": self.api_key,
            "anthropic-version": self.anthropic_version,
            "content-type": "application/json",
        }

        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            raise ProviderAPICallError(f"Anthropic request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()
        message = self._build_message(data)
        return LLMResponse(
            provider=LLMProvider.ANTHROPIC,
            model=data.get("model", request.model),
            choices=[LLMChoice(index=0, message=message)],
            raw=data,
        )

    @staticmethod
    def _convert_messages(
        messages: List[LLMMessage],
    ) -> Tuple[str | None, List[dict[str, Any]]]:
        system_parts: List[str] = []
        chat_payload: List[dict[str, Any]] = []

        for msg in messages:
            if msg.role == LLMRole.SYSTEM:
                system_parts.append(msg.content)
                continue

            role = "user" if msg.role == LLMRole.USER else "assistant"
            chat_payload.append(
                {"role": role, "content": [{"type": "text", "text": msg.content}]}
            )

        system_prompt = "\n\n".join(system_parts) if system_parts else None
        return system_prompt, chat_payload

    @staticmethod
    def _build_message(data: Dict[str, Any]) -> LLMMessage:
        role = data.get("role", "assistant")
        content_blocks = data.get("content", [])
        text_parts = [
            block.get("text", "")
            for block in content_blocks
            if isinstance(block, dict) and block.get("type") == "text"
        ]
        content = "\n\n".join(part for part in text_parts if part)
        return LLMMessage(role=role, content=content)
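For reference, a minimal sketch (editorial, not part of the commit) of the split that _convert_messages performs: system turns are concatenated into one system prompt, and every other turn becomes an Anthropic-style text content block. It assumes LLMMessage and LLMRole from app.models behave the way anthropic.py uses them.

# Illustration only; LLMMessage/LLMRole are assumed to work as used in anthropic.py above.
from app.models import LLMMessage, LLMRole
from app.providers.anthropic import AnthropicProvider

messages = [
    LLMMessage(role=LLMRole.SYSTEM, content="Answer in French."),
    LLMMessage(role=LLMRole.USER, content="Hello!"),
]
system_prompt, chat_payload = AnthropicProvider._convert_messages(messages)
# system_prompt -> "Answer in French."
# chat_payload  -> [{"role": "user", "content": [{"type": "text", "text": "Hello!"}]}]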
44  app/providers/base.py  Normal file
@@ -0,0 +1,44 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any

import httpx

from app.exceptions import ProviderConfigurationError
from app.models import LLMRequest, LLMResponse


class LLMProviderClient(ABC):
    """Base class for provider-specific chat completion clients."""

    name: str
    api_key_env: str | None = None
    supports_stream: bool = False

    def __init__(self, api_key: str | None):
        if self.api_key_env and not api_key:
            raise ProviderConfigurationError(
                f"Provider '{self.name}' requires environment variable '{self.api_key_env}'."
            )
        self.api_key = api_key or ""

    @abstractmethod
    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        """Execute a chat completion call."""

    @staticmethod
    def merge_payload(base: dict[str, Any], extra: dict[str, Any] | None) -> dict[str, Any]:
        """Merge provider payload with optional extra params, ignoring None values."""
        merged = {k: v for k, v in base.items() if v is not None}
        if extra:
            merged.update({k: v for k, v in extra.items() if v is not None})
        return merged

    def ensure_stream_supported(self, stream_requested: bool) -> None:
        if stream_requested and not self.supports_stream:
            raise ProviderConfigurationError(
                f"Provider '{self.name}' does not support streaming mode."
            )
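A minimal sketch (editorial, not part of the commit) of what a new provider has to supply on top of this base class: the class attributes drive API-key validation and the streaming check, and chat() is the only abstract method. ExampleProvider and its payload fields are hypothetical, shown only to illustrate the contract.

# Hypothetical provider, illustrating the LLMProviderClient contract defined above.
import httpx

from app.models import LLMRequest, LLMResponse
from app.providers.base import LLMProviderClient


class ExampleProvider(LLMProviderClient):
    name = "example"                 # used in ProviderConfigurationError messages
    api_key_env = "EXAMPLE_API_KEY"  # __init__ rejects a missing key when this is set
    supports_stream = False          # ensure_stream_supported() refuses stream=True

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)
        payload = self.merge_payload(
            {"model": request.model, "temperature": request.temperature},
            request.extra_params,
        )
        # ...call the remote API with `client` and map the reply into LLMResponse...
        raise NotImplementedError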
66  app/providers/deepseek.py  Normal file
@@ -0,0 +1,66 @@
from __future__ import annotations

from typing import Any, Dict, List

import httpx

from app.exceptions import ProviderAPICallError
from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMResponse
from app.providers.base import LLMProviderClient


class DeepSeekProvider(LLMProviderClient):
    name = LLMProvider.DEEPSEEK.value
    api_key_env = "DEEPSEEK_API_KEY"
    supports_stream = True
    base_url = "https://api.deepseek.com/v1/chat/completions"

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)

        payload = self.merge_payload(
            {
                "model": request.model,
                "messages": [msg.model_dump() for msg in request.messages],
                "temperature": request.temperature,
                "top_p": request.top_p,
                "max_tokens": request.max_tokens,
                "stream": request.stream,
            },
            request.extra_params,
        )

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            raise ProviderAPICallError(f"DeepSeek request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()
        choices = self._build_choices(data.get("choices", []))

        return LLMResponse(
            provider=LLMProvider.DEEPSEEK,
            model=data.get("model", request.model),
            choices=choices,
            raw=data,
        )

    @staticmethod
    def _build_choices(choices: List[dict[str, Any]]) -> List[LLMChoice]:
        built: List[LLMChoice] = []
        for choice in choices:
            message_data = choice.get("message") or {}
            message = LLMMessage(
                role=message_data.get("role", "assistant"),
                content=message_data.get("content", ""),
            )
            built.append(LLMChoice(index=choice.get("index", len(built)), message=message))
        return built
112  app/providers/gemini.py  Normal file
@@ -0,0 +1,112 @@
from __future__ import annotations

from typing import Any, Dict, List, Tuple

import httpx

from app.exceptions import ProviderAPICallError
from app.models import (
    LLMChoice,
    LLMMessage,
    LLMProvider,
    LLMRequest,
    LLMResponse,
    LLMRole,
)
from app.providers.base import LLMProviderClient


class GeminiProvider(LLMProviderClient):
    name = LLMProvider.GEMINI.value
    api_key_env = "GEMINI_API_KEY"
    base_url = "https://generativelanguage.googleapis.com/v1beta"

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)

        system_instruction, contents = self._convert_messages(request.messages)
        config = {
            "temperature": request.temperature,
            "topP": request.top_p,
            "maxOutputTokens": request.max_tokens,
        }

        payload: Dict[str, Any] = self.merge_payload(
            {"contents": contents}, request.extra_params
        )

        generation_config = {k: v for k, v in config.items() if v is not None}
        if generation_config:
            payload["generationConfig"] = generation_config
        if system_instruction:
            payload["systemInstruction"] = {
                "role": "system",
                "parts": [{"text": system_instruction}],
            }

        endpoint = f"{self.base_url}/models/{request.model}:generateContent?key={self.api_key}"

        headers = {"Content-Type": "application/json"}

        try:
            response = await client.post(endpoint, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            raise ProviderAPICallError(f"Gemini request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()
        choices = self._build_choices(data.get("candidates", []))

        return LLMResponse(
            provider=LLMProvider.GEMINI,
            model=request.model,
            choices=choices,
            raw=data,
        )

    @staticmethod
    def _convert_messages(
        messages: List[LLMMessage],
    ) -> Tuple[str | None, List[dict[str, Any]]]:
        system_parts: List[str] = []
        contents: List[dict[str, Any]] = []

        for msg in messages:
            if msg.role == LLMRole.SYSTEM:
                system_parts.append(msg.content)
                continue

            role = "user" if msg.role == LLMRole.USER else "model"
            contents.append({"role": role, "parts": [{"text": msg.content}]})

        system_instruction = "\n\n".join(system_parts) if system_parts else None
        return system_instruction, contents

    @staticmethod
    def _build_choices(candidates: List[dict[str, Any]]) -> List[LLMChoice]:
        choices: List[LLMChoice] = []
        for idx, candidate in enumerate(candidates):
            content = candidate.get("content", {})
            parts = content.get("parts", [])
            text_parts = [
                part.get("text", "")
                for part in parts
                if isinstance(part, dict) and part.get("text")
            ]
            text = "\n\n".join(text_parts)
            choices.append(
                LLMChoice(
                    index=candidate.get("index", idx),
                    message=LLMMessage(role="assistant", content=text),
                )
            )
        if not choices:
            choices.append(
                LLMChoice(
                    index=0,
                    message=LLMMessage(role="assistant", content=""),
                )
            )
        return choices
66  app/providers/openai.py  Normal file
@@ -0,0 +1,66 @@
from __future__ import annotations

from typing import Any, Dict, List

import httpx

from app.exceptions import ProviderAPICallError
from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMResponse
from app.providers.base import LLMProviderClient


class OpenAIProvider(LLMProviderClient):
    name = LLMProvider.OPENAI.value
    api_key_env = "OPENAI_API_KEY"
    supports_stream = True
    base_url = "https://api.openai.com/v1/chat/completions"

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)

        payload = self.merge_payload(
            {
                "model": request.model,
                "messages": [msg.model_dump() for msg in request.messages],
                "temperature": request.temperature,
                "top_p": request.top_p,
                "max_tokens": request.max_tokens,
                "stream": request.stream,
            },
            request.extra_params,
        )

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            raise ProviderAPICallError(f"OpenAI request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()
        choices = self._build_choices(data.get("choices", []))

        return LLMResponse(
            provider=LLMProvider.OPENAI,
            model=data.get("model", request.model),
            choices=choices,
            raw=data,
        )

    @staticmethod
    def _build_choices(choices: List[dict[str, Any]]) -> List[LLMChoice]:
        built: List[LLMChoice] = []
        for choice in choices:
            message_data = choice.get("message") or {}
            message = LLMMessage(
                role=message_data.get("role", "assistant"),  # fallback to assistant
                content=message_data.get("content", ""),
            )
            built.append(LLMChoice(index=choice.get("index", len(built)), message=message))
        return built
77  app/providers/openrouter.py  Normal file
@@ -0,0 +1,77 @@
from __future__ import annotations

import os
from typing import Any, Dict, List

import httpx

from app.exceptions import ProviderAPICallError
from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMResponse
from app.providers.base import LLMProviderClient


class OpenRouterProvider(LLMProviderClient):
    name = LLMProvider.OPENROUTER.value
    api_key_env = "OPENROUTER_API_KEY"
    supports_stream = True
    base_url = "https://openrouter.ai/api/v1/chat/completions"

    def __init__(self, api_key: str | None):
        super().__init__(api_key)
        self.site_url = os.getenv("OPENROUTER_SITE_URL")
        self.app_name = os.getenv("OPENROUTER_APP_NAME")

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)

        payload = self.merge_payload(
            {
                "model": request.model,
                "messages": [msg.model_dump() for msg in request.messages],
                "temperature": request.temperature,
                "top_p": request.top_p,
                "max_tokens": request.max_tokens,
                "stream": request.stream,
            },
            request.extra_params,
        )

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        if self.site_url:
            headers["HTTP-Referer"] = self.site_url
        if self.app_name:
            headers["X-Title"] = self.app_name

        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            raise ProviderAPICallError(f"OpenRouter request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()
        choices = self._build_choices(data.get("choices", []))

        return LLMResponse(
            provider=LLMProvider.OPENROUTER,
            model=data.get("model", request.model),
            choices=choices,
            raw=data,
        )

    @staticmethod
    def _build_choices(choices: List[dict[str, Any]]) -> List[LLMChoice]:
        built: List[LLMChoice] = []
        for choice in choices:
            message_data = choice.get("message") or {}
            message = LLMMessage(
                role=message_data.get("role", "assistant"),
                content=message_data.get("content", ""),
            )
            built.append(LLMChoice(index=choice.get("index", len(built)), message=message))
        return built
87  app/providers/qwen.py  Normal file
@@ -0,0 +1,87 @@
from __future__ import annotations

from typing import Any, Dict, List

import httpx

from app.exceptions import ProviderAPICallError
from app.models import LLMChoice, LLMMessage, LLMProvider, LLMRequest, LLMResponse
from app.providers.base import LLMProviderClient


class QwenProvider(LLMProviderClient):
    name = LLMProvider.QWEN.value
    api_key_env = "QWEN_API_KEY"
    base_url = (
        "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
    )

    async def chat(
        self, request: LLMRequest, client: httpx.AsyncClient
    ) -> LLMResponse:
        self.ensure_stream_supported(request.stream)

        parameters = {
            "temperature": request.temperature,
            "top_p": request.top_p,
        }
        if request.max_tokens is not None:
            parameters["max_output_tokens"] = request.max_tokens

        # Strip None values from parameters
        parameters = {k: v for k, v in parameters.items() if v is not None}

        payload: Dict[str, Any] = {
            "model": request.model,
            "input": {"messages": [msg.model_dump() for msg in request.messages]},
        }
        if parameters:
            payload["parameters"] = parameters

        payload = self.merge_payload(payload, request.extra_params)

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        try:
            response = await client.post(self.base_url, json=payload, headers=headers)
            response.raise_for_status()
        except httpx.HTTPError as exc:
            raise ProviderAPICallError(f"Qwen request failed: {exc}") from exc

        data: Dict[str, Any] = response.json()
        choices = self._build_choices(data.get("output", {}))

        return LLMResponse(
            provider=LLMProvider.QWEN,
            model=request.model,
            choices=choices,
            raw=data,
        )

    @staticmethod
    def _build_choices(output: Dict[str, Any]) -> List[LLMChoice]:
        choices_payload = output.get("choices", [])
        if not choices_payload and output.get("text"):
            return [
                LLMChoice(
                    index=0,
                    message=LLMMessage(role="assistant", content=output["text"]),
                )
            ]

        choices: List[LLMChoice] = []
        for idx, choice in enumerate(choices_payload):
            message_data = choice.get("message") or {}
            message = LLMMessage(
                role=message_data.get("role", "assistant"),
                content=message_data.get("content", ""),
            )
            choices.append(LLMChoice(index=choice.get("index", idx), message=message))
        if not choices:
            choices.append(
                LLMChoice(index=0, message=LLMMessage(role="assistant", content=""))
            )
        return choices
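To close, a hedged end-to-end sketch (editorial, not part of the commit) of how a caller might drive one of these clients. The LLMRequest and LLMMessage field names are inferred from how the providers above read them (model, messages, temperature, top_p, max_tokens, stream, extra_params); the actual pydantic models live in app.models, which this commit does not include, so constructor details and defaults may differ.

# Usage sketch; assumes LLMRequest/LLMMessage accept the fields the providers read
# and that unset optional fields default sensibly (e.g. stream=False).
import asyncio
import os

import httpx

from app.models import LLMMessage, LLMRequest, LLMRole
from app.providers import OpenAIProvider


async def main() -> None:
    provider = OpenAIProvider(api_key=os.getenv("OPENAI_API_KEY"))
    request = LLMRequest(
        model="gpt-4o-mini",  # any chat-completions model id
        messages=[
            LLMMessage(role=LLMRole.SYSTEM, content="You are a concise assistant."),
            LLMMessage(role=LLMRole.USER, content="Say hello in one sentence."),
        ],
        temperature=0.2,
    )
    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await provider.chat(request, client)
    print(response.choices[0].message.content)


if __name__ == "__main__":
    asyncio.run(main())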