数据导入schema分析功能接口和测试用例
This commit is contained in:
45
app/main.py
45
app/main.py
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
import httpx
|
||||
@@ -7,13 +8,13 @@ from fastapi import Depends, FastAPI, HTTPException, Request
|
||||
|
||||
from app.exceptions import ProviderAPICallError, ProviderConfigurationError
|
||||
from app.models import (
|
||||
DataImportAnalysisRequest,
|
||||
DataImportAnalysisResponse,
|
||||
DataImportAnalysisJobAck,
|
||||
DataImportAnalysisJobRequest,
|
||||
LLMRequest,
|
||||
LLMResponse,
|
||||
)
|
||||
from app.services import LLMGateway
|
||||
from app.services.import_analysis import build_import_messages, resolve_provider_from_model
|
||||
from app.services.import_analysis import process_import_analysis_job
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
@@ -54,40 +55,22 @@ def create_app() -> FastAPI:
|
||||
|
||||
@application.post(
    "/v1/import/analyze",
    response_model=DataImportAnalysisJobAck,
    summary="Schedule async import analysis and notify via callback",
    status_code=200,
)
async def analyze_import_data(
    payload: DataImportAnalysisJobRequest,
    client: httpx.AsyncClient = Depends(get_http_client),
) -> DataImportAnalysisJobAck:
    """Accept an import-analysis job and schedule it to run in the background.

    The payload is deep-copied so the background task cannot observe
    mutations made after this handler returns, the job is scheduled on the
    running event loop, and an acknowledgement is returned immediately.
    Results are delivered out-of-band by ``process_import_analysis_job``
    (presumably via the job's callback URL — confirm against that service).

    Args:
        payload: The import-analysis job request to process asynchronously.
        client: Shared HTTP client injected via ``get_http_client``.
            NOTE(review): assumes this client is application-scoped and
            outlives the request; a request-scoped client could be closed
            before the background task finishes — confirm the dependency.

    Returns:
        DataImportAnalysisJobAck with ``status="accepted"``.
    """
    # Deep copy: the task may outlive this request handler, so it must not
    # share mutable state with the caller's payload object.
    request_copy = payload.model_copy(deep=True)

    async def _runner() -> None:
        await process_import_analysis_job(request_copy, client)

    # BUG FIX: the event loop keeps only a weak reference to tasks created
    # with asyncio.create_task(); a fire-and-forget task with no strong
    # reference can be garbage-collected before it completes (documented
    # pitfall in the asyncio API reference). Retain the task in a set on
    # application.state until it is done, then drop it via a done callback.
    task = asyncio.create_task(_runner())
    pending_tasks = getattr(application.state, "import_analysis_tasks", None)
    if pending_tasks is None:
        pending_tasks = set()
        application.state.import_analysis_tasks = pending_tasks
    pending_tasks.add(task)
    task.add_done_callback(pending_tasks.discard)

    return DataImportAnalysisJobAck(import_record_id=payload.import_record_id, status="accepted")
|
||||
|
||||
return application
|
||||
|
||||
|
||||
Reference in New Issue
Block a user