Files
gen_game-a0/api.py
vuongps38770 31de8b0d84 check point
2025-12-25 18:06:29 +07:00

514 lines
15 KiB
Python

import os
from typing import List, Dict, Any, Optional
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from pathlib import Path
import re
from src import (
GameCore, get_registry, reload_games,
get_active_game_types, get_active_type_ids,
get_game_by_id, id_to_type, type_to_id,
ModelConfig
)
# ============== APP ==============
app = FastAPI(
    title="Game Generator API",
    description="API tạo game giáo dục từ văn bản",
    version="2.0.0"
)
# NOTE(review): wide-open CORS (any origin/method/header) — fine for dev,
# should be tightened for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# ============== REQUEST/RESPONSE MODELS ==============
class LLMConfigRequest(BaseModel):
    """LLM provider settings supplied by a client (overrides the env defaults)."""
    provider: str = Field(default="gemini", description="ollama, gemini, openai")
    model_name: str = Field(default="gemini-2.0-flash-lite")
    # None means the key is resolved from the server environment
    api_key: Optional[str] = Field(default=None, description="API key (None = lấy từ env)")
    temperature: float = Field(default=0.1)
    base_url: Optional[str] = Field(default=None, description="Base URL cho Ollama")
class GenerateRequest(BaseModel):
    """Request body for POST /generate (multi-game pipeline)."""
    text: str = Field(description="Input text", min_length=10)
    # None means "use all currently active games"
    enabled_game_ids: Optional[List[int]] = Field(default=None, description="List of type_ids (1=quiz, 2=sequence_sentence, 3=sequence_word)")
    run_analyzer: bool = Field(default=True)
    run_validator: bool = Field(default=True)
    max_items: Optional[int] = Field(default=3)
    min_score: int = Field(default=50, description="Minimum score (0-100) for analyzer to include a game")
    debug: bool = Field(default=False, description="Print prompts to server log")
    # Optional per-request LLM config — overrides the global one for this call only
    llm_config: Optional[LLMConfigRequest] = Field(default=None, description="Override LLM config")
class TokenUsageResponse(BaseModel):
    """Aggregated token counts for the LLM calls made while serving a request."""
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
class GameScoreInfo(BaseModel):
    """Analyzer suitability score for one game type."""
    type_id: int
    score: int  # presumably 0-100, mirroring GenerateRequest.min_score — TODO confirm
    reason: str = ""
class GameResultData(BaseModel):
    """Unified structure for each game result (items plus optional metadata)."""
    items: List[Dict[str, Any]] = []
    metadata: Optional[Dict[str, Any]] = None
class CommonMetadataResponse(BaseModel):
    """Common metadata shared by the whole generate result."""
    title: str = ""
    description: str = ""
    grade: int = 0
    difficulty: int = 0
class GenerateResponse(BaseModel):
    """Response body for POST /generate."""
    success: bool
    games: List[int]  # type_ids of the games that were generated
    game_scores: List[GameScoreInfo] = []
    metadata: Optional[CommonMetadataResponse] = None  # common metadata from the analyzer
    results: Dict[int, GameResultData]  # keyed by type_id, value is {items, metadata}
    llm: Optional[str] = None
    api_calls: Optional[int] = None
    token_usage: Optional[TokenUsageResponse] = None
    errors: List[str] = []
class GameInfo(BaseModel):
    """Registry entry describing one game type."""
    type_id: int
    game_type: str  # internal string identifier, kept for reference
    display_name: str
    description: str
    active: bool
    max_items: int
class GamesListResponse(BaseModel):
    """Response body for GET /games."""
    total: int
    active_count: int
    games: List[GameInfo]
class ActionResponse(BaseModel):
    """Generic success/message response for admin-style actions."""
    success: bool
    message: str
    game_type: Optional[str] = None
    active: Optional[bool] = None
class LLMConfigResponse(BaseModel):
    """Response body for GET /llm (never echoes the API key)."""
    provider: str
    model_name: str
    temperature: float
    base_url: Optional[str] = None
# ============== GLOBAL ==============
_core: Optional[GameCore] = None  # lazily created shared GameCore instance
_current_config: Optional[ModelConfig] = None  # config backing _core
def get_core(config_override: Optional[LLMConfigRequest] = None) -> GameCore:
    """Return the shared GameCore, or a throwaway one for a per-request override.

    A request-scoped override never replaces the cached global instance;
    it builds a fresh GameCore used only for that call.
    """
    global _core, _current_config
    if config_override is not None:
        override = ModelConfig(
            provider=config_override.provider,
            model_name=config_override.model_name,
            api_key=config_override.api_key,
            temperature=config_override.temperature,
            base_url=config_override.base_url,
        )
        return GameCore(llm_config=override)
    if _core is None:
        # First use: let GameCore auto-detect its provider from the environment
        _core = GameCore()
        _current_config = _core.llm_config
    return _core
# ============== ENDPOINTS ==============
@app.post("/generate", response_model=GenerateResponse)
async def generate_games(request: GenerateRequest):
    """Generate games from text with scoring.

    Runs the multi-game pipeline (analyze -> score -> generate -> validate)
    and converts internal game_type strings to numeric type_ids for clients.
    Failures are reported as a structured response (success=False) rather
    than an HTTP 500.
    """
    try:
        core = get_core(request.llm_config)
        # Convert type_ids to game_types, silently dropping unknown ids
        if request.enabled_game_ids:
            games = [id_to_type(tid) for tid in request.enabled_game_ids if id_to_type(tid)]
        else:
            games = get_active_game_types()
        result = core.run_multi(
            text=request.text,
            enabled_games=games,
            max_items=request.max_items or 3,
            min_score=request.min_score,
            validate=request.run_validator,
            debug=request.debug
        )
        # Convert game_types back to type_ids for the response
        game_ids = [type_to_id(g) for g in result.get("games", [])]
        game_scores = [
            GameScoreInfo(
                type_id=type_to_id(s.get("type", "")),
                score=s.get("score", 0),
                reason=s.get("reason", "")
            )
            for s in result.get("game_scores", [])
        ]
        # Re-key results by type_id, skipping game types with no valid id
        results_by_id = {}
        for game_type, items in result.get("results", {}).items():
            tid = type_to_id(game_type)
            if tid > 0:
                results_by_id[tid] = items
        # Common metadata produced by the analyzer (may be absent)
        core_meta = result.get("metadata", {})
        common_metadata = CommonMetadataResponse(
            title=core_meta.get("title", ""),
            description=core_meta.get("description", ""),
            grade=core_meta.get("grade", 0),
            difficulty=core_meta.get("difficulty", 0)
        ) if core_meta else None
        return GenerateResponse(
            success=result.get("success", False),
            games=game_ids,
            game_scores=game_scores,
            metadata=common_metadata,
            results=results_by_id,
            llm=result.get("llm"),
            # Fix: api_calls is declared on GenerateResponse but was never
            # populated; forward it from the core result (None if absent).
            api_calls=result.get("api_calls"),
            token_usage=result.get("token_usage"),
            errors=result.get("errors", [])
        )
    except Exception as e:
        # Surface the failure in the response body instead of a bare 500
        return GenerateResponse(
            success=False,
            games=[],
            game_scores=[],
            results={},
            errors=[str(e)]
        )
# ============== SINGLE BEST (1 PROMPT) ==============
class SingleGenerateRequest(BaseModel):
    """Request body for POST /generate/single (one prompt, best game only)."""
    text: str = Field(description="Input text", min_length=10)
    enabled_game_ids: Optional[List[int]] = Field(default=None, description="Limit type_ids to choose from")
    max_items: int = Field(default=3, description="Max items to generate")
    run_validator: bool = Field(default=True)
    debug: bool = Field(default=False)
    llm_config: Optional[LLMConfigRequest] = Field(default=None)
class SingleGenerateResponse(BaseModel):
    """Response body for POST /generate/single."""
    success: bool
    type_id: Optional[int] = None  # the single game type that was chosen
    reason: Optional[str] = None  # why the analyzer picked this game
    items: List[Dict[str, Any]] = []
    token_usage: Optional[TokenUsageResponse] = None
    llm: Optional[str] = None
    errors: List[str] = []
@app.post("/generate/single", response_model=SingleGenerateResponse)
async def generate_single_game(request: SingleGenerateRequest):
    """Generate the single best-fitting game within one prompt.

    - Analyzes the text to pick the most suitable game type
    - Generates items for that game
    - All within a single API call
    """
    try:
        core = get_core(request.llm_config)
        # Restrict the candidate set only when the caller supplied type_ids
        games = None
        if request.enabled_game_ids:
            games = [id_to_type(tid) for tid in request.enabled_game_ids if id_to_type(tid)]
        result = core.run_single(
            text=request.text,
            enabled_games=games,
            max_items=request.max_items,
            debug=request.debug,
            validate=request.run_validator
        )
        # Map the chosen game_type string back to its numeric id
        chosen = result.get("game_type")
        return SingleGenerateResponse(
            success=result.get("success", False),
            type_id=type_to_id(chosen) if chosen else None,
            reason=result.get("reason"),
            items=result.get("items", []),
            token_usage=result.get("token_usage"),
            llm=result.get("llm"),
            errors=result.get("errors", [])
        )
    except Exception as e:
        return SingleGenerateResponse(success=False, errors=[str(e)])
# ============== DIRECT GENERATE (1 game cụ thể, không analyze) ==============
class DirectGenerateRequest(BaseModel):
    """Request body for POST /generate/{type_id} (direct, analyzer skipped)."""
    text: str = Field(description="Input text", min_length=10)
    max_items: int = Field(default=3, description="Max items to generate")
    run_validator: bool = Field(default=True)
    debug: bool = Field(default=False)
    llm_config: Optional[LLMConfigRequest] = Field(default=None)
class DirectGenerateResponse(BaseModel):
    """Unified response, same shape as GenerateResponse but for one game."""
    success: bool
    games: List[int] = []  # single type_id in a list
    results: Dict[int, GameResultData] = {}  # same structure as GenerateResponse
    is_format_error: bool = False
    format_error: Optional[str] = None
    token_usage: Optional[TokenUsageResponse] = None
    llm: Optional[str] = None
    errors: List[str] = []
@app.post("/generate/{type_id}", response_model=DirectGenerateResponse)
async def generate_direct(type_id: int, request: DirectGenerateRequest):
    """Generate one specific game, WITHOUT running the analyzer.

    The response format mirrors /generate but contains exactly one game.
    An unknown type_id yields success=False rather than a 404.
    """
    try:
        # Resolve the numeric id to its internal game_type string
        game_type = id_to_type(type_id)
        if not game_type:
            return DirectGenerateResponse(
                success=False,
                games=[type_id],
                errors=[f"Game with type_id={type_id} not found"]
            )
        core = get_core(request.llm_config)
        result = core.generate(
            game_type=game_type,
            text=request.text,
            max_items=request.max_items,
            validate=request.run_validator,
            debug=request.debug
        )
        format_error = result.get("format_error")
        payload = result.get("data") or {}
        # Build the unified result structure; tolerate a non-dict payload
        if isinstance(payload, dict):
            game_result = GameResultData(
                items=payload.get("items", []),
                metadata=payload.get("metadata")
            )
        else:
            game_result = GameResultData(items=[], metadata=None)
        return DirectGenerateResponse(
            success=result.get("success", False),
            games=[type_id],
            results={type_id: game_result},
            is_format_error=format_error is not None,
            format_error=format_error,
            token_usage=result.get("token_usage"),
            llm=result.get("llm"),
            errors=result.get("errors", [])
        )
    except Exception as e:
        return DirectGenerateResponse(success=False, games=[type_id], errors=[str(e)])
@app.get("/games", response_model=GamesListResponse)
async def list_games():
    """List every registered game (active and inactive), sorted by type_id."""
    registry = get_registry()
    all_games = registry.get_all_games_including_inactive()
    infos = [
        GameInfo(
            type_id=game.type_id,
            game_type=game.game_type,
            display_name=game.display_name,
            description=game.description,
            active=game.active,
            max_items=game.max_items,
        )
        for game in all_games.values()
    ]
    infos.sort(key=lambda info: info.type_id)
    return GamesListResponse(
        total=len(infos),
        active_count=sum(1 for info in infos if info.active),
        games=infos
    )
@app.post("/games/{game_type}/activate", response_model=ActionResponse)
async def activate_game(game_type: str):
    """Turn a game on by flipping its "active" flag in its source file."""
    return _set_game_active(game_type, True)
@app.post("/games/{game_type}/deactivate", response_model=ActionResponse)
async def deactivate_game(game_type: str):
    """Turn a game off by flipping its "active" flag in its source file."""
    return _set_game_active(game_type, False)
def _set_game_active(game_type: str, active: bool) -> ActionResponse:
    """Rewrite a game's source file to set its "active" flag, then reload games.

    Raises:
        HTTPException: 404 if the game file does not exist, 400 if no
            "active" field can be found inside it.
    """
    games_dir = Path(__file__).parent / "src" / "games"
    game_file = games_dir / f"{game_type}.py"
    if not game_file.exists():
        raise HTTPException(404, f"Game '{game_type}' not found")
    content = game_file.read_text(encoding="utf-8")
    pattern = r'("active"\s*:\s*)(True|False)'
    new_value = "True" if active else "False"
    # Single pass via re.subn (was: re.search then re.sub — two scans).
    # \g<1> is the unambiguous backreference form.  As before, EVERY
    # "active": True/False occurrence in the file is rewritten —
    # NOTE(review): confirm each game file contains exactly one such field.
    new_content, n_subs = re.subn(pattern, rf'\g<1>{new_value}', content)
    if n_subs == 0:
        raise HTTPException(400, f"Cannot find 'active' field in {game_type}.py")
    game_file.write_text(new_content, encoding="utf-8")
    reload_games()
    action = "activated" if active else "deactivated"
    return ActionResponse(
        success=True,
        message=f"Game '{game_type}' has been {action}",
        game_type=game_type,
        active=active
    )
@app.get("/llm", response_model=LLMConfigResponse)
async def get_llm_config():
    """Return the LLM configuration currently in effect globally."""
    global _current_config
    if _current_config is None:
        # Lazily initialize the core so a config always exists
        _current_config = get_core().llm_config
    cfg = _current_config
    return LLMConfigResponse(
        provider=cfg.provider,
        model_name=cfg.model_name,
        temperature=cfg.temperature,
        base_url=cfg.base_url
    )
@app.post("/llm", response_model=ActionResponse)
async def set_llm_config(config: LLMConfigRequest):
    """Replace the global LLM configuration.

    Rebuilds the shared GameCore; on failure the previous core/config are
    kept and the error is reported in the response body, not as a 500.
    """
    global _core, _current_config
    try:
        # Fix: build ModelConfig inside the try so that a config validation
        # error is also reported as a structured failure (it was previously
        # constructed before the try and would escape as an unhandled 500).
        new_config = ModelConfig(
            provider=config.provider,
            model_name=config.model_name,
            api_key=config.api_key,
            temperature=config.temperature,
            base_url=config.base_url
        )
        _core = GameCore(llm_config=new_config)
        _current_config = new_config
        return ActionResponse(
            success=True,
            message=f"LLM changed to {config.provider}/{config.model_name}"
        )
    except Exception as e:
        return ActionResponse(
            success=False,
            message=f"Failed to change LLM: {str(e)}"
        )
@app.post("/reload", response_model=ActionResponse)
async def reload_all_games():
    """Reload game definitions from disk and drop the cached core."""
    global _core
    reload_games()
    _core = None  # force re-creation on the next request
    return ActionResponse(
        success=True,
        message=f"Reloaded. Active games: {get_active_game_types()}"
    )
@app.get("/health")
async def health_check():
    """Liveness probe: reports service status and the active game list."""
    return {"status": "healthy", "active_games": get_active_game_types()}
# ============== STARTUP ==============
@app.on_event("startup")
async def startup():
    # Print a boot banner.  NOTE(review): @app.on_event is deprecated in
    # recent FastAPI in favor of lifespan handlers — confirm target version.
    print("🚀 Game Generator API started")
    print(f"📋 Active games: {get_active_game_types()}")
# Run a local development server when executed directly.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=2088)