This commit is contained in:
vuongps38770
2026-01-13 09:33:10 +07:00
parent 29544da4c6
commit 7c41ddaa82
9 changed files with 1362 additions and 599 deletions

362
api.py
View File

@@ -5,12 +5,19 @@ from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from pathlib import Path
import re
from dotenv import load_dotenv
load_dotenv()
from src import (
GameCore, get_registry, reload_games,
get_active_game_types, get_active_type_ids,
get_game_by_id, id_to_type, type_to_id,
ModelConfig
GameCore,
get_registry,
reload_games,
get_active_game_types,
get_active_type_ids,
get_game_by_id,
id_to_type,
type_to_id,
ModelConfig,
)
@@ -18,7 +25,7 @@ from src import (
app = FastAPI(
title="Game Generator API",
description="API tạo game giáo dục từ văn bản",
version="2.0.0"
version="2.0.0",
)
app.add_middleware(
@@ -31,31 +38,43 @@ app.add_middleware(
# ============== REQUEST/RESPONSE MODELS ==============
class LLMConfigRequest(BaseModel):
provider: str = Field(default="gemini", description="ollama, gemini, openai")
model_name: str = Field(default="gemini-2.0-flash-lite")
api_key: Optional[str] = Field(default=None, description="API key (None = lấy từ env)")
api_key: Optional[str] = Field(
default=None, description="API key (None = lấy từ env)"
)
temperature: float = Field(default=0.1)
base_url: Optional[str] = Field(default=None, description="Base URL cho Ollama")
class GenerateRequest(BaseModel):
text: str = Field(description="Input text", min_length=10)
enabled_game_ids: Optional[List[int]] = Field(default=None, description="List of type_ids (1=quiz, 2=sequence_sentence, 3=sequence_word)")
enabled_game_ids: Optional[List[int]] = Field(
default=None,
description="List of type_ids (1=quiz, 2=sequence_sentence, 3=sequence_word)",
)
run_analyzer: bool = Field(default=True)
run_validator: bool = Field(default=True)
max_items: Optional[int] = Field(default=3)
min_score: int = Field(default=50, description="Minimum score (0-100) for analyzer to include a game")
max_items: Optional[int] = Field(default=100)
min_score: int = Field(
default=50, description="Minimum score (0-100) for analyzer to include a game"
)
debug: bool = Field(default=False, description="Print prompts to server log")
# LLM config (optional - override global)
llm_config: Optional[LLMConfigRequest] = Field(default=None, description="Override LLM config")
llm_config: Optional[LLMConfigRequest] = Field(
default=None, description="Override LLM config"
)
class TokenUsageResponse(BaseModel):
    """Token and character usage stats reported back from an LLM call."""

    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
    input_chars: int = 0  # Character count sent to LLM
    output_chars: int = 0  # Character count received from LLM
class GameScoreInfo(BaseModel):
@@ -66,12 +85,14 @@ class GameScoreInfo(BaseModel):
class GameResultData(BaseModel):
    """Unified structure for each game result."""

    # Generated game items; item shape depends on the game type.
    items: List[Dict[str, Any]] = []
    # Optional per-game metadata returned by the generator, if any.
    metadata: Optional[Dict[str, Any]] = None
class CommonMetadataResponse(BaseModel):
"""Metadata chung cho toàn bộ kết quả generate"""
title: str = ""
description: str = ""
grade: int = 0
@@ -92,7 +113,7 @@ class GenerateResponse(BaseModel):
class GameInfo(BaseModel):
type_id: int
game_type: str # Keep for reference
game_type: str
display_name: str
description: str
active: bool
@@ -127,7 +148,7 @@ _current_config: Optional[ModelConfig] = None
def get_core(config_override: Optional[LLMConfigRequest] = None) -> GameCore:
"""Get or create GameCore with optional config override"""
global _core, _current_config
if config_override:
# Create new core with override config
config = ModelConfig(
@@ -135,69 +156,77 @@ def get_core(config_override: Optional[LLMConfigRequest] = None) -> GameCore:
model_name=config_override.model_name,
api_key=config_override.api_key,
temperature=config_override.temperature,
base_url=config_override.base_url
base_url=config_override.base_url,
)
return GameCore(llm_config=config)
if _core is None:
# Default: tự detect từ env
_core = GameCore()
_current_config = _core.llm_config
return _core
# ============== ENDPOINTS ==============
@app.post("/generate", response_model=GenerateResponse)
async def generate_games(request: GenerateRequest):
"""Generate games from text with scoring"""
try:
core = get_core(request.llm_config)
# Convert type_ids to game_types
if request.enabled_game_ids:
games = [id_to_type(tid) for tid in request.enabled_game_ids if id_to_type(tid)]
games = [
id_to_type(tid) for tid in request.enabled_game_ids if id_to_type(tid)
]
else:
games = get_active_game_types()
result = core.run_multi(
result = await core.run_multi_async(
text=request.text,
enabled_games=games,
max_items=request.max_items or 3,
min_score=request.min_score,
max_items=request.max_items or 100,
validate=request.run_validator,
debug=request.debug
debug=request.debug,
)
# Convert game_types to type_ids in response
game_ids = [type_to_id(g) for g in result.get("games", [])]
# Convert game_scores
game_scores = []
for s in result.get("game_scores", []):
game_scores.append(GameScoreInfo(
type_id=type_to_id(s.get("type", "")),
score=s.get("score", 0),
reason=s.get("reason", "")
))
game_scores.append(
GameScoreInfo(
type_id=type_to_id(s.get("type", "")),
score=s.get("score", 0),
reason=s.get("reason", ""),
)
)
# Convert results keys to type_ids
results_by_id = {}
for game_type, items in result.get("results", {}).items():
tid = type_to_id(game_type)
if tid > 0:
if tid >= 0: # 0=quiz, 1=sequence are valid
results_by_id[tid] = items
# Get common metadata from analyzer
core_meta = result.get("metadata", {})
common_metadata = CommonMetadataResponse(
title=core_meta.get("title", ""),
description=core_meta.get("description", ""),
grade=core_meta.get("grade", 0),
difficulty=core_meta.get("difficulty", 0)
) if core_meta else None
common_metadata = (
CommonMetadataResponse(
title=core_meta.get("title", ""),
description=core_meta.get("description", ""),
grade=core_meta.get("grade", 0),
difficulty=core_meta.get("difficulty", 0),
)
if core_meta
else None
)
return GenerateResponse(
success=result.get("success", False),
games=game_ids,
@@ -206,25 +235,120 @@ async def generate_games(request: GenerateRequest):
results=results_by_id,
llm=result.get("llm"),
token_usage=result.get("token_usage"),
errors=result.get("errors", [])
errors=result.get("errors", []),
)
except Exception as e:
return GenerateResponse(
success=False,
games=[],
game_scores=[],
results={},
errors=[str(e)]
success=False, games=[], game_scores=[], results={}, errors=[str(e)]
)
# ============== FAST GENERATE (1 API call - OPTIMIZED) ==============
class FastGenerateRequest(BaseModel):
    """Request payload for /generate/fast (single-call analyze + generate)."""

    text: str = Field(description="Input text", min_length=10)
    # type_ids to restrict generation to; None = use all active games.
    enabled_game_ids: Optional[List[int]] = Field(
        default=None, description="Limit type_ids"
    )
    max_items: int = Field(default=100, description="Max items per game")
    min_score: int = Field(default=50, description="Min score 0-100 to include game")
    run_validator: bool = Field(default=True)
    debug: bool = Field(default=False)
    # Optional per-request LLM override; None = use the global core config.
    llm_config: Optional[LLMConfigRequest] = Field(default=None)
@app.post("/generate/fast", response_model=GenerateResponse)
async def generate_fast(request: FastGenerateRequest):
    """
    🚀 OPTIMIZED: analyze the text AND generate every suitable game in ONE API call.

    Versus /generate (2+ calls): a single LLM round-trip, fewer tokens, faster.
    Versus /generate/single: returns MULTIPLE games, not just one.
    """
    try:
        core = get_core(request.llm_config)

        # Resolve requested type_ids to internal game type names, dropping
        # any id that does not map to a known game.
        requested_ids = request.enabled_game_ids
        if requested_ids:
            selected_games = [
                id_to_type(tid) for tid in requested_ids if id_to_type(tid)
            ]
        else:
            selected_games = get_active_game_types()

        raw = await core.run_fast_async(
            text=request.text,
            enabled_games=selected_games,
            max_items=request.max_items,
            min_score=request.min_score,
            validate=request.run_validator,
            debug=request.debug,
        )

        # Map internal game type names back to public type_ids.
        game_ids = [type_to_id(g) for g in raw.get("games", [])]

        game_scores = []
        for score_entry in raw.get("game_scores", []):
            game_scores.append(
                GameScoreInfo(
                    type_id=type_to_id(score_entry.get("type", "")),
                    score=score_entry.get("score", 0),
                    reason=score_entry.get("reason", ""),
                )
            )

        # Re-key results by type_id; a negative id marks an unknown game type
        # (0 and 1 are valid ids here, so the guard is >= 0, not > 0).
        results_by_id = {}
        for game_type, data in raw.get("results", {}).items():
            if (tid := type_to_id(game_type)) >= 0:
                results_by_id[tid] = data

        meta = raw.get("metadata", {})
        if meta:
            common_metadata = CommonMetadataResponse(
                title=meta.get("title", ""),
                description=meta.get("description", ""),
                grade=meta.get("grade", 0),
                difficulty=meta.get("difficulty", 0),
            )
        else:
            common_metadata = None

        return GenerateResponse(
            success=raw.get("success", False),
            games=game_ids,
            game_scores=game_scores,
            metadata=common_metadata,
            results=results_by_id,
            api_calls=1,  # this endpoint always performs exactly one LLM call
            llm=raw.get("llm"),
            token_usage=raw.get("token_usage"),
            errors=raw.get("errors", []),
        )
    except Exception as e:
        return GenerateResponse(
            success=False, games=[], game_scores=[], results={}, errors=[str(e)]
        )
# ============== SINGLE BEST (1 PROMPT) ==============
class SingleGenerateRequest(BaseModel):
text: str = Field(description="Input text", min_length=10)
enabled_game_ids: Optional[List[int]] = Field(default=None, description="Limit type_ids to choose from")
max_items: int = Field(default=3, description="Max items to generate")
enabled_game_ids: Optional[List[int]] = Field(
default=None, description="Limit type_ids to choose from"
)
max_items: int = Field(default=100, description="Max items to generate")
run_validator: bool = Field(default=True)
debug: bool = Field(default=False)
llm_config: Optional[LLMConfigRequest] = Field(default=None)
@@ -244,32 +368,34 @@ class SingleGenerateResponse(BaseModel):
async def generate_single_game(request: SingleGenerateRequest):
"""
Generate 1 game phù hợp nhất trong 1 prompt duy nhất.
- Analyze text để chọn game type tốt nhất
- Generate items cho game đó
- Tất cả trong 1 API call
"""
try:
core = get_core(request.llm_config)
# Convert type_ids to game_types
if request.enabled_game_ids:
games = [id_to_type(tid) for tid in request.enabled_game_ids if id_to_type(tid)]
games = [
id_to_type(tid) for tid in request.enabled_game_ids if id_to_type(tid)
]
else:
games = None
result = core.run_single(
text=request.text,
enabled_games=games,
max_items=request.max_items,
debug=request.debug,
validate=request.run_validator
validate=request.run_validator,
)
# Convert game_type to type_id
game_type = result.get("game_type")
tid = type_to_id(game_type) if game_type else None
return SingleGenerateResponse(
success=result.get("success", False),
type_id=tid,
@@ -277,21 +403,19 @@ async def generate_single_game(request: SingleGenerateRequest):
items=result.get("items", []),
token_usage=result.get("token_usage"),
llm=result.get("llm"),
errors=result.get("errors", [])
errors=result.get("errors", []),
)
except Exception as e:
return SingleGenerateResponse(
success=False,
errors=[str(e)]
)
return SingleGenerateResponse(success=False, errors=[str(e)])
# ============== DIRECT GENERATE (1 game cụ thể, không analyze) ==============
class DirectGenerateRequest(BaseModel):
text: str = Field(description="Input text", min_length=10)
max_items: int = Field(default=3, description="Max items to generate")
max_items: int = Field(default=100, description="Max items to generate")
run_validator: bool = Field(default=True)
debug: bool = Field(default=False)
llm_config: Optional[LLMConfigRequest] = Field(default=None)
@@ -299,6 +423,7 @@ class DirectGenerateRequest(BaseModel):
class DirectGenerateResponse(BaseModel):
"""Response thống nhất, giống GenerateResponse nhưng cho 1 game"""
success: bool
games: List[int] = [] # Single type_id in list
results: Dict[int, GameResultData] = {} # Same structure as GenerateResponse
@@ -322,28 +447,28 @@ async def generate_direct(type_id: int, request: DirectGenerateRequest):
return DirectGenerateResponse(
success=False,
games=[type_id],
errors=[f"Game with type_id={type_id} not found"]
errors=[f"Game with type_id={type_id} not found"],
)
core = get_core(request.llm_config)
result = core.generate(
game_type=game_type,
text=request.text,
max_items=request.max_items,
validate=request.run_validator,
debug=request.debug
debug=request.debug,
)
format_error = result.get("format_error")
data = result.get("data") or {}
# Build results với structure thống nhất
game_result = GameResultData(
items=data.get("items", []) if isinstance(data, dict) else [],
metadata=data.get("metadata") if isinstance(data, dict) else None
metadata=data.get("metadata") if isinstance(data, dict) else None,
)
return DirectGenerateResponse(
success=result.get("success", False),
games=[type_id],
@@ -352,15 +477,11 @@ async def generate_direct(type_id: int, request: DirectGenerateRequest):
format_error=format_error,
token_usage=result.get("token_usage"),
llm=result.get("llm"),
errors=result.get("errors", [])
errors=result.get("errors", []),
)
except Exception as e:
return DirectGenerateResponse(
success=False,
games=[type_id],
errors=[str(e)]
)
return DirectGenerateResponse(success=False, games=[type_id], errors=[str(e)])
@app.get("/games", response_model=GamesListResponse)
@@ -368,29 +489,29 @@ async def list_games():
"""Lấy danh sách games"""
registry = get_registry()
all_games = registry.get_all_games_including_inactive()
games_list = []
active_count = 0
for game_type, game in all_games.items():
games_list.append(GameInfo(
type_id=game.type_id,
game_type=game.game_type,
display_name=game.display_name,
description=game.description,
active=game.active,
max_items=game.max_items,
))
games_list.append(
GameInfo(
type_id=game.type_id,
game_type=game.game_type,
display_name=game.display_name,
description=game.description,
active=game.active,
max_items=game.max_items,
)
)
if game.active:
active_count += 1
# Sort by type_id
games_list.sort(key=lambda g: g.type_id)
return GamesListResponse(
total=len(games_list),
active_count=active_count,
games=games_list
total=len(games_list), active_count=active_count, games=games_list
)
@@ -409,28 +530,28 @@ async def deactivate_game(game_type: str):
def _set_game_active(game_type: str, active: bool) -> ActionResponse:
games_dir = Path(__file__).parent / "src" / "games"
game_file = games_dir / f"{game_type}.py"
if not game_file.exists():
raise HTTPException(404, f"Game '{game_type}' not found")
content = game_file.read_text(encoding="utf-8")
pattern = r'("active"\s*:\s*)(True|False)'
new_value = "True" if active else "False"
if not re.search(pattern, content):
raise HTTPException(400, f"Cannot find 'active' field in {game_type}.py")
new_content = re.sub(pattern, f'\\1{new_value}', content)
new_content = re.sub(pattern, f"\\1{new_value}", content)
game_file.write_text(new_content, encoding="utf-8")
reload_games()
action = "activated" if active else "deactivated"
return ActionResponse(
success=True,
message=f"Game '{game_type}' has been {action}",
game_type=game_type,
active=active
active=active,
)
@@ -438,16 +559,16 @@ def _set_game_active(game_type: str, active: bool) -> ActionResponse:
async def get_llm_config():
"""Xem LLM config hiện tại"""
global _current_config
if _current_config is None:
core = get_core()
_current_config = core.llm_config
return LLMConfigResponse(
provider=_current_config.provider,
model_name=_current_config.model_name,
temperature=_current_config.temperature,
base_url=_current_config.base_url
base_url=_current_config.base_url,
)
@@ -455,50 +576,43 @@ async def get_llm_config():
async def set_llm_config(config: LLMConfigRequest):
"""Đổi LLM config global"""
global _core, _current_config
new_config = ModelConfig(
provider=config.provider,
model_name=config.model_name,
api_key=config.api_key,
temperature=config.temperature,
base_url=config.base_url
base_url=config.base_url,
)
try:
_core = GameCore(llm_config=new_config)
_current_config = new_config
return ActionResponse(
success=True,
message=f"LLM changed to {config.provider}/{config.model_name}"
message=f"LLM changed to {config.provider}/{config.model_name}",
)
except Exception as e:
return ActionResponse(
success=False,
message=f"Failed to change LLM: {str(e)}"
)
return ActionResponse(success=False, message=f"Failed to change LLM: {str(e)}")
@app.post("/reload", response_model=ActionResponse)
async def reload_all_games():
"""Reload games"""
global _core
reload_games()
_core = None
return ActionResponse(
success=True,
message=f"Reloaded. Active games: {get_active_game_types()}"
success=True, message=f"Reloaded. Active games: {get_active_game_types()}"
)
@app.get("/health")
async def health_check():
return {
"status": "healthy",
"active_games": get_active_game_types()
}
return {"status": "healthy", "active_games": get_active_game_types()}
# ============== STARTUP ==============
@@ -510,4 +624,8 @@ async def startup():
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=2088)
port = os.getenv("PORT")
if not port:
raise ValueError("Missing required environment variable: PORT")
uvicorn.run(app, host="0.0.0.0", port=int(port))