Backend:
- max_ai_messages_per_day + max_ai_tokens_per_day on User model (nullable, override)
- Migration 008: add columns + seed default settings (100 msgs, 500K tokens)
- usage_service: count today's messages + tokens, check quota, get limits
- GET /chats/quota returns usage vs limits + reset time
- POST /chats/{id}/messages checks quota before streaming (429 if exceeded)
- Admin user schemas expose both limit fields
- GET /admin/usage returns per-user daily message + token counts
- admin_user_service allows updating both limit fields
Frontend:
- Chat header shows "X/Y messages · XK/YK tokens" with red highlight at limit
- Quota refreshes every 30s via TanStack Query
- Admin usage page with table: user, messages today, tokens today
- Route + sidebar entry for admin usage
- English + Russian translations
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
124 lines
4.2 KiB
Python
124 lines
4.2 KiB
Python
import uuid
from typing import Annotated

from fastapi import APIRouter, Depends, HTTPException, Query, status
from fastapi.responses import StreamingResponse
from sqlalchemy.ext.asyncio import AsyncSession

from app.api.deps import get_current_user
from app.database import get_db
from app.models.user import User
from app.schemas.chat import (
    ChatListResponse,
    ChatResponse,
    CreateChatRequest,
    MessageListResponse,
    MessageResponse,
    SendMessageRequest,
    UpdateChatRequest,
)
from app.services import chat_service, skill_service
from app.services.ai_service import stream_ai_response
from app.services.usage_service import check_user_quota
|
|
|
|
router = APIRouter(prefix="/chats", tags=["chats"])
|
|
|
|
|
|
@router.get("/quota")
async def get_quota(
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
):
    """Return the current user's daily usage versus their limits.

    The response shape comes straight from ``check_user_quota`` (usage,
    limits, and reset time — see usage_service).
    """
    quota = await check_user_quota(db, user)
    return quota
|
|
|
|
|
|
@router.post("/", response_model=ChatResponse, status_code=status.HTTP_201_CREATED)
async def create_chat(
    data: CreateChatRequest,
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
):
    """Create a chat for the current user, optionally attached to a skill."""
    skill_id = data.skill_id
    if skill_id:
        # Verify the user may use this skill before creating anything.
        await skill_service.validate_skill_accessible(db, skill_id, user.id)
    created = await chat_service.create_chat(db, user, data.title, skill_id)
    return ChatResponse.model_validate(created)
|
|
|
|
|
|
@router.get("/", response_model=ChatListResponse)
async def list_chats(
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
    archived: bool | None = Query(default=None),
):
    """List the user's chats; `archived` filters by archive state when given."""
    rows = await chat_service.get_user_chats(db, user.id, archived)
    return ChatListResponse(chats=list(map(ChatResponse.model_validate, rows)))
|
|
|
|
|
|
@router.get("/{chat_id}", response_model=ChatResponse)
async def get_chat(
    chat_id: uuid.UUID,
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
):
    """Fetch one chat; ownership is enforced by chat_service via user.id."""
    found = await chat_service.get_chat(db, chat_id, user.id)
    return ChatResponse.model_validate(found)
|
|
|
|
|
|
@router.patch("/{chat_id}", response_model=ChatResponse)
async def update_chat(
    chat_id: uuid.UUID,
    data: UpdateChatRequest,
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
):
    """Update a chat's title, archive flag, and/or skill binding."""
    if data.skill_id:
        # Skill changes go through the same access check as chat creation.
        await skill_service.validate_skill_accessible(db, data.skill_id, user.id)
    updated = await chat_service.update_chat(
        db,
        chat_id,
        user.id,
        data.title,
        data.is_archived,
        data.skill_id,
    )
    return ChatResponse.model_validate(updated)
|
|
|
|
|
|
@router.delete("/{chat_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_chat(
    chat_id: uuid.UUID,
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
):
    """Delete a chat owned by the current user; responds 204 with no body."""
    await chat_service.delete_chat(db, chat_id, user.id)
|
|
|
|
|
|
@router.get("/{chat_id}/messages", response_model=MessageListResponse)
async def list_messages(
    chat_id: uuid.UUID,
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
    limit: int = Query(default=50, le=200),
    before: uuid.UUID | None = Query(default=None),
):
    """Return up to `limit` messages of a chat.

    `before` is a message-id cursor for paging; interpretation of the
    cursor (ordering) is delegated to chat_service.get_messages.
    """
    page = await chat_service.get_messages(db, chat_id, user.id, limit, before)
    return MessageListResponse(
        messages=list(map(MessageResponse.model_validate, page))
    )
|
|
|
|
|
|
@router.post("/{chat_id}/messages")
async def send_message(
    chat_id: uuid.UUID,
    data: SendMessageRequest,
    user: Annotated[User, Depends(get_current_user)],
    db: Annotated[AsyncSession, Depends(get_db)],
):
    """Send a user message and stream the AI reply as server-sent events.

    The daily quota is checked up front so no tokens are generated for a
    user who is already over either limit; both cases answer HTTP 429
    with the limit and reset time in the detail.

    Raises:
        HTTPException: 429 when the daily message or token limit is reached.
    """
    # Fix: HTTPException was previously imported inside this function on
    # every request; it now comes from the module-level fastapi import.
    quota = await check_user_quota(db, user)
    if quota["messages_exceeded"]:
        raise HTTPException(
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            detail=f"Daily message limit reached ({quota['message_limit']}). Resets at {quota['resets_at']}.",
        )
    if quota["tokens_exceeded"]:
        raise HTTPException(
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            detail=f"Daily token limit reached ({quota['token_limit']}). Resets at {quota['resets_at']}.",
        )

    return StreamingResponse(
        stream_ai_response(db, chat_id, user.id, data.content),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",  # event streams must never be cached
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # stop nginx from buffering SSE chunks
        },
    )
|