malek-messaoudii
feat: Add return_base64 parameter to generate_speech function, allowing output as Base64 audio alongside file path. Update request and response models for clarity and enhance documentation.
2683fd8
| """Service pour initialiser le serveur MCP avec FastMCP""" | |
| from mcp.server.fastmcp import FastMCP | |
| from typing import Dict, Any, Optional | |
| import logging | |
| from fastapi import FastAPI | |
| from services.stance_model_manager import stance_model_manager | |
| from services.label_model_manager import kpa_model_manager | |
| from services.stt_service import speech_to_text | |
| from services.tts_service import text_to_speech, text_to_speech_base64 | |
| from services.generate_model_manager import generate_model_manager | |
| from services.topic_service import topic_service | |
| from services.chat_service import generate_chat_response | |
| logger = logging.getLogger(__name__) | |
| # Créer l'instance FastMCP | |
| mcp_server = FastMCP("NLP-Debater-MCP", json_response=True, stateless_http=False) # Stateful pour sessions | |
# Tools (unchanged)
@mcp_server.tool()
def detect_stance(topic: str, argument: str) -> Dict[str, Any]:
    """Classify an argument as pro or con relative to a topic."""
    if not stance_model_manager.model_loaded:
        raise ValueError("Stance model not loaded")
    result = stance_model_manager.predict(topic, argument)
    return {
        "predicted_stance": result["predicted_stance"],
        "confidence": result["confidence"],
        "probability_con": result["probability_con"],
        "probability_pro": result["probability_pro"],
    }

@mcp_server.tool()
def match_keypoint_argument(argument: str, key_point: str) -> Dict[str, Any]:
    """Predict whether a key point matches an argument (KPA)."""
    if not kpa_model_manager.model_loaded:
        raise ValueError("KPA model not loaded")
    result = kpa_model_manager.predict(argument, key_point)
    return {
        "prediction": result["prediction"],
        "label": result["label"],
        "confidence": result["confidence"],
        "probabilities": result["probabilities"],
    }

@mcp_server.tool()
def transcribe_audio(audio_path: str) -> str:
    """Transcribe an audio file to text via the STT service."""
    return speech_to_text(audio_path)

@mcp_server.tool()
def generate_speech(
    text: str,
    voice: str = "en",
    format: str = "mp3",
    return_base64: bool = False,
) -> Dict[str, Any]:
    """
    Generate speech using the free gTTS backend (MP3 only).

    - voice: language code (e.g. 'en', 'fr').
    - format: must be 'mp3'.
    - return_base64:
        - False (default): return a file path in 'audio_path'.
        - True: return a JSON object with 'audio_base64', 'mime_type', 'filename', etc.
    """
    if return_base64:
        result = text_to_speech_base64(text=text, voice=voice, fmt=format)
        # Ensure we always include basic metadata
        return {
            **result,
            "voice": voice,
            "format": format,
        }
    audio_path = text_to_speech(text=text, voice=voice, fmt=format)
    return {
        "audio_path": audio_path,
        "voice": voice,
        "format": format,
    }

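# Usage sketch (illustrative helper, not part of the service): decodes the
# Base64 payload back to raw MP3 bytes. The 'audio_base64' key is assumed from
# the docstring above; the helper name is hypothetical.
def _example_speech_to_bytes(text: str) -> bytes:
    import base64
    result = generate_speech(text, voice="en", return_base64=True)
    return base64.b64decode(result["audio_base64"])
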
@mcp_server.tool()
def generate_argument(topic: str, position: str) -> Dict[str, Any]:
    """Generate an argument for a given topic and position."""
    if not generate_model_manager.model_loaded:
        raise ValueError("Generation model not loaded")
    argument = generate_model_manager.generate(topic=topic, position=position)
    return {
        "topic": topic,
        "position": position,
        "argument": argument,
    }

@mcp_server.tool()
def extract_topic(text: str) -> Dict[str, Any]:
    """Extract a topic from the given text/argument."""
    if not topic_service.initialized:
        topic_service.initialize()  # Lazy initialization on first call
    topic = topic_service.extract_topic(text)
    return {
        "text": text,
        "topic": topic,
    }

@mcp_server.tool()
def voice_chat(user_input: str, conversation_id: Optional[str] = None) -> Dict[str, Any]:
    """Generate a chatbot response for voice chat (English only)."""
    response_text = generate_chat_response(
        user_input=user_input,
        conversation_id=conversation_id,
    )
    return {
        "user_input": user_input,
        "conversation_id": conversation_id,
        "response": response_text,
    }

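# Pipeline sketch (illustrative, not a registered tool): one full voice turn,
# speech -> text -> chat reply -> synthesized MP3. Assumes the service contracts
# documented above; the helper name is hypothetical.
def _example_voice_turn(audio_path: str, conversation_id: Optional[str] = None) -> Dict[str, Any]:
    user_text = transcribe_audio(audio_path)       # STT: audio file -> transcript
    chat = voice_chat(user_text, conversation_id)  # chatbot reply (English only)
    speech = generate_speech(chat["response"])     # TTS: reply -> MP3 file path
    return {
        "transcript": user_text,
        "reply": chat["response"],
        "audio_path": speech["audio_path"],
    }
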
@mcp_server.prompt()
def get_debate_prompt() -> str:
    """Reusable debate prompt exposed to MCP clients."""
    return "You are a debate expert. Generate 3 PRO arguments for the given topic. Be concise and persuasive."

# Health tool (registered before initialization)
@mcp_server.tool()
def health_check() -> Dict[str, Any]:
    """Health check for the MCP server."""
    # Hardcoded list to avoid issues with list_tools()
    tool_names = [
        "detect_stance",
        "match_keypoint_argument",
        "transcribe_audio",
        "generate_speech",
        "generate_argument",
        "extract_topic",
        "voice_chat",
        "health_check",
    ]
    return {"status": "healthy", "tools": tool_names}

def init_mcp_server(app: FastAPI) -> None:
    """
    Initialize and mount the MCP server on the FastAPI app.
    """
    # FIXED: use streamable_http_app(), which returns the ASGI app
    mcp_app = mcp_server.streamable_http_app()  # ASGI app for mounting (natively handles /health, /tools, etc.)
    # Mount at /api/v1/mcp - FastAPI handles the lifespan automatically
    app.mount("/api/v1/mcp", mcp_app)
    logger.info("✓ MCP server mounted at /api/v1/mcp with NLP/STT/TTS tools")