diff --git a/app/api/v1/endpoints/image/rubricated_analysis.py b/app/api/v1/endpoints/image/rubricated_analysis.py
index 7a0086e..8fe1d8c 100644
--- a/app/api/v1/endpoints/image/rubricated_analysis.py
+++ b/app/api/v1/endpoints/image/rubricated_analysis.py
@@ -21,7 +21,7 @@ from app.services.image.evaluations_adapters import evaluate_image_with_provider
 # Initialize the FastAPI router for this module
 image_router_analysis = APIRouter()
 
-@image_router_analysis.post("/evaluations/", response_model=StandardImageAnalysisResult)
+@image_router_analysis.post("/evaluations", response_model=StandardImageAnalysisResult)
 async def evaluate_image(image_request: ImageRequestFile = Depends()) -> StandardImageAnalysisResult:
     """
     Endpoint to analyze images using an evaluation rubric, inferring the AI provider
diff --git a/app/core/config.py b/app/core/config.py
index 48d69fc..b484c5c 100644
--- a/app/core/config.py
+++ b/app/core/config.py
@@ -22,5 +22,6 @@ class Settings:
     # Image providers
     # ---------------------------------------------------------------
     CLARIFAI_API_KEY = os.getenv("CLARIFAI_API_KEY", "")
+    ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
 
 settings = Settings()
\ No newline at end of file
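The new `ANTHROPIC_API_KEY` setting is added here, but as the adapter diff below shows, `evaluate_with_claude` still reads `os.environ` directly, so the setting itself goes unused. A minimal sketch of what wiring the client through the centralized `Settings` object might look like (an assumption about the intended design, not something this diff does):

```python
from anthropic import Anthropic

from app.core.config import settings

# Build the Claude client from the centralized settings object so the
# key is resolved once, in config.py, rather than at each call site.
client = Anthropic(api_key=settings.ANTHROPIC_API_KEY)
```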
'vision-1')") score: float = Field(None, description="Puntuación general asignada a la imagen después del análisis") feedback: str = Field(None, description="Comentarios o retroalimentación generada por el modelo de IA sobre la imagen") - detailed_criteria: List[ImageEvaluationCriteria] = Field(None, description="Lista de criterios de evaluación detallados con sus respectivas puntuaciones y descripciones") \ No newline at end of file + detailed_criteria: List[ImageEvaluationCriteria] = Field(None, description="Lista de criterios de evaluación detallados con sus respectivas puntuaciones y descripciones") + + +class StandardImageAnalysis(BaseModel): + """Modelo que representa el resultado estándar de un análisis de imágenes para Qualidot.""" + score: float = Field(None, description="Puntuación general asignada a la imagen después del análisis") + feedback: str = Field(None, description="Comentarios o retroalimentación generada por el modelo de IA sobre la imagen") + detailed_criteria: List[ImageEvaluationCriteria] = Field(None, description="Lista de criterios de evaluación detallados con sus respectivas puntuaciones y descripciones") + \ No newline at end of file diff --git a/app/services/image/__pycache__/evaluations_adapters.cpython-312.pyc b/app/services/image/__pycache__/evaluations_adapters.cpython-312.pyc index 54677b2..1572b1c 100644 Binary files a/app/services/image/__pycache__/evaluations_adapters.cpython-312.pyc and b/app/services/image/__pycache__/evaluations_adapters.cpython-312.pyc differ diff --git a/app/services/image/__pycache__/prompt_builder.cpython-312.pyc b/app/services/image/__pycache__/prompt_builder.cpython-312.pyc index 96cb7aa..e2f87e0 100644 Binary files a/app/services/image/__pycache__/prompt_builder.cpython-312.pyc and b/app/services/image/__pycache__/prompt_builder.cpython-312.pyc differ diff --git a/app/services/image/evaluations_adapters.py b/app/services/image/evaluations_adapters.py index 0ddc153..8abdcbf 100644 --- a/app/services/image/evaluations_adapters.py +++ b/app/services/image/evaluations_adapters.py @@ -9,6 +9,8 @@ Propósito: """ import json +import mimetypes +import mimetypes import tempfile import os from fastapi import HTTPException @@ -21,6 +23,7 @@ from app.schemas.image_standard import ImageRequestFile, StandardImageAnalysisRe from app.core import config from app.utilities.image_utilities import json_to_rubric, encode_image_from_bytes from app.services.image.prompt_builder import build_image_evaluation_prompt +from anthropic import Anthropic # Función de adaptador principal que infiere el proveedor y llama al adaptador específico async def evaluate_image_with_provider(image_request: ImageRequestFile) -> StandardImageAnalysisResult: @@ -33,7 +36,7 @@ async def evaluate_image_with_provider(image_request: ImageRequestFile) -> Stand rubric_dict = json.loads(content) rubric = json_to_rubric(rubric_dict) prompt = build_image_evaluation_prompt(rubric) - + match provider: case "openai": return await evaluate_with_openai(image_request, prompt) @@ -74,20 +77,19 @@ async def evaluate_with_openai(image_request: ImageRequestFile, prompt: str) -> ) resultado = json.loads(response.choices[0].message.content) - return StandardImageAnalysisResult(**resultado) + return StandardImageAnalysisResult( + status="success", + original_filename=image_request.file.filename, + provider_used="OpenAI", + model_used=image_request.model, + **resultado + ) except Exception as e: # Capturamos cualquier error de OpenAI o de lectura de archivos raise HTTPException( 
diff --git a/app/services/image/evaluations_adapters.py b/app/services/image/evaluations_adapters.py
index 0ddc153..8abdcbf 100644
--- a/app/services/image/evaluations_adapters.py
+++ b/app/services/image/evaluations_adapters.py
@@ -9,6 +9,7 @@ Purpose:
 """
 
 import json
+import mimetypes
 import tempfile
 import os
 from fastapi import HTTPException
@@ -21,6 +22,7 @@ from app.schemas.image_standard import ImageRequestFile, StandardImageAnalysisRe
 from app.core import config
 from app.utilities.image_utilities import json_to_rubric, encode_image_from_bytes
 from app.services.image.prompt_builder import build_image_evaluation_prompt
+from anthropic import Anthropic
 
 # Main adapter function that infers the provider and calls the provider-specific adapter
 async def evaluate_image_with_provider(image_request: ImageRequestFile) -> StandardImageAnalysisResult:
@@ -33,7 +35,7 @@ async def evaluate_image_with_provider(image_request: ImageRequestFile) -> StandardImageAnalysisResult:
     rubric_dict = json.loads(content)
     rubric = json_to_rubric(rubric_dict)
     prompt = build_image_evaluation_prompt(rubric)
-    
+
     match provider:
         case "openai":
             return await evaluate_with_openai(image_request, prompt)
@@ -74,20 +76,19 @@ async def evaluate_with_openai(image_request: ImageRequestFile, prompt: str) -> StandardImageAnalysisResult:
         )
 
         resultado = json.loads(response.choices[0].message.content)
-        return StandardImageAnalysisResult(**resultado)
+        return StandardImageAnalysisResult(
+            status="success",
+            original_filename=image_request.file.filename,
+            provider_used="OpenAI",
+            model_used=image_request.model,
+            **resultado
+        )
     except Exception as e:
         # Catch any OpenAI or file-handling error
         raise HTTPException(
             status_code=500,
             detail=f"Error evaluating the image: {str(e)}"
         )
-    # STEPS TO IMPLEMENT THE OPENAI EVALUATION LOGIC:
-    # 1. Validate the input image (size, format, etc.)
-    # 2. Configure the OpenAI client with the API key
-    # 3. Call the OpenAI API to evaluate the image
-    # 4. Convert the OpenAI response to Qualidot's standard image-evaluation format
-    # 5. Handle errors and exceptions appropriately
-    raise NotImplementedError("The evaluate_with_openai function is not implemented yet.")
 
 async def evaluate_with_clarifai(image_request: ImageRequestFile, rubric: ImageEvaluationRubric, prompt: str) -> StandardImageAnalysisResult:
     """
@@ -150,13 +151,54 @@ async def evaluate_with_claude(image_request: ImageRequestFile, prompt: str) -> StandardImageAnalysisResult:
     """
     Adapter function to evaluate images using Claude.
-    (Template for future implementations)
     """
+    client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
+
+    image_bytes = await image_request.file.read()
+    base64_image = encode_image_from_bytes(image_bytes)
+
+    media_type = image_request.file.content_type
+
+    if media_type not in ["image/jpeg", "image/png", "image/gif", "image/webp"]:
+        raise HTTPException(status_code=400, detail=f"Image type not supported by Anthropic: {media_type}")
+
+    try:
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "image",
+                        "source": {
+                            "type": "base64",
+                            "media_type": media_type,
+                            "data": base64_image
+                        },
+                    },
+                    {"type": "text", "text": prompt}
+                ],
+            }
+        ]
+
+        response = client.messages.create(
+            model=image_request.model,
+            max_tokens=1024,
+            messages=messages,
+        )
+
+        json_string = response.content[0].text
+        parsed_data = json.loads(json_string)
+
+        return StandardImageAnalysisResult(
+            status="success",
+            original_filename=image_request.file.filename,
+            provider_used="Claude",
+            model_used=image_request.model,
+            **parsed_data
+        )
-    # STEPS TO IMPLEMENT THE CLAUDE EVALUATION LOGIC:
-    # 1. Validate the input image (size, format, etc.)
-    # 2. Configure the Claude client with the API key
-    # 3. Call the Claude API to evaluate the image
-    # 4. Convert the Claude response to Qualidot's standard image-evaluation format
-    # 5. Handle errors and exceptions appropriately
-    raise NotImplementedError("The evaluate_with_claude function is not implemented yet.")
+    except Exception as e:
+        raise HTTPException(
+            status_code=500,
+            detail=f"Error evaluating the image: {str(e)}"
+        )
\ No newline at end of file
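`import mimetypes` is added above but never referenced in this diff; presumably it is meant to backstop uploads that arrive without a `content_type`, which would currently trip the unsupported-type check with `media_type=None`. A small sketch of that idea (the helper name `resolve_media_type` is hypothetical):

```python
import mimetypes

# Image types accepted by Anthropic's vision API, per the check above.
ANTHROPIC_IMAGE_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"}

def resolve_media_type(filename: str, content_type: str | None) -> str:
    """Use the declared content type, falling back to a guess from the filename."""
    media_type = content_type or mimetypes.guess_type(filename)[0]
    if media_type not in ANTHROPIC_IMAGE_TYPES:
        raise ValueError(f"Image type not supported by Anthropic: {media_type}")
    return media_type
```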
diff --git a/app/services/image/prompt_builder.py b/app/services/image/prompt_builder.py
index 547785b..cddac68 100644
--- a/app/services/image/prompt_builder.py
+++ b/app/services/image/prompt_builder.py
@@ -8,7 +8,7 @@ processing and analysis.
 """
 
 import json
 
-from app.schemas.image_standard import StandardImageAnalysisResult, ImageEvaluationRubric
+from app.schemas.image_standard import StandardImageAnalysis, ImageEvaluationRubric
 
 def build_image_evaluation_prompt(rubric: ImageEvaluationRubric) -> str:
     """
@@ -20,7 +20,7 @@ def build_image_evaluation_prompt(rubric: ImageEvaluationRubric) -> str:
     rubric_json = rubric.model_dump_json(exclude_none=True, indent=2)
 
     # 2. Extract the dynamic output schema based on Pydantic
-    expected_output_schema = json.dumps(StandardImageAnalysisResult.model_json_schema(), indent=2)
+    expected_output_schema = json.dumps(StandardImageAnalysis.model_json_schema(), indent=2)
 
     # 3. Get the specialization path
     # If for some reason it comes empty, fall back to a generic default role
diff --git a/requirements.txt b/requirements.txt
index ecc246d..e220608 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,4 +25,5 @@ numpy
 openai
 langchain
 langchain-openai
-assemblyai
\ No newline at end of file
+assemblyai
+anthropic
\ No newline at end of file
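The import swap above is the point of the new `StandardImageAnalysis` model: the schema embedded in the prompt now only asks the AI for `score`, `feedback`, and `detailed_criteria`, while `status`, `original_filename`, `provider_used`, and `model_used` are filled in by the adapters themselves. A quick way to inspect what the prompt builder now serializes:

```python
import json

from app.schemas.image_standard import StandardImageAnalysis

# Dump the JSON schema embedded in the evaluation prompt; only the three
# evaluation fields remain, the bookkeeping fields are set server-side.
print(json.dumps(StandardImageAnalysis.model_json_schema(), indent=2))
```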