Initial commit: THE FACTORY - Iterative Image Generation
Tasks:
- image_generate: Generate an image from a prompt
- image_variant: Generate a variant of an existing image
- image_upscale: Increase resolution

Models: SDXL, Flux, SDXL-Turbo
RunPod Serverless Handler
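The serverless handler named in the commit message is not part of the config.py diff shown below. Purely as an illustrative sketch, this is one way such a handler might route the three tasks, assuming the project uses the `runpod` SDK's `runpod.serverless.start` entry point; `run_task`, `SUPPORTED_TASKS`, and the payload keys are hypothetical placeholders, not code from this commit.

import runpod

def run_task(task: str, payload: dict) -> dict:
    # Hypothetical placeholder: the actual SDXL / Flux / SDXL-Turbo calls
    # would be invoked here.
    return {"task": task, "status": "ok"}

SUPPORTED_TASKS = {"image_generate", "image_variant", "image_upscale"}

def handler(job):
    """Route an incoming serverless job to one of the supported tasks."""
    payload = job.get("input", {})
    task = payload.get("task", "image_generate")
    if task not in SUPPORTED_TASKS:
        return {"error": f"unknown task: {task}"}
    return run_task(task, payload)

# Register the handler with the RunPod serverless runtime.
runpod.serverless.start({"handler": handler})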
config.py (138 lines, new file)
@@ -0,0 +1,138 @@
"""
=============================================================================
THE FACTORY - Configuration
=============================================================================
"""

import os
from enum import Enum
from dataclasses import dataclass, field
from typing import Dict, List, Optional


class JobState(Enum):
    """Possible states of a job."""
    PENDING = "PENDING"
    QUEUED = "QUEUED"
    RUNNING = "RUNNING"
    EVALUATING = "EVALUATING"
    CONVERGED = "CONVERGED"
    EXHAUSTED = "EXHAUSTED"
    FAILED = "FAILED"
    CANCELLED = "CANCELLED"


class FunctionType(Enum):
    """Supported function types."""
    TEXT_GENERATION = "TEXT_GENERATION"
    IMAGE_GENERATION = "IMAGE_GENERATION"
    CODE_GENERATION = "CODE_GENERATION"
    DOCUMENT_GENERATION = "DOCUMENT_GENERATION"
    AUDIO_GENERATION = "AUDIO_GENERATION"
    VIDEO_GENERATION = "VIDEO_GENERATION"


@dataclass
class ModelConfig:
    """Configuration of a single model."""
    name: str
    provider: str
    cost_per_1k_input: float
    cost_per_1k_output: float
    max_tokens: int = 4096
    supports_images: bool = False
    supports_streaming: bool = True


@dataclass
class FactoryConfig:
    """Global configuration for THE FACTORY."""

    # API keys
    anthropic_api_key: str = field(default_factory=lambda: os.environ.get("ANTHROPIC_API_KEY", ""))
    openai_api_key: str = field(default_factory=lambda: os.environ.get("OPENAI_API_KEY", ""))
    replicate_api_key: str = field(default_factory=lambda: os.environ.get("REPLICATE_API_KEY", ""))

    # Default limits
    default_max_cycles: int = 5
    default_budget_usd: float = 1.0
    default_timeout_ms: int = 120000

    # Convergence
    convergence_threshold: float = 0.85
    diminishing_returns_threshold: float = 0.02
    diminishing_returns_min_confidence: float = 0.70

    # Models per function
    models: Dict[str, ModelConfig] = field(default_factory=lambda: {
        # Text generation
        "claude-sonnet": ModelConfig(
            name="claude-sonnet-4-20250514",
            provider="anthropic",
            cost_per_1k_input=0.003,
            cost_per_1k_output=0.015,
            max_tokens=8192,
            supports_images=True
        ),
        "claude-haiku": ModelConfig(
            name="claude-haiku-4-20250514",
            provider="anthropic",
            cost_per_1k_input=0.00025,
            cost_per_1k_output=0.00125,
            max_tokens=8192,
            supports_images=True
        ),
        "gpt-4o": ModelConfig(
            name="gpt-4o",
            provider="openai",
            cost_per_1k_input=0.005,
            cost_per_1k_output=0.015,
            max_tokens=4096,
            supports_images=True
        ),
        "gpt-4o-mini": ModelConfig(
            name="gpt-4o-mini",
            provider="openai",
            cost_per_1k_input=0.00015,
            cost_per_1k_output=0.0006,
            max_tokens=4096,
            supports_images=True
        ),
        # Image generation
        "flux-pro": ModelConfig(
            name="black-forest-labs/flux-1.1-pro",
            provider="replicate",
            cost_per_1k_input=0.05,  # per image
            cost_per_1k_output=0.0,
            max_tokens=0
        ),
        "flux-schnell": ModelConfig(
            name="black-forest-labs/flux-schnell",
            provider="replicate",
            cost_per_1k_input=0.003,  # per image
            cost_per_1k_output=0.0,
            max_tokens=0
        ),
    })

    # Default model per function
    default_models: Dict[FunctionType, str] = field(default_factory=lambda: {
        FunctionType.TEXT_GENERATION: "claude-sonnet",
        FunctionType.CODE_GENERATION: "claude-sonnet",
        FunctionType.DOCUMENT_GENERATION: "claude-sonnet",
        FunctionType.IMAGE_GENERATION: "flux-schnell",
        FunctionType.AUDIO_GENERATION: "claude-sonnet",  # placeholder
        FunctionType.VIDEO_GENERATION: "claude-sonnet",  # placeholder
    })

    # Model used for evaluation
    evaluator_model: str = "claude-haiku"

    def get_model(self, name: str) -> Optional[ModelConfig]:
        """Return the configuration for a model by name."""
        return self.models.get(name)

    def get_default_model(self, function: FunctionType) -> ModelConfig:
        """Return the default model for a given function type."""
        model_name = self.default_models.get(function, "claude-sonnet")
        return self.models[model_name]
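For orientation, a short usage sketch (not part of the commit, and assuming the file above is importable as `config`) showing how FactoryConfig resolves models and how the per-image pricing relates to the default budget:

from config import FactoryConfig, FunctionType

config = FactoryConfig()

# Resolve the default model for image generation (flux-schnell on Replicate).
model = config.get_default_model(FunctionType.IMAGE_GENERATION)
print(model.name, model.provider)  # black-forest-labs/flux-schnell replicate

# For image models, cost_per_1k_input doubles as a per-image price, so the
# default five cycles of flux-schnell cost about 5 * 0.003 = 0.015 USD,
# well inside default_budget_usd = 1.0.
estimated_cost = config.default_max_cycles * model.cost_per_1k_input
assert estimated_cost <= config.default_budget_usd

# Unknown model names return None rather than raising.
assert config.get_model("does-not-exist") is None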