- Core architecture from zen-mcp-server
- OpenRouter and Gemini provider configuration
- Content variant generator tool (first marketing tool)
- Chat tool for marketing strategy
- Version and model listing tools
- Configuration system with .env support
- Logging infrastructure
- Ready for Claude Desktop integration
"""
|
|
Configuration and constants for Zen-Marketing MCP Server
|
|
|
|
This module centralizes all configuration settings for the Zen-Marketing MCP Server.
|
|
It defines model configurations, token limits, temperature defaults, and other
|
|
constants used throughout the application.
|
|
|
|
Configuration values can be overridden by environment variables where appropriate.
|
|
"""
|
|
|
|
import os
|
|
|
|
# Version and metadata
|
|
__version__ = "0.1.0"
|
|
__updated__ = "2025-11-07"
|
|
__author__ = "Ben (based on Zen MCP Server by Fahad Gilani)"
|
|
|
|
# Model configuration
|
|
# DEFAULT_MODEL: The default model used for all AI operations
|
|
# Can be overridden by setting DEFAULT_MODEL environment variable
|
|
DEFAULT_MODEL = os.getenv("DEFAULT_MODEL", "google/gemini-2.5-pro-latest")
|
|
|
|
# Fast model for quick operations (variations, subject lines)
|
|
FAST_MODEL = os.getenv("FAST_MODEL", "google/gemini-2.5-flash-preview-09-2025")
|
|
|
|
# Creative model for content generation
|
|
CREATIVE_MODEL = os.getenv("CREATIVE_MODEL", "minimax/minimax-m2")
|
|
|
|
# Auto mode detection - when DEFAULT_MODEL is "auto", Claude picks the model
|
|
IS_AUTO_MODE = DEFAULT_MODEL.lower() == "auto"
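
# Illustrative only: these model settings are meant to be overridden via .env rather
# than edited here; the values below are examples, not requirements:
#   DEFAULT_MODEL=auto                 # let Claude pick the model per request
#   FAST_MODEL=google/gemini-2.5-flash-preview-09-2025
#   CREATIVE_MODEL=minimax/minimax-m2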

# Temperature defaults for different content types
# Temperature controls the randomness/creativity of model responses
# Lower values (0.0-0.3) produce more deterministic, focused responses
# Higher values (0.7-1.0) produce more creative, varied responses

# TEMPERATURE_PRECISION: Used for fact-checking and technical verification
TEMPERATURE_PRECISION = 0.2  # For factcheck, technical verification

# TEMPERATURE_ANALYTICAL: Used for style enforcement and SEO optimization
TEMPERATURE_ANALYTICAL = 0.3  # For styleguide, seooptimize, voiceanalysis

# TEMPERATURE_BALANCED: Used for strategic planning
TEMPERATURE_BALANCED = 0.5  # For guestedit, linkstrategy, campaignmap

# TEMPERATURE_CREATIVE: Used for content variation and adaptation
TEMPERATURE_CREATIVE = 0.7  # For platformadapt

# TEMPERATURE_HIGHLY_CREATIVE: Used for bulk variation generation
TEMPERATURE_HIGHLY_CREATIVE = 0.8  # For contentvariant, subjectlines
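
# Illustrative sketch (TOOL_TEMPERATURE_DEFAULTS is an assumed name, not used by this
# module): individual tools are expected to pick their default from these constants,
# roughly like:
#   TOOL_TEMPERATURE_DEFAULTS = {
#       "factcheck": TEMPERATURE_PRECISION,
#       "styleguide": TEMPERATURE_ANALYTICAL,
#       "campaignmap": TEMPERATURE_BALANCED,
#       "platformadapt": TEMPERATURE_CREATIVE,
#       "contentvariant": TEMPERATURE_HIGHLY_CREATIVE,
#   }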

# Thinking Mode Defaults
DEFAULT_THINKING_MODE_THINKDEEP = os.getenv("DEFAULT_THINKING_MODE_THINKDEEP", "high")


# MCP Protocol Transport Limits
def _calculate_mcp_prompt_limit() -> int:
    """
    Calculate MCP prompt size limit based on MAX_MCP_OUTPUT_TOKENS environment variable.

    Returns:
        Maximum character count for user input prompts
    """
    max_tokens_str = os.getenv("MAX_MCP_OUTPUT_TOKENS")

    if max_tokens_str:
        try:
            max_tokens = int(max_tokens_str)
            # Allocate 60% of tokens for input, convert to characters (~4 chars per token)
            input_token_budget = int(max_tokens * 0.6)
            character_limit = input_token_budget * 4
            return character_limit
        except (ValueError, TypeError):
            pass

    # Default fallback: 60,000 characters
    return 60_000


MCP_PROMPT_SIZE_LIMIT = _calculate_mcp_prompt_limit()
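
# Illustrative sketch (not part of this module): a tool handler would typically check
# incoming prompt size against this limit before calling a provider, e.g.
#   if len(prompt) > MCP_PROMPT_SIZE_LIMIT:
#       raise ValueError("Prompt exceeds the MCP transport size limit")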

# Language/Locale Configuration
LOCALE = os.getenv("LOCALE", "")

# Platform character limits
PLATFORM_LIMITS = {
    "twitter": 280,
    "bluesky": 300,
    "linkedin": 3000,
    "linkedin_optimal": 1300,
    "instagram": 2200,
    "facebook": 500,  # Optimal length
    "email_subject": 60,
    "email_preview": 100,
    "meta_description": 156,
    "page_title": 60,
}
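
# Illustrative sketch (fits_platform is a hypothetical helper, not defined here):
# a platform adaptation tool could validate generated copy against these limits, e.g.
#   def fits_platform(text: str, platform: str) -> bool:
#       return len(text) <= PLATFORM_LIMITS.get(platform, PLATFORM_LIMITS["twitter"])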

# Web search configuration
ENABLE_WEB_SEARCH = os.getenv("ENABLE_WEB_SEARCH", "true").lower() == "true"

# Tool disabling
# Comma-separated list of tools to disable
DISABLED_TOOLS_STR = os.getenv("DISABLED_TOOLS", "")
DISABLED_TOOLS = set(tool.strip() for tool in DISABLED_TOOLS_STR.split(",") if tool.strip())
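
# Illustrative sketch (ALL_TOOLS is an assumed registry, not defined in this module):
# the server can skip registering any tool listed in DISABLED_TOOLS, e.g.
#   active_tools = {name: tool for name, tool in ALL_TOOLS.items()
#                   if name not in DISABLED_TOOLS}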

# Logging configuration
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")