zen-marketing/tools/factcheck.py
Ben 78127f03d7 Complete Phase 2: Add three high-priority marketing tools
## New Tools (1,125 lines)

### subjectlines (210 lines)
- Email subject line generator testing psychological angles
- Generates 15-25 variations grouped by mechanism
- Includes character counts, emoji suggestions, A/B rationale
- Temperature: 0.8 (high creativity)
- System prompt: 95 lines of email marketing expertise

### platformadapt (205 lines)
- Cross-platform content adaptation
- Supports Twitter, LinkedIn, Instagram, Facebook, Bluesky, email, blog
- Respects character limits and platform-specific best practices
- Temperature: 0.7 (creative adaptation)
- System prompt: 180 lines with detailed platform characteristics

### factcheck (195 lines)
- Technical fact verification via web search
- Source credibility hierarchy (primary → secondary → tertiary)
- Verification statuses: ✅ Verified / ⚠️ Partial / ❌ Unsupported / 🔍 Needs Context
- Temperature: 0.2 (precision)
- System prompt: 213 lines of fact-checking methodology
- Web search enabled by default

## Integration

- Added 3 tool imports to server.py
- Registered tools in TOOLS dictionary
- Added prompt templates for all 3 new tools
- Exported system prompts in systemprompts/__init__.py

## Code Quality

- Code review by GLM-4.6: A grade (9.5/10)
- Consistency score: 10/10 (perfect SimpleTool pattern)
- No critical or high-priority issues
- 3 low-severity observations (1 fixed)
- Production readiness: 95%

## Testing

- All tools instantiate successfully
- Server startup confirmed (7 tools active)
- Schema validation passed
- No runtime errors

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-07 14:02:01 -04:00

195 lines
7.6 KiB
Python

"""Fact Check Tool
Quick technical fact verification via web search.
Verifies claims against authoritative sources and provides confidence levels.
"""
from copy import copy
from typing import Optional

from pydantic import Field

from config import TEMPERATURE_PRECISION
from systemprompts import FACTCHECK_PROMPT
from tools.models import ToolModelCategory
from tools.shared.base_models import ToolRequest
from tools.simple.base import SimpleTool
class FactCheckRequest(ToolRequest):
    """Request model for Fact Check.

    Declarative pydantic model consumed by FactCheckTool; only `content` is
    required. Field `description` strings are surfaced to the calling agent,
    so they double as user-facing documentation.
    """

    # The text whose factual claims should be verified (required).
    content: str = Field(
        ...,
        description="Content containing factual claims to verify. Can be a full article, social post, or specific claims.",
    )
    # Optional domain hint; steers source selection in the prompt.
    technical_domain: Optional[str] = Field(
        default=None,
        description="Domain for specialized fact-checking: 'hvac', 'software', 'saas', 'general'. Helps identify authoritative sources.",
    )
    # Optional claim-category filter; a value of 'all' (or None) means no filter.
    claim_type: Optional[str] = Field(
        default=None,
        description="Type of claims to focus on: 'product_specs', 'technical_process', 'statistics', 'general', 'all'. Default is 'all'.",
    )
    # Verification strictness; one of the three values named in the description.
    confidence_threshold: str = Field(
        default="balanced",
        description="Verification depth: 'high_confidence_only' (strict, flag anything uncertain), 'balanced' (reasonable verification), 'comprehensive' (check everything thoroughly).",
    )
    # When provided, only these quoted claims are verified; `content` is still
    # included in the prompt as surrounding context.
    focus_claims: Optional[list[str]] = Field(
        default=None,
        description="Specific claims to verify if you don't want to check all content. Provide as list of quoted text.",
    )
class FactCheckTool(SimpleTool):
    """Verify factual claims in content using web search.

    A SimpleTool that builds a fact-checking prompt from a FactCheckRequest
    and delegates execution to the base class. Runs at precision temperature
    and expects web search to be enabled (schema default: true).
    """

    def get_name(self) -> str:
        """Return the tool's registry name."""
        return "factcheck"

    def get_description(self) -> str:
        """Return the user-facing tool description shown to the agent."""
        return (
            "Verify factual claims in content using web search. Checks product specs, statistics, "
            "technical processes, and general facts against authoritative sources. Returns verification "
            "status (✅ Verified / ⚠️ Partial / ❌ Unsupported / 🔍 Needs Context) with sources and "
            "confidence levels. Essential for technical content accuracy before publishing."
        )

    def get_system_prompt(self) -> str:
        """Return the fact-checking system prompt."""
        return FACTCHECK_PROMPT

    def get_default_temperature(self) -> float:
        """Low temperature for precision over creativity."""
        return TEMPERATURE_PRECISION

    def get_model_category(self) -> ToolModelCategory:
        """Fact checks favor quick turnaround over deep reasoning."""
        return ToolModelCategory.FAST_RESPONSE

    def get_request_model(self):
        """Return the pydantic request model class for this tool."""
        return FactCheckRequest

    def get_tool_fields(self) -> dict:
        """Tool-specific field definitions for FactCheck.

        NOTE(review): only `content` is declared here; the full field set is
        provided by get_input_schema(), which overrides the generated schema.
        """
        return {
            "content": {
                "type": "string",
                "description": "Content with claims to verify",
            }
        }

    async def prepare_prompt(self, request: FactCheckRequest) -> str:
        """Build the fact-check prompt from the request.

        Assembles the claim list (or full content), optional domain/claim-type
        hints, and verification-depth guidance, then hands the result to the
        base class's chat-style preparation on a copy of the request so the
        caller's request object is never mutated.
        """
        prompt_parts = ["Verify the factual claims in this content:"]

        # Either verify a caller-selected subset of claims (with the full
        # content attached for context) or fact-check the content wholesale.
        if request.focus_claims:
            prompt_parts.append("\n**Specific Claims to Verify:**")
            prompt_parts.extend(f"- {claim}" for claim in request.focus_claims)
            prompt_parts.append(f"\n**Full Content for Context:**\n{request.content}")
        else:
            prompt_parts.append(f"\n**Content to Fact-Check:**\n{request.content}")

        if request.technical_domain:
            prompt_parts.append(
                f"\n**Technical Domain:** {request.technical_domain} (use domain-specific authoritative sources)"
            )

        # 'all' (and None) means no claim-type filter, so omit the section.
        if request.claim_type and request.claim_type != "all":
            prompt_parts.append(
                f"\n**Focus on Claim Type:** {request.claim_type}"
            )

        prompt_parts.append(
            f"\n**Verification Approach:** {request.confidence_threshold}"
        )
        # Map the threshold to its guidance line; anything unrecognized falls
        # back to the 'balanced' guidance (same behavior as the old if/elif).
        threshold_guidance = {
            "high_confidence_only": (
                "→ Flag any claims that cannot be verified with high confidence from multiple reliable sources"
            ),
            "comprehensive": (
                "→ Thoroughly check every factual statement, including common knowledge claims"
            ),
        }
        prompt_parts.append(
            threshold_guidance.get(
                request.confidence_threshold,
                "→ Focus on key claims and statistics; flag obvious issues; provide context for nuanced claims",
            )
        )

        prompt_parts.append(
            "\n\nUse web search to verify claims. For each claim, provide verification status, "
            "evidence summary, confidence level, sources, and recommendations if needed."
        )

        prompt_text = "\n".join(prompt_parts)

        # Shallow-copy the request (import hoisted to module level) so the
        # chat-style preparation can attach the prompt without mutating the
        # caller's object.
        request_copy = copy(request)
        request_copy.prompt = prompt_text
        return self.prepare_chat_style_prompt(request_copy)

    def get_input_schema(self) -> dict:
        """Return the JSON schema for this tool's input.

        Hand-written (rather than generated from get_tool_fields) so the
        shared fields — files, images, continuation_id, model, temperature,
        thinking_mode, use_websearch — carry fact-check-specific descriptions
        and defaults (use_websearch defaults to true).
        """
        return {
            "type": "object",
            "properties": {
                "content": {
                    "type": "string",
                    "description": "Content containing factual claims to verify",
                },
                "technical_domain": {
                    "type": "string",
                    "description": "Domain: 'hvac', 'software', 'saas', 'general'",
                },
                "claim_type": {
                    "type": "string",
                    "description": "Claim type: 'product_specs', 'technical_process', 'statistics', 'general', 'all'",
                },
                "confidence_threshold": {
                    "type": "string",
                    "description": "Verification depth: 'high_confidence_only', 'balanced', 'comprehensive'",
                    "default": "balanced",
                },
                "focus_claims": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Specific claims to verify (optional, checks all if not provided)",
                },
                "files": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional reference documents or source materials",
                },
                "images": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional screenshots or spec sheets to verify",
                },
                "continuation_id": {
                    "type": "string",
                    "description": "Thread ID to continue previous conversation",
                },
                "model": {
                    "type": "string",
                    "description": "AI model to use (leave empty for default analytical model)",
                },
                "temperature": {
                    "type": "number",
                    "description": "Creativity level 0.0-1.0 (default 0.2 for precision)",
                    "minimum": 0.0,
                    "maximum": 1.0,
                },
                "thinking_mode": {
                    "type": "string",
                    "description": "Thinking depth: minimal, low, medium, high, max",
                    "enum": ["minimal", "low", "medium", "high", "max"],
                },
                "use_websearch": {
                    "type": "boolean",
                    "description": "Enable web search (REQUIRED for fact-checking, defaults to true)",
                    "default": True,
                },
            },
            "required": ["content"],
        }