## Problem

All four marketing tools (contentvariant, subjectlines, platformadapt, factcheck) were calling prepare_chat_style_prompt(), which expects a request.prompt field. This caused "object has no field 'prompt'" errors in Claude Desktop.

## Root Cause

The prepare_prompt() methods were:

1. Building a prompt_text string
2. Creating a copy of the request
3. Setting request_copy.prompt = prompt_text
4. Calling prepare_chat_style_prompt(request_copy)

But ToolRequest (and its subclasses) don't have a 'prompt' field, causing an error as soon as the field was set and prepare_chat_style_prompt() tried to access it.

## Solution

Changed all prepare_prompt() methods to return the prompt string directly instead of calling prepare_chat_style_prompt(). This is the correct pattern for SimpleTool implementations (see the before/after sketch below).

## Files Changed

- tools/contentvariant.py: Removed copy() and prepare_chat_style_prompt() call
- tools/subjectlines.py: Removed copy() and prepare_chat_style_prompt() call
- tools/platformadapt.py: Removed copy() and prepare_chat_style_prompt() call
- tools/factcheck.py: Removed copy() and prepare_chat_style_prompt() call

## Testing

- Server startup: ✅ All 7 tools load successfully
- Tool instantiation: ✅ All tools initialize without errors

## Impact

This fixes the schema errors that prevented users from using the new Phase 2 tools in Claude Desktop. All tools should now work correctly.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
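For illustration, a minimal before/after sketch of the pattern described above. This is a reconstruction, not the exact code from the four tools: `DemoRequest` is a hypothetical stand-in for `ToolRequest`, and the prompt text is made up.

```python
from pydantic import BaseModel


class DemoRequest(BaseModel):
    """Hypothetical stand-in for ToolRequest; note: no 'prompt' field."""

    content: str


request = DemoRequest(content="Claims to verify...")

# Before (broken): copy the request, then assign a field that was never
# declared. Pydantic rejects assignment to undeclared fields, which is the
# "object has no field 'prompt'" error users saw in Claude Desktop.
try:
    request_copy = request.copy()
    request_copy.prompt = "Verify the factual claims in this content: ..."
except (AttributeError, ValueError) as exc:
    print(exc)  # "DemoRequest" object has no field "prompt"

# After (fixed): prepare_prompt() builds the string and returns it directly,
# with no copy() and no prepare_chat_style_prompt() call.
prompt = f"Verify the factual claims in this content:\n{request.content}"
print(prompt)
```

Returning the string directly also keeps SimpleTool subclasses free of any dependency on chat-style request fields.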
The updated tools/factcheck.py (Python, 188 lines, 7.4 KiB):

```python
"""Fact Check Tool
|
|
|
|
Quick technical fact verification via web search.
|
|
Verifies claims against authoritative sources and provides confidence levels.
|
|
"""
|
|
|
|
from typing import Optional
|
|
|
|
from pydantic import Field
|
|
|
|
from config import TEMPERATURE_PRECISION
|
|
from systemprompts import FACTCHECK_PROMPT
|
|
from tools.models import ToolModelCategory
|
|
from tools.shared.base_models import ToolRequest
|
|
from tools.simple.base import SimpleTool
|
|
|
|
|
|
class FactCheckRequest(ToolRequest):
|
|
"""Request model for Fact Check"""
|
|
|
|
content: str = Field(
|
|
...,
|
|
description="Content containing factual claims to verify. Can be a full article, social post, or specific claims.",
|
|
)
|
|
technical_domain: Optional[str] = Field(
|
|
default=None,
|
|
description="Domain for specialized fact-checking: 'hvac', 'software', 'saas', 'general'. Helps identify authoritative sources.",
|
|
)
|
|
claim_type: Optional[str] = Field(
|
|
default=None,
|
|
description="Type of claims to focus on: 'product_specs', 'technical_process', 'statistics', 'general', 'all'. Default is 'all'.",
|
|
)
|
|
confidence_threshold: str = Field(
|
|
default="balanced",
|
|
description="Verification depth: 'high_confidence_only' (strict, flag anything uncertain), 'balanced' (reasonable verification), 'comprehensive' (check everything thoroughly).",
|
|
)
|
|
focus_claims: Optional[list[str]] = Field(
|
|
default=None,
|
|
description="Specific claims to verify if you don't want to check all content. Provide as list of quoted text.",
|
|
)
|
|
|
|
|
|
class FactCheckTool(SimpleTool):
|
|
"""Verify factual claims using web search"""
|
|
|
|
def get_name(self) -> str:
|
|
return "factcheck"
|
|
|
|
def get_description(self) -> str:
|
|
return (
|
|
"Verify factual claims in content using web search. Checks product specs, statistics, "
|
|
"technical processes, and general facts against authoritative sources. Returns verification "
|
|
"status (✅ Verified / ⚠️ Partial / ❌ Unsupported / 🔍 Needs Context) with sources and "
|
|
"confidence levels. Essential for technical content accuracy before publishing."
|
|
)
|
|
|
|
def get_system_prompt(self) -> str:
|
|
return FACTCHECK_PROMPT
|
|
|
|
def get_default_temperature(self) -> float:
|
|
return TEMPERATURE_PRECISION
|
|
|
|
def get_model_category(self) -> ToolModelCategory:
|
|
return ToolModelCategory.FAST_RESPONSE
|
|
|
|
def get_request_model(self):
|
|
return FactCheckRequest
|
|
|
|
def get_tool_fields(self) -> dict:
|
|
"""Tool-specific field definitions for FactCheck"""
|
|
return {
|
|
"content": {
|
|
"type": "string",
|
|
"description": "Content with claims to verify",
|
|
}
|
|
}
|
|
|
|
async def prepare_prompt(self, request: FactCheckRequest) -> str:
|
|
"""Prepare the fact check prompt"""
|
|
prompt_parts = ["Verify the factual claims in this content:"]
|
|
|
|
if request.focus_claims:
|
|
prompt_parts.append("\n**Specific Claims to Verify:**")
|
|
for claim in request.focus_claims:
|
|
prompt_parts.append(f"- {claim}")
|
|
prompt_parts.append(f"\n**Full Content for Context:**\n{request.content}")
|
|
else:
|
|
prompt_parts.append(f"\n**Content to Fact-Check:**\n{request.content}")
|
|
|
|
if request.technical_domain:
|
|
prompt_parts.append(
|
|
f"\n**Technical Domain:** {request.technical_domain} (use domain-specific authoritative sources)"
|
|
)
|
|
|
|
if request.claim_type and request.claim_type != "all":
|
|
prompt_parts.append(
|
|
f"\n**Focus on Claim Type:** {request.claim_type}"
|
|
)
|
|
|
|
prompt_parts.append(
|
|
f"\n**Verification Approach:** {request.confidence_threshold}"
|
|
)
|
|
|
|
if request.confidence_threshold == "high_confidence_only":
|
|
prompt_parts.append(
|
|
"→ Flag any claims that cannot be verified with high confidence from multiple reliable sources"
|
|
)
|
|
elif request.confidence_threshold == "comprehensive":
|
|
prompt_parts.append(
|
|
"→ Thoroughly check every factual statement, including common knowledge claims"
|
|
)
|
|
else: # balanced
|
|
prompt_parts.append(
|
|
"→ Focus on key claims and statistics; flag obvious issues; provide context for nuanced claims"
|
|
)
|
|
|
|
prompt_parts.append(
|
|
"\n\nUse web search to verify claims. For each claim, provide verification status, "
|
|
"evidence summary, confidence level, sources, and recommendations if needed."
|
|
)
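
        # Fixed: no request copy and no prepare_chat_style_prompt() call here;
        # ToolRequest has no 'prompt' field, so this SimpleTool builds the
        # prompt and returns the string directly.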
        # Return the complete prompt
        return "\n".join(prompt_parts)

    def get_input_schema(self) -> dict:
        """Return the JSON schema for this tool's input"""
        return {
            "type": "object",
            "properties": {
                "content": {
                    "type": "string",
                    "description": "Content containing factual claims to verify",
                },
                "technical_domain": {
                    "type": "string",
                    "description": "Domain: 'hvac', 'software', 'saas', 'general'",
                },
                "claim_type": {
                    "type": "string",
                    "description": "Claim type: 'product_specs', 'technical_process', 'statistics', 'general', 'all'",
                },
                "confidence_threshold": {
                    "type": "string",
                    "description": "Verification depth: 'high_confidence_only', 'balanced', 'comprehensive'",
                    "default": "balanced",
                },
                "focus_claims": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Specific claims to verify (optional, checks all if not provided)",
                },
                "files": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional reference documents or source materials",
                },
                "images": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional screenshots or spec sheets to verify",
                },
                "continuation_id": {
                    "type": "string",
                    "description": "Thread ID to continue previous conversation",
                },
                "model": {
                    "type": "string",
                    "description": "AI model to use (leave empty for default analytical model)",
                },
                "temperature": {
                    "type": "number",
                    "description": "Creativity level 0.0-1.0 (default 0.2 for precision)",
                    "minimum": 0.0,
                    "maximum": 1.0,
                },
                "thinking_mode": {
                    "type": "string",
                    "description": "Thinking depth: minimal, low, medium, high, max",
                    "enum": ["minimal", "low", "medium", "high", "max"],
                },
                "use_websearch": {
                    "type": "boolean",
                    "description": "Enable web search (REQUIRED for fact-checking, defaults to true)",
                    "default": True,
                },
            },
            "required": ["content"],
        }
```
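A quick usage sketch of the fixed prepare_prompt(). This assumes FactCheckTool() can be constructed without arguments and that the fields inherited from ToolRequest are all optional; the real SimpleTool base class may require more setup.

```python
import asyncio

from tools.factcheck import FactCheckRequest, FactCheckTool

# Hypothetical smoke test; assumes a no-arg constructor works and that
# ToolRequest's inherited fields are all optional.
tool = FactCheckTool()
request = FactCheckRequest(
    content="Model X heat pumps deliver a SEER2 rating of 22.",
    technical_domain="hvac",
)

# prepare_prompt() now returns the prompt string directly.
prompt = asyncio.run(tool.prepare_prompt(request))
print(prompt)
```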