- Added test-monitor.sh for monitoring test execution metrics and generating reports
- Created test-data-manager.sh for robust test data management
- Added health-check.sh for comprehensive system health verification
- Enhanced overall deployment safety and reliability

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
#!/bin/bash
# test-monitor.sh - Script to monitor test execution and generate reports
# Usage: ./bin/test-monitor.sh [--store] [--notify] [--threshold=80]
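#
# Example invocation (hypothetical values): store results and raise an alert when
# the pass rate drops below 90%:
#   ./bin/test-monitor.sh --store --notify --threshold=90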

set -e

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Default settings
STORE_RESULTS=false
SEND_NOTIFICATIONS=false
SUCCESS_THRESHOLD=80
CURRENT_DATE=$(date +"%Y-%m-%d")

# Parse arguments
for arg in "$@"; do
  case $arg in
    --store)
      STORE_RESULTS=true
      shift
      ;;
    --notify)
      SEND_NOTIFICATIONS=true
      shift
      ;;
    --threshold=*)
      SUCCESS_THRESHOLD="${arg#*=}"
      shift
      ;;
  esac
done
echo -e "${GREEN}=== Test Monitoring Dashboard ===${NC}"
|
|
echo "Analyzing test results and performance metrics..."
|
|
|
|
# Check if we're in the right directory
|
|
if [ ! -d "tests/e2e" ]; then
|
|
echo -e "${RED}Error: Please run this script from the wordpress-dev directory${NC}"
|
|
exit 1
|
|
fi
|
|
|
|
# Create monitoring directory structure
|
|
mkdir -p monitoring/data
|
|
mkdir -p monitoring/reports
|
|
mkdir -p monitoring/alerts
|
|
|
|
# Generate a unique run ID
|
|
RUN_ID=$(date +"%Y%m%d%H%M%S")
|
|
REPORT_FILE="monitoring/reports/test-report-${RUN_ID}.md"
|
|
DATA_FILE="monitoring/data/test-data-${RUN_ID}.json"
|
|
|
|

# Function to analyze Playwright test results
analyze_test_results() {
  echo -e "\n${YELLOW}Analyzing test results...${NC}"

  # Check if test results directory exists
  if [ ! -d "test-results" ]; then
    echo -e "${RED}No test results found.${NC}"
    return 1
  fi

  # Count passed and failed tests
  TOTAL_TESTS=$(find test-results -name "*.json" | wc -l)
  PASSED_TESTS=$(grep -r '"status":"passed"' test-results | wc -l)
  FAILED_TESTS=$(grep -r '"status":"failed"' test-results | wc -l)
  SKIPPED_TESTS=$(grep -r '"status":"skipped"' test-results | wc -l)
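  # Note (assumption): the counts above expect one JSON result file per test and
  # status strings written exactly as "status":"passed"/"failed"/"skipped". If the
  # reporter emits several results per file or adds whitespace around the colon,
  # TOTAL_TESTS (a file count) and the per-status grep counts can diverge.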

  # Calculate success rate
  if [ $TOTAL_TESTS -gt 0 ]; then
    SUCCESS_RATE=$((PASSED_TESTS * 100 / TOTAL_TESTS))
  else
    SUCCESS_RATE=0
  fi

  # Analyze test duration (average, in seconds); guard against an empty result set
  # so awk does not divide by zero
  AVERAGE_DURATION=$(grep -r '"duration":' test-results | awk -F'"duration":' '{sum+=$2; count++} END {if (count > 0) print sum/count/1000; else print 0}')

  # Find slowest tests
  SLOWEST_TESTS=$(grep -r '"duration":' test-results | sort -nr -k2 | head -5)

  # Find most frequent failures
  FAILURE_PATTERNS=$(grep -r "Error:" test-results | sort | uniq -c | sort -nr | head -5)

  # Output results
  echo -e "\n${GREEN}Test Results Summary:${NC}"
  echo -e "Total Tests: ${TOTAL_TESTS}"
  echo -e "Passed: ${PASSED_TESTS}"
  echo -e "Failed: ${FAILED_TESTS}"
  echo -e "Skipped: ${SKIPPED_TESTS}"
  echo -e "Success Rate: ${SUCCESS_RATE}%"
  echo -e "Average Duration: ${AVERAGE_DURATION} seconds"

  # Store results if enabled
  if [ "$STORE_RESULTS" = true ]; then
    # Create JSON data
cat > "$DATA_FILE" << EOF
|
|
{
|
|
"runId": "${RUN_ID}",
|
|
"date": "${CURRENT_DATE}",
|
|
"totalTests": ${TOTAL_TESTS},
|
|
"passedTests": ${PASSED_TESTS},
|
|
"failedTests": ${FAILED_TESTS},
|
|
"skippedTests": ${SKIPPED_TESTS},
|
|
"successRate": ${SUCCESS_RATE},
|
|
"averageDuration": ${AVERAGE_DURATION}
|
|
}
|
|
EOF
|
|
echo -e "${GREEN}Test data stored in ${DATA_FILE}${NC}"
|
|
|
|
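
    # Note: generate_trends below pulls "successRate" and "date" out of these files
    # with grep rather than a JSON parser, so keep the field names and the
    # YYYY-MM-DD date format exactly as written here.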

    # Create markdown report
    cat > "$REPORT_FILE" << EOF
# Test Execution Report - ${CURRENT_DATE}

## Summary

- **Run ID**: ${RUN_ID}
- **Date**: ${CURRENT_DATE}
- **Total Tests**: ${TOTAL_TESTS}
- **Passed**: ${PASSED_TESTS}
- **Failed**: ${FAILED_TESTS}
- **Skipped**: ${SKIPPED_TESTS}
- **Success Rate**: ${SUCCESS_RATE}%
- **Average Duration**: ${AVERAGE_DURATION} seconds

## Slowest Tests

\`\`\`
${SLOWEST_TESTS}
\`\`\`

## Common Failure Patterns

\`\`\`
${FAILURE_PATTERNS}
\`\`\`

## Recommendations

EOF

    # Add recommendations based on results
    if [ $SUCCESS_RATE -lt $SUCCESS_THRESHOLD ]; then
      cat >> "$REPORT_FILE" << EOF
- **Critical Issue**: Success rate below threshold (${SUCCESS_THRESHOLD}%)
- Run the auto-recovery script: \`./bin/auto-recovery.sh\`
- Check recent code changes that might have affected tests
- Verify selector stability with \`./bin/verify-selectors.sh\`
EOF
    else
      cat >> "$REPORT_FILE" << EOF
- Success rate above threshold (${SUCCESS_THRESHOLD}%)
- Continue monitoring test performance
- Consider optimizing slow tests
EOF
    fi

    echo -e "${GREEN}Test report generated: ${REPORT_FILE}${NC}"
  fi

  # Send notifications if enabled and below threshold
  if [ "$SEND_NOTIFICATIONS" = true ] && [ $SUCCESS_RATE -lt $SUCCESS_THRESHOLD ]; then
    generate_alert "${SUCCESS_RATE}" "${FAILED_TESTS}"
  fi

  # Return success/failure based on threshold
  if [ $SUCCESS_RATE -ge $SUCCESS_THRESHOLD ]; then
    return 0
  else
    return 1
  fi
}

# Function to generate historical trends
generate_trends() {
  echo -e "\n${YELLOW}Generating historical trends...${NC}"

  # Check if we have historical data
  if [ $(find monitoring/data -name "test-data-*.json" | wc -l) -lt 2 ]; then
    echo -e "${YELLOW}Not enough historical data for trend analysis.${NC}"
    return 0
  fi

  # Extract success rates from historical data
  SUCCESS_TREND=$(grep -r '"successRate":' monitoring/data | sort | tail -10 | awk -F'"successRate":' '{print $2}' | tr -d ',' | tr '\n' ',')

  # Extract dates from historical data
  DATE_TREND=$(grep -r '"date":' monitoring/data | sort | tail -10 | awk -F'"date":"' '{print $2}' | tr -d '",' | tr '\n' ',')

  echo -e "\n${GREEN}Historical Trend:${NC}"
  echo -e "Recent Success Rates: ${SUCCESS_TREND}"
  echo -e "Dates: ${DATE_TREND}"

  # Determine trend direction
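  # SUCCESS_TREND ends with a trailing comma (from tr '\n' ','), so when awk splits
  # on ',' the last field is empty: $(NF-1) is the most recent rate and $(NF-2) the
  # one before it.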
  LATEST_RATE=$(echo $SUCCESS_TREND | awk -F',' '{print $(NF-1)}')
  PREVIOUS_RATE=$(echo $SUCCESS_TREND | awk -F',' '{print $(NF-2)}')

  if [ -n "$LATEST_RATE" ] && [ -n "$PREVIOUS_RATE" ]; then
    if [ $LATEST_RATE -gt $PREVIOUS_RATE ]; then
      echo -e "${GREEN}Trend: Improving ↑${NC}"
    elif [ $LATEST_RATE -lt $PREVIOUS_RATE ]; then
      echo -e "${RED}Trend: Declining ↓${NC}"
    else
      echo -e "${YELLOW}Trend: Stable →${NC}"
    fi
  fi
}

# Function to generate alerts
generate_alert() {
  local success_rate=$1
  local failed_tests=$2

  echo -e "\n${YELLOW}Generating alert...${NC}"

  # Create alert file
  ALERT_FILE="monitoring/alerts/alert-${RUN_ID}.md"

  cat > "$ALERT_FILE" << EOF
# Test Alert - ${CURRENT_DATE}

## Alert Details

- **Run ID**: ${RUN_ID}
- **Date**: ${CURRENT_DATE}
- **Success Rate**: ${success_rate}% (below threshold of ${SUCCESS_THRESHOLD}%)
- **Failed Tests**: ${failed_tests}

## Recommended Actions

1. Run the auto-recovery script: \`./bin/auto-recovery.sh\`
2. Check recent code changes that might have affected tests
3. Verify selector stability with \`./bin/verify-selectors.sh\`
4. Review the full report: \`${REPORT_FILE}\`

## Next Steps

- Fix failing tests before proceeding with deployment
- Update selectors if necessary
- Run tests again to verify fixes
EOF

  echo -e "${RED}Alert generated: ${ALERT_FILE}${NC}"
  echo -e "Would send notification email/Slack message in a production environment"
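
  # Sketch (not wired up): one way to actually deliver the notification would be a
  # Slack-style incoming webhook. SLACK_WEBHOOK_URL is a hypothetical environment
  # variable, not something this repo defines.
  #
  # if [ -n "${SLACK_WEBHOOK_URL:-}" ]; then
  #   curl -s -X POST -H 'Content-type: application/json' \
  #     --data "{\"text\": \"Test success rate ${success_rate}% is below ${SUCCESS_THRESHOLD}% (run ${RUN_ID})\"}" \
  #     "$SLACK_WEBHOOK_URL"
  # fi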
}

# Function to generate recommendations
generate_recommendations() {
  echo -e "\n${YELLOW}Generating recommendations...${NC}"

  # Analyze test execution times
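  # (AVERAGE_DURATION is fractional, so the comparison goes through bc rather than
  # bash integer arithmetic; this assumes bc is installed.)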
if [ -n "$AVERAGE_DURATION" ] && [ $(echo "$AVERAGE_DURATION > 10" | bc) -eq 1 ]; then
|
|
echo -e "${YELLOW}Performance Recommendation: Tests are taking longer than 10 seconds on average.${NC}"
|
|
echo -e "- Consider optimizing test execution"
|
|
echo -e "- Review the slowest tests identified above"
|
|
echo -e "- Use more efficient selectors"
|
|
fi
|
|
|
|

  # Analyze failure patterns (default to 0 so the check still works when no results
  # were analyzed)
  if [ "${FAILED_TESTS:-0}" -gt 0 ]; then
    echo -e "${YELLOW}Stability Recommendation: ${FAILED_TESTS} tests are failing.${NC}"
    echo -e "- Run the auto-recovery script: ./bin/auto-recovery.sh"
    echo -e "- Check selector stability with: ./bin/verify-selectors.sh"
    echo -e "- Review the common failure patterns above"
  fi

  # Check for missing test data
  if ! [ -f "tests/e2e/data/test-users.ts" ] || ! [ -f "tests/e2e/data/test-events.ts" ]; then
    echo -e "${YELLOW}Test Data Recommendation: Missing test data files.${NC}"
    echo -e "- Create or restore test data files"
    echo -e "- Run test data generation scripts"
  fi
}

# Main execution
echo -e "\n${GREEN}=== Test Monitoring Dashboard - ${CURRENT_DATE} ===${NC}"

# Analyze test results (capture the exit status explicitly; a plain call followed by
# $? would abort the script here under `set -e` whenever the analysis fails)
if analyze_test_results; then
  ANALYSIS_STATUS=0
else
  ANALYSIS_STATUS=1
fi

# Generate historical trends
generate_trends

# Generate recommendations
generate_recommendations

# Summary
echo -e "\n${GREEN}=== Monitoring Summary ===${NC}"
if [ $ANALYSIS_STATUS -eq 0 ]; then
  echo -e "${GREEN}✓ Tests are performing above threshold (${SUCCESS_THRESHOLD}%)${NC}"
  echo -e "Continue monitoring for performance trends"
else
  echo -e "${RED}✗ Tests are performing below threshold (${SUCCESS_THRESHOLD}%)${NC}"
  echo -e "Review recommendations and take action before deployment"
fi

# Point to the external chart/report generator when results were stored
if [ "$STORE_RESULTS" = true ]; then
  echo -e "\nRun test-report-generator.js to create visual charts and reports"
fi

exit $ANALYSIS_STATUS