Initial commit: Telegram Management System
Some checks failed
Deploy / deploy (push) Has been cancelled

Full-stack web application for Telegram management
- Frontend: Vue 3 + Vben Admin
- Backend: NestJS
- Features: User management, group broadcast, statistics

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
你的用户名
2025-11-04 15:37:50 +08:00
commit 237c7802e5
3674 changed files with 525172 additions and 0 deletions

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env node
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Express-style health endpoint snippet. Spliced verbatim into each express
// service's src/index.js right after `const app = express();`.
// NOTE: the template contents below are injected source code (runtime data),
// not this script's own logic.
const expressHealthEndpoint = `
// Health check endpoint
app.get('/health', (req, res) => {
res.json({
status: 'healthy',
service: process.env.SERVICE_NAME || 'unknown',
timestamp: new Date().toISOString(),
uptime: process.uptime()
});
});
`;
// Hapi-style health route snippet. Spliced after the Hapi.server({...}) call.
const hapiHealthEndpoint = `
// Health check route
server.route({
method: 'GET',
path: '/health',
handler: (request, h) => {
return {
status: 'healthy',
service: process.env.SERVICE_NAME || 'unknown',
timestamp: new Date().toISOString(),
uptime: process.uptime()
};
}
});
`;
// Services whose src/index.js should receive a health endpoint.
// `framework` selects the snippet to inject; `port` is informational only
// (it is never read by this script).
const services = [
{ name: 'orchestrator', framework: 'express', port: 3001 },
{ name: 'claude-agent', framework: 'express', port: 3002 },
{ name: 'gramjs-adapter', framework: 'express', port: 3003 },
{ name: 'safety-guard', framework: 'express', port: 3004 },
{ name: 'analytics', framework: 'hapi', port: 3005 },
{ name: 'compliance-guard', framework: 'express', port: 3006 },
{ name: 'ab-testing', framework: 'express', port: 3007 }
];
/**
 * Inject a `/health` endpoint into each service's src/index.js and ensure its
 * Dockerfile sets SERVICE_NAME.
 *
 * For every entry in `services`:
 *  - skip the service when its index.js already mentions '/health'
 *    (plain substring test, so even a comment counts as "present");
 *  - otherwise splice the framework-appropriate snippet right after the
 *    app/server creation statement;
 *  - add `ENV SERVICE_NAME=<name>` before the CMD line of the Dockerfile
 *    when it is not already set.
 *
 * Errors for one service are logged and do not stop the others.
 */
async function addHealthEndpoints() {
  for (const service of services) {
    const indexPath = path.join(__dirname, '..', 'services', service.name, 'src', 'index.js');
    try {
      let content = fs.readFileSync(indexPath, 'utf8');

      // Skip services that already expose a health endpoint.
      if (content.includes('/health')) {
        console.log(`${service.name} already has health endpoint`);
        continue;
      }

      // Splice the snippet in after the framework's app/server creation.
      let injected = false;
      if (service.framework === 'express') {
        const marker = 'const app = express();';
        const markerPos = content.indexOf(marker);
        if (markerPos !== -1) {
          const insertPos = markerPos + marker.length;
          content = content.slice(0, insertPos) + expressHealthEndpoint + content.slice(insertPos);
          injected = true;
        }
      } else if (service.framework === 'hapi') {
        const serverCreationMatch = content.match(/const server = Hapi\.server\([^}]+\}\);/s);
        if (serverCreationMatch) {
          const insertPos = content.indexOf(serverCreationMatch[0]) + serverCreationMatch[0].length;
          content = content.slice(0, insertPos) + hapiHealthEndpoint + content.slice(insertPos);
          injected = true;
        }
      }

      // BUG FIX: the original wrote the file back and printed a success
      // message even when no insertion point was found, leaving the service
      // silently unchanged. Report and skip instead.
      if (!injected) {
        console.warn(`⚠ Could not find insertion point in ${service.name}, skipping`);
        continue;
      }

      // Ensure the Dockerfile pins SERVICE_NAME so the endpoint reports it.
      const dockerfilePath = path.join(__dirname, '..', 'services', service.name, 'Dockerfile');
      if (fs.existsSync(dockerfilePath)) {
        let dockerContent = fs.readFileSync(dockerfilePath, 'utf8');
        if (!dockerContent.includes('SERVICE_NAME')) {
          dockerContent = dockerContent.replace(
            'CMD ["node", "src/app.js"]',
            `ENV SERVICE_NAME=${service.name}\nCMD ["node", "src/app.js"]`
          );
          fs.writeFileSync(dockerfilePath, dockerContent);
        }
      }

      fs.writeFileSync(indexPath, content);
      console.log(`✓ Added health endpoint to ${service.name}`);
    } catch (error) {
      console.error(`✗ Error updating ${service.name}:`, error.message);
    }
  }
}
// Run the migration. BUG FIX: the original promise chain had no rejection
// handler, so an unexpected throw became an unhandled rejection; report it
// and exit non-zero instead.
addHealthEndpoints()
  .then(() => {
    console.log('\nHealth endpoints added to all services!');
    console.log('Please rebuild and restart the services.');
  })
  .catch((error) => {
    console.error('Unexpected failure:', error);
    process.exitCode = 1;
  });

360
marketing-agent/scripts/backup.sh Executable file
View File

@@ -0,0 +1,360 @@
#!/bin/bash
# Telegram Marketing Agent System - Backup Script
# Automated backup for databases and configurations
#
# Usage:
#   backup.sh                  create a new backup
#   backup.sh restore <file>   restore from a backup archive
#   backup.sh verify <file>    verify a backup archive
set -e

# Configuration
BACKUP_DIR="/backup/marketing-agent"   # root directory for all backups
TIMESTAMP=$(date +%Y%m%d_%H%M%S)       # e.g. 20250101_120000
BACKUP_PATH="$BACKUP_DIR/$TIMESTAMP"   # working directory for this run
RETENTION_DAYS=30                      # archives older than this are purged

# ANSI color codes for terminal output.
# NOTE(review): YELLOW is defined but never referenced in this script.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Functions
# --- Output helpers -------------------------------------------------------
# Print a message in green (success).
print_success() { echo -e "${GREEN}$1${NC}"; }

# Print a message in red (failure).
print_error() { echo -e "${RED}$1${NC}"; }

# Print an indented, uncolored informational message.
print_info() { echo -e " $1"; }
# Create backup directory
# Create the per-run backup directory tree (one subdirectory per artifact).
create_backup_dir() {
    local subdir
    for subdir in mongodb postgres redis config logs; do
        mkdir -p "$BACKUP_PATH/$subdir"
    done
    print_success "Created backup directory: $BACKUP_PATH"
}
# Backup MongoDB
backup_mongodb() {
print_info "Backing up MongoDB..."
docker exec marketing_mongodb mongodump \
--uri="mongodb://localhost:27017/marketing_agent" \
--out=/tmp/mongodb_backup \
--gzip
docker cp marketing_mongodb:/tmp/mongodb_backup "$BACKUP_PATH/mongodb/"
docker exec marketing_mongodb rm -rf /tmp/mongodb_backup
print_success "MongoDB backup completed"
}
# Backup PostgreSQL
backup_postgres() {
print_info "Backing up PostgreSQL..."
docker exec marketing_postgres pg_dump \
-U marketing_user \
-d marketing_agent \
--clean \
--if-exists \
-f /tmp/postgres_backup.sql
docker cp marketing_postgres:/tmp/postgres_backup.sql "$BACKUP_PATH/postgres/"
docker exec marketing_postgres rm /tmp/postgres_backup.sql
# Compress the dump
gzip "$BACKUP_PATH/postgres/postgres_backup.sql"
print_success "PostgreSQL backup completed"
}
# Backup Redis
backup_redis() {
print_info "Backing up Redis..."
docker exec marketing_redis redis-cli BGSAVE
sleep 5 # Wait for background save to complete
docker cp marketing_redis:/data/dump.rdb "$BACKUP_PATH/redis/"
print_success "Redis backup completed"
}
# Backup configuration files
backup_configs() {
print_info "Backing up configuration files..."
# Copy important config files
cp .env "$BACKUP_PATH/config/" 2>/dev/null || true
cp docker-compose.yml "$BACKUP_PATH/config/"
cp docker-compose.prod.yml "$BACKUP_PATH/config/" 2>/dev/null || true
# Copy service configs
for service in services/*; do
if [ -d "$service" ]; then
service_name=$(basename "$service")
mkdir -p "$BACKUP_PATH/config/services/$service_name"
cp "$service/package.json" "$BACKUP_PATH/config/services/$service_name/" 2>/dev/null || true
cp -r "$service/src/config" "$BACKUP_PATH/config/services/$service_name/" 2>/dev/null || true
fi
done
print_success "Configuration backup completed"
}
# Backup logs
backup_logs() {
print_info "Backing up logs..."
# Get logs from containers
for container in $(docker-compose ps -q); do
container_name=$(docker inspect -f '{{.Name}}' "$container" | sed 's/^\/marketing_//')
docker logs "$container" > "$BACKUP_PATH/logs/${container_name}.log" 2>&1
done
# Compress logs
tar -czf "$BACKUP_PATH/logs.tar.gz" -C "$BACKUP_PATH" logs/
rm -rf "$BACKUP_PATH/logs"
print_success "Logs backup completed"
}
# Create backup metadata
create_metadata() {
cat > "$BACKUP_PATH/backup_metadata.json" <<EOF
{
"timestamp": "$TIMESTAMP",
"date": "$(date)",
"version": "$(git rev-parse HEAD 2>/dev/null || echo 'unknown')",
"services": {
"mongodb": "$(docker exec marketing_mongodb mongod --version | head -n1)",
"postgres": "$(docker exec marketing_postgres postgres --version)",
"redis": "$(docker exec marketing_redis redis-server --version | head -n1)"
},
"containers": $(docker-compose ps --format json | jq -s '.')
}
EOF
print_success "Backup metadata created"
}
# Compress backup
compress_backup() {
print_info "Compressing backup..."
cd "$BACKUP_DIR"
tar -czf "marketing-agent-backup-$TIMESTAMP.tar.gz" "$TIMESTAMP/"
# Calculate size
SIZE=$(du -h "marketing-agent-backup-$TIMESTAMP.tar.gz" | cut -f1)
print_success "Backup compressed: marketing-agent-backup-$TIMESTAMP.tar.gz ($SIZE)"
}
# Upload to S3 (optional)
upload_to_s3() {
if [ ! -z "$AWS_S3_BUCKET" ]; then
print_info "Uploading to S3..."
aws s3 cp "$BACKUP_DIR/marketing-agent-backup-$TIMESTAMP.tar.gz" \
"s3://$AWS_S3_BUCKET/backups/" \
--storage-class STANDARD_IA
print_success "Backup uploaded to S3"
fi
}
# Clean old backups
clean_old_backups() {
print_info "Cleaning old backups..."
# Remove local backups older than retention period
find "$BACKUP_DIR" -name "marketing-agent-backup-*.tar.gz" -mtime +$RETENTION_DAYS -delete
find "$BACKUP_DIR" -maxdepth 1 -type d -name "20*" -mtime +$RETENTION_DAYS -exec rm -rf {} \;
# Clean S3 backups if configured
if [ ! -z "$AWS_S3_BUCKET" ]; then
aws s3 ls "s3://$AWS_S3_BUCKET/backups/" | \
while read -r line; do
createDate=$(echo $line | awk '{print $1" "$2}')
createDate=$(date -d "$createDate" +%s)
olderThan=$(date -d "$RETENTION_DAYS days ago" +%s)
if [[ $createDate -lt $olderThan ]]; then
fileName=$(echo $line | awk '{print $4}')
if [[ $fileName != "" ]]; then
aws s3 rm "s3://$AWS_S3_BUCKET/backups/$fileName"
fi
fi
done
fi
print_success "Old backups cleaned"
}
# Restore from backup
restore_backup() {
BACKUP_FILE=$1
if [ -z "$BACKUP_FILE" ]; then
echo "Usage: $0 restore <backup-file>"
exit 1
fi
if [ ! -f "$BACKUP_FILE" ]; then
print_error "Backup file not found: $BACKUP_FILE"
exit 1
fi
print_info "Restoring from backup: $BACKUP_FILE"
# Extract backup
RESTORE_DIR="/tmp/restore_$(date +%s)"
mkdir -p "$RESTORE_DIR"
tar -xzf "$BACKUP_FILE" -C "$RESTORE_DIR"
# Find the extracted directory
BACKUP_CONTENT=$(ls "$RESTORE_DIR")
# Stop services
print_info "Stopping services..."
docker-compose stop
# Restore MongoDB
if [ -d "$RESTORE_DIR/$BACKUP_CONTENT/mongodb" ]; then
print_info "Restoring MongoDB..."
docker-compose up -d mongodb
sleep 10
docker cp "$RESTORE_DIR/$BACKUP_CONTENT/mongodb" marketing_mongodb:/tmp/
docker exec marketing_mongodb mongorestore \
--uri="mongodb://localhost:27017/marketing_agent" \
--drop \
--gzip \
/tmp/mongodb/marketing_agent
print_success "MongoDB restored"
fi
# Restore PostgreSQL
if [ -f "$RESTORE_DIR/$BACKUP_CONTENT/postgres/postgres_backup.sql.gz" ]; then
print_info "Restoring PostgreSQL..."
docker-compose up -d postgres
sleep 10
gunzip -c "$RESTORE_DIR/$BACKUP_CONTENT/postgres/postgres_backup.sql.gz" > "$RESTORE_DIR/postgres_backup.sql"
docker cp "$RESTORE_DIR/postgres_backup.sql" marketing_postgres:/tmp/
docker exec marketing_postgres psql -U marketing_user -d marketing_agent -f /tmp/postgres_backup.sql
print_success "PostgreSQL restored"
fi
# Restore Redis
if [ -f "$RESTORE_DIR/$BACKUP_CONTENT/redis/dump.rdb" ]; then
print_info "Restoring Redis..."
docker-compose stop redis
docker cp "$RESTORE_DIR/$BACKUP_CONTENT/redis/dump.rdb" marketing_redis:/data/
docker-compose up -d redis
print_success "Redis restored"
fi
# Clean up
rm -rf "$RESTORE_DIR"
# Start all services
print_info "Starting all services..."
docker-compose up -d
print_success "Restore completed successfully"
}
# Verify backup
verify_backup() {
BACKUP_FILE=$1
if [ -z "$BACKUP_FILE" ]; then
echo "Usage: $0 verify <backup-file>"
exit 1
fi
if [ ! -f "$BACKUP_FILE" ]; then
print_error "Backup file not found: $BACKUP_FILE"
exit 1
fi
print_info "Verifying backup: $BACKUP_FILE"
# Test extraction
tar -tzf "$BACKUP_FILE" > /dev/null 2>&1
if [ $? -eq 0 ]; then
print_success "Backup file is valid"
# Show contents
echo ""
echo "Backup contents:"
tar -tzf "$BACKUP_FILE" | head -20
echo "..."
# Show metadata if exists
TEMP_DIR="/tmp/verify_$(date +%s)"
mkdir -p "$TEMP_DIR"
tar -xzf "$BACKUP_FILE" -C "$TEMP_DIR" --wildcards "*/backup_metadata.json" 2>/dev/null
if [ -f "$TEMP_DIR"/*/backup_metadata.json ]; then
echo ""
echo "Backup metadata:"
cat "$TEMP_DIR"/*/backup_metadata.json | jq '.'
fi
rm -rf "$TEMP_DIR"
else
print_error "Backup file is corrupted"
exit 1
fi
}
# Main execution
case "$1" in
"restore")
restore_backup "$2"
;;
"verify")
verify_backup "$2"
;;
"")
# Normal backup operation
echo "Starting backup process..."
echo "========================="
create_backup_dir
backup_mongodb
backup_postgres
backup_redis
backup_configs
backup_logs
create_metadata
compress_backup
upload_to_s3
clean_old_backups
# Remove uncompressed backup
rm -rf "$BACKUP_PATH"
echo ""
echo "========================="
print_success "Backup completed successfully!"
echo "Backup file: $BACKUP_DIR/marketing-agent-backup-$TIMESTAMP.tar.gz"
;;
*)
echo "Usage: $0 [restore|verify] [backup-file]"
echo ""
echo "Commands:"
echo " $0 Create a new backup"
echo " $0 restore <file> Restore from backup file"
echo " $0 verify <file> Verify backup file integrity"
exit 1
;;
esac

175
marketing-agent/scripts/deploy.sh Executable file
View File

@@ -0,0 +1,175 @@
#!/bin/bash
# Marketing Intelligence Agent Deployment Script
# Builds images and starts the stack via docker-compose, or applies the
# Kubernetes manifests when invoked with "k8s".
set -e

# Resolve the script and project-root directories regardless of the cwd the
# script is invoked from.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." && pwd )"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Functions
# Green success message.
print_success() {
    echo -e "${GREEN}$1${NC}"
}
# Red error message.
print_error() {
    echo -e "${RED}$1${NC}"
}
# Yellow informational message.
print_info() {
    echo -e "${YELLOW} $1${NC}"
}
# Check prerequisites
check_prerequisites() {
print_info "Checking prerequisites..."
# Check Docker
if ! command -v docker &> /dev/null; then
print_error "Docker is not installed"
exit 1
fi
print_success "Docker found"
# Check Docker Compose
if ! command -v docker-compose &> /dev/null; then
print_error "Docker Compose is not installed"
exit 1
fi
print_success "Docker Compose found"
# Check kubectl (optional)
if command -v kubectl &> /dev/null; then
print_success "kubectl found (optional)"
else
print_info "kubectl not found (optional for Kubernetes deployment)"
fi
}
# Build Docker images
build_images() {
print_info "Building Docker images..."
cd "$PROJECT_ROOT"
# Build all services
docker-compose build --parallel
print_success "Docker images built successfully"
}
# Start infrastructure services
start_infrastructure() {
print_info "Starting infrastructure services..."
cd "$PROJECT_ROOT"
# Start databases and message queues
docker-compose up -d postgres mongodb redis rabbitmq elasticsearch
# Wait for services to be ready
print_info "Waiting for databases to be ready..."
sleep 30
print_success "Infrastructure services started"
}
# Run database migrations
run_migrations() {
print_info "Running database migrations..."
# TODO: Add migration scripts here
print_success "Database migrations completed"
}
# Start application services
start_services() {
print_info "Starting application services..."
cd "$PROJECT_ROOT"
# Start all services
docker-compose up -d
print_success "All services started"
}
# Check service health
check_health() {
print_info "Checking service health..."
# Wait for services to start
sleep 10
# Check each service
services=("orchestrator" "claude-agent" "gramjs-adapter" "safety-guard" "analytics" "ab-testing")
for service in "${services[@]}"; do
if docker-compose ps | grep -q "$service.*Up"; then
print_success "$service is running"
else
print_error "$service is not running"
fi
done
}
# Deploy to Kubernetes (optional)
deploy_kubernetes() {
print_info "Deploying to Kubernetes..."
if ! command -v kubectl &> /dev/null; then
print_error "kubectl is not installed, skipping Kubernetes deployment"
return
fi
cd "$PROJECT_ROOT/infrastructure/k8s"
# Apply Kubernetes manifests
kubectl apply -f namespace.yaml
kubectl apply -f configmap.yaml
kubectl apply -f secrets.yaml
print_success "Kubernetes deployment completed"
}
# Main deployment flow
main() {
print_info "Starting Marketing Intelligence Agent deployment..."
# Check prerequisites
check_prerequisites
# Parse command line arguments
case "${1:-docker}" in
docker)
build_images
start_infrastructure
run_migrations
start_services
check_health
;;
k8s|kubernetes)
deploy_kubernetes
;;
*)
print_error "Unknown deployment target: $1"
echo "Usage: $0 [docker|k8s]"
exit 1
;;
esac
print_success "Deployment completed successfully!"
print_info "Access the API at: http://localhost:8000"
print_info "Access Grafana at: http://localhost:3000 (admin/admin)"
print_info "Access RabbitMQ at: http://localhost:15672 (admin/admin)"
}
# Run main function
main "$@"

View File

@@ -0,0 +1,196 @@
#!/bin/bash
# Docker build script for all services
# Builds (and optionally pushes) one image per service plus the frontend.
set -e

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${GREEN}[BUILD]${NC} $1"
}
print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

# Get the directory of this script and the project root, so relative
# service paths resolve regardless of the caller's cwd.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Change to project root
cd "$PROJECT_ROOT"

# Check if Docker is installed
if ! command -v docker &> /dev/null; then
    print_error "Docker is not installed. Please install Docker first."
    exit 1
fi

# Default values (overridable via CLI flags parsed below)
TAG="latest"
REGISTRY=""
PUSH=false
BUILD_PARALLEL=false
# Parse command line arguments
# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -t|--tag)
            TAG="$2"
            shift 2
            ;;
        -r|--registry)
            REGISTRY="$2"
            shift 2
            ;;
        -p|--push)
            PUSH=true
            shift
            ;;
        --parallel)
            BUILD_PARALLEL=true
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo "Options:"
            echo " -t, --tag TAG Tag for the images (default: latest)"
            echo " -r, --registry URL Registry URL (e.g., ghcr.io/username)"
            echo " -p, --push Push images to registry after build"
            echo " --parallel Build images in parallel"
            echo " -h, --help Show this help message"
            exit 0
            ;;
        *)
            print_error "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Services to build; each must have services/<name>/Dockerfile (missing
# Dockerfiles are skipped with a warning by build_service).
SERVICES=(
    "api-gateway"
    "orchestrator"
    "claude-agent"
    "gramjs-adapter"
    "safety-guard"
    "analytics"
    "compliance-guard"
    "ab-testing"
    "workflow"
    "webhook"
    "template"
    "i18n"
    "user-management"
    "scheduler"
    "logging"
)
# Build function
# Build (and optionally push) the Docker image for one service.
# $1 = service name. Uses the global TAG, REGISTRY and PUSH settings.
# Skips quietly when the service has no Dockerfile; returns 1 when the
# docker build fails.
build_service() {
    local svc=$1
    local image="$svc"
    if [ -n "$REGISTRY" ]; then
        image="${REGISTRY}/${svc}"
    fi

    print_status "Building ${svc}..."

    if [ ! -f "services/${svc}/Dockerfile" ]; then
        print_warning "Dockerfile not found for ${svc}, skipping..."
        return 0
    fi

    if ! docker build -t "${image}:${TAG}" -t "${image}:latest" "services/${svc}"; then
        print_error "Failed to build ${svc}"
        return 1
    fi
    print_status "${svc} built successfully"

    if [ "$PUSH" = true ] && [ -n "$REGISTRY" ]; then
        print_status "Pushing ${svc} to registry..."
        docker push "${image}:${TAG}"
        docker push "${image}:latest"
        print_status "${svc} pushed successfully"
    fi
}
# Build frontend
build_frontend() {
print_status "Building frontend..."
local image_name="frontend"
if [ -n "$REGISTRY" ]; then
image_name="${REGISTRY}/frontend"
fi
if docker build -t "${image_name}:${TAG}" -t "${image_name}:latest" "frontend"; then
print_status "✓ Frontend built successfully"
if [ "$PUSH" = true ] && [ -n "$REGISTRY" ]; then
print_status "Pushing frontend to registry..."
docker push "${image_name}:${TAG}"
docker push "${image_name}:latest"
print_status "✓ Frontend pushed successfully"
fi
else
print_error "Failed to build frontend"
return 1
fi
}
# Main build process
print_status "Starting Docker build process..."
print_status "Tag: ${TAG}"
if [ -n "$REGISTRY" ]; then
print_status "Registry: ${REGISTRY}"
fi
# Build services
if [ "$BUILD_PARALLEL" = true ]; then
print_status "Building services in parallel..."
# Build all services in parallel
for service in "${SERVICES[@]}"; do
build_service "$service" &
done
# Build frontend in parallel
build_frontend &
# Wait for all background jobs to complete
wait
else
# Build services sequentially
for service in "${SERVICES[@]}"; do
build_service "$service" || exit 1
done
# Build frontend
build_frontend || exit 1
fi
print_status "Docker build process completed!"
# Summary
echo ""
print_status "Build Summary:"
print_status "- Tag: ${TAG}"
if [ -n "$REGISTRY" ]; then
print_status "- Registry: ${REGISTRY}"
fi
print_status "- Services built: ${#SERVICES[@]} + frontend"
if [ "$PUSH" = true ]; then
print_status "- Images pushed to registry: ✓"
fi

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Health check script for Telegram Marketing Agent System
# Probes the API gateway, service routes and frontend, printing a colored
# pass/fail line for each.
echo "🏥 Running Health Check for Telegram Marketing Agent System..."

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Base URL of the API gateway; all endpoint paths below are relative to it.
API_GATEWAY="http://localhost:3030"
# Function to check endpoint
check_endpoint() {
local endpoint=$1
local description=$2
response=$(curl -s -o /dev/null -w "%{http_code}" "$API_GATEWAY$endpoint")
if [ "$response" = "200" ] || [ "$response" = "401" ]; then
echo -e "${GREEN}$description - $endpoint (HTTP $response)${NC}"
return 0
else
echo -e "${RED}$description - $endpoint (HTTP $response)${NC}"
return 1
fi
}
# Check API Gateway health
echo -e "\n${YELLOW}Checking API Gateway...${NC}"
check_endpoint "/health" "API Gateway Health"
check_endpoint "/api-docs" "API Documentation"
# Check service health endpoints
echo -e "\n${YELLOW}Checking Service Health Endpoints...${NC}"
check_endpoint "/health/services" "All Services Status"
# Check authentication endpoint
echo -e "\n${YELLOW}Checking Authentication...${NC}"
check_endpoint "/api/v1/auth/login" "Auth Login Endpoint"
# Check protected endpoints (should return 401 without auth)
echo -e "\n${YELLOW}Checking Protected Endpoints (expecting 401)...${NC}"
check_endpoint "/api/v1/orchestrator/campaigns" "Campaigns Endpoint"
check_endpoint "/api/v1/gramjs-adapter/accounts" "Telegram Accounts Endpoint"
check_endpoint "/api/v1/analytics/metrics" "Analytics Endpoint"
check_endpoint "/api/v1/claude/chat" "Claude AI Endpoint"
check_endpoint "/api/v1/safety/check" "Safety Guard Endpoint"
check_endpoint "/api/v1/compliance/audit" "Compliance Endpoint"
check_endpoint "/api/v1/ab-testing/experiments" "A/B Testing Endpoint"
# Check metrics endpoint
echo -e "\n${YELLOW}Checking Monitoring Endpoints...${NC}"
check_endpoint "/metrics" "Prometheus Metrics"
# Check frontend
echo -e "\n${YELLOW}Checking Frontend...${NC}"
frontend_response=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:3008")
if [ "$frontend_response" = "200" ]; then
echo -e "${GREEN}✓ Frontend is accessible at http://localhost:3008${NC}"
else
echo -e "${RED}✗ Frontend is not accessible (HTTP $frontend_response)${NC}"
fi
# Summary
echo -e "\n${YELLOW}Health Check Summary:${NC}"
echo -e "- API Gateway: ${GREEN}http://localhost:3030${NC}"
echo -e "- Frontend: ${GREEN}http://localhost:3008${NC}"
echo -e "- API Docs: ${GREEN}http://localhost:3030/api-docs${NC}"
echo -e "\n${YELLOW}Next Steps:${NC}"
echo "1. Visit http://localhost:3008 to access the application"
echo "2. Use the API documentation at http://localhost:3030/api-docs"
echo "3. Check service logs if any health checks failed: docker-compose logs [service-name]"

View File

@@ -0,0 +1,99 @@
#!/usr/bin/env node
// Seed script: creates the initial admin (and test) user in MongoDB.
import mongoose from 'mongoose';
import bcrypt from 'bcryptjs';
import dotenv from 'dotenv';

// Load environment variables from .env
dotenv.config();

// User model.
// NOTE(review): this schema presumably mirrors the one used by the backend's
// user service — keep the two in sync, or the seeded documents may not match
// what the application expects.
const UserSchema = new mongoose.Schema({
  username: {
    type: String,
    required: true,
    unique: true,
    trim: true
  },
  email: {
    type: String,
    required: true,
    unique: true,
    lowercase: true,
    trim: true
  },
  // bcrypt hash, never the plaintext password
  password: {
    type: String,
    required: true
  },
  role: {
    type: String,
    enum: ['admin', 'user', 'viewer'],
    default: 'user'
  },
  isActive: {
    type: Boolean,
    default: true
  },
  createdAt: {
    type: Date,
    default: Date.now
  }
});
const User = mongoose.model('User', UserSchema);
/**
 * Seed the database with an initial 'admin' account and a 'test' account.
 *
 * Connects to MONGODB_URI (default mongodb://localhost:27018/marketing_agent),
 * does nothing if an 'admin' user already exists, and always disconnects
 * before exiting. Passwords default to the original hard-coded values but
 * can be overridden via ADMIN_PASSWORD / TEST_PASSWORD env vars.
 *
 * BUG FIX: the original called process.exit(0) in `finally`, so the script
 * exited successfully even when seeding failed; failures now produce a
 * non-zero exit code so callers/CI can detect them.
 */
async function createAdminUser() {
  try {
    // Connect to the database.
    const mongoUri = process.env.MONGODB_URI || 'mongodb://localhost:27018/marketing_agent';
    await mongoose.connect(mongoUri);
    console.log('Connected to MongoDB');

    // Bail out if an admin account already exists.
    const existingAdmin = await User.findOne({ username: 'admin' });
    if (existingAdmin) {
      console.log('Admin user already exists');
      process.exit(0);
    }

    // Create the admin account. Allow the password to come from the
    // environment so deployments need not ship the default.
    const adminPlainPassword = process.env.ADMIN_PASSWORD || 'admin123456';
    const adminPasswordHash = await bcrypt.hash(adminPlainPassword, 10);
    const admin = new User({
      username: 'admin',
      email: 'admin@marketing-agent.com',
      password: adminPasswordHash,
      role: 'admin',
      isActive: true
    });
    await admin.save();
    console.log('Admin user created successfully:');
    console.log('Username: admin');
    console.log(`Password: ${adminPlainPassword}`);
    console.log('Please change the password after first login!');

    // Create a regular test account.
    const testPlainPassword = process.env.TEST_PASSWORD || 'test123456';
    const testPasswordHash = await bcrypt.hash(testPlainPassword, 10);
    const testUser = new User({
      username: 'test',
      email: 'test@marketing-agent.com',
      password: testPasswordHash,
      role: 'user',
      isActive: true
    });
    await testUser.save();
    console.log('\nTest user created successfully:');
    console.log('Username: test');
    console.log(`Password: ${testPlainPassword}`);
  } catch (error) {
    console.error('Error creating admin user:', error);
    // Surface the failure through the process exit status.
    process.exitCode = 1;
  } finally {
    await mongoose.disconnect();
    // Exit explicitly (mongoose can keep the event loop alive), preserving
    // any failure code set above.
    process.exit(process.exitCode ?? 0);
  }
}

createAdminUser();

View File

@@ -0,0 +1,144 @@
#!/bin/bash
# Startup script for Telegram Marketing Agent System
# This script starts all services in the correct order:
# infrastructure -> core -> analytics/AI -> gateway -> frontend.
echo "🚀 Starting Telegram Marketing Agent System..."

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Function to check if service is running
check_service() {
local service_name=$1
local port=$2
if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null ; then
echo -e "${GREEN}$service_name is running on port $port${NC}"
return 0
else
echo -e "${RED}$service_name is not running on port $port${NC}"
return 1
fi
}
# Function to wait for service
wait_for_service() {
local service_name=$1
local port=$2
local max_attempts=30
local attempt=0
echo -e "${YELLOW}Waiting for $service_name to start...${NC}"
while [ $attempt -lt $max_attempts ]; do
if check_service "$service_name" "$port"; then
return 0
fi
sleep 2
attempt=$((attempt + 1))
done
echo -e "${RED}Failed to start $service_name${NC}"
return 1
}
# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
echo -e "${RED}Docker is not running. Please start Docker first.${NC}"
exit 1
fi
# Start infrastructure services first
echo -e "\n${YELLOW}Starting infrastructure services...${NC}"
docker-compose up -d mongodb redis postgres rabbitmq elasticsearch
# Wait for infrastructure services
wait_for_service "MongoDB" 27017
wait_for_service "Redis" 6379
wait_for_service "PostgreSQL" 5432
wait_for_service "RabbitMQ" 5672
wait_for_service "Elasticsearch" 9200
# Start core services
echo -e "\n${YELLOW}Starting core services...${NC}"
docker-compose up -d orchestrator gramjs-adapter safety-guard compliance-guard
# Wait for core services
wait_for_service "Orchestrator" 3001
wait_for_service "GramJS Adapter" 3003
wait_for_service "Safety Guard" 3004
wait_for_service "Compliance Guard" 3006
# Start analytics and AI services
echo -e "\n${YELLOW}Starting analytics and AI services...${NC}"
docker-compose up -d analytics ab-testing claude-agent
# Wait for analytics and AI services
wait_for_service "Analytics" 3005
wait_for_service "A/B Testing" 3007
wait_for_service "Claude Agent" 3002
# Start API Gateway
echo -e "\n${YELLOW}Starting API Gateway...${NC}"
docker-compose up -d api-gateway
# Wait for API Gateway
wait_for_service "API Gateway" 3030
# Start Frontend
echo -e "\n${YELLOW}Starting Frontend...${NC}"
docker-compose up -d frontend
# Wait for Frontend
wait_for_service "Frontend" 3008
# Check overall system status
echo -e "\n${YELLOW}Checking system status...${NC}"
all_services_running=true
# Check all services
services=(
"MongoDB:27017"
"Redis:6379"
"PostgreSQL:5432"
"RabbitMQ:5672"
"Elasticsearch:9200"
"API Gateway:3030"
"Orchestrator:3001"
"Claude Agent:3002"
"GramJS Adapter:3003"
"Safety Guard:3004"
"Analytics:3005"
"Compliance Guard:3006"
"A/B Testing:3007"
"Frontend:3008"
)
for service in "${services[@]}"; do
IFS=':' read -r name port <<< "$service"
if ! check_service "$name" "$port"; then
all_services_running=false
fi
done
if [ "$all_services_running" = true ]; then
echo -e "\n${GREEN}✨ All services are running successfully!${NC}"
echo -e "\n${GREEN}Access the application at: http://localhost:3008${NC}"
echo -e "${GREEN}API Gateway available at: http://localhost:3030${NC}"
echo -e "${GREEN}API Documentation at: http://localhost:3030/api-docs${NC}"
else
echo -e "\n${RED}Some services failed to start. Check the logs with:${NC}"
echo "docker-compose logs [service-name]"
fi
echo -e "\n${YELLOW}Useful commands:${NC}"
echo "- View all logs: docker-compose logs -f"
echo "- View specific service logs: docker-compose logs -f [service-name]"
echo "- Stop all services: docker-compose down"
echo "- Restart a service: docker-compose restart [service-name]"
echo "- View service status: docker-compose ps"

View File

@@ -0,0 +1,354 @@
#!/bin/bash
# Telegram Marketing Agent System - Startup Script
# This script helps with initial setup and deployment
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Functions
# Colored output helpers; each takes a single message string as $1.
print_success() {
echo -e "${GREEN}$1${NC}"
}
print_error() {
echo -e "${RED}$1${NC}"
}
print_warning() {
echo -e "${YELLOW}$1${NC}"
}
# Plain informational line (no color; leading spaces are part of the format).
print_info() {
echo -e " $1"
}
# Check prerequisites
# Verify the host has the tooling this script needs. Docker and Docker
# Compose are hard requirements (exit 1 when missing); Node.js only warrants
# a warning because it is needed for local development, not for the
# containerized deployment.
check_prerequisites() {
echo "Checking prerequisites..."
# Check Docker
if command -v docker &> /dev/null; then
print_success "Docker is installed"
else
print_error "Docker is not installed. Please install Docker first."
exit 1
fi
# Check Docker Compose
# NOTE(review): only detects the standalone `docker-compose` binary; hosts
# with only the newer `docker compose` plugin fail this check — confirm
# whether the plugin should be accepted (the rest of this script also calls
# the standalone binary).
if command -v docker-compose &> /dev/null; then
print_success "Docker Compose is installed"
else
print_error "Docker Compose is not installed. Please install Docker Compose first."
exit 1
fi
# Check Node.js
if command -v node &> /dev/null; then
print_success "Node.js is installed"
else
print_warning "Node.js is not installed. It's required for local development."
fi
}
# Setup environment
# Ensure a .env file exists: copy it from .env.example on first run and
# optionally hand off to configure_api_keys. Exits when .env.example is
# missing, since the stack cannot start without configuration.
setup_environment() {
echo ""
echo "Setting up environment..."
if [ ! -f .env ]; then
if [ -f .env.example ]; then
cp .env.example .env
print_success "Created .env file from .env.example"
print_warning "Please edit .env file and add your API keys"
# Prompt for API keys
echo ""
read -p "Would you like to configure API keys now? (y/n) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
configure_api_keys
fi
else
print_error ".env.example file not found"
exit 1
fi
else
print_success ".env file already exists"
fi
}
# Configure API keys
# Interactively prompt for third-party credentials, write them into .env, and
# generate random JWT/encryption secrets.
#
# Fix: values are now written via set_env_var, which uses '|' as the sed
# delimiter and escapes '\', '|' and '&' in the replacement. The previous
# version used '/' as the delimiter, which broke whenever a value contained
# '/' — guaranteed for the base64 JWT secret (its alphabet includes '/' and
# '+') and common in real API keys.
configure_api_keys() {
    echo ""
    echo "Configuring API keys..."

    # set_env_var KEY VALUE — replace the "KEY=..." line in .env in place,
    # escaping the characters sed treats specially in a replacement.
    set_env_var() {
        local key=$1
        local value=$2
        value=${value//\\/\\\\}   # escape backslash first
        value=${value//|/\\|}     # escape the substitution delimiter
        value=${value//&/\\&}     # '&' would re-insert the matched text
        sed -i.bak "s|${key}=.*|${key}=${value}|" .env
    }

    # Anthropic API Key
    read -p "Enter your Anthropic API key: " anthropic_key
    if [ ! -z "$anthropic_key" ]; then
        set_env_var ANTHROPIC_API_KEY "$anthropic_key"
        print_success "Anthropic API key configured"
    fi
    # OpenAI API Key
    read -p "Enter your OpenAI API key: " openai_key
    if [ ! -z "$openai_key" ]; then
        set_env_var OPENAI_API_KEY "$openai_key"
        print_success "OpenAI API key configured"
    fi
    # Telegram API credentials
    read -p "Enter your Telegram API ID: " telegram_api_id
    if [ ! -z "$telegram_api_id" ]; then
        set_env_var TELEGRAM_API_ID "$telegram_api_id"
        print_success "Telegram API ID configured"
    fi
    read -p "Enter your Telegram API Hash: " telegram_api_hash
    if [ ! -z "$telegram_api_hash" ]; then
        set_env_var TELEGRAM_API_HASH "$telegram_api_hash"
        print_success "Telegram API Hash configured"
    fi
    # Generate secure keys
    echo ""
    print_info "Generating secure keys..."
    jwt_secret=$(openssl rand -base64 32)
    set_env_var JWT_SECRET "$jwt_secret"
    print_success "JWT secret generated"
    encryption_key=$(openssl rand -hex 32)
    set_env_var ENCRYPTION_KEY "$encryption_key"
    print_success "Encryption key generated"
    # Clean up sed backup files
    rm -f .env.bak
}
# Build Docker images
# Fix: the script runs under `set -e`, so a failing `docker-compose build`
# aborted execution before the old `if [ $? -eq 0 ]` check ever ran — the
# error branch was dead code. Testing the command directly in `if` suspends
# errexit for that command and makes the failure path reachable.
build_images() {
    echo ""
    echo "Building Docker images..."
    if docker-compose build; then
        print_success "Docker images built successfully"
    else
        print_error "Failed to build Docker images"
        exit 1
    fi
}
# Start infrastructure services
# Bring up the data stores and the message broker first so the application
# services find them ready when they boot.
start_infrastructure() {
echo ""
echo "Starting infrastructure services..."
docker-compose up -d postgres mongodb redis rabbitmq elasticsearch
# Wait for services to be ready
# NOTE(review): fixed 30s sleep, not an actual readiness probe — slow hosts
# may need longer; consider polling container health instead.
print_info "Waiting for services to be ready..."
sleep 30
# Check service health
docker-compose ps
print_success "Infrastructure services started"
}
# Initialize databases
# Create the MongoDB indexes the services rely on: unique business IDs, a
# 30-day TTL on sessions (expireAfterSeconds: 2592000), and query indexes
# for messages/events/experiments.
# Fix: `docker exec -it` was replaced with plain `docker exec`; the -t flag
# allocates a TTY and makes the command fail when the script runs without
# one (CI, cron, piped input). No interactive input is needed here.
initialize_databases() {
    echo ""
    echo "Initializing databases..."
    # MongoDB indexes
    print_info "Creating MongoDB indexes..."
    docker exec marketing_mongodb mongosh marketing_agent --eval '
db.tasks.createIndex({ taskId: 1 }, { unique: true });
db.campaigns.createIndex({ campaignId: 1 }, { unique: true });
db.sessions.createIndex({ sessionId: 1 }, { unique: true });
db.sessions.createIndex({ updatedAt: 1 }, { expireAfterSeconds: 2592000 });
db.messages.createIndex({ messageId: 1 }, { unique: true });
db.messages.createIndex({ chatId: 1, timestamp: -1 });
db.events.createIndex({ eventId: 1 }, { unique: true });
db.events.createIndex({ timestamp: -1 });
db.experiments.createIndex({ experimentId: 1 }, { unique: true });
db.variants.createIndex({ experimentId: 1 });
db.assignments.createIndex({ experimentId: 1, userId: 1 });
print("Indexes created successfully");
'
    print_success "Database initialization completed"
}
# Start all services
# Launch every service defined in docker-compose, wait a fixed grace period,
# then spot-check the API Gateway's /health endpoint.
start_all_services() {
echo ""
echo "Starting all services..."
docker-compose up -d
# Wait for services to be ready
# NOTE(review): fixed 45s sleep rather than a readiness poll.
print_info "Waiting for all services to be ready..."
sleep 45
# Check service health
docker-compose ps
# Check API Gateway health
# NOTE(review): this script uses port 3000 for the gateway while other
# deployment scripts in this dump use 3030 — confirm which applies here.
if curl -s http://localhost:3000/health > /dev/null; then
print_success "API Gateway is healthy"
else
print_warning "API Gateway health check failed"
fi
}
# Create admin user
# Interactively prompt for credentials and register the first admin account
# through the API Gateway's auth endpoint. The password prompt uses -s so it
# is not echoed.
create_admin_user() {
echo ""
read -p "Would you like to create an admin user? (y/n) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
read -p "Enter admin username: " admin_username
read -s -p "Enter admin password: " admin_password
echo ""
read -p "Enter admin email: " admin_email
# Create admin user via API
# NOTE(review): the credentials are interpolated straight into the JSON
# body; quotes/backslashes in them produce invalid JSON — consider jq.
# Matching the response on the substring "success" is also fragile;
# verify against the API's actual response shape.
response=$(curl -s -X POST http://localhost:3000/api/v1/auth/register \
-H "Content-Type: application/json" \
-d "{
\"username\": \"$admin_username\",
\"password\": \"$admin_password\",
\"email\": \"$admin_email\"
}")
if [[ $response == *"success"* ]]; then
print_success "Admin user created successfully"
else
print_error "Failed to create admin user"
echo "Response: $response"
fi
fi
}
# Show service URLs
# Print the endpoints exposed by the compose stack once it is up. Output is
# byte-identical to the previous echo-per-line version; a quoted heredoc is
# used so no expansion occurs.
show_service_urls() {
    cat <<'EOF'

===================================
Service URLs:
===================================
API Gateway: http://localhost:3000
API Documentation: http://localhost:3000/api-docs
RabbitMQ Management: http://localhost:15672 (admin/admin)
Grafana: http://localhost:3001 (admin/admin)
Prometheus: http://localhost:9090
===================================
EOF
}
# Main menu
# Interactive menu shown when the script is run without CLI flags. Option 1
# runs the same sequence as the --full flag. Invalid input re-displays the
# menu by calling show_menu recursively (fine for interactive use).
show_menu() {
echo ""
echo "Telegram Marketing Agent System - Startup Script"
echo "================================================"
echo "1) Full setup (recommended for first time)"
echo "2) Start all services"
echo "3) Stop all services"
echo "4) View logs"
echo "5) Check service health"
echo "6) Initialize databases only"
echo "7) Create admin user"
echo "8) Configure API keys"
echo "9) Exit"
echo ""
read -p "Select an option: " choice
case $choice in
1)
# Full first-time setup: prerequisites -> env -> build -> run -> admin user.
check_prerequisites
setup_environment
build_images
start_infrastructure
initialize_databases
start_all_services
create_admin_user
show_service_urls
;;
2)
docker-compose up -d
show_service_urls
;;
3)
docker-compose down
print_success "All services stopped"
;;
4)
# Follows logs until interrupted (Ctrl-C returns to the shell, not the menu).
docker-compose logs -f
;;
5)
docker-compose ps
echo ""
curl -s http://localhost:3000/health/services | jq '.' || echo "API Gateway not accessible"
;;
6)
initialize_databases
;;
7)
create_admin_user
;;
8)
configure_api_keys
;;
9)
exit 0
;;
*)
print_error "Invalid option"
show_menu
;;
esac
}
# Run the script
# CLI entry point: map known flags to the corresponding actions and fall back
# to the interactive menu when no flag is given.
# NOTE(review): an unrecognized flag also silently opens the menu instead of
# reporting an error — confirm whether that is intended.
if [ "$1" == "--help" ] || [ "$1" == "-h" ]; then
echo "Usage: ./startup.sh [option]"
echo ""
echo "Options:"
echo " --full Run full setup"
echo " --start Start all services"
echo " --stop Stop all services"
echo " --logs View logs"
echo " --health Check service health"
echo ""
exit 0
elif [ "$1" == "--full" ]; then
# Same sequence as menu option 1.
check_prerequisites
setup_environment
build_images
start_infrastructure
initialize_databases
start_all_services
create_admin_user
show_service_urls
elif [ "$1" == "--start" ]; then
docker-compose up -d
show_service_urls
elif [ "$1" == "--stop" ]; then
docker-compose down
print_success "All services stopped"
elif [ "$1" == "--logs" ]; then
docker-compose logs -f
elif [ "$1" == "--health" ]; then
docker-compose ps
echo ""
curl -s http://localhost:3000/health/services | jq '.' || echo "API Gateway not accessible"
else
show_menu
fi

View File

@@ -0,0 +1,189 @@
#!/usr/bin/env node
/**
* Script to update all MongoDB models to include tenantId field
* This script adds tenant isolation to all existing models
*/
import { promises as fs } from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Models that should NOT have tenantId (system-wide models)
// These collections are shared across all tenants and must stay global.
const EXCLUDED_MODELS = [
'Tenant.js',
'Language.js', // System languages are shared
];
// Models that need special handling
// Key = model file name; value = human-readable reason, printed when skipped.
const SPECIAL_MODELS = {
'User.js': 'Already updated with tenantId',
'Role.js': 'System roles are shared across tenants',
};
async function updateModel(filePath) {
const fileName = path.basename(filePath);
// Skip excluded models
if (EXCLUDED_MODELS.includes(fileName)) {
console.log(`⏩ Skipping ${fileName} (excluded)`);
return;
}
// Skip special models
if (SPECIAL_MODELS[fileName]) {
console.log(`⏩ Skipping ${fileName} (${SPECIAL_MODELS[fileName]})`);
return;
}
try {
let content = await fs.readFile(filePath, 'utf8');
// Check if model already has tenantId
if (content.includes('tenantId:')) {
console.log(`${fileName} already has tenantId`);
return;
}
// Find the schema definition
const schemaPattern = /const\s+\w+Schema\s*=\s*new\s+(?:mongoose\.)?Schema\s*\(\s*\{/;
const match = content.match(schemaPattern);
if (!match) {
console.log(`⚠️ ${fileName} - Could not find schema definition`);
return;
}
// Insert tenantId field after schema opening
const insertPosition = match.index + match[0].length;
const tenantIdField = `
// Multi-tenant support
tenantId: {
type: mongoose.Schema.Types.ObjectId,
ref: 'Tenant',
required: true,
index: true
},`;
content = content.slice(0, insertPosition) + tenantIdField + content.slice(insertPosition);
// Update indexes to include tenantId
// Find existing index definitions
const indexPattern = /(\w+Schema\.index\s*\(\s*\{[^}]+\}\s*(?:,\s*\{[^}]+\})?\s*\);?)/g;
let indexMatches = [...content.matchAll(indexPattern)];
if (indexMatches.length > 0) {
// Add compound indexes with tenantId
let additionalIndexes = '\n\n// Multi-tenant indexes';
indexMatches.forEach(match => {
const indexDef = match[1];
// Extract the index fields
const fieldsMatch = indexDef.match(/\{([^}]+)\}/);
if (fieldsMatch) {
const fields = fieldsMatch[1].trim();
// Skip if already includes tenantId
if (!fields.includes('tenantId')) {
// Create compound index with tenantId
const newIndex = indexDef.replace(/\{([^}]+)\}/, '{ tenantId: 1, $1 }');
additionalIndexes += '\n' + newIndex;
}
}
});
// Find where to insert the new indexes (after existing indexes)
const lastIndexMatch = indexMatches[indexMatches.length - 1];
const insertPos = lastIndexMatch.index + lastIndexMatch[0].length;
content = content.slice(0, insertPos) + additionalIndexes + content.slice(insertPos);
} else {
// No existing indexes, add basic tenantId index after schema definition
const schemaEndPattern = /}\s*(?:,\s*\{[^}]+\})?\s*\);/;
const schemaEndMatch = content.match(schemaEndPattern);
if (schemaEndMatch) {
const insertPos = schemaEndMatch.index + schemaEndMatch[0].length;
const basicIndex = '\n\n// Multi-tenant index\n' +
fileName.replace('.js', '') + 'Schema.index({ tenantId: 1 });';
content = content.slice(0, insertPos) + basicIndex + content.slice(insertPos);
}
}
// Update unique indexes to be unique within tenant
content = content.replace(
/index\s*\(\s*\{([^}]+)\}\s*,\s*\{\s*unique:\s*true\s*\}\s*\)/g,
(match, fields) => {
if (!fields.includes('tenantId')) {
return `index({ tenantId: 1, ${fields} }, { unique: true })`;
}
return match;
}
);
// Save the updated file
await fs.writeFile(filePath, content, 'utf8');
console.log(`✨ Updated ${fileName} with tenantId support`);
} catch (error) {
console.error(`❌ Error updating ${filePath}:`, error.message);
}
}
/**
 * Recursively collect every model file under `dir`.
 *
 * A file counts as a model when it ends in `.js` and lives inside a
 * directory whose path contains a segment named exactly `models`.
 * Directories whose name contains `node_modules`, starts with `.`, or is
 * `scripts`/`frontend` are never descended into.
 *
 * Fix: the models-directory test previously used the hard-coded string
 * '/models', which never matches on Windows (separator '\') and also matched
 * unrelated names such as 'modelsArchive'. It now compares whole path
 * segments via `path.sep`.
 *
 * @param {string} dir root directory to scan
 * @returns {Promise<string[]>} absolute paths of model files
 */
async function findModels(dir) {
  const models = [];

  // True when any segment of the path is exactly 'models'.
  const isModelsDir = (p) => p.split(path.sep).includes('models');

  async function walk(currentDir) {
    const entries = await fs.readdir(currentDir, { withFileTypes: true });
    for (const entry of entries) {
      const fullPath = path.join(currentDir, entry.name);
      if (entry.isDirectory()) {
        // Skip node_modules and other non-service directories
        if (!entry.name.includes('node_modules') &&
            !entry.name.startsWith('.') &&
            entry.name !== 'scripts' &&
            entry.name !== 'frontend') {
          await walk(fullPath);
        }
      } else if (entry.isFile() && entry.name.endsWith('.js')) {
        // Only .js files that live under a models/ directory are models.
        if (isModelsDir(currentDir)) {
          models.push(fullPath);
        }
      }
    }
  }

  await walk(dir);
  return models;
}
/**
 * Script entry point: locate every model file under services/ and run the
 * tenantId migration on each one sequentially, then print the manual
 * follow-up steps. Exits with status 1 if anything throws.
 */
async function main() {
  console.log('🔍 Finding all model files...');

  const projectRoot = path.join(__dirname, '..');
  const models = await findModels(path.join(projectRoot, 'services'));
  console.log(`\n📋 Found ${models.length} model files\n`);

  // Rewrite files one at a time so console output stays readable and no two
  // files are ever written concurrently.
  for (const modelPath of models) {
    await updateModel(modelPath);
  }

  console.log('\n✅ Model update complete!');
  console.log('\n⚠ Important next steps:');
  console.log('1. Review the changes to ensure they are correct');
  console.log('2. Update all queries to include tenantId filtering');
  console.log('3. Update all create operations to include tenantId');
  console.log('4. Test thoroughly to ensure tenant isolation works');
}

// Run the script
main().catch((error) => {
  console.error('❌ Script failed:', error);
  process.exit(1);
});

View File

@@ -0,0 +1,141 @@
#!/bin/bash
# Script to wait for all services to be ready
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Function to print colored output
# Each helper prefixes the message ($1) with a colored status tag.
print_status() {
echo -e "${GREEN}[WAIT]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
# Default timeout
# Overall deadline for the whole wait; override by exporting TIMEOUT=<seconds>.
TIMEOUT=${TIMEOUT:-300} # 5 minutes default
# Script start time (epoch seconds); check_timeout measures elapsed from here.
START_TIME=$(date +%s)
# Abort the entire script once the global TIMEOUT deadline is exceeded.
# Reads START_TIME/TIMEOUT set above; called from every polling loop.
check_timeout() {
    local now
    now=$(date +%s)
    if (( now - START_TIME > TIMEOUT )); then
        print_error "Timeout exceeded ($TIMEOUT seconds)"
        exit 1
    fi
}
# Function to wait for a service to be healthy
# $1 = display name, $2 = shell command whose success means "ready".
# Polls every 5s, up to 60 attempts (~300s), and also honors the global
# TIMEOUT via check_timeout (which exits the whole script). Returns 0 when
# ready, 1 when it gives up.
# NOTE(review): the probe command runs through `eval` — callers must pass
# trusted strings only.
wait_for_service() {
local service_name=$1
local check_command=$2
local max_attempts=60
local attempt=0
print_status "Waiting for $service_name to be ready..."
while [ $attempt -lt $max_attempts ]; do
check_timeout
if eval "$check_command" &> /dev/null; then
print_status "$service_name is ready"
return 0
fi
attempt=$((attempt + 1))
sleep 5
done
print_error "$service_name failed to become ready"
return 1
}
# Function to wait for HTTP endpoint
# $1 = display name, $2 = URL. Polls every 5s (up to 60 attempts) until the
# endpoint answers with HTTP status 200; any other status — including
# redirects — counts as not ready. Honors the global TIMEOUT via
# check_timeout. Returns 0 when ready, 1 when it gives up.
wait_for_http() {
local service_name=$1
local url=$2
local max_attempts=60
local attempt=0
print_status "Waiting for $service_name HTTP endpoint to be ready..."
while [ $attempt -lt $max_attempts ]; do
check_timeout
# curl prints only the status code; a failed connection yields "000".
if curl -s -o /dev/null -w "%{http_code}" "$url" | grep -q "200"; then
print_status "$service_name HTTP endpoint is ready"
return 0
fi
attempt=$((attempt + 1))
sleep 5
done
print_error "$service_name HTTP endpoint failed to become ready"
return 1
}
# Main execution
print_info "Waiting for services to be ready (timeout: ${TIMEOUT}s)..."
# Wait for databases
# NOTE(review): container names and the REDIS_PASSWORD/ELASTIC_PASSWORD
# variables are assumed to be supplied by the environment/compose file —
# confirm. The \${...} escapes defer expansion to the eval inside
# wait_for_service.
wait_for_service "MongoDB" "docker exec marketing_mongodb mongosh --eval 'db.adminCommand({ping: 1})' --quiet"
wait_for_service "Redis" "docker exec marketing_redis redis-cli -a \${REDIS_PASSWORD} ping"
wait_for_service "RabbitMQ" "docker exec marketing_rabbitmq rabbitmq-diagnostics -q ping"
# Wait for Elasticsearch (optional)
if docker ps | grep -q marketing_elasticsearch; then
wait_for_service "Elasticsearch" "curl -s -u elastic:\${ELASTIC_PASSWORD} http://localhost:9201/_cluster/health"
fi
# Wait for core services
# All core services are probed through the API Gateway's routes on 3030.
wait_for_http "API Gateway" "http://localhost:3030/health"
wait_for_http "Orchestrator" "http://localhost:3030/api/orchestrator/health"
wait_for_http "Claude Agent" "http://localhost:3030/api/claude/health"
wait_for_http "GramJS Adapter" "http://localhost:3030/api/telegram/health"
wait_for_http "Safety Guard" "http://localhost:3030/api/safety/health"
wait_for_http "Analytics" "http://localhost:3030/api/analytics/health"
# Wait for optional services
# Only probe services whose containers are actually running; failures here
# are non-fatal (warning only).
services=("compliance" "ab-testing" "workflow" "webhook" "template" "i18n" "user-management" "scheduler" "logging")
for service in "${services[@]}"; do
if docker ps | grep -q "marketing_${service}"; then
# NOTE(review): ${service^} (capitalize first letter) requires bash >= 4;
# macOS's stock bash 3.2 errors on this line — confirm target platforms.
wait_for_http "${service^}" "http://localhost:3030/api/${service}/health" || print_warning "${service^} service not responding (non-critical)"
fi
done
# Calculate total time
END_TIME=$(date +%s)
TOTAL_TIME=$((END_TIME - START_TIME))
print_status "All services are ready! (took ${TOTAL_TIME}s)"
# Show service status
# NOTE(review): uses the `docker compose` plugin syntax while other scripts
# in this repo call the standalone `docker-compose` — confirm consistency.
print_info "Service Status:"
docker compose ps
# Show URLs
echo ""
print_info "Service URLs:"
print_info "- API Gateway: http://localhost:3030"
print_info "- Frontend: http://localhost:3008"
print_info "- Grafana: http://localhost:3032 (admin/admin)"
print_info "- RabbitMQ: http://localhost:15673 (admin/admin)"