import cron from 'node-cron';
import { backupService } from './backup.js';
import { logger } from '../utils/logger.js';
import { cache } from '../utils/cache.js';

/**
 * Scheduler service for automated tasks
 */
class SchedulerService {
  constructor() {
    // Registered jobs keyed by name; each entry holds { task, schedule }.
    this.jobs = new Map();
  }

  /**
   * Initialize scheduler
   */
  async initialize() {
    logger.info('Initializing scheduler service');

    // Load saved schedules
    await this.loadSchedules();

    // Start default schedules
    this.startDefaultSchedules();
  }

  /**
   * Load saved schedules from cache
   */
  async loadSchedules() {
    try {
      // Load backup schedule
      const backupConfig = await cache.get('backup:schedule:config');
      if (backupConfig) {
        const config = JSON.parse(backupConfig);
        if (config.enabled && config.schedule) {
          this.scheduleBackup(config);
        }
      }
    } catch (error) {
      logger.error('Failed to load schedules', error);
    }
  }

  /**
   * Start default schedules
   */
  startDefaultSchedules() {
    // Daily health check report at 8 AM
    this.scheduleJob('daily-health-report', '0 8 * * *', async () => {
      await this.generateHealthReport();
    });

    // Hourly metrics aggregation
    this.scheduleJob('metrics-aggregation', '0 * * * *', async () => {
      await this.aggregateMetrics();
    });

    // Clean up old logs daily at 2 AM
    this.scheduleJob('log-cleanup', '0 2 * * *', async () => {
      await this.cleanupOldLogs();
    });

    // Session cleanup every 6 hours
    this.scheduleJob('session-cleanup', '0 */6 * * *', async () => {
      await this.cleanupExpiredSessions();
    });
  }
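
  // Cron reference for the expressions above (node-cron uses the standard
  // five-field format: minute hour day-of-month month day-of-week, with an
  // optional leading seconds field). For example, '0 */6 * * *' fires at
  // minute 0 of every sixth hour, in the server's local time zone unless a
  // timezone option is passed to cron.schedule().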

  /**
   * Schedule a backup job
   */
  scheduleBackup(config) {
    const { schedule, encrypt, uploadToCloud, retentionDays } = config;

    this.scheduleJob('scheduled-backup', schedule, async () => {
      logger.info('Starting scheduled backup');

      try {
        // Check if backup is already running
        const isRunning = await cache.get('backup:running');
        if (isRunning) {
          logger.warn('Scheduled backup skipped - backup already running');
          return;
        }

        // Create backup
        const result = await backupService.createFullBackup({
          description: 'Scheduled backup',
          encrypt,
          uploadToCloud,
          initiatedBy: 'scheduler'
        });

        logger.info('Scheduled backup completed', result);

        // Clean up old backups if retention is set
        if (retentionDays) {
          const cleanupResult = await backupService.cleanupOldBackups(retentionDays);
          logger.info('Backup cleanup completed', cleanupResult);
        }
      } catch (error) {
        logger.error('Scheduled backup failed', error);

        // Send alert
        await this.sendAlert({
          type: 'backup_failure',
          severity: 'critical',
          message: 'Scheduled backup failed',
          error: error.message
        });
      }
    });
  }
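
  // Illustrative shape of the JSON stored under 'backup:schedule:config'
  // (values are examples; the fields match those read here and in
  // loadSchedules()):
  //
  //   {
  //     "enabled": true,
  //     "schedule": "0 3 * * *",
  //     "encrypt": true,
  //     "uploadToCloud": false,
  //     "retentionDays": 14
  //   }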

  /**
   * Schedule a job
   */
  scheduleJob(name, schedule, task) {
    // Stop existing job if any
    if (this.jobs.has(name)) {
      this.jobs.get(name).task.stop();
    }

    // Validate cron expression
    if (!cron.validate(schedule)) {
      throw new Error(`Invalid cron expression: ${schedule}`);
    }

    // Create and start job
    const job = cron.schedule(schedule, async () => {
      const startTime = Date.now();

      try {
        logger.info(`Running scheduled job: ${name}`);
        await task();

        const duration = Date.now() - startTime;
        logger.info(`Scheduled job completed: ${name}`, { duration });

        // Record job execution
        await this.recordJobExecution(name, 'success', duration);
      } catch (error) {
        const duration = Date.now() - startTime;
        logger.error(`Scheduled job failed: ${name}`, error);

        // Record job failure
        await this.recordJobExecution(name, 'failure', duration, error.message);
      }
    });

    // Keep the schedule alongside the task so getStatus() can report it;
    // node-cron tasks do not expose their own expression or next-run time.
    this.jobs.set(name, { task: job, schedule });
    logger.info(`Scheduled job registered: ${name}`, { schedule });
  }
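
  // Example (illustrative; buildWeeklyDigest is a hypothetical task function):
  //
  //   schedulerService.scheduleJob('weekly-digest', '30 7 * * 1', async () => {
  //     await buildWeeklyDigest();
  //   });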

  /**
   * Stop a scheduled job
   */
  stopJob(name) {
    const entry = this.jobs.get(name);
    if (entry) {
      entry.task.stop();
      this.jobs.delete(name);
      logger.info(`Scheduled job stopped: ${name}`);
    }
  }

  /**
   * Record job execution
   */
  async recordJobExecution(jobName, status, duration, error = null) {
    const execution = {
      jobName,
      status,
      duration,
      error,
      timestamp: new Date().toISOString()
    };

    await cache.lpush('scheduler:executions', JSON.stringify(execution));
    await cache.ltrim('scheduler:executions', 0, 999); // Keep last 1000 executions
  }
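
  // Recent executions can be read back for a dashboard (illustrative):
  //
  //   const raw = await cache.lrange('scheduler:executions', 0, 9);
  //   const recent = raw.map((entry) => JSON.parse(entry));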

  /**
   * Generate health report
   */
  async generateHealthReport() {
    logger.info('Generating daily health report');

    try {
      // Collect system metrics
      const metrics = await this.collectSystemMetrics();

      // Get service statuses
      const services = await this.checkAllServices();

      // Get recent errors
      const errors = await this.getRecentErrors();

      // Get backup status
      const backupStatus = await this.getBackupStatus();

      const report = {
        date: new Date().toISOString(),
        system: metrics,
        services,
        errors,
        backups: backupStatus,
        alerts: await this.getActiveAlerts()
      };

      // Store report
      await cache.set(
        `health-report:${new Date().toISOString().split('T')[0]}`,
        JSON.stringify(report),
        'EX',
        30 * 24 * 60 * 60 // Keep for 30 days
      );

      // Send report if configured
      if (process.env.HEALTH_REPORT_EMAIL) {
        await this.sendHealthReport(report);
      }

      logger.info('Health report generated');
    } catch (error) {
      logger.error('Failed to generate health report', error);
    }
  }

  /**
   * Aggregate metrics
   */
  async aggregateMetrics() {
    logger.info('Aggregating hourly metrics');

    try {
      // Get current hour
      const now = new Date();
      const hour = now.toISOString().substring(0, 13);

      // Aggregate different metric types
      const aggregations = await Promise.all([
        this.aggregateHttpMetrics(hour),
        this.aggregateBusinessMetrics(hour),
        this.aggregateQueueMetrics(hour),
        this.aggregateSystemMetrics(hour)
      ]);

      const [http, business, queue, system] = aggregations;

      const hourlyMetrics = {
        hour,
        http,
        business,
        queue,
        system,
        timestamp: now.toISOString()
      };

      // Store aggregated metrics
      await cache.set(
        `metrics:hourly:${hour}`,
        JSON.stringify(hourlyMetrics),
        'EX',
        7 * 24 * 60 * 60 // Keep for 7 days
      );

      logger.info('Metrics aggregation completed', { hour });
    } catch (error) {
      logger.error('Failed to aggregate metrics', error);
    }
  }
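
  // The hour bucket is the first 13 characters of the ISO timestamp, so keys
  // look like 'metrics:hourly:2024-05-01T14' for 14:00-14:59 UTC
  // (toISOString always reports UTC).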

  /**
   * Clean up old logs
   */
  async cleanupOldLogs() {
    logger.info('Cleaning up old logs');

    try {
      const cutoffDate = new Date();
      cutoffDate.setDate(cutoffDate.getDate() - 30); // 30 days retention

      // Clean up different log types
      const logKeys = [
        'app:logs',
        'system:alerts',
        'monitoring:events',
        'scheduler:executions'
      ];

      let totalCleaned = 0;

      for (const key of logKeys) {
        const logs = await cache.lrange(key, 0, -1);
        const toKeep = [];

        for (const log of logs) {
          try {
            const parsed = JSON.parse(log);
            const logDate = new Date(parsed.timestamp || parsed.date);

            if (logDate > cutoffDate) {
              toKeep.push(log);
            }
          } catch {
            // Skip invalid entries
          }
        }

        if (toKeep.length < logs.length) {
          // Replace with filtered logs. Note: del + rpush is not atomic, so
          // entries written between the two calls could be lost; a MULTI/EXEC
          // transaction would close that window.
          await cache.del(key);
          if (toKeep.length > 0) {
            await cache.rpush(key, ...toKeep);
          }

          totalCleaned += logs.length - toKeep.length;
        }
      }

      logger.info('Log cleanup completed', {
        totalCleaned,
        cutoffDate: cutoffDate.toISOString()
      });
    } catch (error) {
      logger.error('Failed to cleanup logs', error);
    }
  }

  /**
   * Clean up expired sessions
   */
  async cleanupExpiredSessions() {
    logger.info('Cleaning up expired sessions');

    try {
      // Get all session keys. Note: if the cache wrapper backs this with
      // Redis KEYS, it blocks the server while scanning; an incremental
      // SCAN would be safer on large keyspaces.
      const sessionKeys = await cache.keys('sess:*');
      let cleaned = 0;

      for (const key of sessionKeys) {
        const ttl = await cache.ttl(key);

        // Remove sessions that are already gone (-2) or have no TTL set (-1)
        if (ttl === -2 || ttl === -1) {
          await cache.del(key);
          cleaned++;
        }
      }

      // Clean up orphaned user sessions
      const userSessionKeys = await cache.keys('user:*:sessions');

      for (const key of userSessionKeys) {
        const sessions = await cache.smembers(key);
        const validSessions = [];

        for (const sessionId of sessions) {
          const exists = await cache.exists(`sess:${sessionId}`);
          if (exists) {
            validSessions.push(sessionId);
          }
        }

        if (validSessions.length < sessions.length) {
          await cache.del(key);
          if (validSessions.length > 0) {
            await cache.sadd(key, ...validSessions);
          }
        }
      }

      logger.info('Session cleanup completed', { cleaned });
    } catch (error) {
      logger.error('Failed to cleanup sessions', error);
    }
  }

  /**
   * Send alert
   */
  async sendAlert(alert) {
    // Store alert
    await cache.lpush('system:alerts', JSON.stringify({
      ...alert,
      timestamp: new Date().toISOString()
    }));
    await cache.ltrim('system:alerts', 0, 99);

    // Send notifications based on severity
    // Implementation depends on notification services
    logger.error('ALERT', alert);
  }
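
  // Alert objects follow the shape used by callers in this file, e.g. the
  // backup failure path above:
  //
  //   { type: 'backup_failure', severity: 'critical',
  //     message: 'Scheduled backup failed', error: '<error message>' }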

  /**
   * Helper methods for health report
   */
  async collectSystemMetrics() {
    // Implement system metrics collection
    return {
      uptime: process.uptime(),
      memory: process.memoryUsage(),
      cpu: process.cpuUsage()
    };
  }

  async checkAllServices() {
    // Implement service health checks
    return {};
  }

  async getRecentErrors() {
    // Get recent errors from logs
    const errors = await cache.lrange('app:logs', 0, 99);
    return errors
      .map(e => {
        try {
          return JSON.parse(e);
        } catch {
          return null;
        }
      })
      .filter(e => e && e.level === 'error')
      .slice(0, 10);
  }

  async getBackupStatus() {
    const lastBackup = await cache.lindex('backup:history', 0);
    return lastBackup ? JSON.parse(lastBackup) : null;
  }

  async getActiveAlerts() {
    const alerts = await cache.lrange('system:alerts', 0, 19);
    return alerts.map(a => JSON.parse(a));
  }

  async sendHealthReport(report) {
    // Implement email sending
    logger.info('Health report would be sent', {
      to: process.env.HEALTH_REPORT_EMAIL
    });
  }

  /**
   * Metrics aggregation helpers
   */
  async aggregateHttpMetrics(hour) {
    // Implement HTTP metrics aggregation
    return {
      requests: 0,
      errors: 0,
      avgResponseTime: 0
    };
  }

  async aggregateBusinessMetrics(hour) {
    // Implement business metrics aggregation
    return {
      messagesSent: 0,
      campaignsActive: 0,
      successRate: 0
    };
  }

  async aggregateQueueMetrics(hour) {
    // Implement queue metrics aggregation
    return {
      processed: 0,
      failed: 0,
      avgProcessingTime: 0
    };
  }

  async aggregateSystemMetrics(hour) {
    // Implement system metrics aggregation
    return {
      avgCpu: 0,
      avgMemory: 0,
      avgLoad: 0
    };
  }

  /**
   * Get scheduler status
   */
  getStatus() {
    const jobs = [];

    for (const [name, entry] of this.jobs) {
      // node-cron's ScheduledTask exposes neither a public running flag nor
      // a next-run time, so report the registered cron expression instead.
      jobs.push({
        name,
        schedule: entry.schedule
      });
    }

    return {
      jobs,
      totalJobs: jobs.length
    };
  }
}

// Export singleton instance
export const schedulerService = new SchedulerService();
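
/*
 * Usage sketch (illustrative; the import path assumes this module lives at
 * services/scheduler.js relative to the entry point):
 *
 *   import { schedulerService } from './services/scheduler.js';
 *
 *   await schedulerService.initialize();
 *   console.log(schedulerService.getStatus());
 *
 *   // Stop an individual job, e.g. during shutdown:
 *   schedulerService.stopJob('metrics-aggregation');
 */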