Initial commit: Telegram Management System
Some checks failed
Deploy / deploy (push) Has been cancelled
Full-stack web application for Telegram management
- Frontend: Vue 3 + Vben Admin
- Backend: NestJS
- Features: user management, group broadcast, statistics

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
356
marketing-agent/services/logging/src/routes/dashboard.js
Normal file
@@ -0,0 +1,356 @@
import express from 'express';
import { logger } from '../utils/logger.js';

// Get service instances from app locals
const getLogStorage = (req) => req.app.locals.logStorage;
const getLogCollector = (req) => req.app.locals.logCollector;
const getLogAnalyzer = (req) => req.app.locals.logAnalyzer;
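// Note: services are read from app.locals on each request rather than imported
// at module load, so this router can be mounted before the services finish
// initializing.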

const router = express.Router();

/**
 * Get dashboard overview
 * Query: timeRange - Elasticsearch date-math window, e.g. '1h', '24h' (default '24h')
 */
router.get('/overview', async (req, res) => {
  try {
    const { timeRange = '24h' } = req.query;

    // Get log statistics
    const logStats = await getLogStorage(req).getStats(null, timeRange);

    // Get collector stats
    const collectorStats = await getLogCollector(req).getQueueStats();

    // Get the most recent error analysis from the metrics index
    const recentAnalysis = await getLogStorage(req).search({
      index: getLogStorage(req).indices.metrics,
      query: {
        bool: {
          must: [
            { term: { metric: 'error_analysis' } },
            { range: { '@timestamp': { gte: `now-${timeRange}` } } }
          ]
        }
      },
      sort: [{ '@timestamp': { order: 'desc' } }],
      size: 1
    });

    const overview = {
      timestamp: new Date().toISOString(),
      period: timeRange,
      summary: {
        totalLogs: logStats.total_count.value,
        errorRate: calculateErrorRate(logStats),
        topServices: logStats.by_service.buckets.slice(0, 5).map(b => ({
          name: b.key,
          count: b.doc_count
        })),
        logLevels: Object.fromEntries(
          logStats.by_level.buckets.map(b => [b.key, b.doc_count])
        )
      },
      queues: collectorStats,
      timeline: logStats.over_time.buckets.map(bucket => ({
        timestamp: bucket.key_as_string,
        total: bucket.doc_count,
        levels: Object.fromEntries(
          bucket.by_level.buckets.map(b => [b.key, b.doc_count])
        )
      })),
      recentIssues: recentAnalysis.hits.length > 0 ?
        recentAnalysis.hits[0].analysis : null
    };

    res.json({
      success: true,
      data: overview
    });
  } catch (error) {
    logger.error('Failed to get dashboard overview:', error);
    res.status(500).json({
      success: false,
      error: 'Failed to get dashboard overview'
    });
  }
});

/**
 * Get service health
 * Query: service (optional) - limit to one service; timeRange (default '1h')
 */
router.get('/health', async (req, res) => {
  try {
    const { service, timeRange = '1h' } = req.query;

    const must = [
      { range: { '@timestamp': { gte: `now-${timeRange}` } } }
    ];

    if (service) {
      must.push({ term: { service } });
    }

    // Get error logs grouped by service and error type
    const errorLogs = await getLogStorage(req).search({
      index: getLogStorage(req).indices.errors,
      query: { bool: { must } },
      size: 0,
      aggregations: {
        by_service: {
          terms: { field: 'service', size: 50 },
          aggs: {
            error_types: {
              terms: { field: 'error.type', size: 10 }
            }
          }
        }
      }
    });

    // Get performance metrics (average and 95th-percentile response times)
    const perfMetrics = await getLogStorage(req).aggregate({
      index: getLogStorage(req).indices.metrics,
      query: {
        bool: {
          must: [
            ...must,
            { term: { metric: 'response_time' } }
          ]
        }
      },
      aggregations: {
        by_service: {
          terms: { field: 'service', size: 50 },
          aggs: {
            avg_response: { avg: { field: 'value' } },
            p95_response: { percentiles: { field: 'value', percents: [95] } }
          }
        }
      }
    });

    // Combine health data keyed by service name
    const healthData = {};

    // Add error data
    if (errorLogs.aggregations) {
      errorLogs.aggregations.by_service.buckets.forEach(bucket => {
        healthData[bucket.key] = {
          service: bucket.key,
          errors: {
            total: bucket.doc_count,
            types: Object.fromEntries(
              bucket.error_types.buckets.map(b => [b.key, b.doc_count])
            )
          }
        };
      });
    }

    // Add performance data
    if (perfMetrics.by_service) {
      perfMetrics.by_service.buckets.forEach(bucket => {
        if (!healthData[bucket.key]) {
          healthData[bucket.key] = { service: bucket.key };
        }
        healthData[bucket.key].performance = {
          avgResponseTime: bucket.avg_response.value,
          // Elasticsearch reports percentile keys as strings, e.g. '95.0'
          p95ResponseTime: bucket.p95_response.values['95.0']
        };
      });
    }

    // Calculate health scores
    Object.values(healthData).forEach(service => {
      service.healthScore = calculateHealthScore(service);
      service.status = getHealthStatus(service.healthScore);
    });

    res.json({
      success: true,
      data: Object.values(healthData)
    });
  } catch (error) {
    logger.error('Failed to get service health:', error);
    res.status(500).json({
      success: false,
      error: 'Failed to get service health'
    });
  }
});

/**
 * Get performance trends
 * Query: metric (default 'response_time'), service (optional),
 *        timeRange (default '24h'), interval (default '1h')
 */
router.get('/trends', async (req, res) => {
  try {
    const {
      metric = 'response_time',
      service,
      timeRange = '24h',
      interval = '1h'
    } = req.query;

    const must = [
      { term: { metric } },
      { range: { '@timestamp': { gte: `now-${timeRange}` } } }
    ];

    if (service) {
      must.push({ term: { service } });
    }

    const trends = await getLogStorage(req).aggregate({
      index: getLogStorage(req).indices.metrics,
      query: { bool: { must } },
      aggregations: {
        trend: {
          date_histogram: {
            field: '@timestamp',
            // fixed_interval is the ES 7+ name for fixed-width buckets
            fixed_interval: interval
          },
          aggs: {
            avg_value: { avg: { field: 'value' } },
            min_value: { min: { field: 'value' } },
            max_value: { max: { field: 'value' } },
            percentiles: {
              percentiles: {
                field: 'value',
                percents: [50, 95, 99]
              }
            }
          }
        }
      }
    });

    const trendData = trends.trend.buckets.map(bucket => ({
      timestamp: bucket.key_as_string,
      avg: bucket.avg_value.value,
      min: bucket.min_value.value,
      max: bucket.max_value.value,
      p50: bucket.percentiles.values['50.0'],
      p95: bucket.percentiles.values['95.0'],
      p99: bucket.percentiles.values['99.0']
    }));

    res.json({
      success: true,
      data: {
        metric,
        service: service || 'all',
        timeRange,
        interval,
        trends: trendData
      }
    });
  } catch (error) {
    logger.error('Failed to get performance trends:', error);
    res.status(500).json({
      success: false,
      error: 'Failed to get performance trends'
    });
  }
});

/**
 * Get top errors
 * Query: timeRange (default '24h'), limit (default 10)
 */
router.get('/top-errors', async (req, res) => {
  try {
    const { timeRange = '24h', limit = 10 } = req.query;

    const topErrors = await getLogStorage(req).aggregate({
      index: getLogStorage(req).indices.errors,
      query: {
        range: {
          '@timestamp': { gte: `now-${timeRange}` }
        }
      },
      aggregations: {
        by_error: {
          terms: {
            // Requires a keyword sub-field on error.message for exact grouping
            field: 'error.message.keyword',
            size: parseInt(limit, 10), // query params arrive as strings
            order: { _count: 'desc' }
          },
          aggs: {
            sample: {
              top_hits: {
                size: 1,
                _source: ['error', 'service', '@timestamp']
              }
            },
            services: {
              terms: { field: 'service', size: 5 }
            }
          }
        }
      }
    });

    const errors = topErrors.by_error.buckets.map(bucket => ({
      message: bucket.key,
      count: bucket.doc_count,
      sample: bucket.sample.hits.hits[0]._source,
      affectedServices: bucket.services.buckets.map(s => ({
        name: s.key,
        count: s.doc_count
      }))
    }));

    res.json({
      success: true,
      data: errors
    });
  } catch (error) {
    logger.error('Failed to get top errors:', error);
    res.status(500).json({
      success: false,
      error: 'Failed to get top errors'
    });
  }
});

// Helper functions
function calculateErrorRate(stats) {
  const errorCount = stats.by_level.buckets
    .filter(b => ['error', 'fatal'].includes(b.key))
    .reduce((sum, b) => sum + b.doc_count, 0);

  // Percentage of all logs in the window, e.g. 60 of 12,000 -> 0.5
  return stats.total_count.value > 0 ?
    (errorCount / stats.total_count.value) * 100 : 0;
}

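// Example: a service with 5 errors (-10) and 700ms average latency (-10)
// scores 80, which getHealthStatus maps to 'degraded'.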
function calculateHealthScore(service) {
  let score = 100;

  // Deduct for errors
  if (service.errors) {
    const errorPenalty = Math.min(50, service.errors.total * 2);
    score -= errorPenalty;
  }

  // Deduct for slow response times
  if (service.performance) {
    if (service.performance.avgResponseTime > 1000) {
      score -= 20;
    } else if (service.performance.avgResponseTime > 500) {
      score -= 10;
    }

    if (service.performance.p95ResponseTime > 2000) {
      score -= 15;
    }
  }

  return Math.max(0, score);
}

function getHealthStatus(score) {
  if (score >= 90) return 'healthy';
  if (score >= 70) return 'degraded';
  if (score >= 50) return 'unhealthy';
  return 'critical';
}

export default router;
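For context, a minimal sketch of how an app might provide the services this router reads from app.locals. The stub shapes and the /api/dashboard mount point are assumptions for illustration, not part of this commit:

// server.js (hypothetical wiring, sketch only)
import express from 'express';
import dashboardRouter from './routes/dashboard.js';

const app = express();

// Stubs standing in for the real services. The real logStorage must expose
// getStats, search, aggregate and an indices map; logCollector must expose
// getQueueStats; logAnalyzer is resolved but not used by these routes yet.
app.locals.logStorage = {
  indices: { metrics: 'logs-metrics', errors: 'logs-errors' },
  getStats: async () => ({ /* aggregation results */ }),
  search: async () => ({ hits: [] }),
  aggregate: async () => ({})
};
app.locals.logCollector = { getQueueStats: async () => ({}) };
app.locals.logAnalyzer = null;

app.use('/api/dashboard', dashboardRouter);
app.listen(3000);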