1.19升级

This commit is contained in:
dengqichen 2025-12-11 13:43:32 +08:00
parent 19ea644e6e
commit 8b66e7d100
13 changed files with 851 additions and 42 deletions

View File

@ -0,0 +1,48 @@
package com.qqchen.deploy.backend.deploy.api;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsDTO;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsQuery;
import com.qqchen.deploy.backend.deploy.service.IServerMonitorService;
import com.qqchen.deploy.backend.framework.api.Response;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.*;
/**
* 服务器监控数据 Controller
*/
@Slf4j
@RestController
@RequestMapping("/api/v1/server/monitor")
@Tag(name = "服务器监控数据", description = "服务器监控数据查询接口")
public class ServerMonitorApiController {
@Resource
private IServerMonitorService serverMonitorService;
@Operation(
summary = "查询服务器监控指标数据",
description = "支持快捷时间范围最近1小时/6小时/24小时/7天/30天和自定义时间范围" +
"支持按指标类型查询CPU/MEMORY/DISK/NETWORK自动根据时间范围选择合适的聚合粒度"
)
@GetMapping("/{serverId}/metrics")
public Response<ServerMonitorMetricsDTO> getServerMetrics(
@Parameter(description = "服务器ID", required = true)
@PathVariable Long serverId,
@Parameter(description = "查询参数(快捷时间范围、自定义时间、指标类型等)")
@ModelAttribute ServerMonitorMetricsQuery query
) {
// 设置服务器ID从路径参数
query.setServerId(serverId);
log.info("查询服务器监控指标: serverId={}, timeRange={}, metrics={}",
serverId, query.getTimeRange(), query.getMetrics());
ServerMonitorMetricsDTO response = serverMonitorService.getServerMetrics(query);
return Response.success(response);
}
}

View File

@ -0,0 +1,268 @@
package com.qqchen.deploy.backend.deploy.dto;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.math.BigDecimal;
import java.time.LocalDateTime;
import java.util.List;
/**
 * Response payload for server monitoring metrics: server identity, the
 * resolved query window, per-metric time series, and summary statistics.
 * Consumed by the monitoring charts on the frontend.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "服务器监控指标数据")
public class ServerMonitorMetricsDTO {
@Schema(description = "服务器信息")
private ServerInfo server;
@Schema(description = "时间范围信息")
private TimeRangeInfo timeRange;
@Schema(description = "指标数据")
private MetricsData metrics;
@Schema(description = "统计信息")
private StatisticsInfo statistics;
/** Identity snapshot of the monitored server. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "服务器信息")
public static class ServerInfo {
@Schema(description = "服务器ID")
private Long serverId;
@Schema(description = "服务器名称")
private String serverName;
@Schema(description = "主机IP")
private String hostIp;
@Schema(description = "服务器状态")
private String status;
}
/** Effective query window and the aggregation interval actually applied. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "时间范围信息")
public static class TimeRangeInfo {
@Schema(description = "开始时间")
private LocalDateTime startTime;
@Schema(description = "结束时间")
private LocalDateTime endTime;
@Schema(description = "聚合间隔")
private String interval;
@Schema(description = "实际数据点数量")
private Integer dataPoints;
}
/** Per-metric series; a field is null when that metric was not requested. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "指标数据")
public static class MetricsData {
@Schema(description = "CPU指标数据")
private List<CpuMetric> cpu;
@Schema(description = "内存指标数据")
private List<MemoryMetric> memory;
@Schema(description = "网络指标数据")
private List<NetworkMetric> network;
@Schema(description = "磁盘指标数据")
private DiskMetric disk;
}
/** One CPU usage sample on the time axis. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "CPU指标")
public static class CpuMetric {
@Schema(description = "时间点")
private LocalDateTime time;
@Schema(description = "CPU使用率(%)")
private BigDecimal value;
@Schema(description = "采集状态")
private String status;
}
/** One memory usage sample on the time axis. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "内存指标")
public static class MemoryMetric {
@Schema(description = "时间点")
private LocalDateTime time;
@Schema(description = "内存使用率(%)")
private BigDecimal usagePercent;
@Schema(description = "已用内存(GB)")
private Integer usedGB;
@Schema(description = "采集状态")
private String status;
}
/** One network sample: raw byte counters plus derived MB/s rates. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "网络指标")
public static class NetworkMetric {
@Schema(description = "时间点")
private LocalDateTime time;
@Schema(description = "接收字节数")
private Long rxBytes;
@Schema(description = "发送字节数")
private Long txBytes;
@Schema(description = "接收速率(MB/s)")
private BigDecimal rxMBps;
@Schema(description = "发送速率(MB/s)")
private BigDecimal txMBps;
}
/** Latest disk snapshot: per-partition usage plus the worst partition. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "磁盘指标")
public static class DiskMetric {
@Schema(description = "最新采集时间")
private LocalDateTime latestTime;
@Schema(description = "分区信息列表")
private List<DiskPartition> partitions;
@Schema(description = "时间范围内最大使用率(%)")
private BigDecimal maxUsagePercent;
@Schema(description = "最大使用率的分区")
private String maxUsagePartition;
}
/** Capacity and usage of a single mounted partition. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "磁盘分区")
public static class DiskPartition {
@Schema(description = "挂载点")
private String mountPoint;
@Schema(description = "文件系统")
private String fileSystem;
@Schema(description = "总容量(GB)")
private Long totalSizeGB;
@Schema(description = "已用容量(GB)")
private Long usedSizeGB;
@Schema(description = "使用率(%)")
private BigDecimal usagePercent;
}
/** Summary statistics; a field is null when that metric was not requested. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "统计信息")
public static class StatisticsInfo {
@Schema(description = "CPU统计")
private CpuStats cpu;
@Schema(description = "内存统计")
private MemoryStats memory;
@Schema(description = "网络统计")
private NetworkStats network;
}
/** CPU usage aggregates over the query window. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "CPU统计")
public static class CpuStats {
@Schema(description = "平均值(%)")
private BigDecimal avg;
@Schema(description = "最大值(%)")
private BigDecimal max;
@Schema(description = "最小值(%)")
private BigDecimal min;
@Schema(description = "峰值时间")
private LocalDateTime maxTime;
}
/** Memory usage aggregates over the query window. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "内存统计")
public static class MemoryStats {
@Schema(description = "平均使用率(%)")
private BigDecimal avgPercent;
@Schema(description = "最大使用率(%)")
private BigDecimal maxPercent;
@Schema(description = "最小使用率(%)")
private BigDecimal minPercent;
@Schema(description = "峰值时间")
private LocalDateTime maxTime;
}
/** Network throughput aggregates over the query window. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Schema(description = "网络统计")
public static class NetworkStats {
@Schema(description = "总接收字节数")
private Long totalRxBytes;
@Schema(description = "总发送字节数")
private Long totalTxBytes;
@Schema(description = "平均接收速率(MB/s)")
private BigDecimal avgRxMBps;
@Schema(description = "平均发送速率(MB/s)")
private BigDecimal avgTxMBps;
@Schema(description = "峰值接收速率(MB/s)")
private BigDecimal peakRxMBps;
@Schema(description = "峰值发送速率(MB/s)")
private BigDecimal peakTxMBps;
}
}

View File

@ -0,0 +1,27 @@
package com.qqchen.deploy.backend.deploy.dto;
import com.qqchen.deploy.backend.deploy.enums.MonitorTimeRange;
import com.qqchen.deploy.backend.framework.enums.MonitorMetricEnum;
import io.swagger.v3.oas.annotations.media.Schema;
import jakarta.validation.constraints.NotNull;
import lombok.Data;
import java.util.List;
/**
* 服务器监控指标查询参数
*/
@Data
@Schema(description = "服务器监控指标查询参数")
public class ServerMonitorMetricsQuery {
@Schema(description = "服务器ID", required = true)
private Long serverId;
@NotNull(message = "时间范围不能为空")
@Schema(description = "时间范围", required = true, example = "LAST_1_HOUR")
private MonitorTimeRange timeRange;
@Schema(description = "查询的指标类型列表(为空则查询所有)", example = "[\"CPU\", \"MEMORY\"]")
private List<MonitorMetricEnum> metrics;
}

View File

@ -0,0 +1,24 @@
package com.qqchen.deploy.backend.deploy.enums;
import lombok.AllArgsConstructor;
import lombok.Getter;
import java.time.Duration;
/**
 * Quick-select time ranges for monitoring queries.
 * Each constant pairs a lookback window with the default aggregation
 * interval used when charting that window size.
 */
public enum MonitorTimeRange {
LAST_1_HOUR("LAST_1_HOUR", "最近1小时", Duration.ofHours(1), "5m"),
LAST_6_HOURS("LAST_6_HOURS", "最近6小时", Duration.ofHours(6), "5m"),
LAST_24_HOURS("LAST_24_HOURS", "最近24小时", Duration.ofHours(24), "15m"),
LAST_7_DAYS("LAST_7_DAYS", "最近7天", Duration.ofDays(7), "1h"),
LAST_30_DAYS("LAST_30_DAYS", "最近30天", Duration.ofDays(30), "4h");

private final String code;
private final String name;
private final Duration duration;
private final String defaultInterval;

MonitorTimeRange(String code, String name, Duration duration, String defaultInterval) {
this.code = code;
this.name = name;
this.duration = duration;
this.defaultInterval = defaultInterval;
}

/** Stable API code of the range (same as the constant name). */
public String getCode() {
return code;
}

/** Human-readable display name. */
public String getName() {
return name;
}

/** Length of the lookback window. */
public Duration getDuration() {
return duration;
}

/** Default aggregation interval label for this window ("5m", "1h", ...). */
public String getDefaultInterval() {
return defaultInterval;
}
}

View File

@ -47,4 +47,19 @@ public interface IServerMonitorRepository extends JpaRepository<ServerMonitor, L
*/
@Query("SELECT m FROM ServerMonitor m WHERE m.serverId = :serverId ORDER BY m.collectTime DESC")
List<ServerMonitor> findRecentMonitorRecords(@Param("serverId") Long serverId, org.springframework.data.domain.Pageable pageable);
/**
 * Finds monitoring records for a server within [startTime, endTime],
 * ordered by collection time ascending (oldest first) for charting.
 * Only SUCCESS records are returned; FAILURE rows produced by connection
 * errors are filtered out directly in the JPQL.
 */
@Query("SELECT m FROM ServerMonitor m WHERE m.serverId = :serverId " +
"AND m.collectTime >= :startTime AND m.collectTime <= :endTime " +
"AND m.status = 'SUCCESS' " +
"ORDER BY m.collectTime ASC")
List<ServerMonitor> findByServerIdAndTimeRange(
@Param("serverId") Long serverId,
@Param("startTime") LocalDateTime startTime,
@Param("endTime") LocalDateTime endTime
);
}

View File

@ -198,22 +198,14 @@ public class ServerMonitorScheduler {
passphrase
);
// 3. 连接成功插入成功记录
ServerMonitor successRecord = ServerMonitor.builder()
.serverId(server.getId())
.status(com.qqchen.deploy.backend.framework.enums.StatusEnum.SUCCESS)
.collectTime(LocalDateTime.now())
.build();
monitorService.saveMonitorRecord(successRecord);
// 4. 解除服务器状态告警如果存在
// 3. 解除服务器状态告警如果存在
try {
alertService.resolveServerStatusAlert(server.getId());
} catch (Exception e) {
log.warn("解除服务器状态告警失败: serverId={}", server.getId(), e);
}
// 5. 更新服务器状态为ONLINE
// 4. 更新服务器状态为ONLINE
if (server.getStatus() == ServerStatusEnum.OFFLINE) {
server.setStatus(ServerStatusEnum.ONLINE);
server.setLastConnectTime(LocalDateTime.now());

View File

@ -1,6 +1,8 @@
package com.qqchen.deploy.backend.deploy.service;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorDataDTO;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsDTO;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsQuery;
import com.qqchen.deploy.backend.deploy.entity.ServerMonitor;
import java.time.LocalDateTime;
@ -46,4 +48,15 @@ public interface IServerMonitorService {
* @return 连续失败次数
*/
int countConsecutiveFailures(Long serverId, int checkLimit);
/**
 * Queries server monitoring metric data for charting.
 * The lookback window is derived from the quick-select time range in the
 * query, the aggregation granularity is chosen automatically from that
 * window, and the result can be limited to specific metric types
 * (CPU / memory / disk / network).
 *
 * @param query query parameters (server id, time range, metric types)
 * @return aggregated monitoring metric data
 */
ServerMonitorMetricsDTO getServerMetrics(ServerMonitorMetricsQuery query);
}

View File

@ -1,21 +1,33 @@
package com.qqchen.deploy.backend.deploy.service.impl;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.core.type.TypeReference;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorDataDTO;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsDTO;
import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsQuery;
import com.qqchen.deploy.backend.deploy.entity.Server;
import com.qqchen.deploy.backend.deploy.entity.ServerMonitor;
import com.qqchen.deploy.backend.deploy.enums.MonitorTimeRange;
import com.qqchen.deploy.backend.deploy.repository.IServerMonitorRepository;
import com.qqchen.deploy.backend.deploy.repository.IServerRepository;
import com.qqchen.deploy.backend.deploy.service.IServerMonitorService;
import com.qqchen.deploy.backend.framework.enums.MonitorMetricEnum;
import com.qqchen.deploy.backend.framework.enums.ResponseCode;
import com.qqchen.deploy.backend.framework.enums.StatusEnum;
import com.qqchen.deploy.backend.framework.exception.BusinessException;
import com.qqchen.deploy.backend.framework.utils.JsonUtils;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.CollectionUtils;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.time.Duration;
import java.time.LocalDateTime;
import java.util.List;
import java.util.*;
import java.util.stream.Collectors;
/**
@ -29,7 +41,7 @@ public class ServerMonitorServiceImpl implements IServerMonitorService {
private IServerMonitorRepository monitorRepository;
@Resource
private ObjectMapper objectMapper;
private IServerRepository serverRepository;
@Override
@Transactional
@ -67,10 +79,9 @@ public class ServerMonitorServiceImpl implements IServerMonitorService {
private ServerMonitor convertToEntity(ServerMonitorDataDTO dto) {
String diskUsageJson = null;
if (dto.getDiskUsage() != null && !dto.getDiskUsage().isEmpty()) {
try {
diskUsageJson = objectMapper.writeValueAsString(dto.getDiskUsage());
} catch (JsonProcessingException e) {
log.error("序列化磁盘使用率失败", e);
diskUsageJson = JsonUtils.toJson(dto.getDiskUsage());
if (diskUsageJson == null) {
log.error("序列化磁盘使用率失败");
}
}
@ -119,4 +130,408 @@ public class ServerMonitorServiceImpl implements IServerMonitorService {
log.debug("统计连续失败次数: serverId={}, count={}", serverId, failureCount);
return failureCount;
}
@Override
public ServerMonitorMetricsDTO getServerMetrics(ServerMonitorMetricsQuery query) {
// Guard clauses: both the server id and the time range are mandatory.
if (query == null || query.getServerId() == null) {
throw new BusinessException(ResponseCode.INVALID_PARAM, new Object[]{"服务器ID不能为空"});
}
MonitorTimeRange range = query.getTimeRange();
if (range == null) {
throw new BusinessException(ResponseCode.INVALID_PARAM, new Object[]{"时间范围不能为空"});
}
Long serverId = query.getServerId();
Server server = serverRepository.findById(serverId)
.orElseThrow(() -> new BusinessException(ResponseCode.DATA_NOT_FOUND, new Object[]{"服务器不存在"}));
// Resolve the query window and default aggregation interval from the enum.
LocalDateTime end = LocalDateTime.now();
LocalDateTime start = end.minus(range.getDuration());
String interval = range.getDefaultInterval();
List<ServerMonitor> records = monitorRepository.findByServerIdAndTimeRange(serverId, start, end);
if (CollectionUtils.isEmpty(records)) {
log.warn("未查询到监控数据: serverId={}, timeRange={}, startTime={}, endTime={}",
serverId, range, start, end);
return buildEmptyResponse(server, start, end, interval);
}
// Down-sample to the target interval, then shape the response.
List<ServerMonitor> sampled = aggregateData(records, interval);
return buildMetricsResponse(server, sampled, start, end, interval, query.getMetrics());
}
/**
 * Down-samples raw monitoring rows to the requested aggregation interval.
 * Rows are grouped into fixed time buckets and each bucket is collapsed to
 * a single averaged record. Data at the finest interval (5m) or small result
 * sets (<= 100 points) are returned untouched.
 */
private List<ServerMonitor> aggregateData(List<ServerMonitor> data, String interval) {
if ("5m".equals(interval) || data.size() <= 100) {
return data;
}
int aggregateMinutes = getAggregateMinutes(interval);
Map<String, List<ServerMonitor>> groupedData = data.stream()
.collect(Collectors.groupingBy(m -> bucketStart(m.getCollectTime(), aggregateMinutes).toString()));
return groupedData.values().stream()
.map(this::aggregateMonitorGroup)
.sorted(Comparator.comparing(ServerMonitor::getCollectTime))
.collect(Collectors.toList());
}

/**
 * Truncates a timestamp to the start of its aggregation bucket.
 * Fix: the previous key only normalized the minute-of-hour
 * (getMinute() % aggregateMinutes), so the "4h" interval (240 minutes)
 * degenerated into 1-hour buckets; intervals of an hour or more must
 * normalize the hour component as well.
 */
private LocalDateTime bucketStart(LocalDateTime time, int aggregateMinutes) {
LocalDateTime minuteFloor = time.withSecond(0).withNano(0);
if (aggregateMinutes >= 60) {
int bucketHours = aggregateMinutes / 60;
return minuteFloor.withMinute(0).minusHours(minuteFloor.getHour() % bucketHours);
}
return minuteFloor.minusMinutes(minuteFloor.getMinute() % aggregateMinutes);
}
/**
 * Maps an aggregation interval label to its length in minutes.
 * Unknown labels fall back to the finest granularity (5 minutes);
 * a null label raises NullPointerException (switch on null).
 */
private int getAggregateMinutes(String interval) {
switch (interval) {
case "15m":
return 15;
case "1h":
return 60;
case "4h":
return 240;
default:
return 5;
}
}
/**
 * Collapses one time bucket of monitoring rows into a single record.
 * CPU/memory usage become the average over SUCCESS rows, disk usage keeps
 * the most recent snapshot, and network counters are summed. If the bucket
 * has no SUCCESS row, the first row is returned unchanged as a placeholder.
 */
private ServerMonitor aggregateMonitorGroup(List<ServerMonitor> group) {
if (group.size() == 1) {
return group.get(0);
}
List<ServerMonitor> successRecords = group.stream()
.filter(m -> StatusEnum.SUCCESS.equals(m.getStatus()))
.collect(Collectors.toList());
if (successRecords.isEmpty()) {
return group.get(0);
}
// Fix: average only over non-null samples. The previous code summed the
// non-null values but divided by the full record count, biasing the
// average towards zero whenever some samples were missing.
BigDecimal avgCpu = averageOf(successRecords, ServerMonitor::getCpuUsage);
BigDecimal avgMemory = averageOf(successRecords, ServerMonitor::getMemoryUsage);
return ServerMonitor.builder()
.serverId(group.get(0).getServerId())
.cpuUsage(avgCpu)
.memoryUsage(avgMemory)
.memoryUsed(successRecords.get(0).getMemoryUsed())
// Disk usage is a point-in-time snapshot, so keep the latest one.
.diskUsage(successRecords.get(successRecords.size() - 1).getDiskUsage())
.networkRx(successRecords.stream().mapToLong(m -> m.getNetworkRx() != null ? m.getNetworkRx() : 0).sum())
.networkTx(successRecords.stream().mapToLong(m -> m.getNetworkTx() != null ? m.getNetworkTx() : 0).sum())
.collectTime(group.get(0).getCollectTime())
.status(StatusEnum.SUCCESS)
.build();
}

/**
 * Average of a nullable metric over the given records, rounded to 2 decimal
 * places (HALF_UP); null when no record carries a value for the metric.
 */
private BigDecimal averageOf(List<ServerMonitor> records,
java.util.function.Function<ServerMonitor, BigDecimal> metric) {
List<BigDecimal> values = records.stream()
.map(metric)
.filter(Objects::nonNull)
.collect(Collectors.toList());
if (values.isEmpty()) {
return null;
}
BigDecimal sum = values.stream().reduce(BigDecimal.ZERO, BigDecimal::add);
return sum.divide(BigDecimal.valueOf(values.size()), 2, RoundingMode.HALF_UP);
}
/**
 * Builds a response shell with empty metric lists for windows that contain
 * no monitoring rows, so the frontend can still render the chart frame.
 */
private ServerMonitorMetricsDTO buildEmptyResponse(Server server, LocalDateTime startTime,
LocalDateTime endTime, String interval) {
ServerMonitorMetricsDTO.ServerInfo serverInfo = ServerMonitorMetricsDTO.ServerInfo.builder()
.serverId(server.getId())
.serverName(server.getServerName())
.hostIp(server.getHostIp())
.status(server.getStatus().name())
.build();
ServerMonitorMetricsDTO.TimeRangeInfo rangeInfo = ServerMonitorMetricsDTO.TimeRangeInfo.builder()
.startTime(startTime)
.endTime(endTime)
.interval(interval)
.dataPoints(0)
.build();
ServerMonitorMetricsDTO.MetricsData emptyMetrics = ServerMonitorMetricsDTO.MetricsData.builder()
.cpu(new ArrayList<>())
.memory(new ArrayList<>())
.network(new ArrayList<>())
.build();
return ServerMonitorMetricsDTO.builder()
.server(serverInfo)
.timeRange(rangeInfo)
.metrics(emptyMetrics)
.statistics(ServerMonitorMetricsDTO.StatisticsInfo.builder().build())
.build();
}
/**
 * Builds the complete monitoring metrics response.
 * Only the metric types requested (or all of them when the request list is
 * null/empty) are populated; unrequested sections stay null.
 */
private ServerMonitorMetricsDTO buildMetricsResponse(Server server, List<ServerMonitor> data,
LocalDateTime startTime, LocalDateTime endTime,
String interval, List<MonitorMetricEnum> requestedMetrics) {
Set<MonitorMetricEnum> metricsToInclude = requestedMetrics == null || requestedMetrics.isEmpty()
? EnumSet.allOf(MonitorMetricEnum.class)
: EnumSet.copyOf(requestedMetrics);
// Build the network series first so buildNetworkStats can reuse it
// instead of recomputing the per-point rates.
List<ServerMonitorMetricsDTO.NetworkMetric> networkMetrics =
metricsToInclude.contains(MonitorMetricEnum.NETWORK) ? buildNetworkMetrics(data) : null;
ServerMonitorMetricsDTO.MetricsData metricsData = ServerMonitorMetricsDTO.MetricsData.builder()
.cpu(metricsToInclude.contains(MonitorMetricEnum.CPU) ? buildCpuMetrics(data) : null)
.memory(metricsToInclude.contains(MonitorMetricEnum.MEMORY) ? buildMemoryMetrics(data) : null)
.network(networkMetrics)
.disk(metricsToInclude.contains(MonitorMetricEnum.DISK) ? buildDiskMetric(data) : null)
.build();
ServerMonitorMetricsDTO.StatisticsInfo statistics = ServerMonitorMetricsDTO.StatisticsInfo.builder()
.cpu(metricsToInclude.contains(MonitorMetricEnum.CPU) ? buildCpuStats(data) : null)
.memory(metricsToInclude.contains(MonitorMetricEnum.MEMORY) ? buildMemoryStats(data) : null)
.network(metricsToInclude.contains(MonitorMetricEnum.NETWORK) ? buildNetworkStats(data, networkMetrics) : null)
.build();
return ServerMonitorMetricsDTO.builder()
.server(ServerMonitorMetricsDTO.ServerInfo.builder()
.serverId(server.getId())
.serverName(server.getServerName())
.hostIp(server.getHostIp())
.status(server.getStatus().name())
.build())
.timeRange(ServerMonitorMetricsDTO.TimeRangeInfo.builder()
.startTime(startTime)
.endTime(endTime)
.interval(interval)
.dataPoints(data.size())
.build())
.metrics(metricsData)
.statistics(statistics)
.build();
}
/** Maps raw rows to CPU chart points (time, usage %, collect status). */
private List<ServerMonitorMetricsDTO.CpuMetric> buildCpuMetrics(List<ServerMonitor> data) {
List<ServerMonitorMetricsDTO.CpuMetric> points = new ArrayList<>(data.size());
for (ServerMonitor record : data) {
points.add(ServerMonitorMetricsDTO.CpuMetric.builder()
.time(record.getCollectTime())
.value(record.getCpuUsage())
.status(record.getStatus().name())
.build());
}
return points;
}
/** Maps raw rows to memory chart points (time, usage %, used GB, status). */
private List<ServerMonitorMetricsDTO.MemoryMetric> buildMemoryMetrics(List<ServerMonitor> data) {
List<ServerMonitorMetricsDTO.MemoryMetric> points = new ArrayList<>(data.size());
for (ServerMonitor record : data) {
points.add(ServerMonitorMetricsDTO.MemoryMetric.builder()
.time(record.getCollectTime())
.usagePercent(record.getMemoryUsage())
.usedGB(record.getMemoryUsed())
.status(record.getStatus().name())
.build());
}
return points;
}
/**
 * Converts network byte counters into chart points with derived MB/s rates.
 * The rate of each point is (counter delta) / (elapsed seconds) against the
 * previous sample; the first point has no predecessor and reports 0.
 * The diff-based calculation implies the counters are cumulative.
 */
private List<ServerMonitorMetricsDTO.NetworkMetric> buildNetworkMetrics(List<ServerMonitor> data) {
List<ServerMonitorMetricsDTO.NetworkMetric> metrics = new ArrayList<>();
for (int i = 0; i < data.size(); i++) {
ServerMonitor current = data.get(i);
BigDecimal rxMBps = BigDecimal.ZERO;
BigDecimal txMBps = BigDecimal.ZERO;
if (i > 0 && current.getNetworkRx() != null && current.getNetworkTx() != null) {
ServerMonitor previous = data.get(i - 1);
long timeDiff = Duration.between(previous.getCollectTime(), current.getCollectTime()).getSeconds();
if (timeDiff > 0 && previous.getNetworkRx() != null && previous.getNetworkTx() != null) {
// Fix: clamp deltas at 0 — a counter reset (e.g. server reboot) makes
// current < previous and would otherwise chart a negative rate.
long rxDiff = Math.max(0L, current.getNetworkRx() - previous.getNetworkRx());
long txDiff = Math.max(0L, current.getNetworkTx() - previous.getNetworkTx());
rxMBps = BigDecimal.valueOf(rxDiff).divide(BigDecimal.valueOf(timeDiff * 1024 * 1024), 2, RoundingMode.HALF_UP);
txMBps = BigDecimal.valueOf(txDiff).divide(BigDecimal.valueOf(timeDiff * 1024 * 1024), 2, RoundingMode.HALF_UP);
}
}
metrics.add(ServerMonitorMetricsDTO.NetworkMetric.builder()
.time(current.getCollectTime())
.rxBytes(current.getNetworkRx())
.txBytes(current.getNetworkTx())
.rxMBps(rxMBps)
.txMBps(txMBps)
.build());
}
return metrics;
}
/**
 * Builds the disk section from the most recent row's diskUsage JSON.
 * Returns null when the latest row has no disk payload or parsing fails.
 * NOTE(review): assumes the JSON is an array of objects with keys
 * mountPoint, fileSystem, totalSize, usedSize, usagePercent (the shape
 * written by convertToEntity) — verify against the collector.
 */
private ServerMonitorMetricsDTO.DiskMetric buildDiskMetric(List<ServerMonitor> data) {
// Disk usage is a point-in-time snapshot; only the latest row matters.
ServerMonitor latest = data.get(data.size() - 1);
if (latest.getDiskUsage() == null) {
return null;
}
try {
List<Map<String, Object>> diskData = JsonUtils.fromJson(
latest.getDiskUsage(), new TypeReference<List<Map<String, Object>>>() {});
List<ServerMonitorMetricsDTO.DiskPartition> partitions = diskData.stream()
.map(disk -> ServerMonitorMetricsDTO.DiskPartition.builder()
.mountPoint((String) disk.get("mountPoint"))
.fileSystem((String) disk.get("fileSystem"))
.totalSizeGB(((Number) disk.get("totalSize")).longValue())
.usedSizeGB(((Number) disk.get("usedSize")).longValue())
.usagePercent(BigDecimal.valueOf(((Number) disk.get("usagePercent")).doubleValue()))
.build())
.collect(Collectors.toList());
// Highlight the fullest partition for the summary header.
BigDecimal maxUsage = partitions.stream()
.map(ServerMonitorMetricsDTO.DiskPartition::getUsagePercent)
.max(BigDecimal::compareTo)
.orElse(BigDecimal.ZERO);
String maxPartition = partitions.stream()
.filter(p -> p.getUsagePercent().compareTo(maxUsage) == 0)
.map(ServerMonitorMetricsDTO.DiskPartition::getMountPoint)
.findFirst()
.orElse(null);
return ServerMonitorMetricsDTO.DiskMetric.builder()
.latestTime(latest.getCollectTime())
.partitions(partitions)
.maxUsagePercent(maxUsage)
.maxUsagePartition(maxPartition)
.build();
} catch (Exception e) {
// Malformed payload degrades to "no disk data" instead of failing the query.
log.error("解析磁盘数据失败", e);
return null;
}
}
/**
 * Aggregates CPU statistics (avg/max/min and peak time) over SUCCESS rows
 * that carry a CPU value; returns null when there are none.
 */
private ServerMonitorMetricsDTO.CpuStats buildCpuStats(List<ServerMonitor> data) {
List<ServerMonitor> successData = data.stream()
.filter(m -> StatusEnum.SUCCESS.equals(m.getStatus()) && m.getCpuUsage() != null)
.collect(Collectors.toList());
if (successData.isEmpty()) {
return null;
}
// Single pass: accumulate the sum and track the extreme records.
BigDecimal sum = BigDecimal.ZERO;
ServerMonitor maxRecord = successData.get(0);
BigDecimal min = successData.get(0).getCpuUsage();
for (ServerMonitor record : successData) {
BigDecimal usage = record.getCpuUsage();
sum = sum.add(usage);
if (usage.compareTo(maxRecord.getCpuUsage()) > 0) {
maxRecord = record;
}
if (usage.compareTo(min) < 0) {
min = usage;
}
}
BigDecimal avg = sum.divide(BigDecimal.valueOf(successData.size()), 2, RoundingMode.HALF_UP);
return ServerMonitorMetricsDTO.CpuStats.builder()
.avg(avg)
.max(maxRecord.getCpuUsage())
.min(min)
.maxTime(maxRecord.getCollectTime())
.build();
}
/**
 * Aggregates memory statistics (avg/max/min usage % and peak time) over
 * SUCCESS rows that carry a memory value; returns null when there are none.
 */
private ServerMonitorMetricsDTO.MemoryStats buildMemoryStats(List<ServerMonitor> data) {
List<ServerMonitor> successData = data.stream()
.filter(m -> StatusEnum.SUCCESS.equals(m.getStatus()) && m.getMemoryUsage() != null)
.collect(Collectors.toList());
if (successData.isEmpty()) {
return null;
}
// Single pass: accumulate the sum and track the extreme records.
BigDecimal sum = BigDecimal.ZERO;
ServerMonitor maxRecord = successData.get(0);
BigDecimal min = successData.get(0).getMemoryUsage();
for (ServerMonitor record : successData) {
BigDecimal usage = record.getMemoryUsage();
sum = sum.add(usage);
if (usage.compareTo(maxRecord.getMemoryUsage()) > 0) {
maxRecord = record;
}
if (usage.compareTo(min) < 0) {
min = usage;
}
}
BigDecimal avg = sum.divide(BigDecimal.valueOf(successData.size()), 2, RoundingMode.HALF_UP);
return ServerMonitorMetricsDTO.MemoryStats.builder()
.avgPercent(avg)
.maxPercent(maxRecord.getMemoryUsage())
.minPercent(min)
.maxTime(maxRecord.getCollectTime())
.build();
}
/**
 * Aggregates network statistics (totals, average and peak MB/s rates) from
 * the raw rows plus the already-built per-point rate series.
 * Returns null when there are no usable network samples.
 * NOTE(review): buildNetworkMetrics derives rates from counter deltas,
 * which implies networkRx/networkTx are cumulative counters. If so, summing
 * them here overstates "total bytes" — the window total would be
 * last - first, not a sum. Verify counter semantics with the collector.
 */
private ServerMonitorMetricsDTO.NetworkStats buildNetworkStats(List<ServerMonitor> data,
List<ServerMonitorMetricsDTO.NetworkMetric> networkMetrics) {
if (networkMetrics == null || networkMetrics.isEmpty()) {
return null;
}
// Totals are computed from the raw SUCCESS rows.
List<ServerMonitor> successData = data.stream()
.filter(m -> StatusEnum.SUCCESS.equals(m.getStatus())
&& m.getNetworkRx() != null && m.getNetworkTx() != null)
.collect(Collectors.toList());
if (successData.isEmpty()) {
return null;
}
long totalRx = successData.stream().mapToLong(ServerMonitor::getNetworkRx).sum();
long totalTx = successData.stream().mapToLong(ServerMonitor::getNetworkTx).sum();
// Rate statistics are extracted from the already-built NetworkMetric series.
BigDecimal avgRxMBps = null;
BigDecimal avgTxMBps = null;
BigDecimal peakRxMBps = null;
BigDecimal peakTxMBps = null;
// The first point has no predecessor (its rate is always 0), so statistics
// start from the second point.
List<ServerMonitorMetricsDTO.NetworkMetric> validMetrics = networkMetrics.size() > 1
? networkMetrics.subList(1, networkMetrics.size())
: new ArrayList<>();
if (!validMetrics.isEmpty()) {
// Average receive rate
avgRxMBps = validMetrics.stream()
.map(ServerMonitorMetricsDTO.NetworkMetric::getRxMBps)
.reduce(BigDecimal.ZERO, BigDecimal::add)
.divide(BigDecimal.valueOf(validMetrics.size()), 2, RoundingMode.HALF_UP);
// Average transmit rate
avgTxMBps = validMetrics.stream()
.map(ServerMonitorMetricsDTO.NetworkMetric::getTxMBps)
.reduce(BigDecimal.ZERO, BigDecimal::add)
.divide(BigDecimal.valueOf(validMetrics.size()), 2, RoundingMode.HALF_UP);
// Peak receive rate
peakRxMBps = validMetrics.stream()
.map(ServerMonitorMetricsDTO.NetworkMetric::getRxMBps)
.max(BigDecimal::compareTo)
.orElse(null);
// Peak transmit rate
peakTxMBps = validMetrics.stream()
.map(ServerMonitorMetricsDTO.NetworkMetric::getTxMBps)
.max(BigDecimal::compareTo)
.orElse(null);
}
return ServerMonitorMetricsDTO.NetworkStats.builder()
.totalRxBytes(totalRx)
.totalTxBytes(totalTx)
.avgRxMBps(avgRxMBps)
.avgTxMBps(avgTxMBps)
.peakRxMBps(peakRxMBps)
.peakTxMBps(peakTxMBps)
.build();
}
}

View File

@ -18,7 +18,7 @@ import java.math.BigDecimal;
* @since 2025-12-07
*/
@Slf4j
@Service
@Service("frameworkServerMonitorService")
public class ServerMonitorService {
/**

View File

@ -162,9 +162,12 @@ public abstract class BaseServiceImpl<T extends Entity<ID>, D extends BaseDTO, Q
}
protected Sort createSort(BaseQuery query) {
return query != null && StringUtils.hasText(query.getSortField()) ?
Sort.by(Sort.Direction.fromString(query.getSortOrder()), query.getSortField()) :
Sort.by(Sort.Direction.DESC, "createTime");
// 添加id作为第二排序条件确保分页数据稳定性避免相同排序值时数据重复或丢失
if (query != null && StringUtils.hasText(query.getSortField())) {
Sort.Direction direction = Sort.Direction.fromString(query.getSortOrder());
return Sort.by(direction, query.getSortField()).and(Sort.by(direction, "id"));
}
return Sort.by(Sort.Direction.DESC, "createTime").and(Sort.by(Sort.Direction.DESC, "id"));
}
private void buildQueryPredicate(BaseQuery query, BooleanBuilder builder) {
@ -327,19 +330,15 @@ public abstract class BaseServiceImpl<T extends Entity<ID>, D extends BaseDTO, Q
protected PageRequest createPageRequest(BaseQuery query) {
if (query == null) {
return PageRequest.of(0, 10, Sort.by(Sort.Direction.DESC, "createTime"));
return PageRequest.of(0, 10, createSort(null));
}
// 处理分页参数前端 pageNum 0 开始
int pageNum = query.getPageNum() != null ? Math.max(0, query.getPageNum()) : 0;
int pageSize = query.getPageSize() != null ? Math.max(1, Math.min(query.getPageSize(), 100)) : 10;
// 处理排序
Sort sort = StringUtils.hasText(query.getSortField()) ?
Sort.by(Sort.Direction.fromString(query.getSortOrder()), query.getSortField()) :
Sort.by(Sort.Direction.DESC, "createTime");
return PageRequest.of(pageNum, pageSize, sort);
// 使用统一的排序逻辑包含id作为第二排序条件
return PageRequest.of(pageNum, pageSize, createSort(query));
}
@Transactional(

View File

@ -100,10 +100,12 @@ logging:
org.springframework.web: DEBUG
org.springframework.context.i18n: DEBUG
org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping: TRACE
org.hibernate.SQL: INFO
org.hibernate.SQL: DEBUG
org.hibernate.type.descriptor.sql: TRACE
org.hibernate.type.descriptor.sql.BasicBinder: TRACE
org.hibernate.orm.jdbc.bind: INFO
org.hibernate.orm.jdbc.bind: TRACE
org.hibernate.type: TRACE
com.querydsl.jpa: DEBUG
com.qqchen.deploy.backend.framework.utils.EntityPathResolver: DEBUG
com.qqchen.deploy.backend: DEBUG
# 屏蔽 SSHJ 底层日志SecureRandom、Transport 等无业务价值的日志)

View File

@ -681,6 +681,8 @@ INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_
INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (25, 'etl-executor', 'etl-executor', 'etl执行器', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0');
INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (26, 'etl-scheduler', 'etl-scheduler', 'etl调度器', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0');
INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (27, 'themetis-control-panel-server', 'themetis-control-panel-server', '控制台', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0');
INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (28, 'themetis-permission-server', 'themetis-permission-server', '权限系统', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0');
INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (29, 'themetis-gateway', 'themetis-gateway', '网关', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0');
INSERT INTO `deploy-ease-platform`.`deploy_environment` (`id`, `tenant_code`, `env_code`, `env_name`, `env_desc`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (1, NULL, 'LONGI-DEV', '隆基DEV', NULL, b'1', 1, 'admin', NOW(), 'admin', NOW(), 2, b'0');
INSERT INTO `deploy-ease-platform`.`deploy_environment` (`id`, `tenant_code`, `env_code`, `env_name`, `env_desc`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (2, NULL, 'LONGI-UAT', '隆基UAT', NULL, b'1', 1, 'admin', NOW(), 'admin', NOW(), 3, b'0');
-- [diff-viewer artifact, commented out] @@ -773,6 +775,8 @@ INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`,
-- Seed data: deploy_team_application rows 86-89 — team 5, apps 24/27/28/29, environment 5,
-- NATIVE builds from git system 6 on branch 'release/1.0-localization', workflow definition 2.
-- Target/deploy columns are NULL or empty: these rows configure source-only builds.
INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (86, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 24, 5, 'NATIVE', 6, 7, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2);
INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (87, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 27, 5, 'NATIVE', 6, 37, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2);
INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (88, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 28, 5, 'NATIVE', 6, 41, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2);
INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (89, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 29, 5, 'NATIVE', 6, 43, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2);
-- Seed data: deploy_team_environment_config rows 8-9 — approval and code review both disabled (b'0').
INSERT INTO `deploy-ease-platform`.`deploy_team_environment_config` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `environment_id`, `approval_required`, `approver_user_ids`, `require_code_review`, `remark`) VALUES (8, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 5, b'0', NULL, b'0', '');
INSERT INTO `deploy-ease-platform`.`deploy_team_environment_config` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `environment_id`, `approval_required`, `approver_user_ids`, `require_code_review`, `remark`) VALUES (9, 'admin', NOW(), 'admin', NOW(), 1, b'0', 4, 1, b'0', NULL, b'0', '');
-- [diff-viewer artifact, commented out] @@ -871,7 +875,8 @@ INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_ti
-- Seed data: schedule_job rows 14-16 — Git group/project/branch sync jobs for external system 4.
-- All three are seeded in 'DISABLED' status and must be enabled manually; method_params is a JSON
-- string ({"externalSystemId": 4}) passed to the target bean method.
INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (14, 'admin', NOW(), 'admin', NOW(), 27, b'0', '隆基Git仓库组同步', '定期同步Git仓库组信息每天凌晨2点执行', 2, 'repositoryGroupServiceImpl', 'syncGroups', NULL, '{\"externalSystemId\": 4}', '0 0 3 * * ?', 'DISABLED', b'0', NOW(), NOW(), 0, 0, 0, 3600, 2, '');
INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (15, 'admin', NOW(), 'admin', NOW(), 1212, b'0', '隆基Git项目同步', '定期同步Git项目信息每天凌晨3点执行', 2, 'repositoryProjectServiceImpl', 'syncProjects', NULL, '{\"externalSystemId\": 4}', '0 */5 * * * ?', 'DISABLED', b'0', NOW(), NOW(), 7, 7, 0, 3600, 2, '');
-- NOTE(review): rows 15 and 16 run every 5 minutes ('0 */5 * * * ?') although their descriptions
-- mention a daily schedule — looks intentional (descriptions are stale); confirm with the job owner.
INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (16, 'admin', NOW(), 'admin', NOW(), 5727, b'0', '隆基Git分支同步', '定期同步Git仓库分支信息每5分钟执行一次', 2, 'repositoryBranchServiceImpl', 'syncBranches', NULL, '{\"externalSystemId\": 4}', '0 */5 * * * ?', 'DISABLED', b'0', NOW(), NOW(), 7, 7, 0, 3600, 2, '');
-- Seed data: schedule_job row 17 — server resource alert collector
-- (bean serverMonitorScheduler.collectServerMetrics, notification channel 5, template 11).
-- FIX: the dump contained TWO INSERTs with the same primary key id=17 (a stale old/new pair
-- left over from a diff); executing both fails with a duplicate-key error. Only the newer
-- row is kept (version 56, updated by 'dengqichen', 10-minute cron '0 */10 * * * ?').
INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (17, 'admin', NOW(), 'dengqichen', NOW(), 56, b'0', '服务器预警', '', 4, 'serverMonitorScheduler', 'collectServerMetrics', NULL, '{\"notificationChannelId\": 5, \"resourceAlertTemplateId\": 11}', '0 */10 * * * ?', 'DISABLED', b'0', NOW(), NULL, 44, 43, 1, 300, 0, '');
-- 全局告警规则

-- [diff-viewer artifact, commented out] View File

-- [diff-viewer artifact, commented out; the INSERT INTO system_release (...) column list
-- preceding this hunk is missing from the export and must be restored from the source file]
-- @@ -12,18 +12,19 @@ INSERT INTO system_release (
)
VALUES (
'system', NOW(), 'system', NOW(), 1, 0,
1.19, 'ALL', NOW(),
'【后端】
-
- id
- SSH连接成功时插入的空监控记录
- status=''SUCCESS''
- 0
- 使退
- Popover /IP
- SSH connecting"连接中 → 已连接/断开"
- HTTP 300000ms 1800000ms
- DeleteDialog "删除失败 / 未知错误"使
- CPU// 0-100% 0-10000 MB/s 100
- 使
',
- CPU51h/6h/24h/7d/30dECharts图表和统计信息
- 使DialogContent默认提供的标准关闭按钮
- pageNum和pageSizeApplication分页实现
- ID列显示使
- ID徽章',
0, NULL, NULL, 0
);