diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/api/ServerMonitorApiController.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/api/ServerMonitorApiController.java new file mode 100644 index 00000000..ce90f0c8 --- /dev/null +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/api/ServerMonitorApiController.java @@ -0,0 +1,48 @@ +package com.qqchen.deploy.backend.deploy.api; + +import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsDTO; +import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsQuery; +import com.qqchen.deploy.backend.deploy.service.IServerMonitorService; +import com.qqchen.deploy.backend.framework.api.Response; +import io.swagger.v3.oas.annotations.Operation; +import io.swagger.v3.oas.annotations.Parameter; +import io.swagger.v3.oas.annotations.tags.Tag; +import jakarta.annotation.Resource; +import lombok.extern.slf4j.Slf4j; +import org.springframework.web.bind.annotation.*; + +/** + * 服务器监控数据 Controller + */ +@Slf4j +@RestController +@RequestMapping("/api/v1/server/monitor") +@Tag(name = "服务器监控数据", description = "服务器监控数据查询接口") +public class ServerMonitorApiController { + + @Resource + private IServerMonitorService serverMonitorService; + + @Operation( + summary = "查询服务器监控指标数据", + description = "支持快捷时间范围(最近1小时/6小时/24小时/7天/30天)和自定义时间范围," + + "支持按指标类型查询(CPU/MEMORY/DISK/NETWORK),自动根据时间范围选择合适的聚合粒度" + ) + @GetMapping("/{serverId}/metrics") + public Response getServerMetrics( + @Parameter(description = "服务器ID", required = true) + @PathVariable Long serverId, + + @Parameter(description = "查询参数(快捷时间范围、自定义时间、指标类型等)") + @ModelAttribute ServerMonitorMetricsQuery query + ) { + // 设置服务器ID(从路径参数) + query.setServerId(serverId); + + log.info("查询服务器监控指标: serverId={}, timeRange={}, metrics={}", + serverId, query.getTimeRange(), query.getMetrics()); + + ServerMonitorMetricsDTO response = serverMonitorService.getServerMetrics(query); + return Response.success(response); + } +} diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/dto/ServerMonitorMetricsDTO.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/dto/ServerMonitorMetricsDTO.java new file mode 100644 index 00000000..b1d1770a --- /dev/null +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/dto/ServerMonitorMetricsDTO.java @@ -0,0 +1,268 @@ +package com.qqchen.deploy.backend.deploy.dto; + +import io.swagger.v3.oas.annotations.media.Schema; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.math.BigDecimal; +import java.time.LocalDateTime; +import java.util.List; + +/** + * 服务器监控指标响应 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +@Schema(description = "服务器监控指标数据") +public class ServerMonitorMetricsDTO { + + @Schema(description = "服务器信息") + private ServerInfo server; + + @Schema(description = "时间范围信息") + private TimeRangeInfo timeRange; + + @Schema(description = "指标数据") + private MetricsData metrics; + + @Schema(description = "统计信息") + private StatisticsInfo statistics; + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "服务器信息") + public static class ServerInfo { + @Schema(description = "服务器ID") + private Long serverId; + + @Schema(description = "服务器名称") + private String serverName; + + @Schema(description = "主机IP") + private String hostIp; + + @Schema(description = "服务器状态") + private String status; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "时间范围信息") + public static 
class TimeRangeInfo { + @Schema(description = "开始时间") + private LocalDateTime startTime; + + @Schema(description = "结束时间") + private LocalDateTime endTime; + + @Schema(description = "聚合间隔") + private String interval; + + @Schema(description = "实际数据点数量") + private Integer dataPoints; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "指标数据") + public static class MetricsData { + @Schema(description = "CPU指标数据") + private List cpu; + + @Schema(description = "内存指标数据") + private List memory; + + @Schema(description = "网络指标数据") + private List network; + + @Schema(description = "磁盘指标数据") + private DiskMetric disk; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "CPU指标") + public static class CpuMetric { + @Schema(description = "时间点") + private LocalDateTime time; + + @Schema(description = "CPU使用率(%)") + private BigDecimal value; + + @Schema(description = "采集状态") + private String status; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "内存指标") + public static class MemoryMetric { + @Schema(description = "时间点") + private LocalDateTime time; + + @Schema(description = "内存使用率(%)") + private BigDecimal usagePercent; + + @Schema(description = "已用内存(GB)") + private Integer usedGB; + + @Schema(description = "采集状态") + private String status; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "网络指标") + public static class NetworkMetric { + @Schema(description = "时间点") + private LocalDateTime time; + + @Schema(description = "接收字节数") + private Long rxBytes; + + @Schema(description = "发送字节数") + private Long txBytes; + + @Schema(description = "接收速率(MB/s)") + private BigDecimal rxMBps; + + @Schema(description = "发送速率(MB/s)") + private BigDecimal txMBps; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "磁盘指标") + public static class DiskMetric { + @Schema(description = "最新采集时间") + private LocalDateTime latestTime; + + @Schema(description = "分区信息列表") + private List partitions; + + @Schema(description = "时间范围内最大使用率(%)") + private BigDecimal maxUsagePercent; + + @Schema(description = "最大使用率的分区") + private String maxUsagePartition; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "磁盘分区") + public static class DiskPartition { + @Schema(description = "挂载点") + private String mountPoint; + + @Schema(description = "文件系统") + private String fileSystem; + + @Schema(description = "总容量(GB)") + private Long totalSizeGB; + + @Schema(description = "已用容量(GB)") + private Long usedSizeGB; + + @Schema(description = "使用率(%)") + private BigDecimal usagePercent; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "统计信息") + public static class StatisticsInfo { + @Schema(description = "CPU统计") + private CpuStats cpu; + + @Schema(description = "内存统计") + private MemoryStats memory; + + @Schema(description = "网络统计") + private NetworkStats network; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "CPU统计") + public static class CpuStats { + @Schema(description = "平均值(%)") + private BigDecimal avg; + + @Schema(description = "最大值(%)") + private BigDecimal max; + + @Schema(description = "最小值(%)") + private BigDecimal min; + + @Schema(description = "峰值时间") + private LocalDateTime maxTime; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "内存统计") + public static class 
MemoryStats { + @Schema(description = "平均使用率(%)") + private BigDecimal avgPercent; + + @Schema(description = "最大使用率(%)") + private BigDecimal maxPercent; + + @Schema(description = "最小使用率(%)") + private BigDecimal minPercent; + + @Schema(description = "峰值时间") + private LocalDateTime maxTime; + } + + @Data + @Builder + @NoArgsConstructor + @AllArgsConstructor + @Schema(description = "网络统计") + public static class NetworkStats { + @Schema(description = "总接收字节数") + private Long totalRxBytes; + + @Schema(description = "总发送字节数") + private Long totalTxBytes; + + @Schema(description = "平均接收速率(MB/s)") + private BigDecimal avgRxMBps; + + @Schema(description = "平均发送速率(MB/s)") + private BigDecimal avgTxMBps; + + @Schema(description = "峰值接收速率(MB/s)") + private BigDecimal peakRxMBps; + + @Schema(description = "峰值发送速率(MB/s)") + private BigDecimal peakTxMBps; + } +} diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/dto/ServerMonitorMetricsQuery.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/dto/ServerMonitorMetricsQuery.java new file mode 100644 index 00000000..e7ba5cf1 --- /dev/null +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/dto/ServerMonitorMetricsQuery.java @@ -0,0 +1,27 @@ +package com.qqchen.deploy.backend.deploy.dto; + +import com.qqchen.deploy.backend.deploy.enums.MonitorTimeRange; +import com.qqchen.deploy.backend.framework.enums.MonitorMetricEnum; +import io.swagger.v3.oas.annotations.media.Schema; +import jakarta.validation.constraints.NotNull; +import lombok.Data; + +import java.util.List; + +/** + * 服务器监控指标查询参数 + */ +@Data +@Schema(description = "服务器监控指标查询参数") +public class ServerMonitorMetricsQuery { + + @Schema(description = "服务器ID", required = true) + private Long serverId; + + @NotNull(message = "时间范围不能为空") + @Schema(description = "时间范围", required = true, example = "LAST_1_HOUR") + private MonitorTimeRange timeRange; + + @Schema(description = "查询的指标类型列表(为空则查询所有)", example = "[\"CPU\", \"MEMORY\"]") + private List metrics; +} diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/enums/MonitorTimeRange.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/enums/MonitorTimeRange.java new file mode 100644 index 00000000..fc58013f --- /dev/null +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/enums/MonitorTimeRange.java @@ -0,0 +1,24 @@ +package com.qqchen.deploy.backend.deploy.enums; + +import lombok.AllArgsConstructor; +import lombok.Getter; + +import java.time.Duration; + +/** + * 监控时间范围枚举 + */ +@Getter +@AllArgsConstructor +public enum MonitorTimeRange { + LAST_1_HOUR("LAST_1_HOUR", "最近1小时", Duration.ofHours(1), "5m"), + LAST_6_HOURS("LAST_6_HOURS", "最近6小时", Duration.ofHours(6), "5m"), + LAST_24_HOURS("LAST_24_HOURS", "最近24小时", Duration.ofHours(24), "15m"), + LAST_7_DAYS("LAST_7_DAYS", "最近7天", Duration.ofDays(7), "1h"), + LAST_30_DAYS("LAST_30_DAYS", "最近30天", Duration.ofDays(30), "4h"); + + private final String code; + private final String name; + private final Duration duration; + private final String defaultInterval; +} diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/repository/IServerMonitorRepository.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/repository/IServerMonitorRepository.java index 65cc9e98..51f21109 100644 --- a/backend/src/main/java/com/qqchen/deploy/backend/deploy/repository/IServerMonitorRepository.java +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/repository/IServerMonitorRepository.java @@ -47,4 +47,19 @@ public interface 
IServerMonitorRepository extends JpaRepository findRecentMonitorRecords(@Param("serverId") Long serverId, org.springframework.data.domain.Pageable pageable); + + /** + * 查询指定服务器在时间范围内的监控记录(按时间正序) + * 用于监控数据展示 + * 只查询SUCCESS状态的记录(过滤连接失败的FAILURE记录) + */ + @Query("SELECT m FROM ServerMonitor m WHERE m.serverId = :serverId " + + "AND m.collectTime >= :startTime AND m.collectTime <= :endTime " + + "AND m.status = 'SUCCESS' " + + "ORDER BY m.collectTime ASC") + List findByServerIdAndTimeRange( + @Param("serverId") Long serverId, + @Param("startTime") LocalDateTime startTime, + @Param("endTime") LocalDateTime endTime + ); } diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/scheduler/ServerMonitorScheduler.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/scheduler/ServerMonitorScheduler.java index 28ec7cec..a252f967 100644 --- a/backend/src/main/java/com/qqchen/deploy/backend/deploy/scheduler/ServerMonitorScheduler.java +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/scheduler/ServerMonitorScheduler.java @@ -198,22 +198,14 @@ public class ServerMonitorScheduler { passphrase ); - // 3. 连接成功,插入成功记录 - ServerMonitor successRecord = ServerMonitor.builder() - .serverId(server.getId()) - .status(com.qqchen.deploy.backend.framework.enums.StatusEnum.SUCCESS) - .collectTime(LocalDateTime.now()) - .build(); - monitorService.saveMonitorRecord(successRecord); - - // 4. 解除服务器状态告警(如果存在) + // 3. 解除服务器状态告警(如果存在) try { alertService.resolveServerStatusAlert(server.getId()); } catch (Exception e) { log.warn("解除服务器状态告警失败: serverId={}", server.getId(), e); } - // 5. 更新服务器状态为ONLINE + // 4. 更新服务器状态为ONLINE if (server.getStatus() == ServerStatusEnum.OFFLINE) { server.setStatus(ServerStatusEnum.ONLINE); server.setLastConnectTime(LocalDateTime.now()); diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/IServerMonitorService.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/IServerMonitorService.java index d722071f..6db944cd 100644 --- a/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/IServerMonitorService.java +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/IServerMonitorService.java @@ -1,6 +1,8 @@ package com.qqchen.deploy.backend.deploy.service; import com.qqchen.deploy.backend.deploy.dto.ServerMonitorDataDTO; +import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsDTO; +import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsQuery; import com.qqchen.deploy.backend.deploy.entity.ServerMonitor; import java.time.LocalDateTime; @@ -46,4 +48,15 @@ public interface IServerMonitorService { * @return 连续失败次数 */ int countConsecutiveFailures(Long serverId, int checkLimit); + + /** + * 查询服务器监控指标数据 + * 支持快捷时间范围和自定义时间范围 + * 支持按指标类型查询(CPU、内存、磁盘、网络) + * 自动根据时间范围选择合适的聚合粒度 + * + * @param query 查询参数 + * @return 监控指标数据 + */ + ServerMonitorMetricsDTO getServerMetrics(ServerMonitorMetricsQuery query); } diff --git a/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/impl/ServerMonitorServiceImpl.java b/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/impl/ServerMonitorServiceImpl.java index 92c0707b..edb96571 100644 --- a/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/impl/ServerMonitorServiceImpl.java +++ b/backend/src/main/java/com/qqchen/deploy/backend/deploy/service/impl/ServerMonitorServiceImpl.java @@ -1,21 +1,33 @@ package com.qqchen.deploy.backend.deploy.service.impl; -import com.fasterxml.jackson.core.JsonProcessingException; -import 
com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.core.type.TypeReference; import com.qqchen.deploy.backend.deploy.dto.ServerMonitorDataDTO; +import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsDTO; +import com.qqchen.deploy.backend.deploy.dto.ServerMonitorMetricsQuery; +import com.qqchen.deploy.backend.deploy.entity.Server; import com.qqchen.deploy.backend.deploy.entity.ServerMonitor; +import com.qqchen.deploy.backend.deploy.enums.MonitorTimeRange; import com.qqchen.deploy.backend.deploy.repository.IServerMonitorRepository; +import com.qqchen.deploy.backend.deploy.repository.IServerRepository; import com.qqchen.deploy.backend.deploy.service.IServerMonitorService; +import com.qqchen.deploy.backend.framework.enums.MonitorMetricEnum; +import com.qqchen.deploy.backend.framework.enums.ResponseCode; import com.qqchen.deploy.backend.framework.enums.StatusEnum; +import com.qqchen.deploy.backend.framework.exception.BusinessException; +import com.qqchen.deploy.backend.framework.utils.JsonUtils; import jakarta.annotation.Resource; import lombok.extern.slf4j.Slf4j; import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.Pageable; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; +import org.springframework.util.CollectionUtils; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.time.Duration; import java.time.LocalDateTime; -import java.util.List; +import java.util.*; import java.util.stream.Collectors; /** @@ -29,7 +41,7 @@ public class ServerMonitorServiceImpl implements IServerMonitorService { private IServerMonitorRepository monitorRepository; @Resource - private ObjectMapper objectMapper; + private IServerRepository serverRepository; @Override @Transactional @@ -67,10 +79,9 @@ public class ServerMonitorServiceImpl implements IServerMonitorService { private ServerMonitor convertToEntity(ServerMonitorDataDTO dto) { String diskUsageJson = null; if (dto.getDiskUsage() != null && !dto.getDiskUsage().isEmpty()) { - try { - diskUsageJson = objectMapper.writeValueAsString(dto.getDiskUsage()); - } catch (JsonProcessingException e) { - log.error("序列化磁盘使用率失败", e); + diskUsageJson = JsonUtils.toJson(dto.getDiskUsage()); + if (diskUsageJson == null) { + log.error("序列化磁盘使用率失败"); } } @@ -119,4 +130,408 @@ public class ServerMonitorServiceImpl implements IServerMonitorService { log.debug("统计连续失败次数: serverId={}, count={}", serverId, failureCount); return failureCount; } + + @Override + public ServerMonitorMetricsDTO getServerMetrics(ServerMonitorMetricsQuery query) { + // 1. 参数校验 + if (query == null || query.getServerId() == null) { + throw new BusinessException(ResponseCode.INVALID_PARAM, new Object[]{"服务器ID不能为空"}); + } + if (query.getTimeRange() == null) { + throw new BusinessException(ResponseCode.INVALID_PARAM, new Object[]{"时间范围不能为空"}); + } + + // 2. 查询服务器信息 + Server server = serverRepository.findById(query.getServerId()) + .orElseThrow(() -> new BusinessException(ResponseCode.DATA_NOT_FOUND, new Object[]{"服务器不存在"})); + + // 3. 根据枚举确定时间范围和聚合间隔 + MonitorTimeRange timeRange = query.getTimeRange(); + LocalDateTime endTime = LocalDateTime.now(); + LocalDateTime startTime = endTime.minus(timeRange.getDuration()); + String interval = timeRange.getDefaultInterval(); + + // 4. 
查询监控数据 + List monitorData = monitorRepository.findByServerIdAndTimeRange( + query.getServerId(), startTime, endTime); + + if (CollectionUtils.isEmpty(monitorData)) { + log.warn("未查询到监控数据: serverId={}, timeRange={}, startTime={}, endTime={}", + query.getServerId(), timeRange, startTime, endTime); + return buildEmptyResponse(server, startTime, endTime, interval); + } + + // 5. 根据聚合间隔处理数据 + List aggregatedData = aggregateData(monitorData, interval); + + // 6. 构建响应 + return buildMetricsResponse(server, aggregatedData, startTime, endTime, interval, query.getMetrics()); + } + + /** + * 数据聚合处理 + */ + private List aggregateData(List data, String interval) { + if ("5m".equals(interval) || data.size() <= 100) { + return data; + } + + int aggregateMinutes = getAggregateMinutes(interval); + Map> groupedData = data.stream() + .collect(Collectors.groupingBy(m -> + m.getCollectTime().minusMinutes(m.getCollectTime().getMinute() % aggregateMinutes) + .withSecond(0).withNano(0).toString() + )); + + return groupedData.values().stream() + .map(this::aggregateMonitorGroup) + .sorted(Comparator.comparing(ServerMonitor::getCollectTime)) + .collect(Collectors.toList()); + } + + /** + * 获取聚合时间间隔(分钟) + */ + private int getAggregateMinutes(String interval) { + return switch (interval) { + case "15m" -> 15; + case "1h" -> 60; + case "4h" -> 240; + default -> 5; + }; + } + + /** + * 聚合一组监控数据(取平均值) + */ + private ServerMonitor aggregateMonitorGroup(List group) { + if (group.size() == 1) { + return group.get(0); + } + + List successRecords = group.stream() + .filter(m -> StatusEnum.SUCCESS.equals(m.getStatus())) + .collect(Collectors.toList()); + + if (successRecords.isEmpty()) { + return group.get(0); + } + + BigDecimal avgCpu = successRecords.stream() + .map(ServerMonitor::getCpuUsage) + .filter(Objects::nonNull) + .reduce(BigDecimal.ZERO, BigDecimal::add) + .divide(BigDecimal.valueOf(successRecords.size()), 2, RoundingMode.HALF_UP); + + BigDecimal avgMemory = successRecords.stream() + .map(ServerMonitor::getMemoryUsage) + .filter(Objects::nonNull) + .reduce(BigDecimal.ZERO, BigDecimal::add) + .divide(BigDecimal.valueOf(successRecords.size()), 2, RoundingMode.HALF_UP); + + return ServerMonitor.builder() + .serverId(group.get(0).getServerId()) + .cpuUsage(avgCpu) + .memoryUsage(avgMemory) + .memoryUsed(successRecords.get(0).getMemoryUsed()) + .diskUsage(successRecords.get(successRecords.size() - 1).getDiskUsage()) + .networkRx(successRecords.stream().mapToLong(m -> m.getNetworkRx() != null ? m.getNetworkRx() : 0).sum()) + .networkTx(successRecords.stream().mapToLong(m -> m.getNetworkTx() != null ? 
m.getNetworkTx() : 0).sum()) + .collectTime(group.get(0).getCollectTime()) + .status(StatusEnum.SUCCESS) + .build(); + } + + /** + * 构建空响应 + */ + private ServerMonitorMetricsDTO buildEmptyResponse(Server server, LocalDateTime startTime, + LocalDateTime endTime, String interval) { + return ServerMonitorMetricsDTO.builder() + .server(ServerMonitorMetricsDTO.ServerInfo.builder() + .serverId(server.getId()) + .serverName(server.getServerName()) + .hostIp(server.getHostIp()) + .status(server.getStatus().name()) + .build()) + .timeRange(ServerMonitorMetricsDTO.TimeRangeInfo.builder() + .startTime(startTime) + .endTime(endTime) + .interval(interval) + .dataPoints(0) + .build()) + .metrics(ServerMonitorMetricsDTO.MetricsData.builder() + .cpu(new ArrayList<>()) + .memory(new ArrayList<>()) + .network(new ArrayList<>()) + .build()) + .statistics(ServerMonitorMetricsDTO.StatisticsInfo.builder().build()) + .build(); + } + + /** + * 构建完整的监控指标响应 + */ + private ServerMonitorMetricsDTO buildMetricsResponse(Server server, List data, + LocalDateTime startTime, LocalDateTime endTime, + String interval, List requestedMetrics) { + Set metricsToInclude = requestedMetrics == null || requestedMetrics.isEmpty() + ? EnumSet.allOf(MonitorMetricEnum.class) + : EnumSet.copyOf(requestedMetrics); + + // 先构建所有metrics数据(避免重复计算) + List networkMetrics = + metricsToInclude.contains(MonitorMetricEnum.NETWORK) ? buildNetworkMetrics(data) : null; + + ServerMonitorMetricsDTO.MetricsData metricsData = ServerMonitorMetricsDTO.MetricsData.builder() + .cpu(metricsToInclude.contains(MonitorMetricEnum.CPU) ? buildCpuMetrics(data) : null) + .memory(metricsToInclude.contains(MonitorMetricEnum.MEMORY) ? buildMemoryMetrics(data) : null) + .network(networkMetrics) + .disk(metricsToInclude.contains(MonitorMetricEnum.DISK) ? buildDiskMetric(data) : null) + .build(); + + ServerMonitorMetricsDTO.StatisticsInfo statistics = ServerMonitorMetricsDTO.StatisticsInfo.builder() + .cpu(metricsToInclude.contains(MonitorMetricEnum.CPU) ? buildCpuStats(data) : null) + .memory(metricsToInclude.contains(MonitorMetricEnum.MEMORY) ? buildMemoryStats(data) : null) + .network(metricsToInclude.contains(MonitorMetricEnum.NETWORK) ? 
buildNetworkStats(data, networkMetrics) : null) + .build(); + + return ServerMonitorMetricsDTO.builder() + .server(ServerMonitorMetricsDTO.ServerInfo.builder() + .serverId(server.getId()) + .serverName(server.getServerName()) + .hostIp(server.getHostIp()) + .status(server.getStatus().name()) + .build()) + .timeRange(ServerMonitorMetricsDTO.TimeRangeInfo.builder() + .startTime(startTime) + .endTime(endTime) + .interval(interval) + .dataPoints(data.size()) + .build()) + .metrics(metricsData) + .statistics(statistics) + .build(); + } + + private List buildCpuMetrics(List data) { + return data.stream() + .map(m -> ServerMonitorMetricsDTO.CpuMetric.builder() + .time(m.getCollectTime()) + .value(m.getCpuUsage()) + .status(m.getStatus().name()) + .build()) + .collect(Collectors.toList()); + } + + private List buildMemoryMetrics(List data) { + return data.stream() + .map(m -> ServerMonitorMetricsDTO.MemoryMetric.builder() + .time(m.getCollectTime()) + .usagePercent(m.getMemoryUsage()) + .usedGB(m.getMemoryUsed()) + .status(m.getStatus().name()) + .build()) + .collect(Collectors.toList()); + } + + private List buildNetworkMetrics(List data) { + List metrics = new ArrayList<>(); + for (int i = 0; i < data.size(); i++) { + ServerMonitor current = data.get(i); + BigDecimal rxMBps = BigDecimal.ZERO; + BigDecimal txMBps = BigDecimal.ZERO; + + if (i > 0 && current.getNetworkRx() != null && current.getNetworkTx() != null) { + ServerMonitor previous = data.get(i - 1); + long timeDiff = Duration.between(previous.getCollectTime(), current.getCollectTime()).getSeconds(); + if (timeDiff > 0 && previous.getNetworkRx() != null && previous.getNetworkTx() != null) { + long rxDiff = current.getNetworkRx() - previous.getNetworkRx(); + long txDiff = current.getNetworkTx() - previous.getNetworkTx(); + rxMBps = BigDecimal.valueOf(rxDiff).divide(BigDecimal.valueOf(timeDiff * 1024 * 1024), 2, RoundingMode.HALF_UP); + txMBps = BigDecimal.valueOf(txDiff).divide(BigDecimal.valueOf(timeDiff * 1024 * 1024), 2, RoundingMode.HALF_UP); + } + } + + metrics.add(ServerMonitorMetricsDTO.NetworkMetric.builder() + .time(current.getCollectTime()) + .rxBytes(current.getNetworkRx()) + .txBytes(current.getNetworkTx()) + .rxMBps(rxMBps) + .txMBps(txMBps) + .build()); + } + return metrics; + } + + private ServerMonitorMetricsDTO.DiskMetric buildDiskMetric(List data) { + ServerMonitor latest = data.get(data.size() - 1); + if (latest.getDiskUsage() == null) { + return null; + } + + try { + List> diskData = JsonUtils.fromJson( + latest.getDiskUsage(), new TypeReference>>() {}); + + List partitions = diskData.stream() + .map(disk -> ServerMonitorMetricsDTO.DiskPartition.builder() + .mountPoint((String) disk.get("mountPoint")) + .fileSystem((String) disk.get("fileSystem")) + .totalSizeGB(((Number) disk.get("totalSize")).longValue()) + .usedSizeGB(((Number) disk.get("usedSize")).longValue()) + .usagePercent(BigDecimal.valueOf(((Number) disk.get("usagePercent")).doubleValue())) + .build()) + .collect(Collectors.toList()); + + BigDecimal maxUsage = partitions.stream() + .map(ServerMonitorMetricsDTO.DiskPartition::getUsagePercent) + .max(BigDecimal::compareTo) + .orElse(BigDecimal.ZERO); + + String maxPartition = partitions.stream() + .filter(p -> p.getUsagePercent().compareTo(maxUsage) == 0) + .map(ServerMonitorMetricsDTO.DiskPartition::getMountPoint) + .findFirst() + .orElse(null); + + return ServerMonitorMetricsDTO.DiskMetric.builder() + .latestTime(latest.getCollectTime()) + .partitions(partitions) + .maxUsagePercent(maxUsage) + 
.maxUsagePartition(maxPartition) + .build(); + } catch (Exception e) { + log.error("解析磁盘数据失败", e); + return null; + } + } + + private ServerMonitorMetricsDTO.CpuStats buildCpuStats(List data) { + List successData = data.stream() + .filter(m -> StatusEnum.SUCCESS.equals(m.getStatus()) && m.getCpuUsage() != null) + .collect(Collectors.toList()); + + if (successData.isEmpty()) { + return null; + } + + BigDecimal avg = successData.stream() + .map(ServerMonitor::getCpuUsage) + .reduce(BigDecimal.ZERO, BigDecimal::add) + .divide(BigDecimal.valueOf(successData.size()), 2, RoundingMode.HALF_UP); + + ServerMonitor maxRecord = successData.stream() + .max(Comparator.comparing(ServerMonitor::getCpuUsage)) + .orElse(null); + + BigDecimal min = successData.stream() + .map(ServerMonitor::getCpuUsage) + .min(BigDecimal::compareTo) + .orElse(null); + + return ServerMonitorMetricsDTO.CpuStats.builder() + .avg(avg) + .max(maxRecord != null ? maxRecord.getCpuUsage() : null) + .min(min) + .maxTime(maxRecord != null ? maxRecord.getCollectTime() : null) + .build(); + } + + private ServerMonitorMetricsDTO.MemoryStats buildMemoryStats(List data) { + List successData = data.stream() + .filter(m -> StatusEnum.SUCCESS.equals(m.getStatus()) && m.getMemoryUsage() != null) + .collect(Collectors.toList()); + + if (successData.isEmpty()) { + return null; + } + + BigDecimal avg = successData.stream() + .map(ServerMonitor::getMemoryUsage) + .reduce(BigDecimal.ZERO, BigDecimal::add) + .divide(BigDecimal.valueOf(successData.size()), 2, RoundingMode.HALF_UP); + + ServerMonitor maxRecord = successData.stream() + .max(Comparator.comparing(ServerMonitor::getMemoryUsage)) + .orElse(null); + + BigDecimal min = successData.stream() + .map(ServerMonitor::getMemoryUsage) + .min(BigDecimal::compareTo) + .orElse(null); + + return ServerMonitorMetricsDTO.MemoryStats.builder() + .avgPercent(avg) + .maxPercent(maxRecord != null ? maxRecord.getMemoryUsage() : null) + .minPercent(min) + .maxTime(maxRecord != null ? maxRecord.getCollectTime() : null) + .build(); + } + + private ServerMonitorMetricsDTO.NetworkStats buildNetworkStats(List data, + List networkMetrics) { + if (networkMetrics == null || networkMetrics.isEmpty()) { + return null; + } + + // 计算总流量(从原始数据) + List successData = data.stream() + .filter(m -> StatusEnum.SUCCESS.equals(m.getStatus()) + && m.getNetworkRx() != null && m.getNetworkTx() != null) + .collect(Collectors.toList()); + + if (successData.isEmpty()) { + return null; + } + + long totalRx = successData.stream().mapToLong(ServerMonitor::getNetworkRx).sum(); + long totalTx = successData.stream().mapToLong(ServerMonitor::getNetworkTx).sum(); + + // 计算速率统计(从已构建的 NetworkMetric 数据中提取) + BigDecimal avgRxMBps = null; + BigDecimal avgTxMBps = null; + BigDecimal peakRxMBps = null; + BigDecimal peakTxMBps = null; + + // 跳过第一个数据点(无前置数据,速率为0),从第二个开始统计 + List validMetrics = networkMetrics.size() > 1 + ? 
networkMetrics.subList(1, networkMetrics.size()) + : new ArrayList<>(); + + if (!validMetrics.isEmpty()) { + // 平均接收速率 + avgRxMBps = validMetrics.stream() + .map(ServerMonitorMetricsDTO.NetworkMetric::getRxMBps) + .reduce(BigDecimal.ZERO, BigDecimal::add) + .divide(BigDecimal.valueOf(validMetrics.size()), 2, RoundingMode.HALF_UP); + + // 平均发送速率 + avgTxMBps = validMetrics.stream() + .map(ServerMonitorMetricsDTO.NetworkMetric::getTxMBps) + .reduce(BigDecimal.ZERO, BigDecimal::add) + .divide(BigDecimal.valueOf(validMetrics.size()), 2, RoundingMode.HALF_UP); + + // 峰值接收速率 + peakRxMBps = validMetrics.stream() + .map(ServerMonitorMetricsDTO.NetworkMetric::getRxMBps) + .max(BigDecimal::compareTo) + .orElse(null); + + // 峰值发送速率 + peakTxMBps = validMetrics.stream() + .map(ServerMonitorMetricsDTO.NetworkMetric::getTxMBps) + .max(BigDecimal::compareTo) + .orElse(null); + } + + return ServerMonitorMetricsDTO.NetworkStats.builder() + .totalRxBytes(totalRx) + .totalTxBytes(totalTx) + .avgRxMBps(avgRxMBps) + .avgTxMBps(avgTxMBps) + .peakRxMBps(peakRxMBps) + .peakTxMBps(peakTxMBps) + .build(); + } } diff --git a/backend/src/main/java/com/qqchen/deploy/backend/framework/monitor/ServerMonitorService.java b/backend/src/main/java/com/qqchen/deploy/backend/framework/monitor/ServerMonitorService.java index 321dd946..fd56934c 100644 --- a/backend/src/main/java/com/qqchen/deploy/backend/framework/monitor/ServerMonitorService.java +++ b/backend/src/main/java/com/qqchen/deploy/backend/framework/monitor/ServerMonitorService.java @@ -18,7 +18,7 @@ import java.math.BigDecimal; * @since 2025-12-07 */ @Slf4j -@Service +@Service("frameworkServerMonitorService") public class ServerMonitorService { /** diff --git a/backend/src/main/java/com/qqchen/deploy/backend/framework/service/impl/BaseServiceImpl.java b/backend/src/main/java/com/qqchen/deploy/backend/framework/service/impl/BaseServiceImpl.java index 04168048..319816e3 100644 --- a/backend/src/main/java/com/qqchen/deploy/backend/framework/service/impl/BaseServiceImpl.java +++ b/backend/src/main/java/com/qqchen/deploy/backend/framework/service/impl/BaseServiceImpl.java @@ -162,9 +162,12 @@ public abstract class BaseServiceImpl, D extends BaseDTO, Q } protected Sort createSort(BaseQuery query) { - return query != null && StringUtils.hasText(query.getSortField()) ? - Sort.by(Sort.Direction.fromString(query.getSortOrder()), query.getSortField()) : - Sort.by(Sort.Direction.DESC, "createTime"); + // 添加id作为第二排序条件,确保分页数据稳定性(避免相同排序值时数据重复或丢失) + if (query != null && StringUtils.hasText(query.getSortField())) { + Sort.Direction direction = Sort.Direction.fromString(query.getSortOrder()); + return Sort.by(direction, query.getSortField()).and(Sort.by(direction, "id")); + } + return Sort.by(Sort.Direction.DESC, "createTime").and(Sort.by(Sort.Direction.DESC, "id")); } private void buildQueryPredicate(BaseQuery query, BooleanBuilder builder) { @@ -327,19 +330,15 @@ public abstract class BaseServiceImpl, D extends BaseDTO, Q protected PageRequest createPageRequest(BaseQuery query) { if (query == null) { - return PageRequest.of(0, 10, Sort.by(Sort.Direction.DESC, "createTime")); + return PageRequest.of(0, 10, createSort(null)); } // 处理分页参数(前端 pageNum 从 0 开始) int pageNum = query.getPageNum() != null ? Math.max(0, query.getPageNum()) : 0; int pageSize = query.getPageSize() != null ? Math.max(1, Math.min(query.getPageSize(), 100)) : 10; - // 处理排序 - Sort sort = StringUtils.hasText(query.getSortField()) ? 
- Sort.by(Sort.Direction.fromString(query.getSortOrder()), query.getSortField()) : - Sort.by(Sort.Direction.DESC, "createTime"); - - return PageRequest.of(pageNum, pageSize, sort); + // 使用统一的排序逻辑(包含id作为第二排序条件) + return PageRequest.of(pageNum, pageSize, createSort(query)); } @Transactional( diff --git a/backend/src/main/resources/application.yml b/backend/src/main/resources/application.yml index 35600e8d..ec2b76da 100644 --- a/backend/src/main/resources/application.yml +++ b/backend/src/main/resources/application.yml @@ -100,10 +100,12 @@ logging: org.springframework.web: DEBUG org.springframework.context.i18n: DEBUG org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping: TRACE - org.hibernate.SQL: INFO + org.hibernate.SQL: DEBUG org.hibernate.type.descriptor.sql: TRACE org.hibernate.type.descriptor.sql.BasicBinder: TRACE - org.hibernate.orm.jdbc.bind: INFO + org.hibernate.orm.jdbc.bind: TRACE + org.hibernate.type: TRACE + com.querydsl.jpa: DEBUG com.qqchen.deploy.backend.framework.utils.EntityPathResolver: DEBUG com.qqchen.deploy.backend: DEBUG # 屏蔽 SSHJ 底层日志(SecureRandom、Transport 等无业务价值的日志) diff --git a/backend/src/main/resources/db/changelog/init/v1.0.0-data.sql b/backend/src/main/resources/db/changelog/init/v1.0.0-data.sql index 5c363061..0f6037bd 100644 --- a/backend/src/main/resources/db/changelog/init/v1.0.0-data.sql +++ b/backend/src/main/resources/db/changelog/init/v1.0.0-data.sql @@ -681,6 +681,8 @@ INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_ INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (25, 'etl-executor', 'etl-executor', 'etl执行器', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0'); INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (26, 'etl-scheduler', 'etl-scheduler', 'etl调度器', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0'); INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (27, 'themetis-control-panel-server', 'themetis-control-panel-server', '控制台', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0'); +INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (28, 'themetis-permission-server', 'themetis-permission-server', '权限系统', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0'); +INSERT INTO `deploy-ease-platform`.`deploy_application` (`id`, `app_code`, `app_name`, `app_desc`, `language`, `application_category_id`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (29, 'themetis-gateway', 'themetis-gateway', '网关', 0, 2, b'1', 0, 'admin', NOW(), 'admin', NOW(), 1, b'0'); INSERT INTO `deploy-ease-platform`.`deploy_environment` (`id`, `tenant_code`, `env_code`, `env_name`, `env_desc`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES 
(1, NULL, 'LONGI-DEV', '隆基DEV', NULL, b'1', 1, 'admin', NOW(), 'admin', NOW(), 2, b'0'); INSERT INTO `deploy-ease-platform`.`deploy_environment` (`id`, `tenant_code`, `env_code`, `env_name`, `env_desc`, `enabled`, `sort`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`) VALUES (2, NULL, 'LONGI-UAT', '隆基UAT', NULL, b'1', 1, 'admin', NOW(), 'admin', NOW(), 3, b'0'); @@ -773,6 +775,8 @@ INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (86, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 24, 5, 'NATIVE', 6, 7, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2); INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (87, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 27, 5, 'NATIVE', 6, 37, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2); +INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (88, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 28, 5, 'NATIVE', 6, 41, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2); +INSERT INTO `deploy-ease-platform`.`deploy_team_application` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `application_id`, `environment_id`, `build_type`, `source_git_system_id`, `source_git_project_id`, `source_branch`, `target_git_system_id`, `target_git_project_id`, `target_branch`, `deploy_system_id`, `deploy_job`, `workflow_definition_id`) VALUES (89, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 29, 5, 'NATIVE', 6, 43, 'release/1.0-localization', NULL, NULL, NULL, NULL, '', 2); INSERT INTO `deploy-ease-platform`.`deploy_team_environment_config` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `environment_id`, `approval_required`, `approver_user_ids`, `require_code_review`, `remark`) VALUES (8, 'admin', NOW(), 'admin', NOW(), 1, b'0', 5, 5, b'0', NULL, b'0', ''); INSERT INTO `deploy-ease-platform`.`deploy_team_environment_config` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `team_id`, `environment_id`, `approval_required`, `approver_user_ids`, `require_code_review`, `remark`) VALUES (9, 'admin', NOW(), 'admin', NOW(), 1, b'0', 4, 1, b'0', NULL, b'0', ''); @@ -871,7 +875,8 @@ INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_ti INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, 
`deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (14, 'admin', NOW(), 'admin', NOW(), 27, b'0', '隆基Git仓库组同步', '定期同步Git仓库组信息,每天凌晨2点执行', 2, 'repositoryGroupServiceImpl', 'syncGroups', NULL, '{\"externalSystemId\": 4}', '0 0 3 * * ?', 'DISABLED', b'0', NOW(), NOW(), 0, 0, 0, 3600, 2, ''); INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (15, 'admin', NOW(), 'admin', NOW(), 1212, b'0', '隆基Git项目同步', '定期同步Git项目信息,每天凌晨3点执行', 2, 'repositoryProjectServiceImpl', 'syncProjects', NULL, '{\"externalSystemId\": 4}', '0 */5 * * * ?', 'DISABLED', b'0', NOW(), NOW(), 7, 7, 0, 3600, 2, ''); INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (16, 'admin', NOW(), 'admin', NOW(), 5727, b'0', '隆基Git分支同步', '定期同步Git仓库分支信息,每5分钟执行一次', 2, 'repositoryBranchServiceImpl', 'syncBranches', NULL, '{\"externalSystemId\": 4}', '0 */5 * * * ?', 'DISABLED', b'0', NOW(), NOW(), 7, 7, 0, 3600, 2, ''); -INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (17, 'admin', NOW(), 'admin', NOW(), 49, b'0', '服务器预警', '', 4, 'serverMonitorScheduler', 'collectServerMetrics', NULL, '{\"notificationChannelId\": 5, \"resourceAlertTemplateId\": 11}', '0 */5 * * * ?', 'DISABLED', b'0', NOW(), NULL, 39, 38, 1, 300, 0, ''); +INSERT INTO `deploy-ease-platform`.`schedule_job` (`id`, `create_by`, `create_time`, `update_by`, `update_time`, `version`, `deleted`, `job_name`, `job_description`, `category_id`, `bean_name`, `method_name`, `form_definition_id`, `method_params`, `cron_expression`, `status`, `concurrent`, `last_execute_time`, `next_execute_time`, `execute_count`, `success_count`, `fail_count`, `timeout_seconds`, `retry_count`, `alert_email`) VALUES (17, 'admin', NOW(), 'dengqichen', NOW(), 56, b'0', '服务器预警', '', 4, 'serverMonitorScheduler', 'collectServerMetrics', NULL, '{\"notificationChannelId\": 5, \"resourceAlertTemplateId\": 11}', '0 */10 * * * ?', 'DISABLED', b'0', NOW(), NULL, 44, 43, 1, 300, 0, ''); + -- 全局告警规则 diff --git a/backend/src/main/resources/db/changelog/sql/20251209141300-01.sql b/backend/src/main/resources/db/changelog/sql/20251209141300-01.sql index fbefef9d..6080e296 100644 --- 
a/backend/src/main/resources/db/changelog/sql/20251209141300-01.sql +++ b/backend/src/main/resources/db/changelog/sql/20251209141300-01.sql @@ -12,18 +12,19 @@ INSERT INTO system_release ( ) VALUES ( 'system', NOW(), 'system', NOW(), 1, 0, - 1.18, 'ALL', NOW(), + 1.19, 'ALL', NOW(), '【后端】 -- 常规性性能优化 +- 分页查询排序稳定性修复:添加 id 作为第二排序条件,解决当主排序字段值相同时分页数据重复或丢失的问题 +- 服务器监控数据采集优化:移除SSH连接成功时插入的空监控记录,避免数据冗余 +- 服务器监控数据查询优化:添加 status=''SUCCESS'' 过滤条件,只查询有效监控数据,排除连接失败记录 +- 服务器监控数据展示优化:优化数据过滤逻辑,跳过第一个无前置数据的点,保留速率为0的正常监控数据 【前端】 -- 告警规则表单优化:数字输入框使用本地状态管理,解决清空后回退到默认值问题 -- 规则范围选择器优化:升级为带搜索的 Popover 组件,支持按服务器名称/IP 搜索,数据加载改为列表接口 -- SSH 终端连接状态修复:调整初始状态为 connecting,并放宽延迟连接条件,确保打开终端时按"连接中 → 已连接/断开"正常生命周期显示 -- 工作流 HTTP 请求节点优化:移除超时时间 300000ms 的前端最大值限制,支持设置更长的超时时间(如半小时 1800000ms) -- 应用删除错误提示优化:删除 DeleteDialog 中自定义"删除失败 / 未知错误"提示,统一使用全局请求拦截器展示后端返回的业务错误消息 -- 告警规则阈值范围优化:根据告警类型动态设置阈值上限,CPU/内存/磁盘为 0-100%,网络流量为 0-10000 MB/s,解决网络流量阈值无法设置超过 100 的问题 -- 系统版本发布表单优化:延迟执行和预计时长输入框使用本地状态管理,修复清空后自动回填默认值的问题 -', + +- 服务器监控功能:实现服务器历史监控数据查询和可视化展示,支持CPU、内存、网络、磁盘四类指标,提供5个预设时间范围(1h/6h/24h/7d/30d),集成ECharts图表和统计信息,支持自动刷新 +- 监控弹窗优化:移除重复的关闭按钮,使用DialogContent默认提供的标准关闭按钮 +- 服务器列表分页修复:统一分页参数命名为pageNum和pageSize,参考Application分页实现,修复点击第一页不加载数据的问题 +- 服务器列表表格增强:添加ID列显示,使用等宽字体和灰色样式 +- 服务器卡片视觉优化:在卡片左上角添加圆形通知样式的ID徽章,悬浮显示不占内容空间', 0, NULL, NULL, 0 );
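Usage sketch for the new endpoint. The path and query parameter names below come from ServerMonitorApiController and ServerMonitorMetricsQuery; the base URL, server id, and the use of Spring's RestClient (available since Spring Framework 6.1) are illustrative assumptions, not part of this change. With @ModelAttribute binding, the metrics list can be passed as repeated metrics parameters or as a comma-separated value.

import org.springframework.web.client.RestClient;

public class ServerMetricsClientExample {

    public static void main(String[] args) {
        // Base URL and server id are placeholders for illustration only.
        RestClient client = RestClient.create("http://localhost:8080");

        String json = client.get()
                .uri(uriBuilder -> uriBuilder
                        .path("/api/v1/server/monitor/{serverId}/metrics")
                        .queryParam("timeRange", "LAST_24_HOURS")
                        .queryParam("metrics", "CPU", "MEMORY")
                        .build(1L))
                .retrieve()
                .body(String.class);

        // The body is the Response envelope produced by Response.success(...);
        // it is printed raw here instead of being deserialized into the DTO.
        System.out.println(json);
    }
}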
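Note on aggregateData: the bucket key is computed as collectTime.minusMinutes(collectTime.getMinute() % aggregateMinutes), which truncates only the minute-of-hour field. That lands on exact boundaries for the 15m and 1h intervals, but for the 4h interval (240 minutes) getMinute() % 240 still equals the minute value, so 30-day queries end up bucketed per hour rather than per 4 hours. A minimal sketch of an epoch-based truncation that handles any bucket size is below; bucketStart is a hypothetical helper for illustration, and it assumes UTC where the real code would need the zone collectTime is stored in.

import java.time.LocalDateTime;
import java.time.ZoneOffset;

public final class BucketUtil {

    private BucketUtil() {
    }

    // Truncates a timestamp to the start of its aggregation bucket by working on
    // epoch minutes, so 15m, 60m and 240m buckets all align correctly.
    static LocalDateTime bucketStart(LocalDateTime time, int bucketMinutes) {
        long epochMinutes = time.toEpochSecond(ZoneOffset.UTC) / 60;
        long bucketStartMinutes = (epochMinutes / bucketMinutes) * bucketMinutes;
        return LocalDateTime.ofEpochSecond(bucketStartMinutes * 60, 0, ZoneOffset.UTC);
    }

    public static void main(String[] args) {
        // 240-minute bucket: 2025-12-10T13:47 -> 2025-12-10T12:00
        System.out.println(bucketStart(LocalDateTime.of(2025, 12, 10, 13, 47), 240));
        // 60-minute bucket: 2025-12-10T13:47 -> 2025-12-10T13:00
        System.out.println(bucketStart(LocalDateTime.of(2025, 12, 10, 13, 47), 60));
    }
}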
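The per-point network rate in buildNetworkMetrics is (currentBytes - previousBytes) / (seconds * 1024 * 1024), i.e. MB/s from adjacent samples; for example, 150 MB received over a 300-second window works out to 0.5 MB/s. If networkRx/networkTx are cumulative interface counters, as the delta-based calculation suggests, a counter reset (e.g. after a reboot) makes the delta negative and yields a negative rate. The sketch below clamps such deltas to zero; it is an illustration of one possible guard, not part of the patch.

import java.math.BigDecimal;
import java.math.RoundingMode;

public final class NetworkRateUtil {

    private static final long BYTES_PER_MB = 1024L * 1024L;

    private NetworkRateUtil() {
    }

    // Converts a byte-counter delta over a time window into MB/s, treating negative
    // deltas (counter reset) as "no usable sample" rather than a negative rate.
    static BigDecimal rateMBps(long previousBytes, long currentBytes, long seconds) {
        if (seconds <= 0) {
            return BigDecimal.ZERO;
        }
        long delta = Math.max(0, currentBytes - previousBytes);
        return BigDecimal.valueOf(delta)
                .divide(BigDecimal.valueOf(seconds * BYTES_PER_MB), 2, RoundingMode.HALF_UP);
    }

    public static void main(String[] args) {
        // 150 MB in 300 s -> 0.50 MB/s
        System.out.println(rateMBps(0, 150L * BYTES_PER_MB, 300));
        // Counter reset between samples -> clamped to 0.00
        System.out.println(rateMBps(500L * BYTES_PER_MB, 10L * BYTES_PER_MB, 300));
    }
}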
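The createSort change in BaseServiceImpl fixes a common pagination pitfall: when many rows share the same createTime (or the same custom sort value), the database may return them in any order, so rows can show up on two pages or be skipped entirely. Appending id as a deterministic tiebreaker removes that ambiguity. A minimal sketch of the resulting Sort objects, assuming the Spring Data Sort API used in the diff:

import org.springframework.data.domain.Sort;

public class SortExample {

    public static void main(String[] args) {
        // Custom sort field with "id" appended as a stable tiebreaker.
        Sort byCpu = Sort.by(Sort.Direction.DESC, "cpuUsage")
                .and(Sort.by(Sort.Direction.DESC, "id"));

        // Default sort used when no sort field is supplied in the query.
        Sort byCreateTime = Sort.by(Sort.Direction.DESC, "createTime")
                .and(Sort.by(Sort.Direction.DESC, "id"));

        // Each Sort now carries the id ordering as its last clause.
        System.out.println(byCpu);
        System.out.println(byCreateTime);
    }
}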