增加生成后端服务代码。

This commit is contained in:
dengqichen 2025-10-29 21:44:49 +08:00
parent e5f0f3bba4
commit 7c49715172
6 changed files with 134 additions and 2537 deletions

2
backend/.cursorignore Normal file
View File

@ -0,0 +1,2 @@
# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv)
target/*

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -2,9 +2,11 @@ package com.qqchen.deploy.backend.schedule.aspect;
import com.qqchen.deploy.backend.schedule.annotation.MonitoredJob;
import com.qqchen.deploy.backend.schedule.dto.JobStatusDTO;
import com.qqchen.deploy.backend.schedule.entity.ScheduleJob;
import com.qqchen.deploy.backend.schedule.entity.ScheduleJobLog;
import com.qqchen.deploy.backend.schedule.enums.JobStatusEnum;
import com.qqchen.deploy.backend.schedule.repository.IScheduleJobLogRepository;
import com.qqchen.deploy.backend.schedule.repository.IScheduleJobRepository;
import com.qqchen.deploy.backend.schedule.service.JobProgressReporter;
import com.qqchen.deploy.backend.schedule.service.JobStatusRedisService;
import jakarta.annotation.Resource;
@ -13,6 +15,10 @@ import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.reflect.MethodSignature;
import org.quartz.CronTrigger;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.Trigger;
import org.quartz.TriggerKey;
import org.springframework.stereotype.Component;
import java.io.PrintWriter;
@ -20,7 +26,9 @@ import java.io.StringWriter;
import java.net.InetAddress;
import java.time.Duration;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.Arrays;
import java.util.Date;
import java.util.stream.Collectors;
/**
@ -43,6 +51,12 @@ public class JobMonitorAspect {
@Resource
private IScheduleJobLogRepository jobLogRepository;
@Resource
private IScheduleJobRepository jobRepository;
@Resource
private Scheduler scheduler;
@Around("@annotation(monitoredJob)")
public Object monitor(ProceedingJoinPoint joinPoint, MonitoredJob monitoredJob) throws Throwable {
Long jobId = monitoredJob.jobId();
@ -75,16 +89,22 @@ public class JobMonitorAspect {
// 4. 保存成功日志到数据库
saveLog(jobId, jobName, beanName, methodName, startTime, JobStatusEnum.SUCCESS, "任务执行成功", null);
// 5. 更新任务统计信息成功
updateJobStatistics(jobId, true);
return result;
} catch (Throwable e) {
// 5. 记录失败状态到Redis
// 6. 记录失败状态到Redis
log.error("任务执行失败: jobId={}", jobId, e);
saveStatus(jobId, jobName, "FAIL", null, "任务执行失败: " + e.getMessage(), startTime);
// 6. 保存失败日志到数据库
// 7. 保存失败日志到数据库
saveLog(jobId, jobName, beanName, methodName, startTime, JobStatusEnum.FAIL, "任务执行失败", e);
// 8. 更新任务统计信息失败
updateJobStatistics(jobId, false);
throw e;
} finally {
@ -148,6 +168,63 @@ public class JobMonitorAspect {
}
}
/**
 * Updates a job's execution statistics after a run completes.
 *
 * Increments the total/success/fail counters (null-safe for legacy rows where
 * the counters were never initialized), stamps the last execution time, asks
 * Quartz for the next fire time, and persists the entity. Statistics updates
 * are best-effort: any failure is logged and swallowed so bookkeeping can
 * never break the job execution pipeline.
 *
 * @param jobId   the schedule job's database ID
 * @param success whether the monitored execution succeeded
 */
private void updateJobStatistics(Long jobId, boolean success) {
    try {
        ScheduleJob job = jobRepository.findById(jobId).orElse(null);
        if (job == null) {
            log.warn("更新任务统计失败任务不存在jobId={}", jobId);
            return;
        }
        // Increment the total execution counter.
        Integer executeCount = job.getExecuteCount();
        job.setExecuteCount(executeCount == null ? 1 : executeCount + 1);
        // Increment the success or failure counter.
        if (success) {
            Integer successCount = job.getSuccessCount();
            job.setSuccessCount(successCount == null ? 1 : successCount + 1);
        } else {
            Integer failCount = job.getFailCount();
            job.setFailCount(failCount == null ? 1 : failCount + 1);
        }
        // Record when this execution happened.
        job.setLastExecuteTime(LocalDateTime.now());
        // Resolve the next fire time from Quartz. Use the Trigger base
        // interface: the previous cast to CronTrigger threw a
        // ClassCastException for jobs scheduled with a SimpleTrigger (or any
        // other non-cron trigger); getNextFireTime() is declared on Trigger.
        try {
            TriggerKey triggerKey = TriggerKey.triggerKey("trigger_" + jobId, "DEFAULT");
            Trigger trigger = scheduler.getTrigger(triggerKey);
            if (trigger != null) {
                Date nextFireTime = trigger.getNextFireTime();
                if (nextFireTime != null) {
                    job.setNextExecuteTime(LocalDateTime.ofInstant(
                            nextFireTime.toInstant(),
                            ZoneId.systemDefault()
                    ));
                }
            }
        } catch (SchedulerException e) {
            // Next-fire-time lookup is optional metadata; keep the counters update alive.
            log.warn("获取下次执行时间失败jobId={}", jobId, e);
        }
        // Persist the updated statistics.
        jobRepository.save(job);
        log.info("任务统计信息已更新jobId={}, executeCount={}, successCount={}, failCount={}",
                jobId, job.getExecuteCount(), job.getSuccessCount(), job.getFailCount());
    } catch (Exception e) {
        // Best-effort bookkeeping: never propagate statistics failures to the caller.
        log.error("更新任务统计失败jobId={}", jobId, e);
    }
}
/**
* 获取异常堆栈信息
*/

View File

@ -4,13 +4,61 @@ import com.qqchen.deploy.backend.framework.converter.BaseConverter;
import com.qqchen.deploy.backend.schedule.dto.ScheduleJobDTO;
import com.qqchen.deploy.backend.schedule.entity.ScheduleJob;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.MappingTarget;
import org.mapstruct.Mappings;
/**
 * MapStruct converter between {@code ScheduleJob} entities and {@code ScheduleJobDTO}s.
 *
 * Notes:
 * 1. executeCount / successCount / failCount / lastExecuteTime are statistics fields
 *    managed automatically by the backend.
 * 2. On create they must be ignored so the entity defaults (= 0) take effect.
 * 3. On update they must be ignored so the frontend cannot overwrite them.
 *
 * @author qichen
 */
@Mapper(config = BaseConverter.class)
public interface ScheduleJobConverter extends BaseConverter<ScheduleJob, ScheduleJobDTO> {
/**
 * Overrides the parent toEntity to ignore the backend-managed statistics fields,
 * letting the entity defaults (executeCount = 0, etc.) take effect.
 */
@Override
@Mappings({
@Mapping(target = "id", source = "id"),
@Mapping(target = "createTime", source = "createTime"),
@Mapping(target = "createBy", source = "createBy"),
@Mapping(target = "updateTime", source = "updateTime"),
@Mapping(target = "updateBy", source = "updateBy"),
@Mapping(target = "version", source = "version"),
@Mapping(target = "deleted", source = "deleted"),
@Mapping(target = "executeCount", ignore = true),
@Mapping(target = "successCount", ignore = true),
@Mapping(target = "failCount", ignore = true),
@Mapping(target = "lastExecuteTime", ignore = true)
})
ScheduleJob toEntity(ScheduleJobDTO dto);
/**
 * Overrides the parent updateEntity to ignore every field the frontend must not modify.
 * Note: when overriding a parent method, the parent's {@code @Mapping} annotations are
 * lost and must be redeclared here in full.
 */
@Override
@Mappings({
@Mapping(target = "id", ignore = true),
@Mapping(target = "createTime", ignore = true),
@Mapping(target = "createBy", ignore = true),
@Mapping(target = "updateTime", ignore = true),
@Mapping(target = "updateBy", ignore = true),
@Mapping(target = "version", ignore = true),
@Mapping(target = "deleted", ignore = true),
@Mapping(target = "executeCount", ignore = true),
@Mapping(target = "successCount", ignore = true),
@Mapping(target = "failCount", ignore = true),
@Mapping(target = "lastExecuteTime", ignore = true)
})
void updateEntity(@MappingTarget ScheduleJob entity, ScheduleJobDTO dto);
}

View File

@ -72,6 +72,9 @@ public class ScheduleJobServiceImpl extends BaseServiceImpl<ScheduleJob, Schedul
@Resource
private RedisUtil redisUtil;
@Resource
private com.qqchen.deploy.backend.schedule.service.JobStatusRedisService jobStatusRedisService;
@Override
@Transactional
public ScheduleJobDTO create(ScheduleJobDTO dto) {
@ -276,8 +279,8 @@ public class ScheduleJobServiceImpl extends BaseServiceImpl<ScheduleJob, Schedul
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
for (ScheduleJob job : allJobs) {
String statusKey = "schedule:job:status:" + job.getId();
JobStatusDTO statusDTO = (JobStatusDTO) redisUtil.get(statusKey);
// 使用 JobStatusRedisService 读取 Hash 类型的状态数据
JobStatusDTO statusDTO = jobStatusRedisService.getJobStatus(job.getId());
if (statusDTO != null && "RUNNING".equals(statusDTO.getStatus())) {
JobDashboardDTO.RunningJobDTO runningJob = JobDashboardDTO.RunningJobDTO.builder()