Merge remote-tracking branch 'origin/master'

Van0313 committed 2025-06-03 22:16:31 +08:00
4 changed files with 63 additions and 38 deletions


@@ -450,41 +450,42 @@ public class JDScheduleJob {
         return client.execute(request);
     }
     @Scheduled(cron = "0 0 8-20 * * ?") // runs every hour from 8:00 to 20:00 each day
     public void fetchPL() {
+        logger.info("Starting fetchPL task");
         // allow at most 3 executions per day
-        String cacheKey = "fetchPL:executedHours";
         Set<String> executedHours = getExecutedHoursFromRedis(); // get the hours already executed from Redis
         LocalDateTime now = LocalDateTime.now();
         String currentHour = String.valueOf(now.getHour());
         // skip if the task has already run 3 times today
         if (executedHours.size() >= 3) {
+            logger.info("Already executed 3 times today, skipping this run");
             return;
         }
         // randomly decide whether to run this time (about a 50% chance)
         if (new Random().nextBoolean()) {
+            logger.info("Executing fetchPL task");
             // run the task logic
             executeFetchPL();
             // record that this hour has been executed
             executedHours.add(currentHour);
             saveExecutedHoursToRedis(executedHours); // save to Redis
         }
     }
     private void executeFetchPL() {
         HashMap<String, String> productTypeMap = jdUtil.getProductTypeMap();
         int allCommentCount = 0;
-        int usedCommentCount = 0;
-        int canUseComentCount = 0;
-        int addCommentCount = 0;
+        int usedCommentCount;
+        int canUseComentCount;
+        int addCommentCount;
         for (Map.Entry<String, String> entry : productTypeMap.entrySet()) {
             // sleep for a random 1-5 minutes
-            int sleepTime = new Random().nextInt(300) + 60;
+            int sleepTime = new Random().nextInt(3000) + 60;
             try {
                 Thread.sleep(sleepTime * 1000);
             } catch (InterruptedException e) {
@@ -497,8 +498,8 @@ public void fetchPL() {
             canUseComentCount = availableComments.size();
             usedCommentCount = usedComments.size();
-            if (canUseComentCount > 5){
-                logger.info("Product {} has more than 5 available comments: {}", product_id, canUseComentCount);
+            if (canUseComentCount > 5) {
+                logger.info("Product {} has more than 5 available comments: {}", product_id, canUseComentCount);
                 return;
             }
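
The fetchPL job above depends on two Redis helpers, getExecutedHoursFromRedis() and saveExecutedHoursToRedis(), whose bodies are not part of this diff. The sketch below shows one way they could be written with Spring Data Redis; it assumes a StringRedisTemplate bean, a date-scoped key, and a 24-hour expiry so the three-runs-per-day limit resets by itself. The class name ExecutedHoursStore, the key format, and the TTL are illustrative assumptions, not code from this repository; in the job class the two methods would simply be private members.

// A minimal sketch, assuming spring-boot-starter-data-redis and a StringRedisTemplate bean.
// ExecutedHoursStore, the key format, and the 24-hour expiry are illustrative assumptions.
import java.time.LocalDate;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;

@Component
public class ExecutedHoursStore {

    private final StringRedisTemplate redisTemplate;

    public ExecutedHoursStore(StringRedisTemplate redisTemplate) {
        this.redisTemplate = redisTemplate;
    }

    // One Redis set per calendar day, e.g. "fetchPL:executedHours:2025-06-03".
    private String key() {
        return "fetchPL:executedHours:" + LocalDate.now();
    }

    // Returns the hours (as strings) in which the job has already run today.
    public Set<String> getExecutedHoursFromRedis() {
        Set<String> hours = redisTemplate.opsForSet().members(key());
        return hours != null ? hours : new HashSet<>();
    }

    // Records the executed hours and lets the key expire so the per-day limit resets.
    public void saveExecutedHoursToRedis(Set<String> executedHours) {
        if (executedHours.isEmpty()) {
            return;
        }
        redisTemplate.opsForSet().add(key(), executedHours.toArray(new String[0]));
        redisTemplate.expire(key(), 24, TimeUnit.HOURS);
    }
}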


@@ -28,7 +28,8 @@ spring:
     execution:
       pool:
         core-size: 32
   jpa:
+    show-sql: false
 # token configuration
 token:
@@ -52,6 +53,10 @@ logging:
   level:
     cn.van333: debug
     org.springframework: warn
+    org.hibernate: ERROR
+    org.springframework.web: WARN
+    org.apache.http: WARN
+    com.zaxxer.hikari: ERROR
 rocketmq:
   name-server: 192.168.8.88:9876 # RocketMQ Name Server address
   producer:


@@ -1,10 +0,0 @@
-# Root logger configuration
-log4j.rootLogger=DEBUG, file
-# Rolling file Appender
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.File=logs/app.log
-log4j.appender.file.MaxFileSize=10MB
-log4j.appender.file.MaxBackupIndex=30
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} [%t] %-5p %c - %m%n


@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+    <!-- Console output -->
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+    <!-- File output, rolled over by date -->
+    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>logs/app.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <!-- Split the log file by day -->
+            <fileNamePattern>logs/app.%d{yyyy-MM-dd}.log</fileNamePattern>
+            <!-- Keep the last 15 days of logs -->
+            <maxHistory>15</maxHistory>
+        </rollingPolicy>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+    <!-- Root logger at INFO, writing to both console and file -->
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+        <appender-ref ref="FILE"/>
+    </root>
+</configuration>
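
The log statements introduced in the Java changes above go through SLF4J, which Spring Boot routes to this Logback configuration. The sketch below shows the conventional way a class obtains and uses such a logger, including the lazy {} placeholders seen in the diff; the class LoggingUsageExample and its values are placeholders for illustration, not code from this repository.

// A minimal sketch of standard SLF4J usage, as routed to the Logback configuration above.
// LoggingUsageExample and its values are placeholders, not code from this repository.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingUsageExample {

    // Conventionally a private static final logger named after the enclosing class.
    private static final Logger logger = LoggerFactory.getLogger(LoggingUsageExample.class);

    public void run() {
        // {} placeholders are substituted only when the message is actually logged,
        // so disabled levels cost no string concatenation.
        int availableComments = 7;
        logger.info("Product {} has {} available comments", "12345", availableComments);
    }

    public static void main(String[] args) {
        new LoggingUsageExample().run();
    }
}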