Merge remote-tracking branch 'origin/develop' into develop

This commit is contained in:
shahaibo
2024-01-26 13:18:37 +08:00
24 changed files with 1167 additions and 82 deletions

View File

@@ -0,0 +1,73 @@
package com.ai.da.common.utils;
import com.ai.da.common.config.exception.BusinessException;
import com.ai.da.model.dto.GenerateToPythonDTO;
import com.ai.da.python.PythonService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.concurrent.*;
@Slf4j
@Component
public class AsyncCallerUtil {
public static Map<String, Boolean> waitingStatus = new HashMap<>();
private static PythonService pythonService;
@Autowired
public void setPythonService(PythonService pythonService) {
AsyncCallerUtil.pythonService = pythonService;
}
public CompletableFuture<List<String>> callGenerateAsync(GenerateToPythonDTO generateToPython) {
return CompletableFuture.supplyAsync(() -> pythonService.generateSketchOrPrint(generateToPython));
}
public List<String> generate(GenerateToPythonDTO generateToPython) {
ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(5);
String taskId = generateToPython.getTasks_id();
ScheduledFuture<?> timeoutTask = null;
if (!waitingStatus.containsKey(taskId)) waitingStatus.put(taskId, true);
try {
CompletableFuture<List<String>> generateResult = callGenerateAsync(generateToPython);
// 5秒后第一次确认之后每隔10秒确认一次用户选择结果
timeoutTask = scheduledExecutorService.scheduleAtFixedRate(() -> {
// 调用另一个接口获取用户的选择
if (!waitingStatus.get(taskId)) {
// 如果用户选择取消则取消对generate的调用
generateResult.cancel(true);
waitingStatus.remove(taskId);
} else log.info("===============持续等待===============");
}, 5, 10, TimeUnit.SECONDS);
log.info("阻塞等待结果...");
// 阻塞,等待结果
List<String> result = generateResult.get();
// 取消定时任务
timeoutTask.cancel(true);
waitingStatus.remove(taskId);
return result;
} catch (CancellationException e) {
// generateResult.cancel(true);通过抛出异常取消该任务
log.info("==========成功取消generate任务==========");
return null;
} catch (InterruptedException | ExecutionException | BusinessException e) {
// 处理异常
log.error("发生错误 " + e);
// 取消定时任务
assert timeoutTask != null;
timeoutTask.cancel(true);
throw new BusinessException(e.getMessage());
} finally {
// 关闭线程池
// executorService.shutdown();
// scheduledExecutorService.shutdown();
}
}
}

View File

@@ -0,0 +1,126 @@
package com.ai.da.common.utils;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.ZSetOperations;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import javax.annotation.Resource;
import java.util.Map;
import java.util.Set;
@Slf4j
@Component
public class RedisUtil {
    @Resource
    private RedisTemplate<String, String> redisTemplate;

    // - - - - - - - - - - - - - sorted-set (ZSet) helpers - - - - - - - - - - - - -

    /**
     * Adds {@code value} to the sorted set stored at {@code key} with the given score.
     */
    public void addToZSet(String key, String value, Double score) {
        ZSetOperations<String, String> zSetOps = redisTemplate.opsForZSet();
        zSetOps.add(key, value, score);
    }

    /**
     * Removes {@code value} from the sorted set stored at {@code key}.
     */
    public void removeFromZSet(String key, String value) {
        redisTemplate.opsForZSet().remove(key, value);
    }

    /**
     * Returns the zero-based rank of {@code value} in ascending score order,
     * or null when the member is absent.
     */
    public Long getRank(String key, String value) {
        return redisTemplate.opsForZSet().rank(key, value);
    }

    /**
     * Returns the highest score currently in the sorted set PLUS one, or 1.0 when the
     * set is empty — i.e. the next score to assign, despite the method name.
     */
    public Double getMaxScore(String key) {
        Set<ZSetOperations.TypedTuple<String>> top =
                redisTemplate.opsForZSet().reverseRangeWithScores(key, 0, 0);
        if (CollectionUtils.isEmpty(top)) {
            return 1.0;
        }
        Double highest = top.iterator().next().getScore();
        return highest + 1.0;
    }

    /**
     * Returns true when {@code value} has a score in the sorted set at {@code key}.
     */
    public Boolean isElementExistsInZSet(String key, String value) {
        Double score = redisTemplate.opsForZSet().score(key, value);
        return score != null;
    }

    /**
     * Returns the number of members in the sorted set at {@code key}.
     */
    public Long getZSetTotal(String key) {
        return redisTemplate.opsForZSet().zCard(key);
    }

    // - - - - - - - - - - - - - plain set helpers - - - - - - - - - - - - -

    /**
     * Adds {@code value} to the set stored at {@code key}.
     */
    public void addToSet(String key, String value) {
        redisTemplate.opsForSet().add(key, value);
    }

    /**
     * Removes {@code value} from the set stored at {@code key}.
     */
    public void removeFromSet(String key, String value) {
        redisTemplate.opsForSet().remove(key, value);
    }

    /**
     * Returns true when {@code obj} is a member of the set stored at {@code key}.
     */
    public Boolean isElementExistsInSet(String key, String obj) {
        return redisTemplate.opsForSet().isMember(key, obj);
    }

    // - - - - - - - - - - - - - hash helpers - - - - - - - - - - - - -

    /**
     * Stores every entry of {@code map} into the hash at {@code key}.
     */
    public void addToMap(String key, Map<String, String> map) {
        redisTemplate.opsForHash().putAll(key, map);
    }

    /**
     * Returns true when the hash at {@code key} contains the field {@code hashKey}.
     */
    public Boolean isElementExistsInMap(String key, String hashKey) {
        return redisTemplate.opsForHash().hasKey(key, hashKey);
    }

    /**
     * Returns the field {@code key2} of the hash at {@code key1} rendered as a string.
     * Note: a missing field yields the literal string "null" (String.valueOf of null).
     */
    public String getMapValue(String key1, String key2) {
        Object raw = redisTemplate.opsForHash().get(key1, key2);
        return String.valueOf(raw);
    }

    /**
     * Deletes the given field from the hash at {@code key}.
     *
     * @return the number of fields actually removed
     */
    public Long removeFromMap(String key, String hashKeys) {
        return redisTemplate.opsForHash().delete(key, hashKeys);
    }
}

View File

@@ -0,0 +1,137 @@
package com.ai.da.common.utils;
public class SnowflakeUtil {
// ==============================Fields===========================================
/** 开始时间截 (2015-01-01) */
private final long twepoch = 1420041600000L;
/** 机器id所占的位数 */
private final long workerIdBits = 5L;
/** 数据标识id所占的位数 */
private final long datacenterIdBits = 5L;
/** 支持的最大机器id结果是31 (这个移位算法可以很快的计算出几位二进制数所能表示的最大十进制数) */
private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
/** 支持的最大数据标识id结果是31 */
private final long maxDatacenterId = -1L ^ (-1L << datacenterIdBits);
/** 序列在id中占的位数 */
private final long sequenceBits = 12L;
/** 机器ID向左移12位 */
private final long workerIdShift = sequenceBits;
/** 数据标识id向左移17位(12+5) */
private final long datacenterIdShift = sequenceBits + workerIdBits;
/** 时间截向左移22位(5+5+12) */
private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;
/** 生成序列的掩码这里为4095 (0b111111111111=0xfff=4095) */
private final long sequenceMask = -1L ^ (-1L << sequenceBits);
/** 工作机器ID(0~31) */
private long workerId;
/** 数据中心ID(0~31) */
private long datacenterId;
/** 毫秒内序列(0~4095) */
private long sequence = 0L;
/** 上次生成ID的时间截 */
private long lastTimestamp = -1L;
//==============================Constructors=====================================
/**
* 构造函数
* @param workerId 工作ID (0~31)
* @param datacenterId 数据中心ID (0~31)
*/
public SnowflakeUtil(long workerId, long datacenterId) {
if (workerId > maxWorkerId || workerId < 0) {
throw new IllegalArgumentException(String.format
("worker Id can't be greater than %d or less than 0", maxWorkerId));
}
if (datacenterId > maxDatacenterId || datacenterId < 0) {
throw new IllegalArgumentException(String.format
("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
}
this.workerId = workerId;
this.datacenterId = datacenterId;
}
// ==============================Methods==========================================
/**
* 获得下一个ID (该方法是线程安全的)
* @return SnowflakeId
*/
public synchronized long nextId() {
long timestamp = timeGen();
//如果当前时间小于上一次ID生成的时间戳说明系统时钟回退过这个时候应当抛出异常
if (timestamp < lastTimestamp) {
throw new RuntimeException(
String.format
("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
}
//如果是同一时间生成的,则进行毫秒内序列
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask;
//毫秒内序列溢出
if (sequence == 0) {
//阻塞到下一个毫秒,获得新的时间戳
timestamp = tilNextMillis(lastTimestamp);
}
}
//时间戳改变,毫秒内序列重置
else {
sequence = 0L;
}
//上次生成ID的时间截
lastTimestamp = timestamp;
//移位并通过或运算拼到一起组成64位的ID
return ((timestamp - twepoch) << timestampLeftShift) //
| (datacenterId << datacenterIdShift) //
| (workerId << workerIdShift) //
| sequence;
}
/**
* 阻塞到下一个毫秒,直到获得新的时间戳
* @param lastTimestamp 上次生成ID的时间截
* @return 当前时间戳
*/
protected long tilNextMillis(long lastTimestamp) {
long timestamp = timeGen();
while (timestamp <= lastTimestamp) {
timestamp = timeGen();
}
return timestamp;
}
/**
* 返回以毫秒为单位的当前时间
* @return 当前时间(毫秒)
*/
protected long timeGen() {
return System.currentTimeMillis();
}
//==============================Test=============================================
/** 测试 */
public static void main(String[] args) {
SnowflakeUtil idWorker = new SnowflakeUtil(0, 0);
long id = idWorker.nextId();
System.out.println("id:"+id);
//id:768842202204864512
}
}