雪花算法这里不再赘述,其缺点是有状态(多副本部署时,依赖手动配置 workId 和 datacenterId),代码如下:
java
/**
* 雪花算法ID生成器
*/
public class SnowflakeIdWorker {
/**
* 开始时间截 (2017-01-01)
*/
private static final long twepoch = 1483200000000L;
/**
* 机器id所占的位数
*/
private static final long workerIdBits = 5L;
/**
* 数据标识id所占的位数
*/
private static final long datacenterIdBits = 5L;
/**
* 支持的最大机器id,结果是31 (这个移位算法可以很快的计算出几位二进制数所能表示的最大十进制数)
*/
private static final long maxWorkerId = ~(-1L << workerIdBits);
/**
* 支持的最大数据标识id,结果是31
*/
private static final long maxDatacenterId = ~(-1L << datacenterIdBits);
/**
* 序列在id中占的位数
*/
private final long sequenceBits = 12L;
/**
* 机器ID向左移12位
*/
private final long workerIdShift = sequenceBits;
/**
* 数据标识id向左移17位(12+5)
*/
private final long datacenterIdShift = sequenceBits + workerIdBits;
/**
* 时间截向左移22位(5+5+12)
*/
private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;
/**
* 生成序列的掩码,这里为4095 (0b111111111111=0xfff=4095)
*/
private final long sequenceMask = ~(-1L << sequenceBits);
/**
* 工作机器ID(0~31)
*/
private long workerId;
/**
* 数据中心ID(0~31)
*/
private long datacenterId;
/**
* 毫秒内序列(0~4095)
*/
private long sequence = 0L;
/**
* 上次生成ID的时间截
*/
private long lastTimestamp = -1L;
/**
* 构造函数
*
* @param workerId 工作ID (0~31)
* @param datacenterId 数据中心ID (0~31)
*/
public SnowflakeIdWorker(long workerId, long datacenterId) {
if (workerId > maxWorkerId || workerId < 0) {
throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
}
if (datacenterId > maxDatacenterId || datacenterId < 0) {
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
}
this.workerId = workerId;
this.datacenterId = datacenterId;
}
/**
* 获得下一个ID (该方法是线程安全的)
*
* @return SnowflakeId
*/
public synchronized String getId() {
long id = nextId();
return id+"";
}
/**
* 获得下一个ID (该方法是线程安全的)
*
* @return SnowflakeId
*/
public synchronized long nextId() {
long timestamp = timeGen();
//如果当前时间小于上一次ID生成的时间戳,说明系统时钟回退过这个时候应当抛出异常
if (timestamp < lastTimestamp) {
throw new RuntimeException(
String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
}
//如果是同一时间生成的,则进行毫秒内序列
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask;
//毫秒内序列溢出
if (sequence == 0) {
//阻塞到下一个毫秒,获得新的时间戳
timestamp = tilNextMillis(lastTimestamp);
}
}
//时间戳改变,毫秒内序列重置
else {
sequence = 0L;
}
//上次生成ID的时间截
lastTimestamp = timestamp;
//移位并通过或运算拼到一起组成64位的ID
return ((timestamp - twepoch) << timestampLeftShift)
| (datacenterId << datacenterIdShift)
| (workerId << workerIdShift)
| sequence;
}
/**
* 阻塞到下一个毫秒,直到获得新的时间戳
*
* @param lastTimestamp 上次生成ID的时间截
* @return 当前时间戳
*/
protected long tilNextMillis(long lastTimestamp) {
long timestamp = timeGen();
while (timestamp <= lastTimestamp) {
timestamp = timeGen();
}
return timestamp;
}
/**
* 返回以毫秒为单位的当前时间
*
* @return 当前时间(毫秒)
*/
protected long timeGen() {
return System.currentTimeMillis();
}
/**
* 测试
*/
public static void main(String[] args) throws ParseException {
SnowflakeIdWorker idWorker = new SnowflakeIdWorker(1, 1);
ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(10, Integer.MAX_VALUE, 5, TimeUnit.SECONDS, new SynchronousQueue<>());
for (int i = 0; i < 10000; i++) {
threadPoolExecutor.execute(() -> {
long start = System.currentTimeMillis();
long id = idWorker.nextId();
System.out.println("id:"+id);
});
}
threadPoolExecutor.shutdown();
}
}
我们可以利用redis分布式锁,在服务启动时获取,实现无状态:
1、利用redis分布式锁获取,解决多副本冲突。
java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.net.InetAddress;
import java.net.UnknownHostException;
@Configuration
public class SnowBeanConfig {
    private static final Logger logger = LoggerFactory.getLogger(SnowBeanConfig.class);
    /** Redis SET/SETNX reply indicating the key was acquired. */
    private final static String OK = "OK";
    /** Highest worker/datacenter id (5 bits → 0..31). */
    private final static int DATA_SIZE = 31;
    /** TTL (seconds) of the id-claim key in redis; also the startup wait. */
    @Value("${cus.snowflake.expire:40}")
    private int snowflakeExpireTime;
    @Autowired
    private IRedis redis;

    /**
     * Claims a (dataCenterId, workId) slot via redis and builds the generator.
     *
     * @return a SnowflakeIdWorker bound to the claimed slot
     * @throws RuntimeException if no slot could be claimed
     */
    @Bean
    public SnowflakeIdWorker getIdWorker() {
        try {
            // Wait one full TTL so stale claims held by crashed replicas
            // (which stopped refreshing their key) expire before we scan.
            logger.info("Get snow work by redis and wait for {}s...", snowflakeExpireTime);
            Thread.sleep(snowflakeExpireTime * 1000L);
            getSnowWorkIdByRedis();
            Integer dataCenterId = SnowWorkIdLocalCache.getDataCenterId();
            Integer workerId = SnowWorkIdLocalCache.getWorkId();
            logger.info("Host: {} SnowFlake success get dataCenterId: {}, workerId: {}", getHostName(), dataCenterId, workerId);
            if (dataCenterId == null || workerId == null) {
                throw new RuntimeException("get SnowflakeIdWorker error");
            }
            return new SnowflakeIdWorker(workerId, dataCenterId);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so the container can observe it.
            Thread.currentThread().interrupt();
            throw new RuntimeException("get SnowflakeIdWorker error", e);
        } catch (Exception e) {
            throw new RuntimeException("get SnowflakeIdWorker error", e);
        }
    }

    /**
     * Scans all 32x32 slots and claims the first free one with SETNX.
     * SETNX only succeeds for the first replica asking for a given slot,
     * which is what prevents duplicate (dataCenterId, workId) pairs.
     *
     * @throws UnknownHostException if the local host name cannot be resolved
     * @throws RuntimeException if every slot is already taken
     */
    private void getSnowWorkIdByRedis() throws UnknownHostException {
        for (int dataCenterId = 0; dataCenterId <= DATA_SIZE; dataCenterId++) {
            for (int workId = 0; workId <= DATA_SIZE; workId++) {
                String key = Const.Cache.SERVER_NAME + ":snow:id:" + dataCenterId + "_" + workId;
                String value = getHostName();
                String result = redis.setNx(key, value, snowflakeExpireTime);
                logger.info("redis setNx key:[{}],value:[{}],seconds:[{}]", key, value, snowflakeExpireTime);
                if (OK.equals(result)) {
                    SnowWorkIdLocalCache.setCache(workId, dataCenterId);
                    return;
                }
            }
        }
        throw new RuntimeException("get SnowflakeIdWorker error");
    }

    /**
     * Best-effort local host name; empty string when unresolved.
     * (Replaces the previous commons-lang {@code StringUtils.EMPTY}, which was
     * never imported, with the identical literal {@code ""}.)
     *
     * @return the local host name, or {@code ""}
     * @throws UnknownHostException if the local address cannot be resolved
     */
    public static String getHostName() throws UnknownHostException {
        InetAddress addr = InetAddress.getLocalHost();
        String hostName = addr.getHostName();
        return hostName == null ? "" : hostName;
    }
}
2、获取后缓存到本地:
java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Process-local cache for the snowflake (workId, dataCenterId) pair claimed
 * from redis at startup.
 *
 * <p>Written by the startup bean thread and read by the scheduled keep-alive
 * thread, so the backing map must be thread-safe: a plain {@code HashMap}
 * gives no cross-thread visibility guarantee — use {@link ConcurrentHashMap}.</p>
 */
public class SnowWorkIdLocalCache {
    private SnowWorkIdLocalCache() {
        // Static holder; never instantiated.
    }

    private static final String DATA_CENTER_ID = "data_center_id";
    private static final String WORK_ID = "work_id";
    private static final Map<String, Integer> cacheMap = new ConcurrentHashMap<>(2);

    /**
     * Stores the claimed slot. Called once after the redis claim succeeds.
     *
     * @param workId       claimed worker id (0-31)
     * @param dataCenterId claimed datacenter id (0-31)
     */
    static void setCache(int workId, int dataCenterId) {
        cacheMap.put(WORK_ID, workId);
        cacheMap.put(DATA_CENTER_ID, dataCenterId);
    }

    /** @return the claimed worker id, or {@code null} if not yet claimed */
    public static Integer getWorkId() {
        return cacheMap.get(WORK_ID);
    }

    /** @return the claimed datacenter id, or {@code null} if not yet claimed */
    public static Integer getDataCenterId() {
        return cacheMap.get(DATA_CENTER_ID);
    }
}
3、定期保活
java
import com.test.common.Const;
import com.test.config.jedis.core.IRedis;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
@EnableScheduling
@Component
public class SnowWorkHealth {
    private static final Logger logger = LoggerFactory.getLogger(SnowWorkHealth.class);
    /** Consecutive refresh failures tolerated before shutting down. */
    private static final int MAX_RETRY_COUNT = 2;
    /** Consecutive keep-alive failures; reset to 0 after every success. */
    private int failCount = 0;
    /** TTL (seconds) to re-apply to the claim key on every refresh. */
    @Value("${cus.snowflake.expire:40}")
    private int snowflakeExpireTime;
    @Autowired
    private IRedis redis;

    /**
     * Refreshes the TTL of this instance's snowflake-id claim every 10s.
     * Exits the JVM when the claim cannot be kept alive so another replica can
     * safely reuse the (dataCenterId, workId) slot once the key expires.
     */
    @Scheduled(cron = "0/10 * * * * *")
    public void keepAlive() {
        Integer dataCenterId = SnowWorkIdLocalCache.getDataCenterId();
        Integer workId = SnowWorkIdLocalCache.getWorkId();
        if (dataCenterId == null || workId == null) {
            // NOTE(review): this also fires if the scheduler runs before the
            // startup bean has claimed a slot — confirm initialization order.
            logger.error(".....keep error and system exit!!!");
            // Exit non-zero: this is a failure, not a normal shutdown.
            System.exit(1);
        }
        String key = Const.Cache.SERVER_NAME + ":snow:id:" + dataCenterId + "_" + workId;
        if (redis.expire(key, snowflakeExpireTime)) {
            String hostName = redis.get(key);
            logger.info("keep alive of snow work host:{}, dataCenterId: {},workId: {}", hostName, dataCenterId, workId);
            // A success ends the failure streak; previously failCount was never
            // reset, so transient failures accumulated over the whole process
            // lifetime and eventually killed a healthy instance.
            failCount = 0;
        } else {
            logger.error("keep snow work id active exception of redis");
            failCount++;
        }
        if (failCount >= MAX_RETRY_COUNT) {
            logger.error(".....keep error and system exit!!!");
            System.exit(1);
        }
    }
}