Java Learning Day 35 - Distributed Systems Deep Dive and Big Data Processing

Learning Objectives

Deepen your understanding of distributed-system principles, master message queue technologies, learn to use a search engine, get acquainted with big data processing frameworks, and complete an end-to-end hands-on project to strengthen your distributed development skills.


1. Distributed Systems in Depth

1.1 Distributed Locks

Redis distributed lock:

java
// Redis-based distributed lock utility
@Component
@Slf4j
public class RedisDistributedLock {
    
    @Autowired
    private StringRedisTemplate redisTemplate;
    
    private static final String LOCK_PREFIX = "lock:";
    private static final String UNLOCK_SCRIPT = 
        "if redis.call('get', KEYS[1]) == ARGV[1] then " +
        "  return redis.call('del', KEYS[1]) " +
        "else return 0 end";
    
    // Acquire the lock (atomic set-if-absent with expiry)
    public boolean tryLock(String key, String value, long expireTime, TimeUnit timeUnit) {
        Boolean result = redisTemplate.opsForValue()
            .setIfAbsent(LOCK_PREFIX + key, value, expireTime, timeUnit);
        return Boolean.TRUE.equals(result);
    }
    
    // Release the lock (the Lua script guarantees we only delete our own lock)
    public boolean releaseLock(String key, String value) {
        DefaultRedisScript<Long> script = new DefaultRedisScript<>();
        script.setScriptText(UNLOCK_SCRIPT);
        script.setResultType(Long.class);
        
        Long result = redisTemplate.execute(script, 
            Collections.singletonList(LOCK_PREFIX + key), value);
        return result != null && result > 0;
    }
    
    // Reentrant lock (sketch: the count key is never decremented here;
    // a complete implementation would decrement it on release)
    public boolean tryReentrantLock(String key, String value, long expireTime, TimeUnit timeUnit) {
        String lockKey = LOCK_PREFIX + key;
        String lockValue = value + ":" + Thread.currentThread().getId();
        
        // Check whether this thread already holds the lock
        String existingValue = redisTemplate.opsForValue().get(lockKey);
        if (lockValue.equals(existingValue)) {
            // Re-entry: increment the hold count
            redisTemplate.opsForValue().increment(lockKey + ":count");
            return true;
        }
        
        // Try to acquire the lock
        Boolean result = redisTemplate.opsForValue()
            .setIfAbsent(lockKey, lockValue, expireTime, timeUnit);
        
        if (Boolean.TRUE.equals(result)) {
            redisTemplate.opsForValue().set(lockKey + ":count", "1", expireTime, timeUnit);
            return true;
        }
        
        return false;
    }
    
    // Lock with automatic renewal (watchdog)
    public boolean tryLockWithRenewal(String key, String value, long expireTime, TimeUnit timeUnit) {
        if (tryLock(key, value, expireTime, timeUnit)) {
            // Start the renewal task
            scheduleRenewal(key, value, expireTime, timeUnit);
            return true;
        }
        return false;
    }
    
    private void scheduleRenewal(String key, String value, long expireTime, TimeUnit timeUnit) {
        // One single-threaded scheduler per lock; it shuts itself down
        // once this owner no longer holds the lock
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        long renewalInterval = expireTime / 3; // renew every one third of the expiry time
        
        scheduler.scheduleAtFixedRate(() -> {
            String lockKey = LOCK_PREFIX + key;
            String existingValue = redisTemplate.opsForValue().get(lockKey);
            if (value.equals(existingValue)) {
                redisTemplate.expire(lockKey, expireTime, timeUnit);
                log.debug("Lock renewed: {}", key);
            } else {
                scheduler.shutdown();
            }
        }, renewalInterval, renewalInterval, timeUnit);
    }
}

// Distributed lock annotation
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface DistributedLock {
    String key();
    long expireTime() default 30;
    TimeUnit timeUnit() default TimeUnit.SECONDS;
    String errorMessage() default "Failed to acquire lock, please retry later";
}

// Distributed lock aspect
@Aspect
@Component
@Slf4j
public class DistributedLockAspect {
    
    @Autowired
    private RedisDistributedLock distributedLock;
    
    @Around("@annotation(distributedLock)")
    public Object around(ProceedingJoinPoint joinPoint, DistributedLock distributedLock) throws Throwable {
        // Note: the key is used verbatim here; SpEL templates such as
        // "order:#{#request.userId}" are not evaluated (see the sketch below)
        String lockKey = distributedLock.key();
        String lockValue = UUID.randomUUID().toString();
        
        boolean acquired = this.distributedLock.tryLock(
            lockKey, 
            lockValue, 
            distributedLock.expireTime(), 
            distributedLock.timeUnit()
        );
        
        if (!acquired) {
            throw new RuntimeException(distributedLock.errorMessage());
        }
        
        try {
            return joinPoint.proceed();
        } finally {
            // Only release the lock we actually acquired
            this.distributedLock.releaseLock(lockKey, lockValue);
        }
    }
}
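
The aspect above uses the annotation's key verbatim, while the usage example below writes it as a SpEL template (order:#{#request.userId}). A minimal sketch of resolving such keys inside DistributedLockAspect, assuming Spring's SpEL classes (SpelExpressionParser, TemplateParserContext, StandardEvaluationContext, DefaultParameterNameDiscoverer) are on the classpath:

java
// Hypothetical addition to DistributedLockAspect: evaluates "#{...}" placeholders
// in the lock key against the intercepted method's arguments
private static final ExpressionParser PARSER = new SpelExpressionParser();
private static final ParameterNameDiscoverer NAMES = new DefaultParameterNameDiscoverer();

private String resolveKey(String keyTemplate, ProceedingJoinPoint joinPoint) {
    MethodSignature signature = (MethodSignature) joinPoint.getSignature();
    String[] paramNames = NAMES.getParameterNames(signature.getMethod());
    StandardEvaluationContext context = new StandardEvaluationContext();
    if (paramNames != null) {
        for (int i = 0; i < paramNames.length; i++) {
            context.setVariable(paramNames[i], joinPoint.getArgs()[i]); // exposes e.g. #request
        }
    }
    // "order:#{#request.userId}" -> "order:42"
    return PARSER.parseExpression(keyTemplate, new TemplateParserContext())
        .getValue(context, String.class);
}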

// Using the distributed lock
@Service
@Slf4j
public class OrderService {
    
    @Autowired
    private RedisDistributedLock distributedLock;
    
    // The SpEL-style key below relies on the resolveKey sketch shown after the aspect
    @DistributedLock(key = "order:#{#request.userId}", expireTime = 10)
    public OrderDTO createOrder(CreateOrderRequest request) {
        // Order creation logic
        return new OrderDTO();
    }
    
    // Using the distributed lock manually
    public void processOrder(Long orderId) {
        String lockKey = "order:process:" + orderId;
        String lockValue = UUID.randomUUID().toString();
        
        if (!distributedLock.tryLock(lockKey, lockValue, 30, TimeUnit.SECONDS)) {
            throw new RuntimeException("Order is already being processed, please retry later");
        }
        try {
            // Order processing logic
            log.info("Processing order: {}", orderId);
        } finally {
            distributedLock.releaseLock(lockKey, lockValue);
        }
    }
}

ZooKeeper distributed lock (via Apache Curator):

java
// ZooKeeper distributed lock built on Curator recipes
@Component
@Slf4j
public class ZookeeperDistributedLock {
    
    private final CuratorFramework client;
    private final String lockPath = "/locks";
    
    public ZookeeperDistributedLock(CuratorFramework client) {
        this.client = client;
    }
    
    // Acquire the lock
    public InterProcessMutex acquireLock(String lockName) {
        String path = lockPath + "/" + lockName;
        InterProcessMutex lock = new InterProcessMutex(client, path);
        
        try {
            lock.acquire();
            log.info("Lock acquired: {}", lockName);
            return lock;
        } catch (Exception e) {
            log.error("Failed to acquire lock: {}", lockName, e);
            throw new RuntimeException("Failed to acquire lock", e);
        }
    }
    
    // Release the lock
    public void releaseLock(InterProcessMutex lock) {
        try {
            if (lock != null && lock.isAcquiredInThisProcess()) {
                lock.release();
                log.info("Lock released");
            }
        } catch (Exception e) {
            log.error("Failed to release lock", e);
        }
    }
    
    // Reentrant read-write lock
    public InterProcessReadWriteLock getReadWriteLock(String lockName) {
        String path = lockPath + "/" + lockName;
        return new InterProcessReadWriteLock(client, path);
    }
}
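
A short usage sketch: acquire the Curator mutex, do the critical-section work, and always release in a finally block (the lock name and processOrder call are illustrative):

java
InterProcessMutex lock = zookeeperDistributedLock.acquireLock("order-1001");
try {
    // Only one process across the cluster executes this at a time
    processOrder(1001L);
} finally {
    zookeeperDistributedLock.releaseLock(lock);
}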

1.2 Distributed ID Generation

Snowflake distributed IDs:

java
// Snowflake ID generator
@Component
@Slf4j
public class SnowflakeIdGenerator {
    
    // Epoch start timestamp (2024-01-01 00:00:00 UTC)
    private static final long START_TIMESTAMP = 1704067200000L;
    
    // Number of bits for the worker ID
    private static final long WORKER_ID_BITS = 5L;
    
    // Number of bits for the datacenter ID
    private static final long DATACENTER_ID_BITS = 5L;
    
    // Number of bits for the sequence
    private static final long SEQUENCE_BITS = 12L;
    
    // Maximum worker ID
    private static final long MAX_WORKER_ID = ~(-1L << WORKER_ID_BITS);
    
    // Maximum datacenter ID
    private static final long MAX_DATACENTER_ID = ~(-1L << DATACENTER_ID_BITS);
    
    // Worker ID is shifted left by 12 bits
    private static final long WORKER_ID_SHIFT = SEQUENCE_BITS;
    
    // Datacenter ID is shifted left by 17 bits (12 + 5)
    private static final long DATACENTER_ID_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS;
    
    // Timestamp is shifted left by 22 bits (12 + 5 + 5)
    private static final long TIMESTAMP_LEFT_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS + DATACENTER_ID_BITS;
    
    // Sequence mask: 4095 (0b111111111111 = 0xfff)
    private static final long SEQUENCE_MASK = ~(-1L << SEQUENCE_BITS);
    
    // Worker ID (0-31)
    private long workerId;
    
    // Datacenter ID (0-31)
    private long datacenterId;
    
    // Sequence within the current millisecond (0-4095)
    private long sequence = 0L;
    
    // Timestamp of the last generated ID
    private long lastTimestamp = -1L;
    
    public SnowflakeIdGenerator(@Value("${snowflake.worker-id:1}") long workerId,
                               @Value("${snowflake.datacenter-id:1}") long datacenterId) {
        if (workerId > MAX_WORKER_ID || workerId < 0) {
            throw new IllegalArgumentException(
                String.format("worker Id can't be greater than %d or less than 0", MAX_WORKER_ID));
        }
        if (datacenterId > MAX_DATACENTER_ID || datacenterId < 0) {
            throw new IllegalArgumentException(
                String.format("datacenter Id can't be greater than %d or less than 0", MAX_DATACENTER_ID));
        }
        this.workerId = workerId;
        this.datacenterId = datacenterId;
    }
    
    // Generate the next ID (this method is thread-safe)
    public synchronized long nextId() {
        long timestamp = timeGen();
        
        // If the current time is before the last generation timestamp, the system
        // clock has moved backwards; refuse to generate IDs to avoid duplicates
        if (timestamp < lastTimestamp) {
            throw new RuntimeException(
                String.format("Clock moved backwards. Refusing to generate id for %d milliseconds",
                    lastTimestamp - timestamp));
        }
        
        // Same millisecond as the previous ID: advance the in-millisecond sequence
        if (lastTimestamp == timestamp) {
            sequence = (sequence + 1) & SEQUENCE_MASK;
            // The sequence overflowed within this millisecond
            if (sequence == 0) {
                // Spin until the next millisecond to get a fresh timestamp
                timestamp = tilNextMillis(lastTimestamp);
            }
        } else {
            // New millisecond: reset the sequence
            sequence = 0L;
        }
        
        // Remember the timestamp of this ID
        lastTimestamp = timestamp;
        
        // Shift and OR the parts together into a 64-bit ID
        return ((timestamp - START_TIMESTAMP) << TIMESTAMP_LEFT_SHIFT)
            | (datacenterId << DATACENTER_ID_SHIFT)
            | (workerId << WORKER_ID_SHIFT)
            | sequence;
    }
    
    // Spin until the next millisecond and return the new timestamp
    private long tilNextMillis(long lastTimestamp) {
        long timestamp = timeGen();
        while (timestamp <= lastTimestamp) {
            timestamp = timeGen();
        }
        return timestamp;
    }
    
    // Current time in milliseconds
    private long timeGen() {
        return System.currentTimeMillis();
    }
}
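
The resulting 64-bit layout is: 1 sign bit, 41 timestamp bits, 5 datacenter bits, 5 worker bits, and 12 sequence bits. For debugging, an ID can be decoded back into its parts with the inverse shifts:

java
long id = snowflakeIdGenerator.nextId();
long timestampMs = (id >> 22) + 1704067200000L; // add START_TIMESTAMP back
long datacenterId = (id >> 17) & 0x1F;          // 5 datacenter bits
long workerId = (id >> 12) & 0x1F;              // 5 worker bits
long sequence = id & 0xFFF;                     // 12 sequence bits
System.out.printf("ts=%d dc=%d worker=%d seq=%d%n", timestampMs, datacenterId, workerId, sequence);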

// Redis-based distributed ID generator
@Component
@Slf4j
public class RedisIdGenerator {
    
    @Autowired
    private StringRedisTemplate redisTemplate;
    
    private static final String ID_KEY_PREFIX = "id:generator:";
    
    // Generate a distributed ID via Redis INCR
    public long generateId(String businessType) {
        String key = ID_KEY_PREFIX + businessType;
        Long id = redisTemplate.opsForValue().increment(key);
        
        // Expire the key so the counter does not live forever.
        // Caveat: after expiry the counter restarts from 1, so combine the value
        // with a date prefix if uniqueness across days is required.
        redisTemplate.expire(key, 1, TimeUnit.DAYS);
        
        return id;
    }
    
    // Generate a batch of IDs with a single INCRBY round trip
    public List<Long> generateBatchIds(String businessType, int count) {
        String key = ID_KEY_PREFIX + businessType;
        Long max = redisTemplate.opsForValue().increment(key, count);
        
        List<Long> ids = new ArrayList<>(count);
        for (long id = max - count + 1; id <= max; id++) {
            ids.add(id);
        }
        
        redisTemplate.expire(key, 1, TimeUnit.DAYS);
        return ids;
    }
    
    // Generate an ID prefixed with a timestamp
    public String generateIdWithTimestamp(String businessType) {
        long timestamp = System.currentTimeMillis();
        long sequence = generateId(businessType);
        return String.format("%d%06d", timestamp, sequence);
    }
}

// Database-sequence-based ID generator
@Service
@Slf4j
public class DatabaseIdGenerator {
    
    @Autowired
    private JdbcTemplate jdbcTemplate;
    
    // Create the sequence (CREATE SEQUENCE / NEXTVAL are PostgreSQL-style syntax)
    public void createSequence(String sequenceName) {
        String sql = "CREATE SEQUENCE IF NOT EXISTS " + sequenceName + " START WITH 1 INCREMENT BY 1";
        jdbcTemplate.execute(sql);
    }
    
    // Fetch the next ID
    public long nextId(String sequenceName) {
        String sql = "SELECT NEXTVAL('" + sequenceName + "')";
        return jdbcTemplate.queryForObject(sql, Long.class);
    }
    
    // Fetch a batch of IDs
    public List<Long> nextIds(String sequenceName, int count) {
        List<Long> ids = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            ids.add(nextId(sequenceName));
        }
        return ids;
    }
}

1.3 Distributed Transactions

Seata distributed transactions:

java
// Seata configuration
@Configuration
@Slf4j
public class SeataConfig {
    
    @Bean
    public DataSourceProxy dataSourceProxy(DataSource dataSource) {
        return new DataSourceProxy(dataSource);
    }
}

// Distributed transaction service
@Service
@Slf4j
public class DistributedTransactionService {
    
    @Autowired
    private OrderService orderService;
    
    @Autowired
    private InventoryService inventoryService;
    
    @Autowired
    private AccountService accountService;
    
    // AT mode (automatic compensation)
    @GlobalTransactional(rollbackFor = Exception.class)
    public void createOrderWithTransaction(CreateOrderRequest request) {
        // 1. Create the order
        OrderDTO order = orderService.createOrder(request);
        
        // 2. Deduct inventory
        inventoryService.deductInventory(request.getProductId(), request.getQuantity());
        
        // 3. Deduct the account balance
        accountService.deductBalance(request.getUserId(), order.getAmount());
        
        // If any step fails, every operation is rolled back
    }
    
    // TCC mode (manual compensation)
    // Note: with real Seata TCC, the transaction coordinator invokes the confirm/
    // cancel methods declared via @TwoPhaseBusinessAction; the explicit calls
    // below only illustrate the three phases
    @GlobalTransactional(rollbackFor = Exception.class)
    public void createOrderWithTCC(CreateOrderRequest request) {
        // Try phase
        try {
            orderService.tryCreateOrder(request);
            inventoryService.tryDeductInventory(request.getProductId(), request.getQuantity());
            accountService.tryDeductBalance(request.getUserId(), request.getAmount());
        } catch (Exception e) {
            // Cancel phase
            orderService.cancelCreateOrder(request);
            inventoryService.cancelDeductInventory(request.getProductId(), request.getQuantity());
            accountService.cancelDeductBalance(request.getUserId(), request.getAmount());
            throw e;
        }
        
        // Confirm phase
        orderService.confirmCreateOrder(request);
        inventoryService.confirmDeductInventory(request.getProductId(), request.getQuantity());
        accountService.confirmDeductBalance(request.getUserId(), request.getAmount());
    }
}

// TCC service interface
public interface TccOrderService {
    
    @TwoPhaseBusinessAction(
        name = "createOrder",
        commitMethod = "confirm",
        rollbackMethod = "cancel"
    )
    boolean tryCreateOrder(@BusinessActionContextParameter("orderId") Long orderId,
                          @BusinessActionContextParameter("userId") Long userId,
                          @BusinessActionContextParameter("amount") BigDecimal amount);
    
    boolean confirm(BusinessActionContext context);
    
    boolean cancel(BusinessActionContext context);
}
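
A minimal sketch of what the three phases of an implementation might look like; the frozen-record approach and the commented-out repository calls are illustrative assumptions, not Seata API:

java
// Illustrative TCC implementation sketch
@Service
public class TccOrderServiceImpl implements TccOrderService {
    
    @Override
    public boolean tryCreateOrder(Long orderId, Long userId, BigDecimal amount) {
        // Try: reserve resources, e.g. insert the order in a PENDING state
        // orderRepository.save(new Order(orderId, userId, amount, OrderStatus.PENDING));
        return true;
    }
    
    @Override
    public boolean confirm(BusinessActionContext context) {
        // Confirm: make the reservation final; must be idempotent
        Long orderId = Long.valueOf(context.getActionContext("orderId").toString());
        // orderRepository.updateStatus(orderId, OrderStatus.CONFIRMED);
        return true;
    }
    
    @Override
    public boolean cancel(BusinessActionContext context) {
        // Cancel: undo the reservation; must be idempotent and tolerate an
        // "empty rollback" (cancel arriving before try)
        Long orderId = Long.valueOf(context.getActionContext("orderId").toString());
        // orderRepository.deleteIfPending(orderId);
        return true;
    }
}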

// Saga mode (long-running transactions)
// Note: Seata's actual Saga mode is driven by a JSON state machine definition;
// SagaOrchestrationStart / SagaTransactionContext / SagaTransactionManager
// below are illustrative pseudo-APIs, not real Seata classes
@Service
@Slf4j
public class SagaOrderService {
    
    @SagaOrchestrationStart
    public void createOrderSaga(CreateOrderRequest request) {
        SagaTransactionContext context = new SagaTransactionContext();
        context.put("orderId", request.getOrderId());
        context.put("userId", request.getUserId());
        context.put("amount", request.getAmount());
        
        // Define the saga steps
        SagaTransactionManager.start(context, () -> {
            // Step 1: create the order
            orderService.createOrder(request);
            
            // Step 2: deduct inventory
            inventoryService.deductInventory(request.getProductId(), request.getQuantity());
            
            // Step 3: deduct the balance
            accountService.deductBalance(request.getUserId(), request.getAmount());
        }, ctx -> {
            // Compensation logic
            Long orderId = (Long) ctx.get("orderId");
            orderService.cancelOrder(orderId);
            inventoryService.restoreInventory(request.getProductId(), request.getQuantity());
            accountService.restoreBalance(request.getUserId(), request.getAmount());
        });
    }
}

2. Message Queues in Depth

2.1 RabbitMQ

RabbitMQ configuration and usage:

java
// RabbitMQ configuration
@Configuration
@Slf4j
public class RabbitMQConfig {
    
    // Order queue
    public static final String ORDER_QUEUE = "order.queue";
    public static final String ORDER_EXCHANGE = "order.exchange";
    public static final String ORDER_ROUTING_KEY = "order.create";
    
    // Dead-letter queue
    public static final String DLX_QUEUE = "order.dlx.queue";
    public static final String DLX_EXCHANGE = "order.dlx.exchange";
    
    // Order queue definition
    @Bean
    public Queue orderQueue() {
        Map<String, Object> args = new HashMap<>();
        args.put("x-dead-letter-exchange", DLX_EXCHANGE);
        args.put("x-dead-letter-routing-key", "order.dlx");
        args.put("x-message-ttl", 60000); // message TTL: 60 seconds
        return QueueBuilder.durable(ORDER_QUEUE).withArguments(args).build();
    }
    
    // Order exchange
    @Bean
    public TopicExchange orderExchange() {
        return new TopicExchange(ORDER_EXCHANGE);
    }
    
    // Binding
    @Bean
    public Binding orderBinding() {
        return BindingBuilder.bind(orderQueue())
            .to(orderExchange())
            .with(ORDER_ROUTING_KEY);
    }
    
    // Dead-letter queue
    @Bean
    public Queue dlxQueue() {
        return QueueBuilder.durable(DLX_QUEUE).build();
    }
    
    @Bean
    public DirectExchange dlxExchange() {
        return new DirectExchange(DLX_EXCHANGE);
    }
    
    @Bean
    public Binding dlxBinding() {
        return BindingBuilder.bind(dlxQueue())
            .to(dlxExchange())
            .with("order.dlx");
    }
    
    // Message converter
    @Bean
    public MessageConverter messageConverter() {
        return new Jackson2JsonMessageConverter();
    }
    
    // RabbitTemplate configuration
    @Bean
    public RabbitTemplate rabbitTemplate(ConnectionFactory connectionFactory) {
        RabbitTemplate template = new RabbitTemplate(connectionFactory);
        template.setMessageConverter(messageConverter());
        
        // Publisher confirm callback (requires publisher-confirm-type: correlated,
        // see the application.yml sketch after the consumer)
        template.setConfirmCallback((correlationData, ack, cause) -> {
            if (ack) {
                log.info("Message confirmed by broker: {}", correlationData);
            } else {
                log.error("Message not confirmed: {}, cause: {}", correlationData, cause);
            }
        });
        
        // Return callback for unroutable messages
        template.setReturnsCallback(returned -> {
            log.error("Message returned: {}", returned);
        });
        
        return template;
    }
}

// RabbitMQ producer
@Service
@Slf4j
public class RabbitMQProducer {
    
    @Autowired
    private RabbitTemplate rabbitTemplate;
    
    // Send a message
    public void sendOrderMessage(OrderMessage message) {
        rabbitTemplate.convertAndSend(
            RabbitMQConfig.ORDER_EXCHANGE,
            RabbitMQConfig.ORDER_ROUTING_KEY,
            message,
            msg -> {
                msg.getMessageProperties().setExpiration("60000"); // per-message TTL in ms
                return msg;
            }
        );
        log.info("Sent order message: {}", message);
    }
    
    // Delayed message
    // Note: setDelay only takes effect with the rabbitmq_delayed_message_exchange
    // plugin and a delayed exchange; the plain TopicExchange above ignores it
    public void sendDelayedMessage(OrderMessage message, long delayMillis) {
        rabbitTemplate.convertAndSend(
            RabbitMQConfig.ORDER_EXCHANGE,
            RabbitMQConfig.ORDER_ROUTING_KEY,
            message,
            msg -> {
                msg.getMessageProperties().setDelay((int) delayMillis);
                return msg;
            }
        );
    }
    
    // Send a batch of messages
    public void sendBatchMessages(List<OrderMessage> messages) {
        for (OrderMessage message : messages) {
            sendOrderMessage(message);
        }
    }
}

// RabbitMQ consumer
@Component
@Slf4j
public class RabbitMQConsumer {
    
    @RabbitListener(queues = RabbitMQConfig.ORDER_QUEUE)
    public void handleOrderMessage(OrderMessage message, Channel channel, 
                                  @Header(AmqpHeaders.DELIVERY_TAG) long deliveryTag) {
        try {
            log.info("Received order message: {}", message);
            
            // Order processing logic
            processOrder(message);
            
            // Manual ack
            channel.basicAck(deliveryTag, false);
        } catch (Exception e) {
            log.error("Failed to process order message", e);
            
            // Reject the message and requeue it
            try {
                channel.basicNack(deliveryTag, false, true);
            } catch (IOException ex) {
                log.error("Failed to nack message", ex);
            }
        }
    }
    
    // Dead-letter queue handler
    @RabbitListener(queues = RabbitMQConfig.DLX_QUEUE)
    public void handleDlxMessage(OrderMessage message) {
        log.error("Received dead-letter message: {}", message);
        // Handle the dead letter, e.g. log it or raise an alert
    }
    
    private void processOrder(OrderMessage message) {
        // Order processing logic
    }
}
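
The manual acks and the confirm/return callbacks above only take effect with matching broker settings; a plausible application.yml sketch (property names per Spring Boot's RabbitMQ starter):

yaml
spring:
  rabbitmq:
    host: localhost
    port: 5672
    publisher-confirm-type: correlated   # enables the ConfirmCallback
    publisher-returns: true              # enables the ReturnsCallback
    listener:
      simple:
        acknowledge-mode: manual         # consumer calls basicAck/basicNack itself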

2.2 Kafka

Kafka configuration and usage:

java
// Kafka configuration
@Configuration
@Slf4j
public class KafkaConfig {
    
    // Producer configuration
    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");
        configProps.put(ProducerConfig.RETRIES_CONFIG, 3);
        configProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        configProps.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        configProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        return new DefaultKafkaProducerFactory<>(configProps);
    }
    
    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
    
    // Consumer configuration
    @Bean
    public ConsumerFactory<String, Object> consumerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, "order-group");
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Assumption: trust all packages so JsonDeserializer can rebuild our message types
        configProps.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
        return new DefaultKafkaConsumerFactory<>(configProps);
    }
    
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = 
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3); // number of concurrent consumers
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }
}

// Kafka producer
@Service
@Slf4j
public class KafkaProducer {
    
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;
    
    private static final String ORDER_TOPIC = "order-topic";
    
    // Send a message
    public void sendOrderMessage(OrderMessage message) {
        kafkaTemplate.send(ORDER_TOPIC, message.getOrderId().toString(), message)
            .addCallback(
                result -> log.info("Message sent: {}", message),
                failure -> log.error("Message send failed: {}", message, failure)
            );
    }
    
    // Send a batch of messages (KafkaTemplate has no bulk send, so send one by one)
    public void sendBatchMessages(List<OrderMessage> messages) {
        messages.stream()
            .map(msg -> new ProducerRecord<String, Object>(ORDER_TOPIC, msg.getOrderId().toString(), msg))
            .forEach(kafkaTemplate::send);
    }
    
    // Send to a specific partition
    public void sendToPartition(OrderMessage message, int partition) {
        kafkaTemplate.send(ORDER_TOPIC, partition, message.getOrderId().toString(), message);
    }
}

// Kafka consumer
@Component
@Slf4j
public class KafkaConsumer {
    
    @KafkaListener(topics = "order-topic", groupId = "order-group")
    public void consumeOrderMessage(OrderMessage message,
                                   @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                                   @Header(KafkaHeaders.OFFSET) long offset,
                                   Acknowledgment acknowledgment) {
        try {
            log.info("Received order message: partition={}, offset={}, message={}", 
                partition, offset, message);
            
            // Order processing logic
            processOrder(message);
            
            // Commit the offset manually
            acknowledgment.acknowledge();
        } catch (Exception e) {
            log.error("Failed to process order message", e);
            // No ack: the message will be redelivered
        }
    }
    
    // Batch consumption: the listener only receives a List when its container
    // factory is batch-enabled (see the sketch below); the single-record
    // factory above is not
    @KafkaListener(topics = "order-topic", groupId = "order-group", 
                   containerFactory = "kafkaListenerContainerFactory")
    public void consumeBatchMessages(List<OrderMessage> messages,
                                    Acknowledgment acknowledgment) {
        try {
            log.info("Received a batch of {} messages", messages.size());
            
            for (OrderMessage message : messages) {
                processOrder(message);
            }
            
            acknowledgment.acknowledge();
        } catch (Exception e) {
            log.error("Failed to process message batch", e);
        }
    }
    
    private void processOrder(OrderMessage message) {
        // Order processing logic
    }
}
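
A sketch of a dedicated batch factory that could be added to KafkaConfig; the bean name is an assumption, and the batch listener would reference it via containerFactory = "batchKafkaListenerContainerFactory":

java
// Hypothetical batch-enabled container factory for the batch listener above
@Bean
public ConcurrentKafkaListenerContainerFactory<String, Object> batchKafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, Object> factory =
        new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.setBatchListener(true); // deliver records to the listener as a List
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
    return factory;
}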

2.3 RocketMQ

RocketMQ configuration and usage:

java
// RocketMQ configuration
@Configuration
@Slf4j
public class RocketMQConfig {
    
    @Value("${rocketmq.name-server:localhost:9876}")
    private String nameServer;
    
    @Value("${rocketmq.producer.group:order-producer-group}")
    private String producerGroup;
    
    @Value("${rocketmq.consumer.group:order-consumer-group}")
    private String consumerGroup;
    
    // Producer configuration
    @Bean
    public DefaultMQProducer defaultMQProducer() throws MQClientException {
        DefaultMQProducer producer = new DefaultMQProducer(producerGroup);
        producer.setNamesrvAddr(nameServer);
        producer.setRetryTimesWhenSendFailed(3);
        producer.setSendMsgTimeout(3000);
        producer.start();
        return producer;
    }
    
    // Consumer configuration
    @Bean
    public DefaultMQPushConsumer defaultMQPushConsumer() throws MQClientException {
        DefaultMQPushConsumer consumer = new DefaultMQPushConsumer(consumerGroup);
        consumer.setNamesrvAddr(nameServer);
        consumer.subscribe("order-topic", "*");
        consumer.setConsumeThreadMin(5);
        consumer.setConsumeThreadMax(20);
        consumer.setConsumeMessageBatchMaxSize(10);
        return consumer;
    }
}
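
The transactional send shown further down cannot go through a plain DefaultMQProducer: sendMessageInTransaction needs a registered TransactionListener, which TransactionMQProducer carries. A sketch of such a bean, added inside RocketMQConfig (the group name is an assumption):

java
// Transactional producer sketch: required for sendMessageInTransaction
@Bean
public TransactionMQProducer transactionMQProducer(
        OrderTransactionListener transactionListener) throws MQClientException {
    TransactionMQProducer producer = new TransactionMQProducer("order-tx-producer-group");
    producer.setNamesrvAddr(nameServer);
    producer.setTransactionListener(transactionListener); // drives commit/rollback and back-checks
    producer.start();
    return producer;
}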

// RocketMQ producer
@Service
@Slf4j
public class RocketMQProducer {
    
    @Autowired
    private DefaultMQProducer defaultMQProducer;
    
    @Autowired
    private TransactionMQProducer transactionMQProducer; // see the transactional bean sketch above
    
    private static final String ORDER_TOPIC = "order-topic";
    
    // Send a synchronous message (Message args: topic, tag, key, body; the order
    // ID goes in as the message key so checkLocalTransaction can read it via getKeys())
    public SendResult sendSyncMessage(OrderMessage message) throws Exception {
        Message msg = new Message(ORDER_TOPIC, "order", message.getOrderId().toString(), 
            JSON.toJSONBytes(message));
        SendResult result = defaultMQProducer.send(msg);
        log.info("Sync message sent: {}", result);
        return result;
    }
    
    // Send an asynchronous message
    public void sendAsyncMessage(OrderMessage message) throws Exception {
        Message msg = new Message(ORDER_TOPIC, "order", message.getOrderId().toString(), 
            JSON.toJSONBytes(message));
        
        defaultMQProducer.send(msg, new SendCallback() {
            @Override
            public void onSuccess(SendResult sendResult) {
                log.info("Async message sent: {}", sendResult);
            }
            
            @Override
            public void onException(Throwable e) {
                log.error("Async message send failed", e);
            }
        });
    }
    
    // Send a one-way message (fire and forget)
    public void sendOneWayMessage(OrderMessage message) throws Exception {
        Message msg = new Message(ORDER_TOPIC, "order", message.getOrderId().toString(), 
            JSON.toJSONBytes(message));
        defaultMQProducer.sendOneway(msg);
        log.info("One-way message sent: {}", message);
    }
    
    // Send a delayed message
    public void sendDelayedMessage(OrderMessage message, int delayLevel) throws Exception {
        Message msg = new Message(ORDER_TOPIC, "order", message.getOrderId().toString(), 
            JSON.toJSONBytes(message));
        msg.setDelayTimeLevel(delayLevel); // delay levels 1-18 (1s, 5s, 10s, 30s, 1m, ... 2h)
        defaultMQProducer.send(msg);
        log.info("Delayed message sent: {}", message);
    }
    
    // Send a transactional message (must go through the TransactionMQProducer,
    // whose registered TransactionListener drives commit/rollback)
    public TransactionSendResult sendTransactionMessage(OrderMessage message) throws Exception {
        Message msg = new Message(ORDER_TOPIC, "order", message.getOrderId().toString(), 
            JSON.toJSONBytes(message));
        TransactionSendResult result = transactionMQProducer.sendMessageInTransaction(msg, message);
        log.info("Transactional message sent: {}", result);
        return result;
    }
}

// Transaction message listener
@Component
@Slf4j
public class OrderTransactionListener implements TransactionListener {
    
    @Override
    public LocalTransactionState executeLocalTransaction(Message msg, Object arg) {
        try {
            OrderMessage orderMessage = (OrderMessage) arg;
            // Execute the local transaction
            processOrder(orderMessage);
            return LocalTransactionState.COMMIT_MESSAGE;
        } catch (Exception e) {
            log.error("Local transaction failed", e);
            return LocalTransactionState.ROLLBACK_MESSAGE;
        }
    }
    
    @Override
    public LocalTransactionState checkLocalTransaction(MessageExt msg) {
        // Check the local transaction state
        String orderId = msg.getKeys();
        // Look up the order status:
        // if the order was created, return COMMIT_MESSAGE;
        // if creation failed, return ROLLBACK_MESSAGE
        return LocalTransactionState.COMMIT_MESSAGE;
    }
    
    private void processOrder(OrderMessage message) {
        // Order processing logic
    }
}

// RocketMQ consumer
@Component
@Slf4j
public class RocketMQConsumer {
    
    @Autowired
    private DefaultMQPushConsumer consumer;
    
    @PostConstruct
    public void init() throws MQClientException {
        consumer.registerMessageListener(new MessageListenerConcurrently() {
            @Override
            public ConsumeConcurrentlyStatus consumeMessage(
                List<MessageExt> messages,
                ConsumeConcurrentlyContext context) {
                
                for (MessageExt message : messages) {
                    try {
                        OrderMessage orderMessage = JSON.parseObject(
                            message.getBody(), OrderMessage.class);
                        log.info("Received order message: {}", orderMessage);
                        
                        // Order processing logic
                        processOrder(orderMessage);
                    } catch (Exception e) {
                        log.error("Failed to process order message", e);
                        // Retry the whole batch later
                        return ConsumeConcurrentlyStatus.RECONSUME_LATER;
                    }
                }
                // Only acknowledge once every message in the batch succeeded
                return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
            }
        });
        
        consumer.start();
    }
    
    private void processOrder(OrderMessage message) {
        // Order processing logic
    }
}

3. Search Engines

3.1 Elasticsearch

Elasticsearch configuration and usage:

java
// Elasticsearch configuration (RestHighLevelClient)
@Configuration
@Slf4j
public class ElasticsearchConfig {
    
    @Value("${elasticsearch.host:localhost:9200}")
    private String host;
    
    @Bean
    public RestHighLevelClient restHighLevelClient() {
        String[] hosts = host.split(",");
        HttpHost[] httpHosts = new HttpHost[hosts.length];
        for (int i = 0; i < hosts.length; i++) {
            String[] hostPort = hosts[i].split(":");
            httpHosts[i] = new HttpHost(hostPort[0], 
                Integer.parseInt(hostPort[1]), "http");
        }
        
        RestClientBuilder builder = RestClient.builder(httpHosts);
        return new RestHighLevelClient(builder);
    }
}

// Elasticsearch document entity
// (mapping types were removed in Elasticsearch 7, so @Document takes no "type" attribute)
@Document(indexName = "orders")
@Data
public class OrderDocument {
    
    @Id
    private Long id;
    
    @Field(type = FieldType.Keyword)
    private String orderId;
    
    @Field(type = FieldType.Long)
    private Long userId;
    
    @Field(type = FieldType.Text, analyzer = "ik_max_word")
    private String productName;
    
    @Field(type = FieldType.Double)
    private BigDecimal amount;
    
    @Field(type = FieldType.Date, pattern = "yyyy-MM-dd HH:mm:ss")
    private Date createTime;
    
    @Field(type = FieldType.Keyword)
    private String status;
}

// Elasticsearch service
@Service
@Slf4j
public class ElasticsearchService {
    
    @Autowired
    private RestHighLevelClient client;
    
    private static final String INDEX_NAME = "orders";
    
    // Create the index
    public void createIndex() throws IOException {
        CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME);
        
        // Index settings
        request.settings(Settings.builder()
            .put("index.number_of_shards", 3)
            .put("index.number_of_replicas", 1)
        );
        
        // Mapping definition
        XContentBuilder mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("properties")
                    .startObject("orderId")
                        .field("type", "keyword")
                    .endObject()
                    .startObject("userId")
                        .field("type", "long")
                    .endObject()
                    .startObject("productName")
                        .field("type", "text")
                        .field("analyzer", "ik_max_word")
                    .endObject()
                    .startObject("amount")
                        .field("type", "double")
                    .endObject()
                    .startObject("createTime")
                        .field("type", "date")
                        .field("format", "yyyy-MM-dd HH:mm:ss")
                    .endObject()
                    .startObject("status")
                        .field("type", "keyword")
                    .endObject()
                .endObject()
            .endObject();
        
        request.mapping(mapping);
        
        CreateIndexResponse response = client.indices().create(request, 
            RequestOptions.DEFAULT);
        log.info("Index created: {}", response.isAcknowledged());
    }
    
    // Index a document
    public void addDocument(OrderDocument document) throws IOException {
        IndexRequest request = new IndexRequest(INDEX_NAME)
            .id(document.getId().toString())
            .source(JSON.toJSONString(document), XContentType.JSON);
        
        IndexResponse response = client.index(request, RequestOptions.DEFAULT);
        log.info("Document indexed: {}", response.getId());
    }
    
    // Bulk-index documents
    public void bulkAddDocuments(List<OrderDocument> documents) throws IOException {
        BulkRequest bulkRequest = new BulkRequest();
        
        for (OrderDocument document : documents) {
            IndexRequest request = new IndexRequest(INDEX_NAME)
                .id(document.getId().toString())
                .source(JSON.toJSONString(document), XContentType.JSON);
            bulkRequest.add(request);
        }
        
        BulkResponse response = client.bulk(bulkRequest, RequestOptions.DEFAULT);
        long failedCount = Arrays.stream(response.getItems())
            .filter(BulkItemResponse::isFailed)
            .count();
        log.info("Bulk indexing: success={}, failed={}", 
            response.getItems().length - failedCount, failedCount);
    }
    
    // Search documents
    public List<OrderDocument> search(String keyword, int page, int size) throws IOException {
        SearchRequest request = new SearchRequest(INDEX_NAME);
        
        // Build the query
        BoolQueryBuilder boolQuery = QueryBuilders.boolQuery();
        
        // Multi-field search
        if (keyword != null && !keyword.isEmpty()) {
            boolQuery.should(QueryBuilders.matchQuery("productName", keyword))
                .should(QueryBuilders.matchQuery("orderId", keyword));
        }
        
        // Pagination and sorting
        request.source(new SearchSourceBuilder()
            .query(boolQuery)
            .from((page - 1) * size)
            .size(size)
            .sort("createTime", SortOrder.DESC)
        );
        
        SearchResponse response = client.search(request, RequestOptions.DEFAULT);
        
        List<OrderDocument> documents = new ArrayList<>();
        for (SearchHit hit : response.getHits().getHits()) {
            OrderDocument document = JSON.parseObject(
                hit.getSourceAsString(), OrderDocument.class);
            documents.add(document);
        }
        
        return documents;
    }
    
    // Aggregation query
    public Map<String, Long> aggregateByStatus() throws IOException {
        SearchRequest request = new SearchRequest(INDEX_NAME);
        
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        sourceBuilder.aggregation(
            AggregationBuilders.terms("status_agg").field("status")
        );
        request.source(sourceBuilder);
        
        SearchResponse response = client.search(request, RequestOptions.DEFAULT);
        
        Terms terms = response.getAggregations().get("status_agg");
        Map<String, Long> result = new HashMap<>();
        for (Terms.Bucket bucket : terms.getBuckets()) {
            result.put(bucket.getKeyAsString(), bucket.getDocCount());
        }
        
        return result;
    }
}
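
Beyond full-text search, structured filters compose the same way; a sketch combining a status term filter with a creation-time range (field names taken from the mapping above, the time format matching its date pattern):

java
// Filtered search sketch: orders with a given status created inside a time window
public List<OrderDocument> searchByStatusAndTime(String status, String from, String to)
        throws IOException {
    SearchRequest request = new SearchRequest(INDEX_NAME);
    
    BoolQueryBuilder boolQuery = QueryBuilders.boolQuery()
        .filter(QueryBuilders.termQuery("status", status))                  // exact keyword match
        .filter(QueryBuilders.rangeQuery("createTime").gte(from).lte(to));  // e.g. "2024-01-01 00:00:00"
    
    request.source(new SearchSourceBuilder().query(boolQuery).size(100));
    
    SearchResponse response = client.search(request, RequestOptions.DEFAULT);
    List<OrderDocument> documents = new ArrayList<>();
    for (SearchHit hit : response.getHits().getHits()) {
        documents.add(JSON.parseObject(hit.getSourceAsString(), OrderDocument.class));
    }
    return documents;
}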

4. Big Data Processing

4.1 Spark

Spark configuration and usage:

java
// Spark configuration
@Configuration
@Slf4j
public class SparkConfig {
    
    @Bean
    public SparkSession sparkSession() {
        return SparkSession.builder()
            .appName("OrderProcessing")
            .master("local[*]")
            .config("spark.sql.warehouse.dir", "file:///tmp/spark-warehouse")
            .getOrCreate();
    }
}

// Spark data processing service
// (assumes a static import of org.apache.spark.sql.functions.* for col/sum/count/desc/from_json)
@Service
@Slf4j
public class SparkDataProcessingService {
    
    @Autowired
    private SparkSession sparkSession;
    
    // Read data
    public Dataset<Row> readData(String path) {
        return sparkSession.read()
            .option("header", "true")
            .option("inferSchema", "true")
            .csv(path);
    }
    
    // Process the data: keep orders over 100 and aggregate per user
    public Dataset<Row> processOrders(Dataset<Row> orders) {
        return orders
            .filter(col("amount").gt(100))
            .groupBy("userId")
            .agg(
                sum("amount").alias("totalAmount"),
                count("*").alias("orderCount")
            )
            .orderBy(desc("totalAmount"));
    }
    
    // Write data
    public void writeData(Dataset<Row> data, String path) {
        data.write()
            .mode(SaveMode.Overwrite)
            .option("header", "true")
            .csv(path);
    }
    
    // Structured Streaming from Kafka
    public void processStreamingData(String kafkaBootstrapServers, String topic) throws Exception {
        Dataset<Row> stream = sparkSession
            .readStream()
            .format("kafka")
            .option("kafka.bootstrap.servers", kafkaBootstrapServers)
            .option("subscribe", topic)
            .load();
        
        Dataset<Row> processed = stream
            .selectExpr("CAST(value AS STRING)")
            .select(from_json(col("value"), getOrderSchema()).alias("order"))
            .select("order.*")
            .filter(col("amount").gt(100));
        
        processed.writeStream()
            .format("console")
            .outputMode("append")
            .start()
            .awaitTermination();
    }
    
    private StructType getOrderSchema() {
        return new StructType()
            .add("orderId", DataTypes.StringType)
            .add("userId", DataTypes.LongType)
            .add("amount", DataTypes.DoubleType)
            .add("createTime", DataTypes.TimestampType);
    }
}
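
Wiring the pieces together, a batch job might read a CSV of orders, aggregate per user, and write the result back out (paths are illustrative):

java
// Example batch run (illustrative paths)
Dataset<Row> orders = sparkDataProcessingService.readData("/data/orders.csv");
Dataset<Row> topUsers = sparkDataProcessingService.processOrders(orders);
topUsers.show(10);                                        // inspect the top spenders
sparkDataProcessingService.writeData(topUsers, "/data/order-summary");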

4.2 Flink

Flink configuration and usage:

java
// Flink configuration
@Configuration
@Slf4j
public class FlinkConfig {
    
    @Bean
    public StreamExecutionEnvironment streamExecutionEnvironment() {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        env.enableCheckpointing(60000); // checkpoint every minute
        return env;
    }
}

// Flink stream processing service
@Service
@Slf4j
public class FlinkStreamProcessingService {
    
    @Autowired
    private StreamExecutionEnvironment env;
    
    // Process a Kafka stream
    public void processKafkaStream(String kafkaBootstrapServers, String topic) throws Exception {
        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
            topic,
            new SimpleStringSchema(),
            getKafkaProperties(kafkaBootstrapServers)
        );
        
        DataStream<String> stream = env.addSource(consumer);
        
        DataStream<OrderEvent> orderStream = stream
            .map(json -> JSON.parseObject(json, OrderEvent.class))
            .filter(order -> order.getAmount() > 100);
        
        // Windowed aggregation
        // Note: event-time windows also require a WatermarkStrategy with a
        // timestamp assigner, omitted here for brevity
        DataStream<OrderSummary> summary = orderStream
            .keyBy(OrderEvent::getUserId)
            .window(TumblingEventTimeWindows.of(Time.minutes(5)))
            .aggregate(new OrderAggregateFunction());
        
        summary.print();
        
        env.execute("Order Processing Job");
    }
    
    // Stateful processing
    public void processWithState() throws Exception {
        DataStream<OrderEvent> stream = getOrderStream();
        
        stream.keyBy(OrderEvent::getUserId)
            .flatMap(new RichFlatMapFunction<OrderEvent, OrderSummary>() {
                private transient ValueState<OrderSummary> state;
                
                @Override
                public void open(Configuration parameters) {
                    ValueStateDescriptor<OrderSummary> descriptor = 
                        new ValueStateDescriptor<>("order-summary", OrderSummary.class);
                    state = getRuntimeContext().getState(descriptor);
                }
                
                @Override
                public void flatMap(OrderEvent event, Collector<OrderSummary> out) throws Exception {
                    OrderSummary summary = state.value();
                    if (summary == null) {
                        summary = new OrderSummary();
                        summary.setUserId(event.getUserId());
                    }
                    
                    summary.setTotalAmount(summary.getTotalAmount() + event.getAmount());
                    summary.setOrderCount(summary.getOrderCount() + 1);
                    
                    state.update(summary);
                    out.collect(summary);
                }
            })
            .print();
        
        env.execute("State Processing Job");
    }
    
    private Properties getKafkaProperties(String bootstrapServers) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", bootstrapServers);
        props.setProperty("group.id", "flink-consumer-group");
        return props;
    }
    
    private DataStream<OrderEvent> getOrderStream() {
        // Placeholder: obtain the order stream from a real source
        return null;
    }
}
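
The window aggregation above references OrderAggregateFunction without defining it; a minimal sketch, assuming OrderSummary carries the userId/totalAmount/orderCount fields used in the state example:

java
// Sketch: incrementally aggregates order events inside each window
public class OrderAggregateFunction
        implements AggregateFunction<OrderEvent, OrderSummary, OrderSummary> {
    
    @Override
    public OrderSummary createAccumulator() {
        return new OrderSummary();
    }
    
    @Override
    public OrderSummary add(OrderEvent event, OrderSummary acc) {
        acc.setUserId(event.getUserId());
        acc.setTotalAmount(acc.getTotalAmount() + event.getAmount());
        acc.setOrderCount(acc.getOrderCount() + 1);
        return acc;
    }
    
    @Override
    public OrderSummary getResult(OrderSummary acc) {
        return acc;
    }
    
    @Override
    public OrderSummary merge(OrderSummary a, OrderSummary b) {
        a.setTotalAmount(a.getTotalAmount() + b.getTotalAmount());
        a.setOrderCount(a.getOrderCount() + b.getOrderCount());
        return a;
    }
}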

5. Hands-On Project

5.1 Overall Project Architecture

Microservice project structure:

java
// Project structure
/*
microservices-project/
├── common/                    # shared modules
│   ├── common-core/           # core utilities
│   ├── common-config/         # shared configuration
│   └── common-exception/      # exception handling
├── gateway/                   # API gateway
├── eureka-server/             # service registry
├── config-server/             # configuration center
├── order-service/             # order service
│   ├── controller/            # controllers
│   ├── service/               # service layer
│   ├── repository/            # data access layer
│   └── model/                 # domain models
├── user-service/              # user service
├── product-service/           # product service
└── inventory-service/         # inventory service
*/

// Common response wrapper
@Data
public class ApiResponse<T> {
    private int code;
    private String message;
    private T data;
    private long timestamp;
    
    public static <T> ApiResponse<T> success(T data) {
        ApiResponse<T> response = new ApiResponse<>();
        response.setCode(200);
        response.setMessage("success");
        response.setData(data);
        response.setTimestamp(System.currentTimeMillis());
        return response;
    }
    
    public static <T> ApiResponse<T> error(int code, String message) {
        ApiResponse<T> response = new ApiResponse<>();
        response.setCode(code);
        response.setMessage(message);
        response.setTimestamp(System.currentTimeMillis());
        return response;
    }
}

// Global exception handler
@RestControllerAdvice
@Slf4j
public class GlobalExceptionHandler {
    
    @ExceptionHandler(BusinessException.class)
    public ResponseEntity<ApiResponse<Object>> handleBusinessException(BusinessException e) {
        log.error("Business exception", e);
        return ResponseEntity.status(HttpStatus.BAD_REQUEST)
            .body(ApiResponse.error(e.getCode(), e.getMessage()));
    }
    
    @ExceptionHandler(Exception.class)
    public ResponseEntity<ApiResponse<Object>> handleException(Exception e) {
        log.error("System exception", e);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
            .body(ApiResponse.error(500, "Internal error, please retry later"));
    }
}
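
The handler above assumes a BusinessException carrying an error code; a minimal sketch:

java
// Minimal business exception carrying an API error code
public class BusinessException extends RuntimeException {
    private final int code;
    
    public BusinessException(int code, String message) {
        super(message);
        this.code = code;
    }
    
    public int getCode() {
        return code;
    }
}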

5.2 Deployment

Docker Compose deployment:

yaml
version: '3.8'

services:
  # Service registry
  eureka-server:
    image: eureka-server:latest
    ports:
      - "8761:8761"
    networks:
      - microservices-network

  # API gateway
  gateway:
    image: gateway:latest
    ports:
      - "8080:8080"
    depends_on:
      - eureka-server
    environment:
      - EUREKA_CLIENT_SERVICE_URL_DEFAULTZONE=http://eureka-server:8761/eureka/
    networks:
      - microservices-network

  # Order service
  order-service:
    image: order-service:latest
    ports:
      - "8081:8080"
    depends_on:
      - eureka-server
      - mysql
      - redis
    environment:
      - EUREKA_CLIENT_SERVICE_URL_DEFAULTZONE=http://eureka-server:8761/eureka/
      - SPRING_DATASOURCE_URL=jdbc:mysql://mysql:3306/order_db
      - SPRING_REDIS_HOST=redis
    networks:
      - microservices-network

  # MySQL
  mysql:
    image: mysql:8.0
    environment:
      - MYSQL_ROOT_PASSWORD=root123
      - MYSQL_DATABASE=order_db
    volumes:
      - mysql-data:/var/lib/mysql
    networks:
      - microservices-network

  # Redis
  redis:
    image: redis:7-alpine
    volumes:
      - redis-data:/data
    networks:
      - microservices-network

volumes:
  mysql-data:
  redis-data:

networks:
  microservices-network:
    driver: bridge
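
Build the service images first, then start the whole stack with docker compose up -d; the Eureka dashboard is then reachable on port 8761 and the gateway on port 8080.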