接口的逻辑非常简单:根据传入的城市、仓库和发货时间,查询快递的预计送达时间。
然而,由于会频繁调用这个接口,尤其是在大促期间,接口的性能要求极高。
数据量虽然不大,但为了确保接口的高性能和高可用性,决定采用 Redis + Caffeine 两级缓存策略,以应对可能出现的缓存雪崩、缓存穿透等问题。
1.MySQL表结构
-- Estimated-arrival lookup table: one row per (warehouse, city, delivery date),
-- enforced by the unique index so the Service's selectOne() can never match twice.
CREATE TABLE `t_estimated_arrival_date` (
  `id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键id',
  `warehouse_id` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '货仓id',
  `warehouse` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '发货仓',
  `city` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '签收城市',
  `delivery_date` date NULL DEFAULT NULL COMMENT '发货时间',
  `estimated_arrival_date` date NULL DEFAULT NULL COMMENT '预计到货日期',
  PRIMARY KEY (`id`) USING BTREE,
  -- Composite business key used by the query path (delivery_date + warehouse_id + city).
  UNIQUE INDEX `uk_warehouse_id_city_delivery_date`(`warehouse_id`, `city`, `delivery_date`) USING BTREE
) ENGINE = InnoDB COMMENT = '预计到货时间表' ROW_FORMAT = Dynamic;
2.依赖配置(pom.xml)
<!-- AOP support: required by the @DoubleCache aspect (DoubleCacheAspect). -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-aop</artifactId>
</dependency>
<!-- Redis client (Lettuce) + RedisTemplate auto-configuration (L2 cache). -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<!-- Connection pooling for the Lettuce/Jedis Redis client. -->
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-pool2</artifactId>
</dependency>
<!-- Spring cache abstraction (CacheManager). -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-cache</artifactId>
</dependency>
<!-- Caffeine: in-process L1 cache. -->
<dependency>
    <groupId>com.github.ben-manes.caffeine</groupId>
    <artifactId>caffeine</artifactId>
    <version>2.9.2</version>
</dependency>
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>8.0.28</version>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-boot-starter</artifactId>
    <version>3.3.1</version>
</dependency>
3.配置类
RedisConfig
public classRedisConfig {
@Bean
public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory connectionFactory) {
RedisTemplate<String, Object> redisTemplate = newRedisTemplate<>();
redisTemplate.setConnectionFactory(connectionFactory);
Jackson2JsonRedisSerializer<Object> serializer = newJackson2JsonRedisSerializer<>(Object.class);
ObjectMappermapper=newObjectMapper();
mapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
mapper.activateDefaultTyping(LaissezFaireSubTypeValidator.instance, ObjectMapper.DefaultTyping.NON_FINAL, JsonTypeInfo.As.PROPERTY);
serializer.setObjectMapper(mapper);
redisTemplate.setKeySerializer(newStringRedisSerializer());
redisTemplate.setValueSerializer(serializer);
redisTemplate.setHashKeySerializer(newStringRedisSerializer());
redisTemplate.setHashValueSerializer(serializer);
redisTemplate.afterPropertiesSet();
return redisTemplate;
}
}
CaffeineConfig
public classCaffeineConfig {
@Bean
public Cache<String, Object> caffeineCache() {
return Caffeine.newBuilder()
.initialCapacity(128)
.maximumSize(1024)
.expireAfterWrite(60, TimeUnit.SECONDS)
.build();
}
@Bean
public CacheManager cacheManager() {
CaffeineCacheManagercacheManager=newCaffeineCacheManager();
cacheManager.setCaffeine(Caffeine.newBuilder()
.initialCapacity(128)
.maximumSize(1024)
.expireAfterWrite(60, TimeUnit.SECONDS));
return cacheManager;
}
}
4.Service 实现
@Slf4j
@Service
public class DoubleCacheServiceImpl doubleCacheServiceImpl {
@Resource
private Cache caffeineCache;
@Resource
private RedisTemplate<String, Object> redisTemplate;
@Resource
private EstimatedArrivalDateMapper estimatedArrivalDateMapper;
@Override
public EstimatedArrivalDateEntity getEstimatedArrivalDateCommon(EstimatedArrivalDateEntity request) {
String key = request.getDeliveryDate() + ":" + request.getWarehouseId() + ":" + request.getCity();
log.info("Cache key: {}", key);
Object value = caffeineCache.getIfPresent(key);
if (Objects.nonNull(value)) {
log.info("get from caffeine");
return EstimatedArrivalDateEntity.builder().estimatedArrivalDate(value.toString()).build();
}
value = redisTemplate.opsForValue().get(key);
if (Objects.nonNull(value)) {
log.info("get from redis");
caffeineCache.put(key, value);
return EstimatedArrivalDateEntity.builder().estimatedArrivalDate(value.toString()).build();
}
log.info("get from mysql");
DateTime deliveryDate = DateUtil.parse(request.getDeliveryDate(), "yyyy-MM-dd");
EstimatedArrivalDateEntity entity = estimatedArrivalDateMapper.selectOne(newQueryWrapper<>()
.eq("delivery_date", deliveryDate)
.eq("warehouse_id", request.getWarehouseId())
.eq("city", request.getCity()));
redisTemplate.opsForValue().set(key, entity.getEstimatedArrivalDate(), 120, TimeUnit.SECONDS);
caffeineCache.put(key, entity.getEstimatedArrivalDate());
return EstimatedArrivalDateEntity.builder().estimatedArrivalDate(entity.getEstimatedArrivalDate()).build();
}
}
代码分析:
1.DoubleCache 注解
/**
 * Marks a method whose result is managed in the two-tier cache
 * (Caffeine L1 + Redis L2) by {@code DoubleCacheAspect}.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface DoubleCache {

    /** Cache name, used as the prefix of the real cache key. */
    String cacheName();

    /** Key fragments (parameter-name expressions) joined to build the cache key. */
    String[] key();

    /** Redis TTL in seconds; Caffeine's TTL is fixed by its own builder config. */
    long expireTime() default 120;

    /** Operation: FULL = read-through, PUT = write-through, DELETE = evict. */
    CacheType type() default CacheType.FULL;

    enum CacheType {
        FULL, PUT, DELETE
    }
}
2.DoubleCacheAspect
@Slf4j
@Component
@Aspect
public class DoubleCacheAspect {
@Resource
private Cache caffeineCache;
@Resource
private RedisTemplate<String, Object> redisTemplate;
@Pointcut("@annotation(com.itender.redis.annotation.DoubleCache)")
public void doubleCachePointcut() {}
@Around("doubleCachePointcut()")
public Object doAround(ProceedingJoinPoint point)throws Throwable {
MethodSignature signature = (MethodSignature) point.getSignature();
Method method = signature.getMethod();
String[] paramNames = signature.getParameterNames();
Object[] args = point.getArgs();
TreeMap<String, Object> treeMap = newTreeMap<>();
for (int i = 0; i < paramNames.length; i++) {
treeMap.put(paramNames[i], args[i]);
}
Double Cacheannotation = method.getAnnotation(DoubleCache.class);
String elResult = DoubleCacheUtil.arrayParse(Lists.newArrayList(annotation.key()), treeMap);
String realKey = annotation.cacheName() + ":" + elResult;
if (annotation.type() == DoubleCache.CacheType.PUT) {
Object object = point.proceed();
redisTemplate.opsForValue().set(realKey, object, annotation.expireTime(), TimeUnit.SECONDS);
caffeineCache.put(realKey, object);
return object;
} elseif (annotation.type() == DoubleCache.CacheType.DELETE) {
redisTemplate.delete(realKey);
caffeineCache.invalidate(realKey);
return point.proceed();
}
Object caffeineCacheObj = caffeineCache.getIfPresent(realKey);
if (Objects.nonNull(caffeineCacheObj)) {
log.info("get data from caffeine");
return caffeineCacheObj;
}
Object redisCache = redisTemplate.opsForValue().get(realKey);
if (Objects.nonNull(redisCache)) {
log.info("get data from redis");
caffeineCache.put(realKey, redisCache);
return redisCache;
}
log.info("get data from database");
Object object = point.proceed();
if (Objects.nonNull(object)) {
log.info("get data from database write to cache: {}", object);
redisTemplate.opsForValue().set(realKey, object, annotation.expireTime(), TimeUnit.SECONDS);
caffeineCache.put(realKey, object);
}
return object;
}
}
代码分析:
需要注意的是,本地缓存的容量和过期时间需要根据实际业务场景合理设置,以防止内存溢出等问题。
虽然 Redis 单独使用已经足够强大,但在某些场景下,结合 Caffeine 的本地缓存可以进一步提升性能。