| Challenge | Impact Dimension | Typical Metric |
| --- | --- | --- |
| Instantaneous high concurrency | System throughput | QPS > 100,000 |
| Overselling | Data consistency | Stock error rate < 0.001% |
| Malicious requests | System security | Block rate > 99.9% |
| Hotspot data | Cache hit rate | Hit rate > 99% |
| Data Structure | Flash-Sale Use Case | Key Advantage |
| --- | --- | --- |
| String | Item stock counter | Atomic operations |
| Hash | User purchase records | Field-level operations |
| List | Order queuing | Ordered storage |
| Set | Deduplication | Fast existence checks |
| Zset | Leaderboards | Automatic sorting |
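As a quick reference, these structures map to redis-py calls roughly as follows (a sketch with illustrative key names, assuming a local Redis instance):

```python
import redis

r = redis.Redis()  # assumes Redis on localhost:6379

r.set("item:1001:stock", 100)           # String: stock counter
r.decr("item:1001:stock")               # atomic decrement
r.hset("item:1001:users", "u1", 1)      # Hash: per-user purchase record
r.lpush("item:1001:orders", "order-1")  # List: FIFO order queue (consume with RPOP/BRPOP)
r.sadd("item:1001:buyers", "u1")        # Set: deduplication
r.sismember("item:1001:buyers", "u1")   # O(1) existence check
r.zadd("leaderboard", {"u1": 1})        # Zset: score-ordered ranking
r.zrevrange("leaderboard", 0, 9)        # top 10 by score
```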
```python
# Traditional read-modify-write vs. a Redis atomic operation
from redis import Redis

redis = Redis()  # client handle; later snippets reuse this name

def unsafe_decrement():
    stock = int(redis.get('stock') or 0)
    if stock > 0:
        redis.set('stock', stock - 1)  # not atomic: another client can
                                       # decrement between the GET and the SET

def safe_decrement():
    redis.decr('stock')  # one atomic command, no race window
```
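One caveat: DECR by itself will happily push the counter below zero under concurrency. A common compensating pattern (a sketch, not part of the original) checks the value DECR returns and rolls back on overshoot:

```python
def safe_decrement_with_check():
    # DECR returns the value after decrementing; a negative result means
    # stock was already exhausted, so restore the counter and report failure
    remaining = redis.decr('stock')
    if remaining < 0:
        redis.incr('stock')
        return False
    return True
```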
Goal: a basic implementation that prevents overselling.
```python
import redis
import threading

class SpikeSystem:
    def __init__(self):
        self.redis = redis.Redis(host='localhost', port=6379, db=0)
        self.lock = threading.Lock()

    def init_stock(self, item_id, quantity):
        """Initialize the stock counter."""
        self.redis.set(f"item:{item_id}:stock", quantity)

    def spike(self, user_id, item_id):
        """Core flash-sale logic."""
        with self.lock:  # process-local lock; a distributed deployment needs a Redis distributed lock
            stock = int(self.redis.get(f"item:{item_id}:stock") or 0)
            if stock <= 0:
                return False
            self.redis.decr(f"item:{item_id}:stock")
            self.redis.hset(f"item:{item_id}:users", user_id, 1)
            return True
```
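The comment in `spike()` points to a Redis distributed lock for clustered deployments. A minimal sketch of one, using SET NX EX plus a token-checked release via Lua (simplified; not a full Redlock implementation):

```python
import uuid

def acquire_lock(r, lock_key, ttl=5):
    """Try to take the lock; return the owner token on success, else None."""
    token = str(uuid.uuid4())
    # SET key value NX EX ttl: set only if absent, auto-expire after ttl seconds
    if r.set(lock_key, token, nx=True, ex=ttl):
        return token
    return None

RELEASE_LUA = """
if redis.call('GET', KEYS[1]) == ARGV[1] then
    return redis.call('DEL', KEYS[1])
end
return 0
"""

def release_lock(r, lock_key, token):
    """Delete the lock only if this caller still owns it (atomic via Lua)."""
    return r.eval(RELEASE_LUA, 1, lock_key, token)
```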
```python
# Simulated load test
def test_concurrent_spike():
    system = SpikeSystem()
    system.init_stock("1001", 100)

    def worker(user_id):
        if system.spike(user_id, "1001"):
            print(f"User {user_id} got one")

    threads = []
    for i in range(200):
        t = threading.Thread(target=worker, args=(f"user_{i}",))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()

test_concurrent_spike()
```
Goal: solve the concurrency problem in a clustered environment.
```python
class DistributedSpike:
    def __init__(self):
        self.redis = redis.Redis()
        self.lua_script = """
        local stock_key = KEYS[1]
        local user_key = KEYS[2]
        local user_id = ARGV[1]
        -- GET returns false for a missing key; treat that as zero stock
        local stock = tonumber(redis.call('GET', stock_key) or 0)
        if stock <= 0 then
            return 0
        end
        redis.call('DECR', stock_key)
        redis.call('HSET', user_key, user_id, 1)
        return 1
        """
        # register once; redis-py caches the SHA and calls EVALSHA afterwards
        self.script = self.redis.register_script(self.lua_script)

    def spike_with_lua(self, user_id, item_id):
        """The Lua script runs atomically on the server, so the stock check,
        decrement, and purchase record happen as a single step."""
        return self.script(
            keys=[f"item:{item_id}:stock", f"item:{item_id}:users"],
            args=[user_id]
        )
```
```python
# Usage example
ds = DistributedSpike()
ds.redis.set("item:1001:stock", 100)
result = ds.spike_with_lua("user_001", "1001")
print("Spike result:", "success" if result else "failure")
```
Goal: peak shaving for incoming traffic, with eventual consistency.
```python
import time
from queue import Queue
from threading import Thread

class AsyncOrderSystem:
    def __init__(self):
        self.redis = redis.Redis()
        self.order_queue = Queue()
        self.worker = Thread(target=self.process_orders)
        self.worker.daemon = True
        self.worker.start()

    def spike(self, user_id, item_id):
        """Flash-sale pre-processing: validate, reserve stock, enqueue."""
        if self.redis.hget(f"item:{item_id}:users", user_id):
            return False  # user already participated
        stock = int(self.redis.get(f"item:{item_id}:stock") or 0)
        if stock <= 0:
            return False
        # reserve stock and mark the user before the slow database work
        self.redis.decr(f"item:{item_id}:stock")
        self.redis.hset(f"item:{item_id}:users", user_id, 1)
        self.order_queue.put({
            "user_id": user_id,
            "item_id": item_id,
            "timestamp": time.time()
        })
        return True

    def process_orders(self):
        """Process queued orders asynchronously."""
        while True:
            order = self.order_queue.get()
            try:
                # the actual order creation
                self.create_order_in_db(
                    order["user_id"],
                    order["item_id"]
                )
                print(f"Order created: {order}")
            except Exception as e:
                # compensate by restoring the reserved stock
                self.redis.incr(f"item:{order['item_id']}:stock")
                print(f"Order failed: {e}")

    def create_order_in_db(self, user_id, item_id):
        """Simulated database write."""
        time.sleep(0.1)  # simulated I/O latency
        return True
```
```python
# Usage example
aos = AsyncOrderSystem()
aos.redis.set("item:1002:stock", 50)
print(aos.spike("user_001", "1002"))
```
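Note that the in-process Queue vanishes on restart and is invisible to other application servers. For cross-process peak shaving, a Redis List can serve as the buffer instead; a sketch under that assumption (the queue name `spike:orders` is illustrative):

```python
import json
import redis

r = redis.Redis()

def enqueue_order(user_id, item_id):
    """Producer: push the order onto a shared, persistent Redis list."""
    r.lpush("spike:orders", json.dumps({"user_id": user_id, "item_id": item_id}))

def order_worker():
    """Consumer: block until an order arrives, then process it."""
    while True:
        _, raw = r.brpop("spike:orders")  # blocking pop from the tail (FIFO)
        order = json.loads(raw)
        print(f"Processing {order}")      # replace with the real database write
```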
Split the item stock into N segments:
$$SegmentSize = \frac{TotalStock}{N}$$
Python implementation:
```python
def init_segment_stock(item_id, total, segments=10):
    """Spread the stock across N segment keys to dilute the hotspot."""
    segment_size = total // segments
    for i in range(segments):
        redis.set(f"item:{item_id}:segment:{i}", segment_size)
    # stock that does not divide evenly goes into a remainder key
    redis.set(f"item:{item_id}:segment:remain", total % segments)
```
| Algorithm | Implementation Complexity | Smoothness | Redis Building Blocks |
| --- | --- | --- | --- |
| Counter | Low | Poor | INCR + EXPIRE |
| Leaky bucket | Medium | Good | LIST + LTRIM |
| Token bucket | High | Excellent | ZSET + ZREMRANGEBYSCORE |
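As a sketch of the ZSET + ZREMRANGEBYSCORE combination from the last row, here is a sliding-window style limiter (key name and limits are illustrative):

```python
import time
import uuid

def allow_request(user_id, limit=10, window=1.0):
    """Keep one ZSET member per request, scored by timestamp; members
    older than the window are evicted before counting."""
    key = f"ratelimit:{user_id}"
    now = time.time()
    member = f"{now}:{uuid.uuid4().hex[:8]}"      # unique member per request
    pipe = redis.pipeline()
    pipe.zremrangebyscore(key, 0, now - window)   # drop expired entries
    pipe.zadd(key, {member: now})                 # record this request
    pipe.zcard(key)                               # count requests in the window
    pipe.expire(key, int(window) + 1)             # TTL so idle keys clean up
    _, _, count, _ = pipe.execute()
    return count <= limit
```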
```python
# Prometheus monitoring example
import time
from prometheus_client import Gauge

redis_connections = Gauge(
    'redis_connections',
    'Active Redis connections'
)
spike_success = Gauge(
    'spike_success_count',
    'Successful spike orders'
)

def monitor_redis():
    """Poll INFO every 5 seconds and export the connection count."""
    while True:
        info = redis.info()  # `redis` is the client handle created earlier
        redis_connections.set(info['connected_clients'])
        time.sleep(5)
```
```python
def check_user_behavior(user_id):
    """Per-user rate limit: at most 5 spike attempts per minute."""
    key = f"user:{user_id}:spike"
    count = redis.incr(key)
    if count == 1:
        redis.expire(key, 60)  # start the 60-second window on the first attempt
    return count <= 5
```
```python
from cryptography.fernet import Fernet

key = Fernet.generate_key()  # in production, load the key from a secret store
cipher = Fernet(key)
encrypted = cipher.encrypt(b"sensitive_data")
```
```python
def preheat_stock(item_id):
    """Warm the stock key before the sale opens; auto-expire after one hour."""
    redis.set(f"item:{item_id}:stock", 1000, ex=3600)
```
Working through these three cases covers the core implementation points of a Redis flash-sale system: atomic stock deduction, Lua scripts for cluster-safe atomicity, and asynchronous order processing for peak shaving.
A real production deployment also needs to combine these with rate limiting, monitoring and alerting, user-behavior risk control, data encryption, and cache preheating, as outlined above.
As the core component of a flash-sale system, a well-designed Redis layer can sustain concurrent requests at the scale of millions, making it a key technology for building high-performance e-commerce systems.