Works across multiple servers automatically — Redis is the single source of truth:
TypeScript
// Server A
await rateLimit('api:user:123', 100, 60); // count: 1

// Server B (same user, same second)
await rateLimit('api:user:123', 100, 60); // count: 2

// Both servers see the same count!
Rate Limiting
Protect your APIs from abuse with Redis-based rate limiting. Fast, distributed, and reliable.
Why Redis for Rate Limiting?#
Simple Fixed Window#
The simplest approach — count requests per time window:
import Redis from 'ioredis'; const redis = new Redis(process.env.ARCTICKEY_URL); async function rateLimit( key: string, limit: number, windowSeconds: number ): Promise<{ allowed: boolean; remaining: number }> { const current = await redis.incr(key); if (current === 1) { await redis.expire(key, windowSeconds); } return { allowed: current <= limit, remaining: Math.max(0, limit - current), }; } // Usage: 100 requests per minute per IP const result = await rateLimit(`rate:api:${ip}`, 100, 60); if (!result.allowed) { return res.status(429).json({ error: 'Too many requests', retryAfter: 60 }); }Sliding Window (More Accurate)#
Smoother rate limiting without sudden resets:
async function slidingWindowRateLimit( key: string, limit: number, windowMs: number ): Promise<{ allowed: boolean; remaining: number; resetAt: number }> { const now = Date.now(); const windowStart = now - windowMs; // Use sorted set with timestamp as score const multi = redis.multi(); multi.zremrangebyscore(key, 0, windowStart); // Remove old entries multi.zadd(key, now, `${now}-${Math.random()}`); // Add current request multi.zcard(key); // Count requests in window multi.pexpire(key, windowMs); // Set expiry const results = await multi.exec(); const count = results?.[2]?.[1] as number; return { allowed: count <= limit, remaining: Math.max(0, limit - count), resetAt: now + windowMs, }; }Token Bucket (Burst-Friendly)#
Allow occasional bursts while maintaining average rate:
async function tokenBucket( key: string, bucketSize: number, // Max tokens (burst capacity) refillRate: number, // Tokens per second tokensRequired: number = 1 ): Promise<{ allowed: boolean; tokens: number }> { const now = Date.now(); const bucketKey = `bucket:${key}`; const lastRefillKey = `bucket:${key}:refill`; // Lua script for atomic token bucket const script = ` local tokens = tonumber(redis.call('GET', KEYS[1]) or ARGV[1]) local lastRefill = tonumber(redis.call('GET', KEYS[2]) or ARGV[4]) local now = tonumber(ARGV[4]) local bucketSize = tonumber(ARGV[1]) local refillRate = tonumber(ARGV[2]) local required = tonumber(ARGV[3]) -- Refill tokens based on time passed local elapsed = (now - lastRefill) / 1000 tokens = math.min(bucketSize, tokens + (elapsed * refillRate)) local allowed = 0 if tokens >= required then tokens = tokens - required allowed = 1 end redis.call('SET', KEYS[1], tokens, 'EX', 3600) redis.call('SET', KEYS[2], now, 'EX', 3600) return {allowed, math.floor(tokens)} `; const [allowed, tokens] = await redis.eval( script, 2, bucketKey, lastRefillKey, bucketSize, refillRate, tokensRequired, now ) as [number, number]; return { allowed: allowed === 1, tokens }; } // Allow 10 requests burst, refill 2 per second const result = await tokenBucket('api:user:123', 10, 2);Express Middleware#
import { Request, Response, NextFunction } from 'express'; function createRateLimiter(options: { windowMs: number; max: number; keyGenerator?: (req: Request) => string; }) { const { windowMs, max, keyGenerator } = options; return async (req: Request, res: Response, next: NextFunction) => { const key = keyGenerator?.(req) ?? req.ip; const result = await slidingWindowRateLimit( `ratelimit:${key}`, max, windowMs ); res.setHeader('X-RateLimit-Limit', max); res.setHeader('X-RateLimit-Remaining', result.remaining); res.setHeader('X-RateLimit-Reset', result.resetAt); if (!result.allowed) { return res.status(429).json({ error: 'Too Many Requests', retryAfter: Math.ceil((result.resetAt - Date.now()) / 1000), }); } next(); }; } // Usage app.use('/api', createRateLimiter({ windowMs: 60 * 1000, // 1 minute max: 100, // 100 requests per minute keyGenerator: (req) => req.user?.id ?? req.ip, }));Tiered Rate Limits#
Different limits for different users:
// Per-plan quotas: request budget per window (window is in seconds).
const TIERS = {
  free: { requests: 100, window: 60 },
  pro: { requests: 1000, window: 60 },
  enterprise: { requests: 10000, window: 60 },
};

/**
 * Apply the sliding-window limiter with the quota for the user's plan.
 * Keys are namespaced by tier so a plan change starts a fresh window.
 */
async function tieredRateLimit(userId: string, tier: keyof typeof TIERS) {
  const { requests, window } = TIERS[tier];
  return slidingWindowRateLimit(
    `rate:${tier}:${userId}`,
    requests,
    window * 1000 // limiter expects milliseconds
  );
}

Distributed Rate Limiting#
Works across multiple servers automatically — Redis is the single source of truth:
// Server A
await rateLimit('api:user:123', 100, 60); // count: 1

// Server B (same user, same second)
await rateLimit('api:user:123', 100, 60); // count: 2

// Both servers see the same count!

Best Practices#