Setting Up Redis for Web Application Caching
Redis is not just a cache. It is a server of in-memory data structures: strings, hashes, lists, sets, sorted sets, streams, and pub/sub channels. Choosing the right structure often yields a cleaner solution than attempting the same thing in a relational database.
Installing Redis 7.x
# Ubuntu 22.04+
# Install prerequisites for adding the official Redis APT repository.
apt install -y lsb-release curl gpg
# Fetch the Redis signing key and dearmor it into the system keyring directory.
curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
# Register the repo for the current Ubuntu codename (from lsb_release -cs).
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" > /etc/apt/sources.list.d/redis.list
# Refresh indexes and install the redis package from the new repo.
apt update && apt install -y redis
/etc/redis/redis.conf — key parameters
# Network
# NOTE: redis.conf does not allow trailing comments — a "directive value # text"
# line is parsed as extra arguments and aborts startup. All comments below are
# therefore on their own lines.
# Listen only on loopback (IPv4 + IPv6).
bind 127.0.0.1 ::1
port 6379
requirepass your_strong_password_here

# Memory
maxmemory 2gb
# Evict least-recently-used keys across the whole keyspace when maxmemory is hit.
maxmemory-policy allkeys-lru
maxmemory-samples 10

# Persistence for cache — AOF only with minimal fsync
# Disable RDB snapshots.
save ""
appendonly yes
# fsync the AOF once per second: bounded data loss, low latency impact.
appendfsync everysec
no-appendfsync-on-rewrite yes

# Performance
tcp-keepalive 300
hz 20
# Reclaim memory of evicted/expired keys asynchronously in a background thread.
lazyfree-lazy-eviction yes
lazyfree-lazy-expire yes
# I/O threading for Redis 6+ on multi-core servers.
io-threads 4
io-threads-do-reads yes
Caching patterns
Cache-aside — the most common:
// cache.service.ts
import { Redis } from 'ioredis'

/**
 * Cache-aside helper on top of ioredis.
 * Values are serialized as JSON strings under the caller-supplied key.
 */
export class CacheService {
  constructor(private redis: Redis) {}

  /**
   * Returns the cached value for `key`, or computes it via `factory`,
   * caches it for `ttlSeconds`, and returns it.
   *
   * Fix: a factory that resolves to `undefined` (e.g. a findOne miss) is
   * NOT cached — JSON.stringify(undefined) yields undefined, which SETEX
   * would either reject or store as the literal string "undefined",
   * breaking JSON.parse on the next read.
   */
  async getOrSet<T>(
    key: string,
    ttlSeconds: number,
    factory: () => Promise<T>
  ): Promise<T> {
    const cached = await this.redis.get(key)
    if (cached !== null) {
      return JSON.parse(cached) as T
    }
    const value = await factory()
    if (value !== undefined) {
      await this.redis.setex(key, ttlSeconds, JSON.stringify(value))
    }
    return value
  }

  /**
   * Deletes every key matching `pattern` (SCAN MATCH glob syntax).
   * Uses SCAN, not KEYS, so traversal doesn't block Redis; UNLINK
   * reclaims memory asynchronously instead of a blocking DEL.
   */
  async invalidate(pattern: string): Promise<void> {
    let cursor = '0'
    do {
      const [nextCursor, keys] = await this.redis.scan(cursor, 'MATCH', pattern, 'COUNT', 100)
      cursor = nextCursor
      if (keys.length > 0) {
        await this.redis.unlink(...keys) // async deletion
      }
    } while (cursor !== '0')
  }
}
// Usage
// Cache-aside read: serve from Redis when present, otherwise load from the
// database and cache the result for 5 minutes.
const product = await cache.getOrSet(
`product:${id}`,
300, // 5 minutes
() => db.products.findOne({ id })
)
// After update
// Drop the single-product entry and any cached category listings that may
// contain it (the trailing * is a SCAN MATCH glob).
await cache.invalidate(`product:${id}`)
await cache.invalidate(`products:category:${product.categoryId}:*`)
Write-through — write to cache and DB synchronously:
// Write-through: persist to the database first, then refresh the cache
// entry so readers immediately see the new state (10-minute TTL).
async updateProduct(id: string, data: UpdateProductDto) {
  const product = await db.products.update(id, data)
  const serialized = JSON.stringify(product)
  await redis.setex(`product:${id}`, 600, serialized)
  return product
}
User sessions
// session.service.ts
const SESSION_TTL = 86400 * 7 // 7 days

/**
 * Redis-backed sessions: one hash per session plus a per-user sorted set
 * (scored by creation time) indexing that user's session ids.
 */
export class SessionService {
  constructor(private redis: Redis) {}

  /** Creates a session hash and registers it in the user's session index. */
  async create(userId: string, metadata: SessionMeta): Promise<string> {
    const sessionId = crypto.randomUUID()
    const sessionKey = `session:${sessionId}`
    const indexKey = `user:${userId}:sessions`

    await this.redis.hset(sessionKey, {
      userId,
      createdAt: Date.now(),
      ip: metadata.ip,
      userAgent: metadata.userAgent
    })
    await this.redis.expire(sessionKey, SESSION_TTL)

    // Index the session under its owner, scored by creation time.
    await this.redis.zadd(indexKey, Date.now(), sessionId)
    await this.redis.expire(indexKey, SESSION_TTL)

    return sessionId
  }

  /** Fetches a session; refreshes its TTL on every hit (sliding window). */
  async get(sessionId: string): Promise<SessionData | null> {
    const sessionKey = `session:${sessionId}`
    const fields = await this.redis.hgetall(sessionKey)
    if (Object.keys(fields).length === 0) return null
    await this.redis.expire(sessionKey, SESSION_TTL) // sliding window
    return fields as SessionData
  }

  /** Removes every session belonging to `userId`, plus the index itself. */
  async destroyAll(userId: string): Promise<void> {
    const indexKey = `user:${userId}:sessions`
    const sessionIds = await this.redis.zrange(indexKey, 0, -1)
    if (!sessionIds.length) return
    const sessionKeys = sessionIds.map(sid => `session:${sid}`)
    await this.redis.unlink(...sessionKeys, indexKey)
  }
}
Rate limiting with sliding window
/**
 * Sliding-window rate limiter backed by a sorted set of request timestamps.
 * Note: each call records its request before checking the count, so rejected
 * requests also consume window entries.
 *
 * Fix: the pipeline result is no longer blindly asserted with `results![2][1]`.
 * ioredis resolves `exec()` with null when the connection drops, and each
 * result tuple is [error, value] — an ignored error would have been cast
 * straight to `number`. Both cases now throw instead of returning garbage.
 */
async function rateLimit(
  redis: Redis,
  key: string,
  limit: number,
  windowMs: number
): Promise<{ allowed: boolean; remaining: number; resetAt: number }> {
  const now = Date.now()
  const windowStart = now - windowMs
  const pipeline = redis.pipeline()
  // Drop entries that have slid out of the window.
  pipeline.zremrangebyscore(key, '-inf', windowStart)
  // Unique member per request; Math.random() disambiguates same-millisecond hits.
  pipeline.zadd(key, now, `${now}-${Math.random()}`)
  pipeline.zcard(key)
  // Let the whole set expire once traffic stops.
  pipeline.pexpire(key, windowMs)
  const results = await pipeline.exec()
  if (!results) {
    throw new Error('rateLimit: pipeline aborted (connection lost)')
  }
  const [zcardError, zcardValue] = results[2]
  if (zcardError) throw zcardError
  const count = zcardValue as number
  return {
    allowed: count <= limit,
    remaining: Math.max(0, limit - count),
    resetAt: now + windowMs
  }
}
Pub/Sub for events
// publisher.ts
// Fan out an order event to all subscribers of the orders channel.
async function publishOrderEvent(event: OrderEvent) {
  const payload = JSON.stringify(event)
  await redis.publish('orders:events', payload)
}
// subscriber.ts
// A connection in subscriber mode can't run regular commands,
// so work on a duplicate of the main client.
const subscriber = redis.duplicate()
await subscriber.subscribe('orders:events')
subscriber.on('message', (_channel, raw) => {
  // Deserialize and re-dispatch on the in-process event bus.
  const event = JSON.parse(raw) as OrderEvent
  eventBus.emit(event.type, event.payload)
})
Monitoring
# Current load
# Print min/max/avg latency samples once per second (Ctrl-C to stop).
redis-cli --latency-history -i 1
# Throughput, refused clients, and keys evicted under memory pressure.
redis-cli info stats | grep -E "instantaneous_ops|rejected_connections|evicted_keys"
# Memory usage
# mem_fragmentation_ratio well above 1 indicates fragmentation; below 1, swapping.
redis-cli info memory | grep -E "used_memory_human|mem_fragmentation_ratio"
# Slow commands
# Log any command taking longer than 10000 microseconds, then show the last 10.
redis-cli config set slowlog-log-slower-than 10000 # 10ms
redis-cli slowlog get 10
Sentinel for high availability
# sentinel.conf
# Watch the master at 10.0.0.1:6379; at least 2 sentinels must agree it is
# down before a failover can be triggered (quorum).
sentinel monitor mymaster 10.0.0.1 6379 2
# Password sentinels use to reach the master (matches requirepass).
sentinel auth-pass mymaster your_password
# Mark the master subjectively down after 5s without a valid reply.
sentinel down-after-milliseconds mymaster 5000
# Abort/retry a failover that hasn't completed within 10s.
sentinel failover-timeout mymaster 10000
# Re-sync replicas with the new master one at a time to limit load.
sentinel parallel-syncs mymaster 1
Timelines
Basic Redis setup with caching and session configuration for an application: 1 day. Sentinel setup with automatic failover: 1–2 days. Redis Cluster for horizontal scaling with sharding: 2–3 days.







