Setting Up Apache Cassandra for Web Application
Apache Cassandra is a distributed NoSQL database with linear write scalability and high availability without a single point of failure. It is designed around query patterns, not data normalization: first you define how data will be read, then you build the schema. This fundamentally changes the design approach.
When Cassandra is chosen
Time series with millions of events per second, activity feeds, logging systems, IoT telemetry — places where fast and high-volume writes are needed. Netflix and Apple use Cassandra for such workloads; Discord famously stored trillions of messages in Cassandra before migrating to the Cassandra-compatible ScyllaDB in 2023. Not suitable for OLTP with complex transactions.
Installing Cassandra 4.1
# Add the Apache Cassandra 4.1 repository.
# NOTE: `apt-key add` is deprecated on modern Debian/Ubuntu — store the key
# in a dedicated keyring and reference it via signed-by instead.
curl -fsSL https://downloads.apache.org/cassandra/KEYS \
  | gpg --dearmor -o /usr/share/keyrings/cassandra-archive.gpg
echo "deb [signed-by=/usr/share/keyrings/cassandra-archive.gpg] https://debian.cassandra.apache.org 41x main" \
  > /etc/apt/sources.list.d/cassandra.sources.list
apt update && apt install -y cassandra
cassandra.yaml — key parameters
cluster_name: 'MyAppCluster'

# Network
listen_address: 10.0.0.1   # IP of current node (inter-node traffic)
rpc_address: 10.0.0.1      # client (CQL) traffic
# Seeds are configured inside seed_provider, not as a top-level key.
seed_provider:
  - class_name: org.apache.cassandra.locator.SimpleSeedProvider
    parameters:
      - seeds: "10.0.0.1,10.0.0.2,10.0.0.3"

# Directories
data_file_directories:
  - /var/lib/cassandra/data
commitlog_directory: /var/lib/cassandra/commitlog # separate disk for speed
hints_directory: /var/lib/cassandra/hints
saved_caches_directory: /var/lib/cassandra/saved_caches

# Performance
concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 16
# 4.1 uses unit-suffixed values for the new-style parameter names
# (old *_in_mb / *_mb_per_sec names are still accepted but deprecated)
memtable_heap_space: 2048MiB
compaction_throughput: 64MiB/s

# Replication and consistency
endpoint_snitch: GossipingPropertyFileSnitch

# Token allocation (not a JVM setting; the 4.x default is 16)
num_tokens: 256
JVM settings
# /etc/cassandra/jvm11-server.options
# Fixed heap: Xms == Xmx avoids runtime heap resizing pauses.
-Xms8G
-Xmx8G
-XX:+UseG1GC
# Cap remembered-set update work at 5% of each pause
-XX:G1RSetUpdatingPauseTimePercent=5
# Target maximum GC pause in milliseconds
-XX:MaxGCPauseMillis=300
# Start concurrent marking once the heap is 70% occupied
-XX:InitiatingHeapOccupancyPercent=70
Data schema — Query-driven design
-- Keyspace with replication
-- NetworkTopologyStrategy with RF=3 in datacenter 'dc1'; the DC name must
-- match what the snitch reports (cassandra-rackdc.properties for
-- GossipingPropertyFileSnitch).
CREATE KEYSPACE myapp
WITH replication = {
'class': 'NetworkTopologyStrategy',
'dc1': 3
} AND durable_writes = true;
USE myapp;
-- User event feed
-- Query: all user events for period, sorted by time
-- Partition key: user_id (one partition per user).
-- Clustering: occurred_at DESC with event_id as a tie-breaker so two
-- events at the same timestamp do not overwrite each other.
CREATE TABLE user_events (
user_id uuid,
occurred_at timestamp,
event_id uuid,
event_type text,
payload text, -- JSON
PRIMARY KEY ((user_id), occurred_at, event_id)
) WITH CLUSTERING ORDER BY (occurred_at DESC)
-- TWCS groups SSTables into 7-day windows; combined with the table TTL,
-- fully expired SSTables can be dropped wholesale instead of compacted.
AND compaction = {'class': 'TimeWindowCompactionStrategy',
'compaction_window_unit': 'DAYS',
'compaction_window_size': 7}
AND default_time_to_live = 7776000; -- 90 days
-- User statistics
-- NOTE: in a counter table every non-primary-key column must itself be a
-- counter, so a regular column like last_active cannot live here.
-- Counters are integer increments; store money amounts in minor units
-- (e.g. cents) to avoid losing fractions.
CREATE TABLE user_stats (
user_id uuid PRIMARY KEY,
total_orders counter,
total_spent counter -- in cents
);

-- Non-counter per-user fields go in a separate regular table
CREATE TABLE user_activity (
user_id uuid PRIMARY KEY,
last_active timestamp
);
-- Metrics time series
-- Composite partition key (service, bucket, metric_name) caps partition
-- size: each hourly/daily bucket starts a new partition instead of letting
-- one metric's partition grow without bound.
CREATE TABLE metrics (
service text,
bucket timestamp, -- rounding to hour/day
metric_name text,
ts timestamp,
value double,
PRIMARY KEY ((service, bucket, metric_name), ts)
) WITH CLUSTERING ORDER BY (ts DESC)
-- 1-hour TWCS windows align compaction with the write pattern
AND compaction = {'class': 'TimeWindowCompactionStrategy',
'compaction_window_unit': 'HOURS',
'compaction_window_size': 1};
Working with data from Node.js
import cassandra from 'cassandra-driver'

// Seed contact points; the driver discovers the rest of the ring from them.
const CONTACT_POINTS = ['10.0.0.1', '10.0.0.2', '10.0.0.3']

// Keep more connections open to nodes in our own datacenter.
const pooling = {
  coreConnectionsPerHost: {
    [cassandra.types.distance.local]: 3,
    [cassandra.types.distance.remote]: 1
  }
}

const client = new cassandra.Client({
  contactPoints: CONTACT_POINTS,
  localDataCenter: 'dc1', // must match the DC name used in the keyspace replication map
  keyspace: 'myapp',
  credentials: { username: 'cassandra', password: process.env.CASSANDRA_PASSWORD! },
  pooling,
  socketOptions: { readTimeout: 12000 } // ms
})
await client.connect()
// Prepared queries — mandatory in production.
// NOTE: the Node.js cassandra-driver has NO client.prepare() method.
// Statements are prepared implicitly (and cached per node) when you pass
// { prepare: true } to execute()/batch(). Keep the CQL in constants and
// reuse the same string so the cached prepared statement is hit.
const insertEvent = `
  INSERT INTO user_events (user_id, occurred_at, event_id, event_type, payload)
  VALUES (?, ?, ?, ?, ?)`

const selectEvents = `
  SELECT * FROM user_events
  WHERE user_id = ? AND occurred_at >= ? AND occurred_at <= ?
  ORDER BY occurred_at DESC
  LIMIT ?`
// Batch event writes.
// Unlogged batches are only efficient when every statement targets the SAME
// partition; a batch spanning many partitions just overloads the coordinator.
// So: group events by user_id (the partition key) and send one batch per
// partition, in parallel.
async function writeEvents(events: UserEvent[]): Promise<void> {
  if (events.length === 0) return
  const byUser = new Map<string, UserEvent[]>()
  for (const e of events) {
    const group = byUser.get(e.userId)
    if (group) group.push(e)
    else byUser.set(e.userId, [e])
  }
  await Promise.all([...byUser.values()].map(group =>
    client.batch(
      group.map(e => ({
        query: insertEvent,
        params: [
          cassandra.types.Uuid.fromString(e.userId),
          new Date(e.occurredAt),
          cassandra.types.TimeUuid.now(), // time-based id doubles as tie-breaker
          e.eventType,
          JSON.stringify(e.payload)
        ]
      })),
      { prepare: true, logged: false }
    )
  ))
}
// Reading with pagination: yields one page (up to 1000 rows) per iteration.
// NOTE: in the Node.js driver result.pageState is a hex STRING token,
// not a Buffer — the original `as Buffer` cast was wrong.
async function* fetchEvents(userId: string, from: Date, to: Date) {
  const uid = cassandra.types.Uuid.fromString(userId)
  let pageState: string | undefined
  do {
    const result = await client.execute(
      selectEvents,
      [uid, from, to, 1000],
      { prepare: true, fetchSize: 1000, pageState }
    )
    yield result.rows
    pageState = result.pageState // undefined/null on the last page
  } while (pageState)
}
Monitoring and diagnostics
# Cluster status (look for UN = Up/Normal on every node)
nodetool status
# Thread-pool stats — pending/blocked counts reveal saturation
nodetool tpstats
# Per-table stats ("cfstats" is the deprecated pre-4.x alias)
nodetool tablestats myapp.user_events
# Slow queries (enable in cassandra.yaml)
# slow_query_log_timeout_in_ms: 500
# Compaction status
nodetool compactionstats
# Remove data this node no longer owns (run after adding nodes)
nodetool cleanup myapp
Replication and consistency
// Different consistency levels for different operations
// (set per request; overrides the client-wide default)
const { types: { consistencies } } = cassandra
// Write — LOCAL_QUORUM for speed/reliability balance
// (majority of replicas in the LOCAL datacenter must ack: 2 of 3 at RF=3)
await client.execute(insertEvent, params, {
consistency: consistencies.localQuorum
})
// Read analytics — ONE for maximum speed
// (a single replica answers; may return slightly stale data)
await client.execute(selectEvents, params, {
consistency: consistencies.one
})
// Critical data — QUORUM
// (majority of replicas across ALL datacenters; stronger, higher latency)
await client.execute(criticalQuery, params, {
consistency: consistencies.quorum
})
Timelines
Setting up a three-node Cassandra cluster with basic schema: 3–4 days. Designing schema for specific application queries + backend integration + load testing: 1–2 weeks. Migrating data from relational database to Cassandra with model transformation: 2–4 weeks.







