Load Balancing Setup - Complete Guide
Published: September 25, 2024 | Reading time: 22 minutes
Load Balancing Overview
Load balancing distributes incoming traffic across multiple backend servers to improve performance, availability, and fault tolerance.
Load Balancing Benefits
- High availability
- Improved performance
- Scalability
- Fault tolerance
- Traffic distribution
- Session persistence
- Health monitoring
Load Balancing Algorithms
Distribution Methods
Basic Algorithms
- Round Robin
- Least Connections
- IP Hash
- Weighted Round Robin
Advanced Algorithms
- Least Response Time
- URL Hash
- Geographic
- Consistent Hash
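These names map to simple selection rules. As a rough illustration of weighted round-robin (a sketch only, not how Nginx or HAProxy implement it internally), the following Node.js snippet rotates through a hypothetical server list in proportion to each server's weight:
// Weighted round-robin sketch; hosts and weights are hypothetical and mirror the Nginx example below
const servers = [
  { host: '192.168.1.100:3000', weight: 3 },
  { host: '192.168.1.101:3000', weight: 2 },
  { host: '192.168.1.102:3000', weight: 1 }
];
// Expand each server into the pool once per weight unit, then rotate through the pool
const pool = servers.flatMap(s => Array(s.weight).fill(s.host));
let cursor = 0;
function nextServer() {
  const host = pool[cursor % pool.length];
  cursor += 1;
  return host;
}
// Six consecutive picks: .100 three times, .101 twice, .102 once
for (let i = 0; i < 6; i++) console.log(nextServer());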
Nginx Load Balancing
Nginx Configuration
Nginx Load Balancer Setup
# Install Nginx
sudo apt update
sudo apt install nginx
# Basic Load Balancer Configuration
sudo nano /etc/nginx/sites-available/load-balancer
# Nginx Load Balancer Configuration
upstream backend {
# Load balancing method
least_conn; # or ip_hash; round-robin is the default when no method is specified
# Backend servers
server 192.168.1.100:3000 weight=3 max_fails=3 fail_timeout=30s;
server 192.168.1.101:3000 weight=2 max_fails=3 fail_timeout=30s;
server 192.168.1.102:3000 weight=1 max_fails=3 fail_timeout=30s;
# Keep idle connections open to the upstream servers
keepalive 32;
}
# HTTP Load Balancer
server {
listen 80;
server_name example.com;
location / {
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Connection settings
proxy_connect_timeout 5s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
# Buffer settings
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
}
# Health check endpoint
location /health {
access_log off;
default_type text/plain;
return 200 "healthy\n";
}
}
# HTTPS Load Balancer
server {
listen 443 ssl http2;
server_name example.com;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
location / {
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
# Enable site
sudo ln -s /etc/nginx/sites-available/load-balancer /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
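The upstream block above assumes an application is already listening on port 3000 on each backend host. As a minimal sketch (the port, paths, and response bodies are assumptions, not part of the Nginx setup), an Express backend that satisfies both the proxied traffic and the /health probes could look like this:
// Minimal backend to run on each 192.168.1.10x host behind the load balancer
const express = require('express');
const os = require('os');
const app = express();
app.get('/', (req, res) => {
  // Echo which backend served the request so the round-robin behaviour is visible
  res.json({ served_by: os.hostname(), forwarded_for: req.headers['x-forwarded-for'] || null });
});
app.get('/health', (req, res) => res.status(200).send('healthy\n'));
app.listen(3000, () => console.log('backend listening on :3000'));
Requesting http://example.com/ a few times should then cycle through the backends in proportion to the configured weights.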
Advanced Nginx Configuration
Advanced Load Balancing
# Advanced Nginx Load Balancer
upstream web_backend {
least_conn;
server 192.168.1.100:3000 weight=3 max_fails=3 fail_timeout=30s;
server 192.168.1.101:3000 weight=2 max_fails=3 fail_timeout=30s;
server 192.168.1.102:3000 weight=1 max_fails=3 fail_timeout=30s backup;
}
upstream api_backend {
ip_hash; # Session persistence
server 192.168.1.110:8080;
server 192.168.1.111:8080;
server 192.168.1.112:8080;
}
# Rate limiting
limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=web:10m rate=30r/s;
server {
listen 80;
server_name example.com;
# Web application
location / {
limit_req zone=web burst=20 nodelay;
proxy_pass http://web_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Passive failover: retry the next upstream on errors
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_next_upstream_tries 3;
proxy_next_upstream_timeout 10s;
}
# API endpoints
location /api/ {
limit_req zone=api burst=10 nodelay;
proxy_pass http://api_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# CORS headers
add_header Access-Control-Allow-Origin *;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS";
add_header Access-Control-Allow-Headers "Content-Type, Authorization";
}
# Static files
location /static/ {
alias /var/www/static/;
expires 1y;
add_header Cache-Control "public, immutable";
}
# Health check
location /nginx-health {
access_log off;
default_type text/plain;
return 200 "healthy\n";
}
}
# Load balancer status page
server {
listen 8080;
server_name localhost;
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
}
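One way to sanity-check the limit_req zones above is to fire a burst of requests and count the rejections; beyond the configured burst, Nginx rejects requests with 503 by default (429 if limit_req_status is set). A small Node.js sketch, assuming Node 18+ for the global fetch and that the balancer is reachable at the hypothetical URL below:
// Burst test against the rate-limited /api/ location; adjust TARGET to your host
const TARGET = 'http://example.com/api/';
async function burst(n) {
  const results = await Promise.all(
    Array.from({ length: n }, () => fetch(TARGET).then(r => r.status).catch(() => 'error'))
  );
  const counts = {};
  for (const status of results) counts[status] = (counts[status] || 0) + 1;
  console.log(counts); // e.g. { '200': 11, '503': 19 } with rate=10r/s and burst=10
}
burst(30);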
HAProxy Load Balancing
HAProxy Configuration
HAProxy Setup
# Install HAProxy
sudo apt update
sudo apt install haproxy
# HAProxy Configuration
sudo nano /etc/haproxy/haproxy.cfg
# HAProxy Configuration File
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
defaults
mode http
log global
option httplog
option dontlognull
option log-health-checks
option forwardfor
option httpchk GET /health
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
# Frontend (HTTP)
frontend http_frontend
bind *:80
mode http
default_backend web_servers
# Frontend (HTTPS)
frontend https_frontend
bind *:443 ssl crt /etc/ssl/certs/example.com.pem
mode http
default_backend web_servers
# Backend servers
backend web_servers
mode http
balance roundrobin
option httpchk GET /health
http-check expect status 200
server web1 192.168.1.100:3000 check inter 5s rise 2 fall 3
server web2 192.168.1.101:3000 check inter 5s rise 2 fall 3
server web3 192.168.1.102:3000 check inter 5s rise 2 fall 3 backup
# Statistics page
frontend stats
bind *:8404
stats enable
stats uri /stats
stats refresh 30s
stats admin if TRUE
# Validate the configuration, then start HAProxy
sudo haproxy -c -f /etc/haproxy/haproxy.cfg
sudo systemctl start haproxy
sudo systemctl enable haproxy
sudo systemctl status haproxy
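The stats frontend on port 8404 also exposes a machine-readable view: appending ;csv to the stats URI returns the same data as CSV, which is handy for quick scripted checks. A sketch in Node.js (Node 18+ for fetch; the localhost URL is an assumption) that prints each backend server's status:
// Poll HAProxy's stats page in CSV form and print per-server status (UP/DOWN/...)
const STATS_URL = 'http://localhost:8404/stats;csv';
async function printServerStatus() {
  const res = await fetch(STATS_URL);
  const lines = (await res.text()).trim().split('\n');
  const header = lines[0].replace(/^# /, '').split(',');
  const col = name => header.indexOf(name);
  for (const line of lines.slice(1)) {
    const f = line.split(',');
    // Skip the aggregate FRONTEND/BACKEND rows; report only real servers
    if (f[col('svname')] !== 'FRONTEND' && f[col('svname')] !== 'BACKEND') {
      console.log(`${f[col('pxname')]}/${f[col('svname')]}: ${f[col('status')]}`);
    }
  }
}
printServerStatus().catch(console.error);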
Cloud Load Balancers
AWS Application Load Balancer
AWS ALB Setup
# AWS CLI ALB Commands
# Create target group
aws elbv2 create-target-group \
--name web-targets \
--protocol HTTP \
--port 3000 \
--vpc-id vpc-12345678 \
--target-type instance \
--health-check-path /health \
--health-check-interval-seconds 30 \
--health-check-timeout-seconds 5 \
--healthy-threshold-count 2 \
--unhealthy-threshold-count 3
# Create Application Load Balancer
aws elbv2 create-load-balancer \
--name web-alb \
--subnets subnet-12345678 subnet-87654321 \
--security-groups sg-12345678 \
--scheme internet-facing \
--type application \
--ip-address-type ipv4
# Create listener
aws elbv2 create-listener \
--load-balancer-arn arn:aws:elasticloadbalancing:region:account:loadbalancer/app/web-alb/1234567890123456 \
--protocol HTTP \
--port 80 \
--default-actions Type=forward,TargetGroupArn=arn:aws:elasticloadbalancing:region:account:targetgroup/web-targets/1234567890123456
# Register targets
aws elbv2 register-targets \
--target-group-arn arn:aws:elasticloadbalancing:region:account:targetgroup/web-targets/1234567890123456 \
--targets Id=i-1234567890abcdef0,Port=3000 Id=i-0987654321fedcba0,Port=3000
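After registering targets, it is worth confirming that they pass their health checks before routing production traffic. The same check can be scripted with the AWS SDK for JavaScript v3; the package and command names below assume SDK v3, the region is a placeholder, and the ARN is the same placeholder used in the CLI examples:
// npm install @aws-sdk/client-elastic-load-balancing-v2
const {
  ElasticLoadBalancingV2Client,
  DescribeTargetHealthCommand
} = require('@aws-sdk/client-elastic-load-balancing-v2');
const client = new ElasticLoadBalancingV2Client({ region: 'us-east-1' }); // region is an assumption
async function showTargetHealth(targetGroupArn) {
  const { TargetHealthDescriptions } = await client.send(
    new DescribeTargetHealthCommand({ TargetGroupArn: targetGroupArn })
  );
  for (const t of TargetHealthDescriptions) {
    // State is one of: initial, healthy, unhealthy, unused, draining, unavailable
    console.log(`${t.Target.Id}:${t.Target.Port} -> ${t.TargetHealth.State}`);
  }
}
showTargetHealth('arn:aws:elasticloadbalancing:region:account:targetgroup/web-targets/1234567890123456').catch(console.error);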
ALB Features
- Path-based routing
- Host-based routing
- SSL termination
- WebSocket support
- HTTP/2 support
- Sticky sessions
- Health checks
Google Cloud Load Balancer
GCP Load Balancer
# Google Cloud Load Balancer
# Create instance group
gcloud compute instance-groups managed create web-instance-group \
--base-instance-name web-instance \
--size 3 \
--template web-template \
--zone us-central1-a
# Create health check
gcloud compute health-checks create http web-health-check \
--port 3000 \
--request-path /health \
--check-interval 30s \
--timeout 5s \
--healthy-threshold 2 \
--unhealthy-threshold 3
# Create backend service
gcloud compute backend-services create web-backend-service \
--protocol HTTP \
--port-name http \
--health-checks web-health-check \
--global
# Add instance group to backend service
gcloud compute backend-services add-backend web-backend-service \
--instance-group web-instance-group \
--instance-group-zone us-central1-a \
--global
# Create URL map
gcloud compute url-maps create web-url-map \
--default-service web-backend-service
# Create HTTP proxy
gcloud compute target-http-proxies create web-http-proxy \
--url-map web-url-map
# Create forwarding rule
gcloud compute forwarding-rules create web-forwarding-rule \
--global \
--target-http-proxy web-http-proxy \
--port-range 80
GCP Load Balancer Features
- Global load balancing
- SSL termination
- CDN integration
- Autoscaling
- Health checks
- Session affinity
Health Checks
Health Check Implementation
Application Health Checks
# Node.js Health Check Implementation
const express = require('express');
const app = express();
// Health check endpoint
app.get('/health', (req, res) => {
const healthcheck = {
uptime: process.uptime(),
message: 'OK',
timestamp: Date.now(),
// Informational metrics; not used to decide the status code
memory: process.memoryUsage(),
cpu: process.cpuUsage(),
// Dependency checks; these decide whether the instance is healthy
checks: {
database: checkDatabase(),
redis: checkRedis()
}
};
try {
// Healthy only if every dependency check reports OK
const isHealthy = Object.values(healthcheck.checks).every(check => check.status === 'OK');
if (isHealthy) {
res.status(200).json(healthcheck);
} else {
healthcheck.message = 'DEGRADED';
res.status(503).json(healthcheck);
}
} catch (error) {
healthcheck.message = 'ERROR';
res.status(503).json(healthcheck);
}
});
// Readiness check
app.get('/ready', (req, res) => {
const readiness = {
status: 'ready',
timestamp: Date.now(),
services: {
database: checkDatabase(),
redis: checkRedis(),
external_api: checkExternalAPI()
}
};
const isReady = Object.values(readiness.services).every(service =>
service.status === 'OK'
);
res.status(isReady ? 200 : 503).json(readiness);
});
// Liveness check
app.get('/live', (req, res) => {
res.status(200).json({
status: 'alive',
timestamp: Date.now(),
uptime: process.uptime()
});
});
// Service check functions
function checkDatabase() {
try {
// Implement database connectivity check
return { status: 'OK', response_time: '5ms' };
} catch (error) {
return { status: 'ERROR', error: error.message };
}
}
function checkRedis() {
try {
// Implement Redis connectivity check
return { status: 'OK', response_time: '2ms' };
} catch (error) {
return { status: 'ERROR', error: error.message };
}
}
function checkExternalAPI() {
try {
// Implement external API check
return { status: 'OK', response_time: '50ms' };
} catch (error) {
return { status: 'ERROR', error: error.message };
}
}
module.exports = app;
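The module above only exports the Express app. To actually expose /health, /ready, and /live to the load balancer, start it on the port the upstream definitions expect (3000 in the Nginx and HAProxy examples); the file name and require path below are assumptions:
// server.js - start the health-check app exported above
const app = require('./app'); // adjust the path to wherever the module lives
const PORT = process.env.PORT || 3000;
const server = app.listen(PORT, () => console.log(`listening on :${PORT}`));
// Close the listener on SIGTERM so in-flight requests finish and health checks fail fast during deploys
process.on('SIGTERM', () => server.close(() => process.exit(0)));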
Session Persistence
Sticky Sessions
Session Persistence Configuration
# Nginx Sticky Sessions
upstream backend {
ip_hash; # Session persistence by IP
server 192.168.1.100:3000;
server 192.168.1.101:3000;
server 192.168.1.102:3000;
}
# Alternative: cookie-based sticky sessions. Open-source Nginx has no cookie stickiness;
# it requires the NGINX Plus "sticky cookie" directive or a third-party module.
upstream backend_sticky {
least_conn;
server 192.168.1.100:3000;
server 192.168.1.101:3000;
server 192.168.1.102:3000;
# sticky cookie srv_id expires=1h path=/; # NGINX Plus only
}
server {
listen 80;
server_name example.com;
location / {
proxy_pass http://backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Harden cookie attributes set by the backend (this directive does not itself provide stickiness)
proxy_cookie_path / "/; HttpOnly; Secure; SameSite=Strict";
}
}
# HAProxy Sticky Sessions
backend web_servers
mode http
balance roundrobin
cookie SERVERID insert indirect nocache
server web1 192.168.1.100:3000 check cookie web1
server web2 192.168.1.101:3000 check cookie web2
server web3 192.168.1.102:3000 check cookie web3
# AWS ALB Sticky Sessions
aws elbv2 modify-target-group-attributes \
--target-group-arn arn:aws:elasticloadbalancing:region:account:targetgroup/web-targets/1234567890123456 \
--attributes Key=stickiness.enabled,Value=true Key=stickiness.type,Value=lb_cookie Key=stickiness.lb_cookie.duration_seconds,Value=3600
Session Persistence Methods
1. IP Hash - route by client IP
2. Cookie-based - use load balancer or application cookies
3. URL-based - route by URL path
4. Header-based - route by HTTP headers
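Whichever method you choose, stickiness is easy to verify if each backend identifies itself in its responses: repeated requests from the same client should keep hitting the same instance. A minimal sketch (using the hostname as the identifier is an assumption):
// Run on every backend; with stickiness enabled, one client should always see the same hostname
const express = require('express');
const os = require('os');
const app = express();
app.get('/whoami', (req, res) => {
  res.json({
    backend: os.hostname(),
    // HAProxy's "cookie SERVERID insert" or ALB's lb_cookie shows up here once stickiness is active
    cookies: req.headers.cookie || null
  });
});
app.listen(3000, () => console.log('whoami backend on :3000'));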
Monitoring and Metrics
Load Balancer Monitoring
Monitoring Setup
# Nginx Status Module
# Enable status module
sudo nano /etc/nginx/nginx.conf
# Add to http block
http {
# Status module
server {
listen 8080;
server_name localhost;
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
}
}
# HAProxy Statistics
# Statistics page configuration
frontend stats
bind *:8404
stats enable
stats uri /stats
stats refresh 30s
stats admin if TRUE
# Prometheus Monitoring
# Nginx Prometheus Exporter
docker run -d \
--name nginx-exporter \
-p 9113:9113 \
nginx/nginx-prometheus-exporter:0.10.0 \
-nginx.scrape-uri=http://nginx:8080/nginx_status
# HAProxy Prometheus Exporter
docker run -d \
--name haproxy-exporter \
-p 9101:9101 \
prom/haproxy-exporter:latest \
--haproxy.scrape-uri='http://haproxy:8404/stats;csv'
# AWS ALB monitoring
# ALB metrics are published to CloudWatch automatically; enable S3 access logs for request-level detail
aws elbv2 modify-load-balancer-attributes \
--load-balancer-arn arn:aws:elasticloadbalancing:region:account:loadbalancer/app/web-alb/1234567890123456 \
--attributes Key=access_logs.s3.enabled,Value=true Key=access_logs.s3.bucket,Value=my-alb-logs
Key Metrics to Monitor
- Request count
- Response time
- Error rate
- Active connections
- Backend server health
- SSL certificate status
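The stub_status output is plain text, so it can also be scraped directly without a full Prometheus stack. A small Node.js sketch (Node 18+ for fetch; the URL assumes the status server on port 8080 is reachable from where the script runs):
// Scrape nginx stub_status and print its counters as an object
const STATUS_URL = 'http://127.0.0.1:8080/nginx_status';
async function scrape() {
  const text = await (await fetch(STATUS_URL)).text();
  // Output looks like:
  //   Active connections: 2
  //   server accepts handled requests
  //    120 120 210
  //   Reading: 0 Writing: 1 Waiting: 1
  const active = Number(text.match(/Active connections:\s+(\d+)/)[1]);
  const [accepts, handled, requests] = text.split('\n')[2].trim().split(/\s+/).map(Number);
  const [, reading, writing, waiting] = text.match(/Reading:\s+(\d+)\s+Writing:\s+(\d+)\s+Waiting:\s+(\d+)/).map(Number);
  console.log({ active, accepts, handled, requests, reading, writing, waiting });
}
scrape().catch(console.error);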
Best Practices
Load Balancing Strategy
Best Practices
- Implement health checks
- Use appropriate algorithms
- Monitor performance metrics
- Plan for failover
- Use SSL termination
- Implement rate limiting
- Regular capacity planning
Common Mistakes
- No health checks
- Poor algorithm choice
- Inadequate monitoring
- No failover planning
- SSL configuration issues
- Session handling problems
- Capacity underestimation
Summary
Load balancing setup involves several key components:
- Algorithms: Round robin, least connections, IP hash
- Nginx: Software load balancer, flexible configuration
- HAProxy: High-performance load balancer
- Cloud Load Balancers: AWS ALB, GCP LB, managed services
- Health Checks: Application monitoring, service availability
- Session Persistence: Sticky sessions, cookie-based routing
- Monitoring: Performance metrics, alerting
- Best Practices: Health checks, failover, capacity planning
Need More Help?
Struggling with load balancing setup or need help implementing high availability? Our infrastructure experts can help you design robust load balancing solutions.
Get Load Balancing Help