
Shell Script Automation - Complete Guide

Published: September 25, 2024 | Reading time: 20 minutes

Shell Scripting Overview

Automate repetitive tasks and streamline your workflow with shell scripts:

Script Types
# Common Automation Tasks
- Deployment scripts
- Backup automation
- System monitoring
- File processing
- Database operations
- Log management
- Service management

Shell Script Basics

Script Structure

Basic Script Template
#!/bin/bash

# Script metadata
# Description: Brief description of what the script does
# Author: Your Name
# Date: 2024-09-25
# Version: 1.0

# Set strict mode
set -euo pipefail

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LOG_FILE="${SCRIPT_DIR}/script.log"

# Functions
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

error_exit() {
    log "ERROR: $1"
    exit 1
}

# Main script logic
main() {
    log "Starting script execution"
    
    # Your script logic here
    
    log "Script completed successfully"
}

# Run main function
main "$@"

Error Handling

Error Handling Patterns
#!/bin/bash

# Exit on any error
set -e

# Exit on undefined variables
set -u

# Exit on pipe failures
set -o pipefail

# Trap errors
trap 'error_exit "Script failed at line $LINENO"' ERR

# Function to handle errors
error_exit() {
    echo "ERROR: $1" >&2
    exit 1
}

# Check if command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Example usage
if ! command_exists git; then
    error_exit "Git is not installed"
fi

# Safe command execution
if ! git status >/dev/null 2>&1; then
    error_exit "Not in a git repository"
fi
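
Transient failures (a flaky network call, a busy package mirror) are often better retried than treated as fatal. A small retry helper in the same spirit, shown as a sketch (the health-check URL is a placeholder):

# Retry a command up to N times with a fixed delay between attempts
retry() {
    local attempts="$1"
    local delay="$2"
    shift 2
    local n=1

    until "$@"; do
        if [ "$n" -ge "$attempts" ]; then
            echo "ERROR: '$*' failed after $n attempts" >&2
            return 1
        fi
        echo "Attempt $n failed, retrying in ${delay}s..." >&2
        n=$((n + 1))
        sleep "$delay"
    done
}

# Example: tolerate up to 3 failures of a flaky endpoint
retry 3 5 curl -fsS https://example.com/health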

Deployment Automation

Simple Deployment Script

deploy.sh
#!/bin/bash

set -euo pipefail

# Configuration
APP_NAME="myapp"
APP_DIR="/var/www/$APP_NAME"
BACKUP_DIR="/var/backups/$APP_NAME"
REPO_URL="https://github.com/user/$APP_NAME.git"
BRANCH="main"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

log() {
    echo -e "${GREEN}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1"
}

error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
    exit 1
}

warn() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

# Create backup
create_backup() {
    log "Creating backup..."
    local backup_name="${APP_NAME}_$(date +%Y%m%d_%H%M%S)"
    
    if [ -d "$APP_DIR" ]; then
        cp -r "$APP_DIR" "$BACKUP_DIR/$backup_name"
        log "Backup created: $backup_name"
    else
        warn "No existing application directory to backup"
    fi
}

# Deploy application
deploy() {
    log "Starting deployment..."
    
    # Create application directory if it doesn't exist
    mkdir -p "$APP_DIR"
    cd "$APP_DIR"
    
    # Clone or update repository
    if [ -d ".git" ]; then
        log "Updating existing repository..."
        git fetch origin
        git reset --hard "origin/$BRANCH"
    else
        log "Cloning repository..."
        git clone "$REPO_URL" .
    fi
    
    # Install dependencies
    if [ -f "package.json" ]; then
        log "Installing Node.js dependencies..."
        npm ci --production
    fi
    
    if [ -f "requirements.txt" ]; then
        log "Installing Python dependencies..."
        pip install -r requirements.txt
    fi
    
    # Set permissions
    chown -R www-data:www-data "$APP_DIR"
    chmod -R 755 "$APP_DIR"
    
    log "Deployment completed successfully"
}

# Restart services
restart_services() {
    log "Restarting services..."
    
    if systemctl is-active --quiet nginx; then
        systemctl reload nginx
        log "Nginx reloaded"
    fi
    
    if systemctl is-active --quiet "$APP_NAME"; then
        systemctl restart "$APP_NAME"
        log "Application service restarted"
    fi
}

# Main deployment function
main() {
    log "Starting deployment of $APP_NAME"
    
    create_backup
    deploy
    restart_services
    
    log "Deployment completed successfully"
}

# Run main function
main "$@"

Backup Automation

Database Backup Script

backup_database.sh
#!/bin/bash

set -euo pipefail

# Configuration
DB_HOST="localhost"
DB_USER="backup_user"
DB_PASS="backup_password"
DB_NAME="myapp"
BACKUP_DIR="/var/backups/database"
RETENTION_DAYS=30

# Create backup directory
mkdir -p "$BACKUP_DIR"

# Generate backup filename
BACKUP_FILE="${DB_NAME}_$(date +%Y%m%d_%H%M%S).sql"

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

error() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] ERROR: $1" >&2
    exit 1
}

# Create database backup
backup_database() {
    log "Creating database backup..."
    
    if mysqldump -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" \
        --single-transaction \
        --routines \
        --triggers \
        "$DB_NAME" > "$BACKUP_DIR/$BACKUP_FILE"; then
        log "Database backup created: $BACKUP_FILE"
    else
        error "Database backup failed"
    fi
}

# Compress backup
compress_backup() {
    log "Compressing backup..."
    
    if gzip "$BACKUP_DIR/$BACKUP_FILE"; then
        log "Backup compressed: ${BACKUP_FILE}.gz"
        BACKUP_FILE="${BACKUP_FILE}.gz"
    else
        error "Backup compression failed"
    fi
}

# Clean old backups
cleanup_old_backups() {
    log "Cleaning up old backups..."
    
    find "$BACKUP_DIR" -name "${DB_NAME}_*.sql.gz" -type f -mtime +$RETENTION_DAYS -delete
    
    log "Old backups cleaned up"
}

# Upload to cloud storage (optional)
upload_to_cloud() {
    if command -v aws >/dev/null 2>&1; then
        log "Uploading backup to S3..."
        aws s3 cp "$BACKUP_DIR/$BACKUP_FILE" "s3://my-backup-bucket/database/"
        log "Backup uploaded to S3"
    fi
}

# Verify backup
verify_backup() {
    log "Verifying backup..."
    
    if [ -f "$BACKUP_DIR/$BACKUP_FILE" ]; then
        local file_size=$(stat -f%z "$BACKUP_DIR/$BACKUP_FILE" 2>/dev/null || stat -c%s "$BACKUP_DIR/$BACKUP_FILE")
        if [ "$file_size" -gt 0 ]; then
            log "Backup verification successful (${file_size} bytes)"
        else
            error "Backup file is empty"
        fi
    else
        error "Backup file not found"
    fi
}

# Main backup function
main() {
    log "Starting database backup process"
    
    backup_database
    compress_backup
    verify_backup
    cleanup_old_backups
    upload_to_cloud
    
    log "Database backup completed successfully"
}

# Run main function
main "$@"

System Monitoring

System Health Check

health_check.sh
#!/bin/bash

set -euo pipefail

# Configuration
ALERT_EMAIL="admin@example.com"
LOG_FILE="/var/log/health_check.log"
THRESHOLD_CPU=80
THRESHOLD_MEMORY=85
THRESHOLD_DISK=90

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

send_alert() {
    local subject="$1"
    local message="$2"
    
    echo "$message" | mail -s "$subject" "$ALERT_EMAIL"
    log "Alert sent: $subject"
}

# Check CPU usage
check_cpu() {
    # Read the user-CPU field from top's summary line (field layout varies slightly between top versions)
    local cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1)
    local cpu_int=$(printf "%.0f" "$cpu_usage")
    
    if [ "$cpu_int" -gt "$THRESHOLD_CPU" ]; then
        send_alert "High CPU Usage Alert" "CPU usage is ${cpu_usage}% (threshold: ${THRESHOLD_CPU}%)"
        return 1
    fi
    
    log "CPU usage: ${cpu_usage}%"
    return 0
}

# Check memory usage
check_memory() {
    local memory_usage=$(free | grep Mem | awk '{printf "%.0f", $3/$2 * 100.0}')
    
    if [ "$memory_usage" -gt "$THRESHOLD_MEMORY" ]; then
        send_alert "High Memory Usage Alert" "Memory usage is ${memory_usage}% (threshold: ${THRESHOLD_MEMORY}%)"
        return 1
    fi
    
    log "Memory usage: ${memory_usage}%"
    return 0
}

# Check disk usage
check_disk() {
    local disk_usage=$(df -h / | awk 'NR==2 {print $5}' | cut -d'%' -f1)
    
    if [ "$disk_usage" -gt "$THRESHOLD_DISK" ]; then
        send_alert "High Disk Usage Alert" "Disk usage is ${disk_usage}% (threshold: ${THRESHOLD_DISK}%)"
        return 1
    fi
    
    log "Disk usage: ${disk_usage}%"
    return 0
}

# Check service status
check_services() {
    local services=("nginx" "mysql" "redis")
    local failed_services=()
    
    for service in "${services[@]}"; do
        if ! systemctl is-active --quiet "$service"; then
            failed_services+=("$service")
        fi
    done
    
    if [ ${#failed_services[@]} -gt 0 ]; then
        local service_list=$(IFS=", "; echo "${failed_services[*]}")
        send_alert "Service Down Alert" "The following services are down: $service_list"
        return 1
    fi
    
    log "All services are running"
    return 0
}

# Check disk space for specific directories
check_disk_space() {
    local directories=("/var/log" "/tmp" "/var/www")
    
    for dir in "${directories[@]}"; do
        if [ -d "$dir" ]; then
            local usage=$(df -h "$dir" | awk 'NR==2 {print $5}' | cut -d'%' -f1)
            if [ "$usage" -gt "$THRESHOLD_DISK" ]; then
                send_alert "Directory Full Alert" "Directory $dir is ${usage}% full"
            fi
        fi
    done
}

# Main health check function
main() {
    log "Starting system health check"
    
    local issues=0
    
    # Use plain arithmetic assignment: "((issues++))" returns a non-zero status
    # when issues is 0, which would trip set -e here
    check_cpu || issues=$((issues + 1))
    check_memory || issues=$((issues + 1))
    check_disk || issues=$((issues + 1))
    check_services || issues=$((issues + 1))
    check_disk_space
    
    if [ $issues -eq 0 ]; then
        log "All health checks passed"
    else
        log "Health check completed with $issues issues"
    fi
}

# Run main function
main "$@"

File Processing Automation

Log Rotation Script

rotate_logs.sh
#!/bin/bash

set -euo pipefail

# Configuration
LOG_DIR="/var/log"
RETENTION_DAYS=30
COMPRESS_AFTER_DAYS=7

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Rotate log files
rotate_log() {
    local log_file="$1"
    local log_name=$(basename "$log_file")
    local log_dir=$(dirname "$log_file")
    
    log "Rotating log: $log_file"
    
    # Create rotated filename with timestamp
    local rotated_file="${log_dir}/${log_name}.$(date +%Y%m%d_%H%M%S)"
    
    # Move current log to rotated file
    if [ -f "$log_file" ]; then
        mv "$log_file" "$rotated_file"
        touch "$log_file"
        chmod 644 "$log_file"
        log "Log rotated: $rotated_file"
    fi
}

# Compress old logs
compress_logs() {
    log "Compressing old logs..."
    
    find "$LOG_DIR" -name "*.log.*" -type f -mtime +$COMPRESS_AFTER_DAYS ! -name "*.gz" -exec gzip {} \;
    
    log "Old logs compressed"
}

# Clean up old logs
cleanup_logs() {
    log "Cleaning up old logs..."
    
    find "$LOG_DIR" -name "*.log.*.gz" -type f -mtime +$RETENTION_DAYS -delete
    
    log "Old logs cleaned up"
}

# Rotate specific application logs
rotate_app_logs() {
    local app_logs=(
        "/var/log/nginx/access.log"
        "/var/log/nginx/error.log"
        "/var/log/mysql/error.log"
        "/var/log/apache2/access.log"
        "/var/log/apache2/error.log"
    )
    
    for log_file in "${app_logs[@]}"; do
        if [ -f "$log_file" ]; then
            rotate_log "$log_file"
        fi
    done

    # Note: services keep writing to the old (moved) file until they reopen their
    # logs, e.g. via "systemctl reload nginx" or the service's own reopen signal
}

# Main rotation function
main() {
    log "Starting log rotation process"
    
    rotate_app_logs
    compress_logs
    cleanup_logs
    
    log "Log rotation completed"
}

# Run main function
main "$@"

Advanced Scripting Techniques

Configuration Management

config.sh
#!/bin/bash

# Configuration file loader
# (assumes the log() and error() helpers defined in the scripts above)
load_config() {
    local config_file="$1"
    
    if [ -f "$config_file" ]; then
        # Source the configuration file
        source "$config_file"
        log "Configuration loaded from: $config_file"
    else
        error "Configuration file not found: $config_file"
    fi
}

# Environment-specific configuration
load_environment_config() {
    local env="${1:-development}"
    local config_file="config/${env}.conf"
    
    load_config "$config_file"
}

# Example configuration file (config/production.conf)
mkdir -p config
cat > config/production.conf << 'EOF'
# Production Configuration
DB_HOST="prod-db.example.com"
DB_USER="prod_user"
DB_PASS="secure_password"
APP_ENV="production"
LOG_LEVEL="error"
BACKUP_RETENTION_DAYS=90
EOF

# Example usage
load_environment_config "production"

Parallel Processing

parallel_processing.sh
#!/bin/bash

set -euo pipefail

# Configuration
MAX_PARALLEL_JOBS=4

# Create a secure temporary directory
TEMP_DIR="$(mktemp -d)"

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Cleanup function
cleanup() {
    rm -rf "$TEMP_DIR"
}
trap cleanup EXIT

# Process function
process_item() {
    local item="$1"
    local output_file="$TEMP_DIR/result_$$_$RANDOM"
    
    # Your processing logic here
    echo "Processing: $item" > "$output_file"
    sleep 2  # Simulate work
    
    echo "Completed: $item"
}

# Process items in parallel
process_parallel() {
    local items=("item1" "item2" "item3" "item4" "item5" "item6")
    local pids=()
    
    for item in "${items[@]}"; do
        # Wait if we've reached max parallel jobs
        while [ ${#pids[@]} -ge $MAX_PARALLEL_JOBS ]; do
            for i in "${!pids[@]}"; do
                if ! kill -0 "${pids[i]}" 2>/dev/null; then
                    unset pids[i]
                fi
            done
            sleep 0.1
        done
        
        # Start new job
        process_item "$item" &
        pids+=($!)
    done
    
    # Wait for all jobs to complete
    for pid in "${pids[@]}"; do
        wait "$pid"
    done
}

# Main function
main() {
    log "Starting parallel processing"
    process_parallel
    log "Parallel processing completed"
}

# Run main function
main "$@"

Cron Job Management

Cron Job Setup

cron_setup.sh
#!/bin/bash

# Cron job management script

# Add cron job
add_cron_job() {
    local schedule="$1"
    local command="$2"
    local description="$3"
    
    # Create cron job entry
    local cron_entry="$schedule $command # $description"
    
    # Add to crontab
    (crontab -l 2>/dev/null; echo "$cron_entry") | crontab -
    
    echo "Cron job added: $description"
}

# Remove cron job
remove_cron_job() {
    local description="$1"
    
    # Remove cron job by description
    crontab -l 2>/dev/null | grep -v "# $description" | crontab -
    
    echo "Cron job removed: $description"
}

# List cron jobs
list_cron_jobs() {
    echo "Current cron jobs:"
    crontab -l 2>/dev/null | grep -v "^#"
}

# Example cron jobs
setup_cron_jobs() {
    # Daily backup at 2 AM
    add_cron_job "0 2 * * *" "/opt/scripts/backup_database.sh" "Daily database backup"
    
    # Health check every 5 minutes
    add_cron_job "*/5 * * * *" "/opt/scripts/health_check.sh" "System health check"
    
    # Log rotation weekly
    add_cron_job "0 3 * * 0" "/opt/scripts/rotate_logs.sh" "Weekly log rotation"
    
    # Cleanup old files daily
    add_cron_job "0 4 * * *" "find /tmp -type f -mtime +7 -delete" "Daily temp cleanup"
}

# Main function
main() {
    case "${1:-list}" in
        "add")
            add_cron_job "$2" "$3" "$4"
            ;;
        "remove")
            remove_cron_job "$2"
            ;;
        "list")
            list_cron_jobs
            ;;
        "setup")
            setup_cron_jobs
            ;;
        *)
            echo "Usage: $0 {add|remove|list|setup}"
            exit 1
            ;;
    esac
}

# Run main function
main "$@"

Script Testing and Validation

Script Testing Framework

test_script.sh
#!/bin/bash

# Simple testing framework for shell scripts

# Test counter
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0

# Test functions
assert_equals() {
    local expected="$1"
    local actual="$2"
    local test_name="$3"
    
    ((TESTS_RUN++))
    
    if [ "$expected" = "$actual" ]; then
        echo "PASS: $test_name"
        ((TESTS_PASSED++))
    else
        echo "FAIL: $test_name (expected: $expected, actual: $actual)"
        ((TESTS_FAILED++))
    fi
}

assert_not_equals() {
    local expected="$1"
    local actual="$2"
    local test_name="$3"
    
    ((TESTS_RUN++))
    
    if [ "$expected" != "$actual" ]; then
        echo "PASS: $test_name"
        ((TESTS_PASSED++))
    else
        echo "FAIL: $test_name (values should not be equal)"
        ((TESTS_FAILED++))
    fi
}

assert_file_exists() {
    local file="$1"
    local test_name="$2"
    
    ((TESTS_RUN++))
    
    if [ -f "$file" ]; then
        echo "PASS: $test_name"
        ((TESTS_PASSED++))
    else
        echo "FAIL: $test_name (file does not exist: $file)"
        ((TESTS_FAILED++))
    fi
}

# Test summary
test_summary() {
    echo "Test Summary:"
    echo "Tests run: $TESTS_RUN"
    echo "Tests passed: $TESTS_PASSED"
    echo "Tests failed: $TESTS_FAILED"
    
    if [ $TESTS_FAILED -eq 0 ]; then
        echo "All tests passed!"
        exit 0
    else
        echo "Some tests failed!"
        exit 1
    fi
}

# Example test
test_example() {
    # Test string equality
    assert_equals "hello" "hello" "String equality test"
    
    # Test string inequality
    assert_not_equals "hello" "world" "String inequality test"
    
    # Test file existence
    assert_file_exists "/etc/passwd" "System file exists test"
}

# Run tests
test_example
test_summary
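
Running the file executes the example tests and prints the summary; with the assertions above, the output should look roughly like this:

$ bash test_script.sh
PASS: String equality test
PASS: String inequality test
PASS: System file exists test
Test Summary:
Tests run: 3
Tests passed: 3
Tests failed: 0
All tests passed!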

Best Practices

Scripting Guidelines

Code Quality

  • Use strict mode (set -euo pipefail)
  • Add proper error handling
  • Include logging and debugging
  • Use meaningful variable names
  • Add comments and documentation

Security

  • Validate all inputs (see the sketch after this list)
  • Use proper file permissions
  • Avoid hardcoded credentials
  • Sanitize user input
  • Use secure temporary files
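
A short sketch tying the security points together: it validates a positional argument, uses mktemp for a private working file, and takes credentials from the environment rather than the script (the argument pattern and variable names are illustrative):

#!/bin/bash

set -euo pipefail

# Validate input: accept only a simple alphanumeric application name
APP_NAME="${1:?Usage: $0 <app-name>}"
if ! [[ "$APP_NAME" =~ ^[a-zA-Z0-9_-]+$ ]]; then
    echo "ERROR: invalid application name: $APP_NAME" >&2
    exit 1
fi

# Secure temporary file: random name, created with 0600 permissions
TMP_FILE="$(mktemp)"
trap 'rm -f "$TMP_FILE"' EXIT

# Credentials come from the environment or a root-only file, never the script itself
: "${DB_PASS:?DB_PASS must be provided via the environment}"

echo "Working on $APP_NAME using $TMP_FILE"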

Summary

Shell script automation is essential for efficient system administration:

  • Start with basics: Proper script structure and error handling
  • Automate common tasks: Deployment, backup, monitoring
  • Use configuration files: Environment-specific settings
  • Implement logging: Track script execution and errors
  • Test your scripts: Validate functionality before production
  • Follow best practices: Security, maintainability, and reliability

Need More Help?

Struggling with shell script automation or need help implementing complex deployment workflows? Our automation experts can help you create robust, maintainable scripts.
