Build complete systems with Bash for real production use cases
In this section we will develop complex projects that combine multiple Bash scripting concepts to solve real problems you would encounter in production environments.
Build a monitoring system that collects metrics from multiple servers, detects anomalies, generates alerts, and provides real-time dashboards.
#!/bin/bash
# metrics_agent.sh - Distributed metrics collection agent
AGENT_CONFIG="/etc/monitoring/agent.conf"
METRICS_DIR="/var/lib/monitoring/metrics"
CENTRAL_SERVER="monitoring.company.com"
CENTRAL_PORT="9999"
HOST_ID=$(hostname)
# Default configuration
DEFAULT_CONFIG="# Monitoring agent configuration
COLLECTION_INTERVAL=30
METRICS_RETENTION_DAYS=7
ENABLE_SYSTEM_METRICS=true
ENABLE_PROCESS_METRICS=true
ENABLE_NETWORK_METRICS=true
ENABLE_DISK_METRICS=true
CUSTOM_COMMANDS=()
ALERT_THRESHOLDS=()
DEBUG_MODE=false"
# Create the configuration file if it does not exist
if [[ ! -f "$AGENT_CONFIG" ]]; then
    sudo mkdir -p "$(dirname "$AGENT_CONFIG")"
    echo "$DEFAULT_CONFIG" | sudo tee "$AGENT_CONFIG" > /dev/null
fi
source "$AGENT_CONFIG"
# Create the required directories
sudo mkdir -p "$METRICS_DIR"
sudo mkdir -p "/var/log/monitoring"
LOG_FILE="/var/log/monitoring/agent.log"
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | sudo tee -a "$LOG_FILE" >/dev/null
}
# Collect system-level metrics
collect_system_metrics() {
    local timestamp=$(date +%s)
    local metrics_file="$METRICS_DIR/system_$(date +%Y%m%d_%H%M%S).json"
    # CPU (the exact "Cpu(s)" output format varies between procps versions)
    local cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | sed 's/%us,//')
    # Memory
    local mem_info=$(free -m | awk 'NR==2{printf "%.1f %.1f %.1f", $3*100/$2, $3, $2}')
    read -r mem_percent mem_used mem_total <<< "$mem_info"
    # System load
    local load_avg=$(uptime | awk -F'load average:' '{print $2}' | sed 's/^ *//')
    # Disk usage on /
    local disk_usage=$(df -h / | awk 'NR==2 {print $5}' | sed 's/%//')
    # Network (first matching interface; adjust the pattern to your NIC names)
    local network_stats=$(cat /proc/net/dev | grep -E "(eth0|ens|enp)" | head -1 | awk '{print $2,$10}')
    read -r bytes_in bytes_out <<< "$network_stats"
    # Processes
    local process_count=$(ps aux | wc -l)
    # Temperature (if available)
    local temp=""
    if [[ -f /sys/class/thermal/thermal_zone0/temp ]]; then
        temp=$(cat /sys/class/thermal/thermal_zone0/temp)
        temp=$((temp / 1000))
    fi
    # Build the JSON metrics document
    cat > "$metrics_file" << EOF
{
  "timestamp": $timestamp,
  "host": "$HOST_ID",
  "type": "system",
  "metrics": {
    "cpu": { "usage_percent": ${cpu_usage:-0} },
    "memory": {
      "usage_percent": $mem_percent,
      "used_mb": $mem_used,
      "total_mb": $mem_total
    },
    "load": { "average": "$load_avg" },
    "disk": { "usage_percent": $disk_usage },
    "network": {
      "bytes_in": ${bytes_in:-0},
      "bytes_out": ${bytes_out:-0}
    },
    "processes": { "count": $process_count },
    "temperature": { "celsius": ${temp:-null} }
  }
}
EOF
    log "System: metrics collected in $metrics_file"
    echo "$metrics_file"
}
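# Example of a generated document (all values below are illustrative):
#   {
#     "timestamp": 1700000000,
#     "host": "web-01",
#     "type": "system",
#     "metrics": { "cpu": { "usage_percent": 12.3 }, ... }
#   }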
# Collect metrics for specific processes
collect_process_metrics() {
    local timestamp=$(date +%s)
    local metrics_file="$METRICS_DIR/processes_$(date +%Y%m%d_%H%M%S).json"
    # Top 5 processes by CPU
    local top_cpu=$(ps aux --sort=-%cpu | head -6 | tail -5 | awk '{print $11,$3,$4}')
    # Top 5 processes by memory
    local top_mem=$(ps aux --sort=-%mem | head -6 | tail -5 | awk '{print $11,$3,$4}')
    # Specific services to monitor
    local services=("nginx" "apache2" "mysql" "postgresql" "redis" "docker")
    local service_status=""
    # Declare the loop variables up front so both branches assign the same locals
    local service status pid cpu_mem
    for service in "${services[@]}"; do
        if systemctl is-active --quiet "$service" 2>/dev/null; then
            status="running"
            pid=$(systemctl show -p MainPID "$service" | cut -d= -f2)
            if [[ "$pid" != "0" ]]; then
                cpu_mem=$(ps -p "$pid" -o %cpu,%mem --no-headers 2>/dev/null || echo "0.0 0.0")
            else
                cpu_mem="0.0 0.0"
            fi
        else
            status="stopped"
            cpu_mem="0.0 0.0"
        fi
        service_status="$service_status\"$service\": {\"status\": \"$status\", \"cpu\": $(echo $cpu_mem | awk '{print $1}'), \"memory\": $(echo $cpu_mem | awk '{print $2}')},"
    done
    # Strip the trailing comma
    service_status=${service_status%,}
cat > "$metrics_file" << EOF
{
"timestamp": $timestamp,
"host": "$HOST_ID",
"type": "processes",
"metrics": {
"top_cpu_processes": [$(echo "$top_cpu" | while read line; do echo "\"$line\","; done | head -c -1)],
"top_memory_processes": [$(echo "$top_mem" | while read line; do echo "\"$line\","; done | head -c -1)],
"services": {$service_status}
}
}
EOF
log "Procesos: Métricas recopiladas en $metrics_file"
echo "$metrics_file"
}
# Run user-defined commands from the configuration file
collect_custom_metrics() {
    local timestamp=$(date +%s)
    local metrics_file="$METRICS_DIR/custom_$(date +%Y%m%d_%H%M%S).json"
    if [[ ${#CUSTOM_COMMANDS[@]} -eq 0 ]]; then
        return
    fi
    local custom_results=""
    local cmd cmd_name result
    for cmd in "${CUSTOM_COMMANDS[@]}"; do
        cmd_name=$(echo "$cmd" | awk '{print $1}')
        # eval runs arbitrary commands from the config file: keep agent.conf root-owned
        result=$(eval "$cmd" 2>/dev/null || echo "error")
        custom_results="$custom_results\"$cmd_name\": \"$result\","
    done
    # Strip the trailing comma
    custom_results=${custom_results%,}
    cat > "$metrics_file" << EOF
{
  "timestamp": $timestamp,
  "host": "$HOST_ID",
  "type": "custom",
  "metrics": {$custom_results}
}
EOF
    log "Custom: metrics collected in $metrics_file"
    echo "$metrics_file"
}
# Detect local anomalies
detect_anomalies() {
    local metrics_file="$1"
    # Parse the metrics from the JSON file (requires jq and bc)
    local cpu_usage=$(jq -r '.metrics.cpu.usage_percent // 0' "$metrics_file" 2>/dev/null)
    local mem_usage=$(jq -r '.metrics.memory.usage_percent // 0' "$metrics_file" 2>/dev/null)
    local disk_usage=$(jq -r '.metrics.disk.usage_percent // 0' "$metrics_file" 2>/dev/null)
    local alerts=""
    # Check against default thresholds; critical takes precedence over warning
    if (( $(echo "$cpu_usage > 95" | bc -l) )); then
        alerts="$alerts,{\"type\":\"cpu\",\"level\":\"critical\",\"value\":$cpu_usage,\"threshold\":95}"
    elif (( $(echo "$cpu_usage > 80" | bc -l) )); then
        alerts="$alerts,{\"type\":\"cpu\",\"level\":\"warning\",\"value\":$cpu_usage,\"threshold\":80}"
    fi
    if (( $(echo "$mem_usage > 85" | bc -l) )); then
        alerts="$alerts,{\"type\":\"memory\",\"level\":\"warning\",\"value\":$mem_usage,\"threshold\":85}"
    fi
    if (( $(echo "$disk_usage > 90" | bc -l) )); then
        alerts="$alerts,{\"type\":\"disk\",\"level\":\"critical\",\"value\":$disk_usage,\"threshold\":90}"
    fi
    # If there are alerts, write an alert file
    if [[ -n "$alerts" ]]; then
        alerts=${alerts#,} # Strip the leading comma
        local alert_file="$METRICS_DIR/alert_$(date +%Y%m%d_%H%M%S).json"
        cat > "$alert_file" << EOF
{
  "timestamp": $(date +%s),
  "host": "$HOST_ID",
  "type": "alert",
  "alerts": [$alerts]
}
EOF
        log "ALERT: anomalies detected - $alert_file"
        echo "$alert_file"
    fi
}
# Send metrics to the central server
send_to_central() {
    local metrics_file="$1"
    if [[ ! -f "$metrics_file" ]]; then
        return 1
    fi
    # Try netcat first, if available
    if command -v nc >/dev/null 2>&1; then
        if nc -z "$CENTRAL_SERVER" "$CENTRAL_PORT" 2>/dev/null; then
            if nc "$CENTRAL_SERVER" "$CENTRAL_PORT" < "$metrics_file"; then
                log "Metrics sent successfully to $CENTRAL_SERVER"
                rm -f "$metrics_file"
                return 0
            fi
        fi
    fi
    # Fallback: send via curl if an HTTP endpoint is available
    if command -v curl >/dev/null 2>&1; then
        if curl -s -X POST -H "Content-Type: application/json" \
            -d "@$metrics_file" "http://$CENTRAL_SERVER:8080/metrics" >/dev/null 2>&1; then
            log "Metrics sent via HTTP to $CENTRAL_SERVER"
            rm -f "$metrics_file"
            return 0
        fi
    fi
    log "WARNING: could not send metrics to the central server"
    return 1
}
# Clean up old files
cleanup_old_metrics() {
    find "$METRICS_DIR" -type f -name "*.json" -mtime +"$METRICS_RETENTION_DAYS" -delete 2>/dev/null
    find "/var/log/monitoring" -type f -name "*.log" -mtime +30 -delete 2>/dev/null
}
# Main collection routine
collect_all_metrics() {
    log "Starting metrics collection..."
    local metrics_files=()
    # Collect each enabled metric type
    if [[ "$ENABLE_SYSTEM_METRICS" == "true" ]]; then
        metrics_files+=($(collect_system_metrics))
    fi
    if [[ "$ENABLE_PROCESS_METRICS" == "true" ]]; then
        metrics_files+=($(collect_process_metrics))
    fi
    if [[ ${#CUSTOM_COMMANDS[@]} -gt 0 ]]; then
        metrics_files+=($(collect_custom_metrics))
    fi
    # Detect anomalies and ship the metrics
    local metrics_file alert_file
    for metrics_file in "${metrics_files[@]}"; do
        if [[ -f "$metrics_file" ]]; then
            # Detect anomalies
            alert_file=$(detect_anomalies "$metrics_file")
            # Ship the metrics file
            send_to_central "$metrics_file"
            # Ship the alert file, if one was produced
            if [[ -n "$alert_file" && -f "$alert_file" ]]; then
                send_to_central "$alert_file"
            fi
        fi
    done
    # Clean up old files
    cleanup_old_metrics
    log "Collection completed"
}
# Daemon mode
run_daemon() {
    log "Starting monitoring agent in daemon mode"
    while true; do
        collect_all_metrics
        sleep "$COLLECTION_INTERVAL"
    done
}
# Entry point
case "$1" in
    "start"|"daemon")
        run_daemon
        ;;
    "collect")
        collect_all_metrics
        ;;
    "test")
        echo "Running a test collection..."
        collect_all_metrics
        echo "Generated files:"
        ls -la "$METRICS_DIR"
        ;;
    "install")
        echo "Installing the agent service..."
        sudo cp "$0" /usr/local/bin/monitoring-agent
        sudo chmod +x /usr/local/bin/monitoring-agent
        # Create the systemd unit (assumes a "monitoring" user/group exists)
        sudo tee /etc/systemd/system/monitoring-agent.service > /dev/null << EOF
[Unit]
Description=Monitoring Agent
After=network.target

[Service]
ExecStart=/usr/local/bin/monitoring-agent daemon
Restart=always
User=monitoring
Group=monitoring

[Install]
WantedBy=multi-user.target
EOF
        sudo systemctl daemon-reload
        sudo systemctl enable monitoring-agent
        echo "Service installed. Run: sudo systemctl start monitoring-agent"
        ;;
    *)
        echo "Distributed Monitoring Agent"
        echo "Usage: $0 [start|collect|test|install]"
        echo ""
        echo " start/daemon - Run in daemon mode"
        echo " collect - Collect metrics once"
        echo " test - Test collection run"
        echo " install - Install as a systemd service"
        ;;
esac
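Before wiring the agent to a central server, you can exercise it locally. A quick sequence like the following should work, assuming the script is saved as metrics_agent.sh and jq, bc, and netcat are installed:

# One-off test run; writes JSON files under /var/lib/monitoring/metrics
sudo ./metrics_agent.sh test

# Inspect the most recent system metrics file
sudo jq . "$(ls -t /var/lib/monitoring/metrics/system_*.json | head -1)"

# Install and start the agent as a service
sudo ./metrics_agent.sh install
sudo systemctl start monitoring-agent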
#!/bin/bash
# central_server.sh - Central server that receives and processes metrics
METRICS_DB="/var/lib/monitoring/central"
DASHBOARD_DIR="/var/www/monitoring"
SERVER_PORT="9999"
HTTP_PORT="8080"
ALERT_CONFIG="/etc/monitoring/alerts.conf"
setup_database() {
    mkdir -p "$METRICS_DB"/{raw,processed,alerts,dashboards}
    # Create the SQLite tables if they do not exist
    sqlite3 "$METRICS_DB/metrics.db" << 'EOF'
CREATE TABLE IF NOT EXISTS metrics (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp INTEGER,
    host TEXT,
    type TEXT,
    data TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_timestamp ON metrics(timestamp);
CREATE INDEX IF NOT EXISTS idx_host ON metrics(host);
CREATE INDEX IF NOT EXISTS idx_type ON metrics(type);
CREATE TABLE IF NOT EXISTS alerts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp INTEGER,
    host TEXT,
    alert_type TEXT,
    level TEXT,
    value REAL,
    threshold REAL,
    resolved_at INTEGER DEFAULT NULL,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
EOF
}
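# Example ad-hoc queries against the resulting database (illustrative):
#   sqlite3 /var/lib/monitoring/central/metrics.db \
#     "SELECT host, COUNT(*) FROM metrics GROUP BY host;"
#   sqlite3 /var/lib/monitoring/central/metrics.db \
#     "SELECT host, alert_type, level FROM alerts WHERE resolved_at IS NULL;"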
# TCP server that receives metrics
start_tcp_server() {
    echo "Starting TCP server on port $SERVER_PORT..."
    while true; do
        # Listen with netcat; each connection delivers one JSON document per line.
        # Note: traditional netcat needs "-l -p PORT"; the OpenBSD variant uses "nc -l PORT"
        nc -l -p "$SERVER_PORT" | while read -r line; do
            if [[ -n "$line" ]]; then
                process_metrics "$line"
            fi
        done
    done
}
# Process received metrics
process_metrics() {
    local metrics_json="$1"
    local timestamp=$(date +%s)
    local raw_file="$METRICS_DB/raw/metrics_${timestamp}_$$.json"
    # Keep the raw payload
    echo "$metrics_json" > "$raw_file"
    # Validate the JSON before touching the database
    if ! echo "$metrics_json" | jq . >/dev/null 2>&1; then
        echo "ERROR: invalid JSON received"
        return 1
    fi
    # Extract the basic fields
    local host=$(echo "$metrics_json" | jq -r '.host // "unknown"')
    local type=$(echo "$metrics_json" | jq -r '.type // "unknown"')
    local metric_timestamp=$(echo "$metrics_json" | jq -r '.timestamp // 0')
    # Insert into the database, doubling single quotes so the SQL literal stays valid
    local escaped_json=${metrics_json//"'"/"''"}
    sqlite3 "$METRICS_DB/metrics.db" << EOF
INSERT INTO metrics (timestamp, host, type, data)
VALUES ($metric_timestamp, '$host', '$type', '$escaped_json');
EOF
    echo "Metrics processed: $host/$type"
    # Alerts get additional handling
    if [[ "$type" == "alert" ]]; then
        process_alert "$metrics_json"
    fi
    # Refresh the dashboard in near real time
    update_dashboard
}
# Process alerts
process_alert() {
    local alert_json="$1"
    local host=$(echo "$alert_json" | jq -r '.host')
    local timestamp=$(echo "$alert_json" | jq -r '.timestamp')
    # Extract each individual alert
    echo "$alert_json" | jq -c '.alerts[]' | while read -r alert; do
        local alert_type=$(echo "$alert" | jq -r '.type')
        local level=$(echo "$alert" | jq -r '.level')
        local value=$(echo "$alert" | jq -r '.value')
        local threshold=$(echo "$alert" | jq -r '.threshold')
        # Insert into the alerts table
        sqlite3 "$METRICS_DB/metrics.db" << EOF
INSERT INTO alerts (timestamp, host, alert_type, level, value, threshold)
VALUES ($timestamp, '$host', '$alert_type', '$level', $value, $threshold);
EOF
        echo "ALERT: $host - $alert_type ($level) - Value: $value, Threshold: $threshold"
        # Send the notification
        send_alert_notification "$host" "$alert_type" "$level" "$value" "$threshold"
    done
}
# Send alert notifications
send_alert_notification() {
    local host="$1"
    local alert_type="$2"
    local level="$3"
    local value="$4"
    local threshold="$5"
    local message="🚨 ALERT [$level]: $host - $alert_type
Current value: $value
Threshold: $threshold
Timestamp: $(date)"
    # Slack
    if [[ -n "$SLACK_WEBHOOK" ]]; then
        curl -X POST -H 'Content-type: application/json' \
            --data "{\"text\":\"$message\"}" "$SLACK_WEBHOOK" 2>/dev/null
    fi
    # Email
    if [[ -n "$ALERT_EMAIL" ]]; then
        echo "$message" | mail -s "Monitoring alert: $host" "$ALERT_EMAIL" 2>/dev/null
    fi
    # Local log
    echo "$message" >> "$METRICS_DB/alerts/alerts.log"
}
# Generate the HTML dashboard
update_dashboard() {
    local dashboard_file="$DASHBOARD_DIR/index.html"
    mkdir -p "$DASHBOARD_DIR"
    # Recent metrics (last hour)
    local recent_timestamp=$(($(date +%s) - 3600))
    # Export chart data (the -json output mode requires sqlite3 >= 3.33)
    sqlite3 -json "$METRICS_DB/metrics.db" "
        SELECT host, type, data, timestamp
        FROM metrics
        WHERE timestamp > $recent_timestamp
        ORDER BY timestamp DESC
        LIMIT 1000
    " > "$DASHBOARD_DIR/recent_metrics.json"
    # Active alerts
    sqlite3 -json "$METRICS_DB/metrics.db" "
        SELECT * FROM alerts
        WHERE resolved_at IS NULL
        ORDER BY timestamp DESC
    " > "$DASHBOARD_DIR/active_alerts.json"
    # Minimal dashboard skeleton; a full page would load the JSON files above via JavaScript
    cat > "$dashboard_file" << 'EOF'
<!DOCTYPE html>
<html>
<head><meta charset="utf-8"><title>Monitoring Dashboard</title></head>
<body>
  <h1>Monitoring System - Dashboard</h1>
  <section><h2>Active Alerts</h2><div id="alerts"></div></section>
  <section><h2>Monitored Hosts</h2><div id="hosts"></div></section>
  <section><h2>CPU Metrics per Host</h2><div id="cpu-chart"></div></section>
  <section><h2>Memory Metrics per Host</h2><div id="memory-chart"></div></section>
</body>
</html>
EOF
}
# Simple HTTP server for the dashboard
start_http_server() {
    cd "$DASHBOARD_DIR" || return 1
    if command -v python3 >/dev/null 2>&1; then
        echo "Starting HTTP server on port $HTTP_PORT..."
        python3 -m http.server "$HTTP_PORT"
    elif command -v python >/dev/null 2>&1; then
        python -m SimpleHTTPServer "$HTTP_PORT"
    else
        echo "ERROR: Python is not available for the HTTP server"
        return 1
    fi
}
# Entry point
main() {
    setup_database
    case "$1" in
        "start")
            echo "Starting the full central server..."
            # Start the TCP server in the background
            start_tcp_server &
            TCP_PID=$!
            # Start the HTTP server
            start_http_server &
            HTTP_PID=$!
            echo "TCP server PID: $TCP_PID"
            echo "HTTP server PID: $HTTP_PID"
            echo "Dashboard available at: http://localhost:$HTTP_PORT"
            # Wait for signals
            trap "kill $TCP_PID $HTTP_PID 2>/dev/null; exit" SIGINT SIGTERM
            wait
            ;;
        "tcp")
            start_tcp_server
            ;;
        "http")
            start_http_server
            ;;
        "dashboard")
            update_dashboard
            echo "Dashboard updated in $DASHBOARD_DIR"
            ;;
        *)
            echo "Central Monitoring Server"
            echo "Usage: $0 [start|tcp|http|dashboard]"
            echo ""
            echo " start - Start the full server (TCP + HTTP)"
            echo " tcp - Metrics TCP server only"
            echo " http - Dashboard HTTP server only"
            echo " dashboard - Rebuild the dashboard manually"
            ;;
    esac
}
main "$@"
Build a complete deployment system that handles multiple applications, environments, deployment strategies, and automatic rollback.
#!/bin/bash
# deployment_system.sh - Complete automated deployment system
DEPLOY_CONFIG_DIR="/etc/deployment"
DEPLOY_WORKSPACE="/var/lib/deployment"
APPS_CONFIG="$DEPLOY_CONFIG_DIR/apps.yaml"
GLOBAL_CONFIG="$DEPLOY_CONFIG_DIR/global.conf"
# Parse simple YAML (flat "key: value" pairs only; use yq for anything nested)
parse_yaml() {
    local yaml_file="$1"
    local prefix="$2"
    # Emit bash assignments of the form <prefix>_<key>="value"; only keys made
    # of word characters are accepted so the generated variable names stay valid
    sed -n 's/^[[:space:]]*\([A-Za-z0-9_]\+\):[[:space:]]*\(.*\)$/'"$prefix"'_\1="\2"/p' "$yaml_file"
}
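# Example (hypothetical apps.yaml contents):
#   name: webapp
#   replicas: 3
# Running eval "$(parse_yaml /etc/deployment/apps.yaml app)" then defines:
#   app_name="webapp"
#   app_replicas="3"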
# Rolling update deployment
deploy_rolling() {
    local app_name="$1"
    local environment="$2"
    local image_tag="$3"
    echo "Starting rolling deployment: $app_name -> $environment"
    # The implementation depends on the orchestrator
    case "$ORCHESTRATOR" in
        "kubernetes")
            kubectl set image deployment/"$app_name" "$app_name"="$image_tag" \
                -n "$environment"
            kubectl rollout status deployment/"$app_name" -n "$environment" \
                --timeout=600s
            ;;
        "docker-swarm")
            docker service update --image "$image_tag" \
                "${environment}_${app_name}"
            ;;
        "systemd")
            # For traditional applications managed with systemd
            deploy_systemd_app "$app_name" "$environment" "$image_tag"
            ;;
    esac
}
# Blue-green deployment
deploy_blue_green() {
    local app_name="$1"
    local environment="$2"
    local image_tag="$3"
    echo "Starting blue-green deployment: $app_name"
    # Determine the current and the new color
    local current_color=$(get_current_deployment_color "$app_name" "$environment")
    local new_color="blue"
    [[ "$current_color" == "blue" ]] && new_color="green"
    echo "Deploying to the $new_color environment"
    # Deploy the new color
    deploy_color_environment "$app_name" "$environment" "$new_color" "$image_tag"
    # Health checks
    if perform_health_checks "$app_name" "$environment" "$new_color"; then
        echo "Health checks passed - switching traffic"
        switch_traffic "$app_name" "$environment" "$new_color"
        # Wait for propagation
        sleep 30
        # Tear down the previous environment
        cleanup_old_environment "$app_name" "$environment" "$current_color"
        echo "Blue-green deployment completed"
    else
        echo "Health checks failed - cleaning up the failed deployment"
        cleanup_old_environment "$app_name" "$environment" "$new_color"
        return 1
    fi
}
# Canary deployment
deploy_canary() {
    local app_name="$1"
    local environment="$2"
    local image_tag="$3"
    local traffic_percent="${4:-10}"
    echo "Starting canary deployment: $app_name (${traffic_percent}% traffic)"
    # Deploy the canary version
    deploy_canary_version "$app_name" "$environment" "$image_tag" "$traffic_percent"
    # Monitor during the observation window
    local observation_period=300 # 5 minutes
    local start_time=$(date +%s)
    while [[ $(($(date +%s) - start_time)) -lt $observation_period ]]; do
        if ! monitor_canary_health "$app_name" "$environment"; then
            echo "Canary metrics degraded - automatic rollback"
            rollback_canary "$app_name" "$environment"
            return 1
        fi
        sleep 30
    done
    echo "Canary succeeded - promoting to production"
    promote_canary "$app_name" "$environment"
}
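# The canary helpers used above (deploy_canary_version, rollback_canary,
# promote_canary) are environment-specific and not shown here. As a minimal
# illustrative sketch, monitor_canary_health could compare the canary's error
# rate against a fixed budget; the /metrics error_rate field is a hypothetical
# example of what your application might expose:
monitor_canary_health() {
    local app_name="$1"
    local environment="$2"
    local error_rate
    error_rate=$(curl -s --max-time 10 \
        "http://${app_name}-canary.${environment}.local/metrics" \
        | jq -r '.error_rate // 100' 2>/dev/null) || error_rate=100
    # Healthy while the error rate stays below 5%
    (( $(echo "${error_rate:-100} < 5" | bc -l) ))
}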
# Automatic rollback
perform_rollback() {
    local app_name="$1"
    local environment="$2"
    local target_version="$3"
    echo "Starting rollback of $app_name to version $target_version"
    case "$DEPLOYMENT_STRATEGY" in
        "rolling")
            kubectl rollout undo deployment/"$app_name" -n "$environment" \
                ${target_version:+--to-revision=$target_version}
            ;;
        "blue-green")
            # Switch back to the previous color
            local previous_color=$(get_previous_deployment_color "$app_name" "$environment")
            switch_traffic "$app_name" "$environment" "$previous_color"
            ;;
        "canary")
            rollback_canary "$app_name" "$environment"
            ;;
    esac
    # Verify that the rollback succeeded
    if perform_health_checks "$app_name" "$environment"; then
        echo "Rollback completed successfully"
        send_notification "✅ Rollback succeeded: $app_name in $environment"
    else
        echo "ERROR: rollback failed - manual intervention required"
        send_critical_alert "🚨 CRITICAL: rollback failed for $app_name"
        return 1
    fi
}
# Advanced health checks
perform_health_checks() {
    local app_name="$1"
    local environment="$2"
    local color="${3:-}"
    echo "Running health checks for $app_name"
    local health_endpoint="http://${app_name}${color:+-$color}.${environment}.local/health"
    local max_attempts=20
    local attempt=1
    while [[ $attempt -le $max_attempts ]]; do
        echo "Health check attempt $attempt/$max_attempts"
        # HTTP health check
        if curl -f -s --max-time 10 "$health_endpoint" >/dev/null; then
            echo "✓ HTTP health check passed"
            # Additional smoke tests
            if run_smoke_tests "$app_name" "$environment" "$color"; then
                echo "✓ Smoke tests passed"
                return 0
            fi
        fi
        sleep 15
        ((attempt++))
    done
    echo "✗ Health checks failed after $max_attempts attempts"
    return 1
}
# Application-specific smoke tests
run_smoke_tests() {
    local app_name="$1"
    local environment="$2"
    local color="$3"
    local base_url="http://${app_name}${color:+-$color}.${environment}.local"
    # Basic endpoint checks, expressed as "url:expected_http_code"
    local tests=(
        "$base_url/api/status:200"
        "$base_url/api/version:200"
        "$base_url/metrics:200"
    )
    local test url expected_code actual_code
    for test in "${tests[@]}"; do
        # Split at the LAST colon, so the colon in "http://" is left intact
        url="${test%:*}"
        expected_code="${test##*:}"
        actual_code=$(curl -s -o /dev/null --max-time 10 -w "%{http_code}" "$url")
        if [[ "$actual_code" != "$expected_code" ]]; then
            echo "✗ Smoke test failed: $url (expected $expected_code, got $actual_code)"
            return 1
        fi
    done
    echo "✓ All smoke tests passed"
    return 0
}
# Full CI/CD pipeline
run_pipeline() {
    local app_name="$1"
    local git_branch="$2"
    local target_environment="$3"
    local pipeline_id="pipeline_$(date +%Y%m%d_%H%M%S)"
    local workspace="$DEPLOY_WORKSPACE/$pipeline_id"
    mkdir -p "$workspace"
    cd "$workspace" || return 1
    echo "=== Pipeline $pipeline_id ===" | tee pipeline.log
    echo "App: $app_name" | tee -a pipeline.log
    echo "Branch: $git_branch" | tee -a pipeline.log
    echo "Environment: $target_environment" | tee -a pipeline.log
    # 1. Clone and checkout
    if ! git clone "$GIT_REPOSITORY" . >> pipeline.log 2>&1; then
        echo "ERROR: Git clone failed" | tee -a pipeline.log
        return 1
    fi
    git checkout "$git_branch" >> pipeline.log 2>&1
    # 2. Build
    echo "Building application..." | tee -a pipeline.log
    if ! run_build_process >> pipeline.log 2>&1; then
        echo "ERROR: Build failed" | tee -a pipeline.log
        return 1
    fi
    # 3. Tests
    echo "Running tests..." | tee -a pipeline.log
    if ! run_test_suite >> pipeline.log 2>&1; then
        echo "ERROR: Tests failed" | tee -a pipeline.log
        return 1
    fi
    # 4. Security scan
    echo "Running security scans..." | tee -a pipeline.log
    if ! run_security_scan >> pipeline.log 2>&1; then
        echo "WARNING: Security issues detected" | tee -a pipeline.log
        # Continue, but notify
        send_notification "⚠️ Security scan warnings for $app_name"
    fi
    # 5. Build the Docker image
    local image_tag="$app_name:$(git rev-parse --short HEAD)"
    echo "Building Docker image: $image_tag" | tee -a pipeline.log
    if ! docker build -t "$image_tag" . >> pipeline.log 2>&1; then
        echo "ERROR: Docker build failed" | tee -a pipeline.log
        return 1
    fi
    # 6. Push to the registry
    if ! push_to_registry "$image_tag" >> pipeline.log 2>&1; then
        echo "ERROR: Push to registry failed" | tee -a pipeline.log
        return 1
    fi
    # 7. Deploy (capture the status explicitly rather than relying on $? after the case)
    echo "Deploying to $target_environment..." | tee -a pipeline.log
    local deploy_status=0
    case "$DEPLOYMENT_STRATEGY" in
        "rolling")
            deploy_rolling "$app_name" "$target_environment" "$image_tag" || deploy_status=$?
            ;;
        "blue-green")
            deploy_blue_green "$app_name" "$target_environment" "$image_tag" || deploy_status=$?
            ;;
        "canary")
            deploy_canary "$app_name" "$target_environment" "$image_tag" || deploy_status=$?
            ;;
        *)
            echo "ERROR: Unknown deployment strategy: $DEPLOYMENT_STRATEGY"
            return 1
            ;;
    esac
    if [[ $deploy_status -eq 0 ]]; then
        echo "=== Pipeline Completed Successfully ===" | tee -a pipeline.log
        send_notification "✅ Deployment succeeded: $app_name -> $target_environment"
        # Archive the logs
        mkdir -p "$DEPLOY_WORKSPACE/completed"
        cp pipeline.log "$DEPLOY_WORKSPACE/completed/$pipeline_id.log"
    else
        echo "=== Pipeline Failed ===" | tee -a pipeline.log
        send_notification "❌ Deployment failed: $app_name -> $target_environment"
        # Attempt an automatic rollback
        if [[ "$AUTO_ROLLBACK" == "true" ]]; then
            perform_rollback "$app_name" "$target_environment"
        fi
        return 1
    fi
}
# Entry point
main() {
    case "$1" in
        "deploy")
            run_pipeline "$2" "$3" "$4"
            ;;
        "rollback")
            perform_rollback "$2" "$3" "$4"
            ;;
        "status")
            show_deployment_status "$2" "$3"
            ;;
        "pipeline")
            list_pipeline_history
            ;;
        *)
            echo "Automated Deployment System"
            echo "Usage: $0 [command] [arguments]"
            echo ""
            echo "Commands:"
            echo " deploy <app> <branch> <env> - Run the full pipeline"
            echo " rollback <app> <env> [version] - Roll back to a previous version"
            echo " status <app> <env> - Deployment status"
            echo " pipeline - Pipeline history"
            ;;
    esac
}
main "$@"
Build a distributed system for log collection, processing, and analysis, with search, alerting, and real-time visualization capabilities.
This project would include components such as log shippers, stream processors, search indexes, anomaly detectors, and visualization dashboards. The full implementation would be extensive and would combine several of the advanced techniques covered above.
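As a starting point, a log shipper can reuse the same TCP transport as the monitoring agent. Here is a minimal sketch; the default log path is an assumption, the central endpoint matches the server above, and tail -F, jq, and nc are the same tools used earlier in this section:

#!/bin/bash
# log_shipper.sh - minimal sketch: tail a log file and ship each line as JSON
LOG_SOURCE="${1:-/var/log/syslog}"      # assumed default log file
CENTRAL_SERVER="monitoring.company.com" # same central server as the agent
CENTRAL_PORT="9999"
HOST_ID=$(hostname)

# Follow the file across rotations and wrap every line in a JSON envelope;
# jq -R safely quotes the raw log line as a JSON string
tail -F "$LOG_SOURCE" | while read -r line; do
    printf '{"timestamp":%s,"host":"%s","type":"log","message":%s}\n' \
        "$(date +%s)" "$HOST_ID" "$(printf '%s' "$line" | jq -R .)" \
        | nc "$CENTRAL_SERVER" "$CENTRAL_PORT"
done

Opening one connection per line keeps the sketch simple; a production shipper would batch lines or hold a persistent connection.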
Congratulations! You have completed the most comprehensive Bash scripting course. You now have the skills to build robust, scalable systems.