LVS Configuration (Layer 4 Load Balancing)


I. Solution Overview

1. LVS-DR Mode Configuration (Recommended)

Network architecture:

text

Client → LVS (VIP: 192.168.1.100) → Nginx servers (VIP bound on lo, ARP-suppressed) → application servers
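
Once keepalived has loaded the configuration shown in 1.1 below, the resulting IPVS forwarding table can be inspected on the active LVS node. The output in the comments is illustrative only and will vary with kernel and weights:

bash

# Inspect the IPVS rules generated by keepalived (example output shown as comments)
ipvsadm -Ln
# IP Virtual Server version 1.2.1 (size=4096)
# Prot LocalAddress:Port Scheduler Flags
#   -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
# TCP  192.168.1.100:80 wrr persistent 50
#   -> 192.168.1.101:80             Route   1      0          0
#   -> 192.168.1.102:80             Route   1      0          0
#   -> 192.168.1.103:80             Route   2      0          0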

1.1 LVS Master Configuration (/etc/keepalived/keepalived.conf)

bash

! Configuration File for keepalived

global_defs {
   router_id LVS_MASTER  # Node identifier; change to LVS_BACKUP on the backup node
}

# Script-based health check for the local LVS state
vrrp_script chk_lvs {
    script "/etc/keepalived/check_lvs.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER          # Change to BACKUP on the backup node
    interface eth0        # Network interface name
    virtual_router_id 51  # VRRP group ID; must match on master and backup
    priority 100          # Priority; set to 90 on the backup node
    advert_int 1          # VRRP advertisement (heartbeat) interval in seconds
    
    authentication {
        auth_type PASS
        auth_pass 1111    # Must be identical on master and backup
    }
    
    virtual_ipaddress {
        192.168.1.100/24 dev eth0 label eth0:0  # The VIP
    }
    
    track_script {
        chk_lvs           # Run the health-check script defined above
    }
}

# Virtual server definition (load-balancing rules)
virtual_server 192.168.1.100 80 {
    delay_loop 6          # Health-check interval in seconds
    lb_algo wrr           # Weighted round-robin
    lb_kind DR            # Direct Routing mode
    persistence_timeout 50  # Session persistence time in seconds
    protocol TCP          # Protocol
    
    # Real server 1 - Nginx node
    real_server 192.168.1.101 80 {
        weight 1          # Weight
        TCP_CHECK {
            connect_timeout 3      # Connection timeout
            nb_get_retry 3         # Number of retries
            delay_before_retry 3   # Delay between retries
            connect_port 80        # Port to check
        }
    }
    
    # Real server 2 - Nginx node
    real_server 192.168.1.102 80 {
        weight 1
        HTTP_GET {
            url {
              path /health_check  # Health-check URL
              status_code 200     # Expected status code
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 2
        }
    }
    
    # Real server 3 - Nginx node
    real_server 192.168.1.103 80 {
        weight 2          # Higher weight, receives more requests
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

# HTTPS load balancing (port 443)
virtual_server 192.168.1.100 443 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    
    real_server 192.168.1.101 443 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            connect_port 443
        }
    }
    # Additional real servers...
}
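
Before (re)starting keepalived, the file can be syntax-checked. The config-test option is only available in keepalived 2.x; older releases simply log parse errors at startup, so treat this as an optional step:

bash

# Validate the configuration (keepalived 2.x and later)
keepalived -t -f /etc/keepalived/keepalived.conf && echo "keepalived.conf OK"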

1.2 LVS Health-Check Script (/etc/keepalived/check_lvs.sh)

bash

#!/bin/bash
# LVS health-check script, executed by keepalived via track_script

# Check the ipvsadm service
if ! systemctl is-active --quiet ipvsadm; then
    systemctl restart ipvsadm
    sleep 2
    if ! systemctl is-active --quiet ipvsadm; then
        exit 1
    fi
fi

# Check that the VIP is bound locally
VIP="192.168.1.100"
if ! ip addr show | grep -qw "$VIP"; then
    exit 1
fi

# Check backend (real server) reachability
BACKEND_SERVERS=("192.168.1.101" "192.168.1.102" "192.168.1.103")
for server in "${BACKEND_SERVERS[@]}"; do
    if ! timeout 2 nc -z $server 80; then
        logger "LVS健康检查: $server 不可达"
    fi
done

exit 0

1.3 LVS Backup Server Configuration

bash

! Configuration File for keepalived
global_defs {
   router_id LVS_BACKUP
}

vrrp_instance VI_1 {
    state BACKUP          # Backup node
    interface eth0
    virtual_router_id 51  # Must match the master
    priority 90           # Lower priority than the master
    advert_int 1
    
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    
    virtual_ipaddress {
        192.168.1.100/24 dev eth0 label eth0:0
    }
}

# Virtual server configuration, identical to the master
virtual_server 192.168.1.100 80 {
    # ... same as the master configuration
}

2. Backend Nginx Server Configuration (DR Mode Only)

2.1 Bind the VIP to the Loopback Interface (/etc/sysconfig/network-scripts/ifcfg-lo:0)

bash

DEVICE=lo:0
IPADDR=192.168.1.100
NETMASK=255.255.255.255
ONBOOT=yes
NAME=loopback
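
On systems that no longer ship the legacy network-scripts package, the same binding can be done with iproute2. These are one-off commands; to persist them, add them to the lvs_dr script in 2.2 below or to a boot-time unit:

bash

# Bind the VIP to loopback with a /32 mask so it is never announced on the LAN
ip addr add 192.168.1.100/32 dev lo label lo:0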

2.2 ARP Suppression Script (/etc/init.d/lvs_dr)

bash

#!/bin/bash
# ARP suppression and VIP setup for LVS-DR real servers

VIP=192.168.1.100

case "$1" in
start)
    echo "Starting LVS-DR configuration..."
    
    # 1. Configure ARP suppression
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    
    # 2. Add the VIP to the loopback interface
    ifconfig lo:0 $VIP netmask 255.255.255.255 broadcast $VIP up
    
    # 3. Add a host route for the VIP
    route add -host $VIP dev lo:0
    
    echo "LVS-DR configuration completed."
    ;;
stop)
    echo "Stopping LVS-DR configuration..."
    
    # Remove the VIP
    ifconfig lo:0 down
    
    # Restore the default ARP settings
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    
    echo "LVS-DR configuration removed."
    ;;
*)
    echo "Usage: $0 {start|stop}"
    exit 1
esac

2.3 Enable at Boot

bash

chmod +x /etc/init.d/lvs_dr
chkconfig --add lvs_dr
chkconfig lvs_dr on
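
On systemd-only distributions where chkconfig is unavailable, a minimal unit file can wrap the same script. This is only a sketch; the unit name is arbitrary:

bash

# Hypothetical systemd unit as an alternative to chkconfig (sketch)
cat > /etc/systemd/system/lvs-dr.service <<'EOF'
[Unit]
Description=LVS-DR ARP suppression and VIP on loopback
After=network.target

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/etc/init.d/lvs_dr start
ExecStop=/etc/init.d/lvs_dr stop

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable --now lvs-dr.service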

II. Nginx Configuration (Layer 7 Load Balancing)

1. Nginx Load-Balancing Configuration (/etc/nginx/nginx.conf)

nginx

# User and worker processes
user nginx;
worker_processes auto;  # One worker per CPU core
worker_rlimit_nofile 65535;  # File descriptor limit per worker

# Events module
events {
    worker_connections 10240;  # Connections per worker
    use epoll;                 # Use the epoll event model
    multi_accept on;           # Accept multiple connections at once
}

# HTTP module
http {
    # Basic settings
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    
    # Log format
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" '
                    'upstream_addr=$upstream_addr upstream_status=$upstream_status '
                    'request_time=$request_time upstream_response_time=$upstream_response_time';
    
    access_log /var/log/nginx/access.log main buffer=32k flush=5s;
    error_log /var/log/nginx/error.log warn;
    
    # Performance tuning
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;
    client_max_body_size 100m;
    client_body_buffer_size 128k;
    
    # Proxy timeouts and buffers
    proxy_connect_timeout 5s;
    proxy_send_timeout 60s;
    proxy_read_timeout 60s;
    proxy_buffer_size 64k;
    proxy_buffers 4 64k;
    proxy_busy_buffers_size 128k;
    proxy_temp_file_write_size 256k;
    
    # Upstream - application server cluster
    upstream backend_servers {
        # Consistent hashing on the client IP for session affinity
        hash $remote_addr consistent;

        # Shared memory zone holding upstream state
        zone backend_zone 64k;
        keepalive 32;  # Idle keepalive connections to the backends

        # Application servers
        server 10.0.1.1:8080 weight=3 max_fails=3 fail_timeout=30s;
        server 10.0.1.2:8080 weight=2 max_fails=3 fail_timeout=30s;
        server 10.0.1.3:8080 weight=1 max_fails=3 fail_timeout=30s;
        server 10.0.1.4:8080 weight=1 max_fails=3 fail_timeout=30s;  # Note: the backup flag cannot be combined with the hash balancing method
        
        # Active health checks (require the third-party nginx_upstream_check_module,
        # e.g. as shipped with Tengine; remove these three lines on stock nginx)
        check interval=3000 rise=2 fall=3 timeout=1000 type=http;
        check_http_send "HEAD /health HTTP/1.0\r\n\r\n";
        check_http_expect_alive http_2xx http_3xx;
    }
    
    # Upstream - microservice cluster (example)
    upstream api_service {
        least_conn;  # Least-connections algorithm
        
        server 10.0.2.1:8001;
        server 10.0.2.2:8001;
        server 10.0.2.3:8001;
        
        # Session stickiness (requires nginx-sticky-module)
        # sticky cookie srv_id expires=1h domain=.example.com path=/;
    }
    
    # Server block
    server {
        listen 80;
        server_name _;
        
        # Status page (internal access only)
        location /nginx_status {
            stub_status on;
            access_log off;
            allow 192.168.1.0/24;  # Internal network
            allow 127.0.0.1;
            deny all;
        }
        
        # Health-check endpoint
        location /health_check {
            access_log off;
            return 200 "OK";
            add_header Content-Type text/plain;
        }
        
        # Static assets
        location ~* \.(jpg|jpeg|png|gif|ico|css|js|woff|woff2|ttf|svg)$ {
            expires 30d;
            add_header Cache-Control "public, immutable";
            
            # If static files live on the application servers
            proxy_pass http://backend_servers;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
        
        # Dynamic request proxying
        location / {
            # Rate limiting
            limit_req zone=req_limit burst=20 nodelay;

            # Proxy to the backend cluster
            proxy_pass http://backend_servers;
            
            # Proxy headers
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Host $server_name;
            
            # Timeouts
            proxy_connect_timeout 5s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
            
            # Fail over to the next upstream on errors
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
            proxy_next_upstream_tries 3;
            proxy_next_upstream_timeout 10s;
            
            # Buffering
            proxy_buffering on;
            proxy_buffer_size 4k;
            proxy_buffers 8 4k;
            proxy_busy_buffers_size 8k;
            
            # Strip Accept-Encoding so the backends return uncompressed responses
            # (enable gzip in Nginx if compression towards clients is required)
            proxy_set_header Accept-Encoding "";
        }
        
        # API-specific settings
        location /api/ {
            proxy_pass http://api_service;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            
            # Shorter timeouts for the API
            proxy_connect_timeout 3s;
            proxy_send_timeout 30s;
            proxy_read_timeout 30s;
        }
    }
    
    # HTTPS (if required)
    server {
        listen 443 ssl http2;
        server_name www.example.com;
        
        ssl_certificate /etc/nginx/ssl/server.crt;
        ssl_certificate_key /etc/nginx/ssl/server.key;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers HIGH:!aNULL:!MD5;
        
        # SSL session cache
        ssl_session_cache shared:SSL:10m;
        ssl_session_timeout 10m;
        
        location / {
            proxy_pass http://backend_servers;
            # ... same proxy settings as above
        }
    }
    
    # Rate-limit zones (http context)
    limit_req_zone $binary_remote_addr zone=req_limit:10m rate=10r/s;
    limit_conn_zone $binary_remote_addr zone=conn_limit:10m;
}

# TCP/UDP load balancing (layer 4 proxying, optional)
stream {
    # Backend server group
    upstream backend_tcp {
        server 10.0.1.1:3306 weight=1 max_fails=3 fail_timeout=30s;
        server 10.0.1.2:3306 weight=1 max_fails=3 fail_timeout=30s;
    }
    
    # MySQL load balancing
    server {
        listen 3306;
        proxy_pass backend_tcp;
        proxy_connect_timeout 1s;
        proxy_timeout 3s;
    }
}
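
After editing nginx.conf, always validate the syntax and apply it with a graceful reload so existing connections are not dropped:

bash

# Check the configuration, then reload workers gracefully
nginx -t && systemctl reload nginx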

2. Nginx Health-Check Page

Create /usr/share/nginx/html/health.html:

html

<!DOCTYPE html>
<html>
<head>
    <title>Nginx Health Status</title>
</head>
<body>
    <h1>Nginx Status: <span style="color:green">Healthy</span></h1>
    <!-- Nginx serves this file as-is and cannot execute server-side templates;
         if hostname, time, or load average are needed, regenerate the page
         periodically (e.g. from cron) or expose them via a backend endpoint. -->
</body>
</html>
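
Note that the nginx.conf above proxies `location /` to the backends, so serving this page locally assumes a dedicated static location such as `location = /health.html { root /usr/share/nginx/html; access_log off; }` is added to the server block. With that in place, a quick check from the Nginx node itself:

bash

# Static health page served from /usr/share/nginx/html
curl -s http://127.0.0.1/health.html | grep -q "Healthy" && echo "health page OK"

# Lightweight endpoint defined in nginx.conf (returns 200 "OK")
curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1/health_check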

III. Verification and Management Scripts

1. LVS Status Check Script (/usr/local/bin/check_lvs_status.sh)

bash

#!/bin/bash
# Check LVS status

echo "=== LVS Status Check ==="
echo "1. VIP binding:"
ip addr show | grep -A5 "eth0:"

echo -e "\n2. LVS rules:"
ipvsadm -Ln

echo -e "\n3. Connection statistics:"
ipvsadm -Lnc

echo -e "\n4. Backend server status:"
for server in 192.168.1.101 192.168.1.102 192.168.1.103; do
    if nc -z $server 80 2>/dev/null; then
        echo "$server: ✓ reachable"
    else
        echo "$server: ✗ unreachable"
    fi
done

echo -e "\n5. Keepalived service:"
systemctl status keepalived --no-pager -l

2. Nginx Status Check Script (/usr/local/bin/check_nginx_status.sh)

bash

#!/bin/bash
# Check Nginx status

echo "=== Nginx Status Check ==="

# Processes
echo "1. Nginx processes:"
ps aux | grep -E "nginx:\s+(master|worker)" | grep -v grep

# Listening ports
echo -e "\n2. Listening ports:"
netstat -tlnp | grep nginx

# Connection counts
echo -e "\n3. Current connection statistics:"
echo "Established connections: $(netstat -ant | grep :80 | grep ESTABLISHED | wc -l)"
echo "Connections in *_WAIT states: $(netstat -ant | grep :80 | grep WAIT | wc -l)"

# Recent errors
echo -e "\n4. Recent error log entries:"
tail -20 /var/log/nginx/error.log

# Configuration syntax
echo -e "\n5. Configuration check:"
nginx -t

3. Automatic Monitoring and Recovery Script (/usr/local/bin/lvs_monitor.sh)

bash

#!/bin/bash
# LVS monitoring and automatic recovery
# Run this on the node that is expected to hold the VIP (the master).

LOG_FILE="/var/log/lvs_monitor.log"
VIP="192.168.1.100"

log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

# Check that the VIP is bound
check_vip() {
    if ! ip addr show | grep -qw "$VIP"; then
        log "VIP $VIP is not bound, attempting recovery..."
        systemctl restart keepalived
        sleep 2
        
        if ip addr show | grep -qw "$VIP"; then
            log "VIP recovered"
        else
            log "VIP recovery failed, manual intervention required"
            # Send an alert
            echo "LVS VIP failure" | mail -s "LVS alert" admin@example.com
        fi
    fi
}

# Check the backend servers
check_backends() {
    BACKENDS=("192.168.1.101" "192.168.1.102" "192.168.1.103")
    
    for backend in "${BACKENDS[@]}"; do
        if ! timeout 2 nc -z "$backend" 80; then
            log "Backend server $backend is unreachable"
            
            # Remove the failed server from LVS
            ipvsadm -d -t $VIP:80 -r $backend:80 2>/dev/null
            log "Removed $backend from LVS"
        else
            # Re-add the server if it is missing from the LVS table
            if ! ipvsadm -Ln | grep -q "$backend"; then
                ipvsadm -a -t $VIP:80 -r $backend:80 -g -w 1
                log "Added $backend back to LVS"
            fi
        fi
    done
}

# Main loop
while true; do
    check_vip
    check_backends
    sleep 30
done
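
Because the script loops forever, it should run under a supervisor rather than from an interactive shell. A minimal sketch using systemd (the unit name is illustrative):

bash

# Hypothetical systemd unit to keep the monitor running and restart it on failure
cat > /etc/systemd/system/lvs-monitor.service <<'EOF'
[Unit]
Description=LVS monitoring and auto-recovery
After=network.target keepalived.service

[Service]
ExecStart=/usr/local/bin/lvs_monitor.sh
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

chmod +x /usr/local/bin/lvs_monitor.sh
systemctl daemon-reload
systemctl enable --now lvs-monitor.service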

IV. Deployment Steps

Step 1: Deploy the LVS Servers

bash

# 1. Install packages
yum install -y ipvsadm keepalived

# 2. Configure keepalived
cp keepalived.conf /etc/keepalived/
cp check_lvs.sh /etc/keepalived/
chmod +x /etc/keepalived/check_lvs.sh

# 3. Start the service
systemctl enable keepalived
systemctl start keepalived

# 4. Verify the rules
ipvsadm -Ln

Step 2: Deploy the Nginx Servers

bash

# 1. Install Nginx
yum install -y nginx

# 2. Configure DR mode (ARP suppression + VIP on lo)
/etc/init.d/lvs_dr start

# 3. Configure Nginx
cp nginx.conf /etc/nginx/

# 4. Start Nginx
systemctl enable nginx
systemctl start nginx

# 5. Verify
curl http://localhost/health_check

Step 3: Verify the Whole Architecture

bash

# Test from a client
for i in {1..10}; do
    curl -s http://192.168.1.100/health_check
    echo
    sleep 0.5
done

# Watch how LVS distributes connections
watch -n 1 "ipvsadm -Lnc | tail -20"
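
For a rough load/concurrency check against the VIP, ApacheBench can be used if the httpd-tools package is installed; the numbers below are only an example:

bash

# Send 10,000 requests with 200 concurrent clients to the health endpoint
ab -n 10000 -c 200 http://192.168.1.100/health_check

# Then compare per-backend packet/byte counters
ipvsadm -Ln --stats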

V. Performance Tuning

LVS tuning (/etc/sysctl.conf):

bash

# IPVS connection handling
net.ipv4.vs.conn_reuse_mode = 1
net.ipv4.vs.expire_nodest_conn = 1
net.ipv4.vs.expire_quiescent_template = 1

# General network parameters
net.core.somaxconn = 65535
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.tcp_syncookies = 1
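
The net.ipv4.vs.* keys only exist once the IPVS kernel modules are loaded, so load them before applying the settings:

bash

# Load the IPVS core and the wrr scheduler used above, then apply sysctl.conf
modprobe ip_vs
modprobe ip_vs_wrr
sysctl -p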

Nginx tuning:

nginx

# Worker processes
worker_processes auto;
worker_cpu_affinity auto;

# Connection limits
worker_rlimit_nofile 65535;
events {
    worker_connections 10240;
    use epoll;
}
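
In addition to worker_rlimit_nofile, the open-file limit can also be raised at the service level on systemd-managed hosts with a drop-in (a sketch; the drop-in file name is arbitrary):

bash

# Raise the open-file limit for the nginx service via a systemd drop-in
mkdir -p /etc/systemd/system/nginx.service.d
cat > /etc/systemd/system/nginx.service.d/limits.conf <<'EOF'
[Service]
LimitNOFILE=65535
EOF

systemctl daemon-reload
systemctl restart nginx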

VI. Monitoring Metrics

  1. LVS monitoring

    • Connection counts: ipvsadm -Ln --stats

    • Throughput: ipvsadm -Ln --rate

    • Per-backend state (weights, connection counts): ipvsadm -Ln; connection timeouts: ipvsadm -Ln --timeout

  2. Nginx monitoring

    • Active connections: curl http://localhost/nginx_status

    • Request rate: tail -f access.log | awk '{print $4}' | uniq -c

    • Error rate (5xx count): grep -c " 50[0-9] " access.log

This design uses LVS for layer-4 load balancing and Nginx for layer-7 load balancing; it is intended to sustain on the order of hundreds of thousands of concurrent connections while keeping the flexibility of application-level routing.

