Building a Highly Available Redis Cluster with Predixy and LB (2): Installing the Redis Cluster


Official documentation

https://redis.io/topics/cluster-tutorial

Node configuration

Three masters and three replicas. Redis Cluster failover requires a majority of masters to agree, so at least three masters are needed to tolerate the loss of any single one (a quorum, which prevents split-brain).

Server OS: Ubuntu 16.04

Download and install

[root@paa11 ~]# wget http://download.redis.io/releases/redis-5.0.0.tar.gz
[root@paa11 ~]# tar xf redis-5.0.0.tar.gz
[root@paa11 ~]# cd redis-5.0.0 && make && make install PREFIX=/usr/local/redis # build and install
[root@paa11 ~]# mkdir /usr/local/redis/conf # create the configuration directory
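
Before continuing, it is worth confirming that the build produced working binaries (paths follow the PREFIX used above):

[root@paa11 ~]# /usr/local/redis/bin/redis-server --version
[root@paa11 ~]# /usr/local/redis/bin/redis-cli --version

Both should report v=5.0.0.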

Cluster directory layout

Create the cluster directories

[root@paa11 ~]# mkdir -p /usr/local/redis-cluster
[root@paa11 ~]# cd /usr/local/redis-cluster
[root@paa11 ~]# mkdir conf data log
[root@paa11 ~]# mkdir -p data/redis-6379 data/redis-6389 data/redis-6380 data/redis-6390 data/redis-6381 data/redis-6391

Configuration file explained

# Run redis in the background
daemonize yes
# Address to bind
bind 127.0.0.1
# Data directory
dir /usr/local/redis-cluster/data/redis-6379
# PID file (make sure the directory exists)
pidfile /var/run/redis-cluster/${custom}.pid
# Log file
logfile /usr/local/redis-cluster/log/${custom}.log
# Port
port 6379
# Enable cluster mode (uncomment this line in the stock config)
cluster-enabled yes
# Cluster state file, generated automatically on first startup
cluster-config-file /usr/local/redis-cluster/conf/${custom}.conf
# Cluster node timeout, set to 10 seconds
cluster-node-timeout 10000
# Enable AOF persistence if needed; every write operation is logged
appendonly yes
# Maximum number of client connections, configurable since 2.6 (default 10000);
# the effective value is also capped by the open-file limit (ulimit -n)
maxclients 80000

redis-cli --cluster subcommands (an example follows the list):

create       create a cluster
check        check a cluster
info         show cluster information
fix          repair a cluster
reshard      migrate slots online
rebalance    even out the slot count across nodes
add-node     add a new node to the cluster
del-node     remove a node from the cluster
set-timeout  set the heartbeat timeout between cluster nodes
call         run a command on every node in the cluster
import       import data from an external redis instance into the cluster
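
For example, once the cluster is up, check and info can be run against any node to verify slot coverage and replica assignment:

[root@paa11 ~]# redis-cli --cluster check 127.0.0.1:6379
[root@paa11 ~]# redis-cli --cluster info 127.0.0.1:6379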

Install the binaries

[root@paa11 conf]# cp /usr/local/redis/bin/redis-server  /usr/bin/
[root@paa11 conf]# cp /usr/local/redis/bin/redis-cli /usr/bin/
[root@paa11 conf]# chmod +x /usr/bin/redis-server /usr/bin/redis-cli
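
Copying the binaries is one option; alternatively, the build's bin directory can simply be put on PATH (a matter of preference):

[root@paa11 conf]# echo 'export PATH=/usr/local/redis/bin:$PATH' >> /etc/profile
[root@paa11 conf]# source /etc/profile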

Per-node configuration files

6379.conf

[root@paa11 conf]# cat 6379.conf
daemonize yes
bind 127.0.0.1
dir /usr/local/redis-cluster/data/redis-6379
pidfile /var/run/redis-cluster/redis-6379.pid
logfile /usr/local/redis-cluster/log/redis-6379.log
port 6379
cluster-enabled yes
cluster-config-file /usr/local/redis-cluster/conf/node-6379.conf
cluster-node-timeout 10000
appendonly yes
maxclients 80000

6380.conf

[root@paa11 conf]# cat 6380.conf
daemonize yes
bind 127.0.0.1
dir /usr/local/redis-cluster/data/redis-6380
pidfile /var/run/redis-cluster/redis-6380.pid
logfile /usr/local/redis-cluster/log/redis-6380.log
port 6380
cluster-enabled yes
cluster-config-file /usr/local/redis-cluster/conf/node-6380.conf
cluster-node-timeout 10000
appendonly yes
maxclients 80000

6381.conf

…and so on for 6381, 6389, 6390, and 6391, changing every port-derived value accordingly (a generation sketch follows).
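
Rather than writing each file by hand, the remaining configs can be generated from the 6379 template, since every per-node value is derived from the port (a minimal sketch, assuming the layout above):

cd /usr/local/redis-cluster/conf
for port in 6380 6381 6389 6390 6391; do
    # every occurrence of 6379 in the template is a port-derived value
    sed "s/6379/$port/g" 6379.conf > $port.conf
done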

Start the nodes

[root@paa11 redis-cluster]# redis-server conf/6379.conf && redis-server conf/6389.conf && redis-server conf/6380.conf && redis-server conf/6390.conf && redis-server conf/6381.conf && redis-server conf/6391.conf
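
Because daemonize is enabled, each redis-server call returns immediately; the same startup can be written as a loop, followed by a check that all six processes are running:

cd /usr/local/redis-cluster
for port in 6379 6380 6381 6389 6390 6391; do
    redis-server conf/$port.conf
done
ps -ef | grep redis-server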

Join the nodes into a cluster with redis-cli

Before Redis 5.0 this step used the Ruby-based redis-trib.rb script, which required Ruby to be installed; since 5.0 the --cluster subcommands are built into redis-cli itself, so Ruby is no longer needed.

Three masters, three replicas (recommended for production)

[root@paa11 redis-cluster]# redis-cli --cluster create  127.0.0.1:6379 127.0.0.1:6380 127.0.0.1:6381 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 127.0.0.1:6389 to 127.0.0.1:6379
Adding replica 127.0.0.1:6390 to 127.0.0.1:6380
Adding replica 127.0.0.1:6391 to 127.0.0.1:6381
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: c2af4f3118b64f5ae31ac20e934bbbe93c0824a2 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
M: 9166bd556c8d631ca0fc8a922fdf72a375993e4f 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
M: 3165b6822e74ecb5a689032501f9eb7f2b2872f6 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
S: e5df872afbbf92c29b9931bfb31495d6e58bf7b4 127.0.0.1:6389
   replicates c2af4f3118b64f5ae31ac20e934bbbe93c0824a2
S: d7ec8682999b15b74dcb5e47e530e8ed0f9acb79 127.0.0.1:6390
   replicates 9166bd556c8d631ca0fc8a922fdf72a375993e4f
S: b3b9d813a0f7e59a4259c2c349d6c2715e96038c 127.0.0.1:6391
   replicates 3165b6822e74ecb5a689032501f9eb7f2b2872f6
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
...
>>> Performing Cluster Check (using node 127.0.0.1:6379)
M: c2af4f3118b64f5ae31ac20e934bbbe93c0824a2 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: b3b9d813a0f7e59a4259c2c349d6c2715e96038c 127.0.0.1:6391
   slots: (0 slots) slave
   replicates 3165b6822e74ecb5a689032501f9eb7f2b2872f6
S: e5df872afbbf92c29b9931bfb31495d6e58bf7b4 127.0.0.1:6389
   slots: (0 slots) slave
   replicates c2af4f3118b64f5ae31ac20e934bbbe93c0824a2
M: 3165b6822e74ecb5a689032501f9eb7f2b2872f6 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: d7ec8682999b15b74dcb5e47e530e8ed0f9acb79 127.0.0.1:6390
   slots: (0 slots) slave
   replicates 9166bd556c8d631ca0fc8a922fdf72a375993e4f
M: 9166bd556c8d631ca0fc8a922fdf72a375993e4f 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

View the nodes

[root@paa11 redis-cluster]# redis-cli  -h 127.0.0.1  -p 6379 cluster nodes
b3b9d813a0f7e59a4259c2c349d6c2715e96038c 127.0.0.1:6391@16391 slave 3165b6822e74ecb5a689032501f9eb7f2b2872f6 0 1551422399045 6 connected
c2af4f3118b64f5ae31ac20e934bbbe93c0824a2 127.0.0.1:6379@16379 myself,master - 0 1551422400000 1 connected 0-5460
e5df872afbbf92c29b9931bfb31495d6e58bf7b4 127.0.0.1:6389@16389 slave c2af4f3118b64f5ae31ac20e934bbbe93c0824a2 0 1551422401050 4 connected
3165b6822e74ecb5a689032501f9eb7f2b2872f6 127.0.0.1:6381@16381 master - 0 1551422402053 3 connected 10923-16383
d7ec8682999b15b74dcb5e47e530e8ed0f9acb79 127.0.0.1:6390@16390 slave 9166bd556c8d631ca0fc8a922fdf72a375993e4f 0 1551422402000 5 connected
9166bd556c8d631ca0fc8a922fdf72a375993e4f 127.0.0.1:6380@16380 master - 0 1551422400047 2 connected 5461-10922
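
A quick smoke test (the key name is arbitrary): connect with -c so redis-cli follows MOVED redirections, write a key through one node and read it back through another:

[root@paa11 redis-cluster]# redis-cli -c -p 6379 set foo bar
[root@paa11 redis-cluster]# redis-cli -c -p 6380 get foo

The get should return "bar", with redis-cli printing a redirection notice whenever the key's slot lives on another node.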

All masters (no replicas)

[root@paa11 redis-cluster]# redis-cli --cluster create  127.0.0.1:6379 127.0.0.1:6380 127.0.0.1:6381 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 2730
Master[1] -> Slots 2731 - 5460
Master[2] -> Slots 5461 - 8191
Master[3] -> Slots 8192 - 10922
Master[4] -> Slots 10923 - 13652
Master[5] -> Slots 13653 - 16383
M: 9ec022a8d9dff42fc4e44c96a84baa2c82300bec 127.0.0.1:6379
   slots:[0-2730] (2731 slots) master
M: ea12dfc58b6e514921bbf934b1a8ed492159eaeb 127.0.0.1:6380
   slots:[2731-5460] (2730 slots) master
M: bae5a3f700e5884e57190172cbcc1559d051e33a 127.0.0.1:6381
   slots:[5461-8191] (2731 slots) master
M: 07293e193439e8587ddbe2a692543196e1b34ff7 127.0.0.1:6389
   slots:[8192-10922] (2731 slots) master
M: 94edcae76e1604cfbb4b3f5fe49bfecf82978935 127.0.0.1:6390
   slots:[10923-13652] (2730 slots) master
M: bae3986ca4f946ed2dcf2effa3c126e5785da32d 127.0.0.1:6391
   slots:[13653-16383] (2731 slots) master
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.......
>>> Performing Cluster Check (using node 127.0.0.1:6379)
M: 9ec022a8d9dff42fc4e44c96a84baa2c82300bec 127.0.0.1:6379
   slots:[0-2730] (2731 slots) master
M: bae3986ca4f946ed2dcf2effa3c126e5785da32d 127.0.0.1:6391
   slots:[13653-16383] (2731 slots) master
M: 94edcae76e1604cfbb4b3f5fe49bfecf82978935 127.0.0.1:6390
   slots:[10923-13652] (2730 slots) master
M: ea12dfc58b6e514921bbf934b1a8ed492159eaeb 127.0.0.1:6380
   slots:[2731-5460] (2730 slots) master
M: 07293e193439e8587ddbe2a692543196e1b34ff7 127.0.0.1:6389
   slots:[8192-10922] (2731 slots) master
M: bae5a3f700e5884e57190172cbcc1559d051e33a 127.0.0.1:6381
   slots:[5461-8191] (2731 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

List the nodes (this output is from the three-master/three-replica cluster)

[root@paa11 conf]# redis-cli  -h 127.0.0.1  -p 6379 cluster nodes
b3b9d813a0f7e59a4259c2c349d6c2715e96038c 127.0.0.1:6391@16391 slave 3165b6822e74ecb5a689032501f9eb7f2b2872f6 0 1551428086121 6 connected
c2af4f3118b64f5ae31ac20e934bbbe93c0824a2 127.0.0.1:6379@16379 myself,master - 0 1551428085000 1 connected 0-5460
e5df872afbbf92c29b9931bfb31495d6e58bf7b4 127.0.0.1:6389@16389 slave c2af4f3118b64f5ae31ac20e934bbbe93c0824a2 0 1551428087123 4 connected
3165b6822e74ecb5a689032501f9eb7f2b2872f6 127.0.0.1:6381@16381 master - 0 1551428084000 3 connected 10923-16383
d7ec8682999b15b74dcb5e47e530e8ed0f9acb79 127.0.0.1:6390@16390 slave 9166bd556c8d631ca0fc8a922fdf72a375993e4f 0 1551428085000 5 connected
9166bd556c8d631ca0fc8a922fdf72a375993e4f 127.0.0.1:6380@16380 master - 0 1551428085118 2 connected 5461-10922

Directory structure
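
Based on the directories created earlier, the layout now looks roughly like this:

/usr/local/redis-cluster
├── conf   # 6379.conf … 6391.conf plus the auto-generated node-*.conf cluster state files
├── data
│   └── redis-6379 … redis-6391   # one working directory per node (RDB/AOF files)
└── log    # redis-6379.log … redis-6391.log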

Check the listening ports

[root@paa11 redis-cluster]# netstat -lntup
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 127.0.0.1:16391         0.0.0.0:*               LISTEN      31924/redis-server
tcp        0      0 127.0.0.1:6379          0.0.0.0:*               LISTEN      31899/redis-server
tcp        0      0 127.0.0.1:6380          0.0.0.0:*               LISTEN      31909/redis-server
tcp        0      0 127.0.0.1:6381          0.0.0.0:*               LISTEN      31919/redis-server
tcp        0      0 127.0.0.1:6389          0.0.0.0:*               LISTEN      31904/redis-server
tcp        0      0 127.0.0.1:6390          0.0.0.0:*               LISTEN      31914/redis-server
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      5228/sshd
tcp        0      0 127.0.0.1:6391          0.0.0.0:*               LISTEN      31924/redis-server
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      5730/master
tcp        0      0 127.0.0.1:16379         0.0.0.0:*               LISTEN      31899/redis-server
tcp        0      0 127.0.0.1:16380         0.0.0.0:*               LISTEN      31909/redis-server
tcp        0      0 127.0.0.1:16381         0.0.0.0:*               LISTEN      31919/redis-server
tcp        0      0 127.0.0.1:16389         0.0.0.0:*               LISTEN      31904/redis-server
tcp        0      0 127.0.0.1:16390         0.0.0.0:*               LISTEN      31914/redis-server
tcp6       0      0 :::22                   :::*                    LISTEN      5228/sshd
tcp6       0      0 ::1:25                  :::*                    LISTEN      5730/master
udp        0      0 127.0.0.1:323           0.0.0.0:*                           4633/chronyd
udp6       0      0 ::1:323                 :::*                                4633/chronyd

All six nodes are listening on their client ports (6379–6391) as well as on the corresponding cluster bus ports (client port + 10000).

Troubleshooting

Running out of local ports (connections stuck in TIME_WAIT) can happen when the operating system does not reclaim closed connections fast enough; redis-benchmark warns about this when keepalive is disabled, and the same applies to benchmarks run in a loop. Enabling reuse of TIME_WAIT sockets helps:

[root@paa11 bin]# echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse
[root@paa11 bin]# echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle
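
Note that tcp_tw_recycle is unsafe behind NAT and was removed entirely in Linux 4.12; tcp_tw_reuse is the safer of the two. These echo commands do not survive a reboot; to persist the setting, go through sysctl:

[root@paa11 bin]# echo 'net.ipv4.tcp_tw_reuse = 1' >> /etc/sysctl.conf
[root@paa11 bin]# sysctl -p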

Running out of memory

If the machine is low on memory, the kernel page cache can be dropped (this frees cache only; it does not reclaim memory held by Redis itself):

echo 3 > /proc/sys/vm/drop_caches

Rebuilding the cluster

[ERR] Node 192.168.0.253:6379 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.

Delete every node's RDB/AOF data files and its cluster-config-file (the node-*.conf generated above), then restart all the nodes; alternatively, the nodes can be reset in place as sketched below.
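
Each node can be wiped without touching the filesystem (this discards all data; a sketch assuming the six local ports used above):

for port in 6379 6380 6381 6389 6390 6391; do
    redis-cli -p $port flushall            # empty the keyspace (may error harmlessly on replicas)
    redis-cli -p $port cluster reset hard  # forget peers and clear cluster state
done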

Next post: installing and configuring Predixy

https://www.blog.lijinghua.club/article/predixy_redis_cluster_3