5. Sharded Cluster
0. Preparation¶
#1.Stop all other unrelated processes (the transcript below also stops the redis instances)
pkill mongod
[root@151 ~]# netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:17000 0.0.0.0:* LISTEN 58547/redis-server
tcp 0 0 0.0.0.0:27017 0.0.0.0:* LISTEN 21714/mongod
tcp 0 0 0.0.0.0:17001 0.0.0.0:* LISTEN 58549/redis-server
tcp 0 0 0.0.0.0:17002 0.0.0.0:* LISTEN 58551/redis-server
tcp 0 0 0.0.0.0:17003 0.0.0.0:* LISTEN 58557/redis-server
tcp 0 0 0.0.0.0:6379 0.0.0.0:* LISTEN 54711/redis-server
tcp 0 0 0.0.0.0:17004 0.0.0.0:* LISTEN 58559/redis-server
tcp 0 0 0.0.0.0:26380 0.0.0.0:* LISTEN 35556/redis-sentine
tcp 0 0 0.0.0.0:6380 0.0.0.0:* LISTEN 34058/redis-server
tcp 0 0 0.0.0.0:17005 0.0.0.0:* LISTEN 58573/redis-server
tcp 0 0 0.0.0.0:6381 0.0.0.0:* LISTEN 34062/redis-server
tcp 0 0 0.0.0.0:6382 0.0.0.0:* LISTEN 34072/redis-server
tcp 0 0 0.0.0.0:28017 0.0.0.0:* LISTEN 45004/mongod
tcp 0 0 0.0.0.0:28018 0.0.0.0:* LISTEN 46652/mongod
tcp 0 0 0.0.0.0:28019 0.0.0.0:* LISTEN 79015/mongod
tcp 0 0 0.0.0.0:28020 0.0.0.0:* LISTEN 79079/mongod
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 7350/sshd
tcp 0 0 0.0.0.0:7000 0.0.0.0:* LISTEN 58547/redis-server
tcp 0 0 0.0.0.0:7001 0.0.0.0:* LISTEN 58549/redis-server
tcp 0 0 0.0.0.0:7002 0.0.0.0:* LISTEN 58551/redis-server
tcp 0 0 0.0.0.0:7003 0.0.0.0:* LISTEN 58557/redis-server
tcp 0 0 0.0.0.0:7004 0.0.0.0:* LISTEN 58559/redis-server
tcp 0 0 0.0.0.0:7005 0.0.0.0:* LISTEN 58573/redis-server
tcp6 0 0 :::17000 :::* LISTEN 58547/redis-server
tcp6 0 0 :::17001 :::* LISTEN 58549/redis-server
tcp6 0 0 :::17002 :::* LISTEN 58551/redis-server
tcp6 0 0 :::17003 :::* LISTEN 58557/redis-server
tcp6 0 0 :::6379 :::* LISTEN 54711/redis-server
tcp6 0 0 :::17004 :::* LISTEN 58559/redis-server
tcp6 0 0 :::26380 :::* LISTEN 35556/redis-sentine
tcp6 0 0 :::6380 :::* LISTEN 34058/redis-server
tcp6 0 0 :::17005 :::* LISTEN 58573/redis-server
tcp6 0 0 :::6381 :::* LISTEN 34062/redis-server
tcp6 0 0 :::6382 :::* LISTEN 34072/redis-server
tcp6 0 0 :::22 :::* LISTEN 7350/sshd
tcp6 0 0 :::7000 :::* LISTEN 58547/redis-server
tcp6 0 0 :::7001 :::* LISTEN 58549/redis-server
tcp6 0 0 :::7002 :::* LISTEN 58551/redis-server
tcp6 0 0 :::7003 :::* LISTEN 58557/redis-server
tcp6 0 0 :::7004 :::* LISTEN 58559/redis-server
tcp6 0 0 :::7005 :::* LISTEN 58573/redis-server
[root@151 ~]#
[root@151 ~]#
[root@151 ~]#
[root@151 ~]# pkill mongod
[root@151 ~]# pkill redis-server
[root@151 ~]# netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:26380 0.0.0.0:* LISTEN 35556/redis-sentine
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 7350/sshd
tcp6 0 0 :::26380 :::* LISTEN 35556/redis-sentine
tcp6 0 0 :::22 :::* LISTEN 7350/sshd
[root@151 ~]# pkill redis-sentine
[root@151 ~]# netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 7350/sshd
tcp6 0 0 :::22 :::* LISTEN 7350/sshd
[root@151 ~]#
1. Components of a MongoDB Sharded Cluster¶
#1.mongos
1.Receives client requests and returns the requested data
2.Reads the config servers to obtain the sharding strategy and node information
3.Routes requests and fetches data according to the sharding strategy
4.Automatically balances data chunks
#2.config server
1.Stores node information
2.Stores the sharding strategy
3.Records where each piece of data lives
#3.mongod
1.Stores data in chunks (a chunk is a fixed-size storage unit, 64MB by default)
#4.Automatic chunk splitting & automatic chunk migration
By default (before any sharding strategy is configured), a single 64MB chunk is created; once it fills up, it splits into two.
mongos provides a balancer that evens out the chunks across all nodes.
In other words, MongoDB can shard data automatically without any manual sharding work.
In terms of raw data volume this balancing is fairly even, and it is more flexible than Redis sharding.
When a node is removed, its chunks are automatically migrated to the remaining nodes;
when a node is added, chunks are automatically rebalanced onto the new node.
However, chunk splitting and chunk migration carry extra performance cost, and migration only balances data volume -- it knows nothing about business-level access patterns.
Therefore (both tunings are sketched below):
increase the chunk size to reduce the number of splits and effectively cut I/O overhead;
pre-split chunks to reduce the number of migrations.
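A minimal mongo-shell sketch of both tunings, run through mongos (the 128MB value and the id split point are illustrative assumptions, not recommendations):
#1.Raise the default chunk size (value in MB; stored in the config database)
use config
db.settings.save({_id: "chunksize", value: 128})
#2.Pre-split an already-sharded collection at a chosen shard-key value
use admin
sh.splitAt("test.vast", {id: 250000})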
2. Port Plan¶
10 instances: 38017-38026
#1.mongos:
192.168.178.151:38017
#2.config server:
192.168.178.151:38018
192.168.178.151:38019
192.168.178.151:38020
#3.mongod
sh1:
192.168.178.151:38021
192.168.178.151:38022
192.168.178.151:38023
sh2:
192.168.178.151:38024
192.168.178.151:38025
192.168.178.151:38026
3. mongod Configuration¶
3.1 Create Directories¶
su - mongod
mkdir -p /mongodb/38021/conf /mongodb/38021/log /mongodb/38021/data
mkdir -p /mongodb/38022/conf /mongodb/38022/log /mongodb/38022/data
mkdir -p /mongodb/38023/conf /mongodb/38023/log /mongodb/38023/data
mkdir -p /mongodb/38024/conf /mongodb/38024/log /mongodb/38024/data
mkdir -p /mongodb/38025/conf /mongodb/38025/log /mongodb/38025/data
mkdir -p /mongodb/38026/conf /mongodb/38026/log /mongodb/38026/data
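The same six directory trees can also be created in one line with bash brace expansion (equivalent to the commands above):
mkdir -p /mongodb/380{21..26}/{conf,log,data}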
3.2 Edit the Configuration Files¶
#sh1:
#1.Write the first config file (note: it is best to start this instance as a test first to confirm the config file is valid, and only then continue!!!)
vim /mongodb/38021/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38021/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38021/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38021
replication:
  oplogSizeMB: 2048
  replSetName: sh1
sharding:
  clusterRole: shardsvr
processManagement:
  fork: true
#1.1 Start it to test
mongod -f /mongodb/38021/conf/mongodb.conf
#2.Copy
cp /mongodb/38021/conf/mongodb.conf /mongodb/38022/conf/
cp /mongodb/38021/conf/mongodb.conf /mongodb/38023/conf/
#3.Replace the port
sed 's#38021#38022#g' /mongodb/38022/conf/mongodb.conf -i
sed 's#38021#38023#g' /mongodb/38023/conf/mongodb.conf -i
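The copy-and-replace steps can also be written as a small bash loop (a sketch with the same effect as the cp/sed pairs above):
for port in 38022 38023; do
    cp /mongodb/38021/conf/mongodb.conf /mongodb/$port/conf/
    sed -i "s#38021#$port#g" /mongodb/$port/conf/mongodb.conf
done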
#sh2
#1.
vim /mongodb/38024/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38024/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38024/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38024
replication:
  oplogSizeMB: 2048
  replSetName: sh2
sharding:
  clusterRole: shardsvr
processManagement:
  fork: true
#1.1 Start it to test
mongod -f /mongodb/38024/conf/mongodb.conf
#2.
cp /mongodb/38024/conf/mongodb.conf /mongodb/38025/conf/
cp /mongodb/38024/conf/mongodb.conf /mongodb/38026/conf/
#3.
sed 's#38024#38025#g' /mongodb/38025/conf/mongodb.conf -i
sed 's#38024#38026#g' /mongodb/38026/conf/mongodb.conf -i
#sh1
[root@151 ~]# su - mongod
Last login: Wed May 12 16:46:34 CST 2021 on pts/1
[mongod@151 ~]$
[mongod@151 ~]$ mkdir -p /mongodb/38021/conf /mongodb/38021/log /mongodb/38021/data
[mongod@151 ~]$ mkdir -p /mongodb/38022/conf /mongodb/38022/log /mongodb/38022/data
[mongod@151 ~]$ mkdir -p /mongodb/38023/conf /mongodb/38023/log /mongodb/38023/data
[mongod@151 ~]$ mkdir -p /mongodb/38024/conf /mongodb/38024/log /mongodb/38024/data
[mongod@151 ~]$ mkdir -p /mongodb/38025/conf /mongodb/38025/log /mongodb/38025/data
[mongod@151 ~]$ mkdir -p /mongodb/38026/conf /mongodb/38026/log /mongodb/38026/data
[mongod@151 ~]$
[mongod@151 ~]$ vim /mongodb/38021/conf/mongodb.conf
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38021/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 8220
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ cp /mongodb/38021/conf/mongodb.conf /mongodb/38022/conf/
[mongod@151 ~]$ cp /mongodb/38021/conf/mongodb.conf /mongodb/38023/conf/
[mongod@151 ~]$
[mongod@151 ~]$ sed 's#38021#38022#g' /mongodb/38022/conf/mongodb.conf -i
[mongod@151 ~]$ sed 's#38021#38023#g' /mongodb/38023/conf/mongodb.conf -i
[mongod@151 ~]$
[mongod@151 ~]$ cat /mongodb/38021/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38021/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38021/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38021
replication:
  oplogSizeMB: 2048
  replSetName: sh1
sharding:
  clusterRole: shardsvr
processManagement:
  fork: true
#sh2
[mongod@151 ~]$ vim /mongodb/38024/conf/mongodb.conf
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38024/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 9548
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ cp /mongodb/38024/conf/mongodb.conf /mongodb/38025/conf/
[mongod@151 ~]$ cp /mongodb/38024/conf/mongodb.conf /mongodb/38026/conf/
[mongod@151 ~]$
[mongod@151 ~]$ sed 's#38024#38025#g' /mongodb/38025/conf/mongodb.conf -i
[mongod@151 ~]$ sed 's#38024#38026#g' /mongodb/38026/conf/mongodb.conf -i
[mongod@151 ~]$
[mongod@151 ~]$ cat /mongodb/38024/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38024/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38024/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38024
replication:
  oplogSizeMB: 2048
  replSetName: sh1
sharding:
  clusterRole: shardsvr
processManagement:
  fork: true
3.3 Start All Nodes¶
mongod -f /mongodb/38021/conf/mongodb.conf
mongod -f /mongodb/38022/conf/mongodb.conf
mongod -f /mongodb/38023/conf/mongodb.conf
mongod -f /mongodb/38024/conf/mongodb.conf
mongod -f /mongodb/38025/conf/mongodb.conf
mongod -f /mongodb/38026/conf/mongodb.conf
ps -ef |grep mongod
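All six instances can also be started in a loop -- a bash sketch equivalent to the six commands above:
for port in 380{21..26}; do
    mongod -f /mongodb/$port/conf/mongodb.conf
done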
[mongod@151 ~]$ mongod -f /mongodb/38021/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 9903
ERROR: child process failed, exited with error number 48
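#Error 48 means the address is already in use: 38021 (and 38024 below) were already started during the config-file tests above, so these two failures are expected.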
[mongod@151 ~]$ mongod -f /mongodb/38022/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 9907
child process started successfully, parent exiting
[mongod@151 ~]$ mongod -f /mongodb/38023/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 9928
child process started successfully, parent exiting
[mongod@151 ~]$ mongod -f /mongodb/38024/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 9954
ERROR: child process failed, exited with error number 48
[mongod@151 ~]$ mongod -f /mongodb/38025/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 9969
child process started successfully, parent exiting
[mongod@151 ~]$ mongod -f /mongodb/38026/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 10005
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ ps -ef |grep mongod
root 7833 113232 0 01:09 pts/1 00:00:00 su - mongod
mongod 7834 7833 0 01:09 pts/1 00:00:00 -bash
mongod 8220 1 2 01:10 ? 00:00:05 mongod -f /mongodb/38021/conf/mongodb.conf
mongod 9548 1 2 01:13 ? 00:00:02 mongod -f /mongodb/38024/conf/mongodb.conf
mongod 9907 1 4 01:14 ? 00:00:00 mongod -f /mongodb/38022/conf/mongodb.conf
mongod 9928 1 3 01:14 ? 00:00:00 mongod -f /mongodb/38023/conf/mongodb.conf
mongod 9969 1 3 01:14 ? 00:00:00 mongod -f /mongodb/38025/conf/mongodb.conf
mongod 10005 1 4 01:14 ? 00:00:00 mongod -f /mongodb/38026/conf/mongodb.conf
mongod 10109 7834 0 01:15 pts/1 00:00:00 ps -ef
mongod 10110 7834 0 01:15 pts/1 00:00:00 grep --color=auto mongod
[mongod@151 ~]$
[mongod@151 ~]$
[mongod@151 ~]$ netstat -tunlp |grep 3802
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
tcp 0 0 0.0.0.0:38023 0.0.0.0:* LISTEN 9928/mongod
tcp 0 0 0.0.0.0:38024 0.0.0.0:* LISTEN 9548/mongod
tcp 0 0 0.0.0.0:38025 0.0.0.0:* LISTEN 9969/mongod
tcp 0 0 0.0.0.0:38026 0.0.0.0:* LISTEN 10005/mongod
tcp 0 0 0.0.0.0:38021 0.0.0.0:* LISTEN 8220/mongod
tcp 0 0 0.0.0.0:38022 0.0.0.0:* LISTEN 9907/mongod
3.4 Build the Replica Sets¶
#sh1
#1.Log in to the sh1 replica set
mongo --port 38021
#2.Switch to the admin database
use admin
#3.Configuration
config = {
    _id: 'sh1', members: [
        {_id: 0, host: '192.168.178.151:38021'},
        {_id: 1, host: '192.168.178.151:38022'},
        {_id: 2, host: '192.168.178.151:38023', 'arbiterOnly': true}
    ]
}
#4.Initialize
rs.initiate(config)
#5.Check the status
rs.status()
#sh2
#1.Log in to the sh2 replica set
mongo --port 38024
#2.Switch to the admin database
use admin
#3.Configuration
config = {
    _id: 'sh2', members: [
        {_id: 0, host: '192.168.178.151:38024'},
        {_id: 1, host: '192.168.178.151:38025'},
        {_id: 2, host: '192.168.178.151:38026', 'arbiterOnly': true}
    ]
}
#4.Initialize
rs.initiate(config)
#5.Check the status
rs.status()
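For a quick non-interactive health check of both shard replica sets, the shell's --eval option can be used (a sketch; it prints each member and its role):
mongo --port 38021 --quiet --eval 'rs.status().members.forEach(function(m){print(m.name, m.stateStr)})'
mongo --port 38024 --quiet --eval 'rs.status().members.forEach(function(m){print(m.name, m.stateStr)})'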
#sh1
[mongod@151 ~]$ mongo --port 38021
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38021/test
> use admin
switched to db admin
> config = {
... _id: 'sh1', members: [
... {_id: 0, host: '192.168.178.151:38021'},
... {_id: 1, host: '192.168.178.151:38022'},
... {_id: 2, host: '192.168.178.151:38023','arbiterOnly': true}
... ]
... }
{
"_id" : "sh1",
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38021"
},
{
"_id" : 1,
"host" : "192.168.178.151:38022"
},
{
"_id" : 2,
"host" : "192.168.178.151:38023",
"arbiterOnly" : true
}
]
}
>
> rs.initiate(config)
{ "ok" : 1 }
sh1:OTHER>
sh1:SECONDARY> rs.status()
{
"set" : "sh1",
"date" : ISODate("2021-05-12T17:16:37.524Z"),
"myState" : 2,
"term" : NumberLong(0),
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 0,
"name" : "192.168.178.151:38021",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 384,
"optime" : {
"ts" : Timestamp(1620839787, 1),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("2021-05-12T17:16:27Z"),
"infoMessage" : "could not find member to sync from",
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.178.151:38022",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 10,
"optime" : {
"ts" : Timestamp(1620839787, 1),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("2021-05-12T17:16:27Z"),
"lastHeartbeat" : ISODate("2021-05-12T17:16:37.098Z"),
"lastHeartbeatRecv" : ISODate("2021-05-12T17:16:35.111Z"),
"pingMs" : NumberLong(1),
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.178.151:38023",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 10,
"lastHeartbeat" : ISODate("2021-05-12T17:16:37.098Z"),
"lastHeartbeatRecv" : ISODate("2021-05-12T17:16:34.111Z"),
"pingMs" : NumberLong(1),
"configVersion" : 1
}
],
"ok" : 1
}
sh1:SECONDARY> rs.isMaster()
{
"hosts" : [
"192.168.178.151:38021",
"192.168.178.151:38022"
],
"arbiters" : [
"192.168.178.151:38023"
],
"setName" : "sh1",
"setVersion" : 1,
"ismaster" : true,
"secondary" : false,
"primary" : "192.168.178.151:38021",
"me" : "192.168.178.151:38021",
"electionId" : ObjectId("7fffffff0000000000000001"),
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2021-05-12T17:16:48.044Z"),
"maxWireVersion" : 4,
"minWireVersion" : 0,
"ok" : 1
}
sh1:PRIMARY>
sh1:PRIMARY> rs.conf()
{
"_id" : "sh1",
"version" : 1,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38021",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "192.168.178.151:38022",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "192.168.178.151:38023",
"arbiterOnly" : true,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("609c0d6b527f30b08aa5bb71")
}
}
sh1:PRIMARY>
sh1:PRIMARY> exit
bye
#sh2
[mongod@151 ~]$ mongo --port 38024
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38024/test
> use admin
switched to db admin
> config = {
... _id: 'sh2', members: [
... {_id: 0, host: '192.168.178.151:38024'},
... {_id: 1, host: '192.168.178.151:38025'},
... {_id: 2, host: '192.168.178.151:38026','arbiterOnly': true}
... ]
... }
{
"_id" : "sh2",
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38024"
},
{
"_id" : 1,
"host" : "192.168.178.151:38025"
},
{
"_id" : 2,
"host" : "192.168.178.151:38026",
"arbiterOnly" : true
}
]
}
>
> rs.initiate(config)
{
"ok" : 0,
"errmsg" : "Attempting to initiate a replica set with name sh2, but command line reports sh1; rejecting",
"code" : 93
}
>
#An error occurred -- it looks like the config files were never updated. Check them:
#Sure enough! replSetName is still sh1. Edit the three config files and set replSetName to sh2
[mongod@151 ~]$ vim /mongodb/38024/conf/mongodb.conf
[mongod@151 ~]$
[mongod@151 ~]$ vim /mongodb/38025/conf/mongodb.conf
[mongod@151 ~]$
[mongod@151 ~]$ vim /mongodb/38026/conf/mongodb.conf
[mongod@151 ~]$
[mongod@151 ~]$ cat /mongodb/38024/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38024/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38024/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38024
replication:
  oplogSizeMB: 2048
  replSetName: sh2
sharding:
  clusterRole: shardsvr
processManagement:
  fork: true
[mongod@151 ~]$
#Restart the sh2 nodes
[mongod@151 ~]$ mongod -f /mongodb/38024/conf/mongodb.conf --shutdown
killing process with pid: 9548
[mongod@151 ~]$ mongod -f /mongodb/38025/conf/mongodb.conf --shutdown
killing process with pid: 9969
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38026/conf/mongodb.conf --shutdown
killing process with pid: 10005
[mongod@151 ~]$
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38024/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 12686
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38025/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 12769
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38026/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 12841
child process started successfully, parent exiting
[mongod@151 ~]$
#Build the sh2 replica set again
[mongod@151 ~]$ mongo --port 38024
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38024/test
> use admin
switched to db admin
> config = {
... _id: 'sh2', members: [
... {_id: 0, host: '192.168.178.151:38024'},
... {_id: 1, host: '192.168.178.151:38025'},
... {_id: 2, host: '192.168.178.151:38026','arbiterOnly': true}
... ]
... }
{
"_id" : "sh2",
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38024"
},
{
"_id" : 1,
"host" : "192.168.178.151:38025"
},
{
"_id" : 2,
"host" : "192.168.178.151:38026",
"arbiterOnly" : true
}
]
}
>
> rs.initiate(config)
{ "ok" : 1 }
sh2:OTHER>
sh2:SECONDARY> rs.status()
{
"set" : "sh2",
"date" : ISODate("2021-05-12T17:23:09.553Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 0,
"name" : "192.168.178.151:38024",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 82,
"optime" : {
"ts" : Timestamp(1620840188, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-05-12T17:23:08Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1620840187, 1),
"electionDate" : ISODate("2021-05-12T17:23:07Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.178.151:38025",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 12,
"optime" : {
"ts" : Timestamp(1620840177, 1),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("2021-05-12T17:22:57Z"),
"lastHeartbeat" : ISODate("2021-05-12T17:23:07.562Z"),
"lastHeartbeatRecv" : ISODate("2021-05-12T17:23:05.334Z"),
"pingMs" : NumberLong(1),
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.178.151:38026",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 12,
"lastHeartbeat" : ISODate("2021-05-12T17:23:07.563Z"),
"lastHeartbeatRecv" : ISODate("2021-05-12T17:23:09.335Z"),
"pingMs" : NumberLong(0),
"configVersion" : 1
}
],
"ok" : 1
}
sh2:PRIMARY>
sh2:PRIMARY> rs.isMaster()
{
"hosts" : [
"192.168.178.151:38024",
"192.168.178.151:38025"
],
"arbiters" : [
"192.168.178.151:38026"
],
"setName" : "sh2",
"setVersion" : 1,
"ismaster" : true,
"secondary" : false,
"primary" : "192.168.178.151:38024",
"me" : "192.168.178.151:38024",
"electionId" : ObjectId("7fffffff0000000000000001"),
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2021-05-12T17:23:18.165Z"),
"maxWireVersion" : 4,
"minWireVersion" : 0,
"ok" : 1
}
sh2:PRIMARY>
sh2:PRIMARY> rs.conf()
{
"_id" : "sh2",
"version" : 1,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38024",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "192.168.178.151:38025",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "192.168.178.151:38026",
"arbiterOnly" : true,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("609c0ef1c0099ba97e706d6d")
}
}
sh2:PRIMARY>
sh2:PRIMARY> exit
bye
4. config server Node Configuration¶
4.1 Create Directories¶
mkdir -p /mongodb/38018/conf /mongodb/38018/log /mongodb/38018/data
mkdir -p /mongodb/38019/conf /mongodb/38019/log /mongodb/38019/data
mkdir -p /mongodb/38020/conf /mongodb/38020/log /mongodb/38020/data
4.2 Create the Configuration Files¶
#1.Write the first config file (note: it is best to start this instance as a test first to confirm the config file is valid, and only then continue!!!)
vim /mongodb/38018/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38018/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38018/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38018
replication:
  oplogSizeMB: 2048
  replSetName: configReplSet
sharding:
  clusterRole: configsvr
processManagement:
  fork: true
#1.1 Start it to test
mongod -f /mongodb/38018/conf/mongodb.conf
#2.Copy
cp /mongodb/38018/conf/mongodb.conf /mongodb/38019/conf/
cp /mongodb/38018/conf/mongodb.conf /mongodb/38020/conf/
#3.Replace the port
sed 's#38018#38019#g' /mongodb/38019/conf/mongodb.conf -i
sed 's#38018#38020#g' /mongodb/38020/conf/mongodb.conf -i
[mongod@151 ~]$ mkdir -p /mongodb/38018/conf /mongodb/38018/log /mongodb/38018/data
[mongod@151 ~]$ mkdir -p /mongodb/38019/conf /mongodb/38019/log /mongodb/38019/data
[mongod@151 ~]$ mkdir -p /mongodb/38020/conf /mongodb/38020/log /mongodb/38020/data
[mongod@151 ~]$
[mongod@151 ~]$ vim /mongodb/38018/conf/mongodb.conf
[mongod@151 ~]$
[mongod@151 ~]$ mongod -f /mongodb/38018/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 14038
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ cp /mongodb/38018/conf/mongodb.conf /mongodb/38019/conf/
[mongod@151 ~]$ cp /mongodb/38018/conf/mongodb.conf /mongodb/38020/conf/
[mongod@151 ~]$
[mongod@151 ~]$ sed 's#38018#38019#g' /mongodb/38019/conf/mongodb.conf -i
[mongod@151 ~]$ sed 's#38018#38020#g' /mongodb/38020/conf/mongodb.conf -i
[mongod@151 ~]$
[mongod@151 ~]$ cat /mongodb/38019/conf/mongodb.conf
systemLog:
  destination: file
  path: /mongodb/38019/log/mongodb.log
  logAppend: true
storage:
  journal:
    enabled: true
  dbPath: /mongodb/38019/data
  directoryPerDB: true
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      directoryForIndexes: true
    collectionConfig:
      blockCompressor: zlib
    indexConfig:
      prefixCompression: true
net:
  port: 38019
replication:
  oplogSizeMB: 2048
  replSetName: configReplSet
sharding:
  clusterRole: configsvr
processManagement:
  fork: true
4.3 Start the Nodes¶
mongod -f /mongodb/38018/conf/mongodb.conf
mongod -f /mongodb/38019/conf/mongodb.conf
mongod -f /mongodb/38020/conf/mongodb.conf
netstat -tunlp|grep 380
[mongod@151 ~]$ mongod -f /mongodb/38019/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 14610
child process started successfully, parent exiting
[mongod@151 ~]$ mongod -f /mongodb/38020/conf/mongodb.conf
about to fork child process, waiting until server is ready for connections.
forked process: 14648
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ netstat -tunlp |grep 380
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
tcp 0 0 0.0.0.0:38023 0.0.0.0:* LISTEN 9928/mongod
tcp 0 0 0.0.0.0:38024 0.0.0.0:* LISTEN 12686/mongod
tcp 0 0 0.0.0.0:38025 0.0.0.0:* LISTEN 12769/mongod
tcp 0 0 0.0.0.0:38026 0.0.0.0:* LISTEN 12841/mongod
tcp 0 0 0.0.0.0:38018 0.0.0.0:* LISTEN 14038/mongod
tcp 0 0 0.0.0.0:38019 0.0.0.0:* LISTEN 14610/mongod
tcp 0 0 0.0.0.0:38020 0.0.0.0:* LISTEN 14648/mongod
tcp 0 0 0.0.0.0:38021 0.0.0.0:* LISTEN 8220/mongod
tcp 0 0 0.0.0.0:38022 0.0.0.0:* LISTEN 9907/mongod
4.4 Build the Replica Set¶
#1.Log in to a config server node
mongo --port 38018
#2.Switch to the admin database
use admin
#3.Configuration
config = {
    _id: 'configReplSet', members: [
        {_id: 0, host: '192.168.178.151:38018'},
        {_id: 1, host: '192.168.178.151:38019'},
        {_id: 2, host: '192.168.178.151:38020'}
    ]
}
#4.Initialize
rs.initiate(config)
#5.Check the status
rs.status()
#Note:
#1.The config server can be a single node, but the official recommendation is a replica set.
#2.The config server replica set cannot contain an arbiter.
#3.Newer versions require a replica set!!! (Since MongoDB 3.4 the config servers must be a replica set, and arbiters are still not supported.)
[mongod@151 ~]$ mongo --port 38018
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38018/test
>
> use admin
switched to db admin
>
> config = {
... _id: 'configReplSet', members: [
... {_id: 0, host: '192.168.178.151:38018'},
... {_id: 1, host: '192.168.178.151:38019'},
... {_id: 2, host: '192.168.178.151:38020','arbiterOnly': true}
... ]
... }
{
"_id" : "configReplSet",
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38018"
},
{
"_id" : 1,
"host" : "192.168.178.151:38019"
},
{
"_id" : 2,
"host" : "192.168.178.151:38020",
"arbiterOnly" : true
}
]
}
>
> rs.initiate(config)
{
"ok" : 0,
"errmsg" : "Arbiters are not allowed in replica set configurations being used for config servers",
"code" : 93
}
>
>
#An error occurred: the config included an arbiter, which is not allowed for config servers. Fix the config and test again
[mongod@151 ~]$ mongo --port 38018
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38018/test
>
> use admin
switched to db admin
>
> config = {
... _id: 'configReplSet', members: [
... {_id: 0, host: '192.168.178.151:38018'},
... {_id: 1, host: '192.168.178.151:38019'},
... {_id: 2, host: '192.168.178.151:38020'}
... ]
... }
{
"_id" : "configReplSet",
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38018"
},
{
"_id" : 1,
"host" : "192.168.178.151:38019"
},
{
"_id" : 2,
"host" : "192.168.178.151:38020"
}
]
}
>
> rs.initiate(config)
{ "ok" : 1 }
configReplSet:OTHER>
configReplSet:SECONDARY>
configReplSet:SECONDARY>
configReplSet:SECONDARY>
configReplSet:SECONDARY> rs.conf()
{
"_id" : "configReplSet",
"version" : 1,
"configsvr" : true,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "192.168.178.151:38018",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "192.168.178.151:38019",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "192.168.178.151:38020",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("609c106f35ea8b0ea5b3bf32")
}
}
configReplSet:PRIMARY>
configReplSet:PRIMARY> rs.isMaster()
{
"hosts" : [
"192.168.178.151:38018",
"192.168.178.151:38019",
"192.168.178.151:38020"
],
"setName" : "configReplSet",
"setVersion" : 1,
"ismaster" : true,
"secondary" : false,
"primary" : "192.168.178.151:38018",
"me" : "192.168.178.151:38018",
"electionId" : ObjectId("7fffffff0000000000000001"),
"configsvr" : 1,
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2021-05-12T17:29:39.018Z"),
"maxWireVersion" : 4,
"minWireVersion" : 0,
"ok" : 1
}
configReplSet:PRIMARY>
configReplSet:PRIMARY> rs.status()
{
"set" : "configReplSet",
"date" : ISODate("2021-05-12T17:29:46.349Z"),
"myState" : 1,
"term" : NumberLong(1),
"configsvr" : true,
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 0,
"name" : "192.168.178.151:38018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 295,
"optime" : {
"ts" : Timestamp(1620840570, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-05-12T17:29:30Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1620840569, 1),
"electionDate" : ISODate("2021-05-12T17:29:29Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.178.151:38019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 26,
"optime" : {
"ts" : Timestamp(1620840570, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-05-12T17:29:30Z"),
"lastHeartbeat" : ISODate("2021-05-12T17:29:45.645Z"),
"lastHeartbeatRecv" : ISODate("2021-05-12T17:29:45.574Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.178.151:38018",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.178.151:38020",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 26,
"optime" : {
"ts" : Timestamp(1620840570, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-05-12T17:29:30Z"),
"lastHeartbeat" : ISODate("2021-05-12T17:29:45.644Z"),
"lastHeartbeatRecv" : ISODate("2021-05-12T17:29:45.585Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.178.151:38018",
"configVersion" : 1
}
],
"ok" : 1
}
configReplSet:PRIMARY>
configReplSet:PRIMARY> exit
bye
5. mongos Node Configuration¶
5.1 Create Directories¶
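#mongos is stateless (all cluster metadata lives on the config servers), so only conf and log directories are needed -- no data directory.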
mkdir -p /mongodb/38017/conf /mongodb/38017/log
5.2 Configuration File¶
vim /mongodb/38017/conf/mongos.conf
systemLog:
  destination: file
  path: /mongodb/38017/log/mongos.log
  logAppend: true
net:
  port: 38017
sharding:
  configDB: configReplSet/192.168.178.151:38018,192.168.178.151:38019,192.168.178.151:38020
processManagement:
  fork: true
5.3 Start mongos¶
mongos -f /mongodb/38017/conf/mongos.conf
[mongod@151 ~]$ mkdir -p /mongodb/38017/conf /mongodb/38017/log
[mongod@151 ~]$
[mongod@151 ~]$ vim /mongodb/38017/conf/mongos.conf
[mongod@151 ~]$
[mongod@151 ~]$ mongos -f /mongodb/38017/conf/mongos.conf
about to fork child process, waiting until server is ready for connections.
forked process: 16553
child process started successfully, parent exiting
[mongod@151 ~]$
[mongod@151 ~]$ netstat -tunlp |grep 38017
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
tcp 0 0 0.0.0.0:38017 0.0.0.0:* LISTEN 16553/mongos
[mongod@151 ~]$
[mongod@151 ~]$ ps -ef|grep mongos
mongod 16553 1 0 01:30 ? 00:00:00 mongos -f /mongodb/38017/conf/mongos.conf
mongod 16690 7834 0 01:31 pts/1 00:00:00 grep --color=auto mongos
6. Assemble the Sharded Cluster¶
6.1 Connect to the mongos Node¶
su - mongod
#Log in to the mongos node
mongo 192.168.178.151:38017/admin
6.2 Add Shards¶
db.runCommand({addshard: "sh1/192.168.178.151:38021,192.168.178.151:38022,192.168.178.151:38023",name: "shard1"})
db.runCommand({addshard: "sh2/192.168.178.151:38024,192.168.178.151:38025,192.168.178.151:38026",name: "shard2"})
6.3 List Shards¶
db.runCommand({listshards: 1})
6.4 View Overall Status¶
sh.status()
#VM demo below
#1.Log in to the mongos node
[mongod@151 ~]$ mongo 192.168.178.151:38017/admin
MongoDB shell version: 3.2.22
connecting to: 192.168.178.151:38017/admin
#2.Add shard 1
mongos> db.runCommand({addshard: "sh1/192.168.178.151:38021,192.168.178.151:38022,192.168.178.151:38023",name: "shard1"})
{ "shardAdded" : "shard1", "ok" : 1 }
#3.Add shard 2
mongos> db.runCommand({addshard: "sh2/192.168.178.151:38024,192.168.178.151:38025,192.168.178.151:38026",name: "shard2"})
{ "shardAdded" : "shard2", "ok" : 1 }
mongos>
mongos> db.runCommand({listshards: 1})
{
"shards" : [
{
"_id" : "shard1",
"host" : "sh1/192.168.178.151:38021,192.168.178.151:38022"
},
{
"_id" : "shard2",
"host" : "sh2/192.168.178.151:38024,192.168.178.151:38025"
}
],
"ok" : 1
}
#4.Check the sharding status
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("609c10cda630ecb676f8ae1e")
}
shards:
{ "_id" : "shard1", "host" : "sh1/192.168.178.151:38021,192.168.178.151:38022" }
{ "_id" : "shard2", "host" : "sh2/192.168.178.151:38024,192.168.178.151:38025" }
active mongoses:
"3.2.22" : 1
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
mongos>
mongos> exit
bye
[mongod@151 ~]$
7. Range Sharding Strategy¶
7.1 Enable Sharding on the Database¶
#1.Log in to the mongos node
mongo --port 38017 admin
#2.Enable sharding on the test database
db.runCommand({enablesharding:"test"})
7.2 Specify the Shard Key and Shard the Collection¶
#1.Create the index
use test
db.vast.ensureIndex({id: 1})
#Does shard2 get this index too? How can we check? (See the sketch after this block -- the index appears on shard2 only after chunks migrate there.)
#2.Shard the collection
use admin
db.runCommand({shardcollection: "test.vast",key: {id: 1}})
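One way to answer the question above (a sketch): connect to the sh2 primary and list the collection's indexes. Until a chunk of test.vast has migrated to shard2, the collection (and therefore the index) does not exist there yet.
mongo --port 38024
use test
db.vast.getIndexes()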
7.3 Verify Collection Sharding¶
use test
for(i=1;i<500000;i++){db.vast.insert({"id":i,"name":"chupeng","age":70,"date":new Date()})}
#Takes roughly 1 hour; give the VM at least 2 cores if you can (a faster bulk-insert sketch follows below)
db.vast.stats()
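The loop above issues one insert per round trip, which is why it is so slow. A faster sketch using the shell's Bulk API (available in the 3.2 shell; it loads the same documents in batches):
use test
var bulk = db.vast.initializeUnorderedBulkOp();
for (var i = 1; i < 500000; i++) {
    bulk.insert({"id": i, "name": "chupeng", "age": 70, "date": new Date()});
}
bulk.execute();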
7.4 Test the Sharding Result¶
#shard1
mongo --port 38021
db.vast.count()
#shard2
mongo --port 38024
db.vast.count()
#VM demo below
[mongod@151 ~]$ mongo --port 38017 admin
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38017/admin
mongos>
mongos>
mongos> db.runCommand({enablesharding:"test"})
{ "ok" : 1 }
mongos>
mongos> use test
switched to db test
mongos>
mongos> db.vast.ensureIndex({id: 1})
{
"raw" : {
"sh1/192.168.178.151:38021,192.168.178.151:38022" : {
"createdCollectionAutomatically" : true,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1,
"$gleStats" : {
"lastOpTime" : Timestamp(1620844791, 2),
"electionId" : ObjectId("7fffffff0000000000000001")
}
}
},
"ok" : 1
}
mongos>
mongos>
mongos> use admin
switched to db admin
mongos>
mongos> db.runCommand({shardcollection: "test.vast",key: {id: 1}})
{ "collectionsharded" : "test.vast", "ok" : 1 }
mongos>
mongos>
mongos> use test
switched to db test
mongos>
mongos> for(i=1;i<500000;i++){db.vast.insert({"id":i,"name":"chupeng","age":70,"date":new Date()})}
WriteResult({ "nInserted" : 1 })
mongos>
mongos> db.vast.stats()
{
"sharded" : true,
"capped" : false,
"ns" : "test.vast",
"count" : 499999,
"size" : 39499921,
"storageSize" : 4976640,
"totalIndexSize" : 10928128,
"indexSizes" : {
"_id_" : 4665344,
"id_1" : 6262784
},
"avgObjSize" : 79,
"nindexes" : 2,
"nchunks" : 7,
"shards" : {
"shard1" : {
"ns" : "test.vast",
"count" : 271936,
"size" : 21482944,
"avgObjSize" : 79,
"storageSize" : 2736128,
"capped" : false,
"wiredTiger" : {
"metadata" : {
"formatVersion" : 1
},
"creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),block_allocation=best,block_compressor=zlib,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=true),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_max=15,merge_min=0),memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
"type" : "file",
"uri" : "statistics:table:test/collection/11-8343655941705275676",
"LSM" : {
"bloom filter false positives" : 0,
"bloom filter hits" : 0,
"bloom filter misses" : 0,
"bloom filter pages evicted from cache" : 0,
"bloom filter pages read into cache" : 0,
"bloom filters in the LSM tree" : 0,
"chunks in the LSM tree" : 0,
"highest merge generation in the LSM tree" : 0,
"queries that could have benefited from a Bloom filter that did not exist" : 0,
"sleep for LSM checkpoint throttle" : 0,
"sleep for LSM merge throttle" : 0,
"total size of bloom filters" : 0
},
"block-manager" : {
"allocations requiring file extension" : 92,
"blocks allocated" : 261,
"blocks freed" : 51,
"checkpoint size" : 2580480,
"file allocation unit size" : 4096,
"file bytes available for reuse" : 139264,
"file magic number" : 120897,
"file major version number" : 1,
"file size in bytes" : 2736128,
"minor version number" : 0
},
"btree" : {
"btree checkpoint generation" : 54,
"column-store fixed-size leaf pages" : 0,
"column-store internal pages" : 0,
"column-store variable-size RLE encoded values" : 0,
"column-store variable-size deleted values" : 0,
"column-store variable-size leaf pages" : 0,
"fixed-record size" : 0,
"maximum internal page key size" : 368,
"maximum internal page size" : 4096,
"maximum leaf page key size" : 2867,
"maximum leaf page size" : 32768,
"maximum leaf page value size" : 67108864,
"maximum tree depth" : 3,
"number of key/value pairs" : 0,
"overflow pages" : 0,
"pages rewritten by compaction" : 0,
"row-store internal pages" : 0,
"row-store leaf pages" : 0
},
"cache" : {
"bytes currently in the cache" : 46509818,
"bytes read into cache" : 0,
"bytes written from cache" : 31797357,
"checkpoint blocked page eviction" : 0,
"data source pages selected for eviction unable to be evicted" : 0,
"hazard pointer blocked page eviction" : 0,
"in-memory page passed criteria to be split" : 10,
"in-memory page splits" : 5,
"internal pages evicted" : 0,
"internal pages split during eviction" : 0,
"leaf pages split during eviction" : 0,
"modified pages evicted" : 0,
"overflow pages read into cache" : 0,
"overflow values cached in memory" : 0,
"page split during eviction deepened the tree" : 0,
"page written requiring lookaside records" : 0,
"pages read into cache" : 0,
"pages read into cache requiring lookaside entries" : 0,
"pages requested from the cache" : 271953,
"pages written from cache" : 176,
"pages written requiring in-memory restoration" : 0,
"tracked dirty bytes in the cache" : 0,
"unmodified pages evicted" : 0
},
"cache_walk" : {
"Average difference between current eviction generation when the page was last considered" : 0,
"Average on-disk page image size seen" : 0,
"Clean pages currently in cache" : 0,
"Current eviction generation" : 0,
"Dirty pages currently in cache" : 0,
"Entries in the root page" : 0,
"Internal pages currently in cache" : 0,
"Leaf pages currently in cache" : 0,
"Maximum difference between current eviction generation when the page was last considered" : 0,
"Maximum page size seen" : 0,
"Minimum on-disk page image size seen" : 0,
"On-disk page image sizes smaller than a single allocation unit" : 0,
"Pages created in memory and never written" : 0,
"Pages currently queued for eviction" : 0,
"Pages that could not be queued for eviction" : 0,
"Refs skipped during cache traversal" : 0,
"Size of the root page" : 0,
"Total number of pages currently in cache" : 0
},
"compression" : {
"compressed pages read" : 0,
"compressed pages written" : 0,
"page written failed to compress" : 0,
"page written was too small to compress" : 44,
"raw compression call failed, additional data available" : 0,
"raw compression call failed, no additional data available" : 0,
"raw compression call succeeded" : 405
},
"cursor" : {
"bulk-loaded cursor-insert calls" : 0,
"create calls" : 7,
"cursor-insert key and value bytes inserted" : 22488489,
"cursor-remove key bytes removed" : 5,
"cursor-update value bytes updated" : 0,
"insert calls" : 271938,
"next calls" : 1,
"prev calls" : 1,
"remove calls" : 2,
"reset calls" : 271950,
"restarted searches" : 0,
"search calls" : 11,
"search near calls" : 0,
"truncate calls" : 0,
"update calls" : 0
},
"reconciliation" : {
"dictionary matches" : 0,
"fast-path pages deleted" : 0,
"internal page key bytes discarded using suffix compression" : 0,
"internal page multi-block writes" : 0,
"internal-page overflow keys" : 0,
"leaf page key bytes discarded using prefix compression" : 0,
"leaf page multi-block writes" : 44,
"leaf-page overflow keys" : 0,
"maximum blocks required for a page" : 0,
"overflow values written" : 0,
"page checksum matches" : 273,
"page reconciliation calls" : 91,
"page reconciliation calls for eviction" : 0,
"pages deleted" : 0
},
"session" : {
"object compaction" : 0,
"open cursor count" : 4
},
"transaction" : {
"update conflicts" : 0
}
},
"nindexes" : 2,
"totalIndexSize" : 5931008,
"indexSizes" : {
"_id_" : 2539520,
"id_1" : 3391488
},
"ok" : 1
},
"shard2" : {
"ns" : "test.vast",
"count" : 228063,
"size" : 18016977,
"avgObjSize" : 79,
"storageSize" : 2240512,
"capped" : false,
"wiredTiger" : {
"metadata" : {
"formatVersion" : 1
},
"creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),block_allocation=best,block_compressor=zlib,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=true),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_max=15,merge_min=0),memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
"type" : "file",
"uri" : "statistics:table:test/collection/7-7811135352084568871",
"LSM" : {
"bloom filter false positives" : 0,
"bloom filter hits" : 0,
"bloom filter misses" : 0,
"bloom filter pages evicted from cache" : 0,
"bloom filter pages read into cache" : 0,
"bloom filters in the LSM tree" : 0,
"chunks in the LSM tree" : 0,
"highest merge generation in the LSM tree" : 0,
"queries that could have benefited from a Bloom filter that did not exist" : 0,
"sleep for LSM checkpoint throttle" : 0,
"sleep for LSM merge throttle" : 0,
"total size of bloom filters" : 0
},
"block-manager" : {
"allocations requiring file extension" : 75,
"blocks allocated" : 215,
"blocks freed" : 42,
"checkpoint size" : 2150400,
"file allocation unit size" : 4096,
"file bytes available for reuse" : 73728,
"file magic number" : 120897,
"file major version number" : 1,
"file size in bytes" : 2240512,
"minor version number" : 0
},
"btree" : {
"btree checkpoint generation" : 73,
"column-store fixed-size leaf pages" : 0,
"column-store internal pages" : 0,
"column-store variable-size RLE encoded values" : 0,
"column-store variable-size deleted values" : 0,
"column-store variable-size leaf pages" : 0,
"fixed-record size" : 0,
"maximum internal page key size" : 368,
"maximum internal page size" : 4096,
"maximum leaf page key size" : 2867,
"maximum leaf page size" : 32768,
"maximum leaf page value size" : 67108864,
"maximum tree depth" : 3,
"number of key/value pairs" : 0,
"overflow pages" : 0,
"pages rewritten by compaction" : 0,
"row-store internal pages" : 0,
"row-store leaf pages" : 0
},
"cache" : {
"bytes currently in the cache" : 38994042,
"bytes read into cache" : 0,
"bytes written from cache" : 26672874,
"checkpoint blocked page eviction" : 0,
"data source pages selected for eviction unable to be evicted" : 0,
"hazard pointer blocked page eviction" : 0,
"in-memory page passed criteria to be split" : 8,
"in-memory page splits" : 4,
"internal pages evicted" : 0,
"internal pages split during eviction" : 0,
"leaf pages split during eviction" : 0,
"modified pages evicted" : 0,
"overflow pages read into cache" : 0,
"overflow values cached in memory" : 0,
"page split during eviction deepened the tree" : 0,
"page written requiring lookaside records" : 0,
"pages read into cache" : 0,
"pages read into cache requiring lookaside entries" : 0,
"pages requested from the cache" : 228073,
"pages written from cache" : 146,
"pages written requiring in-memory restoration" : 0,
"tracked dirty bytes in the cache" : 0,
"unmodified pages evicted" : 0
},
"cache_walk" : {
"Average difference between current eviction generation when the page was last considered" : 0,
"Average on-disk page image size seen" : 0,
"Clean pages currently in cache" : 0,
"Current eviction generation" : 0,
"Dirty pages currently in cache" : 0,
"Entries in the root page" : 0,
"Internal pages currently in cache" : 0,
"Leaf pages currently in cache" : 0,
"Maximum difference between current eviction generation when the page was last considered" : 0,
"Maximum page size seen" : 0,
"Minimum on-disk page image size seen" : 0,
"On-disk page image sizes smaller than a single allocation unit" : 0,
"Pages created in memory and never written" : 0,
"Pages currently queued for eviction" : 0,
"Pages that could not be queued for eviction" : 0,
"Refs skipped during cache traversal" : 0,
"Size of the root page" : 0,
"Total number of pages currently in cache" : 0
},
"compression" : {
"compressed pages read" : 0,
"compressed pages written" : 0,
"page written failed to compress" : 0,
"page written was too small to compress" : 36,
"raw compression call failed, additional data available" : 0,
"raw compression call failed, no additional data available" : 0,
"raw compression call succeeded" : 326
},
"cursor" : {
"bulk-loaded cursor-insert calls" : 0,
"create calls" : 6,
"cursor-insert key and value bytes inserted" : 18846947,
"cursor-remove key bytes removed" : 4,
"cursor-update value bytes updated" : 0,
"insert calls" : 228064,
"next calls" : 1,
"prev calls" : 1,
"remove calls" : 1,
"reset calls" : 228071,
"restarted searches" : 0,
"search calls" : 5,
"search near calls" : 0,
"truncate calls" : 0,
"update calls" : 0
},
"reconciliation" : {
"dictionary matches" : 0,
"fast-path pages deleted" : 0,
"internal page key bytes discarded using suffix compression" : 0,
"internal page multi-block writes" : 0,
"internal-page overflow keys" : 0,
"leaf page key bytes discarded using prefix compression" : 0,
"leaf page multi-block writes" : 37,
"leaf-page overflow keys" : 0,
"maximum blocks required for a page" : 0,
"overflow values written" : 0,
"page checksum matches" : 216,
"page reconciliation calls" : 74,
"page reconciliation calls for eviction" : 0,
"pages deleted" : 0
},
"session" : {
"object compaction" : 0,
"open cursor count" : 5
},
"transaction" : {
"update conflicts" : 0
}
},
"nindexes" : 2,
"totalIndexSize" : 4997120,
"indexSizes" : {
"_id_" : 2125824,
"id_1" : 2871296
},
"ok" : 1
}
},
"ok" : 1
}
mongos>
#Results
[mongod@151 ~]$ mongo --port 38021
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38021/test
sh1:PRIMARY>
sh1:PRIMARY> db.vast.count()
271936
sh1:PRIMARY>
sh1:PRIMARY> exit
bye
[mongod@151 ~]$
[mongod@151 ~]$ mongo --port 38024
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38024/test
sh2:PRIMARY>
sh2:PRIMARY> db.vast.count()
228063
sh2:PRIMARY>
sh2:PRIMARY> exit
bye
[mongod@151 ~]$
8. Hash Sharding Strategy¶
8.1 Enable Sharding on the oldboy Database¶
#1.Switch users
su - mongod
#2.Log in to the mongos node
mongo --port 38017 admin
#3.Enable sharding
use admin
db.runCommand({enablesharding: "oldboy"})
8.2 Create a Hashed Index on the vast1 Collection in the oldboy Database¶
use oldboy
db.vast1.ensureIndex({id: "hashed"})
8.3 Shard the Collection¶
use admin
sh.shardCollection("oldboy.vast1",{id: "hashed"})
8.4 Insert 100,000 Rows of Test Data¶
use oldboy
for(i=1;i<100000;i++){db.vast1.insert({"id":i,"name":"chupeng1","age":70,"date":new Date()})}
#Takes roughly 15 minutes (the bulk-insert technique from section 7.3 speeds this up as well)
8.5 Test the Hash Sharding Result¶
#sh1
mongo --port 38021
use oldboy
db.vast1.count()
#sh2
mongo --port 38024
use oldboy
db.vast1.count()
#VM demo
[root@151 ~]# su - mongod
Last login: Thu May 13 02:38:22 CST 2021 on pts/0
[mongod@151 ~]$
[mongod@151 ~]$
[mongod@151 ~]$
[mongod@151 ~]$ mongo --port 38017 admin
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38017/admin
mongos>
mongos> use admin
switched to db admin
mongos>
mongos> db.runCommand({enablesharding: "oldboy"})
{ "ok" : 1 }
mongos>
mongos>
mongos> use oldboy
switched to db oldboy
mongos>
mongos>
mongos> db.vast1.ensureIndex({id:"hashed"})
{
"raw" : {
"sh2/192.168.178.151:38024,192.168.178.151:38025" : {
"createdCollectionAutomatically" : true,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1,
"$gleStats" : {
"lastOpTime" : Timestamp(1620845091, 2),
"electionId" : ObjectId("7fffffff0000000000000001")
}
}
},
"ok" : 1
}
mongos>
mongos>
mongos> use admin
switched to db admin
mongos>
mongos> sh.shardCollection("oldboy.vast1",{id:"hashed"})
{ "collectionsharded" : "oldboy.vast1", "ok" : 1 }
mongos>
mongos>
mongos> use oldboy
switched to db oldboy
mongos>
mongos>
mongos> for(i=1;i<100000;i++){db.vast1.insert({"id":i,"name":"chupeng1","age":70,"date":new Date()})}
WriteResult({ "nInserted" : 1 })
mongos>
mongos> exit
#Check the results
[mongod@151 ~]$ mongo --port 38021
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38021/test
sh1:PRIMARY>
sh1:PRIMARY> use oldboy
switched to db oldboy
sh1:PRIMARY>
sh1:PRIMARY> db.vast1.count()
50393
sh1:PRIMARY>
sh1:PRIMARY> exit
bye
[mongod@151 ~]$
[mongod@151 ~]$ mongo --port 38024
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38024/test
sh2:PRIMARY>
sh2:PRIMARY> use oldboy
switched to db oldboy
sh2:PRIMARY>
sh2:PRIMARY> db.vast1.count()
49606
sh2:PRIMARY>
sh2:PRIMARY> exit
bye
[mongod@151 ~]$
9. Other MongoDB Sharded Cluster Management Commands¶
#0.Log in to the mongos node
mongo --port 38017 admin
#1.Check whether this is a sharded cluster
db.runCommand({isdbgrid: 1})
#2.List all shard information
db.runCommand({listshards: 1})
#3.List the databases with sharding enabled
use config
db.databases.find({"partitioned": true})
#or
db.databases.find()
#4.View the shard keys
db.collections.find().pretty()
#5.View detailed sharding information
db.printShardingStatus()
sh.status()
#6.Remove a shard node (use with caution)
#6.1 Confirm whether the balancer is running
sh.getBalancerState()
#6.2 Remove the shard2 node (use with caution; see the drain-progress sketch below)
db.runCommand({removeShard: "shard2"})
#Note: removal immediately triggers the balancer, so do this only when business traffic is low
#7.Add a shard (same commands as earlier)
db.runCommand({addshard: "sh1/192.168.178.151:38021,192.168.178.151:38022,192.168.178.151:38023",name: "shard1"})
db.runCommand({addshard: "sh2/192.168.178.151:38024,192.168.178.151:38025,192.168.178.151:38026",name: "shard2"})
[root@151 ~]# mongo --port 38017 admin
-bash: mongo: command not found
[root@151 ~]#
[root@151 ~]#
[root@151 ~]#
[root@151 ~]#
[root@151 ~]# su - mongod
Last login: Thu May 13 02:41:59 CST 2021 on pts/1
[mongod@151 ~]$
[mongod@151 ~]$
[mongod@151 ~]$ mongo --port 38017 admin
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38017/admin
mongos>
mongos>
mongos> db.runCommand({isdbgrid: 1})
{ "isdbgrid" : 1, "hostname" : "151", "ok" : 1 }
mongos>
mongos>
mongos>
mongos> db.runCommand({listshards: 1})
{
"shards" : [
{
"_id" : "shard1",
"host" : "sh1/192.168.178.151:38021,192.168.178.151:38022"
},
{
"_id" : "shard2",
"host" : "sh2/192.168.178.151:38024,192.168.178.151:38025"
}
],
"ok" : 1
}
mongos>
mongos>
mongos>
mongos> show dbs
config 0.001GB
oldboy 0.005GB
test 0.015GB
mongos>
mongos>
mongos>
mongos> use config
switched to db config
mongos>
mongos>
mongos> db.databases.find({"partitioned": true})
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
mongos>
mongos>
mongos>
mongos> db.databases.find()
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
mongos>
mongos>
mongos>
mongos> db.collections.find()
{ "_id" : "test.vast", "lastmodEpoch" : ObjectId("609c210ea630ecb676f8afc3"), "lastmod" : ISODate("1970-02-19T17:02:47.296Z"), "dropped" : false, "key" : { "id" : 1 }, "unique" : false }
{ "_id" : "oldboy.vast1", "lastmodEpoch" : ObjectId("609c2243a630ecb676f8afe7"), "lastmod" : ISODate("1970-02-19T17:02:47.297Z"), "dropped" : false, "key" : { "id" : "hashed" }, "unique" : false }
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> db.collections.find().pretty()
{
"_id" : "test.vast",
"lastmodEpoch" : ObjectId("609c210ea630ecb676f8afc3"),
"lastmod" : ISODate("1970-02-19T17:02:47.296Z"),
"dropped" : false,
"key" : {
"id" : 1
},
"unique" : false
}
{
"_id" : "oldboy.vast1",
"lastmodEpoch" : ObjectId("609c2243a630ecb676f8afe7"),
"lastmod" : ISODate("1970-02-19T17:02:47.297Z"),
"dropped" : false,
"key" : {
"id" : "hashed"
},
"unique" : false
}
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> db.printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("609c10cda630ecb676f8ae1e")
}
shards:
{ "_id" : "shard1", "host" : "sh1/192.168.178.151:38021,192.168.178.151:38022" }
{ "_id" : "shard2", "host" : "sh2/192.168.178.151:38024,192.168.178.151:38025" }
active mongoses:
"3.2.22" : 1
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
4 : Success
databases:
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
test.vast
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard1 4
shard2 3
{ "id" : { "$minKey" : 1 } } -->> { "id" : 2 } on : shard2 Timestamp(4, 1)
{ "id" : 2 } -->> { "id" : 14 } on : shard1 Timestamp(3, 1)
{ "id" : 14 } -->> { "id" : 106198 } on : shard1 Timestamp(2, 2)
{ "id" : 106198 } -->> { "id" : 229379 } on : shard1 Timestamp(2, 3)
{ "id" : 229379 } -->> { "id" : 335563 } on : shard2 Timestamp(3, 2)
{ "id" : 335563 } -->> { "id" : 457441 } on : shard2 Timestamp(3, 3)
{ "id" : 457441 } -->> { "id" : { "$maxKey" : 1 } } on : shard1 Timestamp(4, 0)
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
oldboy.vast1
shard key: { "id" : "hashed" }
unique: false
balancing: true
chunks:
shard1 2
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : NumberLong("-4611686018427387902") } on : shard1 Timestamp(2, 2)
{ "id" : NumberLong("-4611686018427387902") } -->> { "id" : NumberLong(0) } on : shard1 Timestamp(2, 3)
{ "id" : NumberLong(0) } -->> { "id" : NumberLong("4611686018427387902") } on : shard2 Timestamp(2, 4)
{ "id" : NumberLong("4611686018427387902") } -->> { "id" : { "$maxKey" : 1 } } on : shard2 Timestamp(2, 5)
mongos>
mongos>
mongos>
mongos>
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("609c10cda630ecb676f8ae1e")
}
shards:
{ "_id" : "shard1", "host" : "sh1/192.168.178.151:38021,192.168.178.151:38022" }
{ "_id" : "shard2", "host" : "sh2/192.168.178.151:38024,192.168.178.151:38025" }
active mongoses:
"3.2.22" : 1
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
4 : Success
databases:
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
test.vast
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard1 4
shard2 3
{ "id" : { "$minKey" : 1 } } -->> { "id" : 2 } on : shard2 Timestamp(4, 1)
{ "id" : 2 } -->> { "id" : 14 } on : shard1 Timestamp(3, 1)
{ "id" : 14 } -->> { "id" : 106198 } on : shard1 Timestamp(2, 2)
{ "id" : 106198 } -->> { "id" : 229379 } on : shard1 Timestamp(2, 3)
{ "id" : 229379 } -->> { "id" : 335563 } on : shard2 Timestamp(3, 2)
{ "id" : 335563 } -->> { "id" : 457441 } on : shard2 Timestamp(3, 3)
{ "id" : 457441 } -->> { "id" : { "$maxKey" : 1 } } on : shard1 Timestamp(4, 0)
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
oldboy.vast1
shard key: { "id" : "hashed" }
unique: false
balancing: true
chunks:
shard1 2
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : NumberLong("-4611686018427387902") } on : shard1 Timestamp(2, 2)
{ "id" : NumberLong("-4611686018427387902") } -->> { "id" : NumberLong(0) } on : shard1 Timestamp(2, 3)
{ "id" : NumberLong(0) } -->> { "id" : NumberLong("4611686018427387902") } on : shard2 Timestamp(2, 4)
{ "id" : NumberLong("4611686018427387902") } -->> { "id" : { "$maxKey" : 1 } } on : shard2 Timestamp(2, 5)
mongos>
mongos>
mongos>
mongos>
mongos> exit
bye
#Shard-removal test (best avoided on a VM... the chunk migration saturates the network and exhausts resources)
[mongod@151 ~]$ mongo --port 38017 admin
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38017/admin
mongos>
mongos>
mongos> sh.getBalancerState()
true
mongos>
mongos>
mongos>
mongos> db.runCommand({removeShard: "shard2"})
{
"msg" : "draining started successfully",
"state" : "started",
"shard" : "shard2",
"note" : "you need to drop or movePrimary these databases",
"dbsToMove" : [
"oldboy"
],
"ok" : 1
}
mongos>
mongos>
mongos>
mongos> db.runCommand({isdbgrid: 1})
{ "isdbgrid" : 1, "hostname" : "151", "ok" : 1 }
mongos>
mongos>
mongos> #2. List all shard information
2021-05-13T08:25:25.370+0800 E QUERY [thread1] SyntaxError: illegal character @(shell):1:0
#Note: the mongo shell is a JavaScript REPL, so # is not a comment character there; use // inside the shell
mongos> db.runCommand({listshards: 1})
{
"shards" : [
{
"_id" : "shard1",
"host" : "sh1/192.168.178.151:38021,192.168.178.151:38022"
},
{
"_id" : "shard2",
"host" : "sh2/192.168.178.151:38024,192.168.178.151:38025",
"draining" : true
}
],
"ok" : 1
}
mongos>
mongos>
mongos>
mongos>
mongos> db.printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("609c10cda630ecb676f8ae1e")
}
shards:
{ "_id" : "shard1", "host" : "sh1/192.168.178.151:38021,192.168.178.151:38022" }
{ "_id" : "shard2", "host" : "sh2/192.168.178.151:38024,192.168.178.151:38025", "draining" : true }
most recently active mongoses:
"3.2.22" : 1
balancer:
Currently enabled: yes
Currently running: yes
NaN
Collections with active migrations:
oldboy.vast1 started at Thu May 13 2021 08:25:15 GMT+0800 (CST)
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
5 : Success
databases:
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
test.vast
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard1 5
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : 2 } on : shard1 Timestamp(5, 0)
{ "id" : 2 } -->> { "id" : 14 } on : shard1 Timestamp(3, 1)
{ "id" : 14 } -->> { "id" : 106198 } on : shard1 Timestamp(2, 2)
{ "id" : 106198 } -->> { "id" : 229379 } on : shard1 Timestamp(2, 3)
{ "id" : 229379 } -->> { "id" : 335563 } on : shard2 Timestamp(5, 1)
{ "id" : 335563 } -->> { "id" : 457441 } on : shard2 Timestamp(3, 3)
{ "id" : 457441 } -->> { "id" : { "$maxKey" : 1 } } on : shard1 Timestamp(4, 0)
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
oldboy.vast1
shard key: { "id" : "hashed" }
unique: false
balancing: true
chunks:
shard1 2
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : NumberLong("-4611686018427387902") } on : shard1 Timestamp(2, 2)
{ "id" : NumberLong("-4611686018427387902") } -->> { "id" : NumberLong(0) } on : shard1 Timestamp(2, 3)
{ "id" : NumberLong(0) } -->> { "id" : NumberLong("4611686018427387902") } on : shard2 Timestamp(2, 4)
{ "id" : NumberLong("4611686018427387902") } -->> { "id" : { "$maxKey" : 1 } } on : shard2 Timestamp(2, 5)
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> db.runCommand({listshards: 1})
{
"shards" : [
{
"_id" : "shard1",
"host" : "sh1/192.168.178.151:38021,192.168.178.151:38022"
},
{
"_id" : "shard2",
"host" : "sh2/192.168.178.151:38024,192.168.178.151:38025",
"draining" : true
}
],
"ok" : 1
}
mongos>
mongos>
mongos>
mongos>
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("609c10cda630ecb676f8ae1e")
}
shards:
{ "_id" : "shard1", "host" : "sh1/192.168.178.151:38021,192.168.178.151:38022" }
{ "_id" : "shard2", "host" : "sh2/192.168.178.151:38024,192.168.178.151:38025", "draining" : true }
most recently active mongoses:
"3.2.22" : 1
balancer:
Currently enabled: yes
Currently running: yes
NaN
Collections with active migrations:
oldboy.vast1 started at Thu May 13 2021 08:25:15 GMT+0800 (CST)
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
5 : Success
databases:
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
test.vast
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard1 5
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : 2 } on : shard1 Timestamp(5, 0)
{ "id" : 2 } -->> { "id" : 14 } on : shard1 Timestamp(3, 1)
{ "id" : 14 } -->> { "id" : 106198 } on : shard1 Timestamp(2, 2)
{ "id" : 106198 } -->> { "id" : 229379 } on : shard1 Timestamp(2, 3)
{ "id" : 229379 } -->> { "id" : 335563 } on : shard2 Timestamp(5, 1)
{ "id" : 335563 } -->> { "id" : 457441 } on : shard2 Timestamp(3, 3)
{ "id" : 457441 } -->> { "id" : { "$maxKey" : 1 } } on : shard1 Timestamp(4, 0)
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
oldboy.vast1
shard key: { "id" : "hashed" }
unique: false
balancing: true
chunks:
shard1 2
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : NumberLong("-4611686018427387902") } on : shard1 Timestamp(2, 2)
{ "id" : NumberLong("-4611686018427387902") } -->> { "id" : NumberLong(0) } on : shard1 Timestamp(2, 3)
{ "id" : NumberLong(0) } -->> { "id" : NumberLong("4611686018427387902") } on : shard2 Timestamp(2, 4)
{ "id" : NumberLong("4611686018427387902") } -->> { "id" : { "$maxKey" : 1 } } on : shard2 Timestamp(2, 5)
mongos>
mongos>
mongos>
mongos> exit
bye
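The removeShard output above only starts the drain: the chunks on shard2 still have to migrate away, and the databases in "dbsToMove" (here oldboy, whose primary shard is shard2) must be handled before the shard can actually be dropped. A minimal sketch of the remaining steps, run in the mongos admin database and assuming shard1 is the shard we keep:
#1. Poll the drain: rerun removeShard until "state" changes from "ongoing" to "completed"
db.runCommand({removeShard: "shard2"})
#2. Move the databases listed in "dbsToMove" off the draining shard
db.runCommand({movePrimary: "oldboy", to: "shard1"})
#3. Run removeShard one final time; once all chunks and primaries are gone it returns "state": "completed"
db.runCommand({removeShard: "shard2"})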
10. Balancer operations¶
10.1 Introduction¶
#1.The balancer is a core mongos feature: it continuously inspects the chunk distribution across all shard nodes and migrates chunks automatically
#2.When does the balancer run?
1.It runs automatically, preferring to migrate when the system is not busy
2.When a shard is being removed, migration starts immediately
3.If a time window is configured, the balancer runs only inside that window
#3.When needed, the balancer can be stopped and restarted on demand (e.g., during a backup), as sketched below
sh.stopBalancer()
sh.startBalancer()
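For example, a backup window might look like the following sketch; sh.isBalancerRunning() and sleep() are standard shell helpers, and the extra wait loop is just a safety check that no migration is still in flight:
#Stop the balancer, then wait until the current balancing round (if any) has finished
sh.stopBalancer()
while (sh.isBalancerRunning()) { sleep(1000) }
#...run the backup (e.g., mongodump) here...
#Re-enable the balancer and confirm its state
sh.startBalancer()
sh.getBalancerState()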
10.2 Customizing the time window for automatic balancing¶
use config
sh.setBalancerState(true)
#Restrict the balancer to a given time window
db.settings.update({_id: "balancer"},{$set: {activeWindow: {start: "10:00",stop: "12:00"}}},true)
#Check the window
sh.getBalancerWindow()
#or
sh.status()
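The window lives in the balancer document of config.settings, so it can also be inspected or cleared directly; a sketch, run against the config database:
use config
#Inspect the raw balancer settings document
db.settings.find({_id: "balancer"})
#Remove the activeWindow so the balancer may run at any time again
db.settings.update({_id: "balancer"},{$unset: {activeWindow: true}})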
[mongod@151 ~]$ mongo --port 38017 admin
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:38017/admin
mongos>
mongos>
mongos>
mongos>
mongos> use config
switched to db config
mongos>
mongos>
mongos> sh.getBalancerWindow()
null
mongos>
mongos>
mongos> sh.setBalancerState(true)
WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "balancer" })
mongos>
mongos>
mongos> db.settings.update({_id: "balancer"},{$set: {activeWindow: {start: "10:00",stop: "12:00"}}},true)
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
mongos>
mongos>
mongos>
mongos> sh.getBalancerWindow()
{ "start" : "10:00", "stop" : "12:00" }
mongos>
mongos>
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("609c10cda630ecb676f8ae1e")
}
shards:
{ "_id" : "shard1", "host" : "sh1/192.168.178.151:38021,192.168.178.151:38022" }
{ "_id" : "shard2", "host" : "sh2/192.168.178.151:38024,192.168.178.151:38025", "draining" : true }
most recently active mongoses:
"3.2.22" : 1
balancer:
Currently enabled: yes
Currently running: yes
NaN
Balancer active window is set between 10:00 and 12:00 server local time
Collections with active migrations:
test.vast started at Thu May 13 2021 08:27:46 GMT+0800 (CST)
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
6 : Success
databases:
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
test.vast
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard1 5
shard2 2
{ "id" : { "$minKey" : 1 } } -->> { "id" : 2 } on : shard1 Timestamp(5, 0)
{ "id" : 2 } -->> { "id" : 14 } on : shard1 Timestamp(3, 1)
{ "id" : 14 } -->> { "id" : 106198 } on : shard1 Timestamp(2, 2)
{ "id" : 106198 } -->> { "id" : 229379 } on : shard1 Timestamp(2, 3)
{ "id" : 229379 } -->> { "id" : 335563 } on : shard2 Timestamp(5, 1)
{ "id" : 335563 } -->> { "id" : 457441 } on : shard2 Timestamp(3, 3)
{ "id" : 457441 } -->> { "id" : { "$maxKey" : 1 } } on : shard1 Timestamp(4, 0)
{ "_id" : "oldboy", "primary" : "shard2", "partitioned" : true }
oldboy.vast1
shard key: { "id" : "hashed" }
unique: false
balancing: true
chunks:
shard1 3
shard2 1
{ "id" : { "$minKey" : 1 } } -->> { "id" : NumberLong("-4611686018427387902") } on : shard1 Timestamp(2, 2)
{ "id" : NumberLong("-4611686018427387902") } -->> { "id" : NumberLong(0) } on : shard1 Timestamp(2, 3)
{ "id" : NumberLong(0) } -->> { "id" : NumberLong("4611686018427387902") } on : shard1 Timestamp(3, 0)
{ "id" : NumberLong("4611686018427387902") } -->> { "id" : { "$maxKey" : 1 } } on : shard2 Timestamp(3, 1)
mongos>
mongos>
mongos> exit
bye
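As an alternative to scanning the long sh.status() output, the per-shard chunk counts can be pulled straight from the config database; a sketch for the oldboy.vast1 collection used above:
use config
#Count chunks per shard for one collection
db.chunks.aggregate([
    {$match: {ns: "oldboy.vast1"}},
    {$group: {_id: "$shard", chunks: {$sum: 1}}}
])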