centos7 mongodb4.4分片集群部署

Posted 疯子7314

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了centos7 mongodb4.4分片集群部署相关的知识,希望对你有一定的参考价值。


#Create the directories mongod needs.
#NOTE: the original used "configset,shared1,..." without braces, which creates
#a single literal directory named "configset,shared1,shared2,shared3" instead
#of four separate trees. Brace expansion fixes that.

mkdir -pv /data/app/mongodb/conf
mkdir -pv /data/app/mongodb/{configset,shared1,shared2,shared3}/log
mkdir -pv /data/mongodb/{configset,shared1,shared2,shared3}/data

 

#Create the service account and give it ownership of the data/app trees.

useradd -s /sbin/nologin -M mongod
# "user:group" is the standard chown separator; the "user.group" form is a
# deprecated GNU extension.
chown -R mongod:mongod /data/mongodb/
chown -R mongod:mongod /data/app/mongodb/

 

#Generate the cluster keyfile; the same file must be copied to the other
#two nodes (all members of the cluster authenticate with an identical key).

openssl rand -base64 753 > /data/app/mongodb/conf/keyFile.key
chown mongod:mongod /data/app/mongodb/conf/keyFile.key
chmod 600 /data/app/mongodb/conf/keyFile.key

 

#Set the mongo environment variable.
#Single quotes keep $PATH literal in the written file so it is expanded at
#login time; with double quotes the CURRENT PATH would be frozen into
#/etc/profile.d/mongodb.sh at write time.

echo 'export PATH=$PATH:/data/app/mongodb/bin' > /etc/profile.d/mongodb.sh
source /etc/profile.d/mongodb.sh

 


#Distribute the keyfile to every node (same path on each host).

for node in 192.168.1.101 192.168.1.102 192.168.1.103; do
  scp /data/app/mongodb/conf/keyFile.key root@"${node}":/data/app/mongodb/conf/keyFile.key
done
mv mongodb /data/app/mongodb/
yum install -y numactl

 

 


#Create the systemd unit for the config server.
#NOTE: the original ran "cat <file>", which only DISPLAYS the file; a quoted
#heredoc is needed to actually write it ('EOF' also keeps $MAINPID literal).

cat > /etc/systemd/system/mongo-configset.service <<'EOF'
[Unit]
Description=High-performance, schema-free document-oriented database
After=network.target
[Service]
User=mongod
Type=forking
ExecStart=/usr/bin/numactl --interleave=all /data/app/mongodb/bin/mongod --config /data/app/mongodb/conf/configset.conf
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/data/app/mongodb/bin/mongod --shutdown --config /data/app/mongodb/conf/configset.conf
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF

# Fix ownership BEFORE starting: the unit runs as User=mongod, which must be
# able to write the dbPath and logs — chown after start is too late.
chown -R mongod:mongod /data/
systemctl daemon-reload
systemctl start mongo-configset

 

#Write the config server configuration file (the original section repeated the
#systemd unit by mistake; this is the configset.conf that the unit references).
#Layout mirrors the shard config shown later — the differences are the
#replica-set name (configset), port 21000, and clusterRole: configsvr.

cat > /data/app/mongodb/conf/configset.conf <<'EOF'
storage:
    dbPath: /data/mongodb/configset/data
    journal:
        enabled: true
systemLog:
    destination: file
    logAppend: true
    path: /data/app/mongodb/configset/log/configset.log
replication:
    replSetName: configset
processManagement:
    fork: true
    pidFilePath: /data/app/mongodb/configset/log/configset.pid
net:
    port: 21000
    bindIp: 0.0.0.0
# Uncomment AFTER the root user has been created (see below); with auth on
# from the start you could not log in to create the first user remotely.
#security:
#    keyFile: /data/app/mongodb/conf/keyFile.key
#    authorization: enabled
sharding:
    clusterRole: configsvr
EOF

systemctl daemon-reload
systemctl restart mongo-configset

 

#Check that the config server is listening (its port is 21000;
#anchoring with ':' avoids matching unrelated 2100x ports).

ss -tnl | grep ':21000'

 

#添加config节点

mongo 192.168.1.101:21000
rs.add("192.168.1.102:21000")
rs.add("192.168.1.103:21000")
rs.status()

 

#添加用户

mongo 192.168.1.101:21000
use admin
db.createUser(user:"root",pwd:\'密码\',roles:["root","clusterAdmin","userAdminAnyDatabase","readWriteAnyDatabase"])
db.auth("root","密码")

 


#用户创建完毕后修改config节点配置文件,放开认证部分

systemctl restart mongo-configset

#下次登录使用用户名密码登录

mongo -u root -p root 192.168.1.101:21000/admin

配置shard配置文件,mongodb1,mongodb2,mongodb3都要配置shard1.conf,shard2.conf,shard3.conf,注意修改名称和端口号

[root@mongodb1 ~]# cat /data/app/mongodb/conf/shared1.conf 
storage:
    dbPath: /data/mongodb/shared1/data
    journal:
        enabled: true
    wiredTiger:
        engineConfig:
            cacheSizeGB: 4
systemLog:
    destination: file
    logAppend: true
    path: /data/app/mongodb/shared1/log/shared1.log
replication:
    replSetName: shared1
processManagement:
    fork: true
    pidFilePath: /data/app/mongodb/shared1/log/shared1.pid
net:
    port: 27001
    bindIp: 0.0.0.0
security:
    # Shared cluster keyfile — must be identical on every node
    keyFile: /data/app/mongodb/conf/keyFile.key
    authorization: enabled
sharding:
    # Role of this instance in the sharded cluster
    # (configsvr: config server instance, shardsvr: shard instance)
    clusterRole: shardsvr

 

 

#配置shard服务 mongo-27001,mongo-27002,mongo-27003

[root@mongodb1 ~]# cat /etc/systemd/system/mongo-27001.service 
[Unit]
Description=High-performance,schema-free document-oriented database
After=network.target

[Service]
User=mongod
Type=forking
ExecStart=/usr/bin/numactl --interleave=all /data/app/mongodb/bin/mongod --config /data/app/mongodb/conf/shared1.conf
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/data/app/mongodb/bin/mongod --shutdown --config /data/app/mongodb/conf/shared1.conf
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

 

 


#Initialize shard1 (replica set "shared1") and add the other two members

mongo 192.168.1.101:27001
rs.initiate()
rs.add("192.168.1.102:27001")
rs.add("192.168.1.103:27001")
rs.status()

 

#Initialize shard2 (replica set "shared2") and add the other two members

mongo 192.168.1.102:27002
rs.initiate()
rs.add("192.168.1.101:27002")
rs.add("192.168.1.103:27002")
rs.status()

 

#Initialize shard3 (replica set "shared3") and add the other two members

mongo 192.168.1.103:27003
rs.initiate()
rs.add("192.168.1.101:27003")
rs.add("192.168.1.102:27003")
rs.status()

 

use admin
db.createUser(user:"root",pwd:\'密码\',roles:["root","clusterAdmin","userAdminAnyDatabase","readWriteAnyDatabase"])
db.auth("root","密码")

 

#用户创建完毕后所有节点配置文件开启认证

systemctl restart mongo-27001 mongo-27002 mongo-27003
mongo -u root -p "密码" 192.168.1.101:27001/admin

#配置路由服务

mkdir -pv /data/app/mongodb/mongos/log
chown -R mongod.mongod /data/app/mongodb

 

#配置mongos路由配置文件

[root@mongodb1 ~]# cat /data/app/mongodb/conf/mongos.conf 
systemLog:
    destination: file
    logAppend: true
    path: /data/app/mongodb/mongos/log/mongos.log
processManagement:
    fork: true
    pidFilePath: /data/app/mongodb/mongos/log/mongos.pid
net:
    port: 20000
    bindIp: 0.0.0.0
sharding:
    # Config server replica set: <replSetName>/<host:port>,<host:port>,...
    configDB: configset/192.168.1.101:21000,192.168.1.102:21000,192.168.1.103:21000
security:
    # mongos stores no data; it only needs the shared keyFile for cluster auth
    keyFile: /data/app/mongodb/conf/keyFile.key

#配置mongos服务

[root@mongodb1 ~]# cat /etc/systemd/system/mongos.service
[Unit]
Description=High-performance, schema-free document-oriented database
After=network.target

[Service]
User=mongod
Type=forking
ExecStart=/usr/bin/numactl --interleave=all /data/app/mongodb/bin/mongos --config /data/app/mongodb/conf/mongos.conf
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

 

#Register the shard replica sets on mongos.
#Two fixes vs. the original: (1) the replica-set names are "shared1/2/3"
#(as set via replSetName in the .conf files), not "shard1" for all of them;
#(2) each replica set is added ONCE, with a seed list of its members —
#repeating sh.addShard per member is unnecessary.

sh.addShard("shared1/192.168.1.101:27001,192.168.1.102:27001,192.168.1.103:27001")
sh.addShard("shared2/192.168.1.101:27002,192.168.1.102:27002,192.168.1.103:27002")
sh.addShard("shared3/192.168.1.101:27003,192.168.1.102:27003,192.168.1.103:27003")
sh.status()

 


#测试分片

mongo -u root -p "密码" 192.168.1.101:20000/admin

 

#查看分片节点

db.getSiblingDB("config").shards.find()

 

#查看哪些数据库开启了分片

db.getSiblingDB("config").databases.find()

 


#创建并进入testdb库

use testdb

 

#开启test01库的分片

sh.enableSharding("testkdb")

 

#根据id建进行hash分片

sh.shardCollection("testdb.test01","id":"hashed")

 


#Insert 9999 test documents.
#insert() takes a document — the surrounding braces were missing.

use testdb
for(i=1;i<10000;i++)db.test01.insert({"id":i,"name":"aaa"})

 


#查看分片的片键

use config
db.collections.find().pretty()

 


#hash分片数据验证,hashed分片会尽可能均匀分布在每个primary节点上

shard1:
mongo -u root -p "密码" 192.168.1.101:27001/admin
use testdb
db.test01.count()

shard2:
mongo -u root -p "密码" 192.168.1.102:27002/admin
use testdb
db.test01.count()

shard3:
mongo -u root -p "密码" 192.168.1.103:27003/admin
use testdb
db.test01.count()

 

centos7部署Mongodb复制集结合分片(超详细)

Mongodb复制集结合分片
重点:概述、原理、实施案例
一、概述:
概述:
分片(sharding)是指将数据库拆分,将其分散在不同的机器上的过程。分片集群(sharded cluster)是一种水平扩展数据库系统性能的方法,能够将数据集分布式存储在不同的分片(shard)上,每个分片只保存数据集的一部分,MongoDB保证各个分片之间不会有重复的数据,所有分片保存的数据之和就是完整的数据集。分片集群将数据集分布式存储,能够将负载分摊到多个分片上,每个分片只负责读写一部分数据,充分利用了各个shard的系统资源,提高数据库系统的吞吐量。
注:mongodb3.2版本后,分片技术必须结合复制集完成;
应用场景:
1.单台机器的磁盘不够用了,使用分片解决磁盘空间的问题。
2.单个mongod已经不能满足写数据的性能要求。通过分片让写压力分散到各个分片上面,使用分片服务器自身的资源。
3.想把大量数据放到内存里提高性能。和上面一样,通过分片使用分片服务器自身的资源。

二、原理:

存储方式:数据集被拆分成数据块(chunk),每个数据块包含多个doc,数据块分布式存储在分片集群中。
角色:
Config server:MongoDB负责追踪数据块在shard上的分布信息,每个分片存储哪些数据块,叫做分片的元数据,保存在config server上的数据库 config中,一般使用3台config server,所有config server中的config数据库必须完全相同(建议将config server部署在不同的服务器,以保证稳定性);
Shard server:将数据进行分片,拆分成数据块(chunk),数据块真正存放的单位;
Mongos server:数据库集群请求的入口,所有的请求都通过mongos进行协调,查看分片的元数据,查找chunk存放位置,mongos自己就是一个请求分发中心,在生产环境通常有多mongos作为请求的入口,防止其中一个挂掉所有的mongodb请求都没有办法操作。
总结:
应用请求mongos来操作mongodb的增删改查,配置服务器存储数据库元信息,并且和mongos做同步,数据最终存入在shard(分片)上,为了防止数据丢失,同步在副本集中存储了一份,仲裁节点在数据存储到分片的时候决定存储到哪个节点。

三、案例实施:

实验环境:
192.168.100.101
config.benet.com 192.168.100.102
shard1.benet.com 192.168.100.103
shard2.benet.com
Mongos:27025 mongos:27025 mongos:27025
config(configs):27017 shard(shard1):27017 shard(shard2):27017
config(configs):27018 shard(shard1):27018 shard(shard2):27018
config(configs):27019 shard(shard1):27019 shard(shard2):27019

实验步骤:
? 安装mongodb服务;
? 配置config节点的实例;
? 配置shard1的实例:
? 配置shard2实例:
? 配置分片并验证:

? 安装mongodb服务:
192.168.100.101、192.168.100.102、192.168.100.103:
[[email protected] ~]# tar zxvf mongodb-linux-x86_64-rhel70-3.6.3.tgz
[[email protected] ~]# mv mongodb-linux-x86_64-rhel70-3.6.3 /usr/local/mongodb
[[email protected] ~]# echo "export PATH=/usr/local/mongodb/bin:$PATH" >>/etc/profile
[[email protected] ~]# source /etc/profile
[[email protected] ~]# ulimit -n 25000
[[email protected] ~]# ulimit -u 25000
[[email protected] ~]# echo 0 >/proc/sys/vm/zone_reclaim_mode
[[email protected] ~]# sysctl -w vm.zone_reclaim_mode=0
[[email protected] ~]# echo never >/sys/kernel/mm/transparent_hugepage/enabled
[[email protected] ~]# echo never >/sys/kernel/mm/transparent_hugepage/defrag
[[email protected] ~]# cd /usr/local/mongodb/bin/
[[email protected] bin]# mkdir {../mongodb1,../mongodb2,../mongodb3}
[[email protected] bin]# mkdir ../logs
[[email protected] bin]# touch ../logs/mongodb{1..3}.log
[[email protected] bin]# chmod 777 ../logs/mongodb*

? 配置config节点的实例:
192.168.100.101:
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb1.conf
bind_ip=192.168.100.101
port=27017
dbpath=/usr/local/mongodb/mongodb1/
logpath=/usr/local/mongodb/logs/mongodb1.log
logappend=true
fork=true
maxConns=5000
replSet=configs
#replication name
configsvr=true
END
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb2.conf
bind_ip=192.168.100.101
port=27018
dbpath=/usr/local/mongodb/mongodb2/
logpath=/usr/local/mongodb/logs/mongodb2.log
logappend=true
fork=true
maxConns=5000
replSet=configs
configsvr=true
END
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb3.conf
bind_ip=192.168.100.101
port=27019
dbpath=/usr/local/mongodb/mongodb3/
logpath=/usr/local/mongodb/logs/mongodb3.log
logappend=true
fork=true
maxConns=5000
replSet=configs
configsvr=true
END
[[email protected] bin]# cd
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb3.conf
[[email protected] ~]# netstat -utpln |grep mongod
tcp 0 0 192.168.100.101:27019 0.0.0.0: LISTEN 2271/mongod
tcp 0 0 192.168.100.101:27017 0.0.0.0:
LISTEN 2440/mongod
tcp 0 0 192.168.100.101:27018 0.0.0.0:* LISTEN 1412/mongod
[[email protected] ~]# echo -e "/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb1.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb2.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb3.conf" >>/etc/rc.local
[[email protected] ~]# chmod +x /etc/rc.local
[[email protected] ~]# cat <<'END' >>/etc/init.d/mongodb
#!/bin/bash
# Quoted 'END' delimiter keeps $1/$2/$INSTANCE/$ACTION literal in the
# written script (an unquoted heredoc would expand them to empty at write
# time). Case patterns use plain ASCII quotes, not typographic quotes.
INSTANCE=$1
ACTION=$2
case "$ACTION" in
'start')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf;;
'stop')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf --shutdown;;
'restart')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf --shutdown
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf;;
esac
END
[[email protected] ~]# chmod +x /etc/init.d/mongodb
[[email protected] ~]# mongo --port 27017 --host 192.168.100.101

cfg={"_id":"configs","members":[{"_id":0,"host":"192.168.100.101:27017"},{"_id":1,"host":"192.168.100.101:27018"},{"_id":2,"host":"192.168.100.101:27019"}]}
rs.initiate(cfg)
configs:PRIMARY> rs.status()
{
"set" : "configs",
"date" : ISODate("2018-04-24T18:53:44.375Z"),
"myState" : 1,
"term" : NumberLong(1),
"configsvr" : true,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.100.101:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 6698,
"optime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T18:53:40Z"),
"electionTime" : Timestamp(1524590293, 1),
"electionDate" : ISODate("2018-04-24T17:18:13Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.100.101:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 5741,
"optime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T18:53:40Z"),
"optimeDurableDate" : ISODate("2018-04-24T18:53:40Z"),
"lastHeartbeat" : ISODate("2018-04-24T18:53:42.992Z"),
"lastHeartbeatRecv" : ISODate("2018-04-24T18:53:43.742Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.101:27017",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.100.101:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 5741,
"optime" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1524596020, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T18:53:40Z"),
"optimeDurableDate" : ISODate("2018-04-24T18:53:40Z"),
"lastHeartbeat" : ISODate("2018-04-24T18:53:42.992Z"),
"lastHeartbeatRecv" : ISODate("2018-04-24T18:53:43.710Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.101:27017",
"configVersion" : 1
}
],
"ok" : 1,
"operationTime" : Timestamp(1524596020, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"$clusterTime" : {
"clusterTime" : Timestamp(1524596020, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
configs:PRIMARY> show dbs
admin 0.000GB
config 0.000GB
local 0.000GB
configs:PRIMARY> exit
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongos.conf
bind_ip=192.168.100.101
port=27025
logpath=/usr/local/mongodb/logs/mongodbs.log
fork=true
maxConns=5000
configdb=configs/192.168.100.101:27017,192.168.100.101:27018,192.168.100.101:27019
END
注:mongos的configdb参数只能指定一个(复制集中的primary)或多个(复制集中的全部节点);
[[email protected] bin]# touch ../logs/mongos.log
[[email protected] bin]# chmod 777 ../logs/mongos.log
[[email protected] bin]# mongos -f /usr/local/mongodb/bin/mongos.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1562
child process started successfully, parent exiting
[[email protected] ~]# netstat -utpln |grep mongo
tcp 0 0 192.168.100.101:27019 0.0.0.0: LISTEN 1601/mongod
tcp 0 0 192.168.100.101:27020 0.0.0.0:
LISTEN 1345/mongod
tcp 0 0 192.168.100.101:27025 0.0.0.0: LISTEN 1822/mongos
tcp 0 0 192.168.100.101:27017 0.0.0.0:
LISTEN 1437/mongod
tcp 0 0 192.168.100.101:27018 0.0.0.0:* LISTEN 1541/mongod

? 配置shard1的实例:
192.168.100.102:
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb1.conf
bind_ip=192.168.100.102
port=27017
dbpath=/usr/local/mongodb/mongodb1/
logpath=/usr/local/mongodb/logs/mongodb1.log
logappend=true
fork=true
maxConns=5000
replSet=shard1
#replication name
shardsvr=true
END
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb2.conf
bind_ip=192.168.100.102
port=27018
dbpath=/usr/local/mongodb/mongodb2/
logpath=/usr/local/mongodb/logs/mongodb2.log
logappend=true
fork=true
maxConns=5000
replSet=shard1
shardsvr=true
END
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb3.conf
bind_ip=192.168.100.102
port=27019
dbpath=/usr/local/mongodb/mongodb3/
logpath=/usr/local/mongodb/logs/mongodb3.log
logappend=true
fork=true
maxConns=5000
replSet=shard1
shardsvr=true
END
[[email protected] bin]# cd
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb3.conf
[[email protected] ~]# netstat -utpln |grep mongod
tcp 0 0 192.168.100.101:27019 0.0.0.0: LISTEN 2271/mongod
tcp 0 0 192.168.100.101:27017 0.0.0.0:
LISTEN 2440/mongod
tcp 0 0 192.168.100.101:27018 0.0.0.0:* LISTEN 1412/mongod
[[email protected] ~]# echo -e "/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb1.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb2.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb3.conf" >>/etc/rc.local
[[email protected] ~]# chmod +x /etc/rc.local
[[email protected] ~]# cat <<'END' >>/etc/init.d/mongodb
#!/bin/bash
# Quoted 'END' delimiter keeps $1/$2/$INSTANCE/$ACTION literal in the
# written script; ASCII quotes in the case patterns.
INSTANCE=$1
ACTION=$2
case "$ACTION" in
'start')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf;;
'stop')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf --shutdown;;
'restart')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf --shutdown
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf;;
esac
END
[[email protected] ~]# chmod +x /etc/init.d/mongodb
[[email protected] ~]# mongo --port 27017 --host 192.168.100.102

cfg={"_id":"shard1","members":[{"_id":0,"host":"192.168.100.102:27017"},{"_id":1,"host":"192.168.100.102:27018"},{"_id":2,"host":"192.168.100.102:27019"}]}
rs.initiate(cfg)
{ "ok" : 1 }
shard1:PRIMARY> rs.status()
{
"set" : "shard1",
"date" : ISODate("2018-04-24T19:06:53.160Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.100.102:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 6648,
"optime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T19:06:50Z"),
"electionTime" : Timestamp(1524590628, 1),
"electionDate" : ISODate("2018-04-24T17:23:48Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.100.102:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6195,
"optime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T19:06:50Z"),
"optimeDurableDate" : ISODate("2018-04-24T19:06:50Z"),
"lastHeartbeat" : ISODate("2018-04-24T19:06:52.176Z"),
"lastHeartbeatRecv" : ISODate("2018-04-24T19:06:52.626Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.102:27017",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.100.102:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6195,
"optime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T19:06:50Z"),
"optimeDurableDate" : ISODate("2018-04-24T19:06:50Z"),
"lastHeartbeat" : ISODate("2018-04-24T19:06:52.177Z"),
"lastHeartbeatRecv" : ISODate("2018-04-24T19:06:52.626Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.102:27017",
"configVersion" : 1
}
],
"ok" : 1
}
shard1:PRIMARY> show dbs
admin 0.000GB
config 0.000GB
local 0.000GB
shard1:PRIMARY> exit
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongos.conf
bind_ip=192.168.100.102
port=27025
logpath=/usr/local/mongodb/logs/mongodbs.log
fork=true
maxConns=5000
configdb=configs/192.168.100.101:27017,192.168.100.101:27018,192.168.100.101:27019
END
[[email protected] bin]# touch ../logs/mongos.log
[[email protected] bin]# chmod 777 ../logs/mongos.log
[[email protected] bin]# mongos -f /usr/local/mongodb/bin/mongos.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1562
child process started successfully, parent exiting
[[email protected] ~]# netstat -utpln| grep mongo
tcp 0 0 192.168.100.102:27019 0.0.0.0: LISTEN 1098/mongod
tcp 0 0 192.168.100.102:27020 0.0.0.0:
LISTEN 1125/mongod
tcp 0 0 192.168.100.102:27025 0.0.0.0: LISTEN 1562/mongos
tcp 0 0 192.168.100.102:27017 0.0.0.0:
LISTEN 1044/mongod
tcp 0 0 192.168.100.102:27018 0.0.0.0:* LISTEN 1071/mongod

? 配置shard2实例:
192.168.100.103:
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb1.conf
bind_ip=192.168.100.103
port=27017
dbpath=/usr/local/mongodb/mongodb1/
logpath=/usr/local/mongodb/logs/mongodb1.log
logappend=true
fork=true
maxConns=5000
replSet=shard2
#replication name
shardsvr=true
END
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb2.conf
bind_ip=192.168.100.103
port=27018
dbpath=/usr/local/mongodb/mongodb2/
logpath=/usr/local/mongodb/logs/mongodb2.log
logappend=true
fork=true
maxConns=5000
replSet=shard2
shardsvr=true
END
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongodb3.conf
bind_ip=192.168.100.103
port=27019
dbpath=/usr/local/mongodb/mongodb3/
logpath=/usr/local/mongodb/logs/mongodb3.log
logappend=true
fork=true
maxConns=5000
replSet=shard2
shardsvr=true
END
[[email protected] bin]# cd
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb1.conf
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb2.conf
[[email protected] ~]# mongod -f /usr/local/mongodb/bin/mongodb3.conf
[[email protected] ~]# netstat -utpln |grep mongod
tcp 0 0 192.168.100.101:27019 0.0.0.0: LISTEN 2271/mongod
tcp 0 0 192.168.100.101:27017 0.0.0.0:
LISTEN 2440/mongod
tcp 0 0 192.168.100.101:27018 0.0.0.0:* LISTEN 1412/mongod
[[email protected] ~]# echo -e "/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb1.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb2.conf\n/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/mongodb3.conf" >>/etc/rc.local
[[email protected] ~]# chmod +x /etc/rc.local
[[email protected] ~]# cat <<'END' >>/etc/init.d/mongodb
#!/bin/bash
# Quoted 'END' delimiter keeps $1/$2/$INSTANCE/$ACTION literal in the
# written script; ASCII quotes in the case patterns.
INSTANCE=$1
ACTION=$2
case "$ACTION" in
'start')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf;;
'stop')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf --shutdown;;
'restart')
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf --shutdown
/usr/local/mongodb/bin/mongod -f /usr/local/mongodb/bin/"$INSTANCE".conf;;
esac
END
[[email protected] ~]# chmod +x /etc/init.d/mongodb
[[email protected] ~]# mongo --port 27017 --host 192.168.100.103

cfg={"_id":"shard2","members":[{"_id":0,"host":"192.168.100.103:27017"},{"_id":1,"host":"192.168.100.103:27018"},{"_id":2,"host":"192.168.100.103:27019"}]}
rs.initiate(cfg)
{ "ok" : 1 }
shard2:PRIMARY> rs.status()
{
"set" : "shard2",
"date" : ISODate("2018-04-24T19:06:53.160Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.100.103:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 6648,
"optime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T19:06:50Z"),
"electionTime" : Timestamp(1524590628, 1),
"electionDate" : ISODate("2018-04-24T17:23:48Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.100.103:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6195,
"optime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T19:06:50Z"),
"optimeDurableDate" : ISODate("2018-04-24T19:06:50Z"),
"lastHeartbeat" : ISODate("2018-04-24T19:06:52.176Z"),
"lastHeartbeatRecv" : ISODate("2018-04-24T19:06:52.626Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.103:27017",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.100.103:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6195,
"optime" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1524596810, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2018-04-24T19:06:50Z"),
"optimeDurableDate" : ISODate("2018-04-24T19:06:50Z"),
"lastHeartbeat" : ISODate("2018-04-24T19:06:52.177Z"),
"lastHeartbeatRecv" : ISODate("2018-04-24T19:06:52.626Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.100.103:27017",
"configVersion" : 1
}
],
"ok" : 1
}
shard2:PRIMARY> show dbs
admin 0.000GB
config 0.000GB
local 0.000GB
shard2:PRIMARY> exit
[[email protected] bin]# cat <<END >>/usr/local/mongodb/bin/mongos.conf
bind_ip=192.168.100.103
port=27025
logpath=/usr/local/mongodb/logs/mongodbs.log
fork=true
maxConns=5000
configdb=configs/192.168.100.101:27017,192.168.100.101:27018,192.168.100.101:27019
END
[[email protected] bin]# touch ../logs/mongos.log
[[email protected] bin]# chmod 777 ../logs/mongos.log
[[email protected] bin]# mongos -f /usr/local/mongodb/bin/mongos.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1562
child process started successfully, parent exiting
[[email protected] ~]# netstat -utpln |grep mongo
tcp 0 0 192.168.100.103:27019 0.0.0.0: LISTEN 1095/mongod
tcp 0 0 192.168.100.103:27020 0.0.0.0:
LISTEN 1122/mongod
tcp 0 0 192.168.100.103:27025 0.0.0.0: LISTEN 12122/mongos
tcp 0 0 192.168.100.103:27017 0.0.0.0:
LISTEN 1041/mongod
tcp 0 0 192.168.100.103:27018 0.0.0.0:* LISTEN 1068/mongod

? 配置分片并验证:
192.168.100.101(随意选择mongos进行设置分片,三台mongos会同步以下操作):
[[email protected] ~]# mongo --port 27025 --host 192.168.100.101
mongos> use admin;
switched to db admin
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5adf66d7518b3e5b3aad4e77")
}
shards:
active mongoses:
"3.6.3" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
mongos>
sh.addShard("shard1/192.168.100.102:27017,192.168.100.102:27018,192.168.100.102:27019")
{
"shardAdded" : "shard1",
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1524598580, 9),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1524598580, 9)
}
mongos> sh.addShard("shard2/192.168.100.103:27017,192.168.100.103:27018,192.168.100.103:27019")
{
"shardAdded" : "shard2",
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1524598657, 7),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1524598657, 7)
}
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5adf66d7518b3e5b3aad4e77")
}
shards:
{ "_id" : "shard1", "host" : "shard1/192.168.100.102:27017,192.168.100.102:27018,192.168.100.102:27019", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/192.168.100.103:27017,192.168.100.103:27018,192.168.100.103:27019", "state" : 1 }
active mongoses:
"3.6.3" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }

注:目前配置服务、路由服务、分片服务、副本集服务都已经串联起来了,但我们的目的是希望插入数据,数据能够自动分片。连接在mongos上,准备让指定的数据库、指定的集合分片生效。
[[email protected] ~]# mongo --port 27025 --host 192.168.100.101
mongos> use admin
mongos> sh.enableSharding("testdb") ##开启数据库的分片
{
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1524599672, 13),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1524599672, 13)
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5adf66d7518b3e5b3aad4e77")
}
shards:
{ "_id" : "shard1", "host" : "shard1/192.168.100.102:27017,192.168.100.102:27018,192.168.100.102:27019", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/192.168.100.103:27017,192.168.100.103:27018,192.168.100.103:27019", "state" : 1 }
active mongoses:
"3.6.3" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "testdb", "primary" : "shard2", "partitioned" : true }
mongos> db.runCommand({shardcollection:"testdb.table1", key:{_id:1}}); ##开启数据库中集合的分片
{
"collectionsharded" : "testdb.table1",
"collectionUUID" : UUID("883bb1e2-b218-41ab-8122-6a5cf4df5e7b"),
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1524601471, 14),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1524601471, 14)
}
mongos> use testdb;
mongos> for(i=1;i<=10000;i++){db.table1.insert({"id":i,"name":"huge"})};
WriteResult({ "nInserted" : 1 })
mongos> show collections
table1
mongos> db.table1.count()
10000
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5adf66d7518b3e5b3aad4e77")
}
shards:
{ "_id" : "shard1", "host" : "shard1/192.168.100.102:27017,192.168.100.102:27018,192.168.100.102:27019", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/192.168.100.103:27017,192.168.100.103:27018,192.168.100.103:27019", "state" : 1 }
active mongoses:
"3.6.3" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "testdb", "primary" : "shard2", "partitioned" : true }
testdb.table1
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard2 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard2 Timestamp(1, 0)
mongos> use admin
switched to db admin
mongos> sh.enableSharding("testdb2")
{
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1524602371, 7),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1524602371, 7)
}
mongos> db.runCommand({shardcollection:"testdb2.table1", key:{_id:1}});
mongos> use testdb2
switched to db testdb2
mongos> for(i=1;i<=10000;i++){db.table1.insert({"id":i,"name":"huge"})};
WriteResult({ "nInserted" : 1 })
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5adf66d7518b3e5b3aad4e77")
}
shards:
{ "_id" : "shard1", "host" : "shard1/192.168.100.102:27017,192.168.100.102:27018,192.168.100.102:27019", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/192.168.100.103:27017,192.168.100.103:27018,192.168.100.103:27019", "state" : 1 }
active mongoses:
"3.6.3" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "testdb", "primary" : "shard2", "partitioned" : true }
testdb.table1
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard2 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard2 Timestamp(1, 0)

    {  "_id" : "testdb2",  "primary" : "shard1",  "partitioned" : true }
            testdb2.table1
                    shard key: { "_id" : 1 }
                    unique: false
                    balancing: true
                    chunks:
                            shard1  1
                    { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)

mongos> db.table1.stats() ##查看集合的分片情况
{
"sharded" : true,
"capped" : false,
"ns" : "testdb2.table1",
"count" : 10000,
"size" : 490000,
"storageSize" : 167936,
"totalIndexSize" : 102400,
"indexSizes" : {
"id" : 102400
},
"avgObjSize" : 49,
"nindexes" : 1,
"nchunks" : 1,
"shards" : {
"shard1" : {
"ns" : "testdb2.table1",
"size" : 490000,
"count" : 10000,
"avgObjSize" : 49,
"storageSize" : 167936,
"capped" : false,
"wiredTiger" : {
"metadata" : {
"formatVersion" : 1
},
"creationString" :
...
在192.168.100.102和192.168.100.103上登录mongos节点查看上述配置,发现已经同步;

以上是关于centos7 mongodb4.4分片集群部署的主要内容,如果未能解决你的问题,请参考以下文章

centos7部署Mongodb复制集结合分片(超详细)

使用ansible一键部署MongoDB分片集群

教你快速搭建mongodb分片集群

MongoDB学习笔记:分片

MongoDB分片集群部署

手把手超详细Docker部署MongoDB集群