57-4 数据库分片概念及mongodb sharding的实现
Posted
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了57-4 数据库分片概念及mongodb sharding的实现相关的知识,希望对你有一定的参考价值。
04 数据库分片的概念及mongodb sharding的实现
配置环境:
node1: 192.168.1.121 CentOS release 6.7
node2: 192.168.1.122 CentOS release 6.7
node3: 192.168.1.123 CentOS release 6.7
[root@node1 ~]# vim /etc/hosts
添加
192.168.1.121 node1
192.168.1.122 node2
192.168.1.123 node3
[root@node1 ~]# scp /etc/hosts node2:/etc
[root@node1 ~]# scp /etc/hosts node3:/etc
[root@node1 ~]# service mongod stop
[root@node1 ~]# vim /etc/mongod.conf
修改
#replSet=setname
为
replSet=testSet
replIndexPrefetch=_id_only
[root@node1 ~]# service mongod start
[root@node1 ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
> show dbs
admin (empty)
local 0.078GB
testdb 0.078GB
> use local
switched to db local
> show collections
startup_log
system.indexes
> exit
bye
[root@node1 mongodb-2.6.4]# scp mongodb-org-server-2.6.4-1.x86_64.rpm mongodb-org-tools-2.6.4-1.x86_64.rpm mongodb-org-shell-2.6.4-1.x86_64.rpm node2:/root
[root@node1 mongodb-2.6.4]# scp mongodb-org-server-2.6.4-1.x86_64.rpm mongodb-org-tools-2.6.4-1.x86_64.rpm mongodb-org-shell-2.6.4-1.x86_64.rpm node3:/root
[[email protected] ~]# yum -y install *rpm
[[email protected] ~]# mkdir -p /mongodb/data
[[email protected] ~]# chown -R mongod.mongod /mongodb/
[[email protected] ~]# yum -y install *rpm
[[email protected] ~]# mkdir -p /mongodb/data
[[email protected] ~]# chown -R mongod.mongod /mongodb/
[root@node1 ~]# scp /etc/mongod.conf node2:/etc/
[root@node1 ~]# scp /etc/mongod.conf node3:/etc/
[[email protected] ~]# service mongod start
[[email protected] ~]# service mongod start
[[email protected] ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
> rs.status()
{
"startupStatus" : 3,
"info" : "run rs.initiate(...) if not yet done for the set",
"ok" : 0,
"errmsg" : "can't get local.system.replset config from self or any seed (EMPTYCONFIG)"
}
> rs.initiate()
{
"info2" : "no configuration explicitly specified -- making one",
"me" : "node1:27017",
"info" : "Config now saved locally. Should come online in about a minute.",
"ok" : 1
}
> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-08T14:33:14Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1316,
"optime" : Timestamp(1483885955, 1),
"optimeDate" : ISODate("2017-01-08T14:32:35Z"),
"electionTime" : Timestamp(1483885956, 1),
"electionDate" : ISODate("2017-01-08T14:32:36Z"),
"self" : true
}
],
"ok" : 1
}
#添加节点
testSet:PRIMARY> rs.add("192.168.1.122")
{ "ok" : 1 }
testSet:PRIMARY> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-08T14:38:50Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1652,
"optime" : Timestamp(1483886304, 1),
"optimeDate" : ISODate("2017-01-08T14:38:24Z"),
"electionTime" : Timestamp(1483885956, 1),
"electionDate" : ISODate("2017-01-08T14:32:36Z"),
"self" : true
},
{
"_id" : 1,
"name" : "192.168.1.122:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 26,
"optime" : Timestamp(1483886304, 1),
"optimeDate" : ISODate("2017-01-08T14:38:24Z"),
"lastHeartbeat" : ISODate("2017-01-08T14:38:48Z"),
"lastHeartbeatRecv" : ISODate("2017-01-08T14:38:48Z"),
"pingMs" : 1,
"syncingTo" : "node1:27017"
}
],
"ok" : 1
}
[[email protected] ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
http://docs.mongodb.org/
Questions? Try the support group
http://groups.google.com/group/mongodb-user
testSet:SECONDARY> show dbs
admin (empty)
local 1.078GB
testdb 0.078GB
testSet:SECONDARY> use testdb;
switched to db testdb
testSet:SECONDARY> rs.slaveOk()
testSet:SECONDARY> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-09T12:02:14Z"),
"myState" : 2,
"syncingTo" : "node1:27017",
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 77028,
"optime" : Timestamp(1483886304, 1),
"optimeDate" : ISODate("2017-01-08T14:38:24Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:02:13Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:02:13Z"),
"pingMs" : 1,
"electionTime" : Timestamp(1483885956, 1),
"electionDate" : ISODate("2017-01-08T14:32:36Z")
},
{
"_id" : 1,
"name" : "192.168.1.122:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 77851,
"optime" : Timestamp(1483886304, 1),
"optimeDate" : ISODate("2017-01-08T14:38:24Z"),
"self" : true
}
],
"ok" : 1
}
testSet:SECONDARY> rs.isMaster()
{
"setName" : "testSet",
"setVersion" : 2,
"ismaster" : false,
"secondary" : true,
"hosts" : [
"192.168.1.122:27017",
"node1:27017"
],
"primary" : "node1:27017",
"me" : "192.168.1.122:27017",
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2017-01-09T12:03:59.702Z"),
"maxWireVersion" : 2,
"minWireVersion" : 0,
"ok" : 1
}
testSet:PRIMARY> rs.isMaster()
{
"setName" : "testSet",
"setVersion" : 2,
"ismaster" : true,
"secondary" : false,
"hosts" : [
"node1:27017",
"192.168.1.122:27017"
],
"primary" : "node1:27017",
"me" : "node1:27017",
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2017-01-09T12:05:47.182Z"),
"maxWireVersion" : 2,
"minWireVersion" : 0,
"ok" : 1
}
#增加新节点
testSet:PRIMARY> rs.add("192.168.1.123")
{ "ok" : 1 }
[[email protected] ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
http://docs.mongodb.org/
Questions? Try the support group
http://groups.google.com/group/mongodb-user
testSet:SECONDARY> rs.slaveOk()
testSet:SECONDARY> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-09T12:10:20Z"),
"myState" : 2,
"syncingTo" : "node1:27017",
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 78,
"optime" : Timestamp(1483963739, 1),
"optimeDate" : ISODate("2017-01-09T12:08:59Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:10:18Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:10:19Z"),
"pingMs" : 1,
"electionTime" : Timestamp(1483885956, 1),
"electionDate" : ISODate("2017-01-08T14:32:36Z")
},
{
"_id" : 1,
"name" : "192.168.1.122:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 78,
"optime" : Timestamp(1483963739, 1),
"optimeDate" : ISODate("2017-01-09T12:08:59Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:10:18Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:10:18Z"),
"pingMs" : 1,
"syncingTo" : "node1:27017"
},
{
"_id" : 2,
"name" : "192.168.1.123:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 78317,
"optime" : Timestamp(1483963739, 1),
"optimeDate" : ISODate("2017-01-09T12:08:59Z"),
"self" : true
}
],
"ok" : 1
}
testSet:SECONDARY> use testdb
switched to db testdb
testSet:SECONDARY> db.students.findOne()
{ "_id" : ObjectId("5871e94113222f399a5240a3"), "name" : "tom", "age" : 23 }
testSet:SECONDARY> rs.conf()
{
"_id" : "testSet",
"version" : 3,
"members" : [
{
"_id" : 0,
"host" : "node1:27017"
},
{
"_id" : 1,
"host" : "192.168.1.122:27017"
},
{
"_id" : 2,
"host" : "192.168.1.123:27017"
}
]
}
testSet:PRIMARY> use testdb
switched to db testdb
testSet:PRIMARY> db.classes.insert({class: "One",nostu: 40})
WriteResult({ "nInserted" : 1 })
testSet:PRIMARY> show collections;
classes
students
system.indexes
testSet:SECONDARY> db.classes.findOne()
{
"_id" : ObjectId("58737e8606a316aec46edfdc"),
"class" : "One",
"nostu" : 40
}
testSet:SECONDARY> db.classes.insert({class: "Two", nostu: 50})
WriteResult({ "writeError" : { "code" : undefined, "errmsg" : "not master" } })
testSet:SECONDARY> rs.conf()
{
"_id" : "testSet",
"version" : 3,
"members" : [
{
"_id" : 0,
"host" : "node1:27017"
},
{
"_id" : 1,
"host" : "192.168.1.122:27017"
},
{
"_id" : 2,
"host" : "192.168.1.123:27017"
}
]
}
#使主结点“下台”
testSet:PRIMARY> rs.stepDown()
2017-01-09T20:23:48.978+0800 DBClientCursor::init call() failed
2017-01-09T20:23:48.980+0800 Error: error doing query: failed at src/mongo/shell/query.js:81
2017-01-09T20:23:48.982+0800 trying reconnect to 127.0.0.1:27017 (127.0.0.1) failed
2017-01-09T20:23:48.984+0800 reconnect 127.0.0.1:27017 (127.0.0.1) ok
testSet:SECONDARY> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-09T12:24:27Z"),
"myState" : 2,
"syncingTo" : "192.168.1.123:27017",
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 79989,
"optime" : Timestamp(1483964038, 1),
"optimeDate" : ISODate("2017-01-09T12:13:58Z"),
"infoMessage" : "syncing to: 192.168.1.123:27017",
"self" : true
},
{
"_id" : 1,
"name" : "192.168.1.122:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 78363,
"optime" : Timestamp(1483964038, 1),
"optimeDate" : ISODate("2017-01-09T12:13:58Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:24:25Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:24:25Z"),
"pingMs" : 1,
"lastHeartbeatMessage" : "syncing to: node1:27017",
"syncingTo" : "node1:27017"
},
{
"_id" : 2,
"name" : "192.168.1.123:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 928,
"optime" : Timestamp(1483964038, 1),
"optimeDate" : ISODate("2017-01-09T12:13:58Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:24:26Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:24:25Z"),
"pingMs" : 1,
"electionTime" : Timestamp(1483964629, 1),
"electionDate" : ISODate("2017-01-09T12:23:49Z")
}
],
"ok" : 1
}
testSet:PRIMARY> db.printReplicationInfo()
configured oplog size: 990MB
log length start to end: 299secs (0.08hrs)
oplog first event time: Mon Jan 09 2017 20:08:59 GMT+0800 (CST)
oplog last event time: Mon Jan 09 2017 20:13:58 GMT+0800 (CST)
now: Mon Jan 09 2017 20:27:20 GMT+0800 (CST)
testSet:SECONDARY> db.printReplicationInfo()
configured oplog size: 990MB
log length start to end: 77734secs (21.59hrs)
oplog first event time: Sun Jan 08 2017 22:38:24 GMT+0800 (CST)
oplog last event time: Mon Jan 09 2017 20:13:58 GMT+0800 (CST)
now: Mon Jan 09 2017 20:28:01 GMT+0800 (CST)
testSet:SECONDARY> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-09T12:29:38Z"),
"myState" : 2,
"syncingTo" : "node1:27017",
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 78672,
"optime" : Timestamp(1483964038, 1),
"optimeDate" : ISODate("2017-01-09T12:13:58Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:29:37Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:29:37Z"),
"pingMs" : 1,
"syncingTo" : "192.168.1.123:27017"
},
{
"_id" : 1,
"name" : "192.168.1.122:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 79495,
"optime" : Timestamp(1483964038, 1),
"optimeDate" : ISODate("2017-01-09T12:13:58Z"),
"self" : true
},
{
"_id" : 2,
"name" : "192.168.1.123:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1238,
"optime" : Timestamp(1483964038, 1),
"optimeDate" : ISODate("2017-01-09T12:13:58Z"),
"lastHeartbeat" : ISODate("2017-01-09T12:29:37Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T12:29:37Z"),
"pingMs" : 1,
"electionTime" : Timestamp(1483964629, 1),
"electionDate" : ISODate("2017-01-09T12:23:49Z")
}
],
"ok" : 1
}
#node1节点下线
testSet:SECONDARY> exit
bye
[root@node1 ~]# service mongod stop
Stopping mongod: [ OK ]
#保存配置文件至cfg中(必须在主节点配置)
testSet:PRIMARY> cfg=rs.conf()
{
"_id" : "testSet",
"version" : 3,
"members" : [
{
"_id" : 0,
"host" : "node1:27017"
},
{
"_id" : 1,
"host" : "192.168.1.122:27017"
},
{
"_id" : 2,
"host" : "192.168.1.123:27017"
}
]
}
#设定节点优先级
testSet:PRIMARY> cfg.members[1].priority=2
2
#重读cfg配置文件
testSet:PRIMARY> rs.reconfig(cfg)
2017-01-09T21:08:58.403+0800 DBClientCursor::init call() failed
2017-01-09T21:08:58.404+0800 Error: error doing query: failed at src/mongo/shell/query.js:81
2017-01-09T21:08:58.406+0800 trying reconnect to 127.0.0.1:27017 (127.0.0.1) failed
2017-01-09T21:08:58.407+0800 reconnect 127.0.0.1:27017 (127.0.0.1) ok
testSet:SECONDARY>
testSet:SECONDARY> rs.status()
{
"set" : "testSet",
"date" : ISODate("2017-01-09T13:09:46Z"),
"myState" : 2,
"syncingTo" : "192.168.1.122:27017",
"members" : [
{
"_id" : 0,
"name" : "node1:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 98,
"optime" : Timestamp(1483967288, 1),
"optimeDate" : ISODate("2017-01-09T13:08:08Z"),
"lastHeartbeat" : ISODate("2017-01-09T13:09:45Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T13:09:45Z"),
"pingMs" : 1,
"lastHeartbeatMessage" : "syncing to: 192.168.1.122:27017",
"syncingTo" : "192.168.1.122:27017"
},
{
"_id" : 1,
"name" : "192.168.1.122:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 98,
"optime" : Timestamp(1483967288, 1),
"optimeDate" : ISODate("2017-01-09T13:08:08Z"),
"lastHeartbeat" : ISODate("2017-01-09T13:09:45Z"),
"lastHeartbeatRecv" : ISODate("2017-01-09T13:09:46Z"),
"pingMs" : 1,
"electionTime" : Timestamp(1483967290, 1),
"electionDate" : ISODate("2017-01-09T13:08:10Z")
},
{
"_id" : 2,
"name" : "192.168.1.123:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 81883,
"optime" : Timestamp(1483967288, 1),
"optimeDate" : ISODate("2017-01-09T13:08:08Z"),
"infoMessage" : "syncing to: 192.168.1.122:27017",
"self" : true
}
],
"ok" : 1
}
testSet:SECONDARY> rs.conf()
{
"_id" : "testSet",
"version" : 4,
"members" : [
{
"_id" : 0,
"host" : "node1:27017"
},
{
"_id" : 1,
"host" : "192.168.1.122:27017",
"priority" : 2
},
{
"_id" : 2,
"host" : "192.168.1.123:27017"
}
]
}
testSet:PRIMARY> cfg=rs.conf()
{
"_id" : "testSet",
"version" : 4,
"members" : [
{
"_id" : 0,
"host" : "node1:27017"
},
{
"_id" : 1,
"host" : "192.168.1.122:27017",
"priority" : 2
},
{
"_id" : 2,
"host" : "192.168.1.123:27017"
}
]
}
testSet:PRIMARY> cfg.members[2].arbiterOnly=true
true
testSet:PRIMARY> rs.reconfig(cfg)
{
"errmsg" : "exception: arbiterOnly may not change for members",
"code" : 13510,
"ok" : 0
}
testSet:PRIMARY> rs.conf()
{
"_id" : "testSet",
"version" : 4,
"members" : [
{
"_id" : 0,
"host" : "node1:27017"
},
{
"_id" : 1,
"host" : "192.168.1.122:27017",
"priority" : 2
},
{
"_id" : 2,
"host" : "192.168.1.123:27017"
}
]
}
testSet:PRIMARY> rs.printSlaveReplicationInfo()
source: node1:27017
syncedTo: Mon Jan 09 2017 21:08:08 GMT+0800 (CST)
0 secs (0 hrs) behind the primary
source: 192.168.1.123:27017
syncedTo: Mon Jan 09 2017 21:08:08 GMT+0800 (CST)
0 secs (0 hrs) behind the primary
[[email protected] ~]# service mongod stop
[[email protected] ~]# service mongod stop
[[email protected] ~]# rm -rf /mongodb/
[[email protected] ~]# rm -rf /mongodb/data/
[[email protected] ~]# rm -rf /mongodb/data/
[[email protected] ~]# scp *rpm node4:/root
[[email protected] ~]# mkdir -p /mongodb/data
[[email protected] ~]# yum -y install *rpm
[[email protected] ~]# chown -R mongod.mongod /mongodb/
[[email protected] ~]# vim /etc/mongod.conf
修改
replSet=testSet
replIndexPrefetch=_id_only
为
#replSet=testSet
#replIndexPrefetch=_id_only
添加
dbpath=/mongodb/data
configsvr=true
[[email protected] ~]# install -o mongod -g mongod -d /mongodb/data
[[email protected] ~]# ls -ld /mongodb/data/
drwxr-xr-x 2 mongod mongod 4096 Jan 9 22:13 /mongodb/data/
[[email protected] ~]# service mongod start
[[email protected] ~]# cd mongodb-2.6.4/
[[email protected] mongodb-2.6.4]# yum -y install mongodb-org-mongos-2.6.4-1.x86_64.rpm
[[email protected] ~]# service mongod stop
[[email protected] ~]# rm -rf /mongodb/data/*
[[email protected] ~]# service mongod start
[[email protected] mongodb-2.6.4]# mongos --configdb=192.168.1.122 --fork
[[email protected] mongodb-2.6.4]# mongos --configdb=192.168.1.122 --fork --logpath=/var/log/mongodb/mongod.log
2017-01-09T22:28:03.812+0800 warning: running with 1 config server should be done only for testing purposes and is not recommended for production
about to fork child process, waiting until server is ready for connections.
forked process: 18397
child process started successfully, parent exiting
[[email protected] mongodb-2.6.4]# mongo --host 192.168.1.121
MongoDB shell version: 2.6.4
connecting to: 192.168.1.121:27017/test
[[email protected] ~]# install -o mongod -g mongod -d /mongodb/data
[[email protected] ~]# vim /etc/mongod.conf
修改
replSet=testSet
replIndexPrefetch=_id_only
为
#replSet=testSet
#replIndexPrefetch=_id_only
[[email protected] ~]# service mongod start
[[email protected] ~]# vim /etc/mongod.conf
修改
dbpath=/var/lib/mongo
为
dbpath=/mongodb/data
修改
bind_ip=127.0.0.1
为
#bind_ip=127.0.0.1
[[email protected] ~]# service mongod start
mongos> sh.addShard("192.168.1.122")
{
"ok" : 0,
"errmsg" : "couldn't connect to new shard socket exception [CONNECT_ERROR] for 192.168.1.122:27017"
}
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"version" : 4,
"minCompatibleVersion" : 4,
"currentVersion" : 5,
"clusterId" : ObjectId("58739d7487c21f53b917098b")
}
shards:
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
mongos> sh.addShard("192.168.1.123")
{
"ok" : 0,
"errmsg" : "host is part of set testSet, use replica set url format <setname>/<server1>,<server2>,...."
}
100:33(91411)
本文出自 “追梦” 博客,请务必保留此出处http://sihua.blog.51cto.com/377227/1890849
以上是关于57-4 数据库分片概念及mongodb sharding的实现的主要内容,如果未能解决你的问题,请参考以下文章