提示:本文介绍 MongoDB 分片集群(Sharding)的安装与搭建
文章目录
- 前言
- 一、环境需求
- 二、搭建第一套副本集(主从仲)
- 三、搭建第二套副本集(主从仲)
- [四、搭建config server](#四、搭建config server)
- 五、搭建mongos
- 六、创建分片
- 七、测试分片
- 总结
前言
随着业务规模不断增长,单台数据库服务器在存储空间、处理能力以及并发访问方面难以满足需求。当数据量从 GB 增长到 TB,甚至 PB 级别时,传统的垂直扩展方式(加大 CPU、内存、磁盘)已经无法继续提升性能,这时就需要一种能够将数据水平拆分、分布存储在多台服务器上的方法。
MongoDB 的 分片(Sharding)技术正是为了解决这一问题而设计。它可以把一个庞大的数据集合按某种规则拆分成多个"碎片",分别存储到不同的机器集群中,从而实现 无限水平扩展、负载均衡和高可用性。对于高并发、高数据量的业务(如日志系统、电商订单系统、实时分析等),分片几乎是必选方案。
下面将从原理层面对 MongoDB 分片进行清晰直观的讲解。
提示:以下是本篇文章正文内容,下面案例可供参考
一、环境需求
| 公网 | 内网 | 配置 | 节点 | 端口号 | 副本集名称 |
|---|---|---|---|---|---|
| 124.221.94.90 | 172.17.48.40 | 2C2G | 主 | 27017 | test、configsvr |
| 123.206.99.184 | 172.17.48.42 | 2C2G | 从 | 27017 | test、configsvr |
| 115.159.126.145 | 172.17.48.101 | 2C2G | 仲裁 | 27017、27018 | test、test1、configsvr、mongos |
| 175.24.138.26 | 172.17.48.231 | 2C2G | 主 | 27018 | test1 |
| 49.235.83.45 | 172.17.48.247 | 2C2G | 从 | 27018 | test1 |
二、搭建第一套副本集(主从仲)
2.1、安装mongodb,172.17.48.40、172.17.48.42、172.17.48.101,机器分别操作
bash
[root@localhost ~]# tar xvf mongodb-linux-x86_64-4.0.10.tgz -C /usr/local/
[root@localhost ~]# cd /usr/local/
[root@localhost local]# ln -s mongodb-linux-x86_64-4.0.10/ mongodb
[root@localhost local]# cd mongodb
[root@localhost mongodb]# mkdir config
[root@localhost mongodb]# cd config/
[root@localhost config]# vim mongodb-27017.conf
添加如下:
bind_ip = 0.0.0.0
port = 27017
dbpath = /var/lib/mongodb/27017
logpath = /var/log/mongodb_27017.log
logappend = true
fork = true
auth = false
directoryperdb = true
storageEngine = wiredTiger
shardsvr=true ##如果不添加这项,后续进行分片不成功
replSet = test ## 副本集名称
#keyFile = /usr/local/mongodb/config/mongodb-keyfile ##高版本不需要配置密钥
[root@localhost config]# mkdir -pv /var/lib/mongodb/27017
[root@localhost config]# ln -s /usr/local/mongodb/bin/* /usr/local/bin/
## 启动mongodb
[root@localhost config]# mongod -f /usr/local/mongodb/config/mongodb-27017.conf
about to fork child process, waiting until server is ready for connections.
forked process: 42376
child process started successfully, parent exiting
## 关闭mongodb
[root@localhost config]# mongod -f /usr/local/mongodb/config/mongodb-27017.conf --shutdown
2.2、搭建第一套副本集
##########任意一台登录mongodb,本人是172.17.48.40机器上操作的##########
bash
[root@localhost config]# mongo --port 27017
MongoDB shell version v4.0.10
connecting to: mongodb://127.0.0.1:27017/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("4529d43d-9513-4065-aa8c-4a228947c12a") }
MongoDB server version: 4.0.10
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
http://docs.mongodb.org/
Questions? Try the support group
http://groups.google.com/group/mongodb-user
Server has startup warnings:
2023-09-07T22:18:59.649+0800 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2023-09-07T22:18:59.649+0800 I CONTROL [initandlisten]
2023-09-07T22:18:59.649+0800 I CONTROL [initandlisten]
2023-09-07T22:18:59.649+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
注:"priority" 是优先级,默认为1;优先级为0的是被动节点,不能成为活跃节点;优先级不为0时按照由大到小选出活跃节点。"arbiterOnly" 表示仲裁节点,只参与投票,不接收数据,也不能成为活跃节点。
2023-09-07T22:18:59.649+0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2023-09-07T22:18:59.650+0800 I CONTROL [initandlisten]
> config = {_id: "test", members: [] } ## "_id": 副本集的名称
{ "_id" : "test", "members" : [ ] } ## "members": 副本集的服务器列表
> config.members.push({_id: 0, host: "172.17.48.40:27017", "priority": 1}) ## "_id": 服务器的唯一ID,"host": 服务器主机
1
> config.members.push({_id: 1, host: "172.17.48.42:27017", "priority": 1})
2
> config.members.push({_id: 2, host: "172.17.48.101:27017", "priority": 1})
3
> rs.initiate(config) ## 出现以下内容"ok : 1"表示成功
{
"ok" : 1,
"operationTime" : Timestamp(1694097416, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694097416, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
#########查看副本集的状态##########
test:PRIMARY> rs.status()
{
"set" : "test",
"date" : ISODate("2025-11-17T08:00:59.244Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
}
},
"lastStableCheckpointTimestamp" : Timestamp(1763366453, 1),
"members" : [
{
"_id" : 0,
"name" : "172.17.48.40:27017", ## 172.17.48.40机器是主节点
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", # 主节点
"uptime" : 7904,
"optime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2025-11-17T08:00:53Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1763358651, 1),
"electionDate" : ISODate("2025-11-17T05:50:51Z"),
"configVersion" : 3,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "172.17.48.42:27017", ## 172.17.48.42是从节点
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", ## 表示从节点
"uptime" : 7818,
"optime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2025-11-17T08:00:53Z"),
"optimeDurableDate" : ISODate("2025-11-17T08:00:53Z"),
"lastHeartbeat" : ISODate("2025-11-17T08:00:57.751Z"),
"lastHeartbeatRecv" : ISODate("2025-11-17T08:00:57.746Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "172.17.48.40:27017",
"syncSourceHost" : "172.17.48.40:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 3
},
{
"_id" : 2,
"name" : "172.17.48.101:27017", ## 172.17.48.101是从节点
"health" : 1,
"state" : 7,
"stateStr" : "SECONDARY", ## 表示从节点
"uptime" : 7717,
"lastHeartbeat" : ISODate("2025-11-17T08:00:57.465Z"),
"lastHeartbeatRecv" : ISODate("2025-11-17T08:00:58.853Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 3
}
],
"ok" : 1,
"operationTime" : Timestamp(1763366453, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1763366453, 1),
"$configServerState" : {
"opTime" : {
"ts" : Timestamp(1763366450, 4),
"t" : NumberLong(1)
}
},
"$clusterTime" : {
"clusterTime" : Timestamp(1763366453, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
注意:以上提示可以看出172.17.48.40机器是主节点而172.17.48.42、172.17.48.101机器是从节点,可以看出是一主两从,并没有仲裁节点,所以要手动搭建。
注意:在高版本以后 3.x 或 4.x 需要提前创建账号才能使用 rs.add()
=======配置仲裁节点=======
注意:当你敲击回车键时,之前在172.17.48.40机器上显示的 test:SECONDARY(从)提示符,这时候也变成了 test:PRIMARY>(主)
以下操作在主节点上执行
test:PRIMARY> use admin
switched to db admin
test:PRIMARY> db.createUser({user:"root",pwd:"root",roles:[{role:"root",db:"admin"}]}); ## 配置账号密码登录
Successfully added user: {
"user" : "root",
"roles" : [
{
"role" : "root",
"db" : "admin"
}
]
}
test:PRIMARY> use admin ## 登录admin
switched to db admin
test:PRIMARY> db.auth('root','root') ## 账号密码验证
1
test:PRIMARY> rs.remove("172.17.48.101:27017") ## 删除一个从节点
{
"ok" : 1,
"operationTime" : Timestamp(1694097585, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694097585, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
test:PRIMARY> rs.addArb("172.17.48.101:27017") ## 添加一个仲裁点
{
"ok" : 1,
"operationTime" : Timestamp(1694097605, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694097605, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
=======再次查看状态=======
test:PRIMARY> rs.status()
{
"set" : "test",
"date" : ISODate("2023-09-07T14:40:41.996Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
}
},
"lastStableCheckpointTimestamp" : Timestamp(1694097605, 1),
"members" : [
{
"_id" : 0,
"name" : "172.17.48.40:27017", ## 172.17.48.40仍然是主
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1303,
"optime" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T14:40:38Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1694097426, 1),
"electionDate" : ISODate("2023-09-07T14:37:06Z"),
"configVersion" : 3,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "172.17.48.42:27017", ##172.17.48.42仍然是从
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 225,
"optime" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1694097638, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T14:40:38Z"),
"optimeDurableDate" : ISODate("2023-09-07T14:40:38Z"),
"lastHeartbeat" : ISODate("2023-09-07T14:40:41.583Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T14:40:41.583Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "10.10.1.2:27017",
"syncSourceHost" : "10.10.1.2:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 3
},
{
"_id" : 2,
"name" : "10.10.1.4:27017", ##10.10.1.4可以看出报错"(not reachable/healthy)"
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)", ##出现报错(无法到达/正常)
"uptime" : 0,
"lastHeartbeat" : ISODate("2023-09-07T14:40:41.609Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T14:40:05.580Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "Error connecting to 10.10.1.4:27017 :: caused by :: Connection refused",
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : -1
}
],
"ok" : 1,
"operationTime" : Timestamp(1694097638, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694097638, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
=======以上信息查看得出结果172.17.48.101机器报错=======
##不用担心此时不要慌退出mongodb执行以下命令
##注意:先关闭再重启:先重启主节点,再重启添加的仲裁节点;也可以直接登录172.17.48.101机器启动mongodb(本人是直接启动)
========172.17.48.101机器上操作=======
[root@localhost config]# mongod -f /usr/local/mongodb/config/mongodb-27017.conf
about to fork child process, waiting until server is ready for connections.
forked process: 48120
child process started successfully, parent exiting
=======再回到172.17.48.40主节点机器上操作=======
##再次查看副本集的状态
test:PRIMARY> rs.status()
{
"set" : "test",
"date" : ISODate("2023-09-07T14:42:39.477Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1694097758, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1694097758, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1694097758, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1694097758, 1),
"t" : NumberLong(1)
}
},
"lastStableCheckpointTimestamp" : Timestamp(1694097728, 1),
"members" : [
{
"_id" : 0,
"name" : "172.17.48.40:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", ## 主节点
"uptime" : 1421,
"optime" : {
"ts" : Timestamp(1694097758, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T14:42:38Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1694097426, 1),
"electionDate" : ISODate("2023-09-07T14:37:06Z"),
"configVersion" : 3,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "172.17.48.42:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", ##从节点
"uptime" : 342,
"optime" : {
"ts" : Timestamp(1694097748, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1694097748, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T14:42:28Z"),
"optimeDurableDate" : ISODate("2023-09-07T14:42:28Z"),
"lastHeartbeat" : ISODate("2023-09-07T14:42:37.596Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T14:42:37.595Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "10.10.1.2:27017",
"syncSourceHost" : "10.10.1.2:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 3
},
{
"_id" : 2,
"name" : "172.17.48.101:27017",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER", ##仲裁节点
"uptime" : 5,
"lastHeartbeat" : ISODate("2023-09-07T14:42:37.719Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T14:42:38.802Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 3
}
],
"ok" : 1,
"operationTime" : Timestamp(1694097758, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694097758, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
=======以上信息可以看出主从仲三个节点都已经选举出来了=========
==============================至此第一套副本集搭建完成===============================
三、搭建第二套副本集(主从仲)
##############任意一台机器登录mongodb (本人登录的是172.17.48.231机器)#############
注意:172.17.48.101机器上安装时比较特殊,不用再解压缩包,直接在config/路径下创建一个主配置文件即可,因为config/路径下已有之前创建好的mongodb-27017.conf文件
3.1、装mongodb,172.17.48.231、172.17.48.247、172.17.48.101,机器上分别操作
bash
[root@localhost config]# vim mongodb-27018.conf
添加配置如下:
bind_ip = 0.0.0.0
port = 27018
dbpath = /var/lib/mongodb/27018
logpath = /var/log/mongodb_27018.log
logappend = true
fork = true
auth = false
directoryperdb = true
storageEngine = wiredTiger
shardsvr=true
replSet = test1
##以下操作在172.17.48.231、172.17.48.247机器上执行
[root@localhost ~]# tar xvf mongodb-linux-x86_64-4.0.10.tgz -C /usr/local/
[root@localhost ~]# cd /usr/local/
[root@localhost local]# ln -s mongodb-linux-x86_64-4.0.10/ mongodb
[root@localhost local]# cd mongodb
[root@localhost mongodb]# mkdir config
[root@localhost mongodb]# cd config/
[root@localhost config]# vim mongodb-27018.conf
添加如下:
bind_ip = 0.0.0.0
port = 27018
dbpath = /var/lib/mongodb/27018
logpath = /var/log/mongodb_27018.log
logappend = true
fork = true
auth = false
directoryperdb = true
storageEngine = wiredTiger
shardsvr=true ##如果不添加这项,后续进行分片不成功
replSet = test1
#keyFile = /usr/local/mongodb/config/mongodb-keyfile ##高版本不需要配置密钥
[root@localhost config]# mkdir -p /var/lib/mongodb/27018
mkdir: 已创建目录 '/var/lib/mongodb'
mkdir: 已创建目录 '/var/lib/mongodb/27018'
[root@localhost config]# ln -s /usr/local/mongodb/bin/* /usr/local/bin/
##启动mongodb
[root@localhost config]# mongod -f /usr/local/mongodb/config/mongodb-27018.conf
about to fork child process, waiting until server is ready for connections.
forked process: 50138
child process started successfully, parent exiting
3.2、搭建第二套副本集(主从仲)
bash
[root@localhost config]# mongo --port 27018
MongoDB shell version v4.0.10
connecting to: mongodb://127.0.0.1:27018/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("053af6ac-909f-4140-a049-b5da451ea27a") }
MongoDB server version: 4.0.10
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
http://docs.mongodb.org/
Questions? Try the support group
http://groups.google.com/group/mongodb-user
Server has startup warnings:
2023-09-08T00:30:31.985+0800 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2023-09-08T00:30:31.985+0800 I CONTROL [initandlisten]
2023-09-08T00:30:31.985+0800 I CONTROL [initandlisten]
2023-09-08T00:30:31.985+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2023-09-08T00:30:31.985+0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2023-09-08T00:30:31.985+0800 I CONTROL [initandlisten]
> config = {_id: "test1", members: [] } ##第一步
{ "_id" : "test1", "members" : [ ] }
> config.members.push({_id: 0, host: "172.17.48.231:27018", "priority": 1}) ##第二步
1
> config.members.push({_id: 1, host: "172.17.48.247:27018", "priority": 1}) ##第三步
2
> config.members.push({_id: 2, host: "172.17.48.101:27018", "priority": 1}) ##第四步
3
> rs.initiate(config) ##第五步
"ok" : 1,
"operationTime" : Timestamp(1694104398, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694104398, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
test1:SECONDARY> rs.status()
{
"set" : "test1",
"date" : ISODate("2023-09-07T16:33:43.298Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
}
},
"lastStableCheckpointTimestamp" : Timestamp(1694104410, 1),
"members" : [
{
"_id" : 0,
"name" : "172.17.48.231:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 193,
"optime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T16:33:41Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1694104409, 1),
"electionDate" : ISODate("2023-09-07T16:33:29Z"),
"configVersion" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "172.17.48.231:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 24,
"optime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T16:33:41Z"),
"optimeDurableDate" : ISODate("2023-09-07T16:33:41Z"),
"lastHeartbeat" : ISODate("2023-09-07T16:33:41.377Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T16:33:42.027Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "172.17.48.231:27018",
"syncSourceHost" : "172.17.48.231:27018",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "10.10.1.4:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 24,
"optime" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1694104421, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T16:33:41Z"),
"optimeDurableDate" : ISODate("2023-09-07T16:33:41Z"),
"lastHeartbeat" : ISODate("2023-09-07T16:33:41.376Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T16:33:42.527Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "10.10.1.6:27018",
"syncSourceHost" : "10.10.1.6:27018",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1
}
],
"ok" : 1,
"operationTime" : Timestamp(1694104421, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694104421, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
test1:PRIMARY> use admin
switched to db admin
test1:PRIMARY> db.createUser({user:"root",pwd:"root",roles:[{role:"root",db:"admin"}]});
Successfully added user: {
"user" : "root",
"roles" : [
{
"role" : "root",
"db" : "admin"
}
]
}
============以上信息可以看出和上一套搭建的副本集一样也是一主两从,没有仲裁节点=========
============下面配置仲裁节点===============
test1:PRIMARY> use admin
switched to db admin
test1:PRIMARY> db.auth('root','root')
1
test1:PRIMARY> rs.remove("10.10.1.4:27018")
2023-09-08T00:34:47.260+0800 E QUERY [js] SyntaxError: illegal character @(shell):1:10
test1:PRIMARY> rs.remove("172.17.48.101:27018")
{
"ok" : 1,
"operationTime" : Timestamp(1694104503, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694104503, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
test1:PRIMARY> rs.addArb("172.17.48.101:27018")
{
"ok" : 1,
"operationTime" : Timestamp(1694104525, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694104525, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
## 再次查看集群状态
test1:PRIMARY> rs.status()
{
"set" : "test1",
"date" : ISODate("2023-09-07T16:35:53.884Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
}
},
"lastStableCheckpointTimestamp" : Timestamp(1694104525, 1),
"members" : [
{
"_id" : 0,
"name" : "172.17.48.231:27018", ##172.17.48.231是主节点
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 323,
"optime" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T16:35:51Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1694104409, 1),
"electionDate" : ISODate("2023-09-07T16:33:29Z"),
"configVersion" : 3,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "172.17.48.247:27018", ##172.17.48.247是从节点
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 154,
"optime" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1694104551, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2023-09-07T16:35:51Z"),
"optimeDurableDate" : ISODate("2023-09-07T16:35:51Z"),
"lastHeartbeat" : ISODate("2023-09-07T16:35:53.223Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T16:35:53.224Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "172.17.48.247:27018",
"syncSourceHost" : "172.17.48.247:27018",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 3
},
{
"_id" : 2,
"name" : "172.17.48.101:27018", ##172.17.48.101节点信息报错,和上一套副本集一样错误
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"lastHeartbeat" : ISODate("2023-09-07T16:35:53.242Z"),
"lastHeartbeatRecv" : ISODate("2023-09-07T16:35:25.225Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "Error connecting to 10.10.1.4:27018 :: caused by :: Connection refused",
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : -1
}
],
"ok" : 1,
"operationTime" : Timestamp(1694104551, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1694104551, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
####################解决172.17.48.101节点的报错################################
####################登录172.17.48.101机器上启动一下mongodb即可########################
[root@localhost config]# mongod -f /usr/local/mongodb/config/mongodb-27018.conf
about to fork child process, waiting until server is ready for connections.
forked process: 50452
child process started successfully, parent exiting
##############回到172.17.48.231机器再次查看副本集信息信息###############
test1:PRIMARY> rs.status()
{
"set" : "test",
"date" : ISODate("2025-11-17T08:00:59.244Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
}
},
"lastStableCheckpointTimestamp" : Timestamp(1763366453, 1),
"members" : [
{
"_id" : 0,
"name" : "172.17.48.231:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 7904,
"optime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2025-11-17T08:00:53Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1763358651, 1),
"electionDate" : ISODate("2025-11-17T05:50:51Z"),
"configVersion" : 3,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "172.17.48.247:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 7818,
"optime" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1763366453, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2025-11-17T08:00:53Z"),
"optimeDurableDate" : ISODate("2025-11-17T08:00:53Z"),
"lastHeartbeat" : ISODate("2025-11-17T08:00:57.751Z"),
"lastHeartbeatRecv" : ISODate("2025-11-17T08:00:57.746Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "172.17.48.231:27017",
"syncSourceHost" : "172.17.48.231:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 3
},
{
"_id" : 2,
"name" : "172.17.48.101:27017",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 7717,
"lastHeartbeat" : ISODate("2025-11-17T08:00:57.465Z"),
"lastHeartbeatRecv" : ISODate("2025-11-17T08:00:58.853Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 3
}
],
"ok" : 1,
"operationTime" : Timestamp(1763366453, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1763366453, 1),
"$configServerState" : {
"opTime" : {
"ts" : Timestamp(1763366450, 4),
"t" : NumberLong(1)
}
},
"$clusterTime" : {
"clusterTime" : Timestamp(1763366453, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
四、搭建config server
4.1、创建configserver172.17.48.40、172.17.48.42、172.17.48.101机器上分别操作
注意:可以端口一样、configserver副本集名称要一样、不能配置服务器密钥
bash
直接在config/路径下配置
[root@localhost config]# vim configsvr-27019.yml
添加如下配置:
systemLog: ##顶格
destination: file ##空4格
#日志存储位置
##路径需要创建出来
path: "/usr/local/mongodb/config/log/configsrv.log" ##空4格
logAppend: true ##空4格
storage: ##顶格
journal: ##空4格
enabled: true ##空8格
#数据文件存储位置
dbPath: "/usr/local/mongodb/config/data" ##空4格
#是否一个库一个文件夹
directoryPerDB: true ##空4格
#WT引擎配置
wiredTiger: ##空4格
engineConfig: ##空8格
#WT最大使用cache(根据服务器实际情况调节)
cacheSizeGB: 1 ##空12格
##是否将索引也按数据库名单独存储
directoryForIndexes: true ##空12格
##表压缩配置
collectionConfig: ##空8格
blockCompressor: zlib ##空12格
##索引配置
indexConfig: ##空8格
prefixCompression: true ##空12格
##端口配置
net: ##顶格
bindIp: 0.0.0.0 ##空4格
port: 27019 ##空4格
replication: ##顶格
oplogSizeMB: 2048 ##空4格
replSetName: configsvr ##空4格 #置服务器副本集名称
sharding: ##顶格
clusterRole: configsvr ##空4格
processManagement: ##顶格
fork: true ##空4格
pidFilePath: "/usr/local/mongodb/config/log/configsrv.pid" ##空4格
##要在config/路径下创建data/和log/,三台机器都要创建
mkdir /usr/local/mongodb/config/data
mkdir /usr/local/mongodb/config/log
## 将configsvr-27019.yml上传至172.17.48.42、172.17.48.101机器
[root@localhost config]# scp configsvr-27019.yml root@172.17.48.42:/usr/local/mongodb/config
[root@localhost config]# scp configsvr-27019.yml root@172.17.48.101:/usr/local/mongodb/config
## 三台机器启动configsvr-27019.yml
[root@localhost config]# mongod -f /usr/local/mongodb/config/configsvr-27019.yml
## 登录任意一台服务器以下操作(本人是172.17.48.40机器)
[root@localhost config]# mongo --port 27019
MongoDB shell version v4.0.10
connecting to: mongodb://127.0.0.1:27019/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("ac9171e1-9382-4db6-84ea-9ee3c2b2e164") }
MongoDB server version: 4.0.10
Server has startup warnings:
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten]
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten] ** Read and write access to data and configuration is unrestricted.
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten]
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten]
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2023-09-08T19:40:47.316+0800 I CONTROL [initandlisten]
> config = {
... _id : "configsvr",
... members : [
... {_id : 0, host : "172.17.48.40:27019" },
... {_id : 1, host : "172.17.48.42:27019" },
... {_id : 2, host : "172.17.48.101:27019" },
... ]
... }
{
"_id" : "configsvr",
"members" : [
{
"_id" : 0,
"host" : "172.17.48.40:27019"
},
{
"_id" : 1,
"host" : "172.17.48.42:27019"
},
{
"_id" : 2,
"host" : "172.17.48.101:27019"
}
]
}
> rs.initiate(config)
{
"ok" : 1,
"operationTime" : Timestamp(1694173587, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(1694173587, 1),
"electionId" : ObjectId("000000000000000000000000")
},
"lastCommittedOpTime" : Timestamp(0, 0),
"$clusterTime" : {
"clusterTime" : Timestamp(1694173587, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
configsvr:SECONDARY> ##多次回车可以看出还是从节点,也可以登录其他节点查看
configsvr:SECONDARY>
configsvr:SECONDARY>
configsvr:SECONDARY>
configsvr:SECONDARY>
configsvr:PRIMARY> ##等会再看已经变成主节点了
configsvr:PRIMARY>
configsvr:PRIMARY>
configsvr:PRIMARY>
五、搭建mongos
5.1、创建mongos在172.17.48.101机器上
本人搭建的mongos路由节点是在172.17.48.101机器。为什么选172.17.48.101机器?因为172.17.48.101机器一直是做仲裁节点,没有大量的读写,不会造成宕机行为。mongos也可以是副本集,本例为单节点
bash
同样是在config/路径下配置
[root@localhost config]# vim mongos.yml
systemLog: ##顶格
destination: file ##空4格
path: "/var/log/mongodb/mongos.log" ##空4格
#路径需要提前创建
logAppend: true ##空4格
net: ##顶格
bindIp: 0.0.0.0 ##空4格
port: 20000 ##空4格
sharding: ##顶格
configDB: configsvr/172.17.48.40:27019,172.17.48.42:27019,172.17.48.101:27019 ##空4格
#配置config server的副本集,即把config server添加到mongos路由
processManagement: ##顶格
fork: true ##空4格
## 启动mongos节点
## 注意:一定要用mongos来启动
[root@localhost config]# mongos -f /usr/local/mongodb/config/mongos.yml
about to fork child process, waiting until server is ready for connections.
forked process: 71967
child process started successfully, parent exiting
六、创建分片
6.1、各台服务器分别设置两个分片shard1、shard2
172.17.48.101机器开始分片
bash
## 登录mongos
[root@VM-48-101-centos config]# mongo --port 20000
MongoDB shell version v4.0.10
connecting to: mongodb://127.0.0.1:20000/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("0b8ba2e6-d3a5-412c-b4fd-73cbd6915a54") }
MongoDB server version: 4.0.10
Server has startup warnings:
2025-11-17T14:33:47.295+0800 I CONTROL [main]
2025-11-17T14:33:47.295+0800 I CONTROL [main] ** WARNING: Access control is not enabled for the database.
2025-11-17T14:33:47.295+0800 I CONTROL [main] ** Read and write access to data and configuration is unrestricted.
2025-11-17T14:33:47.295+0800 I CONTROL [main] ** WARNING: You are running this process as the root user, which is not recommended.
2025-11-17T14:33:47.295+0800 I CONTROL [main]
mongos> use admin ## 进入admin数据库
switched to db admin
========分片shard1========
mongos> db.runCommand( { addshard : "test/172.17.48.40:27017,172.17.48.42:27017",name:"shard1"} )
{
"shardAdded" : "shard1",
"ok" : 1,
"operationTime" : Timestamp(1694176447, 5),
"$clusterTime" : {
"clusterTime" : Timestamp(1694176447, 5),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
========分片shard2========
mongos> db.runCommand( { addshard : "test1/172.17.48.231:27018,172.17.48.247:27018",name:"shard2"} )
{
"shardAdded" : "shard2",
"ok" : 1,
"operationTime" : Timestamp(1694176626, 5),
"$clusterTime" : {
"clusterTime" : Timestamp(1694176626, 5),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
========验证是否分片成功========
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("64fb099f61b0ba6908cf025a")
}
shards:
{ "_id" : "shard1", "host" : "test/172.17.48.40:27017,172.17.48.42:27017", "state" : 1 }
{ "_id" : "shard2", "host" : "test1/172.17.48.231:27018,172.17.48.247:27018", "state" : 1 }
active mongoses:
"4.0.10" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "dbname", "primary" : "shard2", "partitioned" : true, "version" : { "uuid" : UUID("c6e2ffe6-89d5-4220-bfe3-9d4c4c28a450"), "lastMod" : 1 } }
七、测试分片
##7.1、创建唯一索引,并为date字段创建hashed索引以用于哈希分片
bash
[root@VM-48-101-centos config]# mongo --port 20000
MongoDB shell version v4.0.10
connecting to: mongodb://127.0.0.1:20000/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("a742226e-063f-49a9-8f39-4fb144e3ec65") }
MongoDB server version: 4.0.10
Server has startup warnings:
2025-11-17T14:33:47.295+0800 I CONTROL [main]
2025-11-17T14:33:47.295+0800 I CONTROL [main] ** WARNING: Access control is not enabled for the database.
2025-11-17T14:33:47.295+0800 I CONTROL [main] ** Read and write access to data and configuration is unrestricted.
2025-11-17T14:33:47.295+0800 I CONTROL [main] ** WARNING: You are running this process as the root user, which is not recommended.
2025-11-17T14:33:47.295+0800 I CONTROL [main]
mongos> use dbname
switched to db dbname
mongos> db.c1.ensureIndex({"date":1,"hour":1,"ad_id":1},{"unique":true})
{
"raw" : {
"test1/172.17.48.231:27018,172.17.48.247:27018" : {
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
},
"ok" : 1,
"operationTime" : Timestamp(1763368828, 10),
"$clusterTime" : {
"clusterTime" : Timestamp(1763368828, 10),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
mongos> db.c1.createIndex({date:'hashed'})
{
"raw" : {
"test1/172.17.48.231:27018,172.17.48.247:27018" : {
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 2,
"numIndexesAfter" : 3,
"ok" : 1
}
},
"ok" : 1,
"operationTime" : Timestamp(1763368833, 2),
"$clusterTime" : {
"clusterTime" : Timestamp(1763368833, 2),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
mongos> use admin
switched to db admin
=======必须先启用数据库分片=======
mongos> db.runCommand({ "enablesharding": "dbname" })
{
"ok" : 1,
"operationTime" : Timestamp(1763368844, 3),
"$clusterTime" : {
"clusterTime" : Timestamp(1763368844, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
=======例子中 "dbname.c1" 就是 dbname 数据库下的 c1 表。=======
=======例子中 {"date": "hashed"} 表示用 date 字段作为分片键,并使用 哈希分片=======
mongos> db.runCommand({"shardcollection":"dbname.c1","key":{"date":"hashed"}})
{
"collectionsharded" : "dbname.c1",
"collectionUUID" : UUID("9e8b62ed-e779-4463-b15a-1e9707c6de3c"),
"ok" : 1,
"operationTime" : Timestamp(1763368850, 27),
"$clusterTime" : {
"clusterTime" : Timestamp(1763368850, 27),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
7.2、插入数据100条看看是否可以分片
bash
mongos> use dbname;
switched to db dbname
mongos> for(i=0;i<100;i++){ db.c1.insert({"date":new Date("2021-08-01").valueOf()-28800000+86400000*i,"hour":16,"ad_id":101,"number":1}); }
WriteResult({ "nInserted" : 1 })
=======查看数据是否分片=======
mongos> db.c1.stats();
{
"sharded" : true,
"capped" : false,
"wiredTiger" : {
"metadata" : {
"formatVersion" : 1
},
"creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
"type" : "file",
"uri" : "statistics:table:dbname/collection-57-5472963469170210209",
"LSM" : {
"bloom filter false positives" : 0,
"bloom filter hits" : 0,
"bloom filter misses" : 0,
"bloom filter pages evicted from cache" : 0,
"bloom filter pages read into cache" : 0,
"bloom filters in the LSM tree" : 0,
"chunks in the LSM tree" : 0,
"highest merge generation in the LSM tree" : 0,
"queries that could have benefited from a Bloom filter that did not exist" : 0,
"sleep for LSM checkpoint throttle" : 0,
"sleep for LSM merge throttle" : 0,
"total size of bloom filters" : 0
},
"block-manager" : {
"allocations requiring file extension" : 0,
"blocks allocated" : 0,
"blocks freed" : 0,
"checkpoint size" : 0,
"file allocation unit size" : 4096,
"file bytes available for reuse" : 0,
"file magic number" : 120897,
"file major version number" : 1,
"file size in bytes" : 4096,
"minor version number" : 0
},
"btree" : {
"btree checkpoint generation" : 176,
"column-store fixed-size leaf pages" : 0,
"column-store internal pages" : 0,
"column-store variable-size RLE encoded values" : 0,
"column-store variable-size deleted values" : 0,
"column-store variable-size leaf pages" : 0,
"fixed-record size" : 0,
"maximum internal page key size" : 368,
"maximum internal page size" : 4096,
"maximum leaf page key size" : 2867,
"maximum leaf page size" : 32768,
"maximum leaf page value size" : 67108864,
"maximum tree depth" : 3,
"number of key/value pairs" : 0,
"overflow pages" : 0,
"pages rewritten by compaction" : 0,
"row-store internal pages" : 0,
"row-store leaf pages" : 0
},
"cache" : {
"bytes currently in the cache" : 9961,
"bytes dirty in the cache cumulative" : 371,
"bytes read into cache" : 0,
"bytes written from cache" : 0,
"checkpoint blocked page eviction" : 0,
"data source pages selected for eviction unable to be evicted" : 0,
"eviction walk passes of a file" : 0,
"eviction walk target pages histogram - 0-9" : 0,
"eviction walk target pages histogram - 10-31" : 0,
"eviction walk target pages histogram - 128 and higher" : 0,
"eviction walk target pages histogram - 32-63" : 0,
"eviction walk target pages histogram - 64-128" : 0,
"eviction walks abandoned" : 0,
"eviction walks gave up because they restarted their walk twice" : 0,
"eviction walks gave up because they saw too many pages and found no candidates" : 0,
"eviction walks gave up because they saw too many pages and found too few candidates" : 0,
"eviction walks reached end of tree" : 0,
"eviction walks started from root of tree" : 0,
"eviction walks started from saved location in tree" : 0,
"hazard pointer blocked page eviction" : 0,
"in-memory page passed criteria to be split" : 0,
"in-memory page splits" : 0,
"internal pages evicted" : 0,
"internal pages split during eviction" : 0,
"leaf pages split during eviction" : 0,
"modified pages evicted" : 0,
"overflow pages read into cache" : 0,
"page split during eviction deepened the tree" : 0,
"page written requiring cache overflow records" : 0,
"pages read into cache" : 0,
"pages read into cache after truncate" : 1,
"pages read into cache after truncate in prepare state" : 0,
"pages read into cache requiring cache overflow entries" : 0,
"pages requested from the cache" : 55,
"pages seen by eviction walk" : 0,
"pages written from cache" : 0,
"pages written requiring in-memory restoration" : 0,
"tracked dirty bytes in the cache" : 9779,
"unmodified pages evicted" : 0
},
"cache_walk" : {
"Average difference between current eviction generation when the page was last considered" : 0,
"Average on-disk page image size seen" : 0,
"Average time in cache for pages that have been visited by the eviction server" : 0,
"Average time in cache for pages that have not been visited by the eviction server" : 0,
"Clean pages currently in cache" : 0,
"Current eviction generation" : 0,
"Dirty pages currently in cache" : 0,
"Entries in the root page" : 0,
"Internal pages currently in cache" : 0,
"Leaf pages currently in cache" : 0,
"Maximum difference between current eviction generation when the page was last considered" : 0,
"Maximum page size seen" : 0,
"Minimum on-disk page image size seen" : 0,
"Number of pages never visited by eviction server" : 0,
"On-disk page image sizes smaller than a single allocation unit" : 0,
"Pages created in memory and never written" : 0,
"Pages currently queued for eviction" : 0,
"Pages that could not be queued for eviction" : 0,
"Refs skipped during cache traversal" : 0,
"Size of the root page" : 0,
"Total number of pages currently in cache" : 0
},
"compression" : {
"compressed pages read" : 0,
"compressed pages written" : 0,
"page written failed to compress" : 0,
"page written was too small to compress" : 0
},
"cursor" : {
"bulk-loaded cursor-insert calls" : 0,
"close calls that result in cache" : 0,
"create calls" : 3,
"cursor operation restarted" : 0,
"cursor-insert key and value bytes inserted" : 4510,
"cursor-remove key bytes removed" : 0,
"cursor-update value bytes updated" : 0,
"cursors reused from cache" : 53,
"insert calls" : 55,
"modify calls" : 0,
"next calls" : 0,
"open cursor count" : 0,
"prev calls" : 1,
"remove calls" : 0,
"reserve calls" : 0,
"reset calls" : 112,
"search calls" : 0,
"search near calls" : 0,
"truncate calls" : 0,
"update calls" : 0
},
"reconciliation" : {
"dictionary matches" : 0,
"fast-path pages deleted" : 0,
"internal page key bytes discarded using suffix compression" : 0,
"internal page multi-block writes" : 0,
"internal-page overflow keys" : 0,
"leaf page key bytes discarded using prefix compression" : 0,
"leaf page multi-block writes" : 0,
"leaf-page overflow keys" : 0,
"maximum blocks required for a page" : 0,
"overflow values written" : 0,
"page checksum matches" : 0,
"page reconciliation calls" : 0,
"page reconciliation calls for eviction" : 0,
"pages deleted" : 0
},
"session" : {
"object compaction" : 0
},
"transaction" : {
"update conflicts" : 0
}
},
"ns" : "dbname.c1",
"count" : 100, ====总共插入100条数据====
"size" : 8100,
"storageSize" : 8192,
"totalIndexSize" : 24576,
"indexSizes" : {
"_id_" : 8192,
"date_1_hour_1_ad_id_1" : 8192,
"date_hashed" : 8192
},
"avgObjSize" : 81,
"maxSize" : NumberLong(0),
"nindexes" : 3,
"nchunks" : 4,
"shards" : {
"shard1" : {
"ns" : "dbname.c1",
"size" : 4455,
"count" : 55, ====shard1存入55条数据====
"avgObjSize" : 81,
"storageSize" : 4096,
"capped" : false,
"wiredTiger" : {
"metadata" : {
"formatVersion" : 1
},
"creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
"type" : "file",
"uri" : "statistics:table:dbname/collection-57-5472963469170210209",
"LSM" : {
"bloom filter false positives" : 0,
"bloom filter hits" : 0,
"bloom filter misses" : 0,
"bloom filter pages evicted from cache" : 0,
"bloom filter pages read into cache" : 0,
"bloom filters in the LSM tree" : 0,
"chunks in the LSM tree" : 0,
"highest merge generation in the LSM tree" : 0,
"queries that could have benefited from a Bloom filter that did not exist" : 0,
"sleep for LSM checkpoint throttle" : 0,
"sleep for LSM merge throttle" : 0,
"total size of bloom filters" : 0
},
"block-manager" : {
"allocations requiring file extension" : 0,
"blocks allocated" : 0,
"blocks freed" : 0,
"checkpoint size" : 0,
"file allocation unit size" : 4096,
"file bytes available for reuse" : 0,
"file magic number" : 120897,
"file major version number" : 1,
"file size in bytes" : 4096,
"minor version number" : 0
},
"btree" : {
"btree checkpoint generation" : 176,
"column-store fixed-size leaf pages" : 0,
"column-store internal pages" : 0,
"column-store variable-size RLE encoded values" : 0,
"column-store variable-size deleted values" : 0,
"column-store variable-size leaf pages" : 0,
"fixed-record size" : 0,
"maximum internal page key size" : 368,
"maximum internal page size" : 4096,
"maximum leaf page key size" : 2867,
"maximum leaf page size" : 32768,
"maximum leaf page value size" : 67108864,
"maximum tree depth" : 3,
"number of key/value pairs" : 0,
"overflow pages" : 0,
"pages rewritten by compaction" : 0,
"row-store internal pages" : 0,
"row-store leaf pages" : 0
},
"cache" : {
"bytes currently in the cache" : 9961,
"bytes dirty in the cache cumulative" : 371,
"bytes read into cache" : 0,
"bytes written from cache" : 0,
"checkpoint blocked page eviction" : 0,
"data source pages selected for eviction unable to be evicted" : 0,
"eviction walk passes of a file" : 0,
"eviction walk target pages histogram - 0-9" : 0,
"eviction walk target pages histogram - 10-31" : 0,
"eviction walk target pages histogram - 128 and higher" : 0,
"eviction walk target pages histogram - 32-63" : 0,
"eviction walk target pages histogram - 64-128" : 0,
"eviction walks abandoned" : 0,
"eviction walks gave up because they restarted their walk twice" : 0,
"eviction walks gave up because they saw too many pages and found no candidates" : 0,
"eviction walks gave up because they saw too many pages and found too few candidates" : 0,
"eviction walks reached end of tree" : 0,
"eviction walks started from root of tree" : 0,
"eviction walks started from saved location in tree" : 0,
"hazard pointer blocked page eviction" : 0,
"in-memory page passed criteria to be split" : 0,
"in-memory page splits" : 0,
"internal pages evicted" : 0,
"internal pages split during eviction" : 0,
"leaf pages split during eviction" : 0,
"modified pages evicted" : 0,
"overflow pages read into cache" : 0,
"page split during eviction deepened the tree" : 0,
"page written requiring cache overflow records" : 0,
"pages read into cache" : 0,
"pages read into cache after truncate" : 1,
"pages read into cache after truncate in prepare state" : 0,
"pages read into cache requiring cache overflow entries" : 0,
"pages requested from the cache" : 55,
"pages seen by eviction walk" : 0,
"pages written from cache" : 0,
"pages written requiring in-memory restoration" : 0,
"tracked dirty bytes in the cache" : 9779,
"unmodified pages evicted" : 0
},
"cache_walk" : {
"Average difference between current eviction generation when the page was last considered" : 0,
"Average on-disk page image size seen" : 0,
"Average time in cache for pages that have been visited by the eviction server" : 0,
"Average time in cache for pages that have not been visited by the eviction server" : 0,
"Clean pages currently in cache" : 0,
"Current eviction generation" : 0,
"Dirty pages currently in cache" : 0,
"Entries in the root page" : 0,
"Internal pages currently in cache" : 0,
"Leaf pages currently in cache" : 0,
"Maximum difference between current eviction generation when the page was last considered" : 0,
"Maximum page size seen" : 0,
"Minimum on-disk page image size seen" : 0,
"Number of pages never visited by eviction server" : 0,
"On-disk page image sizes smaller than a single allocation unit" : 0,
"Pages created in memory and never written" : 0,
"Pages currently queued for eviction" : 0,
"Pages that could not be queued for eviction" : 0,
"Refs skipped during cache traversal" : 0,
"Size of the root page" : 0,
"Total number of pages currently in cache" : 0
},
"compression" : {
"compressed pages read" : 0,
"compressed pages written" : 0,
"page written failed to compress" : 0,
"page written was too small to compress" : 0
},
"cursor" : {
"bulk-loaded cursor-insert calls" : 0,
"close calls that result in cache" : 0,
"create calls" : 3,
"cursor operation restarted" : 0,
"cursor-insert key and value bytes inserted" : 4510,
"cursor-remove key bytes removed" : 0,
"cursor-update value bytes updated" : 0,
"cursors reused from cache" : 53,
"insert calls" : 55,
"modify calls" : 0,
"next calls" : 0,
"open cursor count" : 0,
"prev calls" : 1,
"remove calls" : 0,
"reserve calls" : 0,
"reset calls" : 112,
"search calls" : 0,
"search near calls" : 0,
"truncate calls" : 0,
"update calls" : 0
},
"reconciliation" : {
"dictionary matches" : 0,
"fast-path pages deleted" : 0,
"internal page key bytes discarded using suffix compression" : 0,
"internal page multi-block writes" : 0,
"internal-page overflow keys" : 0,
"leaf page key bytes discarded using prefix compression" : 0,
"leaf page multi-block writes" : 0,
"leaf-page overflow keys" : 0,
"maximum blocks required for a page" : 0,
"overflow values written" : 0,
"page checksum matches" : 0,
"page reconciliation calls" : 0,
"page reconciliation calls for eviction" : 0,
"pages deleted" : 0
},
"session" : {
"object compaction" : 0
},
"transaction" : {
"update conflicts" : 0
}
},
"nindexes" : 3,
"totalIndexSize" : 12288,
"indexSizes" : {
"_id_" : 4096,
"date_1_hour_1_ad_id_1" : 4096,
"date_hashed" : 4096
},
"ok" : 1,
"operationTime" : Timestamp(1763369028, 2),
"$gleStats" : {
"lastOpTime" : {
"ts" : Timestamp(1763369028, 2),
"t" : NumberLong(1)
},
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1763369028, 2),
"$configServerState" : {
"opTime" : {
"ts" : Timestamp(1763369028, 3),
"t" : NumberLong(1)
}
},
"$clusterTime" : {
"clusterTime" : Timestamp(1763369028, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
},
"shard2" : {
"ns" : "dbname.c1",
"size" : 3645,
"count" : 45, ====shard2存入45条数据====
"avgObjSize" : 81,
"storageSize" : 4096,
"capped" : false,
"wiredTiger" : {
"metadata" : {
"formatVersion" : 1
},
"creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=false),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_custom=(prefix=,start_generation=0,suffix=),merge_max=15,merge_min=0),memory_page_image_max=0,memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
"type" : "file",
"uri" : "statistics:table:dbname/collection-51--4201342805821987292",
"LSM" : {
"bloom filter false positives" : 0,
"bloom filter hits" : 0,
"bloom filter misses" : 0,
"bloom filter pages evicted from cache" : 0,
"bloom filter pages read into cache" : 0,
"bloom filters in the LSM tree" : 0,
"chunks in the LSM tree" : 0,
"highest merge generation in the LSM tree" : 0,
"queries that could have benefited from a Bloom filter that did not exist" : 0,
"sleep for LSM checkpoint throttle" : 0,
"sleep for LSM merge throttle" : 0,
"total size of bloom filters" : 0
},
"block-manager" : {
"allocations requiring file extension" : 0,
"blocks allocated" : 0,
"blocks freed" : 0,
"checkpoint size" : 0,
"file allocation unit size" : 4096,
"file bytes available for reuse" : 0,
"file magic number" : 120897,
"file major version number" : 1,
"file size in bytes" : 4096,
"minor version number" : 0
},
"btree" : {
"btree checkpoint generation" : 163,
"column-store fixed-size leaf pages" : 0,
"column-store internal pages" : 0,
"column-store variable-size RLE encoded values" : 0,
"column-store variable-size deleted values" : 0,
"column-store variable-size leaf pages" : 0,
"fixed-record size" : 0,
"maximum internal page key size" : 368,
"maximum internal page size" : 4096,
"maximum leaf page key size" : 2867,
"maximum leaf page size" : 32768,
"maximum leaf page value size" : 67108864,
"maximum tree depth" : 3,
"number of key/value pairs" : 0,
"overflow pages" : 0,
"pages rewritten by compaction" : 0,
"row-store internal pages" : 0,
"row-store leaf pages" : 0
},
"cache" : {
"bytes currently in the cache" : 8266,
"bytes dirty in the cache cumulative" : 371,
"bytes read into cache" : 0,
"bytes written from cache" : 0,
"checkpoint blocked page eviction" : 0,
"data source pages selected for eviction unable to be evicted" : 0,
"eviction walk passes of a file" : 0,
"eviction walk target pages histogram - 0-9" : 0,
"eviction walk target pages histogram - 10-31" : 0,
"eviction walk target pages histogram - 128 and higher" : 0,
"eviction walk target pages histogram - 32-63" : 0,
"eviction walk target pages histogram - 64-128" : 0,
"eviction walks abandoned" : 0,
"eviction walks gave up because they restarted their walk twice" : 0,
"eviction walks gave up because they saw too many pages and found no candidates" : 0,
"eviction walks gave up because they saw too many pages and found too few candidates" : 0,
"eviction walks reached end of tree" : 0,
"eviction walks started from root of tree" : 0,
"eviction walks started from saved location in tree" : 0,
"hazard pointer blocked page eviction" : 0,
"in-memory page passed criteria to be split" : 0,
"in-memory page splits" : 0,
"internal pages evicted" : 0,
"internal pages split during eviction" : 0,
"leaf pages split during eviction" : 0,
"modified pages evicted" : 0,
"overflow pages read into cache" : 0,
"page split during eviction deepened the tree" : 0,
"page written requiring cache overflow records" : 0,
"pages read into cache" : 0,
"pages read into cache after truncate" : 1,
"pages read into cache after truncate in prepare state" : 0,
"pages read into cache requiring cache overflow entries" : 0,
"pages requested from the cache" : 45,
"pages seen by eviction walk" : 0,
"pages written from cache" : 0,
"pages written requiring in-memory restoration" : 0,
"tracked dirty bytes in the cache" : 8083,
"unmodified pages evicted" : 0
},
"cache_walk" : {
"Average difference between current eviction generation when the page was last considered" : 0,
"Average on-disk page image size seen" : 0,
"Average time in cache for pages that have been visited by the eviction server" : 0,
"Average time in cache for pages that have not been visited by the eviction server" : 0,
"Clean pages currently in cache" : 0,
"Current eviction generation" : 0,
"Dirty pages currently in cache" : 0,
"Entries in the root page" : 0,
"Internal pages currently in cache" : 0,
"Leaf pages currently in cache" : 0,
"Maximum difference between current eviction generation when the page was last considered" : 0,
"Maximum page size seen" : 0,
"Minimum on-disk page image size seen" : 0,
"Number of pages never visited by eviction server" : 0,
"On-disk page image sizes smaller than a single allocation unit" : 0,
"Pages created in memory and never written" : 0,
"Pages currently queued for eviction" : 0,
"Pages that could not be queued for eviction" : 0,
"Refs skipped during cache traversal" : 0,
"Size of the root page" : 0,
"Total number of pages currently in cache" : 0
},
"compression" : {
"compressed pages read" : 0,
"compressed pages written" : 0,
"page written failed to compress" : 0,
"page written was too small to compress" : 0
},
"cursor" : {
"bulk-loaded cursor-insert calls" : 0,
"close calls that result in cache" : 0,
"create calls" : 4,
"cursor operation restarted" : 0,
"cursor-insert key and value bytes inserted" : 3690,
"cursor-remove key bytes removed" : 0,
"cursor-update value bytes updated" : 0,
"cursors reused from cache" : 45,
"insert calls" : 45,
"modify calls" : 0,
"next calls" : 3,
"open cursor count" : 0,
"prev calls" : 1,
"remove calls" : 0,
"reserve calls" : 0,
"reset calls" : 98,
"search calls" : 0,
"search near calls" : 0,
"truncate calls" : 0,
"update calls" : 0
},
"reconciliation" : {
"dictionary matches" : 0,
"fast-path pages deleted" : 0,
"internal page key bytes discarded using suffix compression" : 0,
"internal page multi-block writes" : 0,
"internal-page overflow keys" : 0,
"leaf page key bytes discarded using prefix compression" : 0,
"leaf page multi-block writes" : 0,
"leaf-page overflow keys" : 0,
"maximum blocks required for a page" : 0,
"overflow values written" : 0,
"page checksum matches" : 0,
"page reconciliation calls" : 0,
"page reconciliation calls for eviction" : 0,
"pages deleted" : 0
},
"session" : {
"object compaction" : 0
},
"transaction" : {
"update conflicts" : 0
}
},
"nindexes" : 3,
"totalIndexSize" : 12288,
"indexSizes" : {
"_id_" : 4096,
"date_1_hour_1_ad_id_1" : 4096,
"date_hashed" : 4096
},
"ok" : 1,
"operationTime" : Timestamp(1763369020, 1),
"$gleStats" : {
"lastOpTime" : {
"ts" : Timestamp(1763369003, 97),
"t" : NumberLong(1)
},
"electionId" : ObjectId("7fffffff0000000000000001")
},
"lastCommittedOpTime" : Timestamp(1763369020, 1),
"$configServerState" : {
"opTime" : {
"ts" : Timestamp(1763369028, 3),
"t" : NumberLong(1)
}
},
"$clusterTime" : {
"clusterTime" : Timestamp(1763369028, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
},
"ok" : 1,
"operationTime" : Timestamp(1763369028, 2),
"$clusterTime" : {
"clusterTime" : Timestamp(1763369028, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
总结
提示:这里对文章进行总结:
例如:以上就是今天要讲的内容,本文仅仅简单介绍了MongoDB分片集群(副本集 + config server + mongos)的搭建过程,并通过哈希分片键验证了数据在多个分片间的均衡分布。借助分片技术,可以实现数据的水平扩展、负载均衡与高可用。