ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-001
ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-001
ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-001
ceph-deploy --overwrite-conf osd create --data /dev/sde ds-001
ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-002
ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-002
ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-002
ceph-deploy --overwrite-conf osd create --data /dev/sde ds-002
ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-004
ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-004
ceph-deploy --overwrite-conf osd create --data /dev/sde ds-004
ceph-deploy --overwrite-conf osd create --data /dev/sdf ds-004
ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-303
ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-303
ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-303
ceph-deploy --overwrite-conf osd create --data /dev/sde ds-303
ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-304
ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-304
ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-304
ceph-deploy --overwrite-conf osd create --data /dev/sde ds-304
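The block above is one ceph-deploy call per data disk per host; the same thing can be written as a loop (device names per host are as listed above, with ds-004 on sdc-sdf instead of sdb-sde):

for host in ds-001 ds-002 ds-303 ds-304; do
  for dev in sdb sdc sdd sde; do
    ceph-deploy --overwrite-conf osd create --data /dev/$dev $host
  done
done
# ds-004 carries a different disk set
for dev in sdc sdd sde sdf; do
  ceph-deploy --overwrite-conf osd create --data /dev/$dev ds-004
done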
ceph osd pool create cephfs_data 100
ceph osd pool create cephfs_metadata 100
ceph fs new cephfs cephfs_metadata cephfs_data
ceph auth get-or-create client.1 mon 'allow r' mds 'allow rw' osd 'allow rw pool=cephfs_data'
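To hand the key to a client, a typical follow-up (mount point, monitor address and secret file path are placeholders, not taken from this log) is:

ceph auth get-key client.1 > /etc/ceph/client.1.secret   # bare key for the kernel client
mount -t ceph <mon-host>:6789:/ /mnt/cephfs -o name=1,secretfile=/etc/ceph/client.1.secret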
ceph osd tree
ceph-deploy disk list ds-507
ceph-deploy disk prepare ds-507:/dev/nvme0n1
ceph-deploy disk zap ds-507:/dev/nvme0n1
ceph-deploy disk zap ds-507 /dev/nvme0n1
ceph-deploy disk activate ds-507 /dev/nvme0n1
ceph device ls
ceph osd lspools
ceph fs rm cephfs --yes-i-really-mean-it
ceph pool list
ceph pool status
ceph osd pool delete --yes-i-really-really-mean-it
ceph osd pool delete cephfs_data --yes-i-really-really-mean-it
ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
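If the deletes above were refused, the likely cause is that the monitors block pool deletion by default; enabling it (a standard flag, whether it was actually needed here is an assumption) looks like:

ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'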
ceph-deploy --overwrite-conf osd create --data /dev/nvme0n1 ds-507
ceph-deploy --overwrite-conf osd create --data /dev/nvme0n1 ds-507
ceph mds status
ceph mds stat
ceph auth list
ceph auth del client.1
ceph osd lspools
ceph osd crush tree
1530 18/09/19 12:09:19 sudo ceph osd pool create cephfs-data 64
1531 18/09/19 12:09:37 sudo ceph osd pool create cephfs-metadata 1
1532 18/09/19 12:10:39 sudo ceph osd pool set cephfs-data size 1
1533 18/09/19 12:12:20 sudo ceph osd crush tree
1534 18/09/19 12:13:35 sudo ceph osd getcrushmap -o crushmap.txt
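The map dumped here gets decompiled just below; the reverse direction, after editing, would be (file names are only examples):

crushtool -c crushmap.decompiled -o crushmap.new   # recompile the edited text map
sudo ceph osd setcrushmap -i crushmap.new          # inject it back into the cluster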
1536 18/09/19 12:14:22 crushtool -d crushmap.txt -o crushmap.decompiled
1537 18/09/19 12:14:27 less crushmap.decompiled
1538 18/09/19 12:17:38 sudo ceph osd lspools
1539 18/09/19 12:24:24 sudo ceph osd tree
1540 18/09/19 12:26:23 less crushmap.decompiled
1541 18/09/19 12:28:02 sudo ceph osd crush add-bucket ssd root
1542 18/09/19 12:28:27 sudo ceph osd crush add-bucket ds-507 host
1543 18/09/19 12:29:00 sudo ceph osd crush move ds-507 root=ssd
1544 18/09/19 12:30:08 sudo ceph osd crush set osd.19 .1102 root=ssd host=ds-507
1545 18/09/19 12:30:42 sudo ceph osd crush set osd.19 1.0 root=ssd host=ds-507
1546 18/09/19 12:31:42 sudo ceph osd crush tree
1547 18/09/19 12:32:30 history
1548 18/09/19 12:32:42 sudo ceph osd pool remove cephfs-data
1549 18/09/19 12:33:21 sudo ceph osd pool delete cephfs-data
1550 18/09/19 12:33:29 sudo ceph osd pool delete cephfs-data --yes-i-really-really-mean-it
1551 18/09/19 12:33:45 sudo ceph osd pool delete cephfs-data cephfs-data --yes-i-really-really-mean-it
1552 18/09/19 12:34:02 sudo ceph osd pool delete cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1553 18/09/19 12:39:25 history
1554 18/09/19 12:40:22 sudo ceph osd pool create cephfs-data 64 crushRoot default
1555 18/09/19 12:40:39 sudo ceph osd crush tree
1556 18/09/19 12:42:00 history
1557 18/09/19 12:42:07 sudo ceph osd getcrushmap -o crushmap.txt
1558 18/09/19 12:42:15 crushtool -d crushmap.txt -o crushmap.decompiled
1559 18/09/19 12:42:19 less crushmap.decompiled
1560 18/09/19 12:42:58 sudo ceph osd pool create cephfs-data 64
1561 18/09/19 12:43:25 sudo ceph osd pool create cephfs-metadata 1 crushRoot -22
1562 18/09/19 12:43:31 less crushmap.decompiled
1563 18/09/19 12:45:03 sudo ceph osd pool create cephfs-metadata 1 crushRoot -24
1564 18/09/19 12:47:46 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd
1565 18/09/19 12:47:59 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name -24
1566 18/09/19 12:48:09 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name "ssd"
1567 18/09/19 12:49:47 sudo ceph osd crush rule ls
1568 18/09/19 12:49:57 history
1569 18/09/19 12:55:51 #ceph osd crush set osd.0 1.0 root=default datacenter=dc1 room=room1 row=foo rack=bar host=foo-bar-1
1570 18/09/19 12:56:10 #sudo ceph osd crush set osd
1571 18/09/19 12:56:19 less crushmap.decompiled
1572 18/09/19 12:57:02 sudo ceph osd crush set osd.19 1.0 root=ssd
1573 18/09/19 12:57:12 sudo ceph osd crush rule ls
1574 18/09/19 13:01:32 sudo ceph osd crush rule create-simple ssd ssd ds-507 firstn
1575 18/09/19 13:01:50 less crushmap.decompiled
1576 18/09/19 13:03:29 sudo ceph osd crush rule create-simple ssd ssd host firstn
1577 18/09/19 13:03:37 sudo ceph osd crush rule ls
1578 18/09/19 13:04:27 history
1579 18/09/19 13:05:08 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd
1580 18/09/19 13:05:13 sudo ceph osd crush rule ls
1581 18/09/19 13:05:18 history
1582 18/09/19 13:05:29 sudo ceph osd getcrushmap -o crushmap.txt
1583 18/09/19 13:05:39 crushtool -d crushmap.txt -o crushmap.decompiled
1585 18/09/19 13:05:55 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name 1
1586 18/09/19 13:06:09 sudo ceph osd pool create cephfs-metadata 1 crushRoot 1
1587 18/09/19 13:06:17 sudo ceph osd pool create cephfs-metadata 1 1
1588 18/09/19 13:07:11 sudo ceph fs new cephfs cephfs_metadata cephfs_data
1589 18/09/19 13:07:23 sudo ceph fs new cephfs cephfs-metadata cephfs_data
1590 18/09/19 13:07:29 sudo ceph fs new cephfs cephfs-metadata cephfs-data
1591 18/09/19 13:09:16 sudo ceph osd crush tree
1592 18/09/19 13:50:44 sudo ceph fs delete cephfs
1593 18/09/19 13:50:50 sudo ceph fs rm cephfs
1594 18/09/19 13:51:44 sudo ceph fs rm cephfs --yes-i-really-mean-it
1595 18/09/19 13:52:22 sudo ceph osd pool rm cephfs-metadata
1596 18/09/19 13:52:33 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1597 18/09/19 13:52:41 sudo ceph osd crush tree
1598 18/09/19 13:53:41 sudo ceph osd crush -h
1599 18/09/19 13:53:49 sudo ceph osd crush ls
1600 18/09/19 13:54:05 sudo ceph osd crush ls ds-507
1601 18/09/19 13:55:05 sudo ceph osd crush rule list
1602 18/09/19 13:56:14 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd
1603 18/09/19 13:57:24 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name 1
1604 18/09/19 13:57:38 sudo ceph osd crush rule list
1605 18/09/19 13:58:35 sudo ceph osd pool create cephfs-metadata 1 ssd
1606 18/09/19 13:59:00 sudo ceph fs new cephfs cephfs-metadata cephfs-data
1607 18/09/19 14:00:40 history
1655 18/09/19 15:23:55 sudo ceph osd pool create cephfs-metadata 32 ssd
1656 18/09/19 15:24:22 sudo ceph health
1657 18/09/19 15:24:29 sudo ceph -s
1658 18/09/19 15:26:56 sudo ceph health detail
1659 18/09/19 15:27:39 sudo ceph pg 11.1b query
1660 18/09/19 15:28:34 history
1661 18/09/19 15:28:42 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1662 18/09/19 15:28:55 sudo ceph osd pool create cephfs-metadata 1 ssd
1663 18/09/19 15:29:28 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1664 18/09/19 15:29:33 sudo ceph osd pool create cephfs-metadata 3 ssd
1665 18/09/19 15:30:02 sudo ceph osd pool rm cephfs-data cephfs-data --yes-i-really-really-mean-it
1666 18/09/19 15:30:15 sudo ceph osd pool create cephfs-data 1
1667 18/09/19 15:30:49 sudo ceph osd pool rm cephfs-data cephfs-data --yes-i-really-really-mean-it
1668 18/09/19 15:30:56 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1669 18/09/19 15:31:58 sudo ceph osd pool create --help
1670 18/09/19 15:35:06 sudo ceph osd pool create cephfs-data 5 5
1671 18/09/19 15:35:56 sudo ceph osd pool set cephfs-data size 1
1672 18/09/19 15:36:24 sudo ceph osd pool create cephfs-metadata 1 ssd
1673 18/09/19 15:36:41 sudo ceph osd pool set cephfs-metadata size 1
1674 18/09/19 15:37:04 history
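For reference, the sequence the history above converges on, written out once (Luminous-style positional syntax; the PG counts and size-1 replication come from the attempts above, the rest is a sketch rather than a record of what finally ran):

sudo ceph osd crush rule create-simple ssd ssd host firstn       # rule placing data under the ssd root
sudo ceph osd pool create cephfs-data 64 64
sudo ceph osd pool set cephfs-data size 1
sudo ceph osd pool create cephfs-metadata 32 32 replicated ssd   # metadata pool bound to the ssd rule
sudo ceph osd pool set cephfs-metadata size 1
sudo ceph fs new cephfs cephfs-metadata cephfs-data
sudo ceph -s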
rmmod brd
modprobe brd rd_size=2097152
mke2fs -m 0 /dev/ram0 1048576
mke2fs -m 0 /dev/ram1 1048576
mke2fs -m 0 /dev/ram2 1048576
mke2fs -m 0 /dev/ram3 1048576
sudo ceph-osd -i 0 --mkjournal
sudo ceph-osd -i 1 --mkjournal
sudo ceph-osd -i 2 --mkjournal
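For completeness, the usual FileStore procedure around --mkjournal when repointing a journal (the flags are real; osd id 3 and the ram device follow the [osd.3] section below, which is an assumption about the order things were done here):

sudo systemctl stop ceph-osd@3         # stop the daemon before touching its journal
sudo ceph-osd -i 3 --flush-journal     # drain the old journal into the object store
# set osd_journal = /dev/ram0 in ceph.conf (see [osd.3] below), then:
sudo ceph-osd -i 3 --mkjournal         # create the journal on the ram device
sudo systemctl start ceph-osd@3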
120 19/09/19 14:19:41 sudo systemctl status ceph-osd*
121 19/09/19 14:19:50 ls -l /var/lib/ceph/osd/ceph-4/journal
122 19/09/19 14:19:52 sudo ls -l /var/lib/ceph/osd/ceph-4/journal
123 19/09/19 14:19:56 sudo ls -l /var/lib/ceph/osd/ceph-4/
124 19/09/19 14:29:10 history
[osd.3]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram0
[osd.4]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram1
[osd.5]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram2
[osd.6]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram3
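After restarting the OSDs, the journal paths and overall state can be checked the same way as above:

sudo ls -l /var/lib/ceph/osd/ceph-3/
sudo ceph -s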