Newer
Older
ceph osd pool create cephfs_data 100
ceph osd pool create cephfs_metadata 100
ceph fs new cephfs cephfs_metadata cephfs_data
ceph auth get-or-create client.1 mon 'allow r' mds 'allow rw' osd 'allow rw pool=cephfs_data'
ceph device ls
ceph osd lspools
ceph fs rm cephfs --yes-i-really-mean-it
ceph osd pool ls
ceph osd pool stats
ceph osd pool delete --yes-i-really-really-mean-it
ceph osd pool delete cephfs_data --yes-i-really-really-mean-it
ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
ceph-deploy --overwrite-conf osd create --data /dev/nvme0n1 ds-507
ceph-deploy --overwrite-conf osd create --data /dev/nvme0n1 ds-507
ceph mds status
ceph mds stat
ceph auth list
ceph auth del client.1
ceph osd lspools
ceph osd crush tree
1588 18/09/19 13:07:11 sudo ceph fs new cephfs cephfs_metadata cephfs_data
1589 18/09/19 13:07:23 sudo ceph fs new cephfs cephfs-metadata cephfs_data
1590 18/09/19 13:07:29 sudo ceph fs new cephfs cephfs-metadata cephfs-data
1592 18/09/19 13:50:44 sudo ceph fs delete cephfs
1593 18/09/19 13:50:50 sudo ceph fs rm cephfs
1594 18/09/19 13:51:44 sudo ceph fs rm cephfs --yes-i-really-mean-it
1595 18/09/19 13:52:22 sudo ceph osd pool rm cephfs-metadata
1596 18/09/19 13:52:33 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1597 18/09/19 13:52:41 sudo ceph osd crush tree
1598 18/09/19 13:53:41 sudo ceph osd crush -h
1599 18/09/19 13:53:49 sudo ceph osd crush ls
1600 18/09/19 13:54:05 sudo ceph osd crush ls ds-507
1601 18/09/19 13:55:05 sudo ceph osd crush rule list
1602 18/09/19 13:56:14 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd
1603 18/09/19 13:57:24 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name 1
1604 18/09/19 13:57:38 sudo ceph osd crush rule list
1605 18/09/19 13:58:35 sudo ceph osd pool create cephfs-metadata 1 ssd
1606 18/09/19 13:59:00 sudo ceph fs new cephfs cephfs-metadata cephfs-data
1607 18/09/19 14:00:40 history
1655 18/09/19 15:23:55 sudo ceph osd pool create cephfs-metadata 32 ssd
1656 18/09/19 15:24:22 sudo ceph health
1657 18/09/19 15:24:29 sudo ceph -s
1658 18/09/19 15:26:56 sudo ceph health detail
1659 18/09/19 15:27:39 sudo ceph pg 11.1b query
1660 18/09/19 15:28:34 history
1661 18/09/19 15:28:42 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1662 18/09/19 15:28:55 sudo ceph osd pool create cephfs-metadata 1 ssd
1663 18/09/19 15:29:28 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1664 18/09/19 15:29:33 sudo ceph osd pool create cephfs-metadata 3 ssd
1665 18/09/19 15:30:02 sudo ceph osd pool rm cephfs-data cephfs-data --yes-i-really-really-mean-it
1666 18/09/19 15:30:15 sudo ceph osd pool create cephfs-data 1
1667 18/09/19 15:30:49 sudo ceph osd pool rm cephfs-data cephfs-data --yes-i-really-really-mean-it
1668 18/09/19 15:30:56 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
1669 18/09/19 15:31:58 sudo ceph osd pool create --help
1670 18/09/19 15:35:06 sudo ceph osd pool create cephfs-data 5 5
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
1671 18/09/19 15:35:56 sudo ceph osd pool set cephfs-data size 1
1672 18/09/19 15:36:24 sudo ceph osd pool create cephfs-metadata 1 ssd
1673 18/09/19 15:36:41 sudo ceph osd pool set cephfs-metadata size 1
1674 18/09/19 15:37:04 history
rmmod brd
modprobe brd rd_size=2097152
mke2fs -m 0 /dev/ram0 1048576
mke2fs -m 0 /dev/ram1 1048576
mke2fs -m 0 /dev/ram2 1048576
mke2fs -m 0 /dev/ram3 1048576
sudo ceph-osd -i 0 --mkjournal
sudo ceph-osd -i 1 --mkjournal
sudo ceph-osd -i 2 --mkjournal
120 19/09/19 14:19:41 sudo systemctl status ceph-osd*
121 19/09/19 14:19:50 ls -l /var/lib/ceph/osd/ceph-4/journal
122 19/09/19 14:19:52 sudo ls -l /var/lib/ceph/osd/ceph-4/journal
123 19/09/19 14:19:56 sudo ls -l /var/lib/ceph/osd/ceph-4/
124 19/09/19 14:29:10 history
[osd.3]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram0
[osd.4]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram1
[osd.5]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram2
[osd.6]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram3