ceph_session.txt
ceph mds stat

# Create data and metadata pools (100 PGs each), then the filesystem
ceph osd pool create cephfs_data 100
ceph osd pool create cephfs_metadata 100
ceph fs new cephfs cephfs_metadata cephfs_data

ceph fs ls
ceph fs status

ceph auth get-or-create client.1 mon 'allow r' mds 'allow rw' osd 'allow rw pool=cephfs_data'   # pool name must match the data pool created above
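
# A minimal sketch of mounting with the client.1 key via the kernel client.
# "mon-host" is a placeholder for an actual monitor address, not from the session.
sudo ceph auth get-key client.1 | sudo tee /etc/ceph/client.1.secret
sudo mkdir -p /mnt/cephfs
sudo mount -t ceph mon-host:6789:/ /mnt/cephfs -o name=1,secretfile=/etc/ceph/client.1.secret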


ceph device ls

ceph osd lspools


ceph fs rm cephfs --yes-i-really-mean-it
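
# ceph fs rm refuses to run while an MDS is still active for the filesystem.
# A sketch assuming one MDS per host managed by systemd; <hostname> is a placeholder.
sudo systemctl stop ceph-mds@<hostname>
ceph mds stat                      # should no longer show an active MDS for cephfs
ceph fs rm cephfs --yes-i-really-mean-it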

ceph osd pool ls
ceph osd pool stats

ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it   # the pool name must be given twice
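
# Deletion is refused unless the monitors allow it; one way to enable it
# temporarily (injectargs form, assuming a Luminous-era cluster):
sudo ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'
sudo ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
sudo ceph tell mon.\* injectargs '--mon-allow-pool-delete=false'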


ceph-deploy --overwrite-conf  osd create --data /dev/nvme0n1 ds-507
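
# If the device held a previous OSD, it can be wiped first (ceph-deploy 2.x
# syntax; destroys all data on /dev/nvme0n1):
ceph-deploy disk zap ds-507 /dev/nvme0n1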

ceph mds stat

ceph auth list
ceph auth del client.1
ceph osd lspools
ceph osd crush tree

# Shell history from the session (history numbers and timestamps as recorded):
 1588  18/09/19 13:07:11 sudo ceph fs new cephfs cephfs_metadata cephfs_data
 1589  18/09/19 13:07:23 sudo ceph fs new cephfs cephfs-metadata cephfs_data
 1590  18/09/19 13:07:29 sudo ceph fs new cephfs cephfs-metadata cephfs-data

 1591  18/09/19 13:09:16 sudo ceph osd crush tree
 1592  18/09/19 13:50:44 sudo ceph fs delete cephfs
 1593  18/09/19 13:50:50 sudo ceph fs rm cephfs
 1594  18/09/19 13:51:44 sudo ceph fs rm cephfs --yes-i-really-mean-it
 1595  18/09/19 13:52:22 sudo ceph osd pool rm  cephfs-metadata
 1596  18/09/19 13:52:33 sudo ceph osd pool rm  cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
 1597  18/09/19 13:52:41 sudo ceph osd crush tree
 1598  18/09/19 13:53:41 sudo ceph osd crush -h
 1599  18/09/19 13:53:49 sudo ceph osd crush ls
 1600  18/09/19 13:54:05 sudo ceph osd crush ls ds-507
 1601  18/09/19 13:55:05 sudo ceph osd crush rule list
 1602  18/09/19 13:56:14 sudo ceph osd pool create cephfs-metadata 1  crush-ruleset-name ssd
 1603  18/09/19 13:57:24 sudo ceph osd pool create cephfs-metadata 1  crush-ruleset-name 1
 1604  18/09/19 13:57:38 sudo ceph osd crush rule list
 1605  18/09/19 13:58:35 sudo ceph osd pool create cephfs-metadata 1  ssd
 1606  18/09/19 13:59:00 sudo ceph fs new cephfs cephfs-metadata cephfs-data
 1607  18/09/19 14:00:40 history
 1655  18/09/19 15:23:55 sudo ceph osd pool create cephfs-metadata 32  ssd
 1656  18/09/19 15:24:22 sudo ceph health
 1657  18/09/19 15:24:29 sudo ceph -s
 1658  18/09/19 15:26:56 sudo ceph health detail
 1659  18/09/19 15:27:39 sudo ceph pg 11.1b query
 1660  18/09/19 15:28:34 history
 1661  18/09/19 15:28:42 sudo ceph osd pool rm  cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
 1662  18/09/19 15:28:55 sudo ceph osd pool create cephfs-metadata 1  ssd
 1663  18/09/19 15:29:28 sudo ceph osd pool rm  cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
 1664  18/09/19 15:29:33 sudo ceph osd pool create cephfs-metadata 3  ssd
 1665  18/09/19 15:30:02 sudo ceph osd pool rm  cephfs-data cephfs-data --yes-i-really-really-mean-it
 1666  18/09/19 15:30:15 sudo ceph osd pool create cephfs-data 1
 1667  18/09/19 15:30:49 sudo ceph osd pool rm  cephfs-data cephfs-data --yes-i-really-really-mean-it
 1668  18/09/19 15:30:56 sudo ceph osd pool rm  cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
 1669  18/09/19 15:31:58 sudo ceph osd pool create --help
 1670  18/09/19 15:35:06 sudo ceph osd pool create cephfs-data 5 5
 1671  18/09/19 15:35:56 sudo ceph osd pool set cephfs-data size 1
 1672  18/09/19 15:36:24 sudo ceph osd pool create cephfs-metadata 1  ssd
 1673  18/09/19 15:36:41 sudo ceph osd pool set cephfs-metadata size 1
 1674  18/09/19 15:37:04 history
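
# The crush-ruleset-name attempts above predate the Luminous device-class syntax.
# A sketch of the newer form (rule name "ssd-rule" is illustrative, not from the session):
sudo ceph osd crush rule create-replicated ssd-rule default host ssd
sudo ceph osd pool create cephfs-metadata 32 32 replicated ssd-rule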

# RAM-disk journals: create brd devices and rebuild FileStore journals on them
rmmod brd                          # unload brd so rd_size can be changed
modprobe brd rd_size=2097152       # rd_size is in KiB: each /dev/ramN is 2 GiB
mke2fs -m 0 /dev/ram0 1048576      # -m 0: no reserved blocks
mke2fs -m 0 /dev/ram1 1048576
mke2fs -m 0 /dev/ram2 1048576
sudo ceph-osd -i 0 --mkjournal     # recreate the journal for osd.0 (run with the OSD stopped)
sudo ceph-osd -i 1 --mkjournal
sudo ceph-osd -i 2 --mkjournal

# More shell history: checking the OSD services and journal paths
  120  19/09/19 14:19:41 sudo systemctl status ceph-osd*
  121  19/09/19 14:19:50 ls -l /var/lib/ceph/osd/ceph-4/journal
  122  19/09/19 14:19:52 sudo ls -l /var/lib/ceph/osd/ceph-4/journal
  123  19/09/19 14:19:56 sudo ls -l /var/lib/ceph/osd/ceph-4/
  124  19/09/19 14:29:10 history
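
# With osd_journal set in ceph.conf (fragments below), the effective journal
# path per daemon can be checked with ceph-conf:
sudo ceph-conf --name osd.4 --lookup osd_journal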

# ceph.conf fragments pointing each OSD's journal at a RAM disk:
[osd.3]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram0

[osd.4]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram1

[osd.5]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram2

[osd.6]
host = ds-001
cluster_addr = 131.154.129.145
public_addr = 131.154.128.145
osd_journal = /dev/ram3
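
# A sketch of moving one OSD onto its RAM-disk journal (repeat per OSD id).
# The flush must run against the old journal, i.e. before osd_journal is
# repointed in ceph.conf; note a RAM-disk journal is lost on reboot.
sudo systemctl stop ceph-osd@3
sudo ceph-osd -i 3 --flush-journal   # drain the old journal into the object store
sudo ceph-osd -i 3 --mkjournal       # initialize the new journal at osd_journal
sudo systemctl start ceph-osd@3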