ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-001 ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-001 ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-001 ceph-deploy --overwrite-conf osd create --data /dev/sde ds-001 ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-002 ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-002 ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-002 ceph-deploy --overwrite-conf osd create --data /dev/sde ds-002 ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-004 ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-004 ceph-deploy --overwrite-conf osd create --data /dev/sde ds-004 ceph-deploy --overwrite-conf osd create --data /dev/sdf ds-004 ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-303 ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-303 ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-303 ceph-deploy --overwrite-conf osd create --data /dev/sde ds-303 ceph-deploy --overwrite-conf osd create --data /dev/sdb ds-304 ceph-deploy --overwrite-conf osd create --data /dev/sdc ds-304 ceph-deploy --overwrite-conf osd create --data /dev/sdd ds-304 ceph-deploy --overwrite-conf osd create --data /dev/sde ds-304 ceph-deploy --overwrite-conf osd create --data /dev/sde ds-002 sudo ceph dashboard set-login-credentials admin xxxxx ceph-deploy --overwrite-conf mds create ds-507 ceph-deploy --overwrite-conf mon add ds-507 sudo ceph auth get-or-create mds.ds-507 mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-ds-507/keyring sudo ceph mds stat 1434 17/09/19 14:51:09 ceph osd pool create cephfs_data 100 1435 17/09/19 14:51:14 sudo ceph osd pool create cephfs_data 100 1436 17/09/19 14:51:26 sudo ceph osd pool create cephfs_metadata 100 1437 17/09/19 14:51:45 sudo ceph fs new cephfs cephfs_metadata cephfs_data 1438 17/09/19 14:51:57 sudo ceph fs ls 1439 17/09/19 14:52:10 sudo ceph mds stat 1440 17/09/19 
14:56:16 sudo ceph fs status 1441 17/09/19 14:56:52 sudo ceph fs set cephfs max_mds 1 1442 17/09/19 14:56:55 sudo ceph fs status 1443 17/09/19 14:57:48 # 1444 17/09/19 14:58:15 sudo ceph auth get-or-create client.1 mon 'allow r' mds 'allow r, allow rw path=/mnt/ceph/' osd 'allow rw' 1447 17/09/19 15:03:39 sudo ceph auth get client.1 1448 17/09/19 15:04:47 ceph auth get-or-create client.2 mon 'allow r' mds 'allow rw' osd 'allow rw pool=' 1449 17/09/19 15:05:15 sudo ceph auth get-or-create client.1 mon 'allow r' mds 'allow rw' osd 'allow rw pool=cephfs-data' 1450 17/09/19 15:35:47 ceph-deploy --overwrite-conf mds create --data /dev/nvme0n1 ds-507 1451 17/09/19 15:36:05 ceph-deploy --overwrite-conf mds create --help 1452 17/09/19 15:36:11 ceph-deploy mds create --help 1453 17/09/19 15:39:07 sudo ceph osd tree 1454 17/09/19 15:42:51 cat ceph.conf 1455 17/09/19 15:42:57 ls -l /var/lib/ceph/ 1456 17/09/19 15:43:01 sudo ls -l /var/lib/ceph/ 1457 17/09/19 15:43:06 sudo ls -l /var/lib/ceph/mds 1458 17/09/19 15:43:15 sudo ls -l /var/lib/ceph/mds/ceph-ds-507 1459 17/09/19 15:43:54 history 1460 17/09/19 15:45:48 ceph-deploy disk list 1461 17/09/19 15:45:58 ceph-deploy disk list ds-507 1462 17/09/19 15:46:34 ceph-deploy disk prepare ds-507:/dev/nvme0n1 1463 17/09/19 15:46:46 ceph-deploy disk zap ds-507:/dev/nvme0n1 1464 17/09/19 15:46:55 ceph-deploy disk zap ds-507 /dev/nvme0n1 1465 17/09/19 15:47:20 ceph-deploy disk activate ds-507 /dev/nvme0n1 1466 17/09/19 18:28:34 sudo ceph orchestator device ls 1467 17/09/19 18:29:09 sudo ceph device ls 1468 17/09/19 18:59:14 sudo ceph osd numa-status 1469 17/09/19 19:05:35 sudo ls -l /var/lib/ceph/ 1470 17/09/19 19:05:39 sudo ls -l /var/lib/ceph/mds 1471 17/09/19 19:05:44 sudo ls -l /var/lib/ceph/mds/ceph-ds-507 1472 17/09/19 19:05:47 sudo ls -l /var/lib/ceph/mds/ 1473 17/09/19 19:05:49 sudo ls -l /var/lib/ceph/ 1474 17/09/19 19:05:55 sudo ls -l /var/lib/ceph/bootstrap-mds 1475 17/09/19 19:10:15 sudo ceph pool -h 1476 17/09/19 19:10:32 
sudo ceph -h 1477 17/09/19 19:10:52 sudo ceph fs ls 1478 17/09/19 19:10:58 sudo ceph fs -h 1479 17/09/19 19:11:22 sudo ceph fs status 1480 17/09/19 19:13:05 ceph osd lspools 1481 17/09/19 19:13:09 sudo ceph osd lspools 1483 18/09/19 10:17:59 sudo ceph fs rm cephfs --yes-i-really-mean-it 1484 18/09/19 10:18:20 sudo ceph pool list 1485 18/09/19 10:18:33 sudo ceph pool status 1486 18/09/19 10:19:40 #ceph osd pool delete --yes-i-really-really-mean-it 1487 18/09/19 10:19:59 sudo ceph osd pool delete cephfs_data --yes-i-really-really-mean-it 1488 18/09/19 10:20:17 sudo ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it 1489 18/09/19 10:21:37 sudo ceph tell mon.\* injectargs '--mon-allow-pool-delete=true' 1490 18/09/19 10:21:46 sudo ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it 1491 18/09/19 10:23:57 sudo ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it 1492 18/09/19 10:24:47 sudo ceph-deploy --overwrite-conf osd create --data /dev/nvme0n1 ds-507 1493 18/09/19 10:24:54 ceph-deploy --overwrite-conf osd create --data /dev/nvme0n1 ds-507 1494 18/09/19 10:25:29 sudo ceph auth del mds."ds-507" 1495 18/09/19 10:29:29 sudo ceph auth get-or-create mds.ds-507 mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-ds-507/keyring 1496 18/09/19 10:29:48 sudo ls -l /var/lib/ceph/mds/ceph-ds-507/ 1497 18/09/19 10:29:51 sudo ls -l /var/lib/ceph/mds/ 1498 18/09/19 10:30:12 sudo ls -l /var/lib/ceph/mds/ceph-ds-507 1499 18/09/19 10:30:13 sudo ls -l /var/lib/ceph/mds/ceph-ds-507/ 1500 18/09/19 10:30:41 sudo ls -l /var/lib/ceph/mds/ 1501 18/09/19 10:30:50 sudo rm -rf /var/lib/ceph/mds/* 1502 18/09/19 10:30:55 sudo ceph auth get-or-create mds.ds-507 mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-ds-507/keyring 1503* 18/09/19 10:31:11 sudo mkdir /var/lib/ceph/mds/ceph-ds-507 1504 18/09/19 10:31:26 sudo ls -l /var/lib/ceph/mds/ 1505 
18/09/19 10:31:42 sudo ceph auth get-or-create mds.ds-507 mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' 1506 18/09/19 10:32:03 sudo vim /var/lib/ceph/mds/ceph-ds-507/keyring 1507 18/09/19 10:37:23 ceph-deploy mds create ds-507 1508 18/09/19 10:37:45 ceph-deploy mds delete ds-507 1509 18/09/19 10:37:52 ceph-deploy mds remove ds-507 1510 18/09/19 10:38:40 sudo ceph health 1511 18/09/19 10:38:52 sudo ceph mds status 1512 18/09/19 10:39:10 sudo ceph mds stat 1513 18/09/19 10:39:43 ceph-deploy mds create ds-507 1514 18/09/19 10:40:11 sudo rm /var/lib/ceph/mds/ceph-ds-507/keyring 1515 18/09/19 10:40:15 ceph-deploy mds create ds-507 1516 18/09/19 10:40:35 sudo rm -rf /var/lib/ceph/mds/ceph-ds-507 1517 18/09/19 10:40:43 ceph-deploy mds create ds-507 1518 18/09/19 10:41:21 sudo ls -l /var/lib/ceph/bootstrap-mds/ceph.keyring 1519 18/09/19 10:41:26 sudo ls -l /var/lib/ceph/bootstrap-mds/ 1520 18/09/19 10:44:24 sudo ceph auth list 1521 18/09/19 10:45:28 sudo ceph auth del client.1 1522 18/09/19 10:45:58 sudo ceph auth del mds. 
1523 18/09/19 10:46:02 sudo ceph auth del mds.ds-507 1524 18/09/19 10:46:08 ceph-deploy mds create ds-507 1525 18/09/19 10:48:44 sudo ceph osd lspools 1526 18/09/19 11:04:59 sudo rpm -qa | grep ceph 1527 18/09/19 11:05:53 sudo ls -l /etc/yum.repos.d/ 1528 18/09/19 11:06:01 sudo cat /etc/yum.repos.d/ceph.repo 1529 18/09/19 11:43:58 sudo ceph osd crush tree 1530 18/09/19 12:09:19 sudo ceph osd pool create cephfs-data 64 1531 18/09/19 12:09:37 sudo ceph osd pool create cephfs-metadata 1 1532 18/09/19 12:10:39 sudo ceph osd pool set cephfs-data size 1 1533 18/09/19 12:12:20 sudo ceph osd crush tree 1534 18/09/19 12:13:35 sudo ceph osd getcrushmap -o crushmap.txt 1535 18/09/19 12:13:39 less crushmap.txt 1536 18/09/19 12:14:22 crushtool -d crushmap.txt -o crushmap.decompiled 1537 18/09/19 12:14:27 less crushmap.decompiled 1538 18/09/19 12:17:38 sudo ceph osd lspools 1539 18/09/19 12:24:24 sudo ceph osd tree 1540 18/09/19 12:26:23 less crushmap.decompiled 1541 18/09/19 12:28:02 sudo ceph osd crush add-bucket ssd root 1542 18/09/19 12:28:27 sudo ceph osd crush add-bucket ds-507 host 1543 18/09/19 12:29:00 sudo ceph osd crush move ds-507 root=ssd 1544 18/09/19 12:30:08 sudo ceph osd crush set osd.19 .1102 root=ssd host=ds-507 1545 18/09/19 12:30:42 sudo ceph osd crush set osd.19 1.0 root=ssd host=ds-507 1546 18/09/19 12:31:42 sudo ceph osd crush tree 1547 18/09/19 12:32:30 history 1548 18/09/19 12:32:42 sudo ceph osd pool remove cephfs-data 1549 18/09/19 12:33:21 sudo ceph osd pool delete cephfs-data 1550 18/09/19 12:33:29 sudo ceph osd pool delete cephfs-data --yes-i-really-really-mean-it 1551 18/09/19 12:33:45 sudo ceph osd pool delete cephfs-data cephfs-data --yes-i-really-really-mean-it 1552 18/09/19 12:34:02 sudo ceph osd pool delete cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it 1553 18/09/19 12:39:25 history 1554 18/09/19 12:40:22 sudo ceph osd pool create cephfs-data 64 crushRoot default 1555 18/09/19 12:40:39 sudo ceph osd crush tree 1556 18/09/19 
12:42:00 history 1557 18/09/19 12:42:07 sudo ceph osd getcrushmap -o crushmap.txt 1558 18/09/19 12:42:15 crushtool -d crushmap.txt -o crushmap.decompiled 1559 18/09/19 12:42:19 less crushmap.decompiled 1560 18/09/19 12:42:58 sudo ceph osd pool create cephfs-data 64 1561 18/09/19 12:43:25 sudo ceph osd pool create cephfs-metadata 1 crushRoot -22 1562 18/09/19 12:43:31 less crushmap.decompiled 1563 18/09/19 12:45:03 sudo ceph osd pool create cephfs-metadata 1 crushRoot -24 1564 18/09/19 12:47:46 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd 1565 18/09/19 12:47:59 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name -24 1566 18/09/19 12:48:09 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name "ssd" 1567 18/09/19 12:49:47 sudo ceph osd crush rule ls 1568 18/09/19 12:49:57 history 1569 18/09/19 12:55:51 #ceph osd crush set osd.0 1.0 root=default datacenter=dc1 room=room1 row=foo rack=bar host=foo-bar-1 1570 18/09/19 12:56:10 #sudo ceph osd crush set osd 1571 18/09/19 12:56:19 less crushmap.decompiled 1572 18/09/19 12:57:02 sudo ceph osd crush set osd.19 1.0 root=ssd 1573 18/09/19 12:57:12 sudo ceph osd crush rule ls 1574 18/09/19 13:01:32 sudo ceph osd crush rule create-simple ssd ssd ds-507 firstn 1575 18/09/19 13:01:50 less crushmap.decompiled 1576 18/09/19 13:03:29 sudo ceph osd crush rule create-simple ssd ssd host firstn 1577 18/09/19 13:03:37 sudo ceph osd crush rule ls 1578 18/09/19 13:04:27 history 1579 18/09/19 13:05:08 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd 1580 18/09/19 13:05:13 sudo ceph osd crush rule ls 1581 18/09/19 13:05:18 history 1582 18/09/19 13:05:29 sudo ceph osd getcrushmap -o crushmap.txt 1583 18/09/19 13:05:39 crushtool -d crushmap.txt -o crushmap.decompiled 1584 18/09/19 13:05:44 less crushmap.decompiled 1585 18/09/19 13:05:55 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name 1 1586 18/09/19 13:06:09 sudo ceph osd pool create cephfs-metadata 1 crushRoot 1 1587 
18/09/19 13:06:17 sudo ceph osd pool create cephfs-metadata 1 1 1588 18/09/19 13:07:11 sudo ceph fs new cephfs cephfs_metadata cephfs_data 1589 18/09/19 13:07:23 sudo ceph fs new cephfs cephfs-metadata cephfs_data 1590 18/09/19 13:07:29 sudo ceph fs new cephfs cephfs-metadata cephfs-data 1591 18/09/19 13:09:16 sudo ceph osd crush tree 1592 18/09/19 13:50:44 sudo ceph fs delete cephfs 1593 18/09/19 13:50:50 sudo ceph fs rm cephfs 1594 18/09/19 13:51:44 sudo ceph fs rm cephfs --yes-i-really-mean-it 1595 18/09/19 13:52:22 sudo ceph osd pool rm cephfs-metadata 1596 18/09/19 13:52:33 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it 1597 18/09/19 13:52:41 sudo ceph osd crush tree 1598 18/09/19 13:53:41 sudo ceph osd crush -h 1599 18/09/19 13:53:49 sudo ceph osd crush ls 1600 18/09/19 13:54:05 sudo ceph osd crush ls ds-507 1601 18/09/19 13:55:05 sudo ceph osd crush rule list 1602 18/09/19 13:56:14 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name ssd 1603 18/09/19 13:57:24 sudo ceph osd pool create cephfs-metadata 1 crush-ruleset-name 1 1604 18/09/19 13:57:38 sudo ceph osd crush rule list 1605 18/09/19 13:58:35 sudo ceph osd pool create cephfs-metadata 1 ssd 1606 18/09/19 13:59:00 sudo ceph fs new cephfs cephfs-metadata cephfs-data 1607 18/09/19 14:00:40 history 1655 18/09/19 15:23:55 sudo ceph osd pool create cephfs-metadata 32 ssd 1656 18/09/19 15:24:22 sudo ceph health 1657 18/09/19 15:24:29 sudo ceph -s 1658 18/09/19 15:26:56 sudo ceph health detail 1659 18/09/19 15:27:39 sudo ceph pg 11.1b query 1660 18/09/19 15:28:34 history 1661 18/09/19 15:28:42 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it 1662 18/09/19 15:28:55 sudo ceph osd pool create cephfs-metadata 1 ssd 1663 18/09/19 15:29:28 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it 1664 18/09/19 15:29:33 sudo ceph osd pool create cephfs-metadata 3 ssd 1665 18/09/19 15:30:02 sudo ceph osd pool rm 
cephfs-data cephfs-data --yes-i-really-really-mean-it 1666 18/09/19 15:30:15 sudo ceph osd pool create cephfs-data 1 1667 18/09/19 15:30:49 sudo ceph osd pool rm cephfs-data cephfs-data --yes-i-really-really-mean-it 1668 18/09/19 15:30:56 sudo ceph osd pool rm cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it 1669 18/09/19 15:31:58 sudo ceph osd pool create --help 1670 18/09/19 15:35:06 sudo ceph osd pool create cephfs-data 5 5 1671 18/09/19 15:35:56 sudo ceph osd pool set cephfs-data size 1 1672 18/09/19 15:36:24 sudo ceph osd pool create cephfs-metadata 1 ssd 1673 18/09/19 15:36:41 sudo ceph osd pool set cephfs-metadata size 1 1674 18/09/19 15:37:04 history rmmod brd modprobe brd rd_size=2097152 mke2fs -m 0 /dev/ram0 1048576 mke2fs -m 0 /dev/ram1 1048576 mke2fs -m 0 /dev/ram2 1048576 mke2fs -m 0 /dev/ram3 1048576 sudo ceph-osd -i 0 --mkjournal sudo ceph-osd -i 1 --mkjournal sudo ceph-osd -i 2 --mkjournal 120 19/09/19 14:19:41 sudo systemctl status ceph-osd* 121 19/09/19 14:19:50 ls -l /var/lib/ceph/osd/ceph-4/journal 122 19/09/19 14:19:52 sudo ls -l /var/lib/ceph/osd/ceph-4/journal 123 19/09/19 14:19:56 sudo ls -l /var/lib/ceph/osd/ceph-4/ 124 19/09/19 14:29:10 history [osd.3] host = ds-001 cluster_addr = 131.154.129.145 public_addr = 131.154.128.145 osd_journal = /dev/ram0 [osd.4] host = ds-001 cluster_addr = 131.154.129.145 public_addr = 131.154.128.145 osd_journal = /dev/ram1 [osd.5] host = ds-001 cluster_addr = 131.154.129.145 public_addr = 131.154.128.145 osd_journal = /dev/ram2 [osd.6] host = ds-001 cluster_addr = 131.154.129.145 public_addr = 131.154.128.145 osd_journal = /dev/ram3