1. .
RSA . ~/.ssh/id_rsa.pub /root/.ssh/authorized_keys
2. /etc/hosts
: 1.1.1.10 node1
~/.ssh/config ( )
ssh-keygen ~/.ssh/config
:
Host node1
Hostname node1
User root
/etc/hosts hostname ip,
hostaname /etc/hosts hostname
3. chrony
3.1 chrony : /home/korolev_s/chrony-2.<x>.deb (version digit lost in conversion — verify package name)
3.2 chrony. /etc/chrony.conf
driftfile /var/lib/chrony/drift
commandkey 1
keyfile /etc/chrony.keys
initstepslew 10 client1 client3 client
local stratum <N>
manual
allow <IP_1>
IP_1 - , ceph
driftfile /var/lib/chrony/drift
logdir /var/log/chrony
log measurements statistics tracking
keyfile /etc/chrony.keys
commandkey 2
server ip_of_ntpserver minpoll 2 maxpoll <N> polltarget 30
ip_of_ntpserver - ip ()
3.3 chrony
/etc/init.d/chronyd start
chrony :
chronyc tracking
chronyc sources
chronyc sourcestats
3. chronyd
chkconfig chronyd on
ceph ntpd .
, chronyd, init
ceph, /etc/init.d/ceph .
ntpd chronyd.
chronyd chkconfig
.
.1 /etc/ceph
,
mkdir /etc/ceph
.2 ceph
ceph-deploy new <hostname_of_node>
.3 ,
ceph-deploy disk list <hostname_of_node>
.
: - ,
ceph,
. disk zap .
ceph-deploy disk zap <hostname_of_node>:sd{a,b,c,d,e,f.....z}
:
ceph-deploy disk zap ceph1:sd{b,c,d}
sdb, sdc sdd ceph1
:
. osd
ceph-deploy osd create <hostname_of_node>:sd{b,c,d,e,f.....z}
.
ceph -s
ceph osd tree
, osd ( up),
ceph
/etc/init.d/ceph start
.
.1 , , :
ceph-deploy disk list <hostname_of_new_machine>
, ceph
:
/dev/sdb2 ceph journal, for /dev/sdb1
/dev/sdb1 ceph data, active, cluster ceph, osd.<N>, journal /dev/sdb2
.2
mkdir /etc/ceph
ceph-deploy new <hostname_of_new_node>
.3 , ,
: . .
ceph-deploy disk zap <hostname_of_new_node>:sd{b,c,d,......z}
. osd
ceph-deploy osd create <hostname_of_new_node>:sd{b,c,d,e,f.....z}
.
.1 , OSD
ceph osd out osd.<ID>
.2
service ceph stop osd.<ID>
.3 OSD
CRUSH: ceph osd crush remove osd.<ID>
. OSD
ceph auth del osd.<ID>
. OSD
ceph osd rm osd.<ID>
CRUSH
ceph osd crush remove <hostname> OSD