
pcs2

by Knowledge Store In Hyunsoft 2019. 1. 22.

Installing DRBD on CentOS




Preliminary setup

A separate storage disk must be attached to each VirtualBox VM (it is used as /dev/sdb below).


vi /etc/hosts

192.168.117.22 storage1

192.168.117.24 storage2


setenforce 0


vi /etc/selinux/config

SELINUX=disabled
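A quick check, not in the original, that SELinux is off for the current session and will stay disabled after a reboot:

getenforce   # should print Permissive after setenforce 0

sestatus     # shows the current mode and the mode from /etc/selinux/config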





Server 1 (192.168.117.22): hostnamectl set-hostname storage1


Server 2 (192.168.117.24): hostnamectl set-hostname storage2






On both servers 1 and 2


rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org


rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm


yum install -y kmod-drbd84 drbd84-utils
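Before continuing, it is worth verifying that the kernel module actually loads; a quick check, not part of the original steps:

modprobe drbd

lsmod | grep drbd   # the drbd module should be listed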






Apply the firewall rule on server 1 (accept DRBD traffic from the peer, storage2)


firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.117.24" port port="7789" protocol="tcp" accept'


firewall-cmd --reload






Apply the firewall rule on server 2 (accept DRBD traffic from the peer, storage1)


firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.117.22" port port="7789" protocol="tcp" accept'


firewall-cmd --reload
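The applied rule can be verified on either node with (not in the original):

firewall-cmd --list-rich-rules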






On both servers 1 and 2


mv /etc/drbd.d/global_common.conf /etc/drbd.d/global_common.conf.orig


vi /etc/drbd.d/global_common.conf 






global {
    usage-count yes;
}

common {
    net {
        protocol C;
    }
}




yum install -y lvm2




pvcreate /dev/sdb


vgcreate drbd_vg /dev/sdb


lvcreate -l 100%FREE -n drbd_lv drbd_vg
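Verify the volume exists before layering DRBD on it (standard LVM commands, not in the original):

pvs

vgs

lvs   # drbd_lv should appear under drbd_vg, using all of its free space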






vi /etc/drbd.d/r0.res




resource r0 {
    on storage1 {
        device /dev/drbd0;
        disk /dev/drbd_vg/drbd_lv;
        meta-disk internal;
        address 192.168.117.22:7789;
    }
    on storage2 {
        device /dev/drbd0;
        disk /dev/drbd_vg/drbd_lv;
        meta-disk internal;
        address 192.168.117.24:7789;
    }
}




drbdadm create-md r0




drbdadm up r0
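At this point neither node has been promoted, so a check such as the following (not in the original) should typically show both sides connected as Secondary with Inconsistent data:

cat /proc/drbd   # expect cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent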






On server 1


drbdadm primary --force r0


mkfs.xfs /dev/drbd0
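The forced promotion kicks off the initial full sync to storage2; it can be watched until both sides reach UpToDate (not in the original):

watch -n1 cat /proc/drbd   # wait for ds:UpToDate/UpToDate before running failover tests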






Test on server 1


mount /dev/drbd0 /mnt


touch /mnt/test-01.txt


ls /mnt


umount /mnt






Test on server 2


On server 1: drbdadm secondary r0


On server 2: drbdadm primary r0


mount /dev/drbd0 /mnt


ls /mnt


umount /mnt
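Before handing control to Pacemaker below, it is cleaner to leave both nodes in the Secondary role; a short sketch, run on server 2 after the unmount above (not in the original):

drbdadm secondary r0

cat /proc/drbd   # both sides should now show ro:Secondary/Secondary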








Status check (either server)


more /proc/drbd


drbdadm status










On both servers 1 and 2


yum install -y pacemaker pcs psmisc policycoreutils-python 

yum groupinstall -y 'High Availability'


passwd hacluster


systemctl start pcsd
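If this is being scripted, the interactive password prompt can be avoided; a sketch, where Hacluster-Pass1 is a placeholder to replace with your own password:

echo 'Hacluster-Pass1' | passwd --stdin hacluster   # --stdin is supported on CentOS 7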











On server 1


pcs cluster auth storage1 storage2



pcs cluster setup --start --name nfs-cluster storage1 storage2 --force
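pcs cluster auth prompts for the hacluster user and the password set above. Once setup finishes, the cluster can be confirmed before loading resources (not in the original):

pcs cluster status

pcs status corosync   # both storage1 and storage2 should be listed as members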



pcs cluster cib drbd_cfg

pcs -f drbd_cfg resource create Data ocf:linbit:drbd drbd_resource=r0 op monitor timeout="20" interval="20" role="Slave" op monitor timeout="20" interval="10" role="Master"

pcs -f drbd_cfg resource create Storage Filesystem device="/dev/drbd0" directory="/storage" fstype="xfs" op monitor on-fail="restart"  interval="10" meta failure-timeout="30s" 

pcs -f drbd_cfg resource master DataSync Data master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true 

pcs -f drbd_cfg resource create NFS-Server systemd:nfs-server op monitor on-fail="restart"  interval="60"

pcs -f drbd_cfg resource group add HA-GROUP Storage  NFS-Server 

pcs -f drbd_cfg constraint colocation add HA-GROUP with DataSync INFINITY with-rsc-role=Master

pcs -f drbd_cfg constraint order promote DataSync then start Storage

pcs -f drbd_cfg constraint order promote DataSync then start NFS-Server 

pcs -f drbd_cfg resource create ping_check ocf:pacemaker:ping host_list="192.168.117.1" multiplier="1000" dampen=10s op monitor interval=5s

pcs -f drbd_cfg resource clone ping_check globally-unique=false

pcs cluster cib-push drbd_cfg
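The original never shows the NFS export itself. A minimal sketch, assuming clients on 192.168.117.0/24 and the DRBD-backed mount at /storage (both the network and export options are assumptions); the mkdir must be run on both nodes before the Filesystem resource first starts:

mkdir -p /storage   # mount point for the Filesystem resource, needed on both nodes

echo '/storage 192.168.117.0/24(rw,sync,no_root_squash)' >> /etc/exports   # example export, adjust to your environment

exportfs -ra   # takes effect on whichever node is running nfs-server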




pcs property set stonith-enabled=false



pcs resource create VIP IPaddr2 ip=192.168.117.111 cidr_netmask=24 --group HA-GROUP
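The group should now be running on one node with the VIP attached; a quick verification (not in the original):

pcs status resources

ip -4 addr show   # 192.168.117.111 should appear on the active node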

















On both servers 1 and 2


systemctl enable corosync


systemctl enable pcsd


systemctl enable pacemaker
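A failover can be exercised without pulling cables; a sketch using standby mode (not in the original):

pcs cluster standby storage1   # drain resources off storage1

pcs status   # HA-GROUP and the DRBD Master should move to storage2

pcs cluster unstandby storage1   # allow storage1 to host resources again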




Source: http://hyunsoft.tistory.com/ [Hyunsoft's Knowledge Space]
