====== Installation and Configuration of NFS Cluster ======

Authors:
   * Paolo E. Mazzon (DEI)
   * Matteo Menguzzato (DFA)
   * Gianpietro Sella (DISC)

===== Prerequisites =====

2 nodes with:
   * Updated CentOS 7
   * Make sure that yum autoupdate is disabled:
<code bash>
[root@controller-01 ~]# grep ENA /etc/sysconfig/yum-autoupdate
# ENABLED
ENABLED="false"
</code>
   * At least 20 GB of disk for the operating system, the OpenStack software and the related log files
   * SELinux configured as "Disabled" (''/etc/selinux/config'')
   * EPEL 7
   * a data network, IP 192.168.61.150 for node 1 and IP 192.168.61.151 for node 2
   * a SAN network 192.168.40.xxx
   * a Dell EqualLogic SAN with IP 192.168.40.110
   * a VIP address 192.168.61.180 on the data network
   * 2 iSCSI volumes on the Dell EqualLogic
   * perform the iSCSI login of the EqualLogic volumes on the 2 nodes (see the sketch below) and obtain, for example, the following devices: /dev/disk/by-id/scsi-3603be85f6c4bd251a74c25ad6409be21 /dev/disk/by-id/scsi-3603be85f6c4be246a74cb5ac64090e0f

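The iSCSI login can be performed with ''iscsiadm''; a minimal sketch, assuming the EqualLogic portal at 192.168.40.110 and using a placeholder IQN (use the target names returned by the discovery):

<code bash>
# discover the targets exported by the EqualLogic portal
iscsiadm -m discovery -t sendtargets -p 192.168.40.110

# log in to each discovered target (the IQN below is a placeholder)
iscsiadm -m node -T iqn.2001-05.com.equallogic:example-volume -p 192.168.40.110 --login

# the corresponding persistent device names appear under /dev/disk/by-id/
ls -l /dev/disk/by-id/ | grep scsi-3603be85f
</code>
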
===== On 2 nodes =====

<code bash>
yum install nfs-utils nfs-utils-lib
yum install pcs fence-agents-all

# modify /usr/lib/ocf/resource.d/heartbeat/nfsserver as follows:

nfsserver_monitor ()
{
 local pid
 pid=$(pgrep "nfsd")
 if [ -n "$pid" ]; then
         ocf_log debug "process ok ---$rc"
 else
         ocf_log debug "no process"
         return $OCF_NOT_RUNNING
 fi
...........

firewall-cmd --add-service=high-availability
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --add-port=2049/tcp
firewall-cmd --permanent --add-port=2049/tcp

# set the password for user hacluster, for example 'hapassword'
passwd hacluster

# insert in /etc/hosts
192.168.61.180 nfscluster

systemctl start pcsd.service
systemctl enable pcsd.service

</code>

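Before building the cluster, it is worth checking on both nodes that pcsd is running and that the firewall rules are in place (standard ''systemctl'' and ''firewall-cmd'' queries):

<code bash>
# pcsd must be active and enabled on both nodes
systemctl status pcsd.service

# the high-availability service and the NFS port must appear
# in the permanent firewall configuration
firewall-cmd --permanent --list-services
firewall-cmd --permanent --list-ports
</code>
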
===== On 1 node =====

<code bash>

pcs cluster auth cld-blu-01.data.pd.infn.it cld-blu-02.data.pd.infn.it -u hacluster -p hapassword
pcs cluster setup --start --name nfscluster cld-blu-01.data.pd.infn.it cld-blu-02.data.pd.infn.it
pcs cluster enable --all
pcs cluster status

# create the stonith fence devices, where dracpassword is the password used to log in to the DRAC device of the nodes
pcs stonith create drac-cld-blu-01 fence_drac5 ipaddr=cld-blu-01-rmc.lan login=root passwd=dracpassword cmd_prompt="admin1->" port=22 secure=1 pcmk_host_check=static-list pcmk_host_list=cld-blu-01.data.pd.infn.it delay=0 op monitor interval=600s
pcs stonith create drac-cld-blu-02 fence_drac5 ipaddr=cld-blu-02-rmc.lan login=root passwd=dracpassword cmd_prompt="admin1->" port=22 secure=1 pcmk_host_check=static-list pcmk_host_list=cld-blu-02.data.pd.infn.it delay=15 op monitor interval=600s
pcs constraint location drac-cld-blu-01 prefers cld-blu-01.data.pd.infn.it=-INFINITY
pcs constraint location drac-cld-blu-02 prefers cld-blu-02.data.pd.infn.it=-INFINITY

# create the volume with LVM
vgremove nfsclustervg
pvcreate /dev/disk/by-id/scsi-3603be85f6c4bd251a74c25ad6409be21 /dev/disk/by-id/scsi-3603be85f6c4be246a74cb5ac64090e0f
vgcreate nfsclustervg /dev/disk/by-id/scsi-3603be85f6c4bd251a74c25ad6409be21 /dev/disk/by-id/scsi-3603be85f6c4be246a74cb5ac64090e0f
lvcreate -l 100%VG -n nfsclusterlv nfsclustervg
mkfs.ext4 /dev/nfsclustervg/nfsclusterlv
mkdir /nfscluster
mount /dev/nfsclustervg/nfsclusterlv /nfscluster
mkdir -p /nfscluster/exports
mkdir -p /nfscluster/exports/glance
mkdir -p /nfscluster/exports/nova
umount /nfscluster
vgchange -an nfsclustervg

</code>

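At this point the cluster, the fencing devices and the volume group can be verified with standard ''pcs'' and LVM queries:

<code bash>
# both nodes should be online and the fence devices started
pcs status
pcs stonith show

# the volume group and logical volume should exist (deactivated after vgchange -an)
vgs nfsclustervg
lvs nfsclustervg
</code>
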
===== On 2 nodes =====
<code>
# in /etc/lvm/lvm.conf insert:
volume_list=[]
# then rebuild the initramfs:
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
# and reboot the node
</code>

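After the reboot, the cluster volume group should no longer be activated automatically by the nodes; a quick check with standard LVM tools:

<code bash>
# the logical volume should be reported as inactive on both nodes
lvscan | grep nfsclusterlv
lvs nfsclustervg
</code>
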
===== On 1 node =====

<code bash>

pcs resource create nfsclusterlv LVM volgrpname=nfsclustervg exclusive=true --group nfsclusterha
pcs resource create nfsclusterdata Filesystem device="/dev/nfsclustervg/nfsclusterlv" directory="/nfscluster" fstype="ext4" --group nfsclusterha
pcs resource create nfsclusterserver nfsserver nfs_shared_infodir=/nfscluster/nfsinfo nfs_no_notify=true --group nfsclusterha
pcs resource create nfsclusterroot exportfs clientspec=192.168.61.0/255.255.255.0 options=rw,sync,no_root_squash directory=/nfscluster/exports fsid=0 --group nfsclusterha
pcs resource create nfsclusternova exportfs clientspec=192.168.61.0/255.255.255.0 options=rw,sync,no_root_squash directory=/nfscluster/exports/nova fsid=1 --group nfsclusterha
pcs resource create nfsclusterglance exportfs clientspec=192.168.61.0/255.255.255.0 options=rw,sync,no_root_squash directory=/nfscluster/exports/glance fsid=2 --group nfsclusterha
pcs resource create nfsclustervip IPaddr2 ip=192.168.61.180 cidr_netmask=24 --group nfsclusterha
pcs resource create nfsclusternotify nfsnotify source_host=192.168.61.180 --group nfsclusterha
pcs cluster status

</code>

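The nfsclusterha resource group should now be started on one of the two nodes and the shares exported; this can be checked with ''pcs'' and, on the node currently running the group, with ''exportfs'':

<code bash>
# all resources of the nfsclusterha group should be Started on the same node
pcs status

# on the node running the group, the root, nova and glance exports should be listed
exportfs -v
</code>
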
===== On compute nodes =====
<code>
mkdir /Instances
# replace the /var/lib/nova/instances directory with a symbolic link to /Instances
rm -rf /var/lib/nova/instances
ln -s /Instances /var/lib/nova/instances
chown nova:nova /Instances
# insert in /etc/fstab:
192.168.61.180:nova /Instances nfs vers=4,intr 0 0
mount -a
chown nova:nova /Instances
</code>
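A quick check that the share is mounted and writable by the nova user (the test file name is arbitrary):

<code bash>
# the NFS mount must be present and owned by nova
df -h /Instances
ls -ld /var/lib/nova/instances /Instances

# nova must be able to write on the share
su -s /bin/sh -c "touch /Instances/write-test && rm /Instances/write-test" nova
</code>
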
===== On controller/network nodes =====
<code>
mkdir /Images
# replace the /var/lib/glance/images directory with a symbolic link to /Images
rm -rf /var/lib/glance/images
ln -s /Images /var/lib/glance/images
chown glance:glance /Images
# insert in /etc/fstab:
192.168.61.180:glance /Images nfs vers=4,intr 0 0
mount -a
chown glance:glance /Images
</code>

  
