// Set in /etc/hosts on all nodes. Note: remove racdb1 from the 127.0.0.1 localhost entry; do the same for racdb2 on racdb2.

## Public Network - (eth0)
192.168.1.100   racdb1
192.168.1.101   racdb2

## Private Interconnect - (eth1)
192.168.2.100   racdb1-priv
192.168.2.101   racdb2-priv

## Virtual IP (VIP)
192.168.1.200   racdb1-vip
192.168.1.201   racdb2-vip

## Storage IP
192.168.1.195   storage1
192.168.2.195   storage-priv    (can be set on the Openfiler System tab)

racdb1) /etc/sysctl.conf (set the same on racdb2)
net.ipv4.ip_local_port_range = 1024 65000
net.core.rmem_default = 1048576
net.core.rmem_max = 1048576
net.core.wmem_default = 262144
net.core.wmem_max = 262144

sysctl -p    // reload the settings

Openfiler web administration
****************************
On racdb1 open a browser and go to https://storage1:446/
Click Add Exception -> Get Certificate -> Confirm Security Exception
username: openfiler
password: password

Go to the Services tab and enable "iSCSI target server" (the status must show Enabled).

Go to the System tab, Network Access Configuration   // add an entry for each node that will use the storage server
racdb1-priv   192.168.2. ....
racdb2-priv   ......

Go to the Volumes tab and create a partition on the disk: mode = primary, partition type = physical volume.

Now create volume groups
************************
In the Volumes section click on Volume Groups, enter the volume group name vg_rac, tick the check box for the physical volume and create the group. Then add logical volumes (partitions) in the volume group: 2 for the data group, 2 for the backup group, 1 for the OCR and voting disk.

Add logical volumes
*******************
In the Volumes section click on Add Volume.

volume name : racdb-crs
description : for OCR and voting disks
space       : 2048 (MB)
file system : iSCSI

volume name : racdb-asm1
description : for ASM
space       : 5000 (MB)
file system : iSCSI
(create the remaining ASM volumes the same way)

Now create targets against the logical volumes
**********************************************
Click on iSCSI Targets. When adding each target, replace the random default suffix (e.g. .1f4c) with:
.ocr
.racdb.asm1
.racdb.asm2
.racdb.asm3
.racdb.asm4
.racdb.asm5

Mapping
*******
Go to Target Configuration, choose the ocr target under "Select iSCSI Target" and click the Change button, then open the LUN Mapping tab and click the Map button for racdb-crs. Go back to Target Configuration, select racdb.asm1, click Change, open LUN Mapping and click Map for racdb-asm1. Continue the same way up to asm5.

Enabling Access
***************
On Target Configuration, choose the ocr target under "Select iSCSI Target" and click the Change button. Click on Network ACL and set both nodes to Allow. Repeat the process up to asm5.

Check the iscsi rpm
*******************
rpm -qa | grep iscsi-initiator-utils
// expect iscsi-initiator-utils-6.2.0.868-0.18.el5 -- install it on both nodes

Start the service on both nodes
*******************************
service iscsid start

Auto-start the services on restart
**********************************
chkconfig iscsid on
chkconfig iscsi on

Connect to storage1
ssh storage1
vim /etc/initiators.deny    // comment out all iqn. lines so the clients can see the storage

Checking storage
****************
racdb1) iscsiadm -m discovery -t sendtargets -p storage1

Log in to the targets on both nodes
***********************************
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.racdb.asm1 -p 192.168.1.195 -l
(repeat for .asm2, .asm3, .asm4, .asm5 and .ocr)

Now automate it
***************
Run for all targets (.asm and .ocr) on both nodes:
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.racdb.asm1 -p 192.168.1.195 --op update -n node.startup -v automatic
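The login and node.startup updates above can also be done in one small loop instead of one command per target; a minimal sketch, run as root on both nodes, assuming the IQN prefix and portal IP used in this setup:

#!/bin/sh
# Log in to every RAC target and mark it for automatic login on boot.
PORTAL=192.168.1.195
for t in racdb.asm1 racdb.asm2 racdb.asm3 racdb.asm4 racdb.asm5 ocr; do
    IQN="iqn.2006-01.com.openfiler:tsn.${t}"
    iscsiadm -m node -T "${IQN}" -p "${PORTAL}" -l
    iscsiadm -m node -T "${IQN}" -p "${PORTAL}" --op update -n node.startup -v automatic
done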
Configure persistent device names (device names will persist across system restarts)
*************************************************************************************
In Linux, iSCSI device names can change when the system restarts; this happens only with iSCSI devices.

On both nodes:
racdb1) (cd /dev/disk/by-path; ls -l *openfiler* | awk '{FS=" "; print $9 " " $10 " " $11}')
racdb2) (cd /dev/disk/by-path; ls -l *openfiler* | awk '{FS=" "; print $9 " " $10 " " $11}')

Make the udev rules file
************************
cd /etc/udev/rules.d/
vi 55-openiscsi.rules
KERNEL=="sd*", BUS=="scsi", PROGRAM="/etc/udev/scripts/iscsidev.sh %b", SYMLINK+="iscsi/%c/part%n"
Copy this file to the other node.

Create the script
*****************
racdb1)
cd /etc/udev/scripts/
vi iscsidev.sh

#!/bin/sh
BUS=${1}
HOST=${BUS%%:*}
[ -e /sys/class/iscsi_host ] || exit 1
file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"
target_name=$(cat ${file})
# This is not an open-iscsi drive
if [ -z "${target_name}" ]; then
   exit 1
fi
# Print only the last part of the IQN (e.g. asm1, ocr) so the symlinks become /dev/iscsi/<name>/partN
echo "${target_name##*.}"

Copy the file to racdb2.
racdb1) chmod 755 iscsidev.sh    // on both nodes

Stop and start the iscsi service on both sides
**********************************************
service iscsi stop
service iscsi start

Now create partitions
*********************
racdb1) fdisk /dev/iscsi/asm1/part
  m  (help)
  n  (new partition)
  p  (primary)
  1  (partition number 1, accept the default first and last cylinders)
  w  (write the partition table and exit)
(create partitions for the remaining volumes, including ocr)
fdisk /dev/iscsi/asm2/part
fdisk -l    // to view the partitions

Detect the partitions on racdb2
*******************************
racdb2) partprobe

********** Storage configuration complete **********

Install Clusterware
*******************
(perform on both nodes)
mkdir /u01
groupadd dba
groupadd oinstall
useradd -g oinstall -G dba oracle
passwd oracle
mkdir -p /u01/app/oracle    // Oracle software
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/app/oracle
mkdir -p /u01/app/crs       // Clusterware home
chown -R oracle:oinstall /u01/app/crs
chmod -R 775 /u01/app/crs
mkdir -p /u02               // mount point of the OCFS2 file system, path for the OCR and voting disk
chown -R oracle:oinstall /u02
chmod -R 775 /u02

Set date and time on both machines
**********************************
racdb1's date should be about 2 minutes behind racdb2's.
date -s "12/10/2010 12:30:00"

Kernel hang-check module
************************
If the kernel hangs, this module restarts the node. It is installed by default:
find /lib/modules -name "hangcheck-timer.ko"
// The following command writes the module options into /etc/modprobe.conf: check the node every 30 seconds and restart it if it has been hung for more than 180 seconds.
echo "options hangcheck-timer hangcheck_tick=30 hangcheck_margin=180" >> /etc/modprobe.conf

Now make it autostart
*********************
echo "/sbin/modprobe hangcheck-timer" >> /etc/rc.local

Enabling ssh for the oracle user
********************************
Connect as the oracle user; the goal is to log in to the other node without a password.
racdb1)
cd
mkdir -p ~/.ssh
chmod 700 ~/.ssh
ssh-keygen -t rsa
cd ~/.ssh
touch authorized_keys
ll
Perform the same steps on both nodes.

// Make the user and machines known to each other (run from ~/.ssh on racdb1)
*****************************************************************************
ssh racdb1 cat ~/.ssh/id_rsa.pub >> authorized_keys
ssh racdb2 cat ~/.ssh/id_rsa.pub >> authorized_keys
more authorized_keys
chmod 600 authorized_keys

User Equivalence Setup
**********************
oracle racdb1)
exec /usr/bin/ssh-agent $SHELL
/usr/bin/ssh-add
ssh racdb1 "date;hostname"

Install the OCFS2 rpms on both nodes
************************************
rpm -qa | grep ocfs
// expect ocfs2-tools, ocfs2-2.6.18-128..., ocfs2console-1...

Implementing the OCFS2 file system
**********************************
root)
cd /dev/iscsi/
ll
cd ocr

Format the partition with OCFS2
*******************************
mkfs.ocfs2 -b 4k -C 32K -N 4 -L oracrsfile /dev/iscsi/ocr/part1

OCFS2 CONFIGURATION
*******************
ocfs2console &
Click Cluster -> Configure Nodes and add:
node name racdb1, IP 192.168.2.100
node name racdb2, IP 192.168.2.101
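For reference, ocfs2console writes the node configuration to /etc/ocfs2/cluster.conf on each node. With the node names and private IPs above it typically looks roughly like the sketch below; the cluster name ocfs2 and port 7777 are the usual defaults, not values taken from these notes:

node:
        ip_port = 7777
        ip_address = 192.168.2.100
        number = 0
        name = racdb1
        cluster = ocfs2

node:
        ip_port = 7777
        ip_address = 192.168.2.101
        number = 1
        name = racdb2
        cluster = ocfs2

cluster:
        node_count = 2
        name = ocfs2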
Repeat the same process on the other machine.

OCFS2 services
**************
/etc/init.d/o2cb status

Mounting
********
mount -t ocfs2 -o datavolume,nointr -L "oracrsfile" /u02
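A quick sanity check after mounting; a minimal sketch, run as root, using the label and mount point from the steps above:

/etc/init.d/o2cb status     # the O2CB cluster stack should show online
mount | grep ocfs2          # /u02 should be listed with type ocfs2
df -h /u02                  # size should match the racdb-crs volume
ls -ld /u02                 # ownership should be oracle:oinstall (set earlier)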