In this setup, we have two Nodes.
Machine names are:
  srv3
  srv11
Disk size on both the machines – 50 GB
Ram size – 4 GB
CPU count – 2
Ethernet cards – 2    (one for Public and one for Private)
Machine network configuration:-
   srv3
 Public IP   192.168.1.81/24
 Private IP 10.1.1.81/8
    srv11
Public IP  192.168.1.82/24
Private IP 10.1.1.82/8
 Editing of /etc/hosts file of both the machines
# cat /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1               localhost.localdomain localhost
#Public IPs

192.168.1.81            srv3.***.com    srv3  #Use your domain name instead of ***

192.168.1.82            srv11.***.com   srv11
#Virtual IPs
192.168.1.83            srv3-vip.***.com        srv3-vip
192.168.1.84            srv11-vip.***.com       srv11-vip
#Private IPs
10.1.1.81               srv3-priv.***.com       srv3-priv
10.1.1.82               srv11-priv.***.com      srv11-priv
Please confirm that these IPs are not assigned to any other machine; the Virtual IPs will be assigned by Oracle during the installation.
# ifconfig | grep "inet addr"
          inet addr:192.168.1.82  Bcast:192.168.1.255  Mask:255.255.255.0
          inet addr:10.1.1.82  Bcast:10.255.255.255  Mask:255.0.0.0
          inet addr:127.0.0.1  Mask:255.0.0.0
Private and Public IPs should be up and running on both RAC nodes before installation.
Public and Virtual IP addresses should be in the same subnet.
Kernel Parameters
Edit /etc/sysctl.conf
/etc/sysctl.conf file should have these lines on both the machines
# cat sysctl.conf  | grep -v -e ^#  -e ^$
# Kernel settings required for Oracle 11g RAC (identical on both nodes).
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
# Message-queue limits used by Oracle background processes.
kernel.msgmnb = 65536
kernel.msgmax = 65536
# Shared-memory sizing for the SGA.
kernel.shmall=2097152
# NOTE(review): this shmmax (~4.8 GB) exceeds the 4 GB RAM stated for these
# machines — confirm the value is intended for this hardware.
kernel.shmmax= 5153960755
# Semaphores: semmsl semmns semopm semmni.
kernel.sem=250 32000 100 128
# Socket receive/send buffer defaults and maxima (cluster interconnect).
net.core.rmem_default=4194304
net.core.rmem_max=4194304
net.core.wmem_default=262144
net.core.wmem_max=262144
# System-wide open-file limit.
fs.file-max=65536
# Ephemeral port range for outgoing connections.
net.ipv4.ip_local_port_range=1024 65000
Add the following lines to the "/etc/security/limits.conf" file.
# Shell limits for the oracle user: process-count (nproc) and open-file
# (nofile) soft/hard caps required by the Oracle 11g installer.
oracle               soft    nproc   2047
oracle               hard    nproc   16384
oracle               soft    nofile  1024
oracle               hard    nofile  65536
Add the following line to the "/etc/pam.d/login" file, if it does not already exist.
session    required     pam_limits.so
Group and Users creation
Create groups and users as mentioned below on both the machines
# Create the OS groups Oracle 11g expects. The GIDs are fixed so they are
# identical on every RAC node (a cluster-wide requirement).
groupadd -g 601 asmadmin
groupadd -g 602 asmdba
groupadd -g 603 asmoper
groupadd -g 606 dba
groupadd -g 607 oinstall
# A single "oracle" user owns Clusterware, ASM and the database in this
# setup. Added asmadmin to the supplementary groups: the group was created
# above but the original never granted it to the only install user, which
# would leave nobody with the OSASM role.
useradd -g oinstall -G asmadmin,asmdba,asmoper,dba oracle
passwd oracle
# Separate homes for Clusterware (CRS_HOME) and the database (ORACLE_HOME);
# the installer requires them to be distinct, empty directories.
mkdir -p /opt/crs/oracle/product/11.1.0/crs
mkdir -p /opt/app/oracle/product/11.1.0/db_1
chown -R oracle:oinstall /opt
Make sure SELinux and iptables are disabled on both machines.
# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#       enforcing – SELinux security policy is enforced.
#       permissive – SELinux prints warnings instead of enforcing.
#       disabled – SELinux is fully disabled.
SELINUX=disabled
# SELINUXTYPE= type of policy in use. Possible values are:
#       targeted – Only targeted network daemons are protected.
#       strict – Full SELinux protection.
SELINUXTYPE=targeted
# getenforce
Disabled
# service iptables status
Firewall is stopped.
# chkconfig --list iptables
iptables        0:off   1:off   2:off   3:off   4:off   5:off   6:off
Add these lines to the oracle user's .bash_profile file
# Oracle Settings for the oracle user's .bash_profile.
TMP=/tmp; export TMP
TMPDIR=$TMP; export TMPDIR
ORACLE_HOSTNAME=srv3.***.com; export ORACLE_HOSTNAME  # change as per node hostname
# NOTE(review): base/home now match the directories created earlier and the
# ORACLE_HOME shown by root.sh (/opt/app/oracle); the original said /u01,
# which appears nowhere else in this setup.
ORACLE_BASE=/opt/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/11.1.0/db_1; export ORACLE_HOME
ORACLE_SID=srv3; export ORACLE_SID   # change as per node hostname
ORACLE_TERM=xterm; export ORACLE_TERM
PATH=/usr/sbin:$PATH; export PATH
PATH=$ORACLE_HOME/bin:$PATH; export PATH
LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib; export LD_LIBRARY_PATH
CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib; export CLASSPATH
# Raise shell limits for the oracle user. The original used curly
# ("smart") quotes, which the shell treats as literal characters, so the
# comparisons could never match; variables are now quoted and defaulted
# so the test is safe even if USER/SHELL are unset.
if [ "${USER:-}" = "oracle" ]; then
  if [ "${SHELL:-}" = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
fi
Configure ssh password free authentication for oracle user(installation User)
For this you need to make dsa or rsa public keys and provide them in authorized_keys files on other machines.
srv3# su -l oracle
srv3$ ssh-keygen -t dsa  ##(Here it ask for keys, just press enter)
srv3$ ssh-keygen -t rsa    ##(Here it ask for keys, just press enter)
srv3$ cd .ssh/
srv3$ cat id_dsa.pub id_rsa.pub >> authorized_keys
Login on srv11 machine
srv11# su -l oracle
srv11$ ssh-keygen -t dsa  ##(Here it ask for keys, just press enter)
srv11$ ssh-keygen -t rsa    ##(Here it ask for keys, just press enter)
Login on srv3 Machine as oracle user
srv3$ scp authorized_keys srv11:/home/oracle/.ssh/  ##(once it ask for yes/no)
Login on srv11 Machine as oracle user
srv11$ cd .ssh/
srv11$ cat id_dsa.pub id_rsa.pub >> authorized_keys
srv11$ scp authorized_keys srv3:/home/oracle/.ssh/   ##(once it ask for yes/no)
Now check it
srv3$  ssh srv3  ##(once it ask for yes/no)
srv3$ ssh srv11
srv11$ ssh srv3
srv11$ ssh srv11  ##(once it ask for yes/no)
…Make sure it stop prompting for yes/no while authenticating on any host(self as well)…
Make sure all required RPMs for Oracle should installed on Linux machines
For RHEL 5 (32-bit):
# Install the packages Oracle 11g requires on RHEL 5 (32-bit).
# NB: the original used "rpm -Uvih" — combining the -U (upgrade) and -i
# (install) major modes is invalid; "rpm -Uvh" is the intended invocation.
rpm -Uvh binutils-2*
rpm -Uvh compat-libstdc++-33*
rpm -Uvh elfutils-libelf-0*
rpm -Uvh elfutils-libelf-devel-0*
rpm -Uvh elfutils-libelf-devel-static-0*
rpm -Uvh gcc-4*
rpm -Uvh gcc-c++-4*
rpm -Uvh glibc-2*`uname -p`*
rpm -Uvh glibc-common-2*
rpm -Uvh glibc-devel-2*
rpm -Uvh glibc-headers-2*
rpm -Uvh kernel-headers-2*
rpm -Uvh ksh-20*
rpm -Uvh libaio-0*
rpm -Uvh libaio-devel-0*
rpm -Uvh libgcc-4*
rpm -Uvh libgomp-4*
rpm -Uvh libstdc++-4*
rpm -Uvh libstdc++-devel-4*
rpm -Uvh make-3*
rpm -Uvh numactl-devel-0*
rpm -Uvh sysstat-7*
rpm -Uvh unixODBC-2*
rpm -Uvh unixODBC-devel-2*
For RHEL 5 (64-bit):
# Install the packages Oracle 11g requires on RHEL 5 (64-bit). Both the
# native-arch (`uname -p`) and 32-bit compatibility (i386/i686) variants
# are needed for several libraries.
# NB: "rpm -Uvih" in the original mixes the -U and -i modes; use -Uvh.
rpm -Uvh binutils-2*`uname -p`*
rpm -Uvh compat-libstdc++-33*`uname -p`*
rpm -Uvh compat-libstdc++-33*i386*
rpm -Uvh elfutils-libelf-0*`uname -p`*
rpm -Uvh elfutils-libelf-devel-0*`uname -p`*
rpm -Uvh gcc-4*`uname -p`*
rpm -Uvh gcc-c++-4*`uname -p`*
rpm -Uvh glibc-2*`uname -p`*
rpm -Uvh glibc-2*i686*
rpm -Uvh glibc-common-2*`uname -p`*
rpm -Uvh glibc-devel-2*`uname -p`*
rpm -Uvh glibc-devel-2*i386*
rpm -Uvh glibc-headers-2*`uname -p`*
rpm -Uvh ksh-20*`uname -p`*
rpm -Uvh libaio-0*`uname -p`*
rpm -Uvh libaio-0*i386*
rpm -Uvh libaio-devel-0*`uname -p`*
rpm -Uvh libaio-devel-0*i386*
rpm -Uvh libgcc-4*`uname -p`*
rpm -Uvh libgcc-4*i386*
rpm -Uvh libstdc++-4*`uname -p`*
rpm -Uvh libstdc++-4*i386*
rpm -Uvh libstdc++-devel-4*`uname -p`*
rpm -Uvh make-3*`uname -p`*
rpm -Uvh numactl-devel-0*`uname -p`*
rpm -Uvh sysstat-7*`uname -p`*
rpm -Uvh unixODBC-2*`uname -p`*
rpm -Uvh unixODBC-2*i386*
rpm -Uvh unixODBC-devel-2*`uname -p`*
rpm -Uvh unixODBC-devel-2*i386*
Now we need to configure disks for the Oracle installation. In my case I am using NAS, but it doesn't matter whether you use NAS or SAN.
In my case I used six disks…
One small 5GB disk for OCR
One small 5GB disk for Voting
Four 100GB disk for ASM
# fdisk -l | grep Disk
Disk /dev/xvda: 53.6 GB, 53687091200 bytes  ## This is my OS local Disk
Disk /dev/sda: 5368 MB, 5368709120 bytes
Disk /dev/sdb: 5368 MB, 5368709120 bytes
Disk /dev/sdc: 107.3 GB, 107374182400 bytes
Disk /dev/sdd: 107.3 GB, 107374182400 bytes
Disk /dev/sde: 107.3 GB, 107374182400 bytes
Disk /dev/sdf: 107.3 GB, 107374182400 bytes
Do the Disk partition with fdisk, so that every disk will have primary or logical partition for using in installation, please make sure you are not trying to use extended partition.
Sometimes I find that the disks were used earlier; in that case I prefer to wipe all disks with the dd command
#dd if=/dev/zero of=/dev/sda1 bs=4096 count=100000
This will write zeros to the first 100000 blocks, so no effect of previous use is left.
Now we need to install Oracleasm and configure it for the disk that we want to use in ASM during Oracle installation. As we going to use /dev/sda1 and /dev/sdb1 in OCR and voting disk, so they need not to configure for Oracleasm.
For Oracleasm install following RPMs:-
# rpm -qa | grep oracleasm
oracleasmlib-2.0.4-1.el5
oracleasm-support-2.1.3-1.el5
oracleasm-2.6.18-128.el5debug-2.0.5-1.el5
oracleasm-2.6.18-128.el5-2.0.5-1.el5
oracleasm-2.6.18-128.el5xen-2.0.5-1.el5
oracleasm-2.6.18-128.el5-debuginfo-2.0.5-1.el5
Actually we only need to have RPMs, here I am using xen RPM because I am using with xen kernel. If you are using normal kernel, then you need not to use this
oracleasmlib-2.0.4-1.el5
oracleasm-support-2.1.3-1.el5
oracleasm-2.6.18-128.el5xen-2.0.5-1.el5
                          or
oracleasm-2.6.18-128.el5-2.0.5-1.el5
I have already configured it on my machines, but here I am re-running the commands, so please don't be confused by the defaults offered by the terminal itself.
# oracleasm configure -i
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets (‘[]’).  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.
Default user to own the driver interface [oracle]:
Default group to own the driver interface [dba]:
Start Oracle ASM library driver on boot (y/n) [y]:
Scan for Oracle ASM disks on boot (y/n) [y]:
Writing Oracle ASM library driver configuration: done
These inputs were saved in /etc/sysconfig/oracleasm file
# /usr/sbin/oracleasm init
Loading module “oracleasm”: oracleasm
Mounting ASMlib driver filesystem: /dev/oracleasm
Repeat above steps on other nodes as well
Now Do the below steps on any of node
#/usr/sbin/oracleasm createdisk DISK1 /dev/sdc1
#/usr/sbin/oracleasm createdisk DISK2 /dev/sdd1
#/usr/sbin/oracleasm createdisk DISK3 /dev/sde1
#/usr/sbin/oracleasm createdisk DISK4 /dev/sdf1
#/usr/sbin/oracleasm scandisks
# /etc/init.d/oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
Login on to other Nodes and check it
#/usr/sbin/oracleasm scandisks
# /etc/init.d/oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
Now Change the ownership of Disks.
# Hand the OCR (/dev/sda1), voting (/dev/sdb1) and ASM candidate
# partitions (/dev/sdc1-/dev/sdf1) to the oracle install user, and
# restrict access to the owner only. Same end state as the original
# twelve chown/chmod lines, expressed as a single loop.
for part in /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1; do
  chown oracle:oinstall "$part"
  chmod 600 "$part"
done

Now we are going to install Oracle RAC 11gR1 from node srv3 as the oracle user.
We start with installing Clusterware from the linux.x64_11gR1_clusterware.zip file, which we need to unzip and change its ownership to oracle.
# Unpack the clusterware media under /tmp and give the resulting
# directory to the oracle install user.
unzip /tmp/linux.x64_11gR1_clusterware.zip
chown  -R oracle:oinstall /tmp/clusterware
Before installing the clusterware, check the prerequisites have been met using the “runcluvfy.sh” utility in the clusterware root directory.
# Run the Cluster Verification Utility from the unpacked clusterware
# directory ("./" is required — the script is not on PATH).
./runcluvfy.sh stage -pre crsinst -n srv3,srv11 -verbose
The output of this should be successful; only then should we proceed. Sometimes I observe that the installed RPM versions do not exactly match the versions the cluster verify script expects, producing an unsuccessful result. So check carefully: if an upgraded version of a required RPM is installed, you can proceed with it anyway. For example, in RHEL 5.3 glibc-2.5-34 is installed while the required RPM is glibc-2.5-12 — so you can judge where you need to proceed and where you need to correct.
Install the Clusterware Software
Login on Primary Node(srv3) with orcale User
# Become the install owner (fixed "orcale" -> "oracle") and launch the
# Oracle Universal Installer from the unpacked clusterware directory.
su -l oracle
cd /tmp/clusterware
./runInstaller
 
Here you get the welcome screen of Oracle Clusteware. Click on Next 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.1
 
Here it comes with screen that provides you inventory directory and a OS group that have write permission on this directory, As we already gave permission to oinstall to /opt so it automatically gets permission for /opt/app/orainventory directory as well. Click to next …
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure NO.2
 
This is the CRS_HOME path. Most of the time I observe it displays the ORACLE_HOME (/opt/app/oracle/product/11.1.0/db_1/), probably because of the ORACLE_HOME set in oracle's .bash_profile. Please correct it to the CRS_HOME (/opt/crs/oracle/product/11.1.0/crs/) which you already created and set ownership to oracle:oinstall. Even if you do not correct it now, the clusterware installation will not get any error, but the database installation will also want the same empty directory, so make them different from here itself. Click on next…

Figure No.3
 
This screen checks all the pre-requiements that you have checked already with cluvrfy script, so here again it check all automatically…
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.4
 
 
 
Figure No.5
Click on next…
Figure NO.6
Start Processing Oracle Clusterware…
Figure No.7
 This Screen gives you your and Cluster name and current machine hostname , you need to add another hostname as well. Click on Add…
Figure No.8
 As you click on Add button, one another screen comes to enter other nodes details. Fill them carefully and click on OK button.
Figure No.9
 
As you click on OK Button, It will show like this. So now if you enter all the nodes details, Then Click on Next Button…
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.10
This Screen shows you interface’s  network which you uses in RAC nodes like which you want to use as public and private interface. Here it shows both interfaces as private, you need to edit one of them to change it as public interface. So just select you public interface and click on edit..
Figure No.11
This Screen show you interface name subnet or network that you uses in the interface and interface type, like public or private. Select the Interface type like here in this I use Interface eth0 as public. Then Click on OK button…
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.13
As you click on Ok Button in pervious screen, it comes like this. In this you can see there is one public and one private interface type. Now Click on Next button…
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.14
This Screen ask you for OCR location, here there are two options for you, one is for Normal Redundancy in which you need to configure ocfs cluster between RAC nodes for OCR disks. Another option is for External Redundancy in which your disk should be SAN or NAS disk that should visible and read/write access on all nodes. Here in this installation I am using External Redundancy in which I’ll use /dev/sda1 for OCR Location. We will cover Normal Redundancy options is some other Post.
Then Click on Next button
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.16
This Screen ask you for Voting Disk, here there are two options for you, one is for Normal Redundancy in which you need to configure ocfs cluster between RAC nodes for Voting Disk. Another option is for External Redundancy in which your disk should be SAN or NAS disk that should visible and read/write access on all nodes. Here in this installation I am using External Redundancy in which I’ll use /dev/sdb1 for Voting Disk. We will cover Normal Redundancy options is some other Post.
Then Click on Next button
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.18
This is Summary screen.
We just need to click on Install button…
Figure No.19
Installation in progress…
Figure No.20
Install Successful … Linking Sun JDK in progess
Figure No.21
 Link Successful… on progress setting up Perl Interpreter
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.22
Setup completed successfully. Remote operations in progress…
 Figure No.23
As all operations gets completed. This Screen comes with some instructions to follow on all the nodes. Follow them carefully and after completing them successfully. Click on OK button
Figure No.24
 
In these instructions there are two steps that need to follow on all nodes, from which root.sh is quite important and sometimes gets error in my case, so I have its logs which pasting in this post. But please don’t consider that orainstRoot.sh is not important and no need to do.
Logs of root.sh from both nodes
[root@srv3 crs]# ./root.sh
Checking to see if Oracle CRS stack is already configured
Setting the permissions on OCR backup directory
Setting up Network socket directories
Oracle Cluster Registry configuration upgraded successfully
clscfg: EXISTING configuration version 4 detected.
clscfg: version 4 is 11 Release 1.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 1: srv3 srv3-priv srv3
node 2: srv11 srv11-priv srv11
clscfg: Arguments check out successfully.
NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 30 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
Cluster Synchronization Services is active on these nodes.
        srv3
Cluster Synchronization Services is inactive on these nodes.
        srv11
Local node checking complete. Run root.sh on remaining nodes to start CRS daemons.
[root@srv11 crs]# ./root.sh
Checking to see if Oracle CRS stack is already configured
Setting the permissions on OCR backup directory
Setting up Network socket directories
Oracle Cluster Registry configuration upgraded successfully
clscfg: EXISTING configuration version 4 detected.
clscfg: version 4 is 11 Release 1.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 1: srv3 srv3-priv srv3
node 2: srv11 srv11-priv srv11
clscfg: Arguments check out successfully.
NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 30 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
Cluster Synchronization Services is active on these nodes.
        srv3
        srv11
Cluster Synchronization Services is active on all the nodes.
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
Creating VIP application resource on (2) nodes…
Creating GSD application resource on (2) nodes…
Creating ONS application resource on (2) nodes…
Starting VIP application resource on (2) nodes…
Starting GSD application resource on (2) nodes…
Starting ONS application resource on (2) nodes…
Done.
Error with root.sh
Sometimes I got an error while running root.sh on the second (or last) node; this was because I forgot to assign a gateway on the machines. In Oracle 11gR1 you must assign a gateway that is pingable from all the machines. If you get this error, run the rootdelete.sh script on all nodes from the install directory (located alongside the directory containing root.sh), assign the gateway, check it with the route -n command, and then run root.sh again — this time you will get the proper result…
Now click on Ok Button. This will give you…
Figure No.25
 
It automatically provides you this screen …
Click to Exit…
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Figure No.26
Click on Yes…
Figure No.27
 
Now, we need to install Oracle database on primary Server as we did Clusterware.
Unpack the linux.x64_11gR2_database_1of2 and linux.x64_11gR2_database_2of2 files. This will give you the database directory.
# Unpack both database media archives. The original had a stray space
# ("unzip /tmp/ file.zip"), which would make unzip treat /tmp/ itself as
# the archive and fail.
# NOTE(review): the media is named 11gR2 here although the rest of this
# walkthrough installs 11gR1 (11.1.0) — confirm which release is intended.
unzip /tmp/linux.x64_11gR2_database_1of2.zip
unzip /tmp/linux.x64_11gR2_database_2of2.zip
chown -R oracle:oinstall /tmp/database
Install the Database Software
Login on Primary Node(srv3) with oracle User
# Become the install owner (fixed "orcale" -> "oracle") and launch the
# Oracle Universal Installer from the unpacked database directory.
su -l oracle
cd /tmp/database
./runInstaller
Click on Next…
 
 
Figure No.28
Select on what type of Installation you want, I choose Enterprise Edition Installation..
Click on Next…
Figure No.29
 
Specify the Oracle Base location and Oracle home location where Oracle will install, This needs to special attention as sometime installation guys choose same folder as grid home that gives you continuously error to make clear that’s is not possible in our case so better to make them separate from beginning
Click On next…
Figure No.30
Loading various Products that need to install and start making system ready for that…
Figure No.31

Choose the nodes that you want to make a part of your Cluster…

Click on next

 
Figure No.32
 
 
Figure No.33
This Screen also carries many important parts of Oracle database installation.
Disk Group Name
Redundancy type for ASM Group
ASM disks addition.
Figure No.34
Here sometime Disks name were not display, in that case we have give proper path from “change Disk Discovery path” like /dev/sd*
Click on next…
Figure No.35
 
In case, you have CSI Number Username etc, then enter that or just Click on next…
Figure No.36
 
 
This is summary screen, which show you all kinds of information for the Oracle that you just did.
Click On Install
Figure No.37
Start extracting the various installation files on Oracle home for installation
Installation is in progress…
Figure No.38
 
Extraction is completed successful with installation of software. Now Linking is in progress of various files
 
 
 
 
 
 
 
 
 
 
 
 












Figure No.39

Link process completed successfully, now setting up DBCA is on process.


























Figure No.40
As DBCA setting is completed, now Remote operations is under process in which primary nodes transfer all oracle home to other nodes as well
























Figure No.41
 
 
Everything gets done successfully, small configuration assistants is under process

























Figure No.42
 
 
Please do as instruction displayed in screen from root user on all nodes and then click on OK

























Figure No.43

This script will react like this…

[root@srv3 ~]# cd /opt/app/oracle/product/11.1.0/db_1/
[root@srv3 db_1]# ./root.sh
Running Oracle 11g root.sh script…
The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /opt/app/oracle/product/11.1.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
   Copying dbhome to /usr/local/bin …
   Copying oraenv to /usr/local/bin …
   Copying coraenv to /usr/local/bin …
Creating /etc/oratab file…
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
[root@srv11 ~]# cd /opt/app/oracle/product/11.1.0/db_1/
[root@srv11 db_1]# ./root.sh
Running Oracle 11g root.sh script…
The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /opt/app/oracle/product/11.1.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
   Copying dbhome to /usr/local/bin …
   Copying oraenv to /usr/local/bin …
   Copying coraenv to /usr/local/bin …
Creating /etc/oratab file…
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
Oracle database Installation is completed. Click on Exit

























Figure No.44