[root@node1 ~]# cd /home/oracle/oracle/product/10.2.0/crs/install
[root@node1 install]# ls
cluster.ini make.log rootaddnode.sh rootinstall
cmdllroot.sh paramfile.crs rootconfig rootlocaladd
envVars.properties preupdate.sh rootdeinstall.sh rootupgrade
install.excl readme.txt rootdeletenode.sh templocal
install.incl rootaddnode.sbs rootdelete.sh
[root@node1 install]# ./rootaddnode.sh
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
Attempting to add 1 new nodes to the configuration
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 3: node3 node3-priv node3
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
/home/oracle/oracle/product/10.2.0/crs/bin/srvctl add nodeapps -n node3 -A node3-vip/255.255.255.0/eth0 -o /home/oracle/oracle/product/10.2.0/crs
[root@node1 install]#
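At this point node3 is registered in the OCR. As a quick sanity check, olsnodes should now list all three nodes with their node numbers:

[root@node1 install]# cd /home/oracle/oracle/product/10.2.0/crs/bin
[root@node1 bin]# ./olsnodes -n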
Next, run root.sh on the new node from its CRS home:
[root@node3 crs]# ./root.sh
WARNING: directory '/home/oracle/oracle/product/10.2.0' is not owned by root
WARNING: directory '/home/oracle/oracle/product' is not owned by root
WARNING: directory '/home/oracle/oracle' is not owned by root
WARNING: directory '/home/oracle' is not owned by root
Checking to see if Oracle CRS stack is already configured
OCR LOCATIONS = /u03/oracrs/ocr
OCR backup directory '/home/oracle/oracle/product/10.2.0/crs/cdata/crs' does not exist. Creating now
Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/home/oracle/oracle/product/10.2.0' is not owned by root
WARNING: directory '/home/oracle/oracle/product' is not owned by root
WARNING: directory '/home/oracle/oracle' is not owned by root
WARNING: directory '/home/oracle' is not owned by root
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
assigning default hostname node1 for node 1.
assigning default hostname node2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 1: node1 node1-priv node1
node 2: node2 node2-priv node2
clscfg: Arguments check out successfully.
NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
node1
node2
node3
CSS is active on all nodes.
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
IP address "node1-vip" has already been used. Enter an unused IP address.
This vipca error is expected and can be ignored here: the nodeapps (VIP, GSD, ONS) were already created from node1 by rootaddnode.sh, so the silent vipca run finds the VIPs already in use and has nothing left to configure.
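To confirm the node applications are in place on the new node, check them with srvctl; it should report the VIP, GSD and ONS daemon for node3:

[root@node3 crs]# ./bin/srvctl status nodeapps -n node3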
[root@node3 crs]# cd bin/
[root@node3 bin]# ./crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[root@node3 bin]#
Configure node-specific ONS information:
[root@node1 conf]# pwd
/home/oracle/oracle/product/10.2.0/crs/opmn/conf
[root@node1 conf]# vi ons.config
localport=6100
remoteport=6200
loglevel=3
useocr=on
[root@node1 conf]# cd /home/oracle/oracle/product/10.2.0/crs/bin
[root@node1 bin]# ./racgons add_config node3:6200
[root@node1 bin]#
If successful, the command completes silently.
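As an additional check, the ONS daemon itself can be pinged from the opmn/bin directory of the CRS home; if the daemon is up, onsctl reports it as running:

[root@node1 bin]# cd /home/oracle/oracle/product/10.2.0/crs/opmn/bin
[root@node1 bin]# ./onsctl ping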
Run ./addNode.sh from $ORACLE_HOME/oui/bin in the Database Home.
Run ./addNode.sh from $ORACLE_HOME/oui/bin in the ASM Home.
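addNode.sh can also be run non-interactively; a minimal sketch, assuming the new node is node3 (repeat from each home being extended):

[oracle@node1 bin]$ cd $ORACLE_HOME/oui/bin
[oracle@node1 bin]$ ./addNode.sh -silent "CLUSTER_NEW_NODES={node3}"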
Run DBCA from node1 or node2 and set up the ASM instance first: select Oracle Real Application Clusters database on the first page, then Configure Automatic Storage Management on the second. DBCA prompts to extend ASM to the new node; provide the SYS password, and the ASM instance on node3 is created.
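To verify, srvctl can report the ASM instance on the new node (instance names follow the +ASMn convention, so node3 should run +ASM3):

[oracle@node1 bin]$ srvctl status asm -n node3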
Run DBCA again from node1 or node2. This time select Instance Management on the second page, then Add an instance; this creates and starts the new database instance on node3.
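The database-wide view is available through srvctl as well; a sketch, assuming the database is named rac, as the instance names below suggest:

[oracle@node1 bin]$ srvctl status database -d rac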
Final output:
SQL> select * from v$active_instances;
INST_NUMBER INST_NAME
----------- ------------------------------------------------------------
1 node1:rac1
2 node2:rac2
3 node3:rac3
SQL>
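The same picture is visible from the clusterware side: crs_stat should now show the node3 VIP, GSD, ONS, ASM and database instance resources as ONLINE:

[root@node3 bin]# ./crs_stat -t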