Michael Dinh

Michael T. Dinh, Oracle DBA

Cloning Oracle Homes in 19c Part 2

Wed, 2019-10-30 08:10

You didn’t think there was going to be a part 2, did you?

$ORACLE_HOME/log is not included by creategoldimage, which makes perfect sense (as I was discussing on Twitter, there is no point having garbage in a gold image), so why is it being read at all?

For ol7-19-rac2, the permissions on $GRID_HOME/log have not been changed; hence, creategoldimage failed.

-exclFiles $ORACLE_HOME/.patch_storage (succeeded)
-exclFiles $ORACLE_HOME/log (failed)
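
In principle both directories could be excluded in one run; a minimal sketch, assuming -exclFiles accepts a comma-separated list of paths (I have not verified that syntax):

$ORACLE_HOME/gridSetup.sh -creategoldimage \
  -exclFiles $ORACLE_HOME/.patch_storage,$ORACLE_HOME/log \
  -destinationlocation /u01/app/19.0.0/grid5 -silent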

[oracle@ol7-19-rac1 ~]$ ls /u01/app/19.0.0/grid/.patch_storage/
29401763_Apr_11_2019_22_26_25  29585399_Apr_9_2019_19_12_47   29851014_Jul_5_2019_01_15_35    NApply
29517242_Apr_17_2019_23_27_10  29834717_Jul_10_2019_02_09_26  interim_inventory.txt           record_inventory.txt
29517247_Apr_1_2019_15_08_20   29850993_Jul_5_2019_05_08_35   LatestOPatchSession.properties
[oracle@ol7-19-rac1 ~]$

[oracle@ol7-19-rac1 ~]$ $ORACLE_HOME/gridSetup.sh -creategoldimage -exclFiles $ORACLE_HOME/.patch_storage -destinationlocation /u01/app/19.0.0/grid5 -silent
Launching Oracle Grid Infrastructure Setup Wizard...

Successfully Setup Software.
Gold Image location: /u01/app/19.0.0/grid5/grid_home_2019-10-30_12-05-43PM.zip

[oracle@ol7-19-rac1 ~]$ cd /u01/app/19.0.0/grid5/
[oracle@ol7-19-rac1 grid5]$ unzip -qo grid_home_2019-10-30_12-05-43PM.zip

[oracle@ol7-19-rac1 grid5]$ ls .patch_storage
ls: cannot access .patch_storage: No such file or directory

[oracle@ol7-19-rac1 grid5]$ ls -l
total 3203396
drwxr-xr-x.  3 oracle oinstall         22 Oct  8 20:33 acfs
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 acfsccm
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 acfsccreg
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 acfscm
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 acfsiob
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 acfsrd
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 acfsrm
drwxr-xr-x.  2 oracle oinstall        102 Oct  1 23:45 addnode
drwxr-xr-x.  3 oracle oinstall         18 Oct  1 23:47 advmccb
drwxr-xr-x. 10 oracle oinstall       4096 Apr 17  2019 assistants
drwxr-xr-x.  2 oracle oinstall      12288 Oct 30 12:05 bin
drwxr-x---.  3 oracle oinstall         18 Oct  1 23:47 cdp
drwxr-x---.  4 oracle oinstall         31 Oct  1 23:47 cha
drwxr-xr-x.  4 oracle oinstall         87 Oct  1 23:45 clone
drwxr-xr-x. 12 oracle oinstall       4096 Oct 30 12:05 crs
drwx--x--x.  5 oracle oinstall         41 Oct  1 23:47 css
drwxr-xr-x.  2 oracle oinstall          6 Oct  1 23:47 ctss
drwxrwxr-x.  7 oracle oinstall         71 Apr 17  2019 cv
drwxr-xr-x.  3 oracle oinstall         19 Apr 17  2019 dbjava
drwxr-xr-x.  2 oracle oinstall         39 Oct 30 12:05 dbs
drwxr-xr-x.  5 oracle oinstall       4096 Oct  1 23:45 deinstall
drwxr-xr-x.  3 oracle oinstall         20 Apr 17  2019 demo
drwxr-xr-x.  3 oracle oinstall         20 Apr 17  2019 diagnostics
drwxr-xr-x. 13 oracle oinstall       4096 Apr 17  2019 dmu
-rw-r--r--.  1 oracle oinstall        852 Aug 18  2015 env.ora
drwxr-x---.  6 oracle oinstall         53 Oct 30 12:05 evm
drwxr-x---.  2 oracle oinstall          6 Oct  1 23:47 gipc
drwxr-x---.  2 oracle oinstall          6 Oct  1 23:47 gnsd
drwxr-x---.  5 oracle oinstall         49 Oct 30 12:05 gpnp
-rw-r--r--.  1 oracle oinstall 3280127704 Oct 30 12:12 grid_home_2019-10-30_12-05-43PM.zip
-rwxr-x---.  1 oracle oinstall       3294 Mar  8  2017 gridSetup.sh
drwxr-xr-x.  4 oracle oinstall         32 Apr 17  2019 has
drwxr-xr-x.  3 oracle oinstall         19 Apr 17  2019 hs
drwxr-xr-x. 11 oracle oinstall       4096 Oct 30 12:12 install
drwxr-xr-x.  2 oracle oinstall         29 Apr 17  2019 instantclient
drwxr-x---. 13 oracle oinstall       4096 Oct 30 12:05 inventory
drwxr-xr-x.  8 oracle oinstall         82 Oct 30 12:05 javavm
drwxr-xr-x.  3 oracle oinstall         35 Apr 17  2019 jdbc
drwxr-xr-x.  6 oracle oinstall       4096 Oct 30 12:05 jdk
drwxr-xr-x.  2 oracle oinstall       8192 Oct  8 20:28 jlib
drwxr-xr-x. 10 oracle oinstall       4096 Apr 17  2019 ldap
drwxr-xr-x.  4 oracle oinstall      12288 Oct 30 12:05 lib
drwxr-xr-x.  5 oracle oinstall         42 Apr 17  2019 md
drwxr-x---.  2 oracle oinstall          6 Oct  1 23:47 mdns
drwxr-xr-x. 10 oracle oinstall       4096 Oct 30 12:05 network
drwxr-xr-x.  5 oracle oinstall         46 Apr 17  2019 nls
drwxr-x---.  2 oracle oinstall          6 Oct  1 23:47 ohasd
drwxr-xr-x.  2 oracle oinstall          6 Oct  1 23:47 ologgerd
drwxr-x---. 14 oracle oinstall       4096 Oct  1 23:45 OPatch
drwxr-xr-x.  8 oracle oinstall         77 Apr 17  2019 opmn
drwxr-xr-x.  4 oracle oinstall         34 Apr 17  2019 oracore
drwxr-xr-x.  6 oracle oinstall         52 Apr 17  2019 ord
drwxr-xr-x.  4 oracle oinstall         66 Apr 17  2019 ords
drwxr-xr-x.  3 oracle oinstall         19 Apr 17  2019 oss
drwxr-xr-x.  2 oracle oinstall          6 Oct  1 23:47 osysmond
drwxr-xr-x.  8 oracle oinstall       4096 Oct  1 23:45 oui
drwxr-xr-x.  4 oracle oinstall         33 Apr 17  2019 owm
drwxr-xr-x.  5 oracle oinstall         39 Apr 17  2019 perl
drwxr-xr-x.  6 oracle oinstall         78 Apr 17  2019 plsql
drwxr-xr-x.  5 oracle oinstall         42 Apr 17  2019 precomp
drwxr-xr-x.  2 oracle oinstall         26 Apr 17  2019 QOpatch
drwxr-xr-x.  5 oracle oinstall         42 Apr 17  2019 qos
drwxr-xr-x.  5 oracle oinstall         56 Oct 30 12:05 racg
drwxr-xr-x. 15 oracle oinstall       4096 Oct 30 12:05 rdbms
drwxr-xr-x.  3 oracle oinstall         21 Apr 17  2019 relnotes
drwxr-xr-x.  7 oracle oinstall        102 Apr 17  2019 rhp
-rwxr-xr-x.  1 oracle oinstall        405 Oct  1 23:45 root.sh
-rwx------.  1 oracle oinstall        490 Apr 17  2019 root.sh.old
-rw-r-----.  1 oracle oinstall         10 Apr 17  2019 root.sh.old.1
-rwx------.  1 oracle oinstall        405 Apr 18  2019 root.sh.old.2
-rw-r-----.  1 oracle oinstall         10 Apr 17  2019 root.sh.old.3
-rwxr-xr-x.  1 oracle oinstall        414 Oct  1 23:45 rootupgrade.sh
-rwxr-x---.  1 oracle oinstall        628 Sep  3  2015 runcluvfy.sh
drwxr-xr-x.  5 oracle oinstall       4096 Apr 17  2019 sdk
drwxr-xr-x.  3 oracle oinstall         18 Apr 17  2019 slax
drwxr-xr-x.  5 oracle oinstall       4096 Oct  8 20:26 sqlpatch
drwxr-xr-x.  6 oracle oinstall         53 Oct  1 23:44 sqlplus
drwxr-xr-x.  7 oracle oinstall         66 Oct 30 12:05 srvm
drwxr-x---.  5 oracle oinstall         63 Oct 30 12:05 suptools
drwxr-xr-x.  4 oracle oinstall         29 Apr 17  2019 tomcat
drwxr-xr-x.  3 oracle oinstall         35 Apr 17  2019 ucp
drwxr-xr-x.  7 oracle oinstall         71 Apr 17  2019 usm
drwxr-xr-x.  2 oracle oinstall         33 Apr 17  2019 utl
-rw-r-----.  1 oracle oinstall        500 Feb  6  2013 welcome.html
drwxr-xr-x.  3 oracle oinstall         18 Apr 17  2019 wlm
drwxr-xr-x.  3 oracle oinstall         19 Apr 17  2019 wwg
drwxr-xr-x.  5 oracle oinstall       4096 Oct  8 20:35 xag
drwxr-x---.  6 oracle oinstall         58 Apr 17  2019 xdk

[oracle@ol7-19-rac1 grid5]$ ls /u01/app/19.0.0/grid/log/
crs  diag  ol7-19-rac1  procwatcher

[oracle@ol7-19-rac1 grid5]$ ls /u01/app/19.0.0/grid5/log/*
ls: cannot access /u01/app/19.0.0/grid5/log/*: No such file or directory
[oracle@ol7-19-rac1 grid5]$
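
Instead of extracting the whole image, listing the zip is a quicker way to confirm the exclusion (a minimal check; zip name from above):

unzip -l grid_home_2019-10-30_12-05-43PM.zip | grep -c "\.patch_storage"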

================================================================================

[oracle@ol7-19-rac1 ~]$ $ORACLE_HOME/gridSetup.sh -creategoldimage -destinationlocation /u01/app/19.0.0/grid5 -silent
Launching Oracle Grid Infrastructure Setup Wizard...

Successfully Setup Software.
Gold Image location: /u01/app/19.0.0/grid5/grid_home_2019-10-30_12-23-31PM.zip

[oracle@ol7-19-rac1 ~]$ cd /u01/app/19.0.0/grid5/
[oracle@ol7-19-rac1 grid5]$ unzip -qo grid_home_2019-10-30_12-23-31PM.zip

[oracle@ol7-19-rac1 grid5]$ ls /u01/app/19.0.0/grid/.patch_storage/
29401763_Apr_11_2019_22_26_25  29585399_Apr_9_2019_19_12_47   29851014_Jul_5_2019_01_15_35    NApply
29517242_Apr_17_2019_23_27_10  29834717_Jul_10_2019_02_09_26  interim_inventory.txt           record_inventory.txt
29517247_Apr_1_2019_15_08_20   29850993_Jul_5_2019_05_08_35   LatestOPatchSession.properties

[oracle@ol7-19-rac1 grid5]$ ls /u01/app/19.0.0/grid5/.patch_storage/
29401763_Apr_11_2019_22_26_25  29585399_Apr_9_2019_19_12_47   29851014_Jul_5_2019_01_15_35    NApply
29517242_Apr_17_2019_23_27_10  29834717_Jul_10_2019_02_09_26  interim_inventory.txt           record_inventory.txt
29517247_Apr_1_2019_15_08_20   29850993_Jul_5_2019_05_08_35   LatestOPatchSession.properties

[oracle@ol7-19-rac1 grid5]$ ls /u01/app/19.0.0/grid/log/
crs  diag  ol7-19-rac1  procwatcher

[oracle@ol7-19-rac1 grid5]$ ls /u01/app/19.0.0/grid5/log/*
ls: cannot access /u01/app/19.0.0/grid5/log/*: No such file or directory
[oracle@ol7-19-rac1 grid5]$

================================================================================

[oracle@ol7-19-rac2 ~]$ . oraenv <<< +ASM2
ORACLE_SID = [cdbrac2] ? The Oracle base remains unchanged with value /u01/app/oracle

[oracle@ol7-19-rac2 ~]$ ls $ORACLE_HOME/log/*
/u01/app/19.0.0/grid/log/diag:
adrci_dir.mif  asmtool  clients

/u01/app/19.0.0/grid/log/ol7-19-rac2:
acfs  admin  afd  chad  client  crsd  cssd  ctssd  diskmon  evmd  gipcd  gnsd  gpnpd  mdnsd  ohasd  racg  srvm  xag

/u01/app/19.0.0/grid/log/procwatcher:
ls: cannot access /u01/app/19.0.0/grid/log/procwatcher/prw.sh: Permission denied
ls: cannot access /u01/app/19.0.0/grid/log/procwatcher/PRW_SYS_ol7-19-rac2: Permission denied
ls: cannot access /u01/app/19.0.0/grid/log/procwatcher/prwinit.ora.org: Permission denied
ls: cannot access /u01/app/19.0.0/grid/log/procwatcher/prwinit.ora: Permission denied
ls: cannot access /u01/app/19.0.0/grid/log/procwatcher/prw_ol7-19-rac2.log: Permission denied
prwinit.ora  prwinit.ora.org  prw_ol7-19-rac2.log  prw.sh  PRW_SYS_ol7-19-rac2

[oracle@ol7-19-rac2 ~]$ $ORACLE_HOME/gridSetup.sh -creategoldimage -exclFiles $ORACLE_HOME/log -destinationlocation /u01/app/19.0.0/grid5 -silent
Launching Oracle Grid Infrastructure Setup Wizard...

[FATAL] [INS-32700] The gold image creation failed. Check the install log /u01/app/oraInventory/logs/GridSetupActions2019-10-30_12-40-37PM for more details.
Setup failed.
[oracle@ol7-19-rac2 ~]$

Cloning Oracle Homes in 19c

Tue, 2019-10-29 09:41

You may find conflicting information in Oracle’s documentation: Cloning an Oracle Database Home still shows how to use clone.pl, while the Database Upgrade Guide 19c lists Deprecation of the clone.pl Script.

To clone Oracle software, create a gold image with createGoldImage and then install the software as usual.
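
For reference, installing from a gold image is just unzip plus the normal installer run; a minimal sketch, assuming a silent install with a prepared response file (staging path and response file are illustrative):

mkdir -p /u01/app/oracle/product/19.0.0/dbhome_2
cd /u01/app/oracle/product/19.0.0/dbhome_2
unzip -q /stage/db_home_2019-10-29_12-59-52PM.zip
./runInstaller -silent -responseFile /stage/db_install.rsp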

DEMO for DB:

Source: /u01/app/oracle/product/19.0.0/dbhome_1
Target: /u01/app/oracle/product/19.0.0/dbhome_2

[oracle@ol7-19-rac1 ~]$ ls -l /u01/app/oracle/product/19.0.0/dbhome_2/
total 0

[oracle@ol7-19-rac1 ~]$ $ORACLE_HOME/runInstaller -createGoldImage -destinationLocation /u01/app/oracle/product/19.0.0/dbhome_2 -silent
Launching Oracle Database Setup Wizard...

[oracle@ol7-19-rac1 ~]$ ls -l /u01/app/oracle/product/19.0.0/dbhome_2/
total 3069584
-rw-r--r--. 1 oracle oinstall 3143250100 Oct 29 13:09 db_home_2019-10-29_12-59-52PM.zip

[oracle@ol7-19-rac1 ~]$ cd /u01/app/oracle/product/19.0.0/dbhome_2/
[oracle@ol7-19-rac1 dbhome_2]$ unzip -qo db_home_2019-10-29_12-59-52PM.zip

[oracle@ol7-19-rac1 dbhome_2]$ ls -ld *
drwxr-xr-x. 2 oracle oinstall 102 Oct 2 00:06 addnode
drwxr-xr-x. 3 oracle oinstall 20 Oct 2 00:35 admin
drwxr-xr-x. 6 oracle oinstall 4096 Apr 17 2019 apex
drwxr-xr-x. 9 oracle oinstall 93 Apr 17 2019 assistants
drwxr-xr-x. 2 oracle oinstall 8192 Oct 29 13:00 bin
drwxr-xr-x. 4 oracle oinstall 87 Oct 2 00:06 clone
drwxr-xr-x. 6 oracle oinstall 55 Apr 17 2019 crs
drwxr-xr-x. 3 oracle oinstall 18 Apr 17 2019 css
drwxr-xr-x. 11 oracle oinstall 4096 Apr 17 2019 ctx
drwxr-xr-x. 7 oracle oinstall 71 Apr 17 2019 cv
drwxr-xr-x. 3 oracle oinstall 20 Apr 17 2019 data
-rw-r--r--. 1 oracle oinstall 3143250100 Oct 29 13:09 db_home_2019-10-29_12-59-52PM.zip
drwxr-xr-x. 3 oracle oinstall 19 Apr 17 2019 dbjava
drwxr-xr-x. 2 oracle oinstall 66 Oct 29 12:35 dbs
drwxr-xr-x. 5 oracle oinstall 4096 Oct 2 00:06 deinstall
drwxr-xr-x. 3 oracle oinstall 20 Apr 17 2019 demo
drwxr-xr-x. 3 oracle oinstall 20 Apr 17 2019 diagnostics
drwxr-xr-x. 13 oracle oinstall 4096 Apr 17 2019 dmu
drwxr-xr-x. 4 oracle oinstall 30 Apr 17 2019 drdaas
drwxr-xr-x. 3 oracle oinstall 19 Apr 17 2019 dv
-rw-r--r--. 1 oracle oinstall 852 Aug 18 2015 env.ora
drwxr-xr-x. 3 oracle oinstall 18 Apr 17 2019 has
drwxr-xr-x. 5 oracle oinstall 41 Apr 17 2019 hs
drwxr-xr-x. 10 oracle oinstall 4096 Oct 29 13:08 install
drwxr-xr-x. 2 oracle oinstall 29 Apr 17 2019 instantclient
drwxr-x---. 13 oracle oinstall 4096 Oct 29 13:00 inventory
drwxr-xr-x. 8 oracle oinstall 82 Oct 29 13:00 javavm
drwxr-xr-x. 3 oracle oinstall 35 Apr 17 2019 jdbc
drwxr-xr-x. 6 oracle oinstall 4096 Oct 29 13:00 jdk
drwxr-xr-x. 2 oracle oinstall 4096 Oct 8 20:23 jlib
drwxr-xr-x. 10 oracle oinstall 4096 Apr 17 2019 ldap
drwxr-xr-x. 4 oracle oinstall 12288 Oct 29 13:00 lib
drwxr-x---. 2 oracle oinstall 6 Oct 2 00:10 log
drwxr-xr-x. 9 oracle oinstall 98 Apr 17 2019 md
drwxr-xr-x. 4 oracle oinstall 31 Apr 17 2019 mgw
drwxr-xr-x. 10 oracle oinstall 4096 Oct 29 13:00 network
drwxr-xr-x. 5 oracle oinstall 46 Apr 17 2019 nls
drwxr-xr-x. 8 oracle oinstall 101 Apr 17 2019 odbc
drwxr-xr-x. 5 oracle oinstall 42 Apr 17 2019 olap
drwxr-x---. 14 oracle oinstall 4096 Oct 2 00:06 OPatch
drwxr-xr-x. 7 oracle oinstall 65 Apr 17 2019 opmn
drwxr-xr-x. 4 oracle oinstall 34 Apr 17 2019 oracore
drwxr-xr-x. 6 oracle oinstall 52 Apr 17 2019 ord
drwxr-xr-x. 4 oracle oinstall 66 Apr 17 2019 ords
drwxr-xr-x. 3 oracle oinstall 19 Apr 17 2019 oss
drwxr-xr-x. 8 oracle oinstall 4096 Oct 2 00:06 oui
drwxr-xr-x. 4 oracle oinstall 33 Apr 17 2019 owm
drwxr-xr-x. 5 oracle oinstall 39 Apr 17 2019 perl
drwxr-xr-x. 6 oracle oinstall 78 Apr 17 2019 plsql
drwxr-xr-x. 6 oracle oinstall 56 Oct 29 13:00 precomp
drwxr-xr-x. 2 oracle oinstall 26 Apr 17 2019 QOpatch
drwxr-xr-x. 5 oracle oinstall 52 Apr 17 2019 R
drwxr-xr-x. 4 oracle oinstall 29 Apr 17 2019 racg
drwxr-xr-x. 15 oracle oinstall 4096 Oct 29 13:00 rdbms
drwxr-xr-x. 3 oracle oinstall 21 Apr 17 2019 relnotes
-rwx------. 1 oracle oinstall 549 Oct 2 00:06 root.sh
-rwx------. 1 oracle oinstall 786 Apr 17 2019 root.sh.old
-rw-r-----. 1 oracle oinstall 10 Apr 17 2019 root.sh.old.1
-rwx------. 1 oracle oinstall 638 Apr 18 2019 root.sh.old.2
-rw-r-----. 1 oracle oinstall 10 Apr 17 2019 root.sh.old.3
-rwxr-x---. 1 oracle oinstall 1783 Mar 8 2017 runInstaller
-rw-r--r--. 1 oracle oinstall 2927 Oct 14 2016 schagent.conf
drwxr-xr-x. 5 oracle oinstall 4096 Apr 17 2019 sdk
drwxr-xr-x. 3 oracle oinstall 18 Apr 17 2019 slax
drwxr-xr-x. 4 oracle oinstall 41 Apr 17 2019 sqldeveloper
drwxr-xr-x. 3 oracle oinstall 17 Apr 17 2019 sqlj
drwxr-xr-x. 5 oracle oinstall 4096 Oct 8 20:22 sqlpatch
drwxr-xr-x. 6 oracle oinstall 53 Oct 2 00:05 sqlplus
drwxr-xr-x. 6 oracle oinstall 54 Apr 17 2019 srvm
drwxr-xr-x. 5 oracle oinstall 45 Oct 29 13:00 suptools
drwxr-xr-x. 3 oracle oinstall 35 Apr 17 2019 ucp
drwxr-xr-x. 4 oracle oinstall 31 Apr 17 2019 usm
drwxr-xr-x. 2 oracle oinstall 33 Apr 17 2019 utl
drwxr-xr-x. 3 oracle oinstall 19 Apr 17 2019 wwg
drwxr-x---. 7 oracle oinstall 69 Apr 17 2019 xdk
[oracle@ol7-19-rac1 dbhome_2]$

DEMO for GI:

Source: /u01/app/19.0.0/grid
Target: /u01/app/19.0.0/grid5

[root@ol7-19-rac1 ~]# mkdir -p /u01/app/19.0.0/grid5
[root@ol7-19-rac1 ~]# chmod 775 /u01/app/19.0.0/grid5
[root@ol7-19-rac1 ~]# chown oracle:oinstall /u01/app/19.0.0/grid5
[root@ol7-19-rac1 ~]# ls -ld /u01/app/19.0.0/grid5/
drwxrwxr-x. 2 oracle oinstall 6 Oct 29 13:15 /u01/app/19.0.0/grid5/

[oracle@ol7-19-rac1 ~]$ echo $ORACLE_HOME
/u01/app/19.0.0/grid
[oracle@ol7-19-rac1 ~]$ $ORACLE_HOME/gridSetup.sh -creategoldimage -destinationlocation /u01/app/19.0.0/grid5 -silent
Launching Oracle Grid Infrastructure Setup Wizard...
[oracle@ol7-19-rac1 ~]$

FAILED:

[oracle@ol7-19-rac1 GridSetupActions2019-10-29_01-20-38PM]$ grep -A1 "^WARNING" gridSetupActions2019-10-29_01-20-38PM.log
WARNING:  [Oct 29, 2019 1:20:54 PM] Validation disabled for the state init
INFO:  [Oct 29, 2019 1:20:54 PM] Completed validating state <init>
--
WARNING:  [Oct 29, 2019 1:20:55 PM] Command to get the files from '/u01/app/19.0.0/grid' not owned by 'oracle' failed.
WARNING:  [Oct 29, 2019 1:20:55 PM] Following files from the source home are not owned by the current user: [/u01/app/19.0.0/grid/acfs, /u01/app/19.0.0/grid/acfs/tunables, /u01/app/19.0.0/grid/acfs/tunables/acfstunables]
INFO:  [Oct 29, 2019 1:20:55 PM] Getting the last existing parent of: /u01/app/19.0.0/grid5
--
WARNING:  [Oct 29, 2019 1:20:57 PM] Files list is null or empty.
INFO:  [Oct 29, 2019 1:20:57 PM] Completed validating state <createGoldImage>
--
WARNING:  [Oct 29, 2019 1:20:58 PM] Following files are not readable: [/u01/app/19.0.0/grid/suptools/orachk/orachk, /u01/app/19.0.0/grid/log/procwatcher/prw.sh, /u01/app/19.0.0/grid/log/procwatcher/PRW_SYS_ol7-19-rac1, /u01/app/19.0.0/grid/log/procwatcher/prwinit.ora, /u01/app/19.0.0/grid/crf/admin/run/crfmond, /u01/app/19.0.0/grid/crf/admin/run/crflogd]
INFO:  [Oct 29, 2019 1:21:00 PM] Verifying whether Central Inventory is locked by any other OUI session...
--
WARNING:  [Oct 29, 2019 1:21:05 PM] Could not create symlink: /tmp/GridSetupActions2019-10-29_01-20-38PM/tempHome_1572355263979/log/procwatcher/prw.sh.
Refer associated stacktrace #oracle.install.ivw.common.driver.job.CreateGoldImageJob:7059
--
WARNING:  [Oct 29, 2019 1:21:34 PM] Could not create symlink: /tmp/GridSetupActions2019-10-29_01-20-38PM/tempHome_1572355294593/log/procwatcher/prw.sh.
Refer associated stacktrace #oracle.install.ivw.common.driver.job.CreateGoldImageJob:7118


[oracle@ol7-19-rac1 GridSetupActions2019-10-29_01-20-38PM]$ ll /u01/app/19.0.0/grid/acfs
total 0
drwxr-xr-x. 2 root root 26 Oct  8 20:33 tunables


[oracle@ol7-19-rac1 GridSetupActions2019-10-29_01-20-38PM]$ grep -i severe gridSetupActions2019-10-29_01-20-38PM.log
SEVERE: [Oct 29, 2019 1:21:11 PM] [FATAL] [INS-32700] The gold image creation failed. Check the install log /u01/app/oraInventory/logs/GridSetupActions2019-10-29_01-20-38PM for more details.
SEVERE: [Oct 29, 2019 1:21:40 PM] [FATAL] [INS-32700] The gold image creation failed. Check the install log /u01/app/oraInventory/logs/GridSetupActions2019-10-29_01-20-38PM for more details.
[oracle@ol7-19-rac1 GridSetupActions2019-10-29_01-20-38PM]$

[oracle@ol7-19-rac1 ~]$

RESEARCH:

Bug 29220079 - Error INS-32700 Creating a GI Gold Image (Doc ID 29220079.8)	
Versions confirmed as being affected: 19.3.0	
The fix for 29220079 is first included in: 
19.3.0.0.190416 (Apr 2019) Database Release Update (DB RU) and 
20.1.0

It should have been fixed, but it does not seem like it.

[oracle@ol7-19-rac1 ~]$ $ORACLE_HOME/OPatch/opatch lspatches
29851014;ACFS RELEASE UPDATE 19.4.0.0.0 (29851014)
29850993;OCW RELEASE UPDATE 19.4.0.0.0 (29850993)
29834717;Database Release Update : 19.4.0.0.190716 (29834717)
29401763;TOMCAT RELEASE UPDATE 19.0.0.0.0 (29401763)

OPatch succeeded.
[oracle@ol7-19-rac1 ~]$

You might have to create an SR :=(

UPDATE: Thanks to https://lonedba.wordpress.com/

[oracle@ol7-19-rac1 GridSetupActions2019-10-29_03-06-03PM]$ grep "Permission denied" gridSetupActions2019-10-29_03-06-03PM.log
INFO:  [Oct 29, 2019 3:06:14 PM] find: ‘/u01/app/19.0.0/grid/log/procwatcher/prw.sh’: Permission denied
INFO:  [Oct 29, 2019 3:06:14 PM] find: ‘/u01/app/19.0.0/grid/log/procwatcher/PRW_SYS_ol7-19-rac1’: Permission denied
INFO:  [Oct 29, 2019 3:06:14 PM] find: ‘/u01/app/19.0.0/grid/log/procwatcher/prwinit.ora’: Permission denied
INFO:  [Oct 29, 2019 3:06:14 PM] find: ‘/u01/app/19.0.0/grid/crf/admin/run/crfmond’: Permission denied
INFO:  [Oct 29, 2019 3:06:14 PM] find: ‘/u01/app/19.0.0/grid/crf/admin/run/crflogd’: Permission denied
[oracle@ol7-19-rac1 GridSetupActions2019-10-29_03-06-03PM]$
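
A quick way to spot such files before running creategoldimage is a readability check as the oracle user (a minimal sketch using GNU find):

find $ORACLE_HOME -not -readable 2>/dev/null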

[oracle@ol7-19-rac1 ~]$ echo $ORACLE_HOME; cd $ORACLE_HOME/log
/u01/app/19.0.0/grid

[oracle@ol7-19-rac1 log]$ ls -l
total 4
drwxr-x---.  4 oracle oinstall   57 Oct  1 23:57 diag
drwxr-xr-t. 20 root   oinstall 4096 Oct  1 23:55 ol7-19-rac1
drwxr--r--.  3 root   root       66 Oct 25 15:10 procwatcher

[root@ol7-19-rac1 log]# chmod 775 -R ol7-19-rac1/ procwatcher/
[root@ol7-19-rac1 log]# ls -l
total 4
drwxr-xr-x.  2 oracle oinstall    6 Oct  1 23:44 crs
drwxr-x---.  4 oracle oinstall   57 Oct  1 23:50 diag
drwxrwxr-x. 20 root   oinstall 4096 Oct  1 23:47 ol7-19-rac1
drwxrwxr-x.  3 root   root       66 Oct 25 15:08 procwatcher
[root@ol7-19-rac1 log]#

[oracle@ol7-19-rac1 ~]$ . oraenv <<< +ASM1
ORACLE_SID = [+ASM1] ? The Oracle base remains unchanged with value /u01/app/oracle

[oracle@ol7-19-rac1 ~]$ $ORACLE_HOME/gridSetup.sh -creategoldimage -destinationlocation /u01/app/19.0.0/grid5 -silent
Launching Oracle Grid Infrastructure Setup Wizard...

Successfully Setup Software.
Gold Image location: /u01/app/19.0.0/grid5/grid_home_2019-10-29_04-36-47PM.zip

[oracle@ol7-19-rac1 ~]$ ll /u01/app/19.0.0/grid5/*
-rw-r--r--. 1 oracle oinstall 4426495995 Oct 29 16:46 /u01/app/19.0.0/grid5/grid_home_2019-10-29_04-36-47PM.zip
[oracle@ol7-19-rac1 ~]$

srvctl config all

Tue, 2019-10-22 08:19

Learned something new today, and I am not sure if it’s a new feature.

It seems a lot easier to gather the clusterware configuration using one command.

Works with srvctl version: 18.0.0.0.0 or higher.
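
For comparison, getting the same picture before srvctl config all meant stitching together several commands (a partial list; database name taken from the output below):

srvctl config scan
srvctl config network
srvctl config asm
srvctl config database -d cdbrac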

19c

[oracle@ol7-19-rac2 ~]$ echo $ORACLE_HOME
/u01/app/19.0.0/grid

[oracle@ol7-19-rac2 ~]$ srvctl -version
srvctl version: 19.0.0.0.0

[oracle@ol7-19-rac2 ~]$ srvctl config all

Oracle Clusterware configuration details
========================================

Oracle Clusterware basic information
------------------------------------
  Operating system          Linux
  Name                      ol7-19-cluster
  Class                     STANDALONE
  Cluster nodes             ol7-19-rac1, ol7-19-rac2
  Version                   19.0.0.0.0
  Groups                    SYSOPER: SYSASM:dba SYSRAC:dba SYSDBA:dba
  OCR locations             +DATA
  Voting disk locations     DATA
  Voting disk file paths    /dev/oracleasm/asm-disk3

Cluster network configuration details
-------------------------------------
  Interface name  Type  Subnet           Classification
  eth1            IPV4  192.168.56.0/24  PUBLIC
  eth2            IPV4  192.168.1.0/24   PRIVATE, ASM

SCAN configuration details
--------------------------

SCAN "ol7-19-scan" details
++++++++++++++++++++++++++
  Name                ol7-19-scan
  IPv4 subnet         192.168.56.0/24
  DHCP server type    static
  End points          TCP:1521

  SCAN listeners
  --------------
  Name              VIP address
  LISTENER_SCAN1    192.168.56.105
  LISTENER_SCAN2    192.168.56.106
  LISTENER_SCAN3    192.168.56.107


ASM configuration details
-------------------------
  Mode             remote
  Password file    +DATA
  SPFILE           +DATA

  ASM disk group details
  ++++++++++++++++++++++
  Name  Redundancy
  DATA  EXTERN

Database configuration details
==============================

Database "ora.cdbrac.db" details
--------------------------------
  Name                ora.cdbrac.db
  Type                RAC
  Version             19.0.0.0.0
  Role                PRIMARY
  Management          AUTOMATIC
  policy
  SPFILE              +DATA
  Password file       +DATA
  Groups              OSDBA:dba OSOPER:oper OSBACKUP:dba OSDG:dba OSKM:dba OSRAC:dba
  Oracle home         /u01/app/oracle/product/19.0.0/dbhome_1
[oracle@ol7-19-rac2 ~]$

18c

[oracle@rac1 Desktop]$ srvctl -version
srvctl version: 18.0.0.0.0

[oracle@rac1 Desktop]$ srvctl config all

Oracle Clusterware configuration details                                        
========================================                                        

Oracle Clusterware basic information                                            
------------------------------------                                            
  Operating system         Linux                                           
  Name                     scan                                            
  Class                    STANDALONE                                      
  Cluster nodes            rac1, rac2                                      
  Version                  18.0.0.0.0                                      
  Groups                   SYSOPER:dba SYSASM:dba SYSRAC:dba SYSDBA:dba    
  Cluster home             /u01/app/18.0.0/grid                            
  OCR locations            +CRS                                            
  Voting disk locations    /dev/asm-disk8, /dev/asm-disk9, /dev/asm-disk7  

Cluster network configuration details                                           
-------------------------------------                                           
  Interface name  Type  Subnet           Classification  
  eth1            IPV4  10.1.1.0/24      PRIVATE, ASM    
  eth0            IPV4  192.168.11.0/24  PUBLIC          

SCAN configuration details                                                      
--------------------------                                                      

SCAN "scan.localdomain" details                                                 
+++++++++++++++++++++++++++++++                                                 
  Name                scan.localdomain  
  IPv4 subnet         192.168.11.0/24   
  DHCP server type    static            
  End points          TCP:1521          

  SCAN listeners                                                                
  --------------                                                                
  Name        VIP address    
  LISTENER    192.168.11.60  


ASM configuration details                                                       
-------------------------                                                       
  Mode             remote  
  Password file    +RAC    
  SPFILE           +RAC    

  ASM disk group details                                                        
  ++++++++++++++++++++++                                                        
  Name  Redundancy  
  CRS   NORMAL      
  DATA  EXTERN      
  FRA   EXTERN      
  RAC   EXTERN      

Database configuration details                                                  
==============================                                                  

Database "ora.uptst.db" details                                                 
-------------------------------                                                 
  Name                ora.uptst.db                                                   
  Type                RAC                                                            
  Version             18.0.0.0.0                                                     
  Role                PRIMARY                                                        
  Management          AUTOMATIC                                                      
  policy                                                                             
  SPFILE              +DATA                                                          
  Password file       +DATA                                                          
  Groups              OSDBA:dba OSOPER:dba OSBACKUP:dba OSDG:dba OSKM:dba OSRAC:dba  
  Oracle home         /u01/app/oracle/product/18.0.0/db_home1                        

Database "ora.uptst2.db" details                                                
--------------------------------                                                
  Name                 ora.uptst2.db                                        
  Type                 RAC                                                  
  Version              12.1.0.2.0                                           
  Role                 PRIMARY                                              
  Management policy    AUTOMATIC                                            
  SPFILE               +DATA                                                
  Password file        +DATA                                                
  Groups               OSDBA:dba OSOPER:dba OSBACKUP:dba OSDG:dba OSKM:dba  
  Oracle home          /u01/app/oracle/product/12.1.0.2_1                   
[oracle@rac1 Desktop]$ 

Be Careful When Subscribing To Oracle Learning Subscription

Mon, 2019-10-07 18:53

Subscribing to Oracle Learning Subscription seems good in theory but bad in reality.

Oracle Support informed me: “Oracle University’s policy regarding Learning Subscription courseware materials is that they cannot be downloaded by customers.”

How convenient of Oracle, as this information should have been stated at https://education.oracle.com/oracle-learning-subscriptions

I took it for granted that the materials could be downloaded, since they are available for download in all other training formats.

Not disclosing this seems deceptive, because by the time one has subscribed and discovered the lack of full disclosure, it may be too late.

Hopefully, this will help others avoid the same mistake.

 

Duplicate OMF DB Using Backupset To New Host And Directories

Sat, 2019-09-14 10:23

Database 12.1.0.2.0 is created using OMF.

At SOURCE:
db_create_file_dest=/u01/app/oracle/oradata
db_recovery_file_dest=/u01/app/oracle/fast_recovery_area

At DESTINATION:
db_create_file_dest=/u01/oradata
db_recovery_file_dest=/u01/fast_recovery_area

BACKUP LOCATION on shared storage:
/media/shared_storage/rman_backup/EMU
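
On the destination host, the OMF directories and the shared backup location need to exist and be readable by oracle before the duplicate; a minimal check, using the paths above:

mkdir -p /u01/oradata /u01/fast_recovery_area
ls /media/shared_storage/rman_backup/EMU | head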

I had to explicitly set control_files since I was not able to determine how it could be done automatically.

set control_files='/u01/oradata/EMU/controlfile/o1_mf_gqsq2mlg_.ctl','/u01/fast_recovery_area/EMU/controlfile/o1_mf_gqsq2mpz_.ctl'

If control_files is not set, the duplicate will restore the control files to their original locations:

channel ORA_AUX_DISK_1: restoring control file
channel ORA_AUX_DISK_1: restore complete, elapsed time: 00:00:01
output file name=/u01/app/oracle/oradata/EMU/controlfile/o1_mf_gqsq2mlg_.ctl
output file name=/u01/app/oracle/fast_recovery_area/EMU/controlfile/o1_mf_gqsq2mpz_.ctl

STEPS:

================================================================================
### SOURCE: Backup Database
================================================================================

--------------------------------------------------
### Retrieve controlfile locations.
--------------------------------------------------

SQL> select name from v$controlfile;

NAME
--------------------------------------------------------------------------------
/u01/app/oracle/oradata/EMU/controlfile/o1_mf_gqsq2mlg_.ctl
/u01/app/oracle/fast_recovery_area/EMU/controlfile/o1_mf_gqsq2mpz_.ctl

SQL>

--------------------------------------------------
### Retrieve redo logs locations.
--------------------------------------------------

SQL> select member from v$logfile;

MEMBER
--------------------------------------------------------------------------------
/u01/app/oracle/oradata/EMU/onlinelog/o1_mf_1_gqsq2mvy_.log
/u01/app/oracle/fast_recovery_area/EMU/onlinelog/o1_mf_1_gqsq2myy_.log
/u01/app/oracle/oradata/EMU/onlinelog/o1_mf_2_gqsq2n1o_.log
/u01/app/oracle/fast_recovery_area/EMU/onlinelog/o1_mf_2_gqsq2n3g_.log
/u01/app/oracle/oradata/EMU/onlinelog/o1_mf_3_gqsq2n50_.log
/u01/app/oracle/fast_recovery_area/EMU/onlinelog/o1_mf_3_gqsq2nql_.log

6 rows selected.

SQL>

--------------------------------------------------
### Backup database.
--------------------------------------------------

[oracle@ol741 EMU]$ export NLS_DATE_FORMAT="DD-MON-YYYY HH24:MI:SS"
[oracle@ol741 EMU]$ rman @ backup.rman

Recovery Manager: Release 12.1.0.2.0 - Production on Sat Sep 14 16:09:06 2019

Copyright (c) 1982, 2014, Oracle and/or its affiliates.  All rights reserved.

RMAN> spool log to /media/shared_storage/rman_backup/EMU/rman_EMU_level0.log
2> set echo on
3> connect target;
4> show all;
5> set command id to "BACKUP_EMU";
6> run {
7> allocate channel d1 device type disk format '/media/shared_storage/rman_backup/EMU/%d_%I_%T_%U.bks' maxopenfiles 1;
8> allocate channel d2 device type disk format '/media/shared_storage/rman_backup/EMU/%d_%I_%T_%U.bks' maxopenfiles 1;
9> allocate channel d3 device type disk format '/media/shared_storage/rman_backup/EMU/%d_%I_%T_%U.bks' maxopenfiles 1;
10> allocate channel d4 device type disk format '/media/shared_storage/rman_backup/EMU/%d_%I_%T_%U.bks' maxopenfiles 1;
11> allocate channel d5 device type disk format '/media/shared_storage/rman_backup/EMU/%d_%I_%T_%U.bks' maxopenfiles 1;
12> backup as compressed backupset incremental level 0 check logical database filesperset 1 tag="EMU"
13> plus archivelog filesperset 8 tag="EMU"
14> ;
15> }
16> alter database backup controlfile to trace as '/media/shared_storage/rman_backup/EMU/cf_@.sql' REUSE RESETLOGS;
17> create pfile='/media/shared_storage/rman_backup/EMU/init@.ora' from spfile;
18> list backup summary tag="EMU";
19> list backup of spfile tag="EMU";
20> list backup of controlfile tag="EMU";
21> report schema;
22> exit

--------------------------------------------------
### Retrieve datafile and tempfile locations.
--------------------------------------------------

[oracle@ol741 EMU]$ rman target /

Recovery Manager: Release 12.1.0.2.0 - Production on Sat Sep 14 16:09:44 2019

Copyright (c) 1982, 2014, Oracle and/or its affiliates.  All rights reserved.

connected to target database: EMU (DBID=3838062773)

RMAN> report schema;

using target database control file instead of recovery catalog
Report of database schema for database with db_unique_name EMU

List of Permanent Datafiles
===========================
File Size(MB) Tablespace           RB segs Datafile Name
---- -------- -------------------- ------- ------------------------
1    700      SYSTEM               YES     /u01/app/oracle/oradata/EMU/datafile/o1_mf_system_gqsq2qw4_.dbf
2    550      SYSAUX               NO      /u01/app/oracle/oradata/EMU/datafile/o1_mf_sysaux_gqsq30xo_.dbf
3    265      UNDOTBS1             YES     /u01/app/oracle/oradata/EMU/datafile/o1_mf_undotbs1_gqsq3875_.dbf
4    5        USERS                NO      /u01/app/oracle/oradata/EMU/datafile/o1_mf_users_gqsq405f_.dbf

List of Temporary Files
=======================
File Size(MB) Tablespace           Maxsize(MB) Tempfile Name
---- -------- -------------------- ----------- --------------------
1    20       TEMP                 32767       /u01/app/oracle/oradata/EMU/datafile/o1_mf_temp_gqsq3c3d_.tmp

RMAN> exit

Recovery Manager complete.
[oracle@ol741 EMU]$


================================================================================
### TARGET: Restore Database
================================================================================

--------------------------------------------------
### Create pfile.
--------------------------------------------------

[oracle@ol742 dbs]$ pwd
/u01/app/oracle/product/12.1.0.2/db_1/dbs

[oracle@ol742 dbs]$ cat initemu.ora
*.db_name='emu'
[oracle@ol742 dbs]$

--------------------------------------------------
### Startup nomount.
--------------------------------------------------

[oracle@ol742 EMU]$ . oraenv <<< emu
[oracle@ol742 EMU]$ sqlplus / as sysdba
SQL> startup nomount;
ORACLE instance started.

Total System Global Area  234881024 bytes
Fixed Size                  2922904 bytes
Variable Size             176162408 bytes
Database Buffers           50331648 bytes
Redo Buffers                5464064 bytes
SQL>

--------------------------------------------------
### Create new directories.
--------------------------------------------------

[oracle@ol742 EMU]$ mkdir -p /u01/oradata/EMU/controlfile/
[oracle@ol742 EMU]$ mkdir -p /u01/fast_recovery_area/EMU/controlfile

--------------------------------------------------
### Duplicate database.
--------------------------------------------------

[oracle@ol742 EMU]$ export NLS_DATE_FORMAT="DD-MON-YYYY HH24:MI:SS"
[oracle@ol742 EMU]$ rman @ dup_omf_bks.rman

Recovery Manager: Release 12.1.0.2.0 - Production on Sat Sep 14 16:37:51 2019

Copyright (c) 1982, 2014, Oracle and/or its affiliates.  All rights reserved.

RMAN> spool log to /media/shared_storage/rman_backup/EMU/rman_duplicate_database.log
2> set echo on
3> connect auxiliary *
4> show all;
5> set command id to "DUPLICATE_EMU";
6> DUPLICATE DATABASE TO emu
7>   SPFILE
8>   set db_file_name_convert='/u01/app/oracle/oradata','/u01/oradata'
9>   set log_file_name_convert='/u01/app/oracle/oradata','/u01/oradata'
10>   set db_create_file_dest='/u01/oradata'
11>   set db_recovery_file_dest='/u01/fast_recovery_area'
12>   set control_files='/u01/oradata/EMU/controlfile/o1_mf_gqsq2mlg_.ctl','/u01/fast_recovery_area/EMU/controlfile/o1_mf_gqsq2mpz_.ctl'
13>   BACKUP LOCATION '/media/shared_storage/rman_backup/EMU'
14>   NOFILENAMECHECK
15> ;
16> exit

--------------------------------------------------
### Retrieve datafile and tempfile locations.
--------------------------------------------------

[oracle@ol742 EMU]$ rman target /

Recovery Manager: Release 12.1.0.2.0 - Production on Sat Sep 14 16:40:33 2019

Copyright (c) 1982, 2014, Oracle and/or its affiliates.  All rights reserved.

connected to target database: EMU (DBID=3838070815)

RMAN> report schema;

using target database control file instead of recovery catalog
Report of database schema for database with db_unique_name EMU

List of Permanent Datafiles
===========================
File Size(MB) Tablespace           RB segs Datafile Name
---- -------- -------------------- ------- ------------------------
1    700      SYSTEM               YES     /u01/oradata/EMU/datafile/o1_mf_system_gqsyvo1y_.dbf
2    550      SYSAUX               NO      /u01/oradata/EMU/datafile/o1_mf_sysaux_gqsywg40_.dbf
3    265      UNDOTBS1             YES     /u01/oradata/EMU/datafile/o1_mf_undotbs1_gqsywx7n_.dbf
4    5        USERS                NO      /u01/oradata/EMU/datafile/o1_mf_users_gqsyxd90_.dbf

List of Temporary Files
=======================
File Size(MB) Tablespace           Maxsize(MB) Tempfile Name
---- -------- -------------------- ----------- --------------------
1    20       TEMP                 32767       /u01/oradata/EMU/datafile/o1_mf_temp_gqsyy469_.tmp

RMAN> exit


Recovery Manager complete.
[oracle@ol742 EMU]$

--------------------------------------------------
### Retrieve controlfile locations.
--------------------------------------------------

SQL> select name from v$controlfile;

NAME
--------------------------------------------------------------------------------
/u01/oradata/EMU/controlfile/o1_mf_gqsq2mlg_.ctl
/u01/fast_recovery_area/EMU/controlfile/o1_mf_gqsq2mpz_.ctl

SQL>

--------------------------------------------------
### Retrieve redo logs locations.
--------------------------------------------------

SQL> select member from v$logfile;

MEMBER
--------------------------------------------------------------------------------
/u01/oradata/EMU/onlinelog/o1_mf_3_gqsyy2kh_.log
/u01/fast_recovery_area/EMU/onlinelog/o1_mf_3_gqsyy2lx_.log
/u01/oradata/EMU/onlinelog/o1_mf_2_gqsyy165_.log
/u01/fast_recovery_area/EMU/onlinelog/o1_mf_2_gqsyy17p_.log
/u01/oradata/EMU/onlinelog/o1_mf_1_gqsyxztc_.log
/u01/fast_recovery_area/EMU/onlinelog/o1_mf_1_gqsyxzw3_.log

6 rows selected.

SQL>

Logs:

rman_EMU_level0.log

rman_duplicate_database.log

 

Obvious But Not For Oracle Obviously

Mon, 2019-08-12 13:38

While dropping a RAC database, I found the error ORA-01081: cannot start already-running ORACLE - shut it down first in the dbca log.

Looking up the error, the cause is obvious:

$ oerr ora 01081
01081, 00000, "cannot start already-running ORACLE - shut it down first"
// *Cause:  Obvious
// *Action:

Here is the process for 12.1.0:

$ ps -ef|grep pmon
oracle   41777     1  0 Aug09 ?        00:00:30 asm_pmon_+ASM2

$ srvctl config database
DBFS

$ srvctl status database -d DBFS -v
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Instance DBFS1 is not running on node node1
Instance DBFS2 is not running on node node2
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
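
If the instances had been running, a clean stop before the delete would look like this (a hedged sketch, not needed here since both instances were already down):

srvctl stop database -d DBFS -stopoption immediate
srvctl status database -d DBFS -v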

$ dbca -silent -deleteDatabase -sourceDB DBFS
Connecting to database
9% complete
14% complete
19% complete
23% complete
28% complete
33% complete
38% complete
47% complete
Updating network configuration files
48% complete
52% complete
Deleting instances and datafiles
66% complete
80% complete
95% complete
100% complete
Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/DBFS.log" for further details.

$ cat /u01/app/oracle/cfgtoollogs/dbca/DBFS.log
The Database Configuration Assistant will delete the Oracle instances and datafiles for your database. 
All information in the database will be deleted. Do you want to proceed?
Connecting to database
DBCA_PROGRESS : 9%
DBCA_PROGRESS : 14%
DBCA_PROGRESS : 19%
DBCA_PROGRESS : 23%
DBCA_PROGRESS : 28%
DBCA_PROGRESS : 33%
DBCA_PROGRESS : 38%
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
ORA-01081: cannot start already-running ORACLE - shut it down first
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
DBCA_PROGRESS : 47%
Updating network configuration files
DBCA_PROGRESS : 48%
DBCA_PROGRESS : 52%
Deleting instances and datafiles
DBCA_PROGRESS : 66%
DBCA_PROGRESS : 80%
DBCA_PROGRESS : 95%
DBCA_PROGRESS : 100%
Database deletion completed.

$ srvctl config database
$

19c Grid Dry-Run Upgrade

Tue, 2019-08-06 07:42

First test using GUI.

[oracle@racnode-dc2-1 grid]$ /u01/app/19.3.0.0/grid/gridSetup.sh -dryRunForUpgrade
Launching Oracle Grid Infrastructure Setup Wizard...

The response file for this session can be found at:
 /u01/app/19.3.0.0/grid/install/response/grid_2019-08-06_00-20-31AM.rsp

You can find the log of this install session at:
 /u01/app/oraInventory/logs/GridSetupActions2019-08-06_00-20-31AM/gridSetupActions2019-08-06_00-20-31AM.log
[oracle@racnode-dc2-1 grid]$

Create dryRunForUpgradegrid.rsp from grid_2019-08-06_00-20-31AM.rsp (from the GUI test above):

[oracle@racnode-dc2-1 grid]$ grep -v "^#" /u01/app/19.3.0.0/grid/install/response/grid_2019-08-06_00-20-31AM.rsp | grep -v "=$" | awk 'NF' > /home/oracle/dryRunForUpgradegrid.rsp

[oracle@racnode-dc2-1 ~]$ cat /home/oracle/dryRunForUpgradegrid.rsp
oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v19.0.0
INVENTORY_LOCATION=/u01/app/oraInventory
oracle.install.option=UPGRADE
ORACLE_BASE=/u01/app/oracle
oracle.install.crs.config.scanType=LOCAL_SCAN
oracle.install.crs.config.ClusterConfiguration=STANDALONE
oracle.install.crs.config.configureAsExtendedCluster=false
oracle.install.crs.config.clusterName=vbox-rac-dc2
oracle.install.crs.config.gpnp.configureGNS=false
oracle.install.crs.config.autoConfigureClusterNodeVIP=false
oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
oracle.install.crs.config.clusterNodes=racnode-dc2-1:,racnode-dc2-2:
oracle.install.crs.configureGIMR=true
oracle.install.asm.configureGIMRDataDG=false
oracle.install.crs.config.storageOption=FLEX_ASM_STORAGE
oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=
oracle.install.crs.config.useIPMI=false
oracle.install.asm.diskGroup.name=CRS
oracle.install.asm.diskGroup.AUSize=0
oracle.install.asm.gimrDG.AUSize=1
oracle.install.asm.configureAFD=false
oracle.install.crs.configureRHPS=false
oracle.install.crs.config.ignoreDownNodes=false
oracle.install.config.managementOption=NONE
oracle.install.config.omsPort=0
oracle.install.crs.rootconfig.executeRootScript=false
[oracle@racnode-dc2-1 ~]$

Create the grid home directory on all nodes:

[root@racnode-dc2-1 ~]# id oracle
uid=54321(oracle) gid=54321(oinstall) groups=54321(oinstall),54318(asmdba),54322(dba),54323(backupdba),54324(oper),54325(dgdba),54326(kmdba)

[root@racnode-dc2-1 ~]# mkdir -p /u01/app/19.3.0.0/grid
[root@racnode-dc2-1 ~]# chown oracle:oinstall /u01/app/19.3.0.0/grid
[root@racnode-dc2-1 ~]# chmod 775 /u01/app/19.3.0.0/grid

[root@racnode-dc2-1 ~]# ll /u01/app/19.3.0.0/
total 4
drwxrwxr-x 2 oracle oinstall 4096 Aug  6 02:07 grid
[root@racnode-dc2-1 ~]#

Extract grid software for node1 ONLY:

[oracle@racnode-dc2-1 ~]$ unzip -qo /media/swrepo/LINUX.X64_193000_grid_home.zip -d /u01/app/19.3.0.0/grid/

[oracle@racnode-dc2-1 ~]$ ls /u01/app/19.3.0.0/grid/
addnode     clone  dbjava     diagnostics  gpnp          install        jdbc  lib      OPatch   ords  perl     qos       rhp            rootupgrade.sh  sqlpatch  tomcat  welcome.html  xdk
assistants  crs    dbs        dmu          gridSetup.sh  instantclient  jdk   md       opmn     oss   plsql    racg      root.sh        runcluvfy.sh    sqlplus   ucp     wlm
bin         css    deinstall  env.ora      has           inventory      jlib  network  oracore  oui   precomp  rdbms     root.sh.old    sdk             srvm      usm     wwg
cha         cv     demo       evm          hs            javavm         ldap  nls      ord      owm   QOpatch  relnotes  root.sh.old.1  slax            suptools  utl     xag

[oracle@racnode-dc2-1 ~]$ du -sh /u01/app/19.3.0.0/grid/
6.0G    /u01/app/19.3.0.0/grid/
[oracle@racnode-dc2-1 ~]$

Run gridSetup.sh -silent -dryRunForUpgrade:

[oracle@racnode-dc2-1 ~]$ env|grep -i ora
USER=oracle
MAIL=/var/spool/mail/oracle
PATH=/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/oracle/.local/bin:/home/oracle/bin
PWD=/home/oracle
HOME=/home/oracle
LOGNAME=oracle

[oracle@racnode-dc2-1 ~]$ date
Tue Aug  6 02:35:47 CEST 2019

[oracle@racnode-dc2-1 ~]$ /u01/app/19.3.0.0/grid/gridSetup.sh -silent -dryRunForUpgrade -responseFile /home/oracle/dryRunForUpgradegrid.rsp
Launching Oracle Grid Infrastructure Setup Wizard...

[WARNING] [INS-13014] Target environment does not meet some optional requirements.
   CAUSE: Some of the optional prerequisites are not met. See logs for details. /u01/app/oraInventory/logs/GridSetupActions2019-08-06_02-35-52AM/gridSetupActions2019-08-06_02-35-52AM.log
   ACTION: Identify the list of failed prerequisite checks from the log: /u01/app/oraInventory/logs/GridSetupActions2019-08-06_02-35-52AM/gridSetupActions2019-08-06_02-35-52AM.log. Then either from the log file or from installation manual find the appropriate configuration to meet the prerequisites and fix it manually.
The response file for this session can be found at:
 /u01/app/19.3.0.0/grid/install/response/grid_2019-08-06_02-35-52AM.rsp

You can find the log of this install session at:
 /u01/app/oraInventory/logs/GridSetupActions2019-08-06_02-35-52AM/gridSetupActions2019-08-06_02-35-52AM.log


As a root user, execute the following script(s):
        1. /u01/app/19.3.0.0/grid/rootupgrade.sh

Execute /u01/app/19.3.0.0/grid/rootupgrade.sh on the following nodes:
[racnode-dc2-1]

Run the script on the local node.

Successfully Setup Software with warning(s).
[oracle@racnode-dc2-1 ~]$

Run rootupgrade.sh for node1 ONLY and review log:

[root@racnode-dc2-1 ~]# /u01/app/19.3.0.0/grid/rootupgrade.sh
Check /u01/app/19.3.0.0/grid/install/root_racnode-dc2-1_2019-08-06_02-44-59-241151038.log for the output of root script

[root@racnode-dc2-1 ~]# cat /u01/app/19.3.0.0/grid/install/root_racnode-dc2-1_2019-08-06_02-44-59-241151038.log
Performing root user operation.

The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /u01/app/19.3.0.0/grid
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Performing Dry run of the Grid Infrastructure upgrade.
Using configuration parameter file: /u01/app/19.3.0.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
  /u01/app/oracle/crsdata/racnode-dc2-1/crsconfig/rootcrs_racnode-dc2-1_2019-08-06_02-45-31AM.log
2019/08/06 02:45:44 CLSRSC-464: Starting retrieval of the cluster configuration data
2019/08/06 02:45:52 CLSRSC-729: Checking whether CRS entities are ready for upgrade, cluster upgrade will not be attempted now. This operation may take a few minutes.
2019/08/06 02:47:56 CLSRSC-693: CRS entities validation completed successfully.
[root@racnode-dc2-1 ~]#

Check grid home for node2:

[oracle@racnode-dc2-2 ~]$ du -sh /u01/app/19.3.0.0/grid/
6.6G    /u01/app/19.3.0.0/grid/
[oracle@racnode-dc2-2 ~]$

Check oraInventory for ALL nodes:

[oracle@racnode-dc2-2 ~]$ cat /u01/app/oraInventory/ContentsXML/inventory.xml
<?xml version="1.0" standalone="yes" ?>
<!-- Copyright (c) 1999, 2019, Oracle and/or its affiliates.
All rights reserved. -->
<!-- Do not modify the contents of this file by hand. -->
<INVENTORY>
<VERSION_INFO>
   <SAVED_WITH>12.2.0.7.0</SAVED_WITH>
   <MINIMUM_VER>2.1.0.6.0</MINIMUM_VER>
</VERSION_INFO>
<HOME_LIST>
<HOME NAME="OraGI12Home1" LOC="/u01/app/12.2.0.1/grid" TYPE="O" IDX="1" CRS="true"/>
<HOME NAME="OraDB12Home1" LOC="/u01/app/oracle/12.2.0.1/db1" TYPE="O" IDX="2"/>
==========================================================================================
<HOME NAME="OraGI19Home1" LOC="/u01/app/19.3.0.0/grid" TYPE="O" IDX="3"/>
==========================================================================================
</HOME_LIST>
<COMPOSITEHOME_LIST>
</COMPOSITEHOME_LIST>
</INVENTORY>
[oracle@racnode-dc2-2 ~]$

Check crs activeversion: 12.2.0.1.0

[oracle@racnode-dc2-1 ~]$ . /media/patch/gi.env
The Oracle base has been set to /u01/app/oracle
ORACLE_SID=+ASM1
ORACLE_BASE=/u01/app/oracle
GRID_HOME=/u01/app/12.2.0.1/grid
ORACLE_HOME=/u01/app/12.2.0.1/grid
Oracle Instance alive for sid "+ASM1"

[oracle@racnode-dc2-1 ~]$ crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [12.2.0.1.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [927320293].
[oracle@racnode-dc2-1 ~]$

Check log location:

[oracle@racnode-dc2-1 ~]$ cd /u01/app/oraInventory/logs/GridSetupActions2019-08-06_02-35-52AM/

[oracle@racnode-dc2-1 GridSetupActions2019-08-06_02-35-52AM]$ ls -alrt
total 17420
-rw-r-----  1 oracle oinstall     129 Aug  6 02:35 installerPatchActions_2019-08-06_02-35-52AM.log
-rw-r-----  1 oracle oinstall       0 Aug  6 02:35 gridSetupActions2019-08-06_02-35-52AM.err
drwxrwx---  3 oracle oinstall    4096 Aug  6 02:35 temp_ob
-rw-r-----  1 oracle oinstall       0 Aug  6 02:39 oraInstall2019-08-06_02-35-52AM.err
drwxrwx--- 17 oracle oinstall    4096 Aug  6 02:39 ..
-rw-r-----  1 oracle oinstall     157 Aug  6 02:39 oraInstall2019-08-06_02-35-52AM.out
-rw-r-----  1 oracle oinstall       0 Aug  6 02:43 oraInstall2019-08-06_02-35-52AM.err.racnode-dc2-2
-rw-r-----  1 oracle oinstall     142 Aug  6 02:43 oraInstall2019-08-06_02-35-52AM.out.racnode-dc2-2
-rw-r-----  1 oracle oinstall 9341920 Aug  6 02:43 gridSetupActions2019-08-06_02-35-52AM.out
-rw-r-----  1 oracle oinstall   13419 Aug  6 02:43 time2019-08-06_02-35-52AM.log
-rw-r-----  1 oracle oinstall 8443087 Aug  6 02:43 gridSetupActions2019-08-06_02-35-52AM.log
drwxrwx---  3 oracle oinstall    4096 Aug  6 02:56 .
[oracle@racnode-dc2-1 GridSetupActions2019-08-06_02-35-52AM]$

Since I have not performed the actual upgrade, I don’t know if the 19.3.0.0 grid home in oraInventory will be problematic.

It was problematic when performing the test in silent mode after the initial test with the GUI.

To resolve the issue, detach the 19.3.0.0 grid home:

export ORACLE_HOME=/u01/app/19.3.0.0/grid
$ORACLE_HOME/oui/bin/runInstaller -detachHome -silent ORACLE_HOME=$ORACLE_HOME
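
After the detach, a quick grep of the central inventory should confirm the 19.3.0.0 home entry is gone (path from above):

grep "19.3.0.0" /u01/app/oraInventory/ContentsXML/inventory.xml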

Too Old To Remember

Thu, 2019-08-01 12:01

Is it required to run datapatch after creating a database?

Why bother trying to remember versus running datapatch -prereq to find out?

Test case for 12.2.

Database July 2019 Release Update 12.2 applied:

[oracle@racnode-dc2-1 ~]$ /media/patch/lspatches.sh
+ . /media/patch/gi.env
++ set +x
The Oracle base remains unchanged with value /u01/app/oracle
ORACLE_SID=+ASM1
ORACLE_BASE=/u01/app/oracle
GRID_HOME=/u01/app/12.2.0.1/grid
ORACLE_HOME=/u01/app/12.2.0.1/grid
Oracle Instance alive for sid "+ASM1"
+ /u01/app/12.2.0.1/grid/OPatch/opatch version
OPatch Version: 12.2.0.1.17

OPatch succeeded.
+ /u01/app/12.2.0.1/grid/OPatch/opatch lspatches
29770090;ACFS JUL 2019 RELEASE UPDATE 12.2.0.1.190716 (29770090)
29770040;OCW JUL 2019 RELEASE UPDATE 12.2.0.1.190716 (29770040)
29757449;Database Jul 2019 Release Update : 12.2.0.1.190716 (29757449)
28566910;TOMCAT RELEASE UPDATE 12.2.0.1.0(ID:180802.1448.S) (28566910)
26839277;DBWLM RELEASE UPDATE 12.2.0.1.0(ID:170913) (26839277)

OPatch succeeded.
+ exit
[oracle@racnode-dc2-1 ~]$

Create 12.2 RAC database:

[oracle@racnode-dc2-1 ~]$ dbca -silent -createDatabase -characterSet AL32UTF8 \
> -createAsContainerDatabase true \
> -templateName General_Purpose.dbc \
> -gdbname hawkcdb -sid hawkcdb -responseFile NO_VALUE \
> -sysPassword Oracle_4U! -systemPassword Oracle_4U! \
> -numberOfPDBs 1 -pdbName pdb01 -pdbAdminPassword Oracle_4U! \
> -databaseType MULTIPURPOSE \
> -automaticMemoryManagement false -totalMemory 3072 \
> -storageType ASM -diskGroupName DATA -recoveryGroupName FRA \
> -redoLogFileSize 100 \
> -emConfiguration NONE \
> -nodeinfo racnode-dc2-1,racnode-dc2-2 \
> -listeners LISTENER \
> -ignorePreReqs

Copying database files
21% complete
Creating and starting Oracle instance
35% complete
Creating cluster database views
50% complete
Completing Database Creation
57% complete
Creating Pluggable Databases
78% complete
Executing Post Configuration Actions
100% complete
Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/hawkcdb/hawkcdb.log" for further details.
[oracle@racnode-dc2-1 ~]$

Run datapatch -prereq for 12.2

[oracle@racnode-dc2-1 ~]$ $ORACLE_HOME/OPatch/datapatch -prereq
SQL Patching tool version 12.2.0.1.0 Production on Thu Aug  1 17:45:13 2019
Copyright (c) 2012, 2019, Oracle.  All rights reserved.

Connecting to database...OK
Note:  Datapatch will only apply or rollback SQL fixes for PDBs
       that are in an open state, no patches will be applied to closed PDBs.
       Please refer to Note: Datapatch: Database 12c Post Patch SQL Automation
       (Doc ID 1585822.1)
Determining current state...done
Adding patches to installation queue and performing prereq checks...done

**********************************************************************
Installation queue:
  For the following PDBs: CDB$ROOT PDB$SEED PDB01
    Nothing to roll back
    Nothing to apply
**********************************************************************

SQL Patching tool complete on Thu Aug  1 17:46:39 2019
[oracle@racnode-dc2-1 ~]$

Test case for 12.1.

Database July 2019 Bundle Patch 12.1 applied:

[oracle@racnode-dc1-1 ~]$ /media/patch/lspatches.sh
+ . /media/patch/gi.env
++ set +x
The Oracle base has been set to /u01/app/oracle
ORACLE_SID=+ASM1
ORACLE_BASE=/u01/app/oracle
GRID_HOME=/u01/app/12.1.0.2/grid
ORACLE_HOME=/u01/app/12.1.0.2/grid
Oracle Instance alive for sid "+ASM1"
+ /u01/app/12.1.0.2/grid/OPatch/opatch version
OPatch Version: 12.2.0.1.17

OPatch succeeded.
+ /u01/app/12.1.0.2/grid/OPatch/opatch lspatches
29509318;OCW PATCH SET UPDATE 12.1.0.2.190716 (29509318)
29496791;Database Bundle Patch : 12.1.0.2.190716 (29496791)
29423125;ACFS PATCH SET UPDATE 12.1.0.2.190716 (29423125)
26983807;WLM Patch Set Update: 12.1.0.2.180116 (26983807)

OPatch succeeded.
+ exit
[oracle@racnode-dc1-1 ~]$

Create 12.1 RAC database:

[oracle@racnode-dc1-1 ~]$ dbca -silent -createDatabase -characterSet AL32UTF8 \
> -createAsContainerDatabase true \
> -templateName General_Purpose.dbc \
> -gdbname cdbhawk -sid cdbhawk -responseFile NO_VALUE \
> -sysPassword Oracle_4U! -systemPassword Oracle_4U! \
> -numberOfPDBs 1 -pdbName pdb01 -pdbAdminPassword Oracle_4U! \
> -databaseType MULTIPURPOSE \
> -automaticMemoryManagement false -totalMemory 3072 \
> -storageType ASM -diskGroupName DATA -recoveryGroupName FRA \
> -redoLogFileSize 100 \
> -emConfiguration NONE \
> -nodeinfo racnode-dc1-1,racnode-dc1-2 \
> -listeners LISTENER \
> -ignorePreReqs

Copying database files
23% complete
Creating and starting Oracle instance
38% complete
Creating cluster database views
54% complete
Completing Database Creation
77% complete
Creating Pluggable Databases
81% complete
100% complete
Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/cdbhawk/cdbhawk.log" for further details.
[oracle@racnode-dc1-1 ~]$

Run datapatch -prereq for 12.1

[oracle@racnode-dc1-1 ~]$ $ORACLE_HOME/OPatch/datapatch -prereq
SQL Patching tool version 12.1.0.2.0 Production on Thu Aug  1 18:24:53 2019
Copyright (c) 2012, 2017, Oracle.  All rights reserved.

Connecting to database...OK
Note:  Datapatch will only apply or rollback SQL fixes for PDBs
       that are in an open state, no patches will be applied to closed PDBs.
       Please refer to Note: Datapatch: Database 12c Post Patch SQL Automation
       (Doc ID 1585822.1)
Bootstrapping registry and package to current versions...done
Determining current state...done
Adding patches to installation queue and performing prereq checks...done

**********************************************************************
Installation queue:
  For the following PDBs: CDB$ROOT PDB$SEED PDB01
    Nothing to roll back
    The following patches will be applied:
      29496791 (DATABASE BUNDLE PATCH 12.1.0.2.190716)
**********************************************************************

SQL Patching tool complete on Thu Aug  1 18:26:26 2019
[oracle@racnode-dc1-1 ~]$

For 12.1, datapatch is still required after database creation; for 12.2, it is not.
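
A minimal example of applying it, assuming the environment is set to the 12.1 database home (output omitted):

[oracle@racnode-dc1-1 ~]$ $ORACLE_HOME/OPatch/datapatch -verbose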

Rsync DBFS To ACFS For GoldenGate Trail Migration

Wed, 2019-07-24 09:25

Planning to move GoldenGate trail files from DBFS to ACFS.

This is pre-work before the actual migration, to stress I/O on ACFS.

Learned some cron along the way.

# Run every 2 hours at even hours
0 */2 * * * /home/oracle/working/dinh/acfs_ggdata02_rsync.sh > /tmp/rsync_acfs_ggdata_to_ggdata02.log 2>&1

# Run every 2 hours at odd hours
0 1-23/2 * * * /home/oracle/working/dinh/acfs_ggdata02_rsync.sh > /tmp/rsync_acfs_ggdata_to_ggdata02.log 2>&1
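
A rough sketch of what acfs_ggdata02_rsync.sh does, reconstructed from the output below (hypothetical; the real script also times each step):

#!/bin/sh
# Hypothetical reconstruction of acfs_ggdata02_rsync.sh
set -vx
# Mirror DBFS trail files to ACFS, removing files that no longer exist on the source
/bin/rsync -vrpogt --delete-after /DBFS/ggdata/ /ACFS/ggdata
[ $? != 0 ] && exit 1
# Verify the two directory trees and compare trail file counts
/bin/diff -rq /DBFS/ggdata /ACFS/ggdata
ls /DBFS/ggdata/dirdat/ | wc -l
ls -alrt /DBFS/ggdata/dirdat/ | head
ls -alrt /DBFS/ggdata/dirdat/ | tail
ls /ACFS/ggdata/dirdat/ | wc -l
ls -alrt /ACFS/ggdata/dirdat/ | head
ls -alrt /ACFS/ggdata/dirdat/ | tail
exit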

Syntax and output.

+ /bin/rsync -vrpogt --delete-after /DBFS/ggdata/ /ACFS/ggdata
building file list ... done

dirchk/E_SOURCE.cpe
dirchk/P_TARGET.cpe

dirdat/
dirdat/aa000307647
dirdat/aa000307648
.....
dirdat/aa000307726
dirdat/aa000307727

deleting dirdat/aa000306741
deleting dirdat/aa000306740
.....
deleting dirdat/aa000306662
deleting dirdat/aa000306661

sent 16,205,328,959 bytes  received 1,743 bytes  140,305,893.52 bytes/sec
total size is 203,021,110,174  speedup is 12.53

real	1m56.671s
user	1m24.643s
sys	0m45.875s

+ '[' 0 '!=' 0 ']'

+ /bin/diff -rq /DBFS/ggdata /ACFS/ggdata

Files /DBFS/ggdata/dirchk/E_SOURCE.cpe and /ACFS/ggdata/dirchk/E_SOURCE.cpe differ
Files /DBFS/ggdata/dirchk/P_TARGET.cpe and /ACFS/ggdata/dirchk/P_TARGET.cpe differ

Only in /ACFS/ggdata/dirdat: aa000306742
Only in /ACFS/ggdata/dirdat: aa000306743
Only in /ACFS/ggdata/dirdat: aa000306744
Only in /ACFS/ggdata/dirdat: aa000306745

Only in /DBFS/ggdata/dirdat: aa000307728
Only in /DBFS/ggdata/dirdat: aa000307729

real	69m15.207s
user	2m9.242s
sys	17m3.846s

+ ls /DBFS/ggdata/dirdat/
+ wc -l
975

+ ls -alrt /DBFS/ggdata/dirdat/
+ head
total 190631492
drwxrwxrwx 24 root    root             0 Feb  9  2018 ..
-rw-r-----  1 ggsuser oinstall 199999285 Mar  8  2018 .fuse_hidden001a3c47000001c5
-rw-r-----  1 ggsuser oinstall 199999896 May 23 00:23 .fuse_hidden000002b500000001
-rw-r-----  1 ggsuser oinstall 199999934 Jul 23 06:11 aa000306798
-rw-r-----  1 ggsuser oinstall 199999194 Jul 23 06:13 aa000306799
-rw-r-----  1 ggsuser oinstall 199999387 Jul 23 06:14 aa000306800
-rw-r-----  1 ggsuser oinstall 199999122 Jul 23 06:16 aa000306801
-rw-r-----  1 ggsuser oinstall 199999172 Jul 23 06:19 aa000306802
-rw-r-----  1 ggsuser oinstall 199999288 Jul 23 06:19 aa000306803

+ ls -alrt /DBFS/ggdata/dirdat/
+ tail
-rw-r-----  1 ggsuser oinstall 199999671 Jul 24 07:59 aa000307764
-rw-r-----  1 ggsuser oinstall 199999645 Jul 24 08:01 aa000307765
-rw-r-----  1 ggsuser oinstall 199998829 Jul 24 08:02 aa000307766
-rw-r-----  1 ggsuser oinstall 199998895 Jul 24 08:04 aa000307767
-rw-r-----  1 ggsuser oinstall 199999655 Jul 24 08:05 aa000307768
-rw-r-----  1 ggsuser oinstall 199999930 Jul 24 08:07 aa000307769
-rw-r-----  1 ggsuser oinstall 199999761 Jul 24 08:09 aa000307770
-rw-r-----  1 ggsuser oinstall 199999421 Jul 24 08:11 aa000307771
-rw-r-----  1 ggsuser oinstall   7109055 Jul 24 08:11 aa000307772

+ ls /ACFS/ggdata/dirdat/
+ wc -l
986

+ ls -alrt /ACFS/ggdata/dirdat/
+ head
total 194779104
drwxrwxrwx 24 root    root          8192 Feb  9  2018 ..
-rw-r-----  1 ggsuser oinstall 199999285 Mar  8  2018 .fuse_hidden001a3c47000001c5
-rw-r-----  1 ggsuser oinstall 199999896 May 23 00:23 .fuse_hidden000002b500000001
-rw-r-----  1 ggsuser oinstall 199998453 Jul 23 04:55 aa000306742
-rw-r-----  1 ggsuser oinstall 199999657 Jul 23 04:56 aa000306743
-rw-r-----  1 ggsuser oinstall 199999227 Jul 23 04:57 aa000306744
-rw-r-----  1 ggsuser oinstall 199999389 Jul 23 04:59 aa000306745
-rw-r-----  1 ggsuser oinstall 199999392 Jul 23 05:00 aa000306746
-rw-r-----  1 ggsuser oinstall 199999116 Jul 23 05:01 aa000306747

+ ls -alrt /ACFS/ggdata/dirdat/
+ tail
-rw-r-----  1 ggsuser oinstall 199999876 Jul 24 06:48 aa000307719
-rw-r-----  1 ggsuser oinstall 199999751 Jul 24 06:50 aa000307720
-rw-r-----  1 ggsuser oinstall 199999918 Jul 24 06:51 aa000307721
-rw-r-----  1 ggsuser oinstall 199999404 Jul 24 06:52 aa000307722
-rw-r-----  1 ggsuser oinstall 199999964 Jul 24 06:54 aa000307723
-rw-r-----  1 ggsuser oinstall 199999384 Jul 24 06:56 aa000307724
-rw-r-----  1 ggsuser oinstall 199999283 Jul 24 06:57 aa000307725
-rw-r-----  1 ggsuser oinstall 199998033 Jul 24 06:59 aa000307726
-rw-r-----  1 ggsuser oinstall 199999199 Jul 24 07:00 aa000307727

Check Cluster Resources Where Target != State

Tue, 2019-07-23 10:32

Current version.

[oracle@racnode-dc2-1 patch]$ cat /etc/oratab
#Backup file is  /u01/app/12.2.0.1/grid/srvm/admin/oratab.bak.racnode-dc2-1 line added by Agent
-MGMTDB:/u01/app/12.2.0.1/grid:N
hawk1:/u01/app/oracle/12.2.0.1/db1:N
+ASM1:/u01/app/12.2.0.1/grid:N          # line added by Agent
[oracle@racnode-dc2-1 patch]$

Kill database instance process.

[oracle@racnode-dc2-1 patch]$ ps -ef|grep pmon
oracle   13542     1  0 16:09 ?        00:00:00 asm_pmon_+ASM1
oracle   27663     1  0 16:39 ?        00:00:00 ora_pmon_hawk1
oracle   29401 18930  0 16:40 pts/0    00:00:00 grep --color=auto pmon
[oracle@racnode-dc2-1 patch]$
[oracle@racnode-dc2-1 patch]$ kill -9 27663
[oracle@racnode-dc2-1 patch]$

Check cluster resource – close but no cigar (false positive)

[oracle@racnode-dc2-1 patch]$ crsctl stat res -t -w '(TARGET != ONLINE) or (STATE != ONLINE)'
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.proxy_advm
               OFFLINE OFFLINE      racnode-dc2-1            STABLE
               OFFLINE OFFLINE      racnode-dc2-2            STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.asm
      3        OFFLINE OFFLINE                               STABLE
ora.hawk.db
      1        ONLINE  OFFLINE      racnode-dc2-1            Instance Shutdown,ST
                                                             ARTING
--------------------------------------------------------------------------------
[oracle@racnode-dc2-1 patch]$

Check cluster resource – BINGO!

[oracle@racnode-dc2-1 patch]$ crsctl stat res -t -w '(TARGET = ONLINE) and (STATE != ONLINE)'
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.hawk.db
      1        ONLINE  OFFLINE      racnode-dc2-1            Instance Shutdown,ST
                                                             ARTING
--------------------------------------------------------------------------------
[oracle@racnode-dc2-1 patch]$

Another example:

[oracle@racnode-dc2-1 ~]$ crsctl stat res -t -w '(TARGET = ONLINE) and (STATE != ONLINE)'
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.CRS.dg
               ONLINE  INTERMEDIATE racnode-dc2-2            STABLE
ora.DATA.dg
               ONLINE  INTERMEDIATE racnode-dc2-2            STABLE
ora.FRA.dg
               ONLINE  INTERMEDIATE racnode-dc2-2            STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.hawk.db
      1        ONLINE  OFFLINE      racnode-dc2-1            Instance Shutdown,ST
                                                             ARTING
--------------------------------------------------------------------------------
[oracle@racnode-dc2-1 ~]$

Learned something here.

[oracle@racnode-dc2-1 ~]$ crsctl stat res -v -w 'TYPE = ora.database.type'
NAME=ora.hawk.db
TYPE=ora.database.type
LAST_SERVER=racnode-dc2-1
STATE=ONLINE on racnode-dc2-1
TARGET=ONLINE
CARDINALITY_ID=1
OXR_SECTION=0
RESTART_COUNT=0
***** FAILURE_COUNT=1 
***** FAILURE_HISTORY=1564015051:racnode-dc2-1
ID=ora.hawk.db 1 1
INCARNATION=4
***** LAST_RESTART=07/25/2019 02:39:38
***** LAST_STATE_CHANGE=07/25/2019 02:39:51
STATE_DETAILS=Open,HOME=/u01/app/oracle/12.2.0.1/db1
INTERNAL_STATE=STABLE
TARGET_SERVER=racnode-dc2-1
RESOURCE_GROUP=
INSTANCE_COUNT=2

LAST_SERVER=racnode-dc2-2
STATE=ONLINE on racnode-dc2-2
TARGET=ONLINE
CARDINALITY_ID=2
OXR_SECTION=0
RESTART_COUNT=0
FAILURE_COUNT=0
FAILURE_HISTORY=
ID=ora.hawk.db 2 1
INCARNATION=1
LAST_RESTART=07/25/2019 02:21:45
LAST_STATE_CHANGE=07/25/2019 02:21:45
STATE_DETAILS=Open,HOME=/u01/app/oracle/12.2.0.1/db1
INTERNAL_STATE=STABLE
TARGET_SERVER=racnode-dc2-2
RESOURCE_GROUP=
INSTANCE_COUNT=2

[oracle@racnode-dc2-1 ~]$
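
To pull out just the attributes of interest from the verbose listing, the output can simply be filtered (an example, not from the original session):

[oracle@racnode-dc2-1 ~]$ crsctl stat res -v -w 'TYPE = ora.database.type' | grep -E 'NAME=|LAST_SERVER=|FAILURE_COUNT=|FAILURE_HISTORY=|LAST_RESTART=|LAST_STATE_CHANGE='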

Check cluster resource – sanity check.

[oracle@racnode-dc2-1 patch]$ crsctl stat res -t -w '((TARGET = ONLINE) and (STATE != ONLINE)'
[oracle@racnode-dc2-1 patch]$
[oracle@racnode-dc2-1 patch]$ crsctl stat res -t -w 'TYPE = ora.database.type'
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.hawk.db
      1        ONLINE  ONLINE       racnode-dc2-1            Open,HOME=/u01/app/o
                                                             racle/12.2.0.1/db1,S
                                                             TABLE
      2        ONLINE  ONLINE       racnode-dc2-2            Open,HOME=/u01/app/o
                                                             racle/12.2.0.1/db1,S
                                                             TABLE
--------------------------------------------------------------------------------
[oracle@racnode-dc2-1 patch]$
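
The same filter can be dropped into a small script for monitoring; a minimal sketch, assuming the gi.env helper shown in the other posts:

#!/bin/sh
# check_crs_target.sh (hypothetical): report resources where TARGET=ONLINE but STATE is not
. /media/patch/gi.env
OUT=$($GRID_HOME/bin/crsctl stat res -t -w '(TARGET = ONLINE) and (STATE != ONLINE)')
if [ -n "$OUT" ]; then
  echo "Cluster resources not at target state:"
  echo "$OUT"
  exit 1
fi
echo "OK: all ONLINE targets are ONLINE."
exit 0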

Resize ACFS Volume

Mon, 2019-07-22 13:14

Current Filesystem for ACFS is 299G.

Filesystem             Size  Used Avail Use% Mounted on
/dev/asm/acfs_vol-177  299G  2.6G  248G   2% /ggdata02

Free_MB is 872, which causes paging due to insufficient FREE space in ASM disk group ACFS_DATA.

$ asmcmd lsdg -g ACFS_DATA
Inst_ID  State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
      1  MOUNTED  EXTERN  N         512   4096  4194304    307184      872                0             872              0             N  ACFS_DATA/
      2  MOUNTED  EXTERN  N         512   4096  4194304    307184      874                0             872              0             N  ACFS_DATA/

Review attributes for ASM Disk Group ACFS_DATA.

$ asmcmd lsattr -l -G ACFS_DATA
Name                     Value       
access_control.enabled   FALSE       
access_control.umask     066         
au_size                  4194304     
cell.smart_scan_capable  FALSE       
compatible.advm          12.1.0.0.0  
compatible.asm           12.1.0.0.0  
compatible.rdbms         12.1.0.0.0  
content.check            FALSE       
content.type             data        
disk_repair_time         3.6h        
failgroup_repair_time    24.0h       
idp.boundary             auto        
idp.type                 dynamic     
phys_meta_replicated     true        
sector_size              512         
thin_provisioned         FALSE       

Resize /ggdata02 to 250G.

$ acfsutil size 250G /ggdata02
acfsutil size: new file system size: 268435456000 (256000MB)

Review results.

$ asmcmd lsdg -g ACFS_DATA
Inst_ID  State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
      1  MOUNTED  EXTERN  N         512   4096  4194304    307184    51044                0           51044              0             N  ACFS_DATA/
      2  MOUNTED  EXTERN  N         512   4096  4194304    307184    51044                0           51044              0             N  ACFS_DATA/


$ df -h /ggdata02
Filesystem             Size  Used Avail Use% Mounted on
/dev/asm/acfs_vol-177  250G  2.6G  248G   2% /ggdata02

$ asmcmd volinfo --all
Diskgroup Name: ACFS_DATA

	 Volume Name: ACFS_VOL
	 Volume Device: /dev/asm/acfs_vol-177
	 State: ENABLED
	 Size (MB): 256000
	 Resize Unit (MB): 512
	 Redundancy: UNPROT
	 Stripe Columns: 8
	 Stripe Width (K): 1024
	 Usage: ACFS
	 Mountpath: /ggdata02
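
For reference, acfsutil size also accepts relative values, so the filesystem can be grown back later if the disk group has room (example only):

$ acfsutil size +50G /ggdata02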

Delete MGMTDB and MGMTLSNR from OEM using emcli

Mon, 2019-07-15 18:22

Per Doc ID 1933649.1, MGMTDB and MGMTLSNR should not be monitored.

$ grep oms /etc/oratab 
oms:/u01/middleware/13.2.0:N

$ . oraenv <<< oms

$ emcli login -username=SYSMAN
Enter password : 
Login successful

$ emcli sync
Synchronized successfully

$ emcli get_targets -targets=oracle_listener -format=name:csv|grep -i MGMT
1,Up,oracle_listener,MGMTLSNR_host01

$ emcli delete_target -name="MGMTLSNR_host01" -type="oracle_listener" 
Target "MGMTLSNR_host01:oracle_listener" deleted successfully

$ emcli sync
$ emcli get_targets|grep -i MGMT

Note: MGMTDB was not monitored and can be deleted as follows:

$ emcli get_targets -targets=oracle_database -format=name:csv|grep -i MGMT
$ emcli delete_target -name="MGMTDB_host01" -type="oracle_database" 

The problem with monitoring MGMTDB and MGMTLSNR is getting a silly page when they are relocated to a new host.

Host=host01
Target type=Listener 
Target name=MGMTLSNR_host01
Categories=Availability 
Message=The listener is down:

I am dealing with the same issue for the SCAN listeners and have not reached an agreement to have them deleted, as I and a few others think they should not be monitored.
Unfortunately, there is no official Oracle documentation for this.

Here’s a typical page for when all scan listeners are running from only one node.

Host=host01
Target type=Listener
Target name=LISTENER_SCAN2_cluster
Categories=Availability
Message=The listener is down: 

$ srvctl status scan_listener
SCAN Listener LISTENER_SCAN1 is enabled
SCAN listener LISTENER_SCAN1 is running on node node02
SCAN Listener LISTENER_SCAN2 is enabled
SCAN listener LISTENER_SCAN2 is running on node node02
SCAN Listener LISTENER_SCAN3 is enabled
SCAN listener LISTENER_SCAN3 is running on node node02

DGMGRL Using Help To Learn About New Validate Features

Fri, 2019-06-28 10:57

Wouldn’t it be nicer if Oracle would add (NF) for new features to the help syntax?

DGMGRL for Linux: Release 12.2.0.1.0

[oracle@db-fs-1 bin]$ ./dgmgrl /
DGMGRL for Linux: Release 12.2.0.1.0 - Production on Fri Jun 28 17:49:16 2019

Copyright (c) 1982, 2017, Oracle and/or its affiliates.  All rights reserved.

Welcome to DGMGRL, type "help" for information.
Connected to "orclcdb"
Connected as SYSDG.
DGMGRL> help validate

Performs an exhaustive set of validations for a member

Syntax:

  VALIDATE DATABASE [VERBOSE] <database name>;

  VALIDATE DATABASE [VERBOSE] <database name> DATAFILE <datafile number>
    OUTPUT=<file name>;

  VALIDATE DATABASE [VERBOSE] <database name> SPFILE;

  VALIDATE FAR_SYNC [VERBOSE] <far_sync name>
    [WHEN PRIMARY IS <database name>];

DGMGRL>

DGMGRL for Linux: Release 18.0.0.0.0

[oracle@ADC6160274 GDS]$ dgmgrl /
DGMGRL for Linux: Release 18.0.0.0.0 - Production on Fri Jun 28 15:54:36 2019
Version 18.3.0.0.0

Copyright (c) 1982, 2018, Oracle and/or its affiliates.  All rights reserved.

Welcome to DGMGRL, type "help" for information.
Connected to "chi"
Connected as SYSDG.
DGMGRL> help validate

Performs an exhaustive set of validations for a member

Syntax:

  VALIDATE DATABASE [VERBOSE] <database name>;

  VALIDATE DATABASE [VERBOSE] <database name> DATAFILE <datafile number>
    OUTPUT=<file name>;

  VALIDATE DATABASE [VERBOSE] <database name> SPFILE;

  VALIDATE FAR_SYNC [VERBOSE] <far_sync name>
    [WHEN PRIMARY IS <database name>];

  VALIDATE NETWORK CONFIGURATION FOR { ALL | <member name> }; [*** NF ***]

  VALIDATE STATIC CONNECT IDENTIFIER FOR { ALL | <database name> }; [*** NF ***]

DGMGRL>

validate network configuration

DGMGRL> validate network configuration for all;
Connecting to instance "sales" on database "sfo" ...
Connected to "sfo"
Checking connectivity from instance "sales" on database "sfo to instance "sales" on database "chi"...
Succeeded.
Connecting to instance "sales" on database "chi" ...
Connected to "chi"
Checking connectivity from instance "sales" on database "chi to instance "sales" on database "sfo"...
Succeeded.

Oracle Clusterware is not configured on database "sfo".
Connecting to database "sfo" using static connect identifier "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=SLC02PNY.localdomain)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=sfo_DGMGRL)(INSTANCE_NAME=sales)(SERVER=DEDICATED)(STATIC_SERVICE=TRUE)))" ...
Succeeded.
The static connect identifier allows for a connection to database "sfo".

Oracle Clusterware is not configured on database "chi".
Connecting to database "chi" using static connect identifier "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=ADC6160274.localdomain)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=chi_DGMGRL)(INSTANCE_NAME=sales)(SERVER=DEDICATED)(STATIC_SERVICE=TRUE)))" ...
Succeeded.
The static connect identifier allows for a connection to database "chi".

validate static connect identifier

DGMGRL> validate static connect identifier for all;
Oracle Clusterware is not configured on database "sfo".
Connecting to database "sfo" using static connect identifier "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=SLC02PNY.localdomain)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=sfo_DGMGRL)(INSTANCE_NAME=sales)(SERVER=DEDICATED)(STATIC_SERVICE=TRUE)))" ...
Succeeded.
The static connect identifier allows for a connection to database "sfo".

Oracle Clusterware is not configured on database "chi".
Connecting to database "chi" using static connect identifier "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=ADC6160274.localdomain)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=chi_DGMGRL)(INSTANCE_NAME=sales)(SERVER=DEDICATED)(STATIC_SERVICE=TRUE)))" ...
Succeeded.
The static connect identifier allows for a connection to database "chi".

DGMGRL>

Shell Scripting Using set -v

Sat, 2019-06-15 08:22

set -v : Print shell input lines as they are read.

show_gds_status.sh

#!/bin/sh
##############################
# GDSCTL> configure -width 132
# GDSCTL> configure -save_config
##############################
. ~/gsm1.sh
set -evx
gdsctl -show << END
status
databases
services
exit
END
exit

Execute show_gds_status.sh

[oracle@SLC02PNY GDS]$ ./show_gds_status.sh
gdsctl -show << END
status
databases
services
exit
END
+ gdsctl -show
gsm       : GSM1
TNS_ADMIN : /u01/app/oracle/product/18.0.0/gsmhome_1/network/admin
driver    : jdbc:oracle:thin:
resolve   : QUAL_HOSTNAME
timeout   : 150
log_level : OFF
version   : 18.0.0.0.0
width     : 132
verbose   : ON
spool     : OFF
showtime  : OFF
GDSCTL: Version 18.0.0.0.0 - Production on Sat Jun 15 13:01:21 UTC 2019

Copyright (c) 2011, 2018, Oracle.  All rights reserved.

Welcome to GDSCTL, type "help" for information.

Current GSM is set to GSM1
GDSCTL>
Alias                     GSM1
Version                   18.0.0.0.0
Start Date                15-JUN-2019 12:22:28
Trace Level               off
Listener Log File         /u01/app/oracle/diag/gsm/SLC02PNY/gsm1/alert/log.xml
Listener Trace File       /u01/app/oracle/diag/gsm/SLC02PNY/gsm1/trace/ora_9504_140547635764096.trc
Endpoint summary          (ADDRESS=(HOST=SLC02PNY.localdomain)(PORT=1571)(PROTOCOL=tcp))
GSMOCI Version            2.2.1
Mastership                Y
Connected to GDS catalog  Y
Process Id                9507
Number of reconnections   0
Pending tasks.     Total  0
Tasks in  process. Total  0
Regional Mastership       TRUE
Total messages published  152
Time Zone                 +00:00
Orphaned Buddy Regions:
     None
GDS region                region1
Network metrics:
   Region: region2 Network factor:0

GDSCTL>
Database: "chi" Registered: Y State: Ok ONS: N. Role: PH_STNDBY Instances: 1 Region: region2
   Service: "prim" Globally started: Y Started: N
            Scan: N Enabled: Y Preferred: Y
   Service: "stby" Globally started: Y Started: Y
            Scan: N Enabled: Y Preferred: Y
   Registered instances:
     sales%11
Database: "sfo" Registered: Y State: Ok ONS: N. Role: PRIMARY Instances: 1 Region: region1
   Service: "prim" Globally started: Y Started: Y
            Scan: N Enabled: Y Preferred: Y
   Service: "stby" Globally started: Y Started: N
            Scan: N Enabled: Y Preferred: Y
   Registered instances:
     sales%1

GDSCTL>
Service "prim.sales.oradbcloud" has 1 instance(s). Affinity: ANYWHERE
   Instance "sales%1", name: "sales", db: "sfo", region: "region1", status: ready.
Service "stby.sales.oradbcloud" has 1 instance(s). Affinity: ANYWHERE
   Instance "sales%11", name: "sales", db: "chi", region: "region2", status: ready.

GDSCTL>
exit
+ exit
[oracle@SLC02PNY GDS]$

help set

[oracle@SLC02PNY GDS]$ help set
set: set [-abefhkmnptuvxBCHP] [-o option-name] [--] [arg ...]
    Set or unset values of shell options and positional parameters.

    Change the value of shell attributes and positional parameters, or
    display the names and values of shell variables.

    Options:
      -a  Mark variables which are modified or created for export.
      -b  Notify of job termination immediately.
      -e  Exit immediately if a command exits with a non-zero status.
      -f  Disable file name generation (globbing).
      -h  Remember the location of commands as they are looked up.
      -k  All assignment arguments are placed in the environment for a
          command, not just those that precede the command name.
      -m  Job control is enabled.
      -n  Read commands but do not execute them.
      -o option-name
          Set the variable corresponding to option-name:
              allexport    same as -a
              braceexpand  same as -B
              emacs        use an emacs-style line editing interface
              errexit      same as -e
              errtrace     same as -E
              functrace    same as -T
              hashall      same as -h
              histexpand   same as -H
              history      enable command history
              ignoreeof    the shell will not exit upon reading EOF
              interactive-comments
                           allow comments to appear in interactive commands
              keyword      same as -k
              monitor      same as -m
              noclobber    same as -C
              noexec       same as -n
              noglob       same as -f
              nolog        currently accepted but ignored
              notify       same as -b
              nounset      same as -u
              onecmd       same as -t
              physical     same as -P
              pipefail     the return value of a pipeline is the status of
                           the last command to exit with a non-zero status,
                           or zero if no command exited with a non-zero status
              posix        change the behavior of bash where the default
                           operation differs from the Posix standard to
                           match the standard
              privileged   same as -p
              verbose      same as -v
              vi           use a vi-style line editing interface
              xtrace       same as -x
      -p  Turned on whenever the real and effective user ids do not match.
          Disables processing of the $ENV file and importing of shell
          functions.  Turning this option off causes the effective uid and
          gid to be set to the real uid and gid.
      -t  Exit after reading and executing one command.
      -u  Treat unset variables as an error when substituting.
================================================================================
      -v  Print shell input lines as they are read.
================================================================================
      -x  Print commands and their arguments as they are executed.
      -B  the shell will perform brace expansion
      -C  If set, disallow existing regular files to be overwritten
          by redirection of output.
      -E  If set, the ERR trap is inherited by shell functions.
      -H  Enable ! style history substitution.  This flag is on
          by default when the shell is interactive.
      -P  If set, do not follow symbolic links when executing commands
          such as cd which change the current directory.
      -T  If set, the DEBUG trap is inherited by shell functions.
      --  Assign any remaining arguments to the positional parameters.
          If there are no remaining arguments, the positional parameters
          are unset.
      -   Assign any remaining arguments to the positional parameters.
          The -x and -v options are turned off.

    Using + rather than - causes these flags to be turned off.  The
    flags can also be used upon invocation of the shell.  The current
    set of flags may be found in $-.  The remaining n ARGs are positional
    parameters and are assigned, in order, to $1, $2, .. $n.  If no
    ARGs are given, all shell variables are printed.

    Exit Status:
    Returns success unless an invalid option is given.
[oracle@SLC02PNY GDS]$
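
To see the difference between -v and -x side by side, here is a tiny demo (hypothetical; not part of the GDS setup):

#!/bin/sh
# demo.sh: -v echoes each input line as read; -x echoes each command after expansion
set -vx
echo $ORACLE_SID

With ORACLE_SID=+ASM1, running it prints the unexpanded line "echo $ORACLE_SID" (from -v), then "+ echo +ASM1" (from -x), then the value itself.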

RAC Installation Logs

Fri, 2019-06-07 12:24

Note to self on the log locations for a 2-node RAC installation and DB creation.

Oracle Universal Installer logs for GI/DB:

[oracle@racnode-dc1-1 logs]$ pwd; ls -lhrt
/u01/app/oraInventory/logs
total 2.3M
-rw-r----- 1 oracle oinstall    0 Jun  7 16:39 oraInstall2019-06-07_04-39-01PM.err
-rw-r----- 1 oracle oinstall    0 Jun  7 16:43 oraInstall2019-06-07_04-39-01PM.err.racnode-dc1-2
-rw-r----- 1 oracle oinstall  121 Jun  7 16:43 oraInstall2019-06-07_04-39-01PM.out.racnode-dc1-2
-rw-r----- 1 oracle oinstall  11K Jun  7 16:43 AttachHome2019-06-07_04-39-01PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall  544 Jun  7 16:43 silentInstall2019-06-07_04-39-01PM.log
-rw-r----- 1 oracle oinstall  12K Jun  7 16:44 UpdateNodeList2019-06-07_04-39-01PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall 8.0K Jun  7 16:44 UpdateNodeList2019-06-07_04-39-01PM.log
-rw-r----- 1 oracle oinstall 2.8K Jun  7 16:44 oraInstall2019-06-07_04-39-01PM.out
-rw-r----- 1 oracle oinstall 1.1M Jun  7 16:44 installActions2019-06-07_04-39-01PM.log
-rw-r----- 1 oracle oinstall    0 Jun  7 16:57 oraInstall2019-06-07_04-57-13-PM.err
-rw-r----- 1 oracle oinstall    0 Jun  7 16:57 oraInstall2019-06-07_04-57-35-PM.out.racnode-dc1-2
-rw-r----- 1 oracle oinstall    0 Jun  7 16:57 oraInstall2019-06-07_04-57-35-PM.err.racnode-dc1-2
-rw-r----- 1 oracle oinstall  12K Jun  7 16:58 UpdateNodeList2019-06-07_04-57-35-PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall 8.8K Jun  7 16:58 UpdateNodeList2019-06-07_04-57-13-PM.log
-rw-r----- 1 oracle oinstall  153 Jun  7 17:06 oraInstall2019-06-07_04-57-13-PM.out
-rw-r----- 1 oracle oinstall    0 Jun  7 17:06 oraInstall2019-06-07_05-06-42PM.err
-rw-r----- 1 oracle oinstall    0 Jun  7 17:06 oraInstall2019-06-07_05-06-42PM.err.racnode-dc1-2
-rw-r----- 1 oracle oinstall  12K Jun  7 17:07 UpdateNodeList2019-06-07_05-06-42PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall   33 Jun  7 17:07 oraInstall2019-06-07_05-06-42PM.out.racnode-dc1-2
-rw-r----- 1 oracle oinstall  12K Jun  7 17:07 UpdateNodeList2019-06-07_05-06-42PM.log
-rw-r----- 1 oracle oinstall   33 Jun  7 17:07 oraInstall2019-06-07_05-06-42PM.out
-rw-r----- 1 oracle oinstall   47 Jun  7 17:09 time2019-06-07_05-09-01PM.log
-rw-r----- 1 oracle oinstall    0 Jun  7 17:09 oraInstall2019-06-07_05-09-01PM.err
-rw-r----- 1 oracle oinstall    0 Jun  7 17:13 oraInstall2019-06-07_05-09-01PM.err.racnode-dc1-2
-rw-r----- 1 oracle oinstall   29 Jun  7 17:14 oraInstall2019-06-07_05-09-01PM.out.racnode-dc1-2
-rw-r----- 1 oracle oinstall  12K Jun  7 17:14 AttachHome2019-06-07_05-09-01PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall  507 Jun  7 17:14 silentInstall2019-06-07_05-09-01PM.log
-rw-r----- 1 oracle oinstall  14K Jun  7 17:15 UpdateNodeList2019-06-07_05-09-01PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall 9.5K Jun  7 17:15 UpdateNodeList2019-06-07_05-09-01PM.log
-rw-r----- 1 oracle oinstall  496 Jun  7 17:15 oraInstall2019-06-07_05-09-01PM.out
-rw-r----- 1 oracle oinstall 1.1M Jun  7 17:15 installActions2019-06-07_05-09-01PM.log
[oracle@racnode-dc1-1 logs]$

silentInstall*.log

[oracle@racnode-dc1-1 logs]$ grep successful silent*.log

silentInstall2019-06-07_04-39-01PM.log:The installation of Oracle Grid Infrastructure 12c was successful.

silentInstall2019-06-07_05-09-01PM.log:The installation of Oracle Database 12c was successful.

[oracle@racnode-dc1-1 logs]$

installActions*.log

[oracle@racnode-dc1-1 logs]$ grep "Using paramFile" install*.log

installActions2019-06-07_04-39-01PM.log:INFO: Using paramFile: /u01/stage/12.1.0.2/grid/install/oraparam.ini

installActions2019-06-07_05-09-01PM.log:Using paramFile: /u01/stage/12.1.0.2/database/install/oraparam.ini

[oracle@racnode-dc1-1 logs]$
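
Both checks can be combined into one pass over the logs (just an example):

[oracle@racnode-dc1-1 logs]$ grep -H -e "was successful" -e "Using paramFile" silentInstall*.log installActions*.log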

Run root script after installation:
$GRID_HOME/root.sh

[oracle@racnode-dc1-1 install]$ pwd; ls -lhrt root*.log
/u01/app/12.1.0.2/grid/install
-rw------- 1 oracle oinstall 7.4K Jun  7 16:51 root_racnode-dc1-1_2019-06-07_16-44-37.log
[oracle@racnode-dc1-1 install]$

Run configToolAllCommands:
$GRID_HOME/cfgtoollogs/configToolAllCommands RESPONSE_FILE=/u01/stage/rsp/configtoolallcommands.rsp

[oracle@racnode-dc1-1 oui]$ pwd; ls -lhrt
/u01/app/12.1.0.2/grid/cfgtoollogs/oui
total 1.2M
-rw-r----- 1 oracle oinstall    0 Jun  7 16:39 oraInstall2019-06-07_04-39-01PM.err
-rw-r----- 1 oracle oinstall    0 Jun  7 16:43 oraInstall2019-06-07_04-39-01PM.err.racnode-dc1-2
-rw-r----- 1 oracle oinstall  121 Jun  7 16:43 oraInstall2019-06-07_04-39-01PM.out.racnode-dc1-2
-rw-r----- 1 oracle oinstall  11K Jun  7 16:43 AttachHome2019-06-07_04-39-01PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall  544 Jun  7 16:43 silentInstall2019-06-07_04-39-01PM.log
-rw-r----- 1 oracle oinstall  12K Jun  7 16:44 UpdateNodeList2019-06-07_04-39-01PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall 8.0K Jun  7 16:44 UpdateNodeList2019-06-07_04-39-01PM.log
-rw-r----- 1 oracle oinstall 2.8K Jun  7 16:44 oraInstall2019-06-07_04-39-01PM.out
-rw-r----- 1 oracle oinstall 1.1M Jun  7 16:44 installActions2019-06-07_04-39-01PM.log
-rw-r--r-- 1 oracle oinstall    0 Jun  7 16:57 configActions2019-06-07_04-57-10-PM.err
-rw-r--r-- 1 oracle oinstall  13K Jun  7 17:06 configActions2019-06-07_04-57-10-PM.log
-rw------- 1 oracle oinstall    0 Jun  7 17:06 oraInstall2019-06-07_05-06-42PM.err
-rw-r----- 1 oracle oinstall    0 Jun  7 17:06 oraInstall2019-06-07_05-06-42PM.err.racnode-dc1-2
-rw-r----- 1 oracle oinstall  12K Jun  7 17:07 UpdateNodeList2019-06-07_05-06-42PM.log.racnode-dc1-2
-rw-r----- 1 oracle oinstall   33 Jun  7 17:07 oraInstall2019-06-07_05-06-42PM.out.racnode-dc1-2
-rw-r----- 1 oracle oinstall  12K Jun  7 17:07 UpdateNodeList2019-06-07_05-06-42PM.log
-rw------- 1 oracle oinstall   33 Jun  7 17:07 oraInstall2019-06-07_05-06-42PM.out
[oracle@racnode-dc1-1 oui]$

dbca

[oracle@racnode-dc1-1 dbca]$ pwd; ls -lhrt
/u01/app/oracle/cfgtoollogs/dbca
total 116K
-rwxrwxr-x 1 oracle oinstall    0 Jun  7 17:02 trace.log_OraGI12Home1_2019-06-07_05-02-52-PM.lck
drwxrwxr-x 3 oracle oinstall 4.0K Jun  7 17:02 _mgmtdb
-rwxrwxr-x 1 oracle oinstall 105K Jun  7 17:03 trace.log_OraGI12Home1_2019-06-07_05-02-52-PM
drwxr-x--- 2 oracle oinstall 4.0K Jun  7 17:23 hawk
[oracle@racnode-dc1-1 dbca]$

dbca _mgmtdb

[oracle@racnode-dc1-1 _mgmtdb]$ pwd; ls -lhrt
/u01/app/oracle/cfgtoollogs/dbca/_mgmtdb
total 19M
-rwxrwxr-x 1 oracle oinstall    0 Jun  7 16:58 trace.log.lck
-rwxrwxr-x 1 oracle oinstall  18M Jun  7 16:59 tempControl.ctl
-rwxrwxr-x 1 oracle oinstall  349 Jun  7 16:59 CloneRmanRestore.log
-rwxrwxr-x 1 oracle oinstall  596 Jun  7 16:59 cloneDBCreation.log
-rwxrwxr-x 1 oracle oinstall    0 Jun  7 17:00 rmanUtil
-rwxrwxr-x 1 oracle oinstall 2.1K Jun  7 17:00 plugDatabase.log
-rwxrwxr-x 1 oracle oinstall  428 Jun  7 17:01 dbmssml_catcon_12271.lst
-rwxrwxr-x 1 oracle oinstall 3.5K Jun  7 17:01 dbmssml0.log
-rwxrwxr-x 1 oracle oinstall  396 Jun  7 17:01 postScripts.log
-rwxrwxr-x 1 oracle oinstall    0 Jun  7 17:01 lockAccount.log
-rwxrwxr-x 1 oracle oinstall  442 Jun  7 17:01 catbundleapply_catcon_12348.lst
-rwxrwxr-x 1 oracle oinstall 3.9K Jun  7 17:01 catbundleapply0.log
-rwxrwxr-x 1 oracle oinstall  424 Jun  7 17:01 utlrp_catcon_12416.lst
-rwxrwxr-x 1 oracle oinstall 9.2K Jun  7 17:02 utlrp0.log
-rwxrwxr-x 1 oracle oinstall  964 Jun  7 17:02 postDBCreation.log
-rwxrwxr-x 1 oracle oinstall  737 Jun  7 17:02 OraGI12Home1__mgmtdb_creation_checkpoint.xml
-rwxrwxr-x 1 oracle oinstall  877 Jun  7 17:02 _mgmtdb.log
-rwxrwxr-x 1 oracle oinstall 1.1M Jun  7 17:02 trace.log
-rwxrwxr-x 1 oracle oinstall 1.3K Jun  7 17:02 DetectOption.log
drwxrwxr-x 2 oracle oinstall 4.0K Jun  7 17:03 vbox_rac_dc1

[oracle@racnode-dc1-1 _mgmtdb]$ tail _mgmtdb.log
Completing Database Creation
DBCA_PROGRESS : 68%
DBCA_PROGRESS : 79%
DBCA_PROGRESS : 89%
DBCA_PROGRESS : 100%
Database creation complete. For details check the logfiles at:
 /u01/app/oracle/cfgtoollogs/dbca/_mgmtdb.
Database Information:
Global Database Name:_mgmtdb
System Identifier(SID):-MGMTDB
[oracle@racnode-dc1-1 _mgmtdb]$

dbca hawk

[oracle@racnode-dc1-1 hawk]$ pwd; ls -lhrt
/u01/app/oracle/cfgtoollogs/dbca/hawk
total 34M
-rw-r----- 1 oracle oinstall    0 Jun  7 17:16 trace.log.lck
-rw-r----- 1 oracle oinstall    0 Jun  7 17:16 rmanUtil
-rw-r----- 1 oracle oinstall  18M Jun  7 17:17 tempControl.ctl
-rw-r----- 1 oracle oinstall  384 Jun  7 17:17 CloneRmanRestore.log
-rw-r----- 1 oracle oinstall 2.8K Jun  7 17:20 cloneDBCreation.log
-rw-r----- 1 oracle oinstall    8 Jun  7 17:20 postScripts.log
-rw-r----- 1 oracle oinstall    0 Jun  7 17:21 CreateClustDBViews.log
-rw-r----- 1 oracle oinstall    6 Jun  7 17:21 lockAccount.log
-rw-r----- 1 oracle oinstall 1.3K Jun  7 17:22 postDBCreation.log
-rw-r----- 1 oracle oinstall  511 Jun  7 17:23 OraDB12Home1_hawk_creation_checkpoint.xml
-rw-r----- 1 oracle oinstall  24K Jun  7 17:23 hawk.log
-rw-r----- 1 oracle oinstall  16M Jun  7 17:23 trace.log

[oracle@racnode-dc1-1 hawk]$ tail hawk.log
DBCA_PROGRESS : 73%
DBCA_PROGRESS : 76%
DBCA_PROGRESS : 85%
DBCA_PROGRESS : 94%
DBCA_PROGRESS : 100%
Database creation complete. For details check the logfiles at:
 /u01/app/oracle/cfgtoollogs/dbca/hawk.
Database Information:
Global Database Name:hawk
System Identifier(SID) Prefix:hawk
[oracle@racnode-dc1-1 hawk]$

Local Install rlwrap for OEL 7.6

Wed, 2019-06-05 19:13

Installing rlwrap on OEL 7.6 requires a local install of python34.

yum install rlwrap

[root@SLC02PNY ~]# yum install rlwrap
Loaded plugins: ulninfo
Resolving Dependencies
--> Running transaction check
---> Package rlwrap.x86_64 0:0.43-2.el7 will be installed
--> Processing Dependency: perl(Data::Dumper) for package: rlwrap-0.43-2.el7.x86_64
--> Processing Dependency: /usr/bin/python3.4 for package: rlwrap-0.43-2.el7.x86_64

****************************************************************************************************
Package python34 is obsoleted by python36, but obsoleting package does not provide for requirements
****************************************************************************************************

--> Running transaction check
---> Package perl-Data-Dumper.x86_64 0:2.145-3.el7 will be installed
---> Package rlwrap.x86_64 0:0.43-2.el7 will be installed
--> Processing Dependency: /usr/bin/python3.4 for package: rlwrap-0.43-2.el7.x86_64
Package python34 is obsoleted by python36, but obsoleting package does not provide for requirements
--> Processing Dependency: /usr/bin/python3.4 for package: rlwrap-0.43-2.el7.x86_64
Package python34 is obsoleted by python36, but obsoleting package does not provide for requirements
--> Finished Dependency Resolution

yum install python34

root@SLC02PNY ~]# yum install python34
Loaded plugins: ulninfo

****************************************************************************************************
Package python34 is obsoleted by python36, trying to install python36-3.6.8-1.el7.x86_64 instead
****************************************************************************************************

Resolving Dependencies
--> Running transaction check
---> Package python36.x86_64 0:3.6.8-1.el7 will be installed
--> Processing Dependency: python36-libs(x86-64) = 3.6.8-1.el7 for package: python36-3.6.8-1.el7.x86_64
--> Processing Dependency: libpython3.6m.so.1.0()(64bit) for package: python36-3.6.8-1.el7.x86_64
--> Running transaction check
---> Package python36-libs.x86_64 0:3.6.8-1.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

====================================================================================================================================
 Package                         Arch                     Version                        Repository                            Size
====================================================================================================================================
Installing:
 python36                        x86_64                   3.6.8-1.el7                    ol7_developer_EPEL                    66 k
Installing for dependencies:
 python36-libs                   x86_64                   3.6.8-1.el7                    ol7_developer_EPEL                   8.6 M

Transaction Summary
====================================================================================================================================
Install  1 Package (+1 Dependent package)

Total download size: 8.6 M
Installed size: 36 M
Is this ok [y/d/N]: n

cat /etc/system-release

[root@ADC6160274 ~]# cat /etc/system-release
Oracle Linux Server release 7.6
[root@ADC6160274 ~]#

yumdownloader python34-3.4.5-4.el7.x86_64

[root@ADC6160274 ~]# yumdownloader python34-3.4.5-4.el7.x86_64
python34-3.4.5-4.el7.x86_64.rpm                                                                              |  50 kB  00:00:00

yumdownloader python34-libs-3.4.5-4.el7.x86_64
[root@ADC6160274 ~]# yumdownloader python34-libs-3.4.5-4.el7.x86_64
python34-libs-3.4.5-4.el7.x86_64.rpm                                                                         | 8.2 MB  00:00:01

yum localinstall python34-libs-3.4.5-4.el7.x86_64.rpm python34-3.4.5-4.el7.x86_64.rpm

[root@ADC6160274 ~]# yum localinstall python34-libs-3.4.5-4.el7.x86_64.rpm python34-3.4.5-4.el7.x86_64.rpm
Loaded plugins: ulninfo
Examining python34-libs-3.4.5-4.el7.x86_64.rpm: python34-libs-3.4.5-4.el7.x86_64
Marking python34-libs-3.4.5-4.el7.x86_64.rpm to be installed
Examining python34-3.4.5-4.el7.x86_64.rpm: python34-3.4.5-4.el7.x86_64
Marking python34-3.4.5-4.el7.x86_64.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package python34.x86_64 0:3.4.5-4.el7 will be installed
---> Package python34-libs.x86_64 0:3.4.5-4.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

====================================================================================================================================
 Package                     Arch                 Version                     Repository                                       Size
====================================================================================================================================
Installing:
 python34                    x86_64               3.4.5-4.el7                 /python34-3.4.5-4.el7.x86_64                     36 k
 python34-libs               x86_64               3.4.5-4.el7                 /python34-libs-3.4.5-4.el7.x86_64                29 M

Transaction Summary
====================================================================================================================================
Install  2 Packages

Total size: 29 M
Installed size: 29 M
Is this ok [y/d/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : python34-libs-3.4.5-4.el7.x86_64                                                                                 1/2
  Installing : python34-3.4.5-4.el7.x86_64                                                                                      2/2
  Verifying  : python34-3.4.5-4.el7.x86_64                                                                                      1/2
  Verifying  : python34-libs-3.4.5-4.el7.x86_64                                                                                 2/2

Installed:
  python34.x86_64 0:3.4.5-4.el7                                  python34-libs.x86_64 0:3.4.5-4.el7

Complete!

yum install rlwrap

[root@ADC6160274 ~]# yum install rlwrap
Loaded plugins: ulninfo
Resolving Dependencies
--> Running transaction check
---> Package rlwrap.x86_64 0:0.43-2.el7 will be installed
--> Processing Dependency: perl(Data::Dumper) for package: rlwrap-0.43-2.el7.x86_64
--> Running transaction check
---> Package perl-Data-Dumper.x86_64 0:2.145-3.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

====================================================================================================================================
 Package                           Arch                    Version                        Repository                           Size
====================================================================================================================================
Installing:
 rlwrap                            x86_64                  0.43-2.el7                     ol7_developer_EPEL                  118 k
Installing for dependencies:
 perl-Data-Dumper                  x86_64                  2.145-3.el7                    ol7_latest                           47 k

Transaction Summary
====================================================================================================================================
Install  1 Package (+1 Dependent package)

Total download size: 165 k
Installed size: 378 k
Is this ok [y/d/N]: y
Downloading packages:
(1/2): perl-Data-Dumper-2.145-3.el7.x86_64.rpm                                                               |  47 kB  00:00:00
(2/2): rlwrap-0.43-2.el7.x86_64.rpm                                                                          | 118 kB  00:00:00
------------------------------------------------------------------------------------------------------------------------------------
Total                                                                                               311 kB/s | 165 kB  00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : perl-Data-Dumper-2.145-3.el7.x86_64                                                                              1/2
  Installing : rlwrap-0.43-2.el7.x86_64                                                                                         2/2
  Verifying  : perl-Data-Dumper-2.145-3.el7.x86_64                                                                              1/2
  Verifying  : rlwrap-0.43-2.el7.x86_64                                                                                         2/2

Installed:
  rlwrap.x86_64 0:0.43-2.el7

Dependency Installed:
  perl-Data-Dumper.x86_64 0:2.145-3.el7

Complete!
[root@ADC6160274 ~]#
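
With rlwrap in place, the usual aliases can be added to the oracle user's profile (adjust to taste):

alias sqlplus='rlwrap sqlplus'
alias rman='rlwrap rman'
alias dgmgrl='rlwrap dgmgrl'
alias gdsctl='rlwrap gdsctl'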

Choiceology with Katy Milkman

Mon, 2019-05-27 15:15

Good listening that I thought I would share with you.

RSS Feed:
Choiceology with Katy Milkman – Exposing the psychological traps that lead to expensive mistakes

You can listen and subscribe to Choiceology with Katy Milkman for free in any podcast player—such as Apple Podcasts, Google Podcasts or Spotify.
How Do I Listen to Choiceology?

Shocking opatchauto resume works after auto-logout

Sun, 2019-05-19 12:36

WARNING: Please don’t try this at home or in a production environment.

With that being said, the patching was for DR production.

Oracle Interim Patch Installer version 12.2.0.1.16

Patching a 2-node RAC cluster; node1 completed successfully.

The rationale for using -norestart: there was an issue at one time where datapatch was applied on node1.

Don’t implement Active Data Guard and have the database Start options set to mount.

# crsctl stat res -t -w '((TARGET != ONLINE) or (STATE != ONLINE)'
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details       
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.dbproddr.db
      2        ONLINE  INTERMEDIATE node2              Mounted (Closed),STABLE
ora.dbproddr.dbdr.svc
      2        ONLINE  OFFLINE                                          STABLE
--------------------------------------------------------------------------------

$ srvctl status database -d dbproddr -v
Instance dbproddr1 is running on node node1 with online services dbdr. Instance status: Open,Readonly.
Instance dbproddr2 is running on node node2. Instance status: Mounted (Closed).

Run opatchauto; ctrl-c because the session got stuck.

node2 ~ # export PATCH_TOP_DIR=/u01/software/patches/Jan2019

node2 ~ # $GRID_HOME/OPatch/opatchauto apply $PATCH_TOP_DIR/28833531 -norestart

OPatchauto session is initiated at Thu May 16 20:20:24 2019

System initialization log file is /u02/app/12.1.0/grid/cfgtoollogs/opatchautodb/systemconfig2019-05-16_08-20-26PM.log.

Session log file is /u02/app/12.1.0/grid/cfgtoollogs/opatchauto/opatchauto2019-05-16_08-20-47PM.log
The id for this session is K43Y

Executing OPatch prereq operations to verify patch applicability on home /u02/app/12.1.0/grid

Executing OPatch prereq operations to verify patch applicability on home /u01/app/oracle/product/12.1.0/db
Patch applicability verified successfully on home /u01/app/oracle/product/12.1.0/db

Patch applicability verified successfully on home /u02/app/12.1.0/grid


Verifying SQL patch applicability on home /u01/app/oracle/product/12.1.0/db
"/bin/sh -c 'cd /u01/app/oracle/product/12.1.0/db; ORACLE_HOME=/u01/app/oracle/product/12.1.0/db ORACLE_SID=dbproddr2 /u01/app/oracle/product/12.1.0/db/OPatch/datapatch -prereq -verbose'" command failed with errors. Please refer to logs for more details. SQL changes, if any, can be analyzed by manually retrying the same command.

SQL patch applicability verified successfully on home /u01/app/oracle/product/12.1.0/db


Preparing to bring down database service on home /u01/app/oracle/product/12.1.0/db
Successfully prepared home /u01/app/oracle/product/12.1.0/db to bring down database service


Bringing down CRS service on home /u02/app/12.1.0/grid
Prepatch operation log file location: /u02/app/12.1.0/grid/cfgtoollogs/crsconfig/crspatch_node2_2019-05-16_08-21-16PM.log
CRS service brought down successfully on home /u02/app/12.1.0/grid


Performing prepatch operation on home /u01/app/oracle/product/12.1.0/db
Prepatch operation completed successfully on home /u01/app/oracle/product/12.1.0/db


Start applying binary patch on home /u01/app/oracle/product/12.1.0/db
Binary patch applied successfully on home /u01/app/oracle/product/12.1.0/db


Performing postpatch operation on home /u01/app/oracle/product/12.1.0/db
Postpatch operation completed successfully on home /u01/app/oracle/product/12.1.0/db


Start applying binary patch on home /u02/app/12.1.0/grid

Binary patch applied successfully on home /u02/app/12.1.0/grid


Starting CRS service on home /u02/app/12.1.0/grid





*** Ctrl-C as shown below ***
^C
OPatchauto session completed at Thu May 16 21:41:58 2019
*** Time taken to complete the session 81 minutes, 34 seconds ***

opatchauto failed with error code 130

This is not good, as the session disconnected while I was troubleshooting in another session.

node2 ~ # timed out waiting for input: auto-logout

Even though the opatchauto session was terminated, the cluster upgrade state is [NORMAL] rather than [ROLLING PATCH].

node2 ~ # crsctl query crs activeversion -f
Oracle Clusterware active version on the cluster is [12.1.0.2.0]. The cluster upgrade state is [NORMAL]. The cluster active patch level is [323461694].
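
The per-node software patch level can also be compared as an extra check (not part of the original session; output omitted):

node2 ~ # crsctl query crs softwarepatch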

node2 ~ # crsctl stat res -t -w '((TARGET != ONLINE) or (STATE != ONLINE)'
node2 ~ # crsctl stat res -t -w 'TYPE = ora.database.type'
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details       
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.dbproddr.db
      1        ONLINE  ONLINE       node1              Open,Readonly,STABLE
      2        ONLINE  ONLINE       node2              Open,Readonly,STABLE
--------------------------------------------------------------------------------

At this point, I was not sure what to do since everything looked good and online.

A colleague helping me with troubleshooting stated that the patch completed successfully, and the main question was whether we needed to try “opatchauto resume”.

However, I was not comfortable with the outcome, so I tried opatchauto resume, and it worked like magic.

Reconnect and opatchauto resume

mdinh@node2 ~ $ sudo su - 
~ # . /home/oracle/working/dinh/gi.env
The Oracle base has been set to /u01/app/oracle
ORACLE_SID=+ASM4
ORACLE_BASE=/u01/app/oracle
GRID_HOME=/u02/app/12.1.0/grid
ORACLE_HOME=/u02/app/12.1.0/grid
Oracle Instance alive for sid "+ASM4"
~ # export PATCH_TOP_DIR=/u01/software/patches/Jan2019/
~ # $GRID_HOME/OPatch/opatchauto resume

OPatchauto session is initiated at Thu May 16 22:03:09 2019
Session log file is /u02/app/12.1.0/grid/cfgtoollogs/opatchauto/opatchauto2019-05-16_10-03-10PM.log
Resuming existing session with id K43Y

Starting CRS service on home /u02/app/12.1.0/grid
Postpatch operation log file location: /u02/app/12.1.0/grid/cfgtoollogs/crsconfig/crspatch_node2_2019-05-16_10-03-17PM.log
CRS service started successfully on home /u02/app/12.1.0/grid


Preparing home /u01/app/oracle/product/12.1.0/db after database service restarted

OPatchauto is running in norestart mode. PDB instances will not be checked for database on the current node.
No step execution required.........
 

Trying to apply SQL patch on home /u01/app/oracle/product/12.1.0/db
SQL patch applied successfully on home /u01/app/oracle/product/12.1.0/db

OPatchAuto successful.

--------------------------------Summary--------------------------------

Patching is completed successfully. Please find the summary as follows:

Host:node2
RAC Home:/u01/app/oracle/product/12.1.0/db
Version:12.1.0.2.0
Summary:

==Following patches were SKIPPED:

Patch: /u01/software/patches/Jan2019/28833531/26983807
Reason: This patch is not applicable to this specified target type - "rac_database"

Patch: /u01/software/patches/Jan2019/28833531/28729220
Reason: This patch is not applicable to this specified target type - "rac_database"


==Following patches were SUCCESSFULLY applied:

Patch: /u01/software/patches/Jan2019/28833531/28729213
Log: /u01/app/oracle/product/12.1.0/db/cfgtoollogs/opatchauto/core/opatch/opatch2019-05-16_20-22-06PM_1.log

Patch: /u01/software/patches/Jan2019/28833531/28731800
Log: /u01/app/oracle/product/12.1.0/db/cfgtoollogs/opatchauto/core/opatch/opatch2019-05-16_20-22-06PM_1.log


Host:node2
CRS Home:/u02/app/12.1.0/grid
Version:12.1.0.2.0
Summary:

==Following patches were SKIPPED:

Patch: /u01/software/patches/Jan2019/28833531/26983807
Reason: This patch is already been applied, so not going to apply again.


==Following patches were SUCCESSFULLY applied:

Patch: /u01/software/patches/Jan2019/28833531/28729213
Log: /u02/app/12.1.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2019-05-16_20-23-32PM_1.log

Patch: /u01/software/patches/Jan2019/28833531/28729220
Log: /u02/app/12.1.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2019-05-16_20-23-32PM_1.log

Patch: /u01/software/patches/Jan2019/28833531/28731800
Log: /u02/app/12.1.0/grid/cfgtoollogs/opatchauto/core/opatch/opatch2019-05-16_20-23-32PM_1.log


Patching session reported following warning(s): 
_________________________________________________

[WARNING] The database instance 'drinstance2' from '/u01/app/oracle/product/12.1.0/db', in host'node2' is not running. SQL changes, if any,  will not be applied.
To apply. the SQL changes, bring up the database instance and run the command manually from any one node (run as oracle).
Refer to the readme to get the correct steps for applying the sql changes.

[WARNING] The database instances will not be brought up under the 'norestart' option. The database instance 'drinstance2' from '/u01/app/oracle/product/12.1.0/db', in host'node2' is not running. SQL changes, if any,  will not be applied.
To apply. the SQL changes, bring up the database instance and run the command manually from any one node (run as oracle).
Refer to the readme to get the correct steps for applying the sql changes.


OPatchauto session completed at Thu May 16 22:10:01 2019
Time taken to complete the session 6 minutes, 52 seconds
~ # 

Logs:

oracle@node2:/u02/app/12.1.0/grid/cfgtoollogs/crsconfig
> ls -alrt
total 508
drwxr-x--- 2 oracle oinstall   4096 Nov 23 02:15 oracle
-rwxrwxr-x 1 oracle oinstall 167579 Nov 23 02:15 rootcrs_node2_2018-11-23_02-07-58AM.log
drwxrwxr-x 9 oracle oinstall   4096 Apr 10 12:05 ..

opatchauto apply - Prepatch operation log file location: /u02/app/12.1.0/grid/cfgtoollogs/crsconfig/crspatch_node2_2019-05-16_08-21-16PM.log
====================================================================================================
-rwxrwxr-x 1 oracle oinstall  33020 May 16 20:22 crspatch_node2_2019-05-16_08-21-16PM.log
====================================================================================================

Mysterious log file - unknown where this log came from, because it was not part of my terminal output.
====================================================================================================
-rwxrwxr-x 1 oracle oinstall  86983 May 16 21:42 crspatch_node2_2019-05-16_08-27-35PM.log
====================================================================================================

-rwxrwxr-x 1 oracle oinstall  56540 May 16 22:06 srvmcfg1.log
-rwxrwxr-x 1 oracle oinstall  26836 May 16 22:06 srvmcfg2.log
-rwxrwxr-x 1 oracle oinstall  21059 May 16 22:06 srvmcfg3.log
-rwxrwxr-x 1 oracle oinstall  23032 May 16 22:08 srvmcfg4.log

opatchauto resume - Postpatch operation log file location: /u02/app/12.1.0/grid/cfgtoollogs/crsconfig/crspatch_node2_2019-05-16_10-03-17PM.log
====================================================================================================
-rwxrwxr-x 1 oracle oinstall  64381 May 16 22:09 crspatch_node2_2019-05-16_10-03-17PM.log
====================================================================================================

Prepatch operation log file.

> tail -20 crspatch_node2_2019-05-16_08-21-16PM.log
2019-05-16 20:22:04: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH_OOP_REQSTEPS
2019-05-16 20:22:04: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH_OOP_REQSTEPS '
2019-05-16 20:22:04: Removing file /tmp/fileTChFoS
2019-05-16 20:22:04: Successfully removed file: /tmp/fileTChFoS
2019-05-16 20:22:04: pipe exit code: 0
2019-05-16 20:22:04: /bin/su successfully executed

2019-05-16 20:22:04: checkpoint ROOTCRS_POSTPATCH_OOP_REQSTEPS does not exist
2019-05-16 20:22:04: Done - Performing pre-pathching steps required for GI stack
2019-05-16 20:22:04: Resetting cluutil_trc_suff_pp to 0
2019-05-16 20:22:04: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_PREPATCH -state SUCCESS"
2019-05-16 20:22:04: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil0.log
2019-05-16 20:22:04: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_PREPATCH -state SUCCESS
2019-05-16 20:22:04: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_PREPATCH -state SUCCESS '
2019-05-16 20:22:04: Removing file /tmp/fileDoYyQA
2019-05-16 20:22:04: Successfully removed file: /tmp/fileDoYyQA
2019-05-16 20:22:04: pipe exit code: 0
2019-05-16 20:22:04: /bin/su successfully executed

*** 2019-05-16 20:22:04: Succeeded in writing the checkpoint:'ROOTCRS_PREPATCH' with status:SUCCESS ***

Mysterious log file – crspatch_node2_2019-05-16_08-27-35PM.log

2019-05-16 21:42:00: Succeeded in writing the checkpoint:'ROOTCRS_STACK' with status:FAIL
2019-05-16 21:42:00: ###### Begin DIE Stack Trace ######
2019-05-16 21:42:00:     Package         File                 Line Calling   
2019-05-16 21:42:00:     --------------- -------------------- ---- ----------
2019-05-16 21:42:00:  1: main            rootcrs.pl            267 crsutils::dietrap
2019-05-16 21:42:00:  2: crsutils        crsutils.pm          1631 main::__ANON__
2019-05-16 21:42:00:  3: crsutils        crsutils.pm          1586 crsutils::system_cmd_capture_noprint
2019-05-16 21:42:00:  4: crsutils        crsutils.pm          9098 crsutils::system_cmd_capture
2019-05-16 21:42:00:  5: crspatch        crspatch.pm           988 crsutils::startFullStack
2019-05-16 21:42:00:  6: crspatch        crspatch.pm          1121 crspatch::performPostPatch
2019-05-16 21:42:00:  7: crspatch        crspatch.pm           212 crspatch::crsPostPatch
2019-05-16 21:42:00:  8: main            rootcrs.pl            276 crspatch::new
2019-05-16 21:42:00: ####### End DIE Stack Trace #######

2019-05-16 21:42:00: ROOTCRS_POSTPATCH checkpoint has failed
2019-05-16 21:42:00:      ckpt: -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH
2019-05-16 21:42:00: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH"
2019-05-16 21:42:00: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil4.log
2019-05-16 21:42:00: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH
2019-05-16 21:42:00: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH '
2019-05-16 21:42:00: Removing file /tmp/filewniUim
2019-05-16 21:42:00: Successfully removed file: /tmp/filewniUim
2019-05-16 21:42:00: pipe exit code: 0
2019-05-16 21:42:00: /bin/su successfully executed

2019-05-16 21:42:00: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH -status"
2019-05-16 21:42:00: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil5.log
2019-05-16 21:42:00: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH -status
2019-05-16 21:42:00: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name ROOTCRS_POSTPATCH -status '
2019-05-16 21:42:00: Removing file /tmp/fileK1Tyw6
2019-05-16 21:42:00: Successfully removed file: /tmp/fileK1Tyw6
2019-05-16 21:42:00: pipe exit code: 0
2019-05-16 21:42:00: /bin/su successfully executed

2019-05-16 21:42:00: The 'ROOTCRS_POSTPATCH' status is FAILED
2019-05-16 21:42:00: ROOTCRS_POSTPATCH state is FAIL
2019-05-16 21:42:00: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_POSTPATCH -state FAIL"
2019-05-16 21:42:00: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil6.log
2019-05-16 21:42:00: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_POSTPATCH -state FAIL
2019-05-16 21:42:00: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_POSTPATCH -state FAIL '
2019-05-16 21:42:00: Removing file /tmp/filej20epR
2019-05-16 21:42:00: Successfully removed file: /tmp/filej20epR
2019-05-16 21:42:00: pipe exit code: 0
2019-05-16 21:42:00: /bin/su successfully executed

2019-05-16 21:42:00: Succeeded in writing the checkpoint:'ROOTCRS_POSTPATCH' with status:FAIL
2019-05-16 21:42:00: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_STACK -state FAIL"
2019-05-16 21:42:00: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil7.log
2019-05-16 21:42:00: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_STACK -state FAIL
2019-05-16 21:42:00: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_STACK -state FAIL '
2019-05-16 21:42:01: Removing file /tmp/filely834C
2019-05-16 21:42:01: Successfully removed file: /tmp/filely834C
2019-05-16 21:42:01: pipe exit code: 0
2019-05-16 21:42:01: /bin/su successfully executed

*** 2019-05-16 21:42:01: Succeeded in writing the checkpoint:'ROOTCRS_STACK' with status:FAIL ***

Postpatch operation log file.

> tail -20 crspatch_node2_2019-05-16_10-03-17PM.log
2019-05-16 22:09:59: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_PREPATCH -state START"
2019-05-16 22:09:59: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil7.log
2019-05-16 22:09:59: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_PREPATCH -state START
2019-05-16 22:09:59: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_PREPATCH -state START '
2019-05-16 22:09:59: Removing file /tmp/file0IogVl
2019-05-16 22:09:59: Successfully removed file: /tmp/file0IogVl
2019-05-16 22:09:59: pipe exit code: 0
2019-05-16 22:09:59: /bin/su successfully executed

2019-05-16 22:09:59: Succeeded in writing the checkpoint:'ROOTCRS_PREPATCH' with status:START
2019-05-16 22:09:59: Invoking "/u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_POSTPATCH -state SUCCESS"
2019-05-16 22:09:59: trace file=/u01/app/oracle/crsdata/node2/crsconfig/cluutil8.log
2019-05-16 22:09:59: Running as user oracle: /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_POSTPATCH -state SUCCESS
2019-05-16 22:09:59: s_run_as_user2: Running /bin/su oracle -c ' echo CLSRSC_START; /u02/app/12.1.0/grid/bin/cluutil -ckpt -oraclebase /u01/app/oracle -writeckpt -name ROOTCRS_POSTPATCH -state SUCCESS '
2019-05-16 22:09:59: Removing file /tmp/fileXDCkuM
2019-05-16 22:09:59: Successfully removed file: /tmp/fileXDCkuM
2019-05-16 22:09:59: pipe exit code: 0
2019-05-16 22:09:59: /bin/su successfully executed

*** 2019-05-16 22:09:59: Succeeded in writing the checkpoint:'ROOTCRS_POSTPATCH' with status:SUCCESS ***
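
The cluutil invocations captured in these logs can also be run by hand to see where rootcrs thinks patching stands; a small sketch reusing the exact calls logged above:

# query the rootcrs checkpoint states directly (same cluutil calls rootcrs.pl logs above)
GRID_HOME=/u02/app/12.1.0/grid
for ckpt in ROOTCRS_PREPATCH ROOTCRS_POSTPATCH ROOTCRS_STACK; do
  echo -n "$ckpt: "
  $GRID_HOME/bin/cluutil -ckpt -oraclebase /u01/app/oracle -chkckpt -name $ckpt -status
done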

Happy patching, and hopefully the upcoming patching of the primary will be just as seamless.

EM13.3 Directory Structures

Sun, 2019-05-12 09:41

Currently, I am preparing a POC to migrate OMS 13.3 from OEL6 to OEL7 and wanted a high-level overview of the installation.

[oracle@MGOEM ~]$ cat .bash_profile
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
        . ~/.bashrc
fi

# User specific environment and startup programs

export PATH=$PATH:$HOME/bin
export DISPLAY=127.0.0.1:10.0

export ORACLE_BASE=/u01/app/oracle
export AGENT_BASE=$ORACLE_BASE/agent

export AGENT_HOME=$AGENT_BASE/agent_13.3.0.0.0
export EM_INSTANCE_BASE=$ORACLE_BASE/gc_inst
export OMS_INSTANCE_BASE=$EM_INSTANCE_BASE
export OHS=$EM_INSTANCE_BASE/user_projects/domains/GCDomain/servers/ohs1

### Starting from 13cR1, Oracle home (or OMS home) refers to the Middleware home.
export ORACLE_HOME=$ORACLE_BASE/middleware
export MW_HOME=$ORACLE_HOME
export OMS_HOME=$ORACLE_HOME
[oracle@MGOEM ~]$
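
With the profile sourced, a quick sanity check of the homes it defines (not part of the original output, just the standard emctl status commands) looks like this:

# verify the OMS and central agent using the homes set in .bash_profile
. ~/.bash_profile
$OMS_HOME/bin/emctl status oms        # OMS / WebTier status
$AGENT_HOME/bin/emctl status agent    # central agent status and last upload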

Overview of the Directories Created for OMS Installation.
The OMS instance base directory (typically, gc_inst) is maintained outside the middleware home.

[oracle@MGOEM ~]$ cd $MW_HOME; pwd; ls
/u01/app/oracle/middleware
allroot.sh   common               embip          ldap           OMSPatcher     plsql                root.sh     ucp
asr          create_header.log    gccompliance   lib            OPatch         plugins              slax        user_projects
bi           crs                  has            logs           oracle_common  plugins_common       soa         webgate
bin          css                  install        network        oracore        postjava_header.log  sqlplus     wlserver
bmp          disc                 instantclient  nls            oraInst.loc    precomp              srvm        xdk
cfgtoollogs  doc                  inventory      ocm            ord            rdbms                stage
clone        domain-registry.xml  jdbc           ohs            oui            relnotes             sysman
coherence    em                   jlib           omscarespfile  perl           response             thirdparty
[oracle@MGOEM middleware]$

Overview of the Directories Created for Management Agent Installation (Central Agent).
Agent base directory for the central agent (Management Agent installed with the OMS).

[oracle@MGOEM middleware]$ cd $AGENT_BASE; pwd; ls
/u01/app/oracle/agent
agent_13.3.0.0.0  agent_inst  agentInstall.rsp
[oracle@MGOEM agent]$

Agent home that is within the agent base directory.

[oracle@MGOEM agent]$ cd $AGENT_HOME; pwd; ls
/u01/app/oracle/agent/agent_13.3.0.0.0
agent.rsp    EMStage        jdbc  jythonLib  OPatch         perl     replacebins.sh           sbin    xsds
bin          install        jdk   ldap       oracle_common  plugins  replacebins.sh.template  stage
cfgtoollogs  instantclient  jlib  lib        oraInst.loc    prereqs  root.sh                  sysman
config       inventory      js    ocm        oui            rda      root.sh.template         ucp
[oracle@MGOEM agent_13.3.0.0.0]$

The OMS instance base directory (typically, gc_inst) is maintained outside the middleware home.

[oracle@MGOEM agent_13.3.0.0.0]$ cd $OMS_INSTANCE_BASE; pwd; ls
/u01/app/oracle/gc_inst
em  user_projects
[oracle@MGOEM gc_inst]$

ORACLE_BASE

[oracle@MGOEM gc_inst]$  cd $ORACLE_BASE; pwd; ls
/u01/app/oracle
agent  bip  gc_inst  middleware  swlib
[oracle@MGOEM oracle]$

Inventory and Patches:

[oracle@MGOEM ~]$ cat /u01/app/oraInventory/ContentsXML/inventory.xml
<?xml version="1.0" standalone="yes" ?>
<!-- Copyright (c) 1999, 2015, Oracle. All rights reserved. -->
<!-- Do not modify the contents of this file by hand. -->
<INVENTORY>
<VERSION_INFO>
   <SAVED_WITH>13.8.0.0.0</SAVED_WITH>
   <MINIMUM_VER>2.1.0.6.0</MINIMUM_VER>
</VERSION_INFO>
<HOME_LIST>
<HOME NAME="oms13c1" LOC="/u01/app/oracle/middleware" TYPE="O" IDX="1"/>
<HOME NAME="agent13c1" LOC="/u01/app/oracle/agent/agent_13.3.0.0.0" TYPE="O" IDX="2"/>
</HOME_LIST>
<COMPOSITEHOME_LIST>
</COMPOSITEHOME_LIST>
</INVENTORY>
[oracle@MGOEM ~]$

[oracle@MGOEM ~]$ $AGENT_HOME/OPatch/opatch lspatches
27839641;One-off
27369653;One-off
27244723;One-off
27074880;OPSS Bundle Patch 12.1.3.0.171124
26933408;One-off
25832897;One-off
25412962;
23519804;One-off
20882747;One-off
20442348;One-off
19982906;One-off
19345252;One-off
18814458;One-off
28042003;One-off
27419391;WLS PATCH SET UPDATE 12.1.3.0.180417
23527146;One-off
20741228;JDBC 12.1.3.1 BP1

OPatch succeeded.
[oracle@MGOEM ~]$

[oracle@MGOEM ~]$ $ORACLE_HOME/OPatch/opatch lspatches
27839641;One-off
27369653;One-off
27244723;One-off
27074880;OPSS Bundle Patch 12.1.3.0.171124
26933408;One-off
25832897;One-off
25412962;
23519804;One-off
20882747;One-off
20442348;One-off
19982906;One-off
19345252;One-off
18814458;One-off
28042003;One-off
27419391;WLS PATCH SET UPDATE 12.1.3.0.180417
23527146;One-off
20741228;JDBC 12.1.3.1 BP1

OPatch succeeded.
[oracle@MGOEM ~]$
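
Both homes report the same patch list; a one-liner to confirm they stay in sync after future patching (an illustrative check, not from the original post):

# compare patch inventories of the agent home and the OMS (middleware) home
diff <($AGENT_HOME/OPatch/opatch lspatches) <($ORACLE_HOME/OPatch/opatch lspatches) \
  && echo "patch lists match"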

[oracle@MGOEM ~]$ $ORACLE_HOME/OPatch/opatch lsinventory
Oracle Interim Patch Installer version 13.8.0.0.0
Copyright (c) 2019, Oracle Corporation.  All rights reserved.


Oracle Home       : /u01/app/oracle/middleware
Central Inventory : /u01/app/oraInventory
   from           : /u01/app/oracle/middleware/oraInst.loc
OPatch version    : 13.8.0.0.0
OUI version       : 13.8.0.0.0
Log file location : /u01/app/oracle/middleware/cfgtoollogs/opatch/opatch2019-05-12_16-34-38PM_1.log


OPatch detects the Middleware Home as "/u01/app/oracle/middleware"

Lsinventory Output file location : /u01/app/oracle/middleware/cfgtoollogs/opatch/lsinv/lsinventory2019-05-12_16-34-38PM.txt

--------------------------------------------------------------------------------
Local Machine Information::
Hostname: MGOEM
ARU platform id: 226
ARU platform description:: Linux_AMD64

[oracle@MGOEM ~]$ cat /etc/oraInst.loc
inventory_loc=/u01/app/oraInventory
inst_group=oinstall
[oracle@MGOEM ~]$

[oracle@MGOEM ~]$ cat /u01/app/oracle/middleware/oraInst.loc
#Oracle Installer Location File Location
#Fri May 10 16:53:18 CEST 2019
inst_group=oinstall
inventory_loc=/u01/app/oraInventory
[oracle@MGOEM ~]$
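
Both pointer files reference the same central inventory; a quick check (illustrative only):

# confirm /etc/oraInst.loc and the middleware home's oraInst.loc agree on the central inventory
grep -H '^inventory_loc' /etc/oraInst.loc $ORACLE_HOME/oraInst.loc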

Reference:
DIRECTORY STRUCTURE AND LOCATIONS OF IMPORTANT TRACE AND LOG FILES OF ENTERPRISE MANAGER CLOUD CONTROL 13C

Overview of the Directories Created for an Enterprise Manager System

Create Mount Filesystem for Vagrant VirtualBox

Sat, 2019-05-11 09:41

Once again, I am using oravirt boxes.

If you just want to create the machine and not run the provisioning step, run this:

vagrant up

Since I don’t know ansible, it was much simpler to do the work manually.

Oracle Linux Server release 7.3

Review disks:

[root@MGOEM ~]# fdisk -l /dev/sd*

Disk /dev/sda: 52.4 GB, 52428800000 bytes, 102400000 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000979b6

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048     2099199     1048576   83  Linux
/dev/sda2         2099200   102399999    50150400   8e  Linux LVM

Disk /dev/sda1: 1073 MB, 1073741824 bytes, 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/sda2: 51.4 GB, 51354009600 bytes, 100300800 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

### Disk /dev/sdb is not partitioned: fdisk shows no partition entries for it and no filesystem
Disk /dev/sdb: 187.9 GB, 187904819200 bytes, 367001600 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
[root@MGOEM ~]#

Create partition:

[root@MGOEM ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x37a8a8de.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p):
Using default response p
Partition number (1-4, default 1):
First sector (2048-367001599, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-367001599, default 367001599):
Using default value 367001599
Partition 1 of type Linux and of size 175 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@MGOEM ~]#
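
The interactive fdisk session above can also be scripted; a minimal alternative using parted (assumes the same layout, a single primary partition spanning the disk):

# non-interactive alternative to the fdisk dialog: one primary partition covering /dev/sdb
parted -s /dev/sdb mklabel msdos
parted -s /dev/sdb mkpart primary ext4 1MiB 100%
parted -s /dev/sdb print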

Review disk (now partitioned as Linux):

[root@MGOEM ~]# fdisk -l /dev/sdb

Disk /dev/sdb: 187.9 GB, 187904819200 bytes, 367001600 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x37a8a8de

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048   367001599   183499776   83  Linux
[root@MGOEM ~]#

Create Filesystem:

[root@MGOEM ~]# mkfs.ext4 /dev/sdb1
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
11468800 inodes, 45874944 blocks
2293747 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2193620992
1400 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
        4096000, 7962624, 11239424, 20480000, 23887872

Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done

[root@MGOEM ~]#

Create and mount /u01:

[root@MGOEM ~]# mkdir -p /u01
[root@MGOEM ~]# mount /dev/sdb1 /u01
[root@MGOEM ~]#
[root@MGOEM ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
devtmpfs              2.8G     0  2.8G   0% /dev
tmpfs                 2.8G     0  2.8G   0% /dev/shm
tmpfs                 2.8G  8.4M  2.8G   1% /run
tmpfs                 2.8G     0  2.8G   0% /sys/fs/cgroup
/dev/mapper/ol-root    46G  2.1G   44G   5% /
/dev/sda1            1014M  167M  848M  17% /boot
vagrant               932G  283G  650G  31% /vagrant
sf_working            420G  139G  281G  33% /sf_working
media_patch           3.7T  513G  3.2T  14% /media/patch
media_swrepo          3.7T  513G  3.2T  14% /media/swrepo
sf_OracleSoftware     3.7T  513G  3.2T  14% /sf_OracleSoftware
media_shared_storage  932G  283G  650G  31% /media/shared_storage
tmpfs                 571M     0  571M   0% /run/user/1000
/dev/sdb1             173G   61M  164G   1% /u01
[root@MGOEM ~]#

Update /etc/fstab:

[root@MGOEM ~]# tail /etc/fstab
# Created by anaconda on Tue Apr 18 08:50:14 2017
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/ol-root     /                       xfs     defaults        0 0
UUID=ed2996e5-e077-4e23-83a5-10418226a725 /boot                   xfs     defaults        0 0
/dev/mapper/ol-swap     swap                    swap    defaults        0 0
/swapfile1              swap                    swap    defaults        0 0
/dev/sdb1               /u01                    ext4    defaults        1 1
[root@MGOEM ~]#
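
The entry above uses the device name; the fstab comments themselves point at blkid, and mounting by UUID avoids surprises if device names shift. A short sketch to capture the UUID and validate the entry without rebooting (the UUID value is a placeholder):

# find the UUID of the new filesystem
blkid /dev/sdb1
# optional fstab entry by UUID instead of /dev/sdb1 (placeholder UUID):
# UUID=<uuid-from-blkid>  /u01  ext4  defaults  1 1

# confirm the fstab entry mounts cleanly before the next reboot
umount /u01 && mount -a && df -h /u01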
