Thursday, 26 January 2017

Creating an Oracle ACFS File System 12c

Create a volume

RECO is the Disk Group name.

ASMCMD [+] > volcreate -G RECO -s 200G u02
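
Before creating the volume, it is worth confirming that the disk group has enough usable free space (on a HIGH redundancy disk group a 200G volume consumes roughly three times that in raw space). A minimal check with asmcmd, assuming the grid user's ASM environment is already set:

asmcmd lsdg RECO        # Usable_file_MB should comfortably exceed the requested volume size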


Check the volume device name 

ASMCMD [+] > volinfo -G RECO u02
Diskgroup Name: RECO

         Volume Name: U02
         Volume Device: /dev/asm/u02-93
         State: ENABLED
         Size (MB): 204800
         Resize Unit (MB): 64
         Redundancy: HIGH
         Stripe Columns: 8
         Stripe Width (K): 1024
         Usage:
         Mountpath:

or

SELECT volume_name, volume_device FROM V$ASM_VOLUME WHERE volume_name ='U02';

Create a file system on the existing volume device with the Oracle ACFS mkfs command.


[grid@RAC1 ~]$ /sbin/mkfs -t acfs /dev/asm/u02-93
mkfs.acfs: version                   = 12.1.0.2.0
mkfs.acfs: on-disk version           = 39.0
mkfs.acfs: volume                    = /dev/asm/u02-93
mkfs.acfs: volume size               = 214748364800  ( 200.00 GB )
mkfs.acfs: Format complete.

Optionally, register the file system in the ACFS mount registry. Registration must be done as the root user; as shown below, it fails when attempted as the grid user.

[grid@RAC1 ~]$ /sbin/acfsutil registry -a /dev/asm/u02-93 /u02
PRCN-2018 : Current user grid is not a privileged user
acfsutil registry: ACFS-03111: unable to add ACFS mount /u02 within Oracle Registry

[grid@RAC1 ~]$ sudo su - root
[sudo] password for grid:
[root@RAC1 ~]# /sbin/acfsutil registry -a /dev/asm/u02-93 /u02
acfsutil registry: mount point /u02 successfully added to Oracle Registry



[root@RAC1 ~]# df -h /u02
Filesystem       Size  Used Avail Use% Mounted on
/dev/asm/u02-93  200G  483M  200G   1% /u02
[root@RAC1 ~]#

Check status
ASMCMD> volinfo -G RECO u02
Diskgroup Name: RECO

         Volume Name: U02
         Volume Device: /dev/asm/u02-93
         State: ENABLED
         Size (MB): 204800
         Resize Unit (MB): 64
         Redundancy: HIGH
         Stripe Columns: 8
         Stripe Width (K): 1024
         Usage: ACFS
         Mountpath: /u02



Mount the file system with the Oracle ACFS mount command. You can mount a file system before or after registering it. If the file system has been registered, you can wait for it to be mounted automatically; otherwise, issue the following command on each node that should mount it.

/bin/mount -t acfs /dev/asm/u02-93 /u02
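
Once mounted, the file system can be grown online with acfsutil (run as root); the 250G target below is only illustrative, and, space permitting, the underlying ADVM volume is extended as part of the resize:

/sbin/acfsutil size 250G /u02      # grow the file system online
/sbin/acfsutil info fs /u02        # confirm the new size and mount details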



https://docs.oracle.com/database/121/OSTMG/GUID-4C98CF06-8CCC-45F1-9316-C40FB3EFF268.htm#OSTMG94787

Tuesday, 24 January 2017

CTAS Method to Create a Table

##Shell Script##


export ORACLE_SID=joshi
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/11.2.0.4

sqlplus -s '/ as sysdba' << EOF

set lines 200
set pages 50
set echo on
set serveroutput on
set timing on
set time on
spool /home/oracle/CTAS_TABLE.log

alter session set db_file_multiblock_read_count=128;
alter session set sort_multiblock_read_count=128;
alter session set parallel_force_local=true;

exec dbms_application_info.set_module ('CTAS_TABLE_TMP',null);

CREATE TABLE SCOTT.TMP AS SELECT * FROM HR.DEPT;


spool off
exit;
EOF


#########


#Sample CTAS statement:
CREATE TABLE emp2 AS SELECT * FROM emp;

#Specifying a tablespace:
CREATE TABLE emp3 TABLESPACE users AS SELECT * FROM emp;

#Parallel CTAS with NOLOGGING for faster loads:
CREATE TABLE emp4 NOLOGGING PARALLEL 4 AS SELECT * FROM emp;
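
As a follow-up to a CTAS it helps to confirm the copy and gather optimizer statistics; a minimal sketch in the same sqlplus-heredoc style as the script above (the SCOTT/emp4 names are illustrative, and remember that a NOLOGGING copy is not recoverable from redo until the next backup):

sqlplus -s '/ as sysdba' << EOF
set timing on
-- sanity check: the copy should hold the same number of rows as the source
SELECT COUNT(*) FROM scott.emp;
SELECT COUNT(*) FROM scott.emp4;
-- gather optimizer statistics on the new table
EXEC DBMS_STATS.GATHER_TABLE_STATS('SCOTT','EMP4');
EOF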

Thursday, 19 January 2017

ASM Disk configuration

[root@localhost backup]# rpm -Uvih oracleasm-support-2.1.7-1.el5.x86_64.rpm
warning: oracleasm-support-2.1.7-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing...                ########################################### [100%]
   1:oracleasm-support      ########################################### [100%]

[root@localhost backup]# rpm -Uvih oracleasm-2.6.18-308.el5-2.0.5-1.el5.x86_64.rpm
warning: oracleasm-2.6.18-308.el5-2.0.5-1.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing...                ########################################### [100%]
   1:oracleasm-2.6.18-308.el########################################### [100%]

[root@localhost backup]# rpm -qa|grep oracleasm
oracleasm-support-2.1.7-1.el5
oracleasm-2.6.18-308.el5-2.0.5-1.el5

[root@localhost backup]# rpm -Uvih oracleasmlib-2.0.4-1.el5.i386.rpm
warning: oracleasmlib-2.0.4-1.el5.i386.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing...                ########################################### [100%]
   1:oracleasmlib           ########################################### [100%]

[root@localhost backup]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.

This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.

Default user to own the driver interface []: oracle
Default group to own the driver interface []: oinstall
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]:
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver:                     [  OK  ]
Scanning the system for Oracle ASMLib disks:
                                                           [  OK  ]
[root@localhost backup]#
[root@localhost backup]# /etc/init.d/oracleasm status
Checking if ASM is loaded: yes
Checking if /dev/oracleasm is mounted: yes
[root@localhost backup]# ls -ltrh /dev/sd*
brw-r----- 1 root disk 8,  0 Jan 19 01:01 /dev/sda
brw-r----- 1 root disk 8,  2 Jan 19 01:01 /dev/sda2
brw-r----- 1 root disk 8, 80 Jan 19 01:01 /dev/sdf
brw-r----- 1 root disk 8, 48 Jan 19 01:02 /dev/sdd
brw-r----- 1 root disk 8, 64 Jan 19 01:02 /dev/sde
brw-r----- 1 root disk 8, 32 Jan 19 01:02 /dev/sdc
brw-r----- 1 root disk 8, 16 Jan 19 01:02 /dev/sdb
brw-r----- 1 root disk 8,  1 Jan 19 01:02 /dev/sda1
[root@localhost backup]# cat /proc/partitions
major minor  #blocks  name

   8        0   31457280 sda
   8        1     104391 sda1
   8        2   31350847 sda2
   8       16   10485760 sdb
   8       32   10485760 sdc
   8       48   10485760 sdd
   8       64   52428800 sde
   8       80   52428800 sdf
 253        0   27295744 dm-0
 253        1    4030464 dm-1
[root@localhost backup]# fdisk /dev/sdf
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.


The number of cylinders for this disk is set to 6527.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
   (e.g., DOS FDISK, OS/2 FDISK)
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)

Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-6527, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-6527, default 6527):
Using default value 6527

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@localhost backup]# cat /proc/partitions
major minor  #blocks  name

   8        0   31457280 sda
   8        1     104391 sda1
   8        2   31350847 sda2
   8       16   10485760 sdb
   8       32   10485760 sdc
   8       48   10485760 sdd
   8       64   52428800 sde
   8       80   52428800 sdf
   8       81   52428096 sdf1
 253        0   27295744 dm-0
 253        1    4030464 dm-1
[root@localhost backup]# /etc/init.d/oracleasm createdisk DATA1 /dev/sdf1
Marking disk "DATA1" as an ASM disk:                       [  OK  ]
[root@localhost backup]# /etc/init.d/oracleasm listdisks
DATA1
[root@localhost backup]#
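
If the disk is shared across RAC nodes, createdisk is run only once; the remaining nodes simply rescan to pick up the new label. A minimal sketch, run as root on each other node (device names can differ between nodes):

/etc/init.d/oracleasm scandisks            # pick up the new ASM disk label
/etc/init.d/oracleasm listdisks            # DATA1 should now be listed
/etc/init.d/oracleasm querydisk /dev/sdf1  # confirm the device carries the DATA1 label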

Configure Mount Point on Linux

[root@localhost ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup00-LogVol00
                       26G  2.6G   22G  11% /
/dev/sda1              99M   24M   71M  25% /boot
tmpfs                 978M     0  978M   0% /dev/shm

[root@localhost ~]# cat /proc/partitions
major minor  #blocks  name

   8        0   31457280 sda
   8        1     104391 sda1
   8        2   31350847 sda2
   8       16   10485760 sdb
 253        0   27295744 dm-0
 253        1    4030464 dm-1

[root@localhost ~]# ls -ltrh /dev/sd*
brw-r----- 1 root disk 8,  2 Jan 19 00:09 /dev/sda2
brw-r----- 1 root disk 8,  0 Jan 19 00:09 /dev/sda
brw-r----- 1 root disk 8, 16 Jan 19 00:09 /dev/sdb
brw-r----- 1 root disk 8,  1 Jan 19 00:10 /dev/sda1

[root@localhost ~]# fdisk -l

Disk /dev/sda: 32.2 GB, 32212254720 bytes
255 heads, 63 sectors/track, 3916 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *           1          13      104391   83  Linux
/dev/sda2              14        3916    31350847+  8e  Linux LVM

Disk /dev/sdb: 10.7 GB, 10737418240 bytes
255 heads, 63 sectors/track, 1305 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

Disk /dev/sdb doesn't contain a valid partition table

Disk /dev/dm-0: 27.9 GB, 27950841856 bytes
255 heads, 63 sectors/track, 3398 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

Disk /dev/dm-0 doesn't contain a valid partition table

Disk /dev/dm-1: 4127 MB, 4127195136 bytes
255 heads, 63 sectors/track, 501 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

Disk /dev/dm-1 doesn't contain a valid partition table

[root@localhost ~]# fdisk /dev/sdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.


The number of cylinders for this disk is set to 1305.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
   (e.g., DOS FDISK, OS/2 FDISK)
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)

Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1305, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-1305, default 1305):
Using default value 1305

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.

WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
The kernel still uses the old table.
The new table will be used at the next reboot.
Syncing disks.

[root@localhost ~]# cat /proc/partitions
major minor  #blocks  name

   8        0   31457280 sda
   8        1     104391 sda1
   8        2   31350847 sda2
   8       16   10485760 sdb
   8       17   10482381 sdb1
 253        0   27295744 dm-0
 253        1    4030464 dm-1

[root@localhost ~]# cat /etc/fstab
/dev/VolGroup00/LogVol00 /                       ext3    defaults        1 1
LABEL=/boot             /boot                   ext3    defaults        1 2
tmpfs                   /dev/shm                tmpfs   defaults        0 0
devpts                  /dev/pts                devpts  gid=5,mode=620  0 0
sysfs                   /sys                    sysfs   defaults        0 0
proc                    /proc                   proc    defaults        0 0
/dev/VolGroup00/LogVol01 swap                    swap    defaults        0 0


[root@localhost ~]# mkfs.ext3 -b 4096 /dev/sdb
mke2fs 1.39 (29-May-2006)
/dev/sdb is entire device, not just one partition!
Proceed anyway? (y,n) y
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
1310720 inodes, 2621440 blocks
131072 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2684354560
80 block groups
32768 blocks per group, 32768 fragments per group
16384 inodes per group
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632

Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done

This filesystem will be automatically checked every 24 mounts or
180 days, whichever comes first.  Use tune2fs -c or -i to override.

[root@localhost ~]# cat /etc/fstab
/dev/VolGroup00/LogVol00 /                       ext3    defaults        1 1
LABEL=/boot             /boot                   ext3    defaults        1 2
tmpfs                   /dev/shm                tmpfs   defaults        0 0
devpts                  /dev/pts                devpts  gid=5,mode=620  0 0
sysfs                   /sys                    sysfs   defaults        0 0
proc                    /proc                   proc    defaults        0 0
/dev/VolGroup00/LogVol01 swap                    swap    defaults        0 0

[root@localhost ~]# vi /etc/fstab
[root@localhost ~]# cat /etc/fstab
/dev/VolGroup00/LogVol00 /                       ext3    defaults        1 1
LABEL=/boot             /boot                   ext3    defaults        1 2
tmpfs                   /dev/shm                tmpfs   defaults        0 0
devpts                  /dev/pts                devpts  gid=5,mode=620  0 0
sysfs                   /sys                    sysfs   defaults        0 0
/dev/sdb                /u01                    ext3    defaults        1 1
proc                    /proc                   proc    defaults        0 0
/dev/VolGroup00/LogVol01 swap                    swap    defaults        0 0
[root@localhost ~]# mkdir -p /u01
[root@localhost ~]# mount /dev/sdb /u01

[root@localhost ~]# df -h /u01
Filesystem            Size  Used Avail Use% Mounted on
/dev/sdb              9.9G  151M  9.2G   2% /u01
[root@localhost ~]#
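
To be sure the new /etc/fstab entry works without waiting for a reboot, the mount can be exercised with mount -a; a quick check as root:

umount /u01
mount -a          # mounts everything listed in /etc/fstab, including /u01
df -h /u01        # should show /dev/sdb mounted on /u01 again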

Saturday, 14 January 2017

DBCONSOLE Installation on RAC

#set the environment

[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ export ORACLE_UNQNAME=joshi
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ echo $ORACLE_UNQNAME
joshi
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ echo $ORACLE_SID
joshi1
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ echo $ORACLE_HOME
/u01/app/oracle/product/11.2.0/dbhome_1


##
SQL> show parameter service

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
service_names                        string      joshi.nabh.com
SQL> show parameter db_uniq

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
db_unique_name                       string      joshi


##

[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ ./emctl status dbconsole
OC4J Configuration issue. /u01/app/oracle/product/11.2.0/dbhome_1/oc4j/j2ee/OC4J_DBConsole_rac1_joshi not found.
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $


##

[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ emca -config dbcontrol db -repos create -cluster -EM_NODE rac1 -EM_NODE_LIST rac1,rac2

##error##
INFO: This operation is being logged at /u01/app/oracle/cfgtoollogs/emca/gye/emca_2017_01_14_02_29_57.log. 
Jan 14, 2017 2:32:15 AM oracle.sysman.emcp.DatabaseChecks performReposChecks 
SEVERE: Dbcontrol Repository already exists. Fix the error(s) and run EM Configuration Assistant again in standalone mode. 
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ 



### To overcome this error, follow the steps below ###

To clean up the repository:

DROP USER sysman CASCADE;
DROP PUBLIC SYNONYM setemviewusercontext;
DROP ROLE mgmt_user;
DROP PUBLIC SYNONYM mgmt_target_blackouts;
DROP USER mgmt_view;


The synonyms on SYSMAN objects (public and private) must also be dropped:

DECLARE
  CURSOR c1 IS
    SELECT owner, synonym_name name
      FROM dba_synonyms
     WHERE table_owner = 'SYSMAN';
BEGIN
  FOR r1 IN c1 LOOP
    IF r1.owner = 'PUBLIC' THEN
      EXECUTE IMMEDIATE 'DROP PUBLIC SYNONYM '||r1.name;
    ELSE
      EXECUTE IMMEDIATE 'DROP SYNONYM '||r1.owner||'.'||r1.name;
    END IF;
  END LOOP;
END;
/


#######

SQL> DROP USER sysman CASCADE;

User dropped.

SQL> DROP PUBLIC SYNONYM setemviewusercontext;

Synonym dropped.

SQL> DROP ROLE mgmt_user;

Role dropped.

SQL> DROP PUBLIC SYNONYM mgmt_target_blackouts;

Synonym dropped.

SQL> DROP USER mgmt_view;

User dropped.

SQL> DECLARE
CURSOR c1 IS
SELECT owner, synonym_name name
FROM dba_synonyms
WHERE table_owner = 'SYSMAN';
BEGIN
FOR r1 IN c1 LOOP
IF r1.owner = 'PUBLIC' THEN
EXECUTE IMMEDIATE 'DROP PUBLIC SYNONYM '||r1.name;
ELSE
EXECUTE IMMEDIATE 'DROP SYNONYM '||r1.owner||'.'||r1.name;
END IF;
END LOOP;
END;
/   2    3    4    5    6    7    8    9   10   11   12   13   14   15

PL/SQL procedure successfully completed.

SQL>
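
Before re-running emca, a quick check that the cleanup left nothing behind; both queries below should come back empty/zero:

sqlplus -s '/ as sysdba' << EOF
SELECT username FROM dba_users WHERE username IN ('SYSMAN','MGMT_VIEW');
SELECT COUNT(*) FROM dba_synonyms WHERE table_owner = 'SYSMAN';
EOF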

###

### Create a password file ###

orapwd file=$ORACLE_HOME/dbs/orapw$ORACLE_SID password=sysoracle entries=5 force=y
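
The new password file can be verified from SQL*Plus before running emca; SYS should appear with SYSDBA enabled. A minimal check (the backslash only stops the shell expanding the dollar sign inside the heredoc):

sqlplus -s '/ as sysdba' << EOF
SELECT * FROM v\$pwfile_users;
EOF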

## Run the command below ##
emca -config dbcontrol db -repos create -cluster -EM_NODE rac1 -EM_NODE_LIST rac1,rac2


## The SYS, SYSMAN, DBSNMP and ASMSNMP passwords are needed during the installation ##


emca prompts for the following information (sample values shown):
Database unique name: pca
Service name: pca.nabh.com
Listener port number: 1521
Listener ORACLE_HOME [ /u01/app/11.2.0/grid ]:
Password for SYS user:
Password for DBSNMP user:
Password for SYSMAN user:
Cluster name: joshi-cluster
Email address for notifications (optional):
Outgoing Mail (SMTP) server for notifications (optional):
ASM ORACLE_HOME [ /u01/app/11.2.0/grid ]:
ASM port [ 1521 ]:
ASM username [ ASMSNMP ]:
ASM user password: 



##

STARTED EMCA at Jan 14, 2017 3:23:48 AM
EM Configuration Assistant, Version 11.2.0.3.0 Production
Copyright (c) 2003, 2011, Oracle.  All rights reserved.

Enter the following information:
Database unique name: joshi
Service name: joshi.nabh.com
Listener port number: 1521
Listener ORACLE_HOME [ /u01/app/11.2.0/grid ]:
Password for SYS user:
Password for DBSNMP user:
Password for SYSMAN user:  a
Cluster name: msldb-cluster
Email address for notifications (optional):
Outgoing Mail (SMTP) server for notifications (optional):
ASM ORACLE_HOME [ /u01/app/11.2.0/grid ]:
ASM port [ 1521 ]:
ASM username [ ASMSNMP ]:
ASM user password:  B$y$16
Jan 14, 2017 3:24:52 AM oracle.sysman.emcp.util.GeneralUtil initSQLEngineRemotely
WARNING: Error during db connection : ORA-12514: TNS:listener does not currently know of service requested in connect descriptor

-----------------------------------------------------------------

You have specified the following settings

Database ORACLE_HOME ................ /u01/app/oracle/product/11.2.0/dbhome_1

Database instance hostname ................
Listener ORACLE_HOME ................ /u01/app/11.2.0/grid
Listener port number ................ 1521
Cluster name ................ rac-cluster
Database unique name ................ joshi
Email address for notifications ...............
Outgoing Mail (SMTP) server for notifications ...............
ASM ORACLE_HOME ................ /u01/app/11.2.0/grid
ASM port ................ 1521
ASM user role ................ SYSDBA
ASM username ................ ASMSNMP

-----------------------------------------------------------------
Do you wish to continue? [yes(Y)/no(N)]: Y
Jan 14, 2017 3:25:02 AM oracle.sysman.emcp.EMConfig perform
INFO: This operation is being logged at /u01/app/oracle/cfgtoollogs/emca/joshi/emca_2017_01_14_03_23_48.log.
Jan 14, 2017 3:25:05 AM oracle.sysman.emcp.EMReposConfig createRepository
INFO: Creating the EM repository (this may take a while) ...
Jan 14, 2017 3:30:17 AM oracle.sysman.emcp.EMReposConfig invoke
INFO: Repository successfully created
Jan 14, 2017 3:30:23 AM oracle.sysman.emcp.EMReposConfig uploadConfigDataToRepository
INFO: Uploading configuration data to EM repository (this may take a while) ...
Jan 14, 2017 3:31:07 AM oracle.sysman.emcp.EMReposConfig invoke
INFO: Uploaded configuration data successfully
Jan 14, 2017 3:31:08 AM oracle.sysman.emcp.EMDBCConfig instantiateOC4JConfigFiles
INFO: Propagating /u01/app/oracle/product/11.2.0/dbhome_1/oc4j/j2ee/OC4J_DBConsole_rac1_joshi to remote nodes ...
Jan 14, 2017 3:31:09 AM oracle.sysman.emcp.EMDBCConfig instantiateOC4JConfigFiles
INFO: Propagating /u01/app/oracle/product/11.2.0/dbhome_1/oc4j/j2ee/OC4J_DBConsole_rac2_joshi to remote nodes ...
Jan 14, 2017 3:31:13 AM oracle.sysman.emcp.EMAgentConfig deployStateDirs
INFO: Propagating /u01/app/oracle/product/11.2.0/dbhome_1/rac1_joshi to remote nodes ...
Jan 14, 2017 3:31:15 AM oracle.sysman.emcp.EMAgentConfig deployStateDirs
INFO: Propagating /u01/app/oracle/product/11.2.0/dbhome_1/rac2_joshi to remote nodes ...
Jan 14, 2017 3:31:19 AM oracle.sysman.emcp.util.DBControlUtil secureDBConsole
INFO: Securing Database Control (this may take a while) ...
Jan 14, 2017 3:31:41 AM oracle.sysman.emcp.util.DBControlUtil startOMS
INFO: Starting Database Control (this may take a while) ...
Jan 14, 2017 3:32:05 AM oracle.sysman.emcp.EMDBPostConfig performConfiguration
INFO: Database Control started successfully
Jan 14, 2017 3:32:06 AM oracle.sysman.emcp.EMDBPostConfig performConfiguration
INFO: >>>>>>>>>>> The Database Control URL is https://rac1.nabh.com:5505/em <<<<<<<<<<<
Jan 14, 2017 3:32:09 AM oracle.sysman.emcp.EMDBPostConfig showClusterDBCAgentMessage
INFO:
****************  Current Configuration  ****************
 INSTANCE            NODE           DBCONTROL_UPLOAD_HOST
----------        ----------        ---------------------

joshi               rac1               rac1.nabh.com
joshi               rac2               rac1.nabh.com


Jan 14, 2017 3:32:09 AM oracle.sysman.emcp.EMDBPostConfig invoke
WARNING:
************************  WARNING  ************************

Management Repository has been placed in secure mode wherein Enterprise Manager data will be encrypted.  The encryption key has been placed in the file: /u01/app/oracle/product/11.2.0/dbhome_1/rac1_joshi/sysman/config/emkey.ora. Ensure this file is backed up as the encrypted data will become unusable if this file is lost.

***********************************************************
Enterprise Manager configuration completed successfully
FINISHED EMCA at Jan 14, 2017 3:32:09 AM
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $

#####

[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ ./emctl status dbconsole
Oracle Enterprise Manager 11g Database Control Release 11.2.0.4.0
Copyright (c) 1996, 2013 Oracle Corporation.  All rights reserved.
https://rac1.nabh.com:5505/em/console/aboutApplication
Oracle Enterprise Manager 11g is running.
------------------------------------------------------------------
Logs are generated in directory /u01/app/oracle/product/11.2.0/dbhome_1/rac1_joshi/sysman/log

#######
[rac1] /u01/app/oracle/product/11.2.0/dbhome_1/bin $ ./emctl status agent
Oracle Enterprise Manager 11g Database Control Release 11.2.0.4.0
Copyright (c) 1996, 2013 Oracle Corporation.  All rights reserved.
---------------------------------------------------------------
Agent Version     : 10.2.0.4.5
OMS Version       : 10.2.0.4.5
Protocol Version  : 10.2.0.4.5
Agent Home        : /u01/app/oracle/product/11.2.0/dbhome_1/rac1_joshi
Agent binaries    : /u01/app/oracle/product/11.2.0/dbhome_1
Agent Process ID  : 6038
Parent Process ID : 5948
Agent URL         : https://rac1.nabh.com:1835/emd/main
Repository URL    : https://rac1.nabh.com:5505/em/upload/
Started at        : 2017-01-14 03:31:43
Started by user   : oracle
Last Reload       : 2017-01-14 03:31:43
Last successful upload                       : 2017-01-14 03:33:28
Total Megabytes of XML files uploaded so far :    16.55
Number of XML files pending upload           :        0
Size of XML files pending upload(MB)         :     0.00
Available disk space on upload filesystem    :    86.29%
Data channel upload directory                : /u01/app/oracle/product/11.2.0/dbhome_1/rac1_joshi/sysman/recv
Last successful heartbeat to OMS             : 2017-01-14 03:32:53
---------------------------------------------------------------
Agent is Running and Ready
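
For routine restarts, Database Control is driven with emctl from the same home on each node; ORACLE_UNQNAME must be set first or emctl cannot locate its OC4J_DBConsole directory. A minimal sketch, using the joshi name from above:

export ORACLE_UNQNAME=joshi
$ORACLE_HOME/bin/emctl stop dbconsole
$ORACLE_HOME/bin/emctl start dbconsole
$ORACLE_HOME/bin/emctl status dbconsole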

####

Setting up Email Alerts

This section explains how to set up and configure email alerts via dbconsole. Please note that, for simplicity, these instructions deal with a single notification rule and alert. Users should create or amend the notification rules and alerts to suit their own needs.
1. Configure the mail server that dbconsole will use to send emails.
  • Click on setup in the top right-hand corner of dbconsole
  • Click on Notification Methods in the side bar
  • Enter a valid Outgoing Mail (SMTP) Server
  • Enter a suitable description for Identify Sender As
  • Enter a suitable email address for Sender's E-mail Address
  • Click on Test Mail Servers
  • Check to see if an email has been received in the Sender's E-mail Address inbox.
  • Click apply if satisfied with the test.
2. Set up the email addresses that will receive emails from dbconsole
  • Click on preferences in the top right-hand corner of dbconsole
  • Enter a valid email recipient in the E-mail Address field.
  • Select a suitable Message Format. For SMS messages, this should be set to short
  • Ensure the Select tick box is ticked and click Test
  • Click Ok
  • Click apply if satisfied with the test.
3. Define a schedule for when you wish to receive alerts.
  • Click on preferences in the top right-hand corner of dbconsole
  • Click on schedule in the side bar.
  • Click on Edit Schedule Definition
  • Select a Rotation Frequency
  • Click Edit Existing Schedule
  • Set a suitable Start Time and End Time for the schedule.
  • In the E-mail Addresses field, enter one of the email addresses defined in step 2 above.
  • Select the Days of Week that are applicable to the schedule being created.
  • Click Batch Fill-in
  • Click Finish
4. Create a notification rule.

  • Click on preferences in the top right-hand corner of dbconsole
  • Click on rules in the side bar.
  • Click create
  • Enter a Name for the notification rule.
  • Enter a Description for the notification rule.
  • Select a Target Type. For example, database.
  • Ensure Apply rule to all targets of the target type selected is selected.
  • Click Next
  • Select the availability states to be notified on.
  • Click Next
  • Select the Severity States to be notified on.
  • Use the arrows to move the Available Metrics into the Selected Metrics box. Any metrics in the Selected Metrics box will be alerted on.
  • Click Next.
  • Depending on which metrics were chosen, it may be necessary to define further attributes for the metric.
  • When satisfied all additional metric properties have been defined, click Next.
  • Ensure the Send Me E-mail tick box is ticked.
  • Click Next
  • Review the Notification Rule and click Finish

Friday, 13 January 2017

TFA Diagnostic Collection


$GI_HOME/tfa/bin/tfactl diagcollect -from "MMM/dd/yyyy hh:mm:ss" -to "MMM/dd/yyyy hh:mm:ss" 

Format example: "Jul/1/2014 21:00:00" 


##CHECK STATUS##
node1:oracle:orcl /u01/app/grid/11.2.0.4/tfa/bin> tfactl print status

.-------------------------------------------------------------------------------------------------.
| Host        | Status of TFA | PID      | Port | Version | Build ID           | Inventory Status |
+-------------+---------------+----------+------+---------+--------------------+------------------+
| node1 | RUNNING       |  8519684 | 5000 | 3.2.0.2 | 320220140530060235 | COMPLETE         |
| che-casdb02 | RUNNING       | 11403394 | 5000 | 3.2.0.2 | 320220140530060235 | COMPLETE         |
| che-casdb03 | RUNNING       | 11469002 | 5000 | 3.2.0.2 | 320220140530060235 | COMPLETE         |
'-------------+---------------+----------+------+---------+--------------------+------------------'

##COLLECT LOGS##
node1:oracle:orcl /u01/app/grid/11.2.0.4/tfa/bin> tfactl diagcollect -from "Jan/13/2017 01:00:00" -to "Jan/13/2017 05:00:00"
Collecting data for all nodes
Scanning files from Jan/13/2017 01:00:00 to Jan/13/2017 05:00:00

##Re-install TFA##
$GRID_HOME/crs/install/tfa_setup.sh -silent -crshome /u01/app/11204/grid 


###Start, stop and restart TFA###


# /etc/init.d/init.tfa stop
# /etc/init.d/init.tfa  start
# /etc/init.d/init.tfa  restart

#   $GRID_HOME/tfa/bin/tfactl  print config

#   $GRID_HOME/tfa/bin/tfactl  print  actions

#   $GRID_HOME/tfa/bin/tfactl  print  errors 



####Check for database startups###
# $GRID_HOME/tfa/bin/tfactl print startups


###Collect tracefiles using TFA##

# $GRID_HOME/tfa/bin/tfactl diagcollect -all -since 1h
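
TFA's own repository fills up over time; a couple of housekeeping commands (the 30-day retention value is only an example):

# $GRID_HOME/tfa/bin/tfactl print repository     # show repository location and usage
# $GRID_HOME/tfa/bin/tfactl purge -older 30d     # remove collections older than 30 days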


##

Oracle DOC ID 1513912.1 

ORA-14508: specified VALIDATE INTO table not found

SQL> analyze table PIN.EVENT_T validate structure cascade;
analyze table PIN.EVENT_T validate structure cascade
*
ERROR at line 1:
ORA-14508: specified VALIDATE INTO table not found

Run the script below to create the validation table (utlvalid.sql creates the INVALID_ROWS table):

SQL> @?/rdbms/admin/utlvalid.sql;


Table created.


Now re-run the analyze command:

SQL> analyze table PIN.EVENT_T validate structure cascade;
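
Rows that fail validation are written to the INVALID_ROWS table that utlvalid.sql creates; a quick check afterwards (no rows returned means the structure is clean; column names as defined in utlvalid.sql):

SQL> SELECT owner_name, table_name, head_rowid FROM invalid_rows;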