Monday, December 16, 2013
Foreign Keys with Missing Indexes
--FK Constraint columns not indexed
set lines 160
set pages 40
col FKCons format a30
col SourceTable format a30
col SourceColumn format a30
col TargetTable format a30
col TargetColumn format a30
select scc.constraint_name as FKCons,
       scc.table_name as SourceTable,
       scc.column_name as SourceColumn,
       tcc.table_name as TargetTable,
       tcc.column_name as TargetColumn
from all_cons_columns scc,
     all_constraints sc,
     all_cons_columns tcc
where sc.constraint_name = scc.constraint_name      --Join Cons to Cons Cols
and sc.r_constraint_name = tcc.constraint_name      --Join source to target
and sc.constraint_type = 'R'                        --RI Constraints only
and scc.owner = '&SCHEMA'                           --Only the &SCHEMA schema
and tcc.table_name||tcc.column_name not in
    (select i.table_name||i.column_name
     from all_ind_columns i
     where i.index_owner = '&SCHEMA')               --FKs not indexed
order by scc.table_name
/
I can only guess this was a problem in prior releases of the database. Judging by this 12c test, it no longer seems possible: Oracle refuses to drop the supporting index or disable the referenced constraint while the foreign key exists.
SQL> create table source (sourceid number, constraint sourcepk primary key (sourceid));
Table created.
SQL> create table target (targetid number, constraint targetpk primary key (targetid));
Table created.
SQL> alter table target add constraint sourceidfk foreign key (targetid) references source (sourceid);
Table altered.
SQL> col table_name format a20
SQL> col index_name format a20
SQL> col column_name format a20
SQL> select table_name,index_name,column_name from all_ind_columns where index_owner = 'KEN';
TABLE_NAME INDEX_NAME COLUMN_NAME
-------------------- -------------------- --------------------
TARGET TARGETPK TARGETID
SOURCE SOURCEPK SOURCEID
SQL> drop index sourcepk;
drop index sourcepk
*
ERROR at line 1:
ORA-02429: cannot drop index used for enforcement of unique/primary key
SQL> alter table source disable constraint sourcepk;
alter table source disable constraint sourcepk
*
ERROR at line 1:
ORA-02297: cannot disable constraint (KEN.SOURCEPK) - dependencies exist
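When the query above does flag an unindexed foreign key, the usual fix is simply to index the child column, since unindexed FKs can cause full scans and table-level locking on the child table during parent-key updates and deletes. A minimal sketch with hypothetical schema, table, and column names:

create index oms.orders_custid_fk_ix on oms.orders (customer_id);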
Thursday, December 5, 2013
AWR Interval and Retention
SQL
select
extract( day from snap_interval) *24*60+
extract( hour from snap_interval) *60+
extract( minute from snap_interval ) "Snapshot Interval",
extract( day from retention) *24*60+
extract( hour from retention) *60+
extract( minute from retention ) "Retention Interval"
from dba_hist_wr_control;
SAMPLE OUTPUT
Snapshot Interval Retention Interval
----------------- ------------------
60 10080
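If the interval or retention needs changing, DBMS_WORKLOAD_REPOSITORY takes both settings in minutes. A quick sketch (the 30-minute interval and 14-day retention are just illustrative values):

exec dbms_workload_repository.modify_snapshot_settings(interval => 30, retention => 20160);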
Wednesday, September 11, 2013
Hangcheck Timer Not Needed for 11gR2 RAC
Hangcheck Timer FAQ (Doc ID 232355.1)
Do I need the hangcheck-timer with 11gR2 ?
Answer
-----------
The hangcheck-timer is not needed with 11gR2. This is documented in 'Oracle® Grid Infrastructure Installation Guide 11g Release 2 (11.2) for Linux', section 'Improved Input/Output Fencing Processes':

"Oracle Clusterware 11g release 2 (11.2) replaces the oprocd and Hangcheck processes with the cluster synchronization service daemon Agent and Monitor to provide more accurate recognition of hangs and to avoid false termination."
Monday, August 19, 2013
Identifying Corruption
Find Corrupt Segments
set linesize 140
col owner format a10
col segment_type format a10
col segment_name format a30
col partition_name format a15
col file# format 999
SELECT e.owner, e.segment_type, e.segment_name, e.partition_name, c.file#
, greatest(e.block_id, c.block#) corr_start_block#
, least(e.block_id+e.blocks-1, c.block#+c.blocks-1) corr_end_block#
, least(e.block_id+e.blocks-1, c.block#+c.blocks-1)
- greatest(e.block_id, c.block#) + 1 blocks_corrupted
, null description
FROM dba_extents e, v$database_block_corruption c
WHERE e.file_id = c.file#
AND e.block_id <= c.block# + c.blocks - 1
AND e.block_id + e.blocks - 1 >= c.block#
UNION
SELECT s.owner, s.segment_type, s.segment_name, s.partition_name, c.file#
, header_block corr_start_block#
, header_block corr_end_block#
, 1 blocks_corrupted
, 'Segment Header' description
FROM dba_segments s, v$database_block_corruption c
WHERE s.header_file = c.file#
AND s.header_block between c.block# and c.block# + c.blocks - 1
UNION
SELECT null owner, null segment_type, null segment_name, null partition_name, c.file#
, greatest(f.block_id, c.block#) corr_start_block#
, least(f.block_id+f.blocks-1, c.block#+c.blocks-1) corr_end_block#
, least(f.block_id+f.blocks-1, c.block#+c.blocks-1)
- greatest(f.block_id, c.block#) + 1 blocks_corrupted
, 'Free Block' description
FROM dba_free_space f, v$database_block_corruption c
WHERE f.file_id = c.file#
AND f.block_id <= c.block# + c.blocks - 1
AND f.block_id + f.blocks - 1 >= c.block#
order by file#, corr_start_block#
/
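Keep in mind v$database_block_corruption is only populated after RMAN has actually inspected the blocks, so run a validate first. For example (11g syntax; earlier releases use BACKUP VALIDATE DATABASE):

RMAN> validate database;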
Tuesday, August 13, 2013
Generate Tablespace DDL
select dbms_metadata.get_ddl('TABLESPACE',tablespace_name) from dba_tablespaces;
or
select 'select dbms_metadata.get_ddl(''TABLESPACE'','''||tablespace_name||''') from dual;' from dba_tablespaces;
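If you plan to spool the generated DDL into a runnable script, it helps to have dbms_metadata append a terminator to each statement first. A small sketch:

exec dbms_metadata.set_transform_param(dbms_metadata.session_transform, 'SQLTERMINATOR', true);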
RMAN Backup File Order
Question:
Assuming a single channel and filesperset=1, what order does RMAN choose to backup datafiles?
Answer:
The order of files is random. The file names are read from the V$DATAFILE view and, due to a performance bug, the ORDER BY FILE# clause was removed, so the query can return file numbers in random order.
Monday, August 12, 2013
Reset Oracle Sequence in-place
This avoids drop/create, and re-plumbing grants and synonyms.
1. Set the "increment by" value to -(nextval - 1)
2. Increment the sequence
3. Set the "increment by" value back to 1
SQL> select myschema.mysequence.nextval from dual;

   NEXTVAL
----------
1887203408

SQL> alter sequence myschema.mysequence increment by -1887203407;

Sequence altered.

SQL> select myschema.mysequence.nextval from dual;

   NEXTVAL
----------
         1

SQL> alter sequence myschema.mysequence increment by 1;

Sequence altered.

SQL> select myschema.mysequence.nextval from dual;

   NEXTVAL
----------
         2

SQL> alter system flush shared_pool;

System altered.
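To avoid hand-computing the offset, the first ALTER can be generated. A small sketch (same illustrative sequence name as above; note the nextval call itself consumes a value, and that is exactly the value the offset is derived from, so this assumes the current value is above the sequence minimum):

select 'alter sequence myschema.mysequence increment by -' ||
       (myschema.mysequence.nextval - 1) || ';' reset_stmt
from dual;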
Wednesday, July 17, 2013
Interesting 12c New Features
"I don't always build databases, but when I do, I prefer Oracle 12c."
-The Most Interesting DBA in the World
17-Jul-2013
- Multi-tenant Architecture
- CDB Container Database
- Common Users & Roles
- PDB Pluggable Database
- Local Users & Roles
- Up to 253 PDBs in a CDB
- Listener services for CDB and PDBs
- varchar2(32767)
- Auto Incrementing Identity Column (see the sketch after this list)
- Default column value can be sequence.nextval
- Fetch First and Offset syntax for top-N results
- Drop partition without "update global indexes"
- Move partition online
- CPU-Instance Fencing using "processor_group_name". Used with Linux "control groups" or Solaris "resource pools".
- Grid Scheduler - Enterprise Job Scheduling
- DataPump timestamped logging (finally)
- DataPump export a view as table
- Application Continuity - It claims to mask db side failures to the app.
- New online operations (drop index, drop constraint, etc.)
- Invisible columns
- Move a datafile online (think online migration to ASM)
- Online Index DDL change via multiple indexes on the same column(s)
- DataGuard Max Availability performance enhancement. Standby doesn't wait for RFS to write redo to standby log(s).
- The "Far Sync" instance type, control and log files only. Data Guard transaction distributor for slow WAN's etc.
- RMAN runs native SQL
- PGA_AGGREGATE_LIMIT - to stop runaway process(es) & avoid node eviction.
- Rebalance multiple ASM diskgroups simultaneously.
- ASM Flex server - decoupled from the database server
- Datafiles on ACFS
- Flex Cluster - Hub/Leaf nodes.
- OCR Backup in ASM diskgroup (visible to all nodes)
- IPv6 support
- Auditing enabled by default (audit_admin role necessary to admin audit settings/data)
- Last Login
- sysbackup admin privilege for separation of duties for the backup user
- DBUA - Faster upgrades by using parallel operations (like adpatch?)
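Two of these are easy to try together. A minimal sketch of an identity column plus the new top-N syntax (the table is made up for illustration):

create table demo_items (
  id   number generated always as identity,  -- 12c identity column
  name varchar2(100)
);

insert into demo_items (name) values ('first');
insert into demo_items (name) values ('second');

select id, name
from demo_items
order by id
fetch first 10 rows only;                    -- 12c top-N syntax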
Thursday, June 27, 2013
OEM Grid Control 12c Release 2 - Agent Errors on Upload
THE ERROR
"EMD upload error:full upload has failed: Upload Recent Sending Statistics"
> emctl upload
Oracle Enterprise Manager Cloud Control 12c Release 2
Copyright (c) 1996, 2012 Oracle Corporation. All rights reserved.
---------------------------------------------------------------
EMD upload error:full upload has failed: Upload Recent Sending Statistics
________________________________
Avg send time=0.0 milliseconds
Backoff Event List
________________________________
Upload Failure List
_______________________________
Connection Event List
_____________________________
Upload timed out before completion.
Number of files to upload before the uploadNow call: 301, total size (MB): 2.3485966
Remaining number of files to upload: 301, total size (MB): 2.3485966 (TIMEOUT)
THE SOLUTION
> emctl secure agent
Oracle Enterprise Manager Cloud Control 12c Release 2
Copyright (c) 1996, 2012 Oracle Corporation. All rights reserved.
Agent successfully stopped... Done.
Securing agent... Started.
Enter Agent Registration Password :
Agent successfully restarted... Done.
EMD gensudoprops completed successfully
Securing agent... Successful.
> emctl upload
Oracle Enterprise Manager Cloud Control 12c Release 2
Copyright (c) 1996, 2012 Oracle Corporation. All rights reserved.
---------------------------------------------------------------
EMD upload completed successfully
Monday, April 15, 2013
Got a Large SGA? Use HugePages
3 Key Reasons:
1. Oracle "Strongly recommends always deploying HugePages with Oracle Databases"
2. Reduces memory footprint by a factor of 500
3. 4K vs. 2MB memory pages. More pages can be mapped in memory with less overhead
Great video from OpenWorld DemoGrounds
HOW TO
Do steps 1-4 on EACH NODE
1. Set Oracle User memlock Limits
---------------------------------
NOTE: Value should be 90% of total memory
<value> is in kb (ex. 200 gb = 209715200 kb)
As Root
vi /etc/security/limits.conf
oracle soft memlock <value>
oracle hard memlock <value>
2. Set kernel.shmmax
--------------------
NOTE: Value should be comfortably larger than the largest SGA
As Root
get existing value:
cat /proc/sys/kernel/shmmax
set new value:
vi /etc/sysctl.conf
AND
echo <value in bytes> > /proc/sys/kernel/shmmax
AND
sysctl -p
verify new value
cat /proc/sys/kernel/shmmax
3. Set kernel.shmall
--------------------
NOTE: Value should be sum of SGA's / pagesize
Get pagesize
getconf PAGESIZE
Determine sum of SGAs
ipcs -m |grep oracle, sum of 5th column values + future SGAs
example:
pagesize= 4,096
SGA's = 92gb + 58gb growth = 161061273600 bytes
SGA's / pagesize = 39,321,600
kernel.shmall should be 39321600
get existing value:
cat /proc/sys/kernel/shmall
set new value:
vi /etc/sysctl.conf
AND
echo <value> > /proc/sys/kernel/shmall
AND
sysctl -p
verify new value
cat /proc/sys/kernel/shmall
4. Set vm.nr_hugepages
----------------------
NOTE: Use recommended value returned from hugepages_settings.sh
get the script from My Oracle Support (Doc ID 401749.1)
As Root, add the following to /etc/sysctl.conf
# Hugepages
# Allow oracle user access to hugepages
vm.hugetlb_shm_group = 310
vm.nr_hugepages = <value>
AND run
sysctl -p
verify setting:
cat /proc/sys/vm/nr_hugepages
5. Set use_large_pages=ONLY in spfile (11g only)
------------------------------------------------
For Each DATABASE
alter system set use_large_pages=ONLY scope=spfile;
6. Restart Database(s)
----------------------
Verify huge pages in use (alert log)
Run ipcs -m (should only have a couple segments per database)
Labels: 11g, hugepages, Linux, Oracle, Performance, transparent, use_large_pages, x64, x86-64
Friday, March 15, 2013
Session PGA Memory Usage
A helpful script to find sessions using large amounts of system memory:
SET LINESIZE 140
SET PAGESIZE 100
COL session HEADING 'SID - User - Client' FORMAT a35
COL current_size HEADING 'Current MB' FORMAT '999,999.99'
COL maximum_size HEADING 'Max MB' FORMAT '999,999.99'
BREAK ON REPORT
COMPUTE SUM LABEL 'Total' OF current_size ON REPORT
SELECT TO_CHAR(ssn.sid, '9999') || ' - ' ||
NVL(ssn.username, NVL(bgp.name, 'background')) || ' - ' ||
NVL(lower(ssn.machine), ins.host_name) "SESSION",
TO_CHAR(prc.spid, '999999999') "PID/THREAD",
se1.value/1024/1024 current_size,
se2.value/1024/1024 maximum_size
FROM v$sesstat se1,
v$sesstat se2,
v$session ssn,
v$bgprocess bgp,
v$process prc,
v$instance ins,
v$statname stat1,
v$statname stat2
WHERE se1.statistic# = stat1.statistic#
AND stat1.name = 'session pga memory'
AND se2.statistic# = stat2.statistic#
AND stat2.name = 'session pga memory max'
AND se1.sid = ssn.sid
AND se2.sid = ssn.sid
AND ssn.paddr = bgp.paddr (+)
AND ssn.paddr = prc.addr (+)
-- AND NVL(ssn.username, NVL(bgp.name, 'background')) = '<SCHEMA>'
ORDER BY 4
/
!free
exit
/
Sample output:
SID - User - Client PID/THREAD Current MB Max MB
----------------------------------- ---------- ----------- -----------
cut...
867 - XXXX - server01 9615 3.47 223.60
729 - XXXX - server01 9611 3.47 223.78
469 - XXXX - server01 3479 3.16 223.85
605 - XXXX - server03 4007 10.97 1,373.78
1039 - XXXX - server01 21216 11.16 1,373.97
448 - XXXX - server02 3852 10.22 1,378.91
-----------
Total 980.91
total used free shared buffers cached
Mem: 132085672 125739556 6346116 0 836680 24065240
-/+ buffers/cache: 100837636 31248036
Swap: 8193140 9968 8183172
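For the instance-wide view of the same numbers, v$pgastat sums it up:

select name, round(value/1024/1024) mb
from v$pgastat
where name in ('aggregate PGA target parameter','total PGA allocated');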
Monday, February 18, 2013
Tablespace usage over time
If you have OEM, this repository query may be very helpful:
SELECT KEY_VALUE Tablespace_Name,
ROLLUP_TIMESTAMP Sample_date,
METRIC_COLUMN metric,
AVERAGE
FROM sysman.MGMT$METRIC_DAILY
WHERE metric_name='tbspAllocation'
and TARGET_NAME='<dbname>'
and KEY_VALUE = '<tablespacename>'
ORDER BY 1,2,3;
Tuesday, January 29, 2013
Auto-Start Standby Database
1. Start Standalone Grid Infrastructure - High Availability Services (HAS)

host > $CRS_HOME/bin/crsctl config has
CRS-4621: Oracle High Availability Services autostart is disabled.

host > $CRS_HOME/bin/crsctl enable has
CRS-4622: Oracle High Availability Services autostart is enabled.

2. Change auto_start to "always" and the default startup mode to "mount"

srvctl modify database -d stndbydb -s mount
crsctl modify resource ora.stndbydb.db -attr "AUTO_START=always"

Test with crs_stat -p
How to Trace SQL in the Current Session
Trace SQL in the current session:
alter session set timed_statistics=true;
alter session set max_dump_file_size=unlimited;
alter session set tracefile_identifier='20Jul2012_special_trace_1';
alter session set events '10046 TRACE NAME CONTEXT FOREVER,level 12';
..Run some things..
select 'close cursor' from dual; --to dump the rowsource information
alter session set events '10046 trace name context off';
The trace file will be in $ORACLE_BASE/diag/rdbms/<db_name>/<instance_name>/trace
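On 11g and later you can also ask the database for the session's exact trace file name rather than hunting through that directory:

select value from v$diag_info where name = 'Default Trace File';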
ASM Debugging Info
If you have an ASM issue, you'll want these commands handy.
cat /etc/*release
uname -a
rpm -qa|grep oracleasm
/usr/sbin/oracleasm configure
/sbin/modinfo oracleasm
/etc/init.d/oracleasm status
/usr/sbin/oracleasm-discover
oracleasm scandisks
oracleasm listdisks
ls -l /dev/oracleasm/disks
/sbin/blkid
ls -l /dev/mpath/*
ls -l /dev/mapper/*
ls -l /dev/dm-*
another approach...
1. uname -a
2. rpm -qa | grep oracleasm
3. cat /etc/sysconfig/oracleasm
4. upload /var/log/oracleasm
5. cat /proc/partitions
6. ls -la /dev/oracleasm/disks
7. /etc/init.d/oracleasm scandisks
8. /etc/init.d/oracleasm listdisks
9. Run "sosreport" from the command line. That will generate a bzip file; attach it to the SR.
10. Send the following command outputs:
a. cat /proc/partitions |grep sd|while read a b c d;do echo -n $d$'\t'" scsi_id=";(echo $d|tr -d [:digit:]|xargs -i scsi_id -g -s /block/{})done
b. blkid|grep sd.*oracleasm|while read a b;do echo -n $a$b" scsi_id=";(echo $a|tr -d [:digit:]|tr -d [:]|cut -d"/" -f3|xargs -i scsi_id -g -s /block/{})done;
11. Also you can verify the disk has correct header as follows:
# dd if=/dev/path/to/disk bs=16 skip=2 count=1 | hexdump -C
example:
# dd if=/dev/mapper/data0p1 bs=16 skip=2 count=1 | hexdump -C
1+0 records in
1+0 records out
16 bytes (16 B) copied, 0.037821 seconds, 0.4 kB/s
00000000 4f 52 43 4c 44 49 53 4b 44 41 54 41 30 00 00 00 |ORCLDISKDATA0...|
Reference note: Troubleshooting a multi-node ASMLib installation (Doc ID 811457.1)
RAC - Relocating VIP and SCAN
Failover VIP (on the destination node)
./crs_relocate [vip resource name]
The VIP will now go where it's configured to be
Failover SCAN
srvctl relocate scan -i [LISTENER_NUMBER] -n [DESTINATION_NODE_NAME]
Shared TNSNAMES.ora is not supported
Oracle does not Support Shared tnsnames.ora
SR Reply: "Well, technically there is no official support for UNC (Universal Naming Convention, e.g. \\path\file) in the tnsnames files. I have seen customers try this and some manage to get it to work and some do not. My tests have always led to it not working, so I personally do not believe those that have claimed it worked when I used the same syntax and it fails.

Officially we do not support it because the values used for UNC are restricted values. See this link, specifically the //. Those are used by our coding logic for ezconnect connections, therefore we have logic in place for those values to be used for a different purpose. That is just one of the many reasons it's not supported, not the only reason, just an FYI. So I cannot really provide the proper syntax, as it technically does not exist. My apologies.

Also, so you know, technically we do not officially support shared tnsnames files on remote directories. The ifile and even TNS_ADMIN were designed to handle multiple Oracle homes on the same node. It was never intended or supported to work over a network. It does work under most circumstances but is not something we recommend or support. For shared usage over the network, we suggest an LDAP server as the intended feature to use."
How FULL are the BLOCKS in my TABLE?
Table Block Space Usage:
set serveroutput on size 100000
declare
v_unformatted_blocks number;
v_unformatted_bytes number;
v_fs1_blocks number;
v_fs1_bytes number;
v_fs2_blocks number;
v_fs2_bytes number;
v_fs3_blocks number;
v_fs3_bytes number;
v_fs4_blocks number;
v_fs4_bytes number;
v_full_blocks number;
v_full_bytes number;
begin
dbms_space.space_usage (
'&TABLEOWNER', --object owner
'&TABLENAME', --object name
'TABLE', --object type TABLE, INDEX, or "TABLE PARTITION"
v_unformatted_blocks,
v_unformatted_bytes,
v_fs1_blocks,
v_fs1_bytes,
v_fs2_blocks,
v_fs2_bytes,
v_fs3_blocks,
v_fs3_bytes,
v_fs4_blocks,
v_fs4_bytes,
v_full_blocks,
v_full_bytes
--,'&PARTITIONNAME'  --uncomment (with the leading comma) to report one partition
);
dbms_output.put_line('Unformatted Blocks = '||v_unformatted_blocks);
dbms_output.put_line('FS1 Blocks = '||v_fs1_blocks);
dbms_output.put_line('FS2 Blocks = '||v_fs2_blocks);
dbms_output.put_line('FS3 Blocks = '||v_fs3_blocks);
dbms_output.put_line('FS4 Blocks = '||v_fs4_blocks);
dbms_output.put_line('Full Blocks = '||v_full_blocks);
end;
/
Sample Output:
Unformatted Blocks = 16
FS1 Blocks = 42 <--- 0-25% full
FS2 Blocks = 31 <-- 25-50% full
FS3 Blocks = 35 <-- 50-75% full
FS4 Blocks = 4651 <- 75-99% full
Full Blocks = 99448
Shrinking options:
-- Enable row movement.
ALTER TABLE scott.emp ENABLE ROW MOVEMENT;

-- Recover space and amend the high water mark (HWM).
ALTER TABLE scott.emp SHRINK SPACE;

-- Recover space, but don't amend the high water mark (HWM).
ALTER TABLE scott.emp SHRINK SPACE COMPACT;

-- Recover space for the object and all dependent objects.
ALTER TABLE scott.emp SHRINK SPACE CASCADE;
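One caveat before shrinking: SHRINK SPACE (and the dbms_space.space_usage report above) requires the segment to be in an ASSM (automatic segment space management) tablespace. To check:

select tablespace_name, segment_space_management from dba_tablespaces;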
DML Activity by Object (including statistics)
This is a helpful script to see DML activity on tables.
By default, the auto stats job gathers stats on an object once the "Updates" column exceeds 10%.
First, flush DML activity stats to dba_tab_modifications:
exec dbms_stats.flush_database_monitoring_info;
set linesize 140
set pagesize 50
col table_partition heading 'Table.Partition' format a40
col analyzed heading 'Last Analyzed'
col num_rows heading '# Rows' format 99,999,999,999
col tot_updates heading 'Total DMLs' format 99,999,999,999
col truncd heading 'Truncated?'
col pct_updates heading '%|Updates' format 999.99
col ts heading 'Last DML'
select table_name||decode(partition_name,null,'','.'||partition_name) table_partition,
to_char(last_analyzed,'MM/DD/YY HH24:MI') analyzed,
num_rows,
tot_updates,
to_char(timestamp,'MM/DD/YY HH24:MI') ts,
to_number(perc_updates) pct_updates,
decode(truncated,'NO','','Yes ') truncd
from (select a.*,
nvl(decode(num_rows, 0, '-1', 100 * tot_updates / num_rows), -1) perc_updates
from (select (select num_rows
from dba_tables
where dba_tables.table_name = DBA_TAB_MODIFICATIONS.table_name
and DBA_TAB_MODIFICATIONS.table_owner = dba_tables.owner) num_rows,
(select last_analyzed
from dba_tables
where dba_tables.table_name = DBA_TAB_MODIFICATIONS.table_name
and DBA_TAB_MODIFICATIONS.table_owner = dba_tables.owner) last_analyzed,
(inserts + updates + deletes) tot_updates,
DBA_TAB_MODIFICATIONS.*
from sys.DBA_TAB_MODIFICATIONS
) a
) b
where perc_updates > 5
and table_owner = '&SCHEMA'
order by last_analyzed desc
/
exit
/
SAMPLE OUTPUT:
Table.Partition Last Analyzed # Rows Total DMLs Last DML Updates Truncated?
-------------------- -------------- --------------- --------------- -------------- ------- ----------
AQ$_QUEUE_TABLES 03/12/13 14:32 11 1 03/12/13 14:36 9.09
Manually Run Stats Gathering Job
Run Stats Gathering Job:
exec DBMS_STATS.GATHER_DATABASE_STATS_JOB_PROC()
Notes:
The GATHER_DATABASE_STATS_JOB_PROC procedure collects statistics on database objects when the object has no previously gathered statistics or the existing statistics are stale because the underlying object has been modified significantly (more than 10% of the rows).
The DBMS_STATS.GATHER_DATABASE_STATS_JOB_PROC is an internal procedure, but it operates in a very similar fashion to the DBMS_STATS.GATHER_DATABASE_STATS procedure using the GATHER AUTO option.
The primary difference is that the DBMS_STATS.GATHER_DATABASE_STATS_JOB_PROC procedure prioritizes the database objects that require statistics, so that those objects which most need updated statistics are processed first.
This ensures that the most-needed statistics are gathered before the maintenance window closes.
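To preview which objects the job would treat as stale before kicking it off:

select owner, table_name, last_analyzed, stale_stats
from dba_tab_statistics
where owner = '&SCHEMA'
and stale_stats = 'YES';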
SQL Query for Oracle Redo Log Files
Handy SQL when manipulating logfile groups:

set pagesize 30
set linesize 100
column member format a60
column status format a10
column size_MB format '999,999'
select group#,thread#,members,status,bytes/1024/1024 size_MB from v$log;
select group#,member from v$logfile order by group#;

--alter system switch all logfile;
--alter database add logfile thread &thread group &group size &size;
--alter database drop logfile group &group;
--alter database drop logfile member '&member';

Sample Output:

    GROUP#    THREAD#    MEMBERS STATUS      SIZE_MB
---------- ---------- ---------- ---------- --------
         1          1          2 INACTIVE      4,096
         2          1          2 INACTIVE      4,096
         3          1          2 CURRENT       4,096
         6          2          2 INACTIVE      4,096
         7          2          2 CURRENT       4,096
         8          2          2 INACTIVE      4,096

    GROUP# MEMBER
---------- ------------------------------------------------------------
         1 +MY_DATA/mydb/onlinelog/group_1.324.807662009
         1 +MY_FRA/mydb/onlinelog/group_1.2611.807662031
         2 +MY_DATA/mydb/onlinelog/group_2.336.807662061
         2 +MY_FRA/mydb/onlinelog/group_2.2601.807662085
         3 +MY_DATA/mydb/onlinelog/group_3.321.807662115
         3 +MY_FRA/mydb/onlinelog/group_3.2607.807662137
...snip...
Database Uptime
Database Uptime (11g):
col d_name heading 'Database' format a8
col v_logon_time heading 'Startup'
col dh_uptime heading 'Uptime' format a30
select upper(sys_context('USERENV','DB_NAME')) d_name,
to_char(logon_time,'DD-MON-YYYY hh24:mi:ss') v_logon_time,
to_char(trunc(sysdate-logon_time,0))||' days, '||trunc(((sysdate-logon_time)-floor(sysdate-logon_time))*24)||' Hours' dh_uptime
from sys.v_$session
where sid=1 /* pmon session */
/
Sample Output:
Database Startup Uptime
-------- -------------------- ------------------------------
MYDB 05-JUL-2012 21:42:24 32 days, 13 Hours
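A simpler alternative that doesn't depend on SID 1 being PMON is to read the instance start time directly:

select to_char(startup_time,'DD-MON-YYYY hh24:mi:ss') startup,
       trunc(sysdate - startup_time) || ' days' uptime
from v$instance;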
Top Waits by Object
SQL to Identify Objects Creating Cluster-wide bottlenecks (in the past 24 hours)
set linesize 140
set pagesize 50
col sample_time format a26
col event format a30
col object format a45
--col num_sql heading '# SQL' format 9,999
select
ash.sql_id,
-- count(distinct ash.sql_id) Num_SQL,
ash.event,
ash.current_obj#,
o.object_type,
o.owner||'.'||o.object_name||'.'||o.subobject_name object,
count(*)
from gv$active_session_history ash,
all_objects o
where ash.current_obj# = o.object_id
and ash.current_obj# != -1
and ash.event is not null
and ash.sample_time between sysdate - 1 and sysdate
-- and ash.sample_time between sysdate - 4 and sysdate - 3
-- and to_date ('24-SEP-2010 14:28:00','DD-MON-YYYY HH24:MI:SS') and to_date ('24-SEP-2010 14:29:59','DD-MON-YYYY HH24:MI:SS')
group by
ash.sql_id,
ash.event,
ash.current_obj#,
o.object_type,
o.owner||'.'||o.object_name||'.'||o.subobject_name
having count(*) > 20
order by count(*) desc
/
exit
/