Showing posts with label 11g. Show all posts
Showing posts with label 11g. Show all posts

Friday, July 15, 2016


MEMORY USED BY SQL STATEMENT

-- PGA memory allocated per ASH sample for a given SQL_ID (AWR history).
-- NOTE: dba_hist_active_sess_history is sampled, so pga_allocated is a
-- point-in-time snapshot per session sample, not a cumulative total.
-- FIX: added ORDER BY so the biggest consumers come first, matching the
-- sample output below (the original relied on accidental row order).
select machine,
       to_char(sql_exec_start, 'DD-MON-YY HH24:MI:SS') runtime,
       pga_allocated
  from dba_hist_active_sess_history
 where sql_id = '&sql_id'
 order by pga_allocated desc;


EXAMPLE OUTPUT:

MACHINE                   RUNTIME                 PGA_ALLOCATED

------------------------- -------------------- ----------------
MYPCNAME                  13-JUL-16 16:18:40        807,153,664
MYPCNAME                  13-JUL-16 16:18:40        329,003,008
MYPCNAME                  13-JUL-16 16:18:40        274,280,448
MYPCNAME                  13-JUL-16 16:18:40        215,887,872
MYPCNAME                  13-JUL-16 16:18:40        204,615,680

Tuesday, September 22, 2015

Top 10 - Biggest Tables and Indexes

Top 10 - Biggest Tables and Indexes

set linesize 170
set pagesize 20
column owner      format a20
column object     format a30
column type       format a6
column tablespace format a20
column size_in_gb format 999,999.9
-- Ten largest segments outside SYS/SYSTEM, biggest first.
-- Inner ORDER BY + outer ROWNUM filter is the pre-12c top-N idiom.
SELECT *
  FROM (SELECT owner                                owner,
               segment_name                         object,
               segment_type                         type,
               tablespace_name                      tablespace,
               ROUND(bytes / 1024 / 1024 / 1024, 1) size_in_gb
          FROM dba_segments
         WHERE owner NOT IN ('SYS', 'SYSTEM')
         ORDER BY bytes DESC)
 WHERE ROWNUM < 11
 ORDER BY size_in_gb DESC;

SQL Outlines or SQL Profiles

SQL OUTLINES or PROFILES

set lines 170
set pages 100
col created      format a14
col type         format a6
col status       format a10
col fm           format a3
col profile_name format a30
col sql_text     format a50
col comp_data    format a50
-- SQL profiles plus their stored hint payload (comp_data) from the
-- internal attribute table.
-- FIX: implicit comma join replaced with an explicit ANSI INNER JOIN and
-- every column qualified, so it is clear which table supplies what.
SELECT prof.created,
       prof.type,
       prof.status,
       prof.force_matching fm,
       attr.profile_name,
       prof.sql_text,
       attr.comp_data
  FROM dba_sql_profiles prof
       INNER JOIN dbmshsxp_sql_profile_attr attr
               ON attr.profile_name = prof.name
 ORDER BY prof.status, prof.created DESC;

Parallel Query Processes - Parent / Child Details

Parallel Process Details including Master/Slave relationships

SQL
set lines 200
set pages 100
col username   format a10
col qcslave    format a10
col slaveset   format a8
col program    format a30
col sid        format a5
col slvinst    format a7
col state      format a8
col waitevent  format a30
col qcsid      format a5
col qcinst     format a6
col reqdop     format 999
col actdop     format 999
col secelapsed format 999,999
-- Parallel query coordinator (QC) and slave sessions, grouped per QC.
-- Slaves are indented under their coordinator via the first DECODE.
-- Outer joins (+) keep PX sessions visible even when the matching
-- v$session / v$px_process row is missing; the inner join to
-- gv$session_wait then supplies the current wait state.
-- NOTE(review): DECODE(px.server_set,'',...) relies on Oracle treating
-- '' as NULL and DECODE matching NULL to NULL, so elapsed seconds show
-- only on QC rows (server_set is NULL for the coordinator).
-- FIX: the original statement had no terminator, so SQL*Plus never ran
-- it; a trailing "/" has been added. Query text is otherwise unchanged.
SELECT DECODE(px.qcinst_id,NULL,username, ' - '||LOWER(SUBSTR(pp.SERVER_NAME,LENGTH(pp.SERVER_NAME)-4,4) ) ) USERNAME, 
       DECODE(px.qcinst_id,NULL, 'QC', '(Slave)') "QCSLAVE" ,
       TO_CHAR( px.server_set) SLAVESET, 
       s.program PROGRAM, 
       TO_CHAR(s.SID) SID,
       TO_CHAR(px.inst_id) SLVINST, 
       DECODE(sw.state,'WAITING', 'WAIT', 'NOT WAIT' ) STATE,
       CASE  sw.state WHEN 'WAITING' THEN SUBSTR(sw.event,1,30) ELSE NULL END WAITEVENT ,
       DECODE(px.qcinst_id, NULL ,TO_CHAR(s.SID) ,px.qcsid) QCSID,
       TO_CHAR(px.qcinst_id) QCINST, 
       px.req_degree REQDOP, 
       px.DEGREE ACTDOP,
       DECODE(px.server_set,'',s.last_call_et,'') SECELAPSED
  FROM gv$px_session px, 
       gv$session s, 
       gv$px_process pp, 
       gv$session_wait sw
 WHERE px.SID=s.SID (+)
   AND px.serial#=s.serial#(+)
   AND px.inst_id = s.inst_id(+)
   AND px.SID = pp.SID (+)
   AND px.serial#=pp.serial#(+)
   AND sw.SID = s.SID
   AND sw.inst_id = s.inst_id
 ORDER BY DECODE(px.QCINST_ID,  NULL, px.INST_ID,  px.QCINST_ID), 
          px.QCSID,
          DECODE(px.SERVER_GROUP, NULL, 0, px.SERVER_GROUP), 
          px.SERVER_SET, 
          px.INST_ID
/



Monday, April 15, 2013

Got a Large SGA?, USE HugePages



3 Key Reasons:

  1. Oracle "Strongly recommends always deploying HugePages with Oracle Databases"

  2. Reduces page-table overhead by a factor of 512 (each 2 MB huge page replaces 512 standard 4 KB pages)

  3. 4 KB vs. 2 MB memory pages: the same SGA is mapped with far fewer page-table entries and less overhead

Great video from OpenWorld DemoGrounds


HOW TO

Do steps 1-4 on EACH NODE


1. Set Oracle User memlock Limits
---------------------------------
NOTE: Value should be 90% of total memory
      <value> is in kb (ex. 200 gb = 209715200 kb)

As Root 
vi /etc/security/limits.conf

oracle soft memlock <value>
oracle hard memlock <value>


2. Set kernel.shmmax
--------------------
NOTE: Value should be comfortably larger than the largest SGA

As Root
get existing value:
 cat /proc/sys/kernel/shmmax
set new value:
 vi /etc/sysctl.conf
 AND
 echo <value in bytes> > /proc/sys/kernel/shmmax
 AND
 sysctl -p

verify new value
 cat /proc/sys/kernel/shmmax


3. Set kernel.shmall
--------------------
NOTE: Value should be sum of SGA's / pagesize 

Get pagesize
 getconf PAGESIZE
Determine sum of SGAs
 ipcs -m |grep oracle, sum of 5th column values + future SGAs

example: 
pagesize= 4,096
SGA's   = 92gb + 58gb growth = 161061273600 bytes
SGA's / pagesize = 39,321,600
kernel.shmall should be 39321600

get existing value:
 cat /proc/sys/kernel/shmall
set new value:
 vi /etc/sysctl.conf
 AND
 echo <value> > /proc/sys/kernel/shmall 
 AND
 sysctl -p
verify new value
 cat /proc/sys/kernel/shmall


4. Set vm.nr_hugepages
----------------------
NOTE: Use recommended value returned from hugepages_settings.sh
get the script here

As Root, add the following to /etc/sysctl.conf

 # Hugepages
 # Allow oracle user access to hugepages
 vm.hugetlb_shm_group = 310
 vm.nr_hugepages = <value>

AND run

sysctl -p

verify setting:
 cat /proc/sys/vm/nr_hugepages


5. Set use_large_pages=ONLY in spfile (11g only)
------------------------------------------------
 For Each DATABASE
  alter system set use_large_pages=ONLY scope=spfile;


6. Restart Database(s)
----------------------
 Verify huge pages in use (alert log)
 Run ipcs -m (should only have a couple segments per database)

Tuesday, January 29, 2013

DML Activity by Object (including statistics)


This is a helpful script to see DML activity on tables.

By default, the report lists objects whose "Updates" column is over 5% — good candidates for gathering fresh statistics.

First, flush DML activity stats to dba_tab_modifications:

exec dbms_stats.flush_database_monitoring_info

set linesize 140
set pagesize 50
col table_partition heading 'Table.Partition' format a40
col analyzed heading 'Last Analyzed'
col num_rows heading '# Rows' format 99,999,999,999
col tot_updates heading 'Total DMLs' format 99,999,999,999
col truncd heading 'Truncated?'
col pct_updates heading '%|Updates' format 999.99
col ts heading 'Last DML'
-- DML activity per table/partition since the last stats gather, for one
-- schema.  Rows with more than 5% churn are reported.
-- Run "exec dbms_stats.flush_database_monitoring_info" first so
-- dba_tab_modifications is current.
-- FIX: the original ran TWO correlated scalar subqueries against
-- dba_tables for every modification row; replaced with a single LEFT
-- JOIN (outer, to preserve rows with no dba_tables match, as the scalar
-- subqueries did by returning NULL).  The unreachable "/" after "exit"
-- was removed.
select table_name || decode(partition_name, null, '', '.' || partition_name) table_partition,
       to_char(last_analyzed, 'MM/DD/YY HH24:MI') analyzed,
       num_rows,
       tot_updates,
       to_char(timestamp, 'MM/DD/YY HH24:MI') ts,
       to_number(perc_updates) pct_updates,
       decode(truncated, 'NO', '', 'Yes       ') truncd
  from (select a.*,
               -- -1 flags "no/zero stats" so those rows fall out of the
               -- perc_updates filter below
               nvl(decode(num_rows, 0, '-1', 100 * tot_updates / num_rows), -1) perc_updates
          from (select t.num_rows                          num_rows,
                       t.last_analyzed                     last_analyzed,
                       (m.inserts + m.updates + m.deletes) tot_updates,
                       m.*
                  from sys.dba_tab_modifications m
                       left join dba_tables t
                              on t.table_name = m.table_name
                             and t.owner      = m.table_owner
               ) a
       ) b
 where perc_updates > 5
   and table_owner = '&SCHEMA'
 order by last_analyzed desc
/
exit


SAMPLE OUTPUT:

Table.Partition      Last Analyzed           # Rows      Total DMLs Last DML       Updates Truncated?
-------------------- -------------- --------------- --------------- -------------- ------- ----------
AQ$_QUEUE_TABLES     03/12/13 14:32              11               1 03/12/13 14:36    9.09


SQL Query for Oracle Redo Log Files


Handy SQL when manipulating logfile groups

set pagesize 30
set linesize 100
column member  format a60
column status  format a10
column size_MB format '999,999'
-- Online redo log groups: thread, member count, status, size.
SELECT group#,
       thread#,
       members,
       status,
       bytes / 1024 / 1024 size_MB
  FROM v$log;
-- Individual members of each log group.
SELECT group#,
       member
  FROM v$logfile
 ORDER BY group#;


-- Standby redo logs (Data Guard).
SELECT group#,
       thread#,
       status,
       bytes / 1024 / 1024 size_MB
  FROM v$standby_log;
-- Handy related commands, left commented out:
--show parameter db_create_online_log_dest
--alter system switch all logfile;
--alter database add logfile thread &thread group &group size &size;
--alter database drop logfile group &group;
--alter database drop logfile member '&member';


Sample Output:
    GROUP#    THREAD#    MEMBERS STATUS      SIZE_MB
---------- ---------- ---------- ---------- --------
         1          1          2 INACTIVE      4,096
         2          1          2 INACTIVE      4,096
         3          1          2 CURRENT       4,096
         6          2          2 INACTIVE      4,096
         7          2          2 CURRENT       4,096
         8          2          2 INACTIVE      4,096



    GROUP# MEMBER
---------- ------------------------------------------------------------
         1 +MY_DATA/mydb/onlinelog/group_1.324.807662009
         1 +MY_FRA/mydb/onlinelog/group_1.2611.807662031
         2 +MY_DATA/mydb/onlinelog/group_2.336.807662061
         2 +MY_FRA/mydb/onlinelog/group_2.2601.807662085
         3 +MY_DATA/mydb/onlinelog/group_3.321.807662115
         3 +MY_FRA/mydb/onlinelog/group_3.2607.807662137
...snip...


Top Waits by Object


SQL to Identify Objects Creating Cluster-wide bottlenecks (in the past 24 hours)
set linesize 140
set pagesize 50
col sample_time format a26
col event format a30
col object format a45
--col num_sql heading '# SQL' format 9,999
-- Objects behind the heaviest cluster-wide waits in recent ASH samples.
-- NOTE(review): gv$active_session_history only retains what is still in
-- the in-memory ASH buffer, so the window may be shorter than 24 hours.
-- FIX: implicit comma join replaced with an ANSI INNER JOIN; the
-- unreachable "/" after "exit" was removed.
select ash.sql_id,
--       count(distinct ash.sql_id) Num_SQL,
       ash.event,
       ash.current_obj#,
       o.object_type,
       o.owner || '.' || o.object_name || '.' || o.subobject_name object,
       count(*)
  from gv$active_session_history ash
       inner join all_objects o
               on o.object_id = ash.current_obj#
 where ash.current_obj# != -1
   and ash.event is not null
   and ash.sample_time between sysdate - 1 and sysdate
--   and ash.sample_time between sysdate - 4 and sysdate - 3
--   and to_date ('24-SEP-2010 14:28:00','DD-MON-YYYY HH24:MI:SS') and to_date ('24-SEP-2010 14:29:59','DD-MON-YYYY HH24:MI:SS')
 group by ash.sql_id,
          ash.event,
          ash.current_obj#,
          o.object_type,
          o.owner || '.' || o.object_name || '.' || o.subobject_name
having count(*) > 20
 order by count(*) desc
/
exit

Top 5 Timed Wait Events


SQL: (11.1 db required, min)

set feedback off
set verify off
col dbid new_value v_dbid
col min_snap new_value v_min_snap
col max_snap new_value v_max_snap
set termout off
-- Capture dbid and the AWR snapshot range covering the last 24 hours
-- (instance 1 only; change instance_number for other RAC nodes).
-- FIX: the original filtered on to_date(sysdate - 1), an implicit
-- date -> varchar -> date round trip that is NLS-dependent and drops
-- the time portion; compare directly against sysdate - 1 instead.
select (select dbid from v$database) dbid,
       1,
       min(dhs.snap_id) min_snap,
       max(dhs.snap_id) max_snap
  from dba_hist_snapshot dhs
 where dhs.end_interval_time >= sysdate - 1
   and dhs.instance_number = 1
 group by dbid
/
set termout on
set heading off
-- Extract only the "Top 5 Timed Foreground Events" section (header line
-- plus the next 10 lines) from the full text AWR report.
WITH aa AS
 (SELECT output, ROWNUM r
    FROM table(DBMS_WORKLOAD_REPOSITORY.awr_report_text(&v_dbid, 1, &v_min_snap, &v_max_snap)))
SELECT output top_five
  FROM aa,
       (SELECT r
          FROM aa
         WHERE output LIKE 'Top 5 Timed Foreground Events%') bb
 WHERE aa.r BETWEEN bb.r AND bb.r + 10
 ORDER BY bb.r
/
set heading on
prompt
prompt
exit

Sample Output:
Top 5 Timed Foreground Events
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                                                           Avg
                                                          wait   % DB
Event                                 Waits     Time(s)   (ms)   time Wait Class
------------------------------ ------------ ----------- ------ ------ ----------
log file sync                    15,848,089     144,008      9   30.0 Commit
DB CPU                                          122,035          25.5
direct path read                    600,016     111,336    186   23.2 User I/O
db file sequential read           4,367,904      32,126      7    6.7 User I/O
enq: TM - contention                     49      18,344 4.E+05    3.8 Applicatio