Saturday, January 16, 2016

Optimizer hints can be used with SQL statements to alter execution plans. This chapter explains how to use hints to force various approaches.


/************|||  Hints for Optimization approach and goals|||***/
-- all_rows: optimize for best total throughput (cost-based).
select /*+ all_rows */ employee_id , last_name , first_name from
employees
order by employee_id;


-- first_rows: optimize for fastest return of the initial rows.
select /*+ first_rows */ employee_id , last_name , first_name from
employees
order by employee_id;


-- demo copy of EMPLOYEES (no statistics gathered at creation time).
create table emp2
as select * from employees;


-- NOTE(review): CHOOSE is an OPTIMIZER_MODE value, not a valid hint;
-- Oracle silently ignores unrecognized hints, so this runs as a plain query.
select /*+ choose */ employee_id , last_name , first_name from
emp2
order by employee_id;


-- NOTE(review): the RULE hint (rule-based optimizer) is deprecated since 10g.
select /*+ rule */ employee_id , last_name , first_name from
employees
order by employee_id;


/************|||  Hints for enabling optimizer features|||***/
-- Run this one statement with the optimizer behaving as in release 11.1.0.1.
select /*+ optimizer_features_enable('11.1.0.1')  */ employee_id , last_name , first_name from
employees
order by employee_id;


/************|||  Hints for access paths|||***/
-----full
-- baseline: no hint, the optimizer chooses the access path itself.
select  employee_id , last_name , first_name from
employees
order by employee_id;


-- full(e): force a full table scan of EMPLOYEES even though the
-- EMPLOYEE_ID range predicate could use the primary-key index.
select /*+ Full(e)*/ employee_id , last_name , first_name from
employees e where employee_id<300
order by employee_id;


---hash
-- hash(e): request hash-scan access (only meaningful for tables stored in
-- a hash cluster; otherwise the hint is ignored).
-- Fixed: the WHERE keyword was missing before the filter predicate.
select /*+ hash(e) */ employee_id, last_name, first_name
from employees e
where employee_id < 300
order by employee_id;


---index
-- index(e emp_department_ix): force use of the named index.
-- Fixed: the WHERE keyword was missing before the filter predicate.
select /*+ index(e emp_department_ix) */ employee_id, last_name, first_name
from employees e
where employee_id < 300
order by employee_id;


---index_asc
-- index_asc: scan the index entries in ascending order.
-- Fixed: the WHERE keyword was missing before the filter predicate.
select /*+ index_asc(e emp_department_ix) */ employee_id, last_name, first_name
from employees e
where employee_id < 300
order by employee_id;


---index_combine
-- index_combine: combine bitmap conversions of the listed indexes.
-- Fixed: the hint name was misspelled ("index_cobine") and the WHERE
-- keyword was missing. The original passed the column MANAGER_ID where an
-- index name is required; replaced with EMP_MANAGER_IX — verify against
-- the actual schema.
select /*+ index_combine(e emp_manager_ix emp_department_ix) */ employee_id, last_name, first_name
from employees e
where manager_id = 108 or department_id = 110
order by employee_id;


---index_join
-- index_join: answer the query by joining the listed indexes rather than
-- visiting the table.
-- Fixed: the WHERE keyword was missing; replaced the column name
-- EMPLOYEE_ID with the index EMP_EMP_ID_PK (hint arguments must be index
-- names) — verify against the actual schema.
select /*+ index_join(e emp_emp_id_pk emp_department_ix) */ employee_id, last_name, first_name
from employees e
where employee_id < 108 and department_id <= 110
order by employee_id;


---index_ffs
-- index_ffs: request an index fast full scan.
-- NOTE(review): EMPLOYEE_ID is a column, not an index name — the argument
-- should probably be EMP_EMP_ID_PK; with an unrecognized argument Oracle
-- simply ignores the hint. Verify against the schema.
select /*+ index_ffs( e employee_id)*/ employee_id , last_name , first_name from
employees e
order by employee_id;


---index_ss
-- index_ss: request an index skip scan.
-- Fixed: the statement used index_asc (copy/paste leftover), a misspelled
-- argument ("employee_id_id") and was missing the WHERE keyword. Also, the
-- HR sample data stores the value as 'King' — the lower-case literal
-- 'king' matches no rows in a case-sensitive comparison.
select /*+ index_ss(e emp_name_ix) */ employee_id, last_name, first_name
from employees e
where last_name = 'King'
order by employee_id;


---no_index
-- no_index(e): forbid any index access path on EMPLOYEES.
-- Fixed: the statement used index_asc instead of no_index (so it did not
-- demonstrate the section title) and was missing the WHERE keyword; the
-- HR sample data stores 'King', not 'king'.
select /*+ no_index(e) */ employee_id, last_name, first_name
from employees e
where last_name = 'King'
order by employee_id;


---use_hash
-- baseline join with no hint.
-- Fixed: the DEPARTMENTS alias d was missing even though the join
-- condition references d.department_id (ORA-00904); rewritten with an
-- explicit ANSI join.
select employee_id, last_name, first_name, department_name
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


-- use_hash(e d): request a hash join between the two tables.
select /*+ use_hash(e d) */ employee_id, last_name, first_name, department_name
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


---no_use_hash
-- baseline join with no hint.
-- Fixed: the DEPARTMENTS alias d was missing even though the join
-- condition references d.department_id.
select employee_id, last_name, first_name, department_name
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


-- Fixed: the hint is spelled NO_USE_HASH, not "use_no_hash" (an unknown
-- hint is silently ignored, so the original demonstrated nothing).
select /*+ no_use_hash(e d) */ employee_id, last_name, first_name, department_name
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


---use_nl
-- baseline join with no hint.
-- Fixed: the DEPARTMENTS alias d was missing even though the join
-- condition references d.department_id.
select *
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


-- use_nl(e d): request a nested-loops join.
select /*+ use_nl(e d) */ *
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


---use_nl_with_index
-- use_nl_with_index: nested loops with E as the inner table, probed
-- through the named index.
-- Fixed: the hint was missing its closing parenthesis (making the whole
-- comment malformed), the index name was misspelled
-- ("employee_department_indes"), and the DEPARTMENTS alias d was missing.
select /*+ use_nl_with_index(e emp_department_ix) */ *
from employees e
inner join departments d on e.department_id = d.department_id
where e.department_id <= 40
order by employee_id;


---use_merge
-- baseline join with no hint.
-- Fixed: the DEPARTMENTS alias d was missing even though the join
-- condition references d.department_id.
select *
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;


-- use_merge(e d): request a sort-merge join.
select /*+ use_merge(e d) */ *
from employees e
inner join departments d on e.department_id = d.department_id
order by employee_id;

---leading
-- baseline three-table join with no hint.
select *
from employees e
inner join departments d on e.department_id = d.department_id
inner join job_history j on e.hire_date = j.start_date
order by employee_id;


-- Fixed: there is no USE_LEADING hint; the join-order hint is LEADING.
-- leading(e j): start the join order with EMPLOYEES, then JOB_HISTORY.
select /*+ leading(e j) */ *
from employees e
inner join departments d on e.department_id = d.department_id
inner join job_history j on e.hire_date = j.start_date
order by employee_id;


-----parallel----
-- Fixed: all three statements used /* ... */ without the "+", so Oracle
-- treated them as plain comments and the hints were never applied.


-- let the optimizer compute the degree of parallelism.
select /*+ parallel(auto) */ * from emp1;


-- use the manually specified object/session DOP settings.
select /*+ parallel(manual) */ * from emp1;


-- force a degree of parallelism of 3 for this scan.
select /*+ parallel(e, 3) */ * from emp1 e;


-----parallel_index----
-- parallel_index(e, emp_id, 3): parallelize the scan of the named index.
-- Fixed: "+" was missing after /* (so the hint was a plain comment) and
-- the table had no alias "e" although the hint references one.
select /*+ parallel_index(e, emp_id, 3) */ * from emp1 e where employee_id<50;


-- set a default degree of parallelism of 4 on the table.
alter table empl parallel 4;


-- no_parallel(e): override the table-level DOP and run this query serially.
-- Fixed: "+" was missing after /*.
select /*+ no_parallel(e) */ * from empl e;


--------cache
-- full(j) cache(j): full-scan JOBS and keep its blocks at the MRU end of
-- the buffer cache (useful for small, frequently scanned tables).
-- Fixed: "/* +" is not a hint prefix — it must be "/*+" with no space,
-- otherwise the whole thing is an ordinary comment.
select /*+ full(j) cache(j) */ * from jobs j;


--------append
-- append: direct-path insert — writes above the high-water mark and
-- bypasses the buffer cache; the session must commit before it can query
-- the table again.
insert /*+ append */ into empl_bkp select * from emp;


----------result_cache
-- Fixed: header typo ("result_cahce") and both statements were missing
-- their terminators, so the second SELECT was parsed into the first.


-- first execution: computes the result set and stores it in the server
-- result cache.
select /*+ result_cache */ *
from employees e
inner join departments d on e.department_id = d.department_id
inner join job_history j on e.hire_date = j.start_date;

-- second execution: identical text, so the rows are served from the cache.
select /*+ result_cache */ *
from employees e
inner join departments d on e.department_id = d.department_id
inner join job_history j on e.hire_date = j.start_date;

====================


-- Units shipped per product / fulfillment-center / country over the last
-- 13 weeks, counted from shippable manifests with a shipping address.
-- Fixed: the literal after addr_type_code ended with a typographic quote
-- ('S‘), which is a syntax error; comma joins rewritten as ANSI joins and
-- the missing statement terminator added.
SELECT iu.ship_product_id AS part_number,
       iu.merge_id AS fc_code,
       oa.country_code,
       COUNT(*) AS quantity
FROM shipping.shippable sh
INNER JOIN shipping.order_address oa
    ON oa.order_id = sh.order_id
INNER JOIN shipping.item_unit iu
    ON iu.order_id = oa.order_id
-- last 13 weeks of manifests, measured from midnight GMT
WHERE sh.manifest_date > TRUNC (SYSTIMESTAMP AT TIME ZONE 'GMT') - (7 * 13)
AND oa.addr_type_code = 'S'
AND iu.merge_id IS NOT NULL
GROUP BY iu.ship_product_id, iu.merge_id, oa.country_code;

Creating an adhoc ASH (Active session history) report using timeframe
Posted by decipherinfosys on July 31, 2009
In one of our previous blog posts, we demonstrated how to generate an ad-hoc AWR report based on a given range of snap_id. As we are aware, the default timeframe for collecting snapshot data is every hour, and data is retained for up to 7 days. Now let us assume that a snapshot is taken every hour but we want statistics from within that one-hour period — say between 1:15 PM and 1:30 PM, or between 2:17 PM and 2:48 PM. How can we generate a report for such a time frame? Again we can fall back on the DBMS_WORKLOAD_REPOSITORY package. This package has quite a few procedures which can help us troubleshoot the problem. Keep in mind that Oracle always recommends using awrrpt.sql and ashrpt.sql to generate the AWR or ASH reports. In this blog post, we will see once again how we can get the data for a specific time range using the ASH_REPORT_TEXT procedure.
ASH data is sampled every second, and whenever an AWR snapshot is flushed to disk, content from v$active_session_history is also flushed to disk. If the system during that time is heavily utilized, only a sample of that data will be flushed to disk. Historical data is stored in another view called DBA_HIST_ACTIVE_SESS_HISTORY. Now let us get the data:

-- List AWR snapshots, newest first, to locate the snapshots covering the
-- window of interest. Fixed: missing statement terminator.
select dbid, instance_number ino, begin_interval_time, end_interval_time
from dba_hist_snapshot
order by begin_interval_time desc;
DBID    INO    BEGIN_INTERVAL_TIME        END_INTERVAL_TIME
2234839021    1    7/29/2009 5:00:46.793 PM    7/29/2009 5:30:37.953 PM
2234839021    1    7/29/2009 4:30:55.638 PM    7/29/2009 5:00:46.793 PM
2234839021    1    7/29/2009 4:00:01.491 PM    7/29/2009 4:30:55.638 PM
2234839021    1    7/29/2009 3:30:10.348 PM    7/29/2009 4:00:01.491 PM
Now, we know the value of dbid, instance_number and interval start time and interval end time. We can take this time as is and run report for the time range or we can run report for in between time range as well. In this case, we will run report for in between time range. But before that here is the signature of the procedure.
DBMS_WORKLOAD_REPOSITORY.ASH_REPORT_TEXT(
l_dbid       IN NUMBER,
l_inst_num   IN NUMBER,
l_btime      IN DATE,
l_etime      IN DATE,
l_options    IN NUMBER    DEFAULT 0,
l_slot_width IN NUMBER DEFAULT 0,
l_sid        IN NUMBER DEFAULT NULL,
l_sql_id     IN VARCHAR2  DEFAULT NULL,
l_wait_class IN VARCHAR2  DEFAULT NULL,
l_service_hash  IN NUMBER DEFAULT NULL,
l_module     IN VARCHAR2  DEFAULT NULL,
l_action     IN VARCHAR2  DEFAULT NULL,
l_client_id IN VARCHAR2  DEFAULT NULL)
RETURN awrrpt_text_type_table PIPELINED;
We already know the value for l_Dbid, l_instnum, l_btime and l_etime from the first sql. For rest of the parameters we will go with the default values. Here is the sql to run for a specific time range.

-- Generate an ad-hoc ASH report for an arbitrary 15-minute window that
-- does not line up with snapshot boundaries; all remaining parameters of
-- ash_report_text keep their defaults.
SELECT output
FROM
TABLE(dbms_workload_repository.ash_report_text
(2234839021,1,
to_Date('07/29/2009 16:10','MM/DD/YYYY HH24:MI'),
to_date('07/29/2009 16:25','MM/DD/YYYY HH24:MI'))
);

We can spool the output to the file from above sql and open it up as a text file to investigate the results. Here is the sample that confirms that report is generated for a specified time frame.
OUTPUT
Analysis Begin Time:   29-Jul-09 16:10:00
Analysis End Time:   29-Jul-09 16:25:00
Elapsed Time:     15.0 (mins)
Sample Count:      527
Average Active Sessions:     0.59
Avg. Active Session per CPU:     0.05
Report Target:   None specified
As mentioned earlier, the policy for collecting AWR data varies from company to company, as each company has its own set of standards to follow.


================================= AWR =====================

1. Example for the details of the snapshot with MIN AND MAX SNAP_ID
-- Min/max snapshot id per instance startup; startup_time changes on each
-- database restart, so each group is one uptime period.
SELECT startup_time, min(snap_id),Max(snap_id) FROM dba_hist_snapshot
GROUP BY startup_time
ORDER BY startup_time desc;

Note: Here startup_time is the database uptime.
2. Example for the details of the snapshot for SNAP_ID BETWEEN 12723 AND 13103
-- Snapshot details for an explicit snap_id range.
-- Fixed: "12790and" was missing the space before AND (syntax error).
select * from dba_hist_snapshot
where snap_id between 12790 and 12807
order by snap_id desc;
3. Example for the details of the snapshot with BEGIN AND END TIME INTERVAL
-- Snapshot details within an explicit begin/end time window.
-- Fixed: BEGIN/END_INTERVAL_TIME are TIMESTAMP columns — compare them via
-- TO_TIMESTAMP with an explicit format mask instead of relying on an
-- implicit string conversion, which breaks whenever NLS_TIMESTAMP_FORMAT
-- differs from the literal's layout.
SELECT * FROM dba_hist_snapshot
WHERE begin_interval_time > TO_TIMESTAMP('26-JUL-13 02.00.00 AM', 'DD-MON-RR HH.MI.SS AM')
AND end_interval_time < TO_TIMESTAMP('26-JUL-13 08.03.00 PM', 'DD-MON-RR HH.MI.SS AM')
ORDER BY snap_id;
STEPS:
1.       Use these above queries to gain access to the snap_id for the required time interval.
2.       Then,  cd $ORACLE_HOME/rdbms/admin
3.       ls awr* (there should be a sql file named awrrpt.sql)
4.       Now, connect to sqlplus using utility.
5.       SQL> start awrrpt.sql
6.       Now, it will prompt to choose the report type. Enter for html format.
7.       Again, it will prompt to choose the no. of days to list the awr.
The snapshot of the database will be listed as below.
Instance DB Name     Snap Id Snap Started   Snap Level
------------ ------------ --------- ------------------ --------- ----------------
Also, we can use example 3 to list out the snap_id and enter the values.
Enter value for begin_snap:
Enter value for end_snap:
8.       Now, the required file will be created in the same folder $ORACLE_HOME/rdbms/admin
Eg. awrrpt_1_12790_12807.html


===========
--sql_id and cpu usage: top 10 statements by on-CPU ASH samples over the
--last 5 minutes.
-- Fixed: SESSION_STATE stores 'ON CPU' / 'WAITING' in upper case, so the
-- original DECODE arms ('On CPU', 'waiting') never matched and both sums
-- were always 0; garbled column alias repaired, positional ORDER BY
-- replaced with the alias, and the missing terminator added.
select * from (
select sql_id, inst_id,
sum(decode(vash.session_state, 'ON CPU', 1, 0)) as "number on CPU",
sum(decode(vash.session_state, 'WAITING', 1, 0)) as "number waiting"
from gv$active_session_history vash
where sample_time > sysdate - 5/(60*24)
group by sql_id, inst_id
order by "number on CPU" desc
)
where rownum < 11;

-- take a manual AWR snapshot (the line was previously bare, uncommented text)
exec dbms_workload_repository.create_snapshot;

---------
-- top 10 sql_id by ASH sample count (~DB time in seconds, since ASH
-- samples active sessions once per second) over the last 10 minutes.
-- Fixed: the title line "top 10 sqlid" was bare, uncommented text, which
-- is a syntax error when the script is executed.
select * from (
select nvl(sql_id, 'null') as sql_id,
sum(1) as "DBTIME in seconds"
from v$active_session_history
where sample_time > sysdate - 10/(24*60)
group by sql_id
order by 2 desc)
where rownum < 11;

--sql_id and cpu usage (repeat of the query above; gv$ covers all RAC
--instances).
-- Fixed: SESSION_STATE values are upper case ('ON CPU' / 'WAITING'), so
-- the original DECODE arms never matched; alias and terminator repaired.
select * from (
select sql_id, inst_id,
sum(decode(vash.session_state, 'ON CPU', 1, 0)) as "number on CPU",
sum(decode(vash.session_state, 'WAITING', 1, 0)) as "number waiting"
from gv$active_session_history vash
where sample_time > sysdate - 5/(60*24)
group by sql_id, inst_id
order by "number on CPU" desc
)
where rownum < 11;

-- Inspect up to 10 raw ASH sample rows for one statement of interest.
-- Fixed: missing statement terminator.
select * from v$active_session_history
where sql_id = 'ct28zg2rf03m0'
and rownum < 11;


==================
1.    event 10053
2.    explain plan , dbms_xplan, auto trace
3.    sql trace(event 10046) and tkprof
4.    method-R,  Trace Analyzer(TRCA)
5.    Active session History(ASH)
6.    Active Monitor Report
7.    DBMS_SQLDIAG and SQL tuning Advisor
8.    SQLTXPLAIN (SQLT) and SQL Health-Check (SQLHC)

--------Tool for the sql tuning
1.         event 10053
2.         explain plan , dbms_xplan, auto trace
3.         sql trace(event 10046) and tkprof
4.         method-R,  Trace Analyzer(TRCA)
5.         Active session History(ASH)
6.         Active Monitor Report
7.         DBMS_SQLDIAG and SQL tuning Advisor
8.         SQLTXPLAIN (SQLT) and SQL Health-Check (SQLHC)
---------sqlT Main Methods
All act on one SQL statement
1.                   SQLT XTRACT
2.                   SQLT XECUTE
3.                   SQLT XTRXEC
4.                   SQLT XPLAIN
5.                   SQLT XTRSBY