sql plan baseline


-- relevant initialization parameters
optimizer_capture_sql_plan_baselines=FALSE
optimizer_use_sql_plan_baselines=TRUE
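
the parameters can also be set and checked at the instance level, e.g.:

alter system set optimizer_capture_sql_plan_baselines=false scope=both;
alter system set optimizer_use_sql_plan_baselines=true scope=both;

show parameter sql_plan_baselines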


-- create tuning set
BEGIN
  dbms_sqltune.create_sqlset(
      sqlset_name => 'STS1', 
      sqlset_owner => 'SYSTEM');
END;
/

-- populate tuning set from cursor cache
DECLARE
  stscur dbms_sqltune.sqlset_cursor;
BEGIN
  
  open stscur for
    select value(p)
    from   table(dbms_sqltune.select_cursor_cache(
                   basic_filter      => 'sql_id = ''...''',
                   object_filter     => null,
                   ranking_measure1  => null,
                   ranking_measure2  => null,
                   ranking_measure3  => null,
                   result_percentage => null,
                   result_limit      => null,
                   attribute_list    => 'ALL')) P;
                   
  DBMS_SQLTUNE.LOAD_SQLSET(sqlset_name => 'STS1',
                           populate_cursor => stscur,
                           sqlset_owner    => 'SYSTEM');
END;
/

-- populate tuning set from AWR
DECLARE
  cur DBMS_SQLTUNE.SQLSET_CURSOR;
BEGIN  
  OPEN cur FOR
    SELECT VALUE(P)
      FROM table(
        DBMS_SQLTUNE.SELECT_WORKLOAD_REPOSITORY(
                    begin_snap        => 1234,
                    end_snap          => 1235,
                    basic_filter      => 'sql_id = ''...''',
                    object_filter     => NULL, 
                    ranking_measure1  => NULL,
                    ranking_measure2  => NULL,
                    ranking_measure3  => NULL,
                    result_percentage => NULL,
                    result_limit      => NULL,
                    attribute_list    => 'ALL')) P;

  DBMS_SQLTUNE.LOAD_SQLSET(sqlset_name => 'STS1',
                           populate_cursor => cur,
                           sqlset_owner    => 'SYSTEM');
END;
/

-- select plans in tuning set
SELECT
     first_load_time,
     executions as execs,
     parsing_schema_name,
     elapsed_time / 1000000 as elapsed_time_secs,
     cpu_time / 1000000 as cpu_time_secs,
     buffer_gets,
     disk_reads,
     direct_writes,
     rows_processed,
     fetches,
     optimizer_cost,
     sql_plan,
     plan_hash_value,
     sql_id,
     sql_text
   FROM TABLE(DBMS_SQLTUNE.SELECT_SQLSET(
                   sqlset_name  => 'STS1',
                   sqlset_owner => 'SYSTEM'))
;


-- create baseline using the plan in the tuning set
DECLARE
  my_plans pls_integer;
BEGIN
  my_plans := DBMS_SPM.LOAD_PLANS_FROM_SQLSET(
       sqlset_name  => 'STS1', 
       sqlset_owner => 'SYSTEM',
       basic_filter=>'plan_hash_value = ''1239572551'''
     );
 dbms_output.put_line(my_plans);                           
END;
/

-- check baselines
select sql_handle, 
       plan_name, 
       enabled, 
       accepted, 
       sql_text
  from dba_sql_plan_baselines
 where sql_text like '%...%'
;
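
to see the stored plan and to confirm the baseline is picked up at runtime
(sql_handle and sql_id values are placeholders):

select *
  from table(dbms_xplan.display_sql_plan_baseline(
                sql_handle => 'SQL_a...',
                format     => 'basic'));

-- sql_plan_baseline in v$sql is populated when a baseline plan is used
select sql_id, child_number, sql_plan_baseline
  from v$sql
 where sql_id = '...';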

-- accept plan for baseline (evolve verifies the plan and, if successful, accepts it)
DECLARE
  report clob;
BEGIN
  report := dbms_spm.evolve_sql_plan_baseline(
    sql_handle => 'SQL_a...', 
    plan_name  => 'SQL_PLAN_a...');
  dbms_output.put_line(substr(report, 1, 4000));
END;
/
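
from 12c the evolve step can also be run through the task api; a minimal
sketch, assuming the same sql_handle (task and execution names are generated):

DECLARE
  tname varchar2(128);
  ename varchar2(128);
  rep   clob;
BEGIN
  tname := dbms_spm.create_evolve_task(sql_handle => 'SQL_a...');
  ename := dbms_spm.execute_evolve_task(task_name => tname);
  rep   := dbms_spm.report_evolve_task(task_name => tname);
  dbms_output.put_line(substr(rep, 1, 4000));
  -- to accept the verified plans:
  -- plans_accepted := dbms_spm.implement_evolve_task(task_name => tname);
END;
/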

unplug and plug PDB

alter pluggable database pdb1 close instances=all;
alter pluggable database pdb1 unplug into '/tmp/pdb1.xml';
drop pluggable database pdb1 keep datafiles;
create pluggable database pdb1 using '/tmp/pdb1.xml' NOCOPY TEMPFILE REUSE;
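
after plugging back in, the pdb still has to be opened and checked, e.g.:

alter pluggable database pdb1 open instances=all;
select name, open_mode from v$pdbs where name = 'PDB1';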

transport sql tuning sets from 11g to 12c

1. create sql tuning set
   (see the DBMS_SQLTUNE.CREATE_SQLSET example above)

2. add sql to the tuning set from AWR
   (see the SELECT_WORKLOAD_REPOSITORY example above)

3. check sql tuning set
   SELECT SQL_ID, PARSING_SCHEMA_NAME AS "SCH", SQL_TEXT, 
          ELAPSED_TIME AS "ELAPSED", BUFFER_GETS
   FROM   TABLE( DBMS_SQLTUNE.SELECT_SQLSET( 
                    sqlset_name =>  '<sts_name>', 
                    sqlset_owner => '<sts_owner>' ) );

4. create staging table
   BEGIN
     DBMS_SQLTUNE.CREATE_STGTAB_SQLSET ( 
       table_name  => '<staging_table_name>',   
       schema_name => '<schema>',   
       db_version  => DBMS_SQLTUNE.STS_STGTAB_11_2_VERSION 
     );
   END;
   /

5. pack the tuning set
   BEGIN
     DBMS_SQLTUNE.PACK_STGTAB_SQLSET (      
       sqlset_name          => '<sts_name>', 
       sqlset_owner         => '<sts_owner>',   
       staging_table_name   => '<staging_table_name>',   
       staging_schema_owner => '<schema>',   
       db_version           => DBMS_SQLTUNE.STS_STGTAB_11_2_VERSION 
     );
   END;
   / 

6. remap con_dbid (needed when importing into a 12c CDB/PDB)
   BEGIN
     DBMS_SQLTUNE.REMAP_STGTAB_SQLSET (
       staging_table_name   => '<staging_table_name>',   
       staging_schema_owner => '<schema>',   
       old_con_dbid         => 1234,   
       new_con_dbid         => 5678
     );
   END;
   /
7. expdp/impdp the staging table from the source to the target database
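   for example (placeholder names; assumes a DATA_PUMP_DIR directory object
   exists on both databases):

   expdp system directory=DATA_PUMP_DIR dumpfile=sts_stg.dmp \
         logfile=sts_exp.log tables=<schema>.<staging_table_name>

   impdp system directory=DATA_PUMP_DIR dumpfile=sts_stg.dmp \
         logfile=sts_imp.log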

8. unpack the tuning set from the staging table
   BEGIN
     DBMS_SQLTUNE.UNPACK_STGTAB_SQLSET (
       sqlset_name        => '%',   
       replace            => true,   
       staging_table_name => '<staging_table_name>');
   END;
   /
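
   -- the unpacked sets can then be checked on the target
   select name, owner, created, statement_count
     from dba_sqlset;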

certificates, keystores, wallets

certificates can be imported into an oracle wallet or a java keystore.

option 1: import certificates into an oracle wallet

orapki wallet create -wallet <wallet_location> 

orapki wallet add 
  -wallet <wallet_location> 
  -trusted_cert -cert <certificate_location>

orapki wallet display -wallet <wallet_location> 

./okvutil upload -l "<wallet_location>" 
  -t wallet 
  -g "<virtual_wallet_name>"

option 2: import certificates into a java keystore

keytool 
  -import 
  -trustcacerts 
  -alias <alias_name> 
  -file <cert_filename.crt> 
  -keystore <filename.jks>

keytool -list -v -keystore <filename.jks>

./okvutil upload -l "<keystore_location>" 
  -t jks 
  -g "<virtual_wallet_name>"

create materialized view with prebuilt table

-- step 1: create the table (same name and column structure as the mview query below)
create table scott.weekly_orders
...

-- step 2: create materialized view
create materialized view scott.weekly_orders
  ON PREBUILT TABLE WITHOUT REDUCED PRECISION
  USING INDEX 
  REFRESH COMPLETE ON DEMAND
  USING DEFAULT LOCAL ROLLBACK SEGMENT
  USING ENFORCED CONSTRAINTS DISABLE QUERY REWRITE
as
select ...
from scott.orders
where ...
;
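
-- a complete refresh can then be run on demand, e.g.
BEGIN
  dbms_mview.refresh(list => 'SCOTT.WEEKLY_ORDERS', method => 'C');
END;
/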

duplicate database to another host

pre-duplicate

alter system set cluster_database=false scope=spfile sid='*';

duplicate

-- shutdown auxiliary database
-- startup nomount

-- rman script
run
{
}
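
-- the run block above would typically contain something like this
-- (a hedged sketch: backup-based duplicate through the catalog, placeholder names)
run
{
  allocate auxiliary channel aux1 device type disk;
  duplicate database <source_db_name> to <new_db_name> nofilenamecheck;
}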

-- run command
rman catalog rcat/rcat@rcatdb auxiliary / msglog= cmdfile=

post-duplicate

alter system set cluster_database=true scope=spfile sid='*';

create physical standby in oracle 12c

prepare standby
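
typically this is a checklist rather than exact commands (a hedged sketch, placeholder names):

-- copy the password file from the primary host to the standby host
-- add a static entry for the standby to the standby listener.ora
-- add tnsnames.ora entries for primary and standby on both hosts
-- create a minimal init.ora (db_name only), then start the instance:
startup nomount pfile='init<stdby>.ora';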

prepare primary
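
on the primary, the usual preparation is roughly the following (sizes and names are placeholders):

alter database force logging;
alter database add standby logfile thread 1 group 10 size 200m;
alter database add standby logfile thread 1 group 11 size 200m;
alter system set log_archive_config='dg_config=(prim,stdby)' scope=both;
alter system set standby_file_management='AUTO' scope=both;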

rman duplicate

-- backup primary
run
{
sql "alter system switch logfile";
sql "alter system checkpoint";
backup database;
backup archivelog all delete input;
backup current controlfile for standby;
}

-- duplicate to auxiliary
run
{
  duplicate target database for standby nofilenamecheck dorecover;
}

dataguard broker setup
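
a minimal sketch, assuming a primary prim and a physical standby stdby (placeholder names):

-- on both databases
alter system set dg_broker_start=true scope=both;

-- dgmgrl, connected to the primary as sysdba
create configuration 'dgconf' as primary database is 'prim' connect identifier is prim;
add database 'stdby' as connect identifier is stdby maintained as physical;
enable configuration;
show configuration;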

enable/disable log transport
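
with the broker in place, redo transport from the primary can be toggled, e.g. (same placeholder names):

-- dgmgrl
edit database 'prim' set state='TRANSPORT-OFF';
edit database 'prim' set state='TRANSPORT-ON';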

validation
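
useful broker checks (12c+, same placeholder names):

-- dgmgrl
show configuration verbose;
validate database 'stdby';
show database 'stdby';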