Thursday, February 21, 2008

Oracle 11g Interval Partitioning

New in 11g is Interval Partitioning. This extends the functionality of range partitioning by letting you define equal partitions using an interval definition. When using Interval partitioning Oracle will automatically create new partitions as they are needed. Oracle creates the new partition at the time of the first record insert for the new partition. This greatly helps the manageability of partitioned tables by saving the DBA from having to manually create new partitions.

The new interval partitioning valid combinations are Interval, Interval-List, Interval-Hash and Interval-Range.

This will work well for range partitioning where partitioning was done based on date ranges.

Original Range Partitioning Example:

-- Original range-partitioned table: one partition per hire year.
-- Each new year requires the DBA to manually add a new partition.
-- NOTE: the partition definitions must be comma-separated -- the
-- original listing omitted the commas, which raises ORA-00922.
CREATE TABLE mrmessin.emp
(
EMPNO NUMBER(4),
ENAME VARCHAR2(30),
JOB VARCHAR2(20),
MGR NUMBER(4),
HIREDATE DATE,
SAL NUMBER(7,2),
COMM NUMBER(7,2),
DEPTNO NUMBER(2)
)
PARTITION BY RANGE (hiredate)
( PARTITION part_1999 VALUES LESS THAN (TO_DATE('01-JAN-2000','DD-MON-YYYY')),
PARTITION part_2000 VALUES LESS THAN (TO_DATE('01-JAN-2001','DD-MON-YYYY')),
PARTITION part_2001 VALUES LESS THAN (TO_DATE('01-JAN-2002','DD-MON-YYYY')),
PARTITION part_2002 VALUES LESS THAN (TO_DATE('01-JAN-2003','DD-MON-YYYY')),
PARTITION part_2003 VALUES LESS THAN (TO_DATE('01-JAN-2004','DD-MON-YYYY'))
) ;

The new interval partitioning can simplify the creation of the table and then partitions created automatically as data is added that would go into the new partitions.

New Interval Range Partitioning Example:

-- Interval-partitioned version of the same table: only the first
-- (transition-point) partition is declared; Oracle creates one new
-- yearly partition automatically on first insert past the boundary.
CREATE TABLE mrmessin.emp
(
EMPNO    NUMBER(4),
ENAME    VARCHAR2(30),
JOB      VARCHAR2(20),
MGR      NUMBER(4),
HIREDATE DATE,
SAL      NUMBER(7,2),
COMM     NUMBER(7,2),
DEPTNO   NUMBER(2)
)
PARTITION BY RANGE (hiredate)
-- one-year interval: each auto-created partition covers one hire year
INTERVAL (NUMTOYMINTERVAL(1,'YEAR'))
(
PARTITION part_1999 VALUES LESS THAN (TO_DATE('01-JAN-2000','DD-MON-YYYY'))
) ;

Now let's go through an example to see if the partitions are created as the data is inserted. This will allow us to see how the partitions are created and how Oracle names them. We will also see if we can rename the new partitions to better names after the fact.

SQL> -- drop the table if it already exists
SQL> drop table mrmessin.emp ;

Table dropped.

SQL>
SQL> -- create a interval partition table.
SQL> -- we will use the definition of the scott.emp table
SQL> CREATE TABLE mrmessin.emp
2 (
3 EMPNO NUMBER(4),
4 ENAME VARCHAR2(30),
5 JOB VARCHAR2(20),
6 MGR NUMBER(4),
7 HIREDATE DATE,
8 SAL NUMBER(7,2),
9 COMM NUMBER(7,2),
10 DEPTNO NUMBER(2)
11 )
12 PARTITION BY RANGE (hiredate)
13 INTERVAL (NUMTOYMINTERVAL(1,'YEAR'))
14 (
15 PARTITION part_1999 values LESS THAN (TO_DATE('01-JAN-2000','DD-MON-YYYY'))
16 ) ;

Table created.

SQL>
SQL>
SQL> -- add a primary key to the partitioned table
SQL> alter table mrmessin.emp add constraint emp_empno_pk primary key (empno) disable ;

Table altered.

SQL>
SQL> -- create the unique index for our primary key
SQL> create unique index mrmessin.emp_empno_pk on mrmessin.emp (empno) tablespace users ;

Index created.

SQL>
SQL> -- enable our primary key
SQL> alter table mrmessin.emp enable primary key using index ;

Table altered.

SQL>
SQL> -- gather stats
SQL> EXEC DBMS_STATS.gather_table_stats('MRMESSIN','EMP') ;

PL/SQL procedure successfully completed.

SQL>
SQL> -- take a look at the partitions of the table
SQL> select table_owner, table_name, partition_name, tablespace_name, num_rows
2 from dba_tab_partitions
3 where table_name = 'EMP' ;

TABLE_OWNER TABLE_NAME
------------------------------ ------------------------------
PARTITION_NAME TABLESPACE_NAME NUM_ROWS
------------------------------ ------------------------------ ----------
MRMESSIN EMP
PART_1999 USERS 0


SQL> -- let insert some data for a partition.
SQL> insert into mrmessin.emp values (1, 'Mike Messina', 'CEO', 1, to_date('15-NOV-1999', 'DD-MON-YYYY'), 10000, 1000, 1) ;

1 row created.

SQL> insert into mrmessin.emp values (2, 'Joe Messina', 'COO', 1, to_date('10-JAN-2000', 'DD-MON-YYYY'), 10000, 1000, 1) ;

1 row created.

SQL> commit ;

Commit complete.

SQL>
SQL> -- gather stats again
SQL> EXEC DBMS_STATS.gather_table_stats('MRMESSIN','EMP') ;

PL/SQL procedure successfully completed.

SQL>
SQL> -- take a look at the partitions of the table
SQL> select table_owner, table_name, partition_name, tablespace_name, num_rows
2 from dba_tab_partitions
3 where table_name = 'EMP' ;

TABLE_OWNER TABLE_NAME
------------------------------ ------------------------------
PARTITION_NAME TABLESPACE_NAME NUM_ROWS
------------------------------ ------------------------------ ----------
MRMESSIN EMP
SYS_P46 USERS 1

MRMESSIN EMP
PART_1999 USERS 1


SQL>
SQL> -- insert some more data that should cause more partitions to be created
SQL> insert into mrmessin.emp values (3, 'Frank Messina', 'CIO', 1, to_date('12-DEC-2001', 'DD-MON-YYYY'), 10000, 1000, 1) ;

1 row created.

SQL> insert into mrmessin.emp values (4, 'Joe Joe Shamo', 'Bouncer', 1, to_date('14-FEB-2002', 'DD-MON-YYYY'), 1000, 100, 1) ;

1 row created.

SQL> insert into mrmessin.emp values (5, 'Mixer Man', 'Bar Tender', 1, to_date('04-APR-2003', 'DD-MON-YYYY'), 1000, 100, 1) ;

1 row created.

SQL> commit ;

Commit complete.

SQL>
SQL> -- gather stats again
SQL> EXEC DBMS_STATS.gather_table_stats('MRMESSIN','EMP') ;

PL/SQL procedure successfully completed.

SQL>
SQL> -- take a look at the partitions of the table
SQL> select table_owner, table_name, partition_name, tablespace_name, num_rows
2 from dba_tab_partitions
3 where table_name = 'EMP' ;

TABLE_OWNER TABLE_NAME
------------------------------ ------------------------------
PARTITION_NAME TABLESPACE_NAME NUM_ROWS
------------------------------ ------------------------------ ----------
MRMESSIN EMP
SYS_P46 USERS 1

MRMESSIN EMP
SYS_P47 USERS 1

MRMESSIN EMP
SYS_P48 USERS 1

MRMESSIN EMP
SYS_P49 USERS 1

MRMESSIN EMP
PART_1999 USERS 1


SQL> alter table mrmessin.emp rename partition sys_p46 to part_2000 ;

Table altered.

SQL> alter table mrmessin.emp rename partition sys_p47 to part_2001 ;

Table altered.

SQL> alter table mrmessin.emp rename partition sys_p48 to part_2002 ;

Table altered.

SQL> alter table mrmessin.emp rename partition sys_p49 to part_2003 ;

Table altered.

SQL> select table_owner, table_name, partition_name, tablespace_name, num_rows
2 from dba_tab_partitions
3 where table_name = 'EMP' ;


TABLE_OWNER TABLE_NAME
------------------------------ ------------------------------
PARTITION_NAME TABLESPACE_NAME NUM_ROWS
------------------------------ ------------------------------ ---------
MRMESSIN EMP
PART_2000 USERS 1

MRMESSIN EMP
PART_2001 USERS 1

MRMESSIN EMP
PART_2002 USERS 1


MRMESSIN EMP
PART_2003 USERS 1

MRMESSIN EMP
PART_1999 USERS 1

SQL>


Summary

Interval Partitioning provides a huge boost in partition management. This makes implementing partitioning much more attractive to those DBAs that have very full plates making partition maintenance more automatic and reducing our DBA maintenance activities. All other activity appears to match normal range partitioning keeping all the other benefits of range partitioning.

Oracle 11g REF Partitioning

Oracle REF partitioning is a new partitioning feature with the 11g version of the Oracle database. REF Partitioning is the ability to partition a table based on the foreign key parent-child relationship. In REF partitioning the partitioning key of the child table is inherited from the parent table. REF partitioning also has all partition maintenance operations that change the logical shape of the parent table into the child table. REF Partitioning also improves performance for joins between the parent and child table by enabling partition-wise joins.

Ref partitioning cannot be used when the parent table is partitioned using range interval partitioning or virtual column partitioning. The following message will happen upon an attempt to create the partitioned table with ref partitioning.

CREATE TABLE order_items
*
ERROR at line 1:
ORA-14659: Partitioning method of the parent table is not supported

Another oddity found in this test was that if I created a partitioned index on the parent table with parallel I could not create the child table with ref partitioning as it got an ORA-00600, however if I created the index without parallel the child table with ref partitioning created ok. To get around this issue I created the partitioned index on the parent table after creating the child table with the ref partitioning.

For the basis as an example lets use the OE.ORDERS and OE.ORDER_ITEMS so create our own versions of the tables insert records where the rows in both sets are exactly the same. This will allow the comparison of a query that could be used against both sets of tables for a comparison. The original tables from OE schema will remain unchanged. For the copies for the partitioning testing we will use a range interval partition for the orders table and a ref partition for the order_items table.

Create the tables and indexes

-- Parent table for the REF-partitioning test: range partitioned by
-- month on order_date, with a catch-all partition for pre-1999 rows.
-- NOTE: the INTERVAL clause is deliberately commented out -- REF
-- partitioning of the child table is not supported when the parent
-- uses interval partitioning (the child create fails with ORA-14659).
CREATE TABLE orders
( order_id NUMBER(12) CONSTRAINT orders_order_id_nn NOT NULL,
order_date DATE CONSTRAINT orders_order_date_nn NOT NULL,
order_mode VARCHAR2(8),
customer_id NUMBER(6) CONSTRAINT orders_customer_id_nn NOT NULL,
order_status VARCHAR2(2),
order_total NUMBER(8,2),
sales_rep_id NUMBER(6),
promotion_id NUMBER(6),
-- PK is required: the child table's FK must reference a NOT NULL key
CONSTRAINT orders_order_id_pk PRIMARY KEY (order_id)
)
PARTITION BY RANGE (order_date)
--INTERVAL (NUMTOYMINTERVAL(1,'month'))
( PARTITION p_pre_1999 VALUES LESS THAN (TO_DATE('01-JAN-1999','dd-MON-yyyy')),
PARTITION p_JAN_1999 VALUES LESS THAN (TO_DATE('01-FEB-1999','dd-MON-yyyy')),
PARTITION p_FEB_1999 VALUES LESS THAN (TO_DATE('01-MAR-1999','dd-MON-yyyy')),
PARTITION p_MAR_1999 VALUES LESS THAN (TO_DATE('01-APR-1999','dd-MON-yyyy')),
PARTITION p_APR_1999 VALUES LESS THAN (TO_DATE('01-MAY-1999','dd-MON-yyyy')),
PARTITION p_MAY_1999 VALUES LESS THAN (TO_DATE('01-JUN-1999','dd-MON-yyyy')),
PARTITION p_JUN_1999 VALUES LESS THAN (TO_DATE('01-JUL-1999','dd-MON-yyyy')),
PARTITION p_JUL_1999 VALUES LESS THAN (TO_DATE('01-AUG-1999','dd-MON-yyyy')),
PARTITION p_AUG_1999 VALUES LESS THAN (TO_DATE('01-SEP-1999','dd-MON-yyyy')),
PARTITION p_SEP_1999 VALUES LESS THAN (TO_DATE('01-OCT-1999','dd-MON-yyyy')),
PARTITION p_OCT_1999 VALUES LESS THAN (TO_DATE('01-NOV-1999','dd-MON-yyyy')),
PARTITION p_NOV_1999 VALUES LESS THAN (TO_DATE('01-DEC-1999','dd-MON-yyyy')),
PARTITION p_DEC_1999 VALUES LESS THAN (TO_DATE('01-JAN-2000','dd-MON-yyyy')),
PARTITION p_JAN_2000 VALUES LESS THAN (TO_DATE('01-FEB-2000','dd-MON-yyyy')),
PARTITION p_FEB_2000 VALUES LESS THAN (TO_DATE('01-MAR-2000','dd-MON-yyyy')),
PARTITION p_MAR_2000 VALUES LESS THAN (TO_DATE('01-APR-2000','dd-MON-yyyy')),
PARTITION p_APR_2000 VALUES LESS THAN (TO_DATE('01-MAY-2000','dd-MON-yyyy')),
PARTITION p_MAY_2000 VALUES LESS THAN (TO_DATE('01-JUN-2000','dd-MON-yyyy')),
PARTITION p_JUN_2000 VALUES LESS THAN (TO_DATE('01-JUL-2000','dd-MON-yyyy')),
PARTITION p_JUL_2000 VALUES LESS THAN (TO_DATE('01-AUG-2000','dd-MON-yyyy')),
PARTITION p_AUG_2000 VALUES LESS THAN (TO_DATE('01-SEP-2000','dd-MON-yyyy')),
PARTITION p_SEP_2000 VALUES LESS THAN (TO_DATE('01-OCT-2000','dd-MON-yyyy')),
PARTITION p_OCT_2000 VALUES LESS THAN (TO_DATE('01-NOV-2000','dd-MON-yyyy')),
PARTITION p_NOV_2000 VALUES LESS THAN (TO_DATE('01-DEC-2000','dd-MON-yyyy')),
PARTITION p_DEC_2000 VALUES LESS THAN (TO_DATE('01-JAN-2001','dd-MON-yyyy'))
)
PARALLEL ;

-- Child table: REF partitioned through the named foreign key, so it
-- inherits the parent's monthly range partitioning on order_date
-- without carrying an order_date column of its own. Each row lands
-- in the partition corresponding to its parent orders row.
CREATE TABLE order_items
( order_id NUMBER(12) CONSTRAINT oitems_order_id_nn NOT NULL,
line_item_id NUMBER(3) CONSTRAINT oitems_line_item_id_nn NOT NULL,
product_id NUMBER(6) CONSTRAINT oitems_product_id_nn NOT NULL,
unit_price NUMBER(8) CONSTRAINT oitems_unit_price_nn NOT NULL,
quantity NUMBER(8,2) CONSTRAINT oitems_quantity_nn NOT NULL,
sales_amount NUMBER(12,2) CONSTRAINT oitems_sales_amount_nn NOT NULL,
-- the FK must be named: PARTITION BY REFERENCE takes the constraint name
CONSTRAINT order_items_orders_fk
FOREIGN KEY (order_id) REFERENCES orders(order_id)
)
PARTITION BY REFERENCE (order_items_orders_fk)
PARALLEL ;

-- Global unique index enforcing uniqueness of the child table's
-- natural key (order_id, line_item_id).
CREATE UNIQUE INDEX ORDER_ITEMS_PK
ON ORDER_ITEMS (ORDER_ID, LINE_ITEM_ID)
PARALLEL ;

-- Locally partitioned index on the parent: one index partition per
-- table partition, named to match the table partitions.
-- have to create this index with parallel after the ref partition
-- table is created to avoid ORA-00600.
CREATE INDEX ORDERS_ORDER_DATE_NU
ON ORDERS (ORDER_DATE, ORDER_ID)
LOCAL
( PARTITION p_pre_1999,
PARTITION p_JAN_1999,
PARTITION p_FEB_1999,
PARTITION p_MAR_1999,
PARTITION p_APR_1999,
PARTITION p_MAY_1999,
PARTITION p_JUN_1999,
PARTITION p_JUL_1999,
PARTITION p_AUG_1999,
PARTITION p_SEP_1999,
PARTITION p_OCT_1999,
PARTITION p_NOV_1999,
PARTITION p_DEC_1999,
PARTITION p_JAN_2000,
PARTITION p_FEB_2000,
PARTITION p_MAR_2000,
PARTITION p_APR_2000,
PARTITION p_MAY_2000,
PARTITION p_JUN_2000,
PARTITION p_JUL_2000,
PARTITION p_AUG_2000,
PARTITION p_SEP_2000,
PARTITION p_OCT_2000,
PARTITION p_NOV_2000,
PARTITION p_DEC_2000
)
PARALLEL ;
-- Populate the new partitioned tables with the same data
-- from the original OE sample-schema tables.
-- Explicit column lists (instead of SELECT * / bare INSERT) keep the
-- load from breaking silently if either table's column order changes.
insert into orders (order_id, order_date, order_mode, customer_id,
                    order_status, order_total, sales_rep_id, promotion_id)
select order_id, order_date, order_mode, customer_id,
       order_status, order_total, sales_rep_id, promotion_id
from oe.orders ;
commit ;

-- sales_amount is derived here: the OE source table has no such column
insert into order_items (order_id, line_item_id, product_id,
                         unit_price, quantity, sales_amount)
select order_id, line_item_id, product_id, unit_price, quantity, (unit_price*quantity) sales_amount
from oe.order_items ;
commit;

-- refresh optimizer statistics so the test plans reflect the loaded data
execute dbms_stats.gather_table_stats ('MRMESSIN','ORDERS') ;
execute dbms_stats.gather_table_stats ('MRMESSIN','ORDER_ITEMS') ;


Test the Outcome

Execution One

SQL> set autotrace on
SQL> SELECT TO_DATE(to_char(o.order_date, 'DD-MON-YYYY'), 'DD-MON-YYYY') order_date, sum(oi.sales_amount) sum_sales
2 FROM oe.orders o,
3 oe.order_items oi
4 WHERE o.order_id = oi.order_id
5 AND o.order_date BETWEEN TO_DATE('01-JAN-1999','DD-MON-YYYY')
6 AND TO_DATE('01-MAY-1999','DD-MON-YYYY')
7 GROUP BY order_date
8 ORDER BY order_date ;

ORDER_DAT SUM_SALES
--------- ----------

26-FEB-99 3322700.8
27-FEB-99 526310.4
11-MAR-99 1688284.8
12-MAR-99 2330752
13-MAR-99 952000

20-MAR-99 61651.2
20-MAR-99 1010368
29-MAR-99 345427.2
29-MAR-99 12288
11-APR-99 52352

10 rows selected.

Elapsed: 00:00:00.01

Execution Plan
----------------------------------------------------------

Plan hash value: 795276733

------------------------------------------------------------------------------------------------------
Id Operation Name Rows Bytes Cost (%CPU) Time
------------------------------------------------------------------------------------------------------
0 SELECT STATEMENT 1 24 6 (17) 00:00:01
1 SORT ORDER BY 1 24 6 (17) 00:00:01
2 SORT GROUP BY NOSORT 1 24 6 (17) 00:00:01
* 3 FILTER
4 NESTED LOOPS
5 NESTED LOOPS 2 48 5 (0) 00:00:01
6 TABLE ACCESS BY INDEX ROWID ORDERS 1 15 2 (0) 00:00:01
* 7 INDEX RANGE SCAN ORD_ORDER_DATE_IX 1 1 (0) 00:00:01
* 8 INDEX RANGE SCAN ITEM_ORDER_IX 6 1 (0) 00:00:01
9 TABLE ACCESS BY INDEX ROWID ORDER_ITEMS 6 54 3 (0) 00:00:01
------------------------------------------------------------------------------------------------------
Predicate Information (identified by operation id):

---------------------------------------------------
3 - filter(TIMESTAMP' 1999-01-01 00:00:00'<=TIMESTAMP' 1999-05-01 00:00:00')
7 - access("O"."ORDER_DATE">=TIMESTAMP' 1999-01-01 00:00:00' AND "O"."ORDER_DATE"<=TIMESTAMP' 1999-05-01 00:00:00')

8 - access("O"."ORDER_ID"="OI"."ORDER_ID")

Statistics
----------------------------------------------------------

0 recursive calls
0 db block gets
875 consistent gets

0 physical reads
0 redo size
667 bytes sent via SQL*Net to client
396 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
1 sorts (memory)
0 sorts (disk)
10 rows processed


Execution Two

SQL> SELECT o.order_date, sum(oi.sales_amount) sum_sales
2 FROM orders o,
3 order_items oi
4 WHERE o.order_date BETWEEN TO_DATE('01-JAN-1999','DD-MON-YYYY')
5 AND TO_DATE('01-MAY-1999','DD-MON-YYYY')
6 AND o.order_id = oi.order_id
7 GROUP BY order_date
8 ORDER BY order_date ;

ORDER_DAT SUM_SALES
--------- ----------
26-FEB-99 3322700.8

27-FEB-99 526310.4
11-MAR-99 1688284.8
12-MAR-99 2330752
13-MAR-99 952000
20-MAR-99 61651.2
20-MAR-99 1010368
29-MAR-99 345427.2
29-MAR-99 12288
11-APR-99 52352

10 rows selected.

Elapsed: 00:00:00.07

Execution Plan
----------------------------------------------------------

Plan hash value: 1413427856
----------------------------------------------------------------------------------------------------------------------------------------------------------
Id Operation Name Rows Bytes Cost (%CPU) Time Pstart Pstop TQ IN-OUT PQ Distrib
----------------------------------------------------------------------------------------------------------------------------------------------------------
0 SELECT STATEMENT 5 105 14 (15) 00:00:01
1 PX COORDINATOR
2 PX SEND QC (ORDER) :TQ10002 5 105 14 (15) 00:00:01 Q1,02 P->S QC (ORDER)
3 SORT GROUP BY 5 105 14 (15) 00:00:01 Q1,02 PCWP
4 PX RECEIVE 5 105 14 (15) 00:00:01 Q1,02 PCWP
5 PX SEND RANGE :TQ10001 5 105 14 (15) 00:00:01 Q1,01 P->P RANGE
6 SORT GROUP BY 5 105 14 (15) 00:00:01 Q1,01 PCWP
* 7 HASH JOIN 1115 23415 13 (8) 00:00:01 Q1,01 PCWP
8 PX RECEIVE 176 2112 1 (0) 00:00:01 Q1,01 PCWP
9 PX SEND BROADCAST :TQ10000 176 2112 1 (0) 00:00:01 Q1,00 P->P BROADCAST
10 PX PARTITION RANGE ITERATOR 176 2112 1 (0) 00:00:01 2 6 Q1,00 PCWC
* 11 INDEX RANGE SCAN ORDERS_ORDER_DATE_NU 176 2112 1 (0) 00:00:01 2 6 Q1,00 PCWP
12 PX BLOCK ITERATOR 21280 187K 11 (0) 00:00:01 1 25 Q1,01 PCWC
13 TABLE ACCESS FULL ORDER_ITEMS 21280 187K 11 (0) 00:00:01 1 25 Q1,01 PCWP
----------------------------------------------------------------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):

---------------------------------------------------
7 - access("O"."ORDER_ID"="OI"."ORDER_ID")
11 - access("O"."ORDER_DATE">=TO_DATE(' 1999-01-01 00:00:00', 'syyyy-mm-dd hh24:mi:ss') AND "O"."ORDER_DATE"<=TO_DATE(' 1999-05-01 00:00:00', 'syyyy-mm-dd hh24:mi:ss'))

Statistics
----------------------------------------------------------

45 recursive calls
3 db block gets

357 consistent gets
0 physical reads
664 redo size

683 bytes sent via SQL*Net to client
396 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
34 sorts (memory)
0 sorts (disk)

10 rows processed

SQL>


Execution Three

SQL> set linesize 200
SQL> set timing on
SQL>
SQL> spool ref_partitioning_test.out
SQL>
SQL> set autotrace on
SQL> SELECT TO_DATE(to_char(o.order_date, 'DD-MON-YYYY'), 'DD-MON-YYYY') order_date, sum(oi.sales_am
2 FROM oe.orders o,
3 oe.order_items oi
4 WHERE o.order_id = oi.order_id
5 AND o.order_date BETWEEN TO_DATE('01-JAN-1999','DD-MON-YYYY')
6 AND TO_DATE('01-MAY-1999','DD-MON-YYYY')
7 GROUP BY order_date
8 ORDER BY order_date ;

ORDER_DAT SUM_SALES
--------- ----------
26-FEB-99 6645401.6
27-FEB-99 1052620.8
11-MAR-99 3376569.6
12-MAR-99 4661504
13-MAR-99 1904000
20-MAR-99 123302.4
20-MAR-99 2020736
29-MAR-99 690854.4
29-MAR-99 24576
11-APR-99 104704

10 rows selected.

Elapsed: 00:00:00.04

Execution Plan
----------------------------------------------------------
Plan hash value: 795276733

------------------------------------------------------------------------------------
Id Operation Name Rows Bytes Cost (%CPU) Time
------------------------------------------------------------------------------------
0 SELECT STATEMENT 1 24 6 (17) 00:00:01
1 SORT ORDER BY 1 24 6 (17) 00:00:01
2 SORT GROUP BY NOSORT 1 24 6 (17) 00:00:01
* 3 FILTER
4 NESTED LOOPS
5 NESTED LOOPS 2 48 5 (0) 00:00:01
6 TABLE ACCESS BY INDEX ROWID ORDERS 1 15 2 (0) 00:00:01
* 7 INDEX RANGE SCAN ORD_ORDER_DATE_IX 1 1 (0) 00:00:01
* 8 INDEX RANGE SCAN ITEM_ORDER_IX 6 1 (0) 00:00:01
9 TABLE ACCESS BY INDEX ROWID ORDER_ITEMS 6 54 3 (0) 00:00:01
------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
3 - filter(TIMESTAMP' 1999-01-01 00:00:00'<=TIMESTAMP' 1999-05-01 00:00:00')
7 - access("O"."ORDER_DATE">=TIMESTAMP' 1999-01-01 00:00:00' AND
"O"."ORDER_DATE"<=TIMESTAMP' 1999-05-01 00:00:00')
8 - access("O"."ORDER_ID"="OI"."ORDER_ID")


Statistics
----------------------------------------------------------
0 recursive calls
0 db block gets
1724 consistent gets
0 physical reads
116 redo size
669 bytes sent via SQL*Net to client
396 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
1 sorts (memory)
0 sorts (disk)
10 rows processed

SQL> SELECT o.order_date, sum(oi.sales_amount) sum_sales
2 FROM orders o,
3 order_items oi
4 WHERE o.order_date BETWEEN TO_DATE('01-JAN-1999','DD-MON-YYYY')
5 AND TO_DATE('01-MAY-1999','DD-MON-YYYY')
6 AND o.order_id = oi.order_id
7 GROUP BY order_date
8 ORDER BY order_date ;

ORDER_DAT SUM_SALES
--------- ----------
26-FEB-99 6645401.6
27-FEB-99 1052620.8
11-MAR-99 3376569.6
12-MAR-99 4661504
13-MAR-99 1904000
20-MAR-99 123302.4
20-MAR-99 2020736
29-MAR-99 690854.4
29-MAR-99 24576
11-APR-99 104704

10 rows selected.

Elapsed: 00:00:00.07

Execution Plan
----------------------------------------------------------
Plan hash value: 1413427856
------------------------------------------------------------------------------------ Id Operation Name Rows Bytes Cost (%CPU) Time Pstart Pstop
------------------------------------------------------------------------------------
0 SELECT STATEMENT 5 105 21 (10) 00:00:01
1 PX COORDINATOR
2 PX SEND QC (ORDER) :TQ10002 5 105 21 (10) 00:00:01
3 SORT GROUP BY 5 105 21 (10) 00:00:01
4 PX RECEIVE 5 105 21 (10) 00:00:01
5 PX SEND RANGE :TQ10001 5 105 21 (10) 00:00:01
6 SORT GROUP BY 5 105 21 (10) 00:00:01
* 7 HASH JOIN 2231 46851 20 (5) 00:00:01
8 PX RECEIVE 352 4224 1 (0) 00:00:01
9 PX SEND BROADCAST :TQ10000 352 4224 1 (0) 00:00:01
10 PX PARTITION RANGE ITERATOR 352 4224 1 (0) 00:00:01 2 6
* 11 INDEX RANGE SCAN ORDERS_ORDER_DATE_NU 352 4224 1 (0) 00:00:01 2
12 PX BLOCK ITERATOR 42560 374K 18 (0) 00:00:01 1 25
13 TABLE ACCESS FULL ORDER_ITEMS 42560 374K 18 (0) 00:00:01 1 25
------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
7 - access("O"."ORDER_ID"="OI"."ORDER_ID")
11 - access("O"."ORDER_DATE">=TO_DATE(' 1999-01-01 00:00:00', 'syyyy-mm-dd hh24:mi:ss') AND "O"."O
'syyyy-mm-dd hh24:mi:ss'))

Statistics
----------------------------------------------------------
45 recursive calls
3 db block gets
481 consistent gets
0 physical reads
664 redo size
685 bytes sent via SQL*Net to client
396 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
34 sorts (memory)
0 sorts (disk)
10 rows processed


Summary

Initial tests were done without parallel and did not yield expected results, expected results did not really start to happen until parallel was used and the number of rows in the parent and child tables significantly increased. Keep in mind that parallel operations increase CPU utilization and therefore a CPU bound system may not encounter the same results.

We ran many tests, increasing the number of rows proportionately in each table with each execution. As the number of rows in orders and order_items increased, so did the gap in buffer gets to resolve the query. On a very small table the non-partitioned table used far fewer resources; as the number of rows increased the buffer gets grew closer together until the partitioned table was taking fewer resources. Even as the difference in buffer gets changed with the number of records, the number of sorts in memory stayed consistent. Another thing noticed in the testing is that runtime performance stayed more consistent with the partitioned tables — the run time increase was lower than with the non-partitioned tables as the number of rows increased.

This is a nice new partitioning feature for parent-child relationships and initial testing shows this partitioning could have benefits for tables that are large. Smaller tables did not show any advantages, but the advantages seem to gain as the table sizes/number of rows increases.

Initial testing has shown that this new Oracle Partitioning Feature is worth the time to do further testing on very large data sets.

Tuesday, February 5, 2008

Oracle Hidden (_) Parameter Values

To query the hidden (_) parameters for an oracle instance use the following, can limit the list using the name or partial of the name of the parameter you are looking for:

set pagesize 60
set linesize 100
-- List hidden (underscore-prefixed) instance parameters with their
-- session and system values.
-- The leading underscore must be escaped in the LIKE pattern: an
-- unescaped "_" is a single-character wildcard, so the original
-- pattern '__%' matched EVERY parameter with a name of two or more
-- characters, not just the hidden ones.
select a.ksppinm NAME,
       a.ksppdesc DESCRIPTION,
       b.ksppstvl SESSION_VALUE,
       c.ksppstvl SYSTEM_VALUE
from x$ksppi a
join x$ksppcv b on b.indx = a.indx
join x$ksppsv c on c.indx = a.indx
where a.ksppinm like '\_%' escape '\' ;

Oracle see who has been granted SYSDBA

In Oracle to query who has been granted SYSDBA is done by the v$pwfile_users

SQL> select * from v$pwfile_users;
USERNAME SYSDB SYSOP SYSAS
-------- ----- ----- -----
SYS TRUE TRUE FALSE
MRMESSIN TRUE FALSE FALSE
SQL>

Saturday, February 2, 2008

RMAN Backup using a Wallet for Authentication

RMAN Backup using a Wallet for Authentication

Ever want to not have passwords in your RMAN backup scripts?
Oracle Wallet will provide a way to connect to the RMAN repository without having to create a way to script in a password.

1. setup password files that allow and disallow logging in with SYSDBA (This is optional and only if you want to be able to control remote login as sysdba)

a. cd $ORACLE_HOME/dbs
b. export ORACLE_SID=rman
c. orapwd file=orapwdbname.nosysdba password= nosysdba=y
d. orapwd file=orapwdbname.sysdba password= nosysdba=n


2. Add processes to enable sysdba for backup and disable when backup is complete (optional only if you want to control when remote sysdba connections are allowed)
a. enable sysdba logins
cp –p orapwdbname.sysdba orapwdbname

b. disable sysdba logins
cp –p orapwdbname.nosysdba orapwdbname

* For our example we will assume that we will have it on and it is left on, but this will give the option to allow and disallow sysdba connections as needed.


3. Avoid having to pass username and password at the command line.
a. add TNS Alias to tnsnames.ora file for RMANSYS connections

RMANSYS =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = )(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = rman)
)
)

b. Create Wallet
cd $ORACLE_HOME/network/admin
mkstore –wrl $ORACLE_HOME/network/admin –create

* Enter password for wallet (will have to be entered 2 times for confirmation)
* Creates 2 files ewallet.p12 and cwallet.sso

c. Create credential for Wallet
mkstore -wrl $ORACLE_HOME/network/admin –createCredential rmansys rman

* should see something like Create credential oracle.security.client.connect_string1

d. Edit sqlnet.ora
WALLET_LOCATION =
(SOURCE =
(METHOD = FILE)
(METHOD_DATA =
(DIRECTORY = <$ORACLE_HOME*>/network/admin)))

SQLNET.WALLET_OVERRIDE = TRUE
SSL_CLIENT_AUTHENTICATION = FALSE
SSL_VERSION = 0


* Must put in path for $ORACLE_HOME

e. Restart the database listener
lsnrctl stop
lsnrctl start

4. Now you can use the wallet credentials to login as sys.
sqlplus /@RMANSYS

export ORACLE_SID=trn01
rman target=/ catalog=/@rmansys

Oracle Fine Grained Auditing

Regulations such as HIPAA have placed upon Oracle Database Administrators (DBAs) the need to audit DDL (database schema changes), DML (inserts/updates/deletes) as well as select access to what is considered “protected information”. Auditing changes has always had value and was the typical auditing done in most critical databases. Auditing changes would allow information to be gathered to show when a change happened and who made the change. Another audit need has become more critical due to regulations and that is select or read access to “protected information”. Auditing is now required to gather information on when and who read or accessed certain information.

Oracle auditing has existed in the Oracle database for quite some time and is used to gather information on changes and access to the data in the database. It has provided the ability to audit inserts, updates and deletes on table as well as the ability to audit object creations, modifications and removals as auditing in the database progressed later versions allowed the auditing of selects. Auditing is also able to audit log on and log off from the database and many other operations. However the auditing is always at a high level, what if the need was to only audit under more specific circumstances. To get this level of auditing might require a trigger that must be coded and there fore the code developed and maintained so that some level of criteria could be used to determine if an audit record should be written or not. Oracle Fine Grained Auditing provides the ability to add criteria based auditing to gather information (data) on what has changed or been accessed within the database without the need to write trigger code.


Components for FGA

Oracle FGA is setup and managed though the dbms_fga package and all audit information gathered is accessed via the dba_fga_audit_trail view, the actual sys table that the audit information goes into is fga_log$.

Examine the definition of the dbms_fga package, the dba_fga_audit_trail view and the sys table fga_log$ as these are the objects that will be used in the management of FGA.


SQL> desc dbms_fga
PROCEDURE ADD_POLICY
Argument Name Type In/Out Default?
------------------------------ ----------------------- ------ --------
OBJECT_SCHEMA VARCHAR2 IN DEFAULT
OBJECT_NAME VARCHAR2 IN
POLICY_NAME VARCHAR2 IN
AUDIT_CONDITION VARCHAR2 IN DEFAULT
AUDIT_COLUMN VARCHAR2 IN DEFAULT
HANDLER_SCHEMA VARCHAR2 IN DEFAULT
HANDLER_MODULE VARCHAR2 IN DEFAULT
ENABLE BOOLEAN IN DEFAULT
STATEMENT_TYPES VARCHAR2 IN DEFAULT
AUDIT_TRAIL BINARY_INTEGER IN DEFAULT
AUDIT_COLUMN_OPTS BINARY_INTEGER IN DEFAULT
PROCEDURE DISABLE_POLICY
Argument Name Type In/Out Default?
------------------------------ ----------------------- ------ --------
OBJECT_SCHEMA VARCHAR2 IN DEFAULT
OBJECT_NAME VARCHAR2 IN
POLICY_NAME VARCHAR2 IN
PROCEDURE DROP_POLICY
Argument Name Type In/Out Default?
------------------------------ ----------------------- ------ --------
OBJECT_SCHEMA VARCHAR2 IN DEFAULT
OBJECT_NAME VARCHAR2 IN
POLICY_NAME VARCHAR2 IN
PROCEDURE ENABLE_POLICY
Argument Name Type In/Out Default?
------------------------------ ----------------------- ------ --------
OBJECT_SCHEMA VARCHAR2 IN DEFAULT
OBJECT_NAME VARCHAR2 IN
POLICY_NAME VARCHAR2 IN
ENABLE BOOLEAN IN DEFAULT

SQL> desc dba_fga_audit_trail
Name Null? Type
----------------------------------------- -------- ------------------
SESSION_ID NOT NULL NUMBER
TIMESTAMP DATE
DB_USER VARCHAR2(30)
OS_USER VARCHAR2(255)
USERHOST VARCHAR2(128)
CLIENT_ID VARCHAR2(64)
ECONTEXT_ID VARCHAR2(64)
EXT_NAME VARCHAR2(4000)
OBJECT_SCHEMA VARCHAR2(30)
OBJECT_NAME VARCHAR2(128)
POLICY_NAME VARCHAR2(30)
SCN NUMBER
SQL_TEXT NVARCHAR2(2000)
SQL_BIND NVARCHAR2(2000)
COMMENT$TEXT VARCHAR2(4000)
STATEMENT_TYPE VARCHAR2(7)
EXTENDED_TIMESTAMP TIMESTAMP(6) WITH TIME ZONE
PROXY_SESSIONID NUMBER
GLOBAL_UID VARCHAR2(32)
INSTANCE_NUMBER NUMBER
OS_PROCESS VARCHAR2(16)
TRANSACTIONID RAW(8)
STATEMENTID NUMBER
ENTRYID NUMBER
OBJ_EDITION_NAME VARCHAR2(30)


SQL> desc sys.fga_log$
Name Null? Type
----------------------------------------- -------- -------------------------
SESSIONID NOT NULL NUMBER
TIMESTAMP# DATE
DBUID VARCHAR2(30)
OSUID VARCHAR2(255)
OSHST VARCHAR2(128)
CLIENTID VARCHAR2(64)
EXTID VARCHAR2(4000)
OBJ$SCHEMA VARCHAR2(30)
OBJ$NAME VARCHAR2(128)
POLICYNAME VARCHAR2(30)
SCN NUMBER
SQLTEXT VARCHAR2(4000)
LSQLTEXT CLOB
SQLBIND VARCHAR2(4000)
COMMENT$TEXT VARCHAR2(4000)
PLHOL LONG
STMT_TYPE NUMBER
NTIMESTAMP# TIMESTAMP(6)
PROXY$SID NUMBER
USER$GUID VARCHAR2(32)
INSTANCE# NUMBER
PROCESS# VARCHAR2(16)
XID RAW(8)
AUDITID VARCHAR2(64)
STATEMENT NUMBER
ENTRYID NUMBER
DBID NUMBER
LSQLBIND CLOB
OBJ$EDITION VARCHAR2(30)


Define Auditing Policies

To implement Oracle FGA for a successful audit trail the audit rules and policies must be determined and documented. The organization must determine the compliance rules that the organization is subject to, such as HIPAA and SOX. Identify the auditing requirements to fulfill the compliance rules such as DDL auditing (create table, alter table/index, drop table/index, insert, update and delete on protected data as well as select/read access to protected data). The organization is also free to add additional auditing for its own internal use, potentially for security purposes for use in forensic activities in cases of security breaches or activity monitoring.


Setup FGA

To setup FGA policies that trigger audit records will need to be defined using policies. The policies will be created based on the audit rules and policies identified that the organization is subject to as well as auditing rule the business has determined itself wants to have possibly for better tracking for security purposes.


-- drop existing audit policy
-- NOTE(review): dbms_fga.drop_policy raises an error if the policy does not
-- already exist; for a rerunnable script, wrap the drops in a PL/SQL block
-- that catches and ignores that specific error.
execute dbms_fga.drop_policy ('SCOTT','EMP','SALARY_INCREASES') ;

-- Audit all Salary Changes to EMP
-- (fires on any UPDATE that touches SAL; DB_EXTENDED also captures SQL text
-- and bind values in the audit trail)
begin
dbms_fga.add_policy(
object_schema => 'SCOTT',
object_name => 'EMP',
policy_name => 'SALARY_INCREASES',
audit_column => 'SAL',
enable => true, statement_types => 'UPDATE', audit_trail => DBMS_FGA.DB_EXTENDED, audit_column_opts => DBMS_FGA.ANY_COLUMNS
);
end;
/

-- drop existing audit policy
execute dbms_fga.drop_policy ('SCOTT','EMP','NEW_EMP_HIGH_SALARY') ;

-- Audit all new employee creations with salary > 2000
-- (audit_condition is evaluated per row; only qualifying inserts are audited)
begin
dbms_fga.add_policy(
object_schema => 'SCOTT',
object_name => 'EMP',
policy_name => 'NEW_EMP_HIGH_SALARY',
audit_condition => 'SAL>=2000',
enable => true, statement_types => 'INSERT', audit_trail => DBMS_FGA.DB_EXTENDED, audit_column_opts => DBMS_FGA.ANY_COLUMNS
);
end;
/

-- drop existing audit policy
execute dbms_fga.drop_policy ('SCOTT','BONUS','BONUS_CHANGES') ;

-- Audit all changes on BONUS
-- (no audit_column / audit_condition, so every INSERT/UPDATE/DELETE is audited)
begin
dbms_fga.add_policy(
object_schema => 'SCOTT',
object_name => 'BONUS',
policy_name => 'BONUS_CHANGES',
enable => true, statement_types => 'INSERT,UPDATE,DELETE', audit_trail => DBMS_FGA.DB_EXTENDED, audit_column_opts => DBMS_FGA.ANY_COLUMNS
);
end;
/


Show Audit Policies

set linesize 120
set pagesize 50

TTITLE 'FGA Policies'
COL object_schema FORMAT A10 HEADING 'Schema'
COL object_name FORMAT A20 HEADING 'Object Name' WRAP
COL policy_name FORMAT A16 HEADING 'Policy Name' WRAP
COL policy_text FORMAT A24 HEADING 'Policy Text' WRAP
COL policy_column FORMAT A16 HEADING 'Policy Column' WRAP
COL enabled FORMAT A05 HEADING 'Enabled'
COL siud FORMAT A04 HEADING 'SIUD'

-- Build the SIUD flag string by concatenating one Y/N per statement type
-- (the concatenation operators were missing, which is a syntax error).
SELECT policy_name, policy_text, policy_column, enabled, object_schema, object_name,
       DECODE(sel,'YES','Y','N') || DECODE(ins,'YES','Y','N') ||
       DECODE(upd,'YES','Y','N') || DECODE(del,'YES','Y','N') AS siud
FROM dba_audit_policies ;

TTITLE OFF


Show Audit Results

Using Fine Grained Audit Trail:


set linesize 120
set pagesize 50

TTITLE 'Fine-Grained Auditing (FGA) Audit Trail'
COL auditdate FORMAT A20 HEADING 'Date/Time'
COL policy_name FORMAT A16 HEADING 'Policy Name' WRAP
COL object_schema FORMAT A10 HEADING 'Schema'
COL object_name FORMAT A20 HEADING 'Object Name' WRAP
COL db_user FORMAT A10 HEADING 'User'
COL sql_text FORMAT A36 HEADING 'SQL Text' WRAP
SELECT TO_CHAR(timestamp,'mm/dd/yyyy hh24:mi:ss') auditdate,
db_user,
object_schema,
object_name,
policy_name,
sql_text
FROM dba_fga_audit_trail
ORDER BY timestamp ;

TTITLE OFF

Could also use the combined audit trail as well to query the FGA Audit Trail this would allow the use of a single query/report for the entire audit trail:

Combined Audit Trail:


SET linesize 120
SET pagesize 50

TTITLE 'Standard/FGA Audit Trail'
COL audittype FORMAT A03 HEADING 'Audit Type'
COL db_user FORMAT A10 HEADING 'User'
COL object_schema FORMAT A06 HEADING 'Schema'
COL object_name FORMAT A20 HEADING 'Object Name' WRAP
COL policy_name FORMAT A16 HEADING 'Policy Name' WRAP
COL auditdate FORMAT A20 HEADING 'Date/Time'
COL sql_text FORMAT A32 HEADING 'SQL Text' WRAP

-- Single report over both standard and fine grained audit trails.
-- Fixed: the 'FGA' literal used a curly closing quote (breaks the statement),
-- and the alias is now audittype so the COL audittype formatting applies.
SELECT DECODE(audit_type, 'Fine Grained Audit', 'FGA', 'Standard Audit', 'STD', 'U')
       AS audittype,
       db_user,
       object_schema,
       object_name,
       policy_name,
       TO_CHAR(extended_timestamp,'mm/dd/yyyy hh24:mi:ss') AS auditdate,
       sql_text
FROM dba_common_audit_trail
WHERE db_user NOT IN ('SYS','SYSTEM','DBSNMP','SYSMAN')
ORDER BY extended_timestamp, db_user, object_schema, object_name ;

TTITLE OFF



Testing Results

PL/SQL procedure successfully completed.

PL/SQL procedure successfully completed.

PL/SQL procedure successfully completed.

PL/SQL procedure successfully completed.

PL/SQL procedure successfully completed.

PL/SQL procedure successfully completed.


Wed Jan 30 page 1
FGA Policies

Policy Name Policy Text Policy Column Enabl Schema Object Name SIUD
----------- ----------- ------------- ----- ------ ----------- ----
BONUS_CHANGES YES SCOTT BONUS NYYY
SALARY_INCREASES SAL YES SCOTT EMP NNYN
NEW_EMP_HIGH_SALARY SAL>=2000 YES SCOTT EMP NYNN

1 row created.

1 row updated.

1 row deleted.

Commit complete.

1 row created.

Commit complete.


Wed Jan 30 page 1
Fine-Grained Auditing (FGA) Audit Trail

Date User Schema Object Name Policy Name SQL Text
---------- ---------- ---------- ----------------- ---------------- ------------------------------------
01/30/2008 SYSTEM SCOTT BONUS BONUS_CHANGES delete from scott.bonus
08:07:28
01/30/2008 SYSTEM SCOTT BONUS BONUS_CHANGES insert into scott.bonus values ('BIG
08:07:28 BOSSMAN', 'THEMAN', 10000, 50000)
01/30/2008 SYSTEM SCOTT EMP SALARY_INCREASES update scott.emp set sal=4000 where
08:07:28 empno=7844
01/30/2008 SYSTEM SCOTT EMP NEW_EMP_HIGH_SAL insert into scott.emp values (8000,
08:07:28 ARY 'THEMAN', 'BIGBOSSMAN', NULL, TO_DATE('01-JAN-2008','DD-MON-YYYY'), 10000, 50000, 10)

Managing FGA

We have to manage this audit information being generated as if we do not the amount of audit information will continue to grow and potentially lead to performance issues within the database. Therefore we must move audit information out of the system table to another table or even another database so that we can keep it in accordance with the business rules and regulations. After all the audit data is moved and the audit information verified that it has been properly moved we will need to purge the audit data from the system table.

A simple way to manage the fine grained auditing audit trail would be to create a package and use that package to move the data in the audit trail for more long term storage and remove the audit records that have been moved. Simple development of a PL/SQL package that can be scheduled within the database to keep the system audit trail clean by placing in a staging area that can be used by Extraction Transformation Load (ETL) processes for longer term storage and reporting.



Example Package, not complete, but just to give an idea of how this might be approached:

-- Package for housekeeping of the database audit trails.
-- Each procedure returns NULL in p_msg on success, or SQLERRM on failure.
CREATE OR REPLACE PACKAGE manage_audit_trail
IS
-- Copy fine grained audit records into the AUDIT_STAGE staging table.
PROCEDURE move_fga (p_msg OUT VARCHAR2) ;
-- Delete sys.fga_log$ rows older than SYSDATE - p_days (default 30 days).
PROCEDURE remove_fga (p_days IN NUMBER, p_msg OUT VARCHAR2) ;

-- Delete sys.aud$ rows older than SYSDATE - p_days (default 30 days).
PROCEDURE remove_audit (p_days IN NUMBER, p_msg OUT VARCHAR2) ;
END ;
/

CREATE OR REPLACE PACKAGE BODY manage_audit_trail
IS
   -- Move fine grained audit data to the AUDIT_STAGE staging table.
   -- AUDIT_STAGE is assumed to mirror the columns of dba_fga_audit_trail
   -- (TODO confirm against the staging table DDL).  Rows already staged
   -- (matched on session_id/timestamp/db_user/entryid) are skipped.
   -- p_msg returns NULL on success or SQLERRM on failure.
   PROCEDURE move_fga (p_msg OUT VARCHAR2)
   IS
   BEGIN
      p_msg := NULL ;

      -- Fixed: the ON clause predicates must be joined with AND, and the
      -- alias "at" is replaced with "f" (AT is an Oracle keyword).
      MERGE INTO audit_stage a
      USING dba_fga_audit_trail f
      ON (    a.session_id = f.session_id
          AND a.timestamp  = f.timestamp
          AND a.db_user    = f.db_user
          AND a.entryid    = f.entryid)
      WHEN NOT MATCHED THEN
         INSERT (a.session_id, a.timestamp, a.db_user,
                 a.os_user, a.userhost, a.client_id,
                 a.econtext_id, a.ext_name, a.object_schema,
                 a.object_name, a.policy_name, a.scn,
                 a.sql_text, a.sql_bind, a.comment$text,
                 a.statement_type, a.extended_timestamp,
                 a.proxy_sessionid, a.global_uid,
                 a.instance_number, a.os_process,
                 a.transactionid, a.statementid,
                 a.entryid, a.obj_edition_name)
         VALUES (f.session_id, f.timestamp, f.db_user,
                 f.os_user, f.userhost, f.client_id,
                 f.econtext_id, f.ext_name, f.object_schema,
                 f.object_name, f.policy_name, f.scn,
                 f.sql_text, f.sql_bind, f.comment$text,
                 f.statement_type, f.extended_timestamp,
                 f.proxy_sessionid, f.global_uid,
                 f.instance_number, f.os_process,
                 f.transactionid, f.statementid,
                 f.entryid, f.obj_edition_name) ;
   EXCEPTION
      WHEN OTHERS THEN
         p_msg := SQLERRM ;
   END move_fga ;

   -- Remove records from the fine grained audit trail (sys.fga_log$)
   -- older than SYSDATE - p_days.  p_days defaults to 30 when NULL.
   -- The caller is responsible for committing.
   PROCEDURE remove_fga (p_days IN NUMBER, p_msg OUT VARCHAR2)
   IS
      -- p_days is an IN parameter and cannot be assigned to; apply the
      -- 30-day default through a local variable instead (the original
      -- "p_days = 30" was illegal on both counts).
      v_days NUMBER := NVL(p_days, 30) ;
   BEGIN
      -- initialize p_msg to null
      p_msg := NULL ;

      -- remove the records from the fine grained audit trail
      -- (fixed: the en-dash in "SYSDATE – p_days" is not a minus operator)
      DELETE
        FROM sys.fga_log$
       WHERE timestamp# <= SYSDATE - v_days ;
   EXCEPTION
      WHEN OTHERS THEN
         p_msg := SQLERRM ;
   END remove_fga ;

   -- Remove records from the standard audit trail (sys.aud$)
   -- older than SYSDATE - p_days.  p_days defaults to 30 when NULL.
   -- The caller is responsible for committing.
   PROCEDURE remove_audit (p_days IN NUMBER, p_msg OUT VARCHAR2)
   IS
      v_days NUMBER := NVL(p_days, 30) ;
   BEGIN
      -- initialize p_msg to null
      p_msg := NULL ;

      -- remove the records from the audit trail
      DELETE
        FROM sys.aud$
       WHERE timestamp# <= SYSDATE - v_days ;
   EXCEPTION
      WHEN OTHERS THEN
         p_msg := SQLERRM ;
   END remove_audit ;
END manage_audit_trail ;
/


Summary

Oracle Fine Grained Auditing provides the Oracle DBAs a great way to gather the required audit information, define criteria for the gathering of the audit information and actually helps simplify managing the audit data by consolidating all audit data into a single audit trail making the movement and management of audit data for long term reporting simpler. The key to a complete successful audit trail is the ability to report the audit information to answer the questions coming from regulation agencies and internal business entities, like security and compliance offices.

Accessing Monitored Database Objects with OEM Grid Control Reports

Accessing Monitored Database Objects with OEM Grid Control Reports

Ever want to create an Oracle Enterprise Manager Grid Control Report using objects in the monitored database rather than the grid control repository objects? There is a way.

We can create a user in the grid control repository database create a database link to the monitored database create view(s) of the across the database link to the object(s) that we want to be able to built the OEM Grid Control Report on then grant access to the view(s) to the MGMT_VIEW user in the grid control repository. Then you can use the view created in a OEM Grid Control Report.

Example:

-- will set up a user in the grid control repository that will own
-- the private database link and views here we will name the user same
-- as the database
-- Create a repository user to own the private database link and the view.
-- NOTE(review): replace "password" with a real password before running.
create user prd identified by password ;
alter user prd default tablespace users ;
-- Fixed syntax: a user's temporary tablespace is assigned with
-- DEFAULT TEMPORARY TABLESPACE, not "TEMPORARY TABLESPACE".
alter user prd default temporary tablespace temp ;

grant create session to prd ;
grant create database link to prd ;
grant create view to prd ;

-- connect as our user so that we can create the database
-- link and view as well as grant to the MGMT_VIEW user
connect prd/password@gridrepository

-- drop the database link if it already exists
-- (errors harmlessly on first run when the link does not yet exist)
drop database link prd ;

-- create the database link to the monitored database where the
-- objects we want to use in our OEM Grid Control Reports exist
create database link prd connect to dbsnmp identified by password using 'PRD' ;

-- Create our view across the database link
create or replace view prd.DBA_HIST_SYSMETRIC_SUMMARY as select * from DBA_HIST_SYSMETRIC_SUMMARY@prd ;

-- grant permissions to the MGMT_VIEW user
grant select on prd.DBA_HIST_SYSMETRIC_SUMMARY to MGMT_VIEW ;

-- Now ready to use the prd.DBA_HIST_SYSMETRIC_SUMMARY
-- in our Grid Control Reports

Oracle 11g RMAN New Compression Type (ZLIB)

Oracle 11g RMAN New Compression Type (ZLIB)

RMAN had compression of backups in Oracle Database 10g to conserve network bandwidth. Third-party compression utilities provided faster alternatives to RMAN's own, but RMAN 10g compression provided some features that the third-party ones did not. For example, when RMAN 10g restored datafiles, it did not need to uncompress the files first. This approach offers significant bandwidth savings during restores.

Now in Oracle Database 11g, RMAN offers another algorithm, ZLIB, in addition to the previously available BZIP2. ZLIB is a much faster algorithm but it does not compress as much. However, it will not consume near as much CPU. Therefore if you need to reduce CPU consumption ZLIB compression can offer you some compression at a lower cost to the CPU. (Note that BZIP2 is the default in version 11.1.x; if you want to use the new ZLIB compression you will need to license a new option called Advanced Compression Option.)

To use ZLIB compression just set the RMAN configuration parameter:

RMAN> configure compression algorithm 'ZLIB' ;

To change it to BZIP2:

RMAN> configure compression algorithm 'bzip2';

Oracle ASM Management Overview

Oracle ASM Management Overview

This is to give a brief overview of managing ASM disk groups by providing some information on viewing the disk groups, listing files and directories in a disk group, create, rename and remove directories in a disk group as well as add and remove disks from the disk group.


List Disk Groups defined in ASM instance:

SQL> select name from v$asm_diskgroup ;

NAME
--------------------------------------------------------------------------------
P1CSM2_ASM_DG1

SQL>



List Directories in an ASM disk group:

-------------------------------------------------
-- script: see_asm_directories.sql
-- author: Michael Messina
--
-- Lists the directory tree of every ASM disk group.
-- Run from the ASM instance.
-------------------------------------------------
set pagesize 0
set head off
set linesize 80
column diskgroup format a20
column name format a50
break on diskgroup NODUPLICATES

-- Walk the alias hierarchy, keeping only directory entries and indenting
-- two spaces per tree level (the || concatenation operators were missing).
select adg.name || '/' as diskgroup,
       LPAD(' ', 2*(level-1)) || a.name as name
from v$asm_alias a,
v$asm_diskgroup adg
where adg.group_number = a.group_number
and a.alias_directory = 'Y'
start with parent_index = (select min(parent_index) from v$asm_alias)
connect by prior a.reference_index = a.parent_index ;

set pagesize 30
set head on
clear breaks


SQL> @see_asm_directories
P1CSM2_ASM_DG1/ CSP1
DATAFILE
CSP2
DATAFILE
CSP3

SQL>


List Files and their directories in ASM Disk Group:

-----------------------------------------------------------------------
-- script: see_asm_file_structure.sql
-- author: Michael Messina
-- date: 12/17/2007
--
-- Description: Lists Structure and Files of ASM Disk Groups
--
-- Instructions: Run from ASM Instance
-----------------------------------------------------------------------
set pagesize 0
set head off
set linesize 80
column diskgroup format a20
column name format a50
break on diskgroup NODUPLICATES

-- Walk the full alias hierarchy (directories and files), indenting two
-- spaces per tree level (the || concatenation operators were missing).
select adg.name || '/' as diskgroup,
       LPAD(' ', 2*(level-1)) || a.name as name
from v$asm_alias a,
v$asm_diskgroup adg
where adg.group_number = a.group_number
start with parent_index = (select min(parent_index) from v$asm_alias)
connect by prior reference_index = parent_index ;

clear breaks
set pagesize 30


SQL> @see_asm_file_structure
P1CSM2_ASM_DG1/ CSP1
DATAFILE
UNDOTBS1.257.1
UNDOTBS1.261.1
UNDOTBS1.265.1
UNDOTBS1.284.628437133
USERS.275.628437153
WCRSYS_TS.335.628461993
UDDISYS_TS.281.628462009
CSP2
DATAFILE
IC_METRIC.304.641245055
IC_SYSTEM.305.641245059
CONTENT_IFS_CTX_X.306.641245065
CONTENT_IFS_CTX_K.307.641245071
CONTENT_IFS_CTX_I.308.641245075
CONTENT_IFS_LOB_M.309.641245079
CONTENT_IFS_LOB_I.310.641245085
CONTENT_IFS_LOB_N.311.641245095
CONTENT_IFS_MAIN.318.641245105
CWSYS_MAIN_TBS.319.641245111
OVFMETRICSTBL.320.641245115
UMTBL.321.641245121
ESTERSTORE.322.641245125
ESSMLTBL.270.641245129
ESPERFTBL.323.641245133
ESORATEXT.317.641245139
ESNEWS.326.641245143
ESMRLMNR.329.641245147
ESINFREQIDX.330.641245151
ESFREQTBL.268.641245157
ESFREQIDX.328.641245161
ESBIGTBL.269.641245165
RTC_IM_INDEX.325.641245169
RTC_IM_DATA.327.641245177
RTC_REPORT_INDEX.315.641245187
RTC_REPORT_DATA.314.641245191
RTC_TRANSIENT_LOB_INDEX.324.641245195
RTC_TRANSIENT_LOB_DATA.272.641245201
RTC_TRANSIENT_INDEX.312.641245205
RTC_TRANSIENT_DATA.313.641245209
RTC_RECORDING_INDEX.316.641245215
RTC_RECORDING_DATA.331.641245219
RTC_DOCUMENT_INDEX.333.641245227
RTC_DOCUMENT_DATA.334.641245231
RTC_ARCHIVE_INDEX.271.641245235
RTC_ARCHIVE_DATA.273.641245241
RTC_TRANSACTION_INDEX.336.641245247
RTC_TRANSACTION_DATA.337.641245251
RTC_LOOKUP_INDEX.338.641245257
RTC_LOOKUP_DATA.339.641245261
OLTS_SVRMGSTORE.340.641245267
OLTS_DEFAULT.341.641245271
OLTS_CT_STORE.342.641245275
OLTS_BATTRSTORE.343.641245279
OLTS_ATTRSTORE.344.641245285
UDDISYS_TS.345.641245289
OCATS.346.641245293
BAM.347.641245297
ORABPEL.348.641245307
B2B_LOB.349.641245315
B2B_IDX.350.641245319
B2B_DT.351.641245323
B2B_RT.352.641245327
WCRSYS_TS.353.641245333
DSGATEWAY_TAB.354.641245339
DCM.355.641245343
DISCO_PTM5_CACHE.356.641245353
DISCO_PTM5_META.357.641245359
IAS_META.358.641245363
PORTAL_LOG.359.641245373
PORTAL_IDX.360.641245377
PORTAL_DOC.361.641245381
PORTAL.362.641245387
USERS.363.641245393
SYSAUX.364.641245397
UNDOTBS1.365.641245403
SYSTEM.366.641245411

78 rows selected.



Create a Directory in an ASM Disk Group from list of available ASM Disk Groups:

SQL> alter diskgroup P1CSM2_ASM_DG1 add directory '+P1CSM2_ASM_DG1/CSP3' ;

Diskgroup altered.

SQL> @see_asm_directories
P1CSM2_ASM_DG1/ CSP1
DATAFILE
CSP2
DATAFILE
CSP3


Rename a Directory in an ASM Disk Group from list of available ASM Disk Groups to another name:

SQL> alter diskgroup P1CSM2_ASM_DG1 rename directory '+P1CSM2_ASM_DG1/CSP3' TO '+P1CSM2_ASM_DG1/CSP4' ;

Diskgroup altered.

SQL> @see_asm_directories
P1CSM2_ASM_DG1/ CSP1
DATAFILE
CSP2
DATAFILE
CSP4


Remove/Drop file from ASM Disk Group using the fully qualified file name from see_asm_file_structure.sql script output:

SQL> alter diskgroup P1CSM2_ASM_DG1 drop file '+P1CSM2_ASM_DG1/CSP1/DATAFILE/UNDOTBS1.257.1';

Diskgroup altered.

SQL>



Remove/Drop a directory and all its contents from an ASM Disk Group using the fully qualified directory from see_asm_directories.sql script output:

SQL> alter diskgroup P1CSM2_ASM_DG1 drop directory '+P1CSM2_ASM_DG1/CSP4' FORCE;

Diskgroup altered.

SQL> @see_asm_directories
P1CSM2_ASM_DG1/ CSP1
DATAFILE
CSP2
DATAFILE

** Note can not remove a directory and its contents if created by default.

SQL> alter diskgroup P1CSM2_ASM_DG1 drop directory '+P1CSM2_ASM_DG1/CSP1' FORCE;
alter diskgroup P1CSM2_ASM_DG1 drop directory '+P1CSM2_ASM_DG1/CSP1' FORCE
*
ERROR at line 1:
ORA-15032: not all alterations performed
ORA-15177: cannot operate on system aliases



Add New Disk to Disk Group

SQL> ALTER DISKGROUP P1CSM2_ASM_DG1 ADD DISK '/dev/rrawlv01' NAME P1CSM2_ASM_DG1_0010 NOFORCE ;

Diskgroup altered.



Remove a disk from an ASM disk group

SQL> ALTER DISKGROUP P1CSM2_ASM_DG1 DROP DISK P1CSM2_ASM_DG1_0000 ;

Diskgroup altered.

Oracle Global Temporary Tables

Oracle Global Temporary Tables

Global temporary tables can be used to drastically improve query performance for complex queries, especially for the complex queries that have multiple summarizations and comparisons. Global temporary tables are used much like regular tables (heap table), but have some major differences that must be understood.

Think of a global temporary table as a “work table” and used more in reporting environments then in online systems as the performance gain from there use applies more to the complex queries generated to report back information. A lot of code I have seen is using permanent tables used by many session for “temporary work”. Overall performance can be improved using a global temporary table due to reduced redo logging with global temporary tables as well as reduce the maintenance overhead of removing rows and all user sessions using the same table and table segments. However global temporary tables is not a replacement for using PL/SQL tables, if using PL/SQL and the data size is not too large it is far better to use a PL/SQL table.

There may those that question the use of global temporary tables over SQL using the WITH clause, keep in mind that depending on the version of Oracle global temporary tables with an index might be the better option.

1. Global temporary tables do not use a permanent tablespace, but rather use the users assigned temporary tablespace therefore global temporary tables do not have a storage clause.
2. Global temporary tables do not allocate a segment when created segments are not allocated until a row is inserted into the global temporary table.
3. Global temporary tables do not preserve the data across a user's sessions or transactions, the data is temporary to a session or transaction depending on how the global temporary table is defined. When defined transaction wise the data is removed upon commit or rollback, on session wise the data is remove upon session end.
4. When a global temporary table is used by multiple users each user has their own temporary segment for storing data therefore all users can see the structure but user a can not see user b's data even though they may be using the same global temporary table.
5. Global temporary tables can have indexes created on them, but only when the global temporary table is empty.
6. Global temporary tables do not recognize logging/no/logging clause.
7. Truncate does work on a global temporary table, but only removes the data for the session executing the truncate.
8. Global temporary tables can not contain varray or nested table columns.
9. Global temporary tables do not support parallel query or parallel DML operations.
10. DML on global temporary tables does not generate redo for data changes, however it does record in the undo and that record is recorded in the redo. Therefore global temporary tables reduce redo, but do not eliminate it. This makes redo generation at least ½ or possibly less than ½ that of a permanent (heap) table.


Uses for Global Temporary Tables

1. when using PL/SQL and the data is too large for a PL/SQL table.
2. when not using PL/SQL and a PL/SQL table is not an option.
3. complex queries that do complex summarizations and comparisons.
4. some correlated sub-queries to improve performance.
5. in situations where data is needed for a transaction for a session and does not need to live longer then the session and the data does not need to be shared across sessions.



Create Global Temporary Tables

Transaction level global temporary table

-- Transaction-scoped GTT: rows vanish at commit/rollback.
-- Fixed: the column list was missing its closing parenthesis.
create global temporary table temp_emp_trn
( empno number primary key,
  deptno number,
  ename varchar2(30)
)
on commit delete rows ;

Session level global temporary table

-- Session-scoped GTT: rows persist across commits until the session ends.
-- Fixed: the column list was missing its closing parenthesis.
create global temporary table temp_emp_ses
( empno number primary key,
  deptno number,
  ename varchar2(30)
)
on commit preserve rows ;



Example using global temporary tables to improve performance/resource usage

Improve performance on nested sub-queries

An example may be we want to see if we have any employees that have never had a performance review. Employees can have many performance reviews over time therefore there could be many records in the review table for each employee so over time as the review table grows this could get to be a more intensive operation.

The query would look like this.

-- Employees with no review record.
-- NOTE(review): NOT IN returns zero rows if review.empno can ever be NULL;
-- if empno is nullable, prefer NOT EXISTS with a correlated subquery.
select empno, deptno, ename
from scott.emp
where empno NOT IN
(select empno from review) ;

Elapsed: 00:00:00.03

-----------------------------------------------------------------------------
Id Operation Name Rows Bytes Cost (%CPU) Time
-----------------------------------------------------------------------------
0 SELECT STATEMENT 108 7128 244 (1) 00:00:03
* 1 HASH JOIN ANTI NA 108 7128 244 (1) 00:00:03
2 TABLE ACCESS FULL EMP 2005 103K 5 (0) 00:00:01
3 TABLE ACCESS FULL REVIEW 41175 522K 239 (1) 00:00:03
-----------------------------------------------------------------------------

Statistics
----------------------------------------------------------
52 recursive calls
0 db block gets
897 consistent gets
0 physical reads
0 redo size
659 bytes sent via SQL*Net to client
381 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
2 sorts (memory)
0 sorts (disk)
5 rows processed


Let see what it would look like using global temporary tables where we can generate a list of distinct employee’s empno from the review table. This will reduce the read for each iteration of the not in.

-- Create an empty transaction-scoped GTT with the same empno column
-- definition as review (WHERE 1=0 copies structure only, no rows).
create global temporary table temp_review
on commit delete rows
as
select empno
from review
where 1=0 ;

-- populate the global temporary table with one row per reviewed employee
insert into temp_review
select distinct empno
from review ;

-- select using the global temporary table
select empno, deptno, ename
from emp
where
empno not in
(select empno from temp_review) ;

Elapsed: 00:00:00.01

-------------------------------------------------------------------------------------
Id Operation Name Rows Bytes Cost (%CPU) Time
-------------------------------------------------------------------------------------
0 SELECT STATEMENT 5 330 9 (12) 00:00:01
* 1 HASH JOIN RIGHT ANTI NA 5 330 9 (12) 00:00:01
2 TABLE ACCESS FULL TEMP_REVIEW 2000 26000 3 (0) 00:00:01
3 TABLE ACCESS FULL EMP 2005 103K 5 (0) 00:00:01
-------------------------------------------------------------------------------------

Statistics
----------------------------------------------------------
8 recursive calls
0 db block gets
48 consistent gets
0 physical reads
0 redo size
659 bytes sent via SQL*Net to client
381 bytes received via SQL*Net from client
2 SQL*Net roundtrips to/from client
2 sorts (memory)
0 sorts (disk)
5 rows processed


Now this has to be balanced out with the cost and time it takes to build the global temporary tables, but as you can see we can significantly reduce the work of a query by using a global temporary table. Now reduction in work does not always equal a reduction in elapsed runtime, but a system under load should see performance improvement when the amount of work is reduced. The example here over several executions either showed elapsed time improvement or the elapsed time was equal.



Complete Script using Global Temporary Tables

set linesize 300

-- spool our output to a file for review
spool global_temporary_tables.out

-- drop our 2 tables in case they already exist
drop table emp ;
drop table review ;

-- create our 2 permanent tables
create table emp
( empno number,
deptno number,
ename varchar2(50)
)
tablespace users ;

create index emp_empno
on emp (empno)
tablespace users ;

create table review
( empno number,
review_date date,
overall_rating number,
review_file varchar2(500),
comments varchar2(1000)
)
tablespace users ;

create index review_empno
on review (empno)
tablespace users ;

-- create the global temporary table we need
-- (WHERE 1=0 copies the column definition only, no rows)
create global temporary table temp_review
on commit delete rows
as
select empno
from review
where 1=0 ;

---------------------------------------------
-- populate the 2 permenant tables with data
---------------------------------------------
-- populate emp with 2000 employees
declare
   v_deptno number := 10 ;      -- all generated employees land in department 10
   v_ename  varchar2(50) ;
begin
   -- numeric FOR loop replaces the original manual counter/exit loop
   for v_loop_cnt in 1 .. 2000
   loop
      -- bug fix: the concatenation operator was missing
      -- ('Employyee ' TO_CHAR(...) is not valid PL/SQL); the name is also
      -- corrected to match the 'Employee NNNN' format of the rows
      -- hand-inserted later in this script
      v_ename := 'Employee ' || to_char(v_loop_cnt) ;

      -- insert our employee record (explicit column list)
      insert into emp (empno, deptno, ename)
      values (v_loop_cnt, v_deptno, v_ename) ;
   end loop ;

   commit ;
end ;
/

-- safety commit (the block above already committed)
commit ;

-- populate review: 20 reviews for each employee (one per future year)
declare
   v_comment     varchar2(1000) := 'Put the same comment in for all employees since it does not matter for our example' ;
   v_file        varchar2(500)  := '/same/file/for/all/employees.doc' ;
   v_review_date date ;
begin
   -- one pass per review year, 20 years total
   for v_loop_cnt in 1 .. 20
   loop
      -- bug fix: the two concatenation operators were missing here
      -- (adjacent string expressions are not valid PL/SQL); build a date
      -- on today's day/month but v_loop_cnt years in the future
      v_review_date := to_date( to_char(sysdate, 'DD-MON') || '-' ||
                                to_char( to_number( to_char(sysdate, 'YYYY')) + v_loop_cnt),
                                'DD-MON-YYYY') ;

      -- cursor FOR loop: one review per employee, with the explicit
      -- open/fetch/exit/close bookkeeping handled implicitly
      for r_emp in (select empno from emp)
      loop
         insert into review (empno, review_date, overall_rating, review_file, comments)
         values (r_emp.empno, v_review_date, 4, v_file, v_comment) ;
      end loop ;
   end loop ;
end ;
/

commit ;

-- Add a few employees without reviews: these are exactly the rows the
-- anti-join queries below should return.
-- Explicit column lists added (INSERT ... VALUES without one breaks
-- silently if the table definition ever changes).
insert into emp (empno, deptno, ename) values (9000, 20, 'Employee 9000') ;
insert into emp (empno, deptno, ename) values (9001, 20, 'Employee 9001') ;
insert into emp (empno, deptno, ename) values (9002, 20, 'Employee 9002') ;
insert into emp (empno, deptno, ename) values (9003, 20, 'Employee 9003') ;
insert into emp (empno, deptno, ename) values (9004, 20, 'Employee 9004') ;
commit ;

-- get the count of employees (expect 2005)
select count(*) from emp ;

-- get the count of reviews (expect 2000 employees * 20 years = 40000)
select count(*) from review ;

set timing on

set autotrace on
-- select our data normal, how long does it take
-- NOTE(review): NOT IN is only safe here because review.empno is populated
-- from emp.empno values generated above and is never NULL; a NULL in the
-- subquery would make this return zero rows -- NOT EXISTS would be the
-- defensive choice, but would change the plan this article measures.
select empno, deptno, ename
from emp
where empno not in (select empno from review) ;
set autotrace off

-- populate the global temporary table with the distinct reviewed employees
-- (DISTINCT collapses the 20 reviews per employee down to one row each,
-- which is what shrinks the work for the second query)
insert into temp_review
select distinct empno
from review ;

set autotrace on
-- get our results: the same anti-join, but driven off the much smaller
-- pre-deduplicated temporary table
select empno, deptno, ename
from emp
where
empno not in
(select empno from temp_review) ;
set autotrace off
set timing off

-- commit we are all done (this also empties the ON COMMIT DELETE ROWS
-- temporary table)
commit ;

-- turn our spooling off
spool off