Tuesday, August 23, 2022
----------------------------------------------------------------------------------------------
*********************************************************************************************
0) OS COMMAND
0.1) check system info ( in sun solaris)
prstat
prstat -a
(give user memories)
0.2)where to check SERVER (IO) logs?
/var/adm/messages
0.3) ls
ls -l | sort -k5,5 -nr    (old "sort +4 -nr" field syntax is obsolete in modern coreutils)
(check in size order)
ls -lah
(readable format)
0.4) check windows server uptime
net statistics server
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
1. how to find remote process job?
SQL> !ssh jde812@usbderp323 pargs 7517
7517: runbatch LEWISA /u02/jdedwards/temp/APLofpybm JNAPD *ALL 529283 /u04/e1vhbd323/
argv[0]: runbatch
argv[1]: LEWISA
argv[2]: /u02/jdedwards/temp/APLofpybm
argv[3]: JNAPD
argv[4]: *ALL
argv[5]: 529283
argv[6]: /u04/e1vhbd323/PrintQueue/R55FRGL84_BXBLMAAO_529283_PDF
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
!ssh jde812@usbderp322 pargs 29495
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
2. how to find temp space usage in RAC ,and identifying which sessions are consuming most resources
2.1) To check instance-wise total allocated, total used TEMP for both rac and non-rac
-- Per-instance TEMP allocation/usage from gv$sort_segment (RAC and non-RAC).
set lines 152
col FreeSpaceGB format 999.999
col UsedSpaceGB format 999.999
col TotalSpaceGB format 999.999
col host_name format a30
col tablespace_name format a30
-- Blocks are converted to GB as blocks * 8 / 1024 / 1024.
-- NOTE(review): this hard-codes an 8 KB db_block_size -- confirm against
-- v$parameter on the target DB (the section 2.2 script reads it dynamically).
select tablespace_name,
(free_blocks*8)/1024/1024 FreeSpaceGB,
(used_blocks*8)/1024/1024 UsedSpaceGB,
(total_blocks*8)/1024/1024 TotalSpaceGB,
i.instance_name,i.host_name
-- Only temporary tablespaces are kept; gv$instance supplies instance/host names.
from gv$sort_segment ss,gv$instance i where ss.tablespace_name in (select tablespace_name from dba_tablespaces where contents='TEMPORARY') and
i.inst_id=ss.inst_id;
FOR example:
TABLESPACE_NAME FREESPACEGB USEDSPACEGB TOTALSPACEGB INSTANCE_NAME HOST_NAME
------------------------------ ----------- ----------- ------------ ---------------- ------------------------------
TMP_TEMP 77.138 .004 77.142 userpp3 usbderp230
TMP_USR 1.453 3.992 5.445 userpp3 usbderp230
TMP_UCANE1 3.414 .001 3.415 userpp3 usbderp230
TMP_INSIGHT 4.566 .177 4.743 userpp3 usbderp230
TMP_ET 4.495 .000 4.495 userpp3 usbderp230
TMP_DBA 1.365 .001 1.366 userpp3 usbderp230
TMP_FIN 32.927 .006 32.933 userpp3 usbderp230
TMP_BMC 42.734 .020 42.754 userpp3 usbderp230
TMP_TEMP 5.143 .004 5.146 userpp2 usbderp220
TMP_USR .000 90.549 90.549 userpp2 usbderp220
TMP_UCANE1 56.554 60.882 117.436 userpp2 usbderp220
TABLESPACE_NAME FREESPACEGB USEDSPACEGB TOTALSPACEGB INSTANCE_NAME HOST_NAME
------------------------------ ----------- ----------- ------------ ---------------- ------------------------------
TMP_ET 3.365 .006 3.371 userpp2 usbderp220
TMP_DBA 9.786 .014 9.800 userpp2 usbderp220
TMP_FIN 57.956 5.082 63.038 userpp2 usbderp220
TMP_BMC .002 .005 .007 userpp2 usbderp220
TMP_TEMP 13.612 .004 13.616 userpp1 usbderp210
TMP_ET 16.762 .008 16.770 userpp1 usbderp210
TMP_DBA 5.336 .000 5.336 userpp1 usbderp210
you can see for TMP_USR free space on instance2 is 0.
2.2) Go to that node ( for example instance 2 in this case ) and find what top sessions are using temp space.
set linesize 250
set pagesize 1000
set verify off
-- Fragment: a PL/SQL cursor declaration -- the enclosing DECLARE/BEGIN block is
-- not shown in these notes.  The inner SELECT can be run stand-alone by
-- dropping the "cursor bigtemp_sids is" line.
-- Purpose: top 10 sessions by temp space consumed on the current instance.
-- v$sort_usage blocks are converted to MB using db_block_size fetched from
-- v$parameter (that join returns a single row, so it acts as a constant).
cursor bigtemp_sids is
select * from (
select s.sid,
s.status,
s.sql_hash_value sesshash,
u.SQLHASH sorthash,
s.username,
u.tablespace,
sum(u.blocks*p.value/1024/1024) mbused ,
sum(u.extents) noexts,
nvl(s.module,s.program) proginfo,
-- last_call_et (seconds) rendered as H:M:S since the session's last call
floor(last_call_et/3600)||':'||
floor(mod(last_call_et,3600)/60)||':'||
mod(mod(last_call_et,3600),60) lastcallet
from v$sort_usage u,
v$session s,
v$parameter p
where u.session_addr = s.saddr
and p.name = 'db_block_size'
group by s.sid,s.status,s.sql_hash_value,u.sqlhash,s.username,u.tablespace,
nvl(s.module,s.program),
floor(last_call_et/3600)||':'||
floor(mod(last_call_et,3600)/60)||':'||
mod(mod(last_call_et,3600),60)
-- order by column 7 (mbused) descending, then column 3 (sesshash)
order by 7 desc,3)
where rownum < 11;
SID STATUS SESSHASH SORTHASH USERNAME TABLESPACE MBUSED NOEXTS
---------- -------- ---------- ---------- ------------------------------ ------------------------------- ---------- ----------
PROGINFO
------------------------------------------------
LASTCALLET
--------------------------------------------------------------------------------------------------------------------------
3174 INACTIVE 1064915270 1079509855 UCANE1US TMP_UCANE1 51687 51687
234:19:13
3334 INACTIVE 2243203096 1079509855 UCANE1USR TMP_USR 7752 7752
17:39:29
SID STATUS SESSHASH SORTHASH USERNAME TABLESPACE MBUSED NOEXTS
---------- -------- ---------- ---------- ------------------------------ ------------------------------- ---------- ----------
PROGINFO
------------------------------------------------
LASTCALLET
--------------------------------------------------------------------------------------------------------------------------
3348 INACTIVE 2243203096 1079509855 UCANE1USR TMP_USR 7732 7732
40:23:45
1987 INACTIVE 2243203096 3387509314 UCANE1USR TMP_USR 7732 7732
SID STATUS SESSHASH SORTHASH USERNAME TABLESPACE MBUSED NOEXTS
---------- -------- ---------- ---------- ------------------------------ ------------------------------- ---------- ----------
PROGINFO
------------------------------------------------
LASTCALLET
--------------------------------------------------------------------------------------------------------------------------
40:13:55
6713 INACTIVE 2243203096 3109884340 UCANE1USR TMP_USR 7663 7663
162:25:0
2881 INACTIVE 142550302 875490233 UCANE1USR TMP_USR 7602 7602
SID STATUS SESSHASH SORTHASH USERNAME TABLESPACE MBUSED NOEXTS
---------- -------- ---------- ---------- ------------------------------ ------------------------------- ---------- ----------
PROGINFO
------------------------------------------------
LASTCALLET
--------------------------------------------------------------------------------------------------------------------------
218:23:35
6628 INACTIVE 2243203096 3329920338 UCANE1US TMP_UCANE1 7582 7582
240:35:46
SID STATUS SESSHASH SORTHASH USERNAME TABLESPACE MBUSED NOEXTS
---------- -------- ---------- ---------- ------------------------------ ------------------------------- ---------- ----------
PROGINFO
------------------------------------------------
LASTCALLET
--------------------------------------------------------------------------------------------------------------------------
6831 INACTIVE 820835938 2121987863 UCANE1FIN TMP_FIN 1373 1373
72:10:5
6538 INACTIVE 820835938 1079509855 UCANE1FIN TMP_FIN 1373 1373
72:13:38
SID STATUS SESSHASH SORTHASH USERNAME TABLESPACE MBUSED NOEXTS
---------- -------- ---------- ---------- ------------------------------ ------------------------------- ---------- ----------
PROGINFO
------------------------------------------------
LASTCALLET
--------------------------------------------------------------------------------------------------------------------------
2624 INACTIVE 820835938 1079509855 UCANE1FIN TMP_FIN 1367 1367
165:56:50
10 rows selected.
2.3) decide what to do with these sessions!
sess_by_sid.sql (Input is SID)
-- sess_by_sid.sql (input: SID): show one session's OS user, module, shadow
-- process, remote machine and logon time.  SidSerial is rendered as
-- 'sid,serial#' ready to paste into ALTER SYSTEM KILL SESSION.
set linesize 250
set pagesize 1000
set verify off
column SidSerial format a12 heading "SID-SERIAL"
column sid format 9999 heading "SID"
column serial format 99999 heading "Serial "
column osuser format a8 heading "OS User"
column username format a16 heading "Username"
column module format a14 heading "Module"
column spid format 999999 heading "Shadow|Process"
column process format 999999 heading "Remote|Process"
column machine format a20 heading "Remote|Machine"
column time format a15 heading "Logon Time"
column status format a8 heading "Status"
select ''''||s.sid||','||s.serial#||'''' SidSerial,
--select s.sid sid ,
--s.serial# serial,
s.osuser osuser,
s.username username,
s.module module,
p.spid spid,
s.process process,
s.machine machine,
to_char(s.logon_time, 'mm/dd hh24:mi:ss') time,
s.status status
-- v$process is outer-joined so the session shows even without a server process.
from v$session s, v$process p
where s.paddr = p.addr (+)
and s.sid = &sid
/
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
3. get session from pid ( here is the process id from remote server )
3.1) given a PID , find the session details.
sess_by_pid.sql (Input is PID)
-- sess_by_pid.sql (input: client/remote OS process id) -- RAC-aware version.
-- Maps a PID seen on the application server to the database session(s) using it.
set linesize 250
set pagesize 1000
set verify off
column sid format 99999 heading "SID"
column serial format 99999 heading "Serial "
column osuser format a8 heading "OS User"
column username format a16 heading "Username"
column module format a14 heading "Module"
column spid format 999999 heading "Shadow|Process"
column process format 999999 heading "Remote|Process"
column machine format a20 heading "Remote|Machine"
column time heading "Logon Time"
column status format a8 heading "Status"
-- FIX(review): the original outer-joined on paddr/addr but used a plain
-- equality on INST_ID, which silently turned the (+) join back into an inner
-- join.  The (+) now appears on both gv$process predicates, so sessions
-- without a matching server process row are still listed (as in the
-- single-node variant below).
select
s.INST_ID iid,
s.sid sid,
s.serial# serial,
s.osuser osuser,
s.username username,
s.module module,
p.spid spid,
s.process process,
s.machine machine,
to_char(s.logon_time, 'mm/dd hh24:mi:ss') time,
s.status status
from gv$session s, gv$process p
where s.paddr = p.addr (+)
and s.process = '&pid'
and s.INST_ID = p.INST_ID (+)
/
For single node
-- Single-instance variant: map a client OS process id to its session.
SELECT
    s.sid                                     sid,
    s.serial#                                 serial,
    s.osuser                                  osuser,
    s.username                                username,
    s.module                                  module,
    p.spid                                    spid,
    s.process                                 process,
    s.machine                                 machine,
    to_char(s.logon_time, 'mm/dd hh24:mi:ss') time,
    s.status                                  status
FROM v$session s, v$process p
-- outer join: keep the session row even with no matching server process
WHERE s.paddr = p.addr (+)
  AND s.process = '&pid'
/
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
3.2) HOW TO FIND BIND VALUE?
find binds
select * from table(dbms_xplan.display_cursor('bscu1grtkg20y', format=>'typical +peeked_binds'))
SELECT NAME,POSITION,DATATYPE_STRING,VALUE_STRING FROM gv$sql_bind_capture WHERE sql_id='akt2ntu7z38c6';
historical binds
SELECT SNAP_ID,NAME,POSITION,DATATYPE_STRING,VALUE_STRING FROM dba_hist_sqlbind WHERE sql_id='9z226w663vkbt' and SNAP_ID in (58089,58090,58091);
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
3.3) HOW TO CHECK BIP ALERT
like
To narrow the search for the bad SQL, the Machine is usroerp422 and the PID will be the generic ID of 1234.
a) know what's UCAN and what's LATAM? MACHINE end with 3 ( like usroerp423 ) are about UCAN. MACHINE ended with 2 (like usroerp422) are LATAM.
b) check pid=1234, and instance id, and part of sql ( SQL MAY NOT BE EXACTLY THE SAME)
ALTER SESSION SET NLS_DATE_FORMAT='DD-MON-YYYY HH24:MI:SS';
set pages 1000
select a.USERNAME,a.OSUSER, a.inst_id,a.sid,a.serial#,a.status,a.machine,a.process,b.sql_fulltext,a.LOGON_TIME
from gv$session a,gv$sql b where a.sql_id=b.sql_id and a.process='1234' and a.inst_id=b.inst_id
and a.MACHINE='usroerp423' and b.SQL_TEXT like '%JDE.F4801%';
c) count number of EMAILS (SQL) GIVEN TO YOU, THAT IS THE NUMBER OF SESSIONS YOU ARE TO KILL. REMEMBER TO KILL IT ON LOCAL MACHINE>
d) don't check crossnode:( if server is awfully busy.
USUALLY YOU CAN USE PROCESS AND MACHINE TO IDENTIFY process details.
select a.sid,a.serial#,a.status,a.logon_time,a.last_call_et,a.machine,a.process,b.sql_fulltext
from v$session a,v$sql b where a.sql_id=b.sql_id and a.process='1234' and a.MACHINE='usroerp423' ;
3.4) HOW TO GO FROM SQL TEXT TO SESSION
give sql text get sql id
select INST_ID, SQL_ID,SQL_FULLTEXT from gv$sql where SQL_TEXT like '%AND ( ( CCCO = :KEY3 OR CCCO BETWEEN :KEY4 AND :KEY5 OR CCCO = :KEY6 OR CCCO = :KEY7 OR CCCO = :KEY8 OR CCCO = :KEY9 OR CCCO BETWEEN :KEY10 AND :KEY11 )%';
GIVEN HISTORICAL SQL TEXT, GET SQL_ID
select DBID, SQL_ID, SQL_TEXT from dba_hist_sqltext where sql_text like '%your query%';
3.5) HOW TO GO FROM SQLID TO SESSION;
select SID,SERIAL# from v$session where SQL_ID='5gr84n0fxfdfr';
3.6) HOW TO KILL SESSIONS IN ONE BUNCH;
-- Generate "kill" statements in one bunch for every session running a given SQL_ID.
-- FIX(review): the original concatenation produced ...'immediate; with no space
-- before IMMEDIATE, which is not valid ALTER SYSTEM KILL SESSION syntax.
-- Fill in the sql_ID literal before running (an empty string matches nothing in Oracle).
select 'alter system kill session '''||b.sid||','||b.serial#||''' immediate;' from v$session b where sql_ID='';
-- Same idea keyed on remote machine + client process id.
select 'alter system kill session '''||s.sid||','||s.serial#||''' immediate;' from v$session s , v$process p where s.machine='usbderp433' and s.process='1234' and s.paddr = p.addr (+) and s.status='ACTIVE';
-- Sanity check: number of sessions attached to that client process id.
select count(*) from v$session s where s.process='1234';
********************************************************************************************
--------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
4. add datafile in baxter
alter tablespace lasm812 add datafile '/global/la/data1/oradata/laerpp/lasm81202.dbf' size 100M autoextend on next 10M;
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
5. LET'S DO SOME DBLINKS
IN LAUT
sqlplus dzhang
ALTER session SET GLOBAL_NAMES=FALSE;
CREATE DATABASE LINK edlink CONNECT TO dzhang IDENTIFIED BY jicheng1105 USING 'LAERPP';
Insert into lautdta.f43121 select * from lapddta.f43121@ed where prKCOO = '03420' and prDOCO in ( 34955, 50074) and prDCTO in ( 'OP', 'OK') and prDOC in ( 91459, 91949) and prDCT = 'OV';
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
6. sql trace and performance test
1.) GIVEN SQL ID, FIND HISTORY
1.1
-- AWR execution history for one SQL_ID: per-snapshot executions, physical and
-- logical reads per exec, and elapsed time per exec over a given time window.
prompt enter start and end times in format DD-MON-YYYY [HH24:MI]
set pages 1000
column sample_end format a21
select to_char(min(s.end_interval_time),'DD-MON-YYYY DY HH24:MI') sample_end
, q.sql_id
, q.plan_hash_value
, sum(q.EXECUTIONS_DELTA) executions
-- greatest(...,1) guards against divide-by-zero when no executions completed
, round(sum(DISK_READS_delta)/greatest(sum(executions_delta),1),1) pio_per_exec
, round(sum(BUFFER_GETS_delta)/greatest(sum(executions_delta),1),1) lio_per_exec
-- ELAPSED_TIME_delta is in microseconds; /1000 yields milliseconds per exec
, round((sum(ELAPSED_TIME_delta)/greatest(sum(executions_delta),1)/1000),1) msec_exec
from dba_hist_sqlstat q, dba_hist_snapshot s
where q.SQL_ID=trim('&sqlid.')
and s.snap_id = q.snap_id
and s.dbid = q.dbid
and s.instance_number = q.instance_number
and s.end_interval_time >= to_date(trim('&start_time.'),'dd-mon-yyyy hh24:mi')
and s.begin_interval_time <= to_date(trim('&end_time.'),'dd-mon-yyyy hh24:mi')
-- &hr24_filter optionally restricts output to one hour of day; leave empty for all
and substr(to_char(s.end_interval_time,'DD-MON-YYYY DY HH24:MI'),13,2) like '%&hr24_filter.%'
group by s.snap_id
, q.sql_id
, q.plan_hash_value
order by s.snap_id, q.sql_id, q.plan_hash_value
/
9bpw0p3u8fjqr
01-MAR-2015 16:30
04-MAY-2015 17:00
bk829dnqk0m90
.................
Another SQL running history: number of active sessions running the SQL per snapshot, which is important when tracing spikes.
-- Per-snapshot count of ASH samples for one SQL_ID -- a proxy for how many
-- active sessions were running the statement over time (useful for spikes).
column sample_end format a21
select to_char(min(s.end_interval_time),'DD-MON-YYYY DY HH24:MI') sample_end
, q.sql_id
, count(q.SESSION_ID) ACTIVE_SESSIONS
from dba_hist_active_sess_history q, dba_hist_snapshot s
where q.SQL_ID=trim('&sqlid.')
and s.snap_id = q.snap_id
and s.dbid = q.dbid
and s.instance_number = q.instance_number
-- NOTE(review): hard-pinned to instance 3 -- adjust for the node under investigation
and q.instance_number=3
and s.end_interval_time >= to_date(trim('&start_time.'),'dd-mon-yyyy hh24:mi')
and s.begin_interval_time <= to_date(trim('&end_time.'),'dd-mon-yyyy hh24:mi')
and substr(to_char(s.end_interval_time,'DD-MON-YYYY DY HH24:MI'),13,2) like '%&hr24_filter.%'
group by s.snap_id
, q.sql_id
order by s.snap_id, q.sql_id;
1.2
or you can use
SELECT * FROM table(dbms_xplan.display_awr('b386y052gwx5'));
to check historical sql execution in awr
1.3
USE
@?/rdbms/admin/awrsqrpt.sql
to get historical execution plans.
2) check future plan
explain plan for
xxxxxxx
;
SELECT * FROM table(dbms_xplan.display);
3) CHECK CURRENT PLAN
SELECT * FROM table(dbms_xplan.display_cursor('b386y052gwx5',1));
4) check current execution plan loading time
SQL> select SQL_ID,LAST_LOAD_TIME,FIRST_LOAD_TIME,LAST_ACTIVE_TIME,PLAN_HASH_VALUE from v$sql where SQL_ID='a29ja6kcz1sxr';
SQL_ID LAST_LOAD_TIME FIRST_LOAD_TIME LAST_ACTI PLAN_HASH_VALUE
------------- ------------------- ------------------- --------- ---------------
a29ja6kcz1sxr 2014-01-15/12:14:48 2013-12-29/08:25:04 15-JAN-14 1654179965
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
7. ggs long transaction warnings
7.1 check email alert, find which node(thread) has long transaction , note down time
7.2 go to the node, run this script to get transaction details, link them to a SID, and kill the session when possible.
-- List all open transactions with their owning session and undo usage,
-- oldest first -- used to chase GoldenGate long-transaction warnings.
set pages 100
col sid format 99999 heading "SID"
col uname format a16 heading "Username"
col osuser format a10 heading "OSUser"
col stime format a17 heading "Start Time"
col lio format 9999999 heading "Log_IO"
col pio format 9999999 heading "Phy_IO"
col cgets format 999999999 heading "Consistent| Gets"
col cchng format 999999999 heading "Consistent| Changes"
col ublk format 9999999 heading "Used|Undo|Blks"
col status format a9 heading "Status"
select s.sid sid,
s.osuser osuser,
s.username uname,
t.start_time stime,
t.log_io lio,
t.phy_io pio,
t.cr_get cgets,
t.cr_change cchng,
t.used_ublk ublk,
t.status status
-- ses_addr links each transaction to its session (single-instance views)
from v$session s, v$transaction t
where t.ses_addr = s.saddr
order by t.start_time
/
....
FORMAL HANDLING
GGSCI (ggnode1) 7>send extract EXRAJ, showtrans duration 20 MIN
Sending showtrans request to EXTRACT EXRAJ ...
Oldest redo log files necessary to restart Extract are:
Redo Thread 1, Redo Log Sequence Number 74058, SCN 1586.3900894393 (6815719025849), RBA 104066064
Redo Thread 2, Redo Log Sequence Number 79546, SCN 1586.3894494112 (6815712625568), RBA 1458358288
------------------------------------------------------------
XID: 1630.17.201166
Items: 1
Extract: EXRAJ
Redo Thread: 1
Start Time: 2013-05-10:21:43:50
SCN: 1586.3900894393 (6815719025849)
Redo Seq: 74058
Redo RBA: 104066064
Status: Running
------------------------------------------------------------
XID: 9068.29.296116
Items: 1
Extract: EXRAJ
Redo Thread: 2
Start Time: 2013-05-10:21:13:50
SCN: 1586.3894494112 (6815712625568)
Redo Seq: 79546
Redo RBA: 1458358288
Status: Running
------------------------------------------------------------
XID: 8805.6.296139
Items: 1
Extract: EXRAJ
Redo Thread: 2
Start Time: 2013-05-10:21:24:33
SCN: 1586.3896755063 (6815714886519)
Redo Seq: 79547
Redo RBA: 791874576
Status: Running
22:09:08 SQL> select * from gv$transaction where xidusn=8805;
INST_ID ADDR XIDUSN XIDSLOT XIDSQN UBAFIL UBABLK UBASQN UBAREC STATUS START_TIME START_SCNB START_SCNW START_UEXT START_UBAFIL
---------- ---------------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------------- -------------------- ---------- ---------- ---------- ------------
START_UBABLK START_UBASQN START_UBAREC SES_ADDR FLAG SPA REC NOU PTX NAME PRV_XIDUSN PRV_XIDSLT PRV_XIDSQN PTX_XIDUSN
------------ ------------ ------------ ---------------- ---------- --- --- --- --- ---------------------------------------------------------------------- ---------- ---------- ---------- ----------
PTX_XIDSLT PTX_XIDSQN DSCN-B DSCN-W USED_UBLK USED_UREC LOG_IO PHY_IO CR_GET CR_CHANGE START_DATE DSCN_BASE DSCN_WRAP START_SCN DEPENDENT_SCN XID
---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- --------------- ---------- ---------- ---------- ------------- ----------------
PRV_XID PTX_XID
---------------- ----------------
2 0000001BFF3DEBD8 8805 6 296139 856 125452 27952 5 ACTIVE 05/10/13 21:24:33 3896755063 1586 4 856
125452 27952 5 0000001C84587F40 7683 NO NO NO NO 0 0 0 0
0 0 0 0 1 1 5 0 0 0 10-MAY-13 0 0 6.8157E+12 0 22650006000484CB
0000000000000000 0000000000000000
22:09:38 SQL> select sid,serial#,event,machine,sql_id,seconds_in_wait,prev_sql_id,module,program,action from gv$session where taddr='0000001BFF3DEBD8';
SID SERIAL# EVENT MACHINE SQL_ID SECONDS_IN_WAIT PREV_SQL_ID MODULE PROGRAM
----- ---------- ------------------------------ --------------- ------------- --------------- ------------- -------------- ------------------------------------------------
ACTION
----------------------------------------------------------------
9871 167 SQL*Net message from client client07 2710 7zwu0n8myp5vn occ-raj-jar occworker@client07 (TNS V1-V3)
20f669b057722
22:09:43 SQL> select
hash_value, address,
executions,buffer_gets, disk_reads,
round(buffer_gets/decode(executions, 0, 1, executions), 1) avg_gets,
round(disk_reads/decode(executions, 0, 1, executions), 1) avg_disk,
last_load_time,
module,
sql_fulltext
from v$sqlarea
where sql_id='&sql_id';
Enter value for sql_id: 7zwu0n8myp5vn
HASH_VALUE ADDRESS EXECUTIONS BUFFER_GETS DISK_READS AVG_GETS AVG_DISK LAST_LOAD_TIME MODULE
---------- ---------------- ---------- ----------- ---------- ---------- ---------- ------------------- -------------------
SQL_FULLTEXT
--------------------------------------------------------------------------------
669685620 0000001A4E4E8840 16774570 43243144 5728 2.6 0 10-MAY-13 occ-raj-jarers
INSERT INTO user_info (uid, part_key, t_id, uname) VALUES (:uid, :part_key, :t_id) /*Truncated the sql*/
you will see the following error message in ggserr.log file
2013-05-10 22:15:15 INFO OGG-00987 Oracle GoldenGate Command Interpreter for Oracle: GGSCI command (oracle): send EXRAJ showtrans duration 45m.
2013-05-10 22:15:16 INFO OGG-01021 Oracle GoldenGate Capture for Oracle, EXRAJ.prm: Command received from GGSCI: showtrans duration 45m.
Long Running Transaction: XID 8805.6.296139, Items 1, Extract EXRAJ, Redo Thread 2, SCN 1586.3896755063 (6815714886519), Redo Seq #79547, Redo RBA 791874576.
2013-05-10 22:15:20 WARNING OGG-01027 Oracle GoldenGate Capture for Oracle, EXRAJ.prm: Long Running Transaction: XID 8805.6.296139, Items 1, Extract EXRAJ, Redo Thread 2, SCN 1586.3896755063 (6815714886519), Redo Seq #79547, Redo RBA 791874576.
2013-05-10 22:15:26 INFO OGG-00987 Oracle GoldenGate Command Interpreter for Oracle: GGSCI command (oracle): send EXRAJ showtrans duration 90m.
2013-05-10 22:15:28 INFO OGG-01021 Oracle GoldenGate Capture for Oracle, EXRAJ.prm: Command received from GGSCI: showtrans duration 90m.
select logon_time,status,LAST_CALL_ET from gv$session where sid=9871 and inst_id=2;
LOGON_TIME STATUS LAST_CALL_ET
------------------- -------- ------------
05/10/2013 19:26:36 INACTIVE 3226
or you can use:
set echo on
set timing on
col sid format 999999
col serial# format 999999
alter session set nls_date_format='MM/DD/YYYY HH24:MI:SS';
select t.start_time,t.status TSTATUS, s.status SSTATUS,s.sid, s.serial# ,s.machine , s.sql_id,s.prev_sql_id,s.process,t.XIDUSN||'.'||t.XIDSLOT||'.'||t.XIDSQN XID from gv$transaction t, gv$session s where t.addr=s.taddr and t.inst_id=s.inst_id and t.start_date < (sysdate-1/142) order by t.start_time;
-- if needed, you can go ahead and kill the sql (if it is not of a major impact)
alter system kill session '9871,167';
********************************************************************************************
---------------------------------------------------------------------------------------------
8. use rman to remove archives
DELETE NOPROMPT OBSOLETE RECOVERY WINDOW OF 4 DAYS;
RMAN>delete archivelog all;
RMAN>delete archivelog until time 'SYSDATE-10';
RMAN>delete archivelog from time 'SYSDATE-10';
RMAN>delete archivelog from time 'SYSDATE-10' until time 'SYSDATE-2';
RMAN>delete archivelog from sequence 1000;
RMAN>delete archivelog until sequence 1500;
RMAN>delete archivelog from sequence 1000 until sequence 1500;
or use this one
delete noprompt archivelog all completed before 'sysdate-3';
use os command to delete archive logs:
For example:
find . -mtime +1 # find files modified more than 48 hours ago
find /global/fs4/oraarch/aperpp/*.arc -mtime +3 -exec rm {} \;
find /global/fs4/oraarch/aperpp/*.arc -mtime +3 -exec ls -lt {} \;
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
9. logminer
9.1)
check doc at
userpqdr3 /export/home/oracle/output.txt
SQL> ALTER SESSION SET NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI:SS';
Session altered.
SQL> set numformat 9999999999999999999999
SQL> exec dbms_logmnr.add_logfile('/global/us/recovery/db_recovery/USERPQDR/archivelog/2014_02_18/o1_mf_3_129636_9j73dyd7_.arc')
PL/SQL procedure successfully completed.
SQL> exec dbms_logmnr.start_logmnr(options=>dbms_logmnr.dict_from_online_catalog);
PL/SQL procedure successfully completed.
SQL> select username, os_username, DATA_OBJ#, rbasqn, RBABLK, rbabyte, RS_ID, row_id, rollback,
2 seg_name,seg_owner,operation, timestamp, sql_redo, scn,
3 cscn from v$logmnr_contents where xidusn || '.' || xidslt || '.' || xidsqn='107.33.4880933'
4 ;
9.2) MINING DATE RANGE.....
alter session set nls_date_format="DD-MON-YYYY HH24:MI:SS";
begin
dbms_logmnr.start_logmnr (
starttime => '14-APR-2014 07:00:00',
endtime => '14-APR-2014 14:00:00',
options => dbms_logmnr.dict_from_online_catalog +
dbms_logmnr.continuous_mine +
dbms_logmnr.no_sql_delimiter +
dbms_logmnr.print_pretty_sql
);
end;
/
analysis
column sql_undo format a35
column sql_redo format a35
set lines 10000
set pages 200
select scn ,operation, sql_redo from v$logmnr_contents
where seg_owner = 'NAPDDTA'
and seg_name = 'F4941';
////
create table dzhang.logm0923 as select * from v$logmnr_contents
where seg_owner = 'NAPDDTA'
and seg_name = 'F4941';
-- End the LogMiner session.  FIX(review): the original ended with a doubled
-- ";;", which sends an empty extra statement to PL/SQL.
execute DBMS_LOGMNR.END_LOGMNR();
select THREAD# ,operation, count(operation) from v$logmnr_contents where seg_owner = 'NAPDDTA' and seg_name = 'F590008' group by THREAD#,operation order by 1,2;
-- Summarise mined INSERTs in dzhang.logm1010 by redo thread and the A1PID /
-- A1UPMT values extracted from sql_redo.
-- FIX(review): the note was missing the "with list as" wrapper, so the final
-- query referenced an undefined "list"; restored here as a single statement.
with list as
(select operation,thread#,
sql_redo,
substr(sql_redo,instr(sql_redo,'A1PID')+10,10) A1PID,
substr(sql_redo,instr(sql_redo,'A1UPMT')+10,6) A1UPMT
from dzhang.logm1010
where operation = 'INSERT')
select thread#,a1pid,a1upmt,count(*) from list group by thread#,a1pid,a1upmt order by a1upmt;
mining at logminer........
-- Count mined UPDATEs by the NNSY value sliced out of sql_redo.
-- FIX(review): added the missing statement terminators between the two queries.
with codes as (select trim(substr(sql_redo,instr(sql_redo,'"NNSY" = ')+10,4))as nnsy
from dzhang.logm1022_2
where sql_redo like 'update%')
select nnsy,count(*) from codes group by nnsy order by count(*) desc;
-- Same pattern, bucketed by minute of day, for the AIAN8 column.
with codes as (select to_char(timestamp,'HH24:MI') dt ,trim(substr(sql_redo,instr(sql_redo,'"AIAN8" = ')+10,8))as nnsy
from dzhang.logm1203
where sql_redo like 'update%' and sql_redo like '%where%')
select dt, nnsy,count(*) from codes group by dt,nnsy order by count(*) desc;
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
10. create db link template
sqlplus zhangd/
create database link ed_naprd connect to ggate identified by bax4gate USING 'USERPP_REPORT';
alter session set global_names=false;
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
11. CLUSTER WAIT EVENTS
11.1) for recent cluster waits
userpp2 [/export/home/oracle]
usbderp220:oracle> cat gc_buffer.sql
-- gc_buffer.sql: recent 'gc%' cluster waits from in-memory ASH for a fixed
-- 10-minute window on one instance.  Edit the two date literals and the
-- INST_ID filter before each run.
set pages 10000
select count(*), a.inst_id,a.sql_id,event,b.sql_text,current_obj# from (
select inst_id,sample_time, sql_id, event, current_obj#
from gv$active_session_history
where sample_time bETWEEN TO_DATE ('04-AUG-2014 08:10:00','DD-MON-YYYY HH24:MI:SS') and
to_date ('04-AUG-2014 08:20:00','DD-MON-YYYY HH24:MI:SS')
and event like 'gc%'
--and event like '%sync%'
-- gv$sql is outer-joined so samples whose cursor has aged out are still counted
) a, gv$sql b
where a.sql_id=b.sql_id(+)
and a.inst_id=b.inst_id(+)
AND A.INST_ID =3
group by a.inst_id,a.sql_id,event, b.sql_text,current_obj#
order by count(*)
/
better version: join dba_objects;
-- "Better version" of gc_buffer.sql: same ASH aggregation, then joined to
-- dba_objects to resolve current_obj# into an object name.
-- NOTE(review): the dba_objects join is an inner join, so samples whose
-- current_obj# has no matching object_id (e.g. -1 / undo) are dropped here.
set pages 10000
select cnt, sql_id,event, txt, object_name from (
select count(*) cnt, a.sql_id,event,substr(b.sql_text,1,30) txt,current_obj# from (
select inst_id,sample_time, sql_id, event, current_obj#
from gv$active_session_history
where sample_time bETWEEN TO_DATE ('16-SEP-2014 14:19:00','DD-MON-YYYY HH24:MI:SS') and
to_date ('16-SEP-2014 14:20:00','DD-MON-YYYY HH24:MI:SS')
and event like 'gc%'
--and event like '%sync%'
) a, gv$sql b
where a.sql_id=b.sql_id(+)
and a.inst_id=b.inst_id(+)
AND A.INST_ID =2
group by a.sql_id,event, substr(b.sql_text,1,30),current_obj#
), dba_objects
where current_obj# = object_id
order by cnt
/
select count(*) , substr(sql_redo, instr(sql_redo,'NNSY'),14),thread# from dzhang.logm0923_2 where sql_redo not like '%select%' group by substr(sql_redo, instr(sql_redo,'NNSY'),14), thread#;
11.2) for historical cluster waits
-- Historical 'gc%' cluster waits from AWR's ASH history for a fixed window
-- on one instance; edit the date literals and INSTANCE_NUMBER before running.
-- NOTE(review): DBA_HIST_SQLTEXT is outer-joined but none of its columns are
-- selected, so the join is effectively unused here.
set pages 10000
select count(*), a.INSTANCE_NUMBER,a.sql_id,event,current_obj# from (
select INSTANCE_NUMBER,sample_time, sql_id, event, current_obj#
from dba_hist_active_sess_history
where sample_time bETWEEN TO_DATE ('04-AUG-2014 08:10:00','DD-MON-YYYY HH24:MI:SS') and
to_date ('04-AUG-2014 08:20:00','DD-MON-YYYY HH24:MI:SS')
and event like 'gc%'
) a, DBA_HIST_SQLTEXT b
where a.sql_id=b.sql_id(+)
AND A.INSTANCE_NUMBER =3
group by a.INSTANCE_NUMBER,a.sql_id,event, current_obj#
order by count(*)
/
-- Resolve an interesting SQL_ID from the listing above into its full text.
select SQL_TEXT from dba_hist_sqltext where SQL_ID='a9k8zn1y4j7jz';
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
12. pin down long running sqls
On an instance, this will show us the SQL (and E1 process/machine)that has been executing for over 60 mins.
-- Sessions whose monitored statement has been EXECUTING for over 60 minutes,
-- with the E1 client process/machine, ordered by disk reads.
-- NOTE(review): v$sql_monitor only contains monitored statements -- confirm
-- the target statements qualify for monitoring before trusting an empty result.
col process format a10
col machine format a16
select a.sid,b.process,b.machine, to_char(First_refresh_time,'DD-MM-YYYY HH24:MI:SS') start_time,
round((sysdate-first_refresh_time)*24*60) run_mins
from v$sql_monitor a , v$session b
where a.status = 'EXECUTING'
and round((sysdate-first_refresh_time)*24*60) > 60
and a.sid=b.sid
order by disk_reads
/
********************************************************************************************
---------------------------------------------------------------------------------------------
------------------------------------------------------------------
******************************************************************
13. SPM REALCASE in 11g
SPM ( sql plan management/ SQL BASELINE) is very powerful in 11g.
13.1. Sometimes we are not allowed to change code ( including using HINT), Still we can bind the execution plan with hint to existing sqls.HEre are steps:
Steps for exec plan fixation via SPM
1. Find current sql and current ( BAD) plan , load it into SPM.
1.1) Find SQL details with current (BAD) plan
Target sql is :
SELECT ISKCOO,ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUTDTA.F4942 ORDER BY ISSHPN ASC;
SQL> select INST_ID, PLAN_HASH_VALUE, SQL_TEXT, SQL_ID from gv$sql where SQL_TEXT like '%ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUTDTA.F4942%';
INST_ID PLAN_HASH_VALUE
---------- ---------------
SQL_TEXT
--------------------------------------------------------------------------------
SQL_ID
-------------
1 3258657625
SELECT ISKCOO,ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUTDTA.F4942 ORDER BY ISSHPN A
SC
f09vb5ds4ar12
Note down
SQLID: f09vb5ds4ar12
HASH PLAN: 3258657625
1.2) Load into SPM:
variable cnt number;
execute :cnt :=DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(SQL_ID => '&SQL_ID', PLAN_HASH_VALUE => &HASH_VALUE) ;
SQLID: f09vb5ds4ar12
HASH PLAN: 3258657625
1.3) verify if it is loaded in SPM now, note down SQL_HANDLE, which will be used in next steps.
select SQL_HANDLE, PLAN_NAME, ENABLED, ACCEPTED, SQL_TEXT from dba_SQL_PLAN_BASELINES where ACCEPTED = 'YES' order by LAST_MODIFIED;
SQL_HANDLE PLAN_NAME ENA ACC
------------------------------ ------------------------------ --- ---
SQL_TEXT
--------------------------------------------------------------------------------
SYS_SQL_f8973d17c2533445 SYS_SQL_PLAN_c25334452ccb01da YES YES
SELECT ISKCOO,ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUTDTA.F4942 ORDER BY ISSHPN A
SQL_HANDLE: SYS_SQL_f8973d17c2533445
2. Add hint, run it and locate new sql_id and new plan hash:
SQL WITH HINT:
SELECT /*+ index(F4942, F4942_0) */ ISKCOO,ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUTDTA.F4942 ORDER BY ISSHPN ASC;
select INST_ID, PLAN_HASH_VALUE, SQL_TEXT, SQL_ID from gv$sql where SQL_TEXT like '%/*+ index(F4942, F4942_0) */%';
INST_ID PLAN_HASH_VALUE
---------- ---------------
SQL_TEXT
--------------------------------------------------------------------------------
SQL_ID
-------------
1 1083079824
SELECT /*+ index(F4942, F4942_0) */ ISKCOO,ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUT
DTA.F4942 ORDER BY ISSHPN ASC
a45abk6pnq0r2
so NEW SQL ID is a45abk6pnq0r2
and NEW PLAN HASH is 1083079824
3. Load new sql id and new plan into SPM and ready to replace old one, using same SQL_HANDLE though.
variable cnt number ;
exec :cnt :=dbms_spm.LOAD_PLANS_FROM_CURSOR_CACHE (SQL_ID => '&SQL_ID',PLAN_HASH_VALUE => &plan_hash_value,SQL_HANDLE => '&SQL_HANDLE' ) ;
SQL_ID: a45abk6pnq0r2 –from step 2, new sql id
Plan hash value: 1083079824 –from step 2 new sql plan hash value
SQL_HANDLE: SYS_SQL_f8973d17c2533445 --from step 1
4. verify contents of SPM
You should see two sets of plans for the same sql_handle now,one is for old (BAD) and one is for good (NEW). Notice timestamps of "created" ,
select SQL_HANDLE,PLAN_NAME , CREATED from dba_sql_plan_baselines order by 3;
SQL_HANDLE PLAN_NAME
------------------------------ ------------------------------
CREATED
---------------------------------------------------------------------------
SYS_SQL_f8973d17c2533445 SYS_SQL_PLAN_c25334452ccb01da
31-MAR-14 10.30.19.000000 AM
SYS_SQL_f8973d17c2533445 SYS_SQL_PLAN_c253344547b384c2
31-MAR-14 12.10.53.000000 PM
5. Find the old (bad) plan in SPM , and drop it . Only keep the modified ( new and good) plan in SPM
variable cnt number ;
exec :cnt :=dbms_spm.DROP_SQL_PLAN_BASELINE(SQL_HANDLE=> '&SQL_HANDLE', PLAN_NAME=> '&PLAN_NAME');
SQL_HANDLE: SYS_SQL_f8973d17c2533445
PLAN_NAME: SYS_SQL_PLAN_c25334452ccb01da
You should input correct Plan Name, the one associated with time stamp 10:30 am (old bad plan)
6. Finally check your explain plan and see if it follows the expected path: i.e. index is used
explain plan for
SELECT ISKCOO,ISSHPN,ISDCTO,ISDOCO,ISCRCD FROM NAUTDTA.F4942 ORDER BY ISSHPN ASC;
SQL> SELECT * FROM table(dbms_xplan.display);
PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------
Plan hash value: 1083079824
--------------------------------------------------------------------------------
-------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Tim
e |
--------------------------------------------------------------------------------
-------
PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | 10M| 337M| 3674K (1)| 01:
39:58 |
| 1 | TABLE ACCESS BY INDEX ROWID| F4942 | 10M| 337M| 3674K (1)| 01:
39:58 |
| 2 | INDEX FULL SCAN | F4942_0 | 10M| | 56335 (2)| 00:
01:32 |
--------------------------------------------------------------------------------
-------
PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------
Note
-----
- SQL plan baseline "SYS_SQL_PLAN_c253344547b384c2" used for this statement
13 rows selected.
Notice that baseline SYS_SQL_PLAN_c253344547b384c2 (New and good one) is applied here.
13.2
We are facing a different problem. The good plan for existing sql only appeared in AWR. HERE are the steps of LOADING PLAN FROM AWR TO SQL PLAN BASELINE.
1. CONNECTION
sqlplus xxx/xxx
SQL_ID : 1h4km708h2dp5
2. CREATE SQL SET
SQL> exec dbms_sqltune.create_sqlset(sqlset_name => '1h4km708h2dp5_sqlset_test',description => 'sqlset descriptions');
3. READ from AWR
(MAKE SURE THAT only THE GOOD PLAN APPEARS DURING THAT WINDOW)
need to input start snapshot // end snapshot // sqlid
2977 2979 1h4km708h2dp5
-- Populate the SQL tuning set from AWR: select workload-repository rows for
-- &sql_id between the two snapshots and load them into the set created in
-- step 2.  CHR(39) embeds the single quotes needed by the basic filter.
declare
baseline_ref_cur DBMS_SQLTUNE.SQLSET_CURSOR;
begin
open baseline_ref_cur for
select VALUE(p) from table(
DBMS_SQLTUNE.SELECT_WORKLOAD_REPOSITORY(&begin_snap_id, &end_snap_id,'sql_id='||CHR(39)||'&sql_id'||CHR(39)||'',NULL,NULL,NULL,NULL,NULL,NULL,'ALL')) p;
DBMS_SQLTUNE.LOAD_SQLSET('1h4km708h2dp5_sqlset_test', baseline_ref_cur);
end;
/
4 view sql set
SELECT NAME,OWNER,CREATED,STATEMENT_COUNT FROM DBA_SQLSET where name='1h4km708h2dp5_sqlset_test';
(notice that count should be 1, which means plan is indeed captured)
(if it is 0, it means that the plan is not captured at all)
verify sql set
select * from table(dbms_xplan.display_sqlset('1h4km708h2dp5_sqlset_test','&sql_id'));
from here you can see the plan you intend to use.
(yes, here you should see the right plan now; if it is empty, it means you still didn't capture the right plan)
6. LOADING to baseline
set serveroutput on
-- Load the plan(s) captured in the SQL tuning set into the SQL plan
-- baseline (not fixed, enabled); prints the number of plans loaded.
declare
  v_loaded pls_integer;
begin
  v_loaded := dbms_spm.load_plans_from_sqlset(
                sqlset_name  => '1h4km708h2dp5_sqlset_test',
                sqlset_owner => 'UPCHECK',
                fixed        => 'NO',
                enabled      => 'YES');
  dbms_output.put_line(v_loaded);
end;
/
7. verify in baseline:
(Notice timestamp, it should be recently created )
SELECT SQL_HANDLE, PLAN_NAME, ORIGIN, ENABLED, ACCEPTED, FIXED, MODULE, created FROM DBA_SQL_PLAN_BASELINES order by created;
....
SQL_HANDLE PLAN_NAME ORIGIN ENA
------------------------------ ------------------------------ -------------- ---
ACC FIX MODULE
--- --- ----------------------------------------------------------------
CREATED
---------------------------------------------------------------------------
SQL_a4977112b0edc769 SQL_PLAN_a95vj2asfvjv9f43605a7 MANUAL-LOAD YES
YES NO PSRUN@usbdpsdbs801 (TNS V1-V3)
07-MAY-14 09.12.13.000000 AM
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
13.3 SPM III
Another typical scenario: We have a good plan in QA and we are trying to move it to PROD. How can we do it?
STEPS:
variable cnt number;
execute :cnt :=DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(sql_id=>'3kn6am8a9jw05');
----- pack sql plan to stage table
SET SERVEROUTPUT ON
-- Pack SQL plan baselines into the staging table UPCHECK.STAGE so they can
-- be exported (e.g. Data Pump) and moved to another database.
DECLARE
  -- renamed from l_plans_unpacked: this block PACKS (the sibling block
  -- below does the unpacking), so the old name was misleading
  l_plans_packed PLS_INTEGER;
BEGIN
  l_plans_packed := DBMS_SPM.pack_stgtab_baseline(
    table_name  => 'stage',
    table_owner => 'UPCHECK',
    creator     => 'UPCHECK');
  DBMS_OUTPUT.put_line('Plans packed: ' || l_plans_packed);
END;
/
---- unpack sql plan from stage table to SQL Plan baseline
SET SERVEROUTPUT ON
-- Unpack baselines from staging table UPCHECK.STAGE2 into this database's
-- SQL plan management base; prints the number of plans unpacked.
DECLARE
  v_count PLS_INTEGER;
BEGIN
  v_count := DBMS_SPM.unpack_stgtab_baseline(
               table_name  => 'stage2',
               table_owner => 'UPCHECK',
               creator     => 'UPCHECK');
  DBMS_OUTPUT.put_line('Plans Unpacked: ' || v_count);
END;
/
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
15. REFRESH STEPS
Before starting import for data refresh, Please do following first (example on schema NAQADTA). Check with Peter or Edward for GGATE processes.
lock user usqadta, eai_integration_naqa
1. disable all triggers in naqadta
2. stop Goldengate processes
Just stop following extracts. --from Peter Cai
ggsci> stop eallitm
ggsci> stop ef41021
ggsci> stop enhval
2. disable CDC capture process: CDC$C_RECALL_NAQA_SET, stop scheduler job
3. run drop big index script
4. run truncate table script
5. import data
****************************** **************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
16. FLASHBACK QUERY
select count(*) from NAPDDTA.F590101 as of TIMESTAMP (SYSTIMESTAMP-1/6);
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
17. LONG RUNNING sessions
IN NA
usbderp230:/export/home/oracle/admin/scripts/edward/sqlmonNA.ksh
IN LAERPP
usbderp210:/export/home/oracle/admin/scripts/edward/sqlmonLA.ksh
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
18. TABLE/INDEX FRAGMENTATION EVALUATION
.
select table_name,round((blocks*8),2)||'kb' "size" from dba_tables where table_name = 'A4108' and owner='APPDDTA';
TABLE_NAME size
------------------------------ ------------------------------------------
F4211 2313936kb
SQL> select table_name,round((num_rows*avg_row_len/1024),2)||'kb' "size" from dba_tables where table_name = 'A4108' and owner='APPDDTA';
TABLE_NAME size
------------------------------ ------------------------------------------
F4211 491496.86kb
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
19. relocate service in NAQADTA
19.1) when bouncing NAQA, make sure
alter system set events '1399 trace name context forever, level 131072';
19.2)
usbderp811:oracle> srvctl status service -d userpqdr
Service userpq_et is running on instance(s) userpqdr2
Service userpq_na is running on instance(s) userpqdr2
Service userpq_integration is running on instance(s) userpqdr2
Service userpq_report is running on instance(s) userpqdr2
Service userpq_css is running on instance(s) userpqdr2
Service userpq_dm is running on instance(s) userpqdr2
Service userpq_naqabmc is running on instance(s) userpqdr2
Service userpq_naut is running on instance(s) userpqdr2
Service userpq_naqa is running on instance(s) userpqdr2
Service userpq_tr is running on instance(s) userpqdr2
srvctl relocate service -s userpq_et -d userpqdr -i userpqdr2 -t userpqdr1
(relocate is like starting the service on the new node and stopping it on the old node)
srvctl start service -d ete1dv -i ete1dv1 -s ete1dv_cdc
srvctl start service -d ete1dv -i ete1dv1 -s ete1dv_int
srvctl start service -d ete1dv -i ete1dv2 -s ete1dv_bat
srvctl start service -d ete1dv -i ete1dv2 -s ete1dv_bmc
srvctl start service -d ete1dv -i ete1dv2 -s ete1dv_integration
srvctl start service -d ete1dv -i ete1dv2 -s ete1dv_et
srvctl start service -d ete1dv -i ete1dv2 -s ete1dv_report
srvctl stop service -d ete1dv -i ete1dv2 -s ete1dv_cdc
srvctl stop service -d ete1dv -i ete1dv2 -s ete1dv_int
srvctl stop service -d ete1dv -i ete1dv1 -s ete1dv_bat
srvctl stop service -d ete1dv -i ete1dv1 -s ete1dv_bmc
srvctl stop service -d ete1dv -i ete1dv1 -s ete1dv_integration
srvctl stop service -d ete1dv -i ete1dv1 -s ete1dv_et
srvctl stop service -d ete1dv -i ete1dv1 -s ete1dv_report
********************************************************************************************
---------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------
*********************************************************************************************
20. QUICK INSERT AND DELETE BY CHUNKS TIPS
20.1) INSERT by chunk
declare
  -- Copy rows from dzhang.TEST to dzhang.TEST2, committing every
  -- c_chunk_size rows.  The original used mod(rec_count,2) with the counter
  -- starting at 1, which committed after almost every single row.
  c_chunk_size constant pls_integer := 1000;
  cursor oldtab_csr is select * from dzhang.TEST;
  rec_count number := 0;
begin
  for oldtab_rec in oldtab_csr loop
    insert into dzhang.TEST2 values (oldtab_rec.ID, oldtab_rec.VALUE,'XXXXX');
    rec_count := rec_count + 1;
    if mod(rec_count, c_chunk_size) = 0 then
      commit;
    end if;
  end loop;
  commit;  -- pick up the final partial chunk
end;
/
20.2) delete by chunks
set serveroutput on timing on
declare
  -- Delete old A0901 rows in 50k-row chunks, committing between chunks.
  -- Changes from the original: an explicit DATE constant instead of an
  -- implicit varchar->date conversion (NLS-dependent), and looping until no
  -- rows remain instead of pre-computing a loop count that could leave a
  -- remainder behind.
  -- NOTE(review): assumes gmdatetime is a DATE column -- the original
  -- compared it to the string '2012-12-31'; confirm the column type.
  c_cutoff constant date := to_date('2012-12-31','YYYY-MM-DD');
  c_chunk  constant pls_integer := 50000;
  RecCount Number(12):=0;
Begin
  select count(*) Into RecCount from etpymdmdta.A0901 where gmdatetime <= c_cutoff;
  dbms_output.put_line('A0901 Total Rows ==> '||RecCount);
  loop
    Delete from etpymdmdta.A0901 where gmdatetime <= c_cutoff and rownum < c_chunk + 1;
    exit when SQL%ROWCOUNT = 0;
    dbms_output.put_line('Rows Deleted ==> '||SQL%ROWCOUNT);
    Commit;
  end loop;
  commit;
end;
/
----------------------------------------------------------------------------------------------
*********************************************************************************************
21. TABLESPACE GROWTH MONITOR
--TABLESPACE GROWTH REPORT(USING SQLPLUS)
set linesize 120
column name format a15
column variance format a20
alter session set nls_date_format='yyyy-mm-dd';
-- Daily growth report for one tablespace from AWR usage history.
-- CTE t: one row per day (the last snapshot of each day) with the
-- allocated and used sizes converted from blocks to GB.
with t as (
select ss.run_time,ts.name,round(su.tablespace_size*dt.block_size/1024/1024/1024,2) alloc_size_gb,
round(su.tablespace_usedsize*dt.block_size/1024/1024/1024,2) used_size_gb
from
dba_hist_tbspc_space_usage su,
-- the highest snap_id of each calendar day
(select trunc(BEGIN_INTERVAL_TIME) run_time,max(snap_id) snap_id from dba_hist_snapshot
group by trunc(BEGIN_INTERVAL_TIME) ) ss,
v$tablespace ts,
dba_tablespaces dt
where su.snap_id = ss.snap_id
and su.tablespace_id = ts.ts#
and ts.name =upper('&TABLESPACE_NAME')
and ts.name = dt.tablespace_name )
-- self-join t (e = current day, b = previous day) to get day-over-day change
select e.run_time,e.name,e.alloc_size_gb,e.used_size_gb curr_used_size_gb,b.used_size_gb prev_used_size_gb,
case when e.used_size_gb > b.used_size_gb then to_char(e.used_size_gb - b.used_size_gb)
when e.used_size_gb = b.used_size_gb then '***NO DATA GROWTH'
when e.used_size_gb < b.used_size_gb then '******DATA PURGED' end variance
from t e, t b
where e.run_time = b.run_time + 1
order by 1;
********************************************************************************************
---------------------------------------------------------------------------------------------
22. OEM ISSUES
1) checkup database and listener
set ORACLE_SID=oemrepo
sqlplus "/as sysdba"
sqlplus "sys@oemrepo as sysdba"
enter password: temp2013
cd $ORACLE_HOME= E:\app\oracle\product\11.2.0\dbhome_1
also lsnrctl status
need to bounce the listener sometimes.
make sure you can connect @oemrepo
2) go to OMS home
E:\app\oms12\oms\BIN
.\emctl start oms
4) CLEANUP OMS LOGS
we have big files :
E:\app\oms12\gc_inst\user_projects\domains\GCDomain\servers\EMGC_OMS1\logs
********************************************************************************************
---------------------------------------------------------------------------------------------
23. RAC AND ASM
recreate spfile from pfile;
create spfile='+ET_DATA/ete1dv/spfileete1dv.ora' from pfile='';
always specify full path !!!!!!!!
read http://zhefeng.wordpress.com/2011/03/02/recreating-spfile-on-asm-storage-from-pfile/
********************************************************************************************
---------------------------------------------------------------------------------------------
24. change current schema
alter session set current_schema=NADVDTA;
********************************************************************************************
---------------------------------------------------------------------------------------------
25. HOT BLOCKS in RAC?
-- Before: rows-per-block distribution; many rows in one block = hot block.
select dbms_rowid.rowid_block_number(rowid), count(*)
from NAPDDTA.F00022
group by dbms_rowid.rowid_block_number(rowid);
-- Relocate the table and rebuild its index in a dedicated tablespace.
alter table NAPDDTA.F00022 move tablespace NAPDDTA_TABLE2;
alter index NAPDDTA.F00022_0 rebuild online tablespace NAPDDTA_TABLE2;
drop table NAPDDTA.F00022_PCTFREEBAK;
-- Spread the rows out: lock the table, raise PCTFREE to 99 and minimize
-- records_per_block, then reload the rows so each lands in its own block.
lock table NAPDDTA.F00022 in exclusive mode;
alter table NAPDDTA.F00022 pctfree 99;
alter table NAPDDTA.F00022 minimize records_per_block;
create table NAPDDTA.F00022_PCTFREEBAK as select * from NAPDDTA.F00022;
delete from NAPDDTA.F00022;
insert into NAPDDTA.F00022 select * from NAPDDTA.F00022_PCTFREEBAK;
commit;
-- After: verify the new distribution -- expect few rows per block now.
select dbms_rowid.rowid_block_number(rowid), count(*)
from NAPDDTA.F00022
group by dbms_rowid.rowid_block_number(rowid);
********************************************************************************************
---------------------------------------------------------------------------------------------
26. manually create snapshot
EXECUTE dbms_workload_repository.create_snapshot();
********************************************************************************************
---------------------------------------------------------------------------------------------
27. desc CDC_NAPD.F4211_CT;
select count(*) ,SDPID , OPERATION$ from CDC_NAPD.F4211_CT where COMMIT_TIMESTAMP$ bETWEEN TO_DATE ('30-SEP-2014 09:08:00','DD-MON-YYYY HH24:MI:SS') and
to_date ('30-SEP-2014 09:22:00','DD-MON-YYYY HH24:MI:SS') group by OPERATION$,SDPID ;
********************************************************************************************
---------------------------------------------------------------------------------------------
28 . histogram stories
If you choose to collect histograms manually, you should only compute histograms for columns which you know have highly-skewed data distribution.
For example, if you wanted to create a 254-bucket histogram on the SAL column of the EMP table, issue the following statement:
DBMS_STATS.GATHER_TABLE_STATS (NULL,'EMP', method_opt => 'FOR COLUMNS sal SIZE 254');
The SIZE keyword states the maximum number of buckets for the histogram. You would create a histogram on the SAL column if there were an unusual number of employees with the same salary and few employees with other salaries.
......
typical one for stats
exec dbms_stats.gather_table_stats(ownname=>'NADVDTA',Tabname=>'F47047', cascade=> true);
ALSO FOR
SELECT stattype_locked FROM dba_tab_statistics WHERE table_name = 'TEST' and owner = 'SCOTT';
********************************************************************************************
---------------------------------------------------------------------------------------------
29. mviews
refresh mview
NAUTDTA.F58211W3_MV
dbms_refresh.refresh('NAQADTA.F58211W3_MV');
29.1 ) create a mview refresh group
-- Create an (initially empty) materialized view refresh group that will
-- refresh hourly; members are added afterwards with DBMS_REFRESH.ADD.
begin
DBMS_REFRESH.MAKE (
name => 'NAQADTA.mrefresh',
-- empty member list on purpose; mviews are added via DBMS_REFRESH.ADD
list => '',
next_date => sysdate,
interval => 'sysdate + 1/24'
);
end;
/
29.2)
begin
DBMS_REFRESH.ADD(
name => 'NAQADTA.mrefresh',
list => 'F58211W3_MV'
);
end;
/
29.3)
exec DBMS_REFRESH.REFRESH('NAQADTA.mrefresh');
29.4) change intervals
begin
DBMS_REFRESH.CHANGE(
name => 'NAQADTA.mrefresh',
next_date => sysdate+1/48,
interval => 'sysdate + 1/24'
);
end;
/
29.5) check mview
SELECT owner, mview_name, last_refresh_date
FROM user_mviews
WHERE mview_name = 'F58211W3_MV';
29.6) check refresh group
select rowner,rname,job,next_date,interval from user_refresh;
select owner,name,rowner,rname,job from user_refresh_children;
and check
col priv_user for a20
SQL> col schema_user for a20
SQL> col interval for a20
SQL> col what for a20
SQL> select LAST_DATE,LAST_SEC,THIS_DATE,NEXT_DATE,TOTAL_TIME,FAILURES from dba_jobs where job=40003;
........
********************************************************************************************
---------------------------------------------------------------------------------------------
********************************************************************************************
---------------------------------------------------------------------------------------------
30 log file sync issues
1.) recent log file sync.
col name for a15
-- Real-time 'log file sync' over the most recent metric interval, per
-- instance: total waited ms, wait count and average ms per wait.
select m.INST_ID,
n.name ,
-- *10 converts centiseconds to ms (gv$eventmetric times are in 1/100s)
round(m.time_waited,3)*10 time_waited,
m.wait_count,
-- nullif avoids divide-by-zero when there were no waits in the interval
round(10*m.time_waited/nullif(m.wait_count,0),3) avgms
from gv$eventmetric m,
gv$event_name n
where m.INST_ID=n.INST_ID and m.event_id=n.event_id
and n.name in (
'log file sync'
)
order by 1;
log file sync realtime recent 60 seconds
2) log file sync history from awr
/* LOG FILE SYNC WAIT ON EACH NODE */
set pagesize 100
col event_name format a30
col avg_ms format 99999.99
col ct format 999,999,999
-- AWR history of 'log file sync' for one instance: per snapshot interval,
-- the average wait (ms) and wait count, computed as deltas between
-- consecutive snapshots via LAG() over the cumulative counters.
select
btime, event_name,
-- nullif guards against a zero delta in the denominator
(time_ms_end-time_ms_beg)/nullif(count_end-count_beg,0) avg_ms,
(count_end-count_beg) ct
from (
select
e.event_name,
to_char(s.BEGIN_INTERVAL_TIME,'YY-MM-DD HH24:MI') btime,
total_waits count_end,
time_waited_micro/1000 time_ms_end,
-- previous snapshot's cumulative values, used to form the delta
Lag (e.time_waited_micro/1000)
OVER( PARTITION BY e.event_name ORDER BY s.snap_id) time_ms_beg,
Lag (e.total_waits)
OVER( PARTITION BY e.event_name ORDER BY s.snap_id) count_beg
from
DBA_HIST_SYSTEM_EVENT e,
DBA_HIST_SNAPSHOT s
where
s.snap_id=e.snap_id
and e.event_name in ( 'log file sync')
and e.dbid=s.dbid
and e.INSTANCE_NUMBER=&INSTANCE_NUMBER
order by e.event_name, begin_interval_time
)
-- skip intervals with no waits (and the first row, whose LAG is NULL)
where (count_end-count_beg) > 0
order by btime
/
********************************************************************************************
---------------------------------------------------------------------------------------------
31. auditing on logon sessions
select * from dba_audit_session where username = 'HC_DS_INTERFACE' and action_name = 'LOGON' and timestamp >= sysdate-1/24 order by timestamp desc
********************************************************************************************
---------------------------------------------------------------------------------------------
********************************************************************************************
---------------------------------------------------------------------------------------------
32. Rac interconnect
In regards to how the Interconnect is performing, the aforementioned script runs a couple of key queries; here are a couple of the more important ones:
-- GLOBAL CACHE CR PERFORMANCE
-- This shows the average latency of a consistent block request.
-- AVG CR BLOCK RECEIVE TIME should typically be about 15 milliseconds depending
-- on your system configuration and volume, is the average latency of a
-- consistent-read request round-trip from the requesting instance to the holding
-- instance and back to the requesting instance. If your CPU has limited idle time
-- and your system typically processes long-running queries, then the latency may
-- be higher. However, it is possible to have an average latency of less than one
-- millisecond with User-mode IPC. Latency can be influenced by a high value for
-- the DB_MULTI_BLOCK_READ_COUNT parameter. This is because a requesting process
-- can issue more than one request for a block depending on the setting of this
-- parameter. Correspondingly, the requesting process may wait longer. Also check
-- interconnect bandwidth, OS tcp settings, and OS udp settings if
-- AVG CR BLOCK RECEIVE TIME is high.
--
set numwidth 20
column "AVG CR BLOCK RECEIVE TIME (ms)" format 9999999.9
-- Average CR block receive latency per instance.  Handles both the old
-- ('global cache ...') and newer ('gc ...') statistic names.  NULLIF guards
-- against divide-by-zero on an instance with no CR blocks received yet, and
-- the parentheses make the intended AND/OR grouping explicit.
select b1.inst_id, b2.value "GCS CR BLOCKS RECEIVED",
b1.value "GCS CR BLOCK RECEIVE TIME",
((b1.value / nullif(b2.value, 0)) * 10) "AVG CR BLOCK RECEIVE TIME (ms)"
from gv$sysstat b1, gv$sysstat b2
where (b1.name = 'global cache cr block receive time' and
       b2.name = 'global cache cr blocks received' and b1.inst_id = b2.inst_id)
   or (b1.name = 'gc cr block receive time' and
       b2.name = 'gc cr blocks received' and b1.inst_id = b2.inst_id) ;
-- GLOBAL CACHE LOCK PERFORMANCE
-- This shows the average global enqueue get time.
-- Typically AVG GLOBAL LOCK GET TIME should be 20-30 milliseconds. the elapsed
-- time for a get includes the allocation and initialization of a new global
-- enqueue. If the average global enqueue get (global cache get time) or average
-- global enqueue conversion times are excessive, then your system may be
-- experiencing timeouts. See the 'WAITING SESSIONS', 'GES LOCK BLOCKERS',
-- 'GES LOCK WAITERS', and 'TOP 10 WAIT EVENTS ON SYSTEM' sections if the
-- AVG GLOBAL LOCK GET TIME is high.
--
set numwidth 20
column "AVG GLOBAL LOCK GET TIME (ms)" format 9999999.9
-- Average global enqueue get time per instance; covers both the old
-- ('global lock ...') and newer ('global enqueue ...') statistic names.
-- NULLIF guards against divide-by-zero when no gets have occurred yet, and
-- the parentheses make the intended AND/OR grouping explicit.
select b1.inst_id, (b1.value + b2.value) "GLOBAL LOCK GETS",
b3.value "GLOBAL LOCK GET TIME",
(b3.value / nullif(b1.value + b2.value, 0) * 10) "AVG GLOBAL LOCK GET TIME (ms)"
from gv$sysstat b1, gv$sysstat b2, gv$sysstat b3
where (b1.name = 'global lock sync gets' and
       b2.name = 'global lock async gets' and b3.name = 'global lock get time'
       and b1.inst_id = b2.inst_id and b2.inst_id = b3.inst_id)
   or (b1.name = 'global enqueue gets sync' and
       b2.name = 'global enqueue gets async' and b3.name = 'global enqueue get time'
       and b1.inst_id = b2.inst_id and b2.inst_id = b3.inst_id);
-- DLM TRAFFIC INFORMATION
-- This section shows how many tickets are available in the DLM. If the
-- TCKT_WAIT columns says "YES" then we have run out of DLM tickets which could
-- cause a DLM hang. Make sure that you also have enough TCKT_AVAIL.
--
set numwidth 5
select * from gv$dlm_traffic_controller
order by TCKT_AVAIL;
33. USING ASH
Query the V$ACTIVE_SESSION_HISTORY view to get information about the most common wait events, and
the SQL statements, database objects, and users responsible for those waits. The following are some
useful queries you can use.
To find the most important wait events in the last 15 minutes, issue the following query:
-- Top wait events from recent ASH samples.
-- NOTE(review): 10/2880 of a day is 5 minutes, not the 15 minutes the
-- surrounding text claims -- use 15/1440 for a true 15-minute window.
SQL> select event,
sum(wait_time +
time_waited) total_wait_time
from v$active_session_history
where sample_time between
sysdate-10/2880 and sysdate
group by event
order by total_wait_time desc
To find out which of your users experienced the most waits in the past 15 minutes, issue the
following query:
-- Sessions with the most waits in the recent ASH window (10/2880 day = 5 min).
-- NOTE(review): joins on SID only; adding session_serial# would avoid
-- matching a recycled session -- confirm before relying on the output.
SQL> select s.sid, s.username,
sum(a.wait_time +
a.time_waited) total_wait_time
from v$active_session_history a,
v$session s
where a.sample_time between sysdate-10/2880 and sysdate
and a.session_id=s.sid
group by s.sid, s.username
order by total_wait_time desc;
Execute the following query to find out the objects with the highest waits.
-- Objects with the highest waits in the recent ASH window.
-- Fixed from the book excerpt: the select list used alias 'o' while the
-- table alias was 'd', a page header had been pasted into the middle of the
-- statement, and the sort is made descending so the worst objects come first.
SQL>select a.current_obj#, d.object_name, d.object_type, a.event,
sum(a.wait_time +
a.time_waited) total_wait_time
from v$active_session_history a,
dba_objects d
where a.sample_time between sysdate-10/2880 and sysdate
and a.current_obj# = d.object_id
group by a.current_obj#, d.object_name, d.object_type, a.event
order by total_wait_time desc;
You can identify the SQL statements that have been waiting the most during the last 15 minutes
with this query.
-- SQL statements with the most waits in the recent ASH window, per user.
-- NOTE(review): unlike the sibling queries, this one has no ORDER BY, so
-- the result comes back unordered -- add 'order by total_wait_time desc'.
SQL> select a.user_id,u.username,s.sql_text,
sum(a.wait_time + a.time_waited) total_wait_time
from v$active_session_history a,
v$sqlarea s,
dba_users u
where a.sample_time between sysdate-10/2880 and sysdate
and a.sql_id = s.sql_id
and a.user_id = u.user_id
group by a.user_id,s.sql_text, u.username;
34 . connection storms
check on logons
select count(*), username from dba_audit_session where TIMESTAMP > sysdate -1/24 group by username order by 1;
notice that HC_DS_INTERFACE is from DataStage.
we have a five-minute window for logon storms
-- Logon counts from the audit trail, bucketed by HOUR.
-- NOTE(review): the surrounding note mentions a five-minute window, but
-- 'YYYY-MM-DD hh24' buckets per hour; include minutes (truncated to 5-minute
-- boundaries) for a true five-minute bucketing.
SELECT timeslot, COUNT(*)
FROM
(
SELECT to_char(timestamp, 'YYYY-MM-DD hh24') timeslot
FROM
(
SELECT timestamp FROM dba_audit_session
)
) GROUP BY timeslot order by 2;
-- Recent logons for one user/OS-user from the audit trail.
-- NOTE(review): 'sysdate +1/24' points an hour into the FUTURE; presumably
-- 'sysdate - 1/24 - 5/(24*60)' was intended -- confirm before reuse.
select * from dba_audit_session where TIMESTAMP > sysdate +1/24-5/(24*60) AND USERNAME='HC_DS_INTERFACE' AND os_username='dsrfroper' order by timestamp;
35. logon trigger template
Table script:
-- Audit table populated by the AFTER LOGON trigger that follows in these
-- notes; one row per audited logon.
CREATE TABLE AUD_USR
( "SID" NUMBER,
"USERNAME" VARCHAR2(25 BYTE),
"OSUSER" VARCHAR2(25 BYTE),
"HOST" VARCHAR2(25 BYTE),
"IDENTITY_TYPE" VARCHAR2(20 BYTE),
-- NOTE(review): 15 bytes only fits dotted IPv4; widen if IPv6 is possible
"IP_ADDRESS" VARCHAR2(15 BYTE),
"TIMESTAMP" DATE
);
Trigger Script:
create or replace TRIGGER After_logon
AFTER LOGON
ON DATABASE
-- Audit logons of user DZHANG into AUD_USR.
-- Declared autonomous: a plain trigger cannot COMMIT (ORA-04092), so the
-- original version would have errored at logon time.
DECLARE
  PRAGMA AUTONOMOUS_TRANSACTION;
BEGIN
  if UPPER(SYS_CONTEXT('USERENV', 'SESSION_USER')) = 'DZHANG' then
    -- explicit column list + VALUES instead of INSERT ... SELECT FROM dual
    insert into aud_usr (sid, username, osuser, host, identity_type, ip_address, timestamp)
    values (sys_context('USERENV','SID'),
            sys_context('USERENV','SESSION_USER'),
            sys_context('USERENV','OS_USER'),
            sys_context('USERENV','HOST'),
            sys_context('USERENV','IDENTIFICATION_TYPE'),
            sys_context('USERENV','IP_ADDRESS'),
            sysdate);
    commit;
  end if;
END ;
/
this is trace on logon sessions.
"HAVE TO RUN AS SYSDBA"
create or replace trigger trace_trigger_deng
AFTER LOGON ON DATABASE
WHEN (USER='DZHANG')
-- Enable SQL trace for DZHANG sessions, tagging the trace file name with
-- host and user so the file is easy to find in the diag trace directory.
declare
  stmt  varchar2(400);
  -- widened from 20: a host name longer than 20 chars would raise ORA-06502
  -- here and abort every DZHANG logon
  hname varchar2(128);
  uname varchar2(128);
begin
  select sys_context('USERENV','HOST'),
         sys_context('USERENV','SESSION_USER')
    into hname,uname from dual;
  -- quote the value so characters like '-' or '.' in the host name cannot
  -- break the ALTER SESSION statement
  stmt := 'alter session set tracefile_identifier=''' || hname || '_' || uname || '''';
  EXECUTE IMMEDIATE stmt;
  EXECUTE IMMEDIATE 'alter session set sql_trace=true';
end;
/
36. RAC crs resources list
Format of crs_stat -t OR crsctl status resource
crsctl status resource |grep -v "^$"|awk -F "=" 'BEGIN {print " "} {printf("%s",NR%4 ? $2"|" : $2"\n")}'|sed -e 's/ *, /,/g' -e 's/, /,/g'|awk -F "|" 'BEGIN { printf "%-40s%-35s%-20s%-50s\n","Resource Name","Resource Type","Target ","State" }{ split ($3,trg,",") split ($4,st,",")}{for (i in trg) {printf "%-40s%-35s%-20s%-50s\n",$1,$2,trg[i],st[i]}}'
crs_stat | awk -F= '/NAME=/{n=$2}/TYPE=/{t=$2}/TARGET=/{g=$2}/STATE=/{s=$2; printf("%-75s%-18s%-15s%-30s\n", n,t,g,s)}'
and you can use grep to find what you want
crsctl status resource |grep -v "^$"|awk -F "=" 'BEGIN {print " "} {printf("%s",NR%4 ? $2"|" : $2"\n")}'|sed -e 's/ *, /,/g' -e 's/, /,/g'|awk -F "|" 'BEGIN { printf "%-40s%-35s%-20s%-50s\n","Resource Name","Resource Type","Target ","State" }{ split ($3,trg,",") split ($4,st,",")}{for (i in trg) {printf "%-40s%-35s%-20s%-50s\n",$1,$2,trg[i],st[i]}}' > 1.txt
37. ASM REMOTE LOGIN
notice use host fullname.......
ASM_TEST =
(DESCRIPTION=(ADDRESS_LIST = (ADDRESS=(PROTOCOL=tcp)(HOST=usroe1r712.baxter.com)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=+ASM1)(INSTANCE_NAME=+ASM1)(UR=A)(SERVER=DEDICATED)))
sqlplus asmsnmp/temp2014@ASM_TEST as sysdba
38. OEM JAVA_HOME
JAVA_HOME should be E:\app\oms12\jdk16\jdk
39, A good capture for all sqls execution times today
usbderp230:oracle> ls -lt | grep what
-rw-r--r-- 1 oracle oinstall 5012 Nov 24 12:22 whats_changed.sql
userpp3 [/export/home/oracle]
usbderp230:oracle> cat /export/home/oracle/whats_changed.sql
----------------------------------------------------------------------------------------
--
-- File name: whats_changed.sql
--
-- Purpose: Find statements that have significantly different elapsed time than before.
-
-- Author: Kerry Osborne
--
-- Usage: This scripts prompts for four values.
--
-- days_ago: how long ago was the change made that you wish to evaluate
-- (this could easily be changed to a snap_id for more precision)
--
-- min_stddev: the minimum "normalized" standard deviation between plans
-- (the default is 2 - which means twice as fast/slow)
--
-- min_etime: only include statements that have an avg. etime > this value
-- (the default is .1 second)
--
--
-- faster_slower: a flag to indicate if you want only Faster or Slower SQL
-- (the default is both - use S% for slower and F% for faster)
--
-- Description: This scripts attempts to find statements with significantly different
-- average elapsed times per execution. It uses AWR data and computes a
-- normalized standard deviation between the average elapsed time per
-- execution before and after the date specified by the days_ago parameter.
--
-- The output includes the following:
--
-- SQL_ID - the sql_id of a statement that is in the shared pool (v$sqlarea)
--
-- EXECS - the total number of executions in the AWR tables
--
-- AVG_ETIME_BEFORE - the average elapsed time per execution before the REFERENCE_TIME
--
-- AVG_ETIME_AFTER - the average elapsed time per execution after the REFERENCE_TIME
--
-- NORM_STDDEV - this is a normalized standard deviation (i.e. how many times slower/faster is it now)
--
-- See http://kerryosborne.oracle-guy.com for additional information.
----------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------
-- Compares each SQL's average elapsed time per execution before vs. after
-- (sysdate - &&days_ago) using AWR (DBA_HIST_SQLSTAT / DBA_HIST_SNAPSHOT), and
-- reports statements whose normalized stddev exceeds &min_stddev (default 2).
----------------------------------------------------------------------------------------
accept days_ago -
prompt 'Enter Days ago: ' -
default '1'
set lines 155
col execs for 999,999,999
col before_etime for 999,990.99
col after_etime for 999,990.99
col before_avg_etime for 999,990.99 head AVG_ETIME_BEFORE
col after_avg_etime for 999,990.99 head AVG_ETIME_AFTER
col min_etime for 999,990.99
col max_etime for 999,990.99
col avg_etime for 999,990.999
col avg_lio for 999,999,990.9
col norm_stddev for 999,990.9999
col begin_interval_time for a30
col node for 99999
break on plan_hash_value on startup_time skip 1
select * from (
select sql_id, execs, before_avg_etime, after_avg_etime, norm_stddev,
       -- label the direction of the change for the &Faster_Slower filter below
       case when to_number(before_avg_etime) < to_number(after_avg_etime) then 'Slower' else 'Faster' end result
from (
select sql_id, sum(execs) execs, sum(before_execs) before_execs, sum(after_execs) after_execs,
       sum(before_avg_etime) before_avg_etime, sum(after_avg_etime) after_avg_etime,
       min(avg_etime) min_etime, max(avg_etime) max_etime, stddev_etime/min(avg_etime) norm_stddev,
       case when sum(before_avg_etime) > sum(after_avg_etime) then 'Slower' else 'Faster' end better_or_worse
from (
-- pivot the Before/After rows into side-by-side columns per sql_id
select sql_id,
       period_flag,
       execs,
       avg_etime,
       stddev_etime,
       case when period_flag = 'Before' then execs else 0 end before_execs,
       case when period_flag = 'Before' then avg_etime else 0 end before_avg_etime,
       case when period_flag = 'After' then execs else 0 end after_execs,
       case when period_flag = 'After' then avg_etime else 0 end after_avg_etime
from (
-- stddev over the two period averages; normalized by min(avg_etime) above
select sql_id, period_flag, execs, avg_etime,
       stddev(avg_etime) over (partition by sql_id) stddev_etime
from (
select sql_id, period_flag, sum(execs) execs, sum(etime)/sum(decode(execs,0,1,execs)) avg_etime from (
select sql_id, 'Before' period_flag,
       nvl(executions_delta,0) execs,
       (elapsed_time_delta)/1000000 etime
from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS
where ss.snap_id = S.snap_id
and ss.instance_number = S.instance_number
and executions_delta > 0
and elapsed_time_delta > 0
and ss.begin_interval_time <= sysdate-&&days_ago
-- UNION ALL (not UNION): the branches are disjoint via period_flag, and UNION
-- would wrongly collapse identical per-snapshot delta rows (undercounting
-- executions) while also forcing an unnecessary sort/dedup.
union all
select sql_id, 'After' period_flag,
       nvl(executions_delta,0) execs,
       (elapsed_time_delta)/1000000 etime
from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS
where ss.snap_id = S.snap_id
and ss.instance_number = S.instance_number
and executions_delta > 0
and elapsed_time_delta > 0
and ss.begin_interval_time > sysdate-&&days_ago
)
group by sql_id, period_flag
)
)
)
group by sql_id, stddev_etime
)
where norm_stddev > nvl(to_number('&min_stddev'),2)
and max_etime > nvl(to_number('&min_etime'),.1)
)
where result like nvl('&Faster_Slower',result)
order by norm_stddev
/
40. GRANT
SQL> grant select on dzhang.logm1126_F0002 to rodris3;
Grant succeeded.
SQL> grant select on dzhang.logm1126_F01131 to rodris3;
Grant succeeded.
41. export import examples:
dumpfile=s_VERTEXUSER_TPS_compr_01.dmp, s_VERTEXUSER_TPS_compr_02.dmp
job_name=export_s_01
parallel=2
schemas= VERTEXUSER_TPS
include=TABLE:"IN (
'LINEITEMTAXOVRFLW',
'LINEITEMLOCATION',
'LINEITEM'
)"
content=data_only
logfile=s_VERTEXUSER_TPS_compr_exp.log
==============
dumpfile=s_VERTEXUSER_TPS_compr_01.dmp, s_VERTEXUSER_TPS_compr_02.dmp
job_name=import_s_01
parallel=2
schemas= VERTEXUSER_TPS
include=TABLE:"IN (
'LINEITEMTAXOVRFLW',
'LINEITEMLOCATION',
'LINEITEM'
)"
table_exists_action=truncate
content=data_only
logfile=s_VERTEXUSER_TPS_compr_imp.log
==========
expdp bert/bert directory=data_pump_dir dumpfile=multi_table.dmp tables=movies.customer,movies.employee,bert.junk
=================
multiple schemas and multiple tablespaces
impdp GCSSATSTAD/gcssatstad4_you remap_schema=GCSSAPP:GCSSAPPA remap_tablespace=TBS_GCSS_DATA:TBS_GCSSA_DATA directory=EXPDP_DIR dumpfile=GCSSAPP_0226.dmp logfile=imp_GCSSAPP.log
42. CURRENT GC WAITS
gc current block busy when a request needs a block in current mode, it sends a request to the master instance. The requester eventually gets the block via cache fusion transfer.
However sometimes the block transfer is delayed due to either the block was being used by a session on another instance or the block transfer was delayed because the holding instance
could not write the corresponding redo records to the online logfile immediately.
One can use the session level dynamic performance views v$session and v$session_event to find the programs or sessions causing the most waits on this events
select a.sid , a.time_waited , b.program , b.module from v$session_event a , v$session b where a.sid=b.sid and a.event='gc current block busy' order by a.time_waited;
gc cr block busy when a request needs a block in CR mode , it sends a request to the master instance. The requester eventually gets the block via cache fusion transfer.
However sometimes the block transfer is delayed due to either the block was being used by a session on another instance or the block transfer was delayed because
the holding instance could not write the corresponding redo records to the online logfile immediately.
One can use the session level dynamic performance views v$session and v$session_event to find the programs or sessions causing the most waits on this event
select a.sid , a.time_waited , b.program , b.module from v$session_event a , v$session b where a.sid=b.sid and a.event='gc cr block busy' order by a.time_waited;
43. GENERIC FORM for group by n minutes
-- Count today's F42199 rows (time-of-day after 12:30:00) bucketed into
-- :n-minute intervals; bucket_start is the interval's starting timestamp.
select bucket_start, count(*)
from (
  select trunc(utils.get_time_fromjde(sysdate, SLTDAY), 'hh24')
         + (trunc(to_char(utils.get_time_fromjde(sysdate, SLTDAY), 'mi') / :n) * :n) / 24 / 60 as bucket_start
  from napddta.F42199
  where SLUPMJ = UTILS.JUL_FROM_DATE(sysdate)
    and SLTDAY > 123000
)
group by bucket_start
order by bucket_start
44. trace session
how to trace a session
what does the trace file look like?
remember there is a PID in the trace file name.
45. create tablespace template
-- Template: bigfile tablespace, initial 200 MB, autoextend in 10 MB steps.
-- (Terminating ';' added so the templates can be run as pasted.)
CREATE BIGFILE TABLESPACE "VERTEX_TPS" DATAFILE
SIZE 209715200
AUTOEXTEND ON NEXT 10485760 MAXSIZE 33554431M
LOGGING ONLINE PERMANENT BLOCKSIZE 8192
EXTENT MANAGEMENT LOCAL AUTOALLOCATE DEFAULT
NOCOMPRESS SEGMENT SPACE MANAGEMENT AUTO;
-- Template: manually resize an existing ASM datafile (size in bytes).
ALTER DATABASE DATAFILE
'+NA_DATA/nae1dv/datafile/vertex_tps.317.857489207' RESIZE 36217815040;
46 delete all objects in one schema
-- Generates (does not execute) the SQL needed to empty a schema:
--   1) drop all referential (type 'R') constraints, then
--   2) drop every object except indexes (indexes go away with their tables).
-- ORDER BY code DESC sorts the lowercase 'alter table ...' rows ahead of the
-- uppercase 'DROP ...' rows, so FK constraints are removed before the drops.
-- NOTE(review): spool the output and review it before running it.
SELECT 'alter table '
|| owner
|| '.'
|| table_name
|| ' drop constraint '
|| constraint_name
|| ' cascade;' code
FROM dba_constraints
WHERE (constraint_type = 'R') AND (owner = UPPER ('&&enter_user_name'))
UNION
SELECT DISTINCT 'DROP '
|| object_type
|| ' '
|| owner
|| '.'
|| object_name
|| ';' code
FROM dba_objects
WHERE (object_type NOT IN ('INDEX'))
AND (owner = UPPER ('&&enter_user_name'))
ORDER BY code DESC;
47 unlock and lock table stats
1. how to check which tables' stats are locked.
> select owner, table_name
from dba_tab_statistics where stattype_locked='ALL' and owner='NAPDDTA';
exec dbms_stats.unlock_table_stats('NAPDDTA', 'F42119');
exec dbms_stats.lock_table_stats('NAPDDTA', 'F42119');
48. extract user ddl script
select dbms_metadata.get_ddl('USER', 'DZHANG') from dual;
49. PLAY WITH HISTOGRAM
Disable one (drop a single column's histogram, preserving the stats lock):
SQL> declare
  2  begin
  3  dbms_stats.unlock_table_stats(ownname=>'APPDDTA',tabname=> 'F42119');
  4  dbms_stats.delete_column_stats(ownname=>'APPDDTA',tabname=>'F42119',colname=>'SDAN8',col_stat_type=> 'HISTOGRAM');
  5  dbms_stats.lock_table_stats(ownname=> 'APPDDTA' , tabname=> 'F42119');
  6  end;
50 play with quotes when spooling sqls
select 'exec dbms_stats.lock_table_stats(''NAPDDTA'',''' ||table_name|| ''');'
from dba_tab_statistics where stattype_locked='ALL' and owner='NAPDDTA';
looks like exec dbms_stats.lock_table_stats('NAPDDTA','F42199');
51 how to check if an index is used by some SQLs right now?
for example, F4201_8 is used or not?
select SQL_ID from gv$sql_plan where OBJECT_OWNER='NAPDDTA' and OBJECT_NAME='F4201_8';
52. lock monitor
CREATE OR REPLACE PROCEDURE UTILS.LOCK_MONITOR
authid current_user as
-- Detects sessions that have held blocking TM/UL locks for more than 180s and
-- e-mails a summary (blocking session plus the tables impacted) when any exist.
-- Invoker-rights; requires UTL_MAIL and select on the gv$/dba_ views below.
v_message varchar2(4000):= ' Database locks have been detected '||chr(10)||'--------------------------------'||chr(10)||' Please investigate'||chr(10);
send boolean:=false;       -- set true when at least one blocker is found
v_tablist varchar2(4000);  -- per-blocker list of impacted table names
-- All distinct blocking sessions.
-- BUGFIX: join gv$locked_object on inst_id as well as sid - in RAC the same
-- SID value exists on every instance, so a sid-only join can match a lock on
-- one instance to a locked-object row from a different instance.
cursor locks is select distinct process,machine, uname,sid session_id, inst_id db_instance from (
SELECT se.inst_id,
lk.SID,
se.username uname,
se.Machine,
se.process,
ob.owner ,
ob.object_name
FROM sys.GV_$lock lk,
sys.dba_objects ob,
sys.GV_$session se,
( select distinct session_id,inst_id,oracle_username from sys.gv_$locked_object where xidusn > 0) locked_objs
WHERE lk.TYPE IN ('TM','UL')
AND lk.SID = se.SID
AND lk.id1 = ob.object_id (+)
AND lk.inst_id = se.inst_id
and lk.ctime > 180
and lk.block >=1
and lk.sid=locked_objs.session_id
and lk.inst_id=locked_objs.inst_id
);
-- Tables locked by one blocking session (same sid+inst_id fix as above).
cursor objects (v_sid number, v_inst number) is SELECT distinct
ob.object_name
FROM sys.GV_$lock lk,
sys.dba_objects ob,
sys.GV_$session se,
( select distinct session_id,inst_id,oracle_username
from sys.gv_$locked_object where xidusn > 0) locked_objs
WHERE lk.TYPE IN ('TM','UL')
AND lk.SID = se.SID
AND lk.id1 = ob.object_id (+)
AND lk.inst_id = se.inst_id
and lk.sid=v_sid
and lk.inst_id=v_inst
and lk.block >=1
and lk.sid=locked_objs.session_id
and lk.inst_id=locked_objs.inst_id;
begin
-- Build one message line per blocking session, then append its table list.
for c1rec in locks loop
v_message:=v_message||'DB User '||c1rec.uname|| ' Process '||c1rec.process ||' on '||c1rec.machine ||' causing locks - Sid ' ||c1rec.session_id ||' on inst '|| c1rec.db_instance||chr(10);
v_tablist:= ' Tables Impacted - ';
for c2rec in objects(c1rec.session_id, c1rec.db_instance) loop
v_tablist:=v_tablist||' '||c2rec.object_name;
end loop;
v_message:=v_message||v_tablist||chr(10);
send:=true;
end loop;
-- Only send mail when at least one blocker was found.
if send then
Utl_Mail.Send(Sender => 'oracle@NAE1PD.baxter.com',
Recipients => 'GLBL.CORP.ERP.DBA@baxter.com, Global.CORP.Global_CNC@baxter.com',
subject => '**AUTOMATIC MESSAGE*** Do Not Reply*** Locking detected on NAE1PD**',
MESSAGE => v_message);
end if;
end;
/
53. lessons learned in OEM upgrade
+Checked the emgc.properties file and verified for all ports using below command
netstat -o -n -a | findstr
All ports were free except the HTTPS Console Port 7799
netstat -o -n -a | findstr 7799
+Killed the process running on this port using below command
taskkill /F /PID 16932
+But the process kept on spawning on this port
+Used the PID obtained from below command :
netstat -o -n -a | findstr 7799
In Task manager and found out that this port was being used by a web application which was continuously spawning process on this port even after killing all sub processes
+Deng wanted to change the port 7799 for OMS. Explained to him that this would need a repository restore, and that while kicking off the upgrade we can customize ports
+Deng preferred the approach of de-installing the web application running on 7799.
+Deinstalled the application and then resumed Upgrade in OUI using retry
The Upgrade completed successfully
C:\Users\_zhangd2>netstat -o -n -a | find "7799"
TCP 0.0.0.0:7799 0.0.0.0:0 LISTENING 49116
TCP 0.0.0.0:7799 0.0.0.0:0 LISTENING 42760
TCP 10.18.137.68:7799 10.22.54.158:1844 TIME_WAIT 0
TCP 10.18.137.68:7799 10.22.54.158:1851 TIME_WAIT 0
TCP 10.18.137.68:7799 10.22.54.158:1859 TIME_WAIT 0
TCP 10.18.137.68:7799 10.22.54.158:1864 TIME_WAIT 0
TCP 10.18.137.68:7799 10.22.54.158:1874 TIME_WAIT 0
TCP 10.18.137.68:7799 10.22.54.158:1886 TIME_WAIT 0
TCP 10.18.137.68:7799 10.22.54.158:1897 TIME_WAIT 0
notice that 49116 is PID
check from taskmanager
54. login issues (remote or local)
sqlplus "sys/d0GPile_cc as sysdba"
sqlplus sysman/d0GPile_cc
...............r\......
55 get user hash password:
SELECT substr (trim(DBMS_METADATA.get_ddl ('USER','DZHANG')),
REGEXP_INSTR(trim(DBMS_METADATA.get_ddl ('USER','DZHANG')),'''',1,1,0,'m'), 20
) PASSWD
FROM DUAL;
set long 150
set linesize 150
set longchunksize 150
select 'alter user '||username||' identified by values '||REGEXP_SUBSTR(DBMS_METADATA.get_ddl ('USER',USERNAME), '''[^'']+''')||';' from dba_users;
56 how to verify asm user password
sqlplus asmcdc/temp2014 as sysasm
=========================================================================
56 location of DM jobs run from controlM
cdcadm@usbde1r813zadm08:/u03/scripts/cdc$ cat test.ksh
===========================================================================
57 reset services
nae1dv1 [/u02/dba_script]
usbde1r913zadm05:oracle> ls
reset_service_nae1dv
58. archive generation report
cat log_history.sql
-- log_history.sql: redo log switches per hour over the last 30 days.
-- One row per day; h0..h23 count switches in each hour; Avg = switches/hour.
column h0 format 999
column h1 format 999
column h2 format 999
column h3 format 999
column h4 format 999
column h5 format 999
column h6 format 999
column h7 format 999
column h8 format 999
column h9 format 999
column h10 format 999
column h11 format 999
column h12 format 999
column h13 format 999
column h14 format 999
column h15 format 999
column h16 format 999
column h17 format 999
column h18 format 999
column h19 format 999
column h20 format 999
column h21 format 999
column h22 format 999
column h23 format 999
column avg format 999.99
column day format a6
SELECT TRUNC (first_time) "Date", TO_CHAR (first_time, 'Dy') "Day", COUNT (1) "Total",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '00', 1, 0)) h0,
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '01', 1, 0)) "h1",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '02', 1, 0)) "h2",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '03', 1, 0)) "h3",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '04', 1, 0)) "h4",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '05', 1, 0)) "h5",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '06', 1, 0)) "h6",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '07', 1, 0)) "h7",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '08', 1, 0)) "h8",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '09', 1, 0)) "h9",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '10', 1, 0)) "h10",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '11', 1, 0)) "h11",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '12', 1, 0)) "h12",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '13', 1, 0)) "h13",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '14', 1, 0)) "h14",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '15', 1, 0)) "h15",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '16', 1, 0)) "h16",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '17', 1, 0)) "h17",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '18', 1, 0)) "h18",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '19', 1, 0)) "h19",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '20', 1, 0)) "h20",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '21', 1, 0)) "h21",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '22', 1, 0)) "h22",
SUM (DECODE (TO_CHAR (first_time, 'hh24'), '23', 1, 0)) "h23", ROUND (COUNT (1) / 24, 2) "Avg"
FROM gv$log_history
WHERE first_time >= trunc(SYSDATE) - 30
-- gv$log_history shows every thread's history from every instance; keep only
-- each instance's own thread to avoid double counting in RAC (assumes thread
-- numbering matches instance numbering - verify on this cluster).
and thread# = inst_id
GROUP BY TRUNC (first_time), TO_CHAR (first_time, 'Dy')
ORDER BY 1 DESC;