Академические документы
Профессиональные документы
Культурные документы
===============
To measure Smart Scan statistics systemwide, execute the following query:
-- Systemwide Smart Scan statistics per RAC instance, reported in GiB.
-- Compares bytes eligible for predicate offload against bytes actually
-- shipped over the storage interconnect and returned by Smart Scan.
select inst.instance_name,
       sn.name,
       st.value / 1024 / 1024 / 1024 as value  -- bytes -> GiB
from gv$sysstat st
join gv$statname sn
  on sn.statistic# = st.statistic#
 and sn.inst_id    = st.inst_id  -- gv$ views must also be joined on inst_id
join gv$instance inst
  on inst.inst_id = st.inst_id
where sn.name in
      ('cell physical IO bytes eligible for predicate offload',
       'cell physical IO interconnect bytes',
       'cell physical IO interconnect bytes returned by Smart Scan')
order by inst.instance_name, sn.name;
To measure Smart Scan statistics systemwide, execute the script in Listing 15-1.
In this query, we can see that for this database, each instance had almost 60 TB of
data eligible for Smart Scan, approximately 28 TB transmitted over the storage
interconnect, and about 25 TB returned via Smart Scan queries.
To measure Smart Scan statistics for a specific session, identify an Oracle session
and execute the script in Listing 15-2.
Listing 15-2. lst15-02-exass-session.sql
-- Smart Scan statistics for one session, in GiB (prompted via &&sid).
-- First branch: the queried session itself.
-- Second branch (reported as sid = -1): totals over any parallel-query
-- slave sessions whose query coordinator is the given session.
-- NOTE: the original listing carried a copy-pasted "SQL> " prompt, which
-- made the script unrunnable as a file; it has been removed.
select sess.sid,
       stat.name,
       round(sess.value / 1024 / 1024 / 1024, 2) as value
from v$sesstat sess
join v$statname stat
  on stat.statistic# = sess.statistic#
where sess.sid = '&&sid'
  and stat.name in
      ('cell physical IO bytes eligible for predicate offload',
       'cell physical IO interconnect bytes',
       'cell physical IO interconnect bytes returned by Smart Scan')
union all  -- branches are disjoint (-1 is never a real SID), so no dedup needed
select -1,
       stat.name,
       round(sum(sess.value) / 1024 / 1024 / 1024, 2) as value
from v$sesstat sess
join v$statname stat
  on stat.statistic# = sess.statistic#
where sess.sid in (select sid from v$px_session where qcsid = '&&sid')
  and stat.name in
      ('cell physical IO bytes eligible for predicate offload',
       'cell physical IO interconnect bytes',
       'cell physical IO interconnect bytes returned by Smart Scan')
group by stat.name
order by sid desc, name;
-- NOTE(review): incomplete fragment — the opening SELECT of the outer query
-- and the first delta column(s) computed from ss1/ss2 are missing from this
-- excerpt. What remains computes per-AWR-snapshot deltas of Smart Scan
-- statistics using lag() over snap_id.
eligbytes,
(sum(ss3.value) - lag(sum(ss3.value),1,0) over (order by
ss3.snap_id)) ssbytes,
-- rank over snap_id lets the outermost query discard the first snapshot,
-- whose lag() delta has no real predecessor (see "where myrank>1" below)
rank() over (order by ss1.snap_id) myrank
from
dba_hist_sysstat ss1,
dba_hist_sysstat ss2,
dba_hist_sysstat ss3
where ss1.snap_id=ss2.snap_id
and ss2.snap_id=ss3.snap_id
-- start one snapshot early so lag() has a baseline value for &&snap_low
and ss1.snap_id between &&snap_low-1 and &&snap_hi
and ss2.dbid=ss1.dbid
and ss3.dbid=ss2.dbid
-- each ssN alias is pinned to one statistic; the three-way self-join
-- pivots the statistics into columns of a single row per snapshot
and ss1.stat_name='cell physical IO interconnect bytes'
and ss2.stat_name='cell physical IO bytes eligible for predicate offload'
and ss3.stat_name='cell physical IO interconnect bytes returned by Smart Scan'
group by ss1.snap_id,ss2.snap_id,ss3.snap_id
order by ss1.snap_id) snaps,
dba_hist_snapshot snap
where snap.snap_id=snaps.snap_id
order by 1)
-- drop the priming row: its delta spans from before the requested range
where myrank>1;
-- ASM disk group capacity summary with a per-group physical disk count.
-- NOTE(review): the /2 halving assumes NORMAL (two-way mirror) redundancy;
-- for HIGH redundancy the divisor would be 3 — confirm per disk group.
select dg.name,
       dg.total_mb,
       dg.free_mb,
       dg.type,
       dg.total_mb / 2 as avail_mb,   -- usable capacity after mirroring
       dg.free_mb  / 2 as usable_mb,  -- usable free space after mirroring
       count(d.path)   as cdisks      -- number of disks in the group
from v$asm_diskgroup dg
join v$asm_disk d
  on d.group_number = dg.group_number
group by dg.name, dg.total_mb, dg.free_mb, dg.type
order by dg.total_mb, dg.name;
1) execute the following query to show how many grid disks are used per storage
cell per ASM disk group:
-- Grid disk count per ASM disk group per failure group. On Exadata each
-- storage cell forms one failgroup, so cnt is the number of grid disks
-- that cell contributes to the disk group.
select dg.name,
       d.failgroup,
       count(*) as cnt
from v$asm_diskgroup dg
join v$asm_disk d
  on d.group_number = dg.group_number
group by dg.name, d.failgroup
order by dg.name, d.failgroup;  -- deterministic output for eyeball comparison
storage cell
--------------
shows how dcli and CellCLI commands can be used together to report the status of
all storage cells in a half rack cluster:
dcli -g /root/cell_group -l root cellcli -e "list cell"
To measure the small and large I/O waits per IORM category, use the
CT_IO_WT_.*_RQ metrics with the following dcli command:
dcli -g ./cell_group "cellcli -e list metriccurrent where
objectType=\'IORM_CATEGORY\'"
-- One row per storage cell: software release, flash cache mode, CPU count,
-- uptime, kernel version, and model, parsed from the XML payload stored in
-- v$cell_config.confval.
-- NOTE(review): extract()/xmltype extraction is deprecated in recent Oracle
-- releases in favour of XMLQUERY/XMLTABLE; kept as-is for compatibility.
SELECT
  cellname cv_cellname
  , CAST(extract(xmltype(confval), '/cli-output/cell/releaseVersion/text()') AS
    VARCHAR2(20)) cv_cellVersion
  , CAST(extract(xmltype(confval), '/cli-output/cell/flashCacheMode/text()') AS
    VARCHAR2(20)) cv_flashcachemode
  , CAST(extract(xmltype(confval), '/cli-output/cell/cpuCount/text()') AS
    VARCHAR2(10)) cpu_count
  , CAST(extract(xmltype(confval), '/cli-output/cell/upTime/text()') AS
    VARCHAR2(20)) uptime
  , CAST(extract(xmltype(confval), '/cli-output/cell/kernelVersion/text()') AS
    VARCHAR2(30)) kernel_version
  , CAST(extract(xmltype(confval), '/cli-output/cell/makeModel/text()') AS
    VARCHAR2(50)) make_model
FROM
  v$cell_config -- gv$ isn't needed, all cells should be visible in all instances
WHERE
  conftype = 'CELL'
ORDER BY
  -- NOTE(review): the source was truncated after ORDER BY; cellname is the
  -- natural sort key for a per-cell report — confirm against the original.
  cv_cellname;
# identify unassigned Exadata grid disks to use as your disk group disks.
# NOTE(review): the backslash-escaped quotes suggest this is meant to run
# inside a dcli double-quoted command string; typed directly at the CellCLI
# prompt, the filter would be written asmDiskGroupName='' — confirm context.
list griddisk attributes name,asmDiskGroupName,asmDiskname where
asmDiskGroupName=\'\'