回复 13# zhenzongjian 因为这个存储是刚实施不久的,我对系统和存储也不太了解。我主要想知道存储有没有优化的空间,所以目前只做了一个 dd 写测试
[root@dev:/]#lsvg -l devvg
devvg:
LV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT
lv_dev jfs2 3608 3608 2 open/syncd /test
loglv03 jfs2log 1 1 1 open/syncd N/A
lv_dg01 jfs2 400 400 1 open/syncd /u01
lv_dg02 jfs2 1200 1200 1 open/syncd /u02
lv_dg03 jfs2 400 400 2 open/syncd /u03
[root@dev:/]#lslv -l lv_dev
lv_dev:/test
PV COPIES IN BAND DISTRIBUTION
hdiskpower6 1999:000:000 20% 400:400:399:400:400
hdiskpower4 1609:000:000 24% 400:400:399:400:010
[root@dev:/]#powermt display dev=hdiskpower4
Pseudo name=hdiskpower4
CLARiiON ID=FCN00121800144 [ERP_Dev]
Logical device ID=6006016061113100407A825600EDE111 [LUN 103]
state=alive; policy=CLAROpt; priority=0; queued-IOs=0
Owner: default=SP A, current=SP A Array failover mode: 4
==============================================================================
---------------- Host --------------- - Stor - -- I/O Path - -- Stats ---
### HW Path I/O Paths Interf. Mode State Q-IOs Errors
==============================================================================
0 fscsi0 hdisk11 SP B0 active alive 0 0
0 fscsi0 hdisk19 SP A0 active alive 0 0
1 fscsi1 hdisk26 SP B0 active alive 0 0
1 fscsi1 hdisk30 SP A0 active alive 0 0
[root@dev:/]#powermt display dev=hdiskpower6
Pseudo name=hdiskpower6
CLARiiON ID=FCN00121800144 [ERP_Dev]
Logical device ID=6006016061113100417A825600EDE111 [LUN 104]
state=alive; policy=CLAROpt; priority=0; queued-IOs=0
Owner: default=SP B, current=SP B Array failover mode: 4
==============================================================================
---------------- Host --------------- - Stor - -- I/O Path - -- Stats ---
### HW Path I/O Paths Interf. Mode State Q-IOs Errors
==============================================================================
0 fscsi0 hdisk15 SP B0 active alive 0 0
0 fscsi0 hdisk24 SP A0 active alive 0 0
1 fscsi1 hdisk28 SP B0 active alive 0 0
1 fscsi1 hdisk32 SP A0 active alive 0 0
[root@dev:/]#
刚才又做了一次 dd 写测试
iostat 输出:
tty: tin tout avg-cpu: % user % sys % idle % iowait
0.0 2607.0 2.7 22.9 36.2 38.2
Disks: % tm_act Kbps tps Kb_read Kb_wrtn
hdisk1 0.0 0.0 0.0 0 0
hdisk5 0.0 0.0 0.0 0 0
hdisk10 0.0 0.0 0.0 0 0
hdisk14 0.0 0.0 0.0 0 0
hdisk2 0.0 0.0 0.0 0 0
hdisk4 0.0 0.0 0.0 0 0
hdisk6 0.0 0.0 0.0 0 0
hdisk9 0.0 0.0 0.0 0 0
hdisk3 0.0 0.0 0.0 0 0
hdisk7 0.0 0.0 0.0 0 0
hdisk12 0.0 0.0 0.0 0 0
hdisk16 0.0 0.0 1.0 0 0
hdisk17 0.0 0.0 0.0 0 0
hdisk21 0.0 0.0 0.0 0 0
hdisk22 0.0 0.0 0.0 0 0
hdisk23 0.0 0.0 0.0 0 0
hdisk8 0.0 0.0 0.0 0 0
hdisk0 0.0 0.0 0.0 0 0
cd0 0.0 0.0 0.0 0 0
hdiskpower1 0.0 0.0 0.0 0 0
hdiskpower2 0.0 0.0 0.0 0 0
hdiskpower3 0.0 0.0 1.0 0 0
hdiskpower0 0.0 0.0 0.0 0 0
hdisk11 0.0 0.0 0.0 0 0
hdisk13 0.0 0.0 0.0 0 0
hdisk15 1.0 28.0 2.0 0 28
hdisk19 100.0 75532.0 86.0 4 75528
hdisk20 0.0 8.0 2.0 0 8
hdisk24 0.0 0.0 0.0 0 0
hdisk26 0.0 0.0 0.0 0 0
hdisk28 7.0 36.0 2.0 16 20
hdisk31 0.0 0.0 0.0 0 0
hdisk30 100.0 75140.0 82.0 16 75124
hdisk32 0.0 0.0 0.0 0 0
hdisk27 0.0 0.0 0.0 0 0
hdiskpower4 100.0 150544.0 103.0 20 150524
hdiskpower5 0.0 8.0 2.0 0 8
hdiskpower6 7.0 64.0 4.0 16 48
sar -d 输出:
12:28:10 hdisk1 0 0.0 0 0 0.0 0.0
hdisk5 0 0.0 0 0 0.0 0.0
hdisk10 0 0.0 0 0 0.0 0.0
hdisk14 0 0.0 0 0 0.0 0.3
hdisk2 0 0.0 0 0 0.0 0.0
hdisk4 0 0.0 0 0 0.0 0.0
hdisk6 0 0.0 0 0 0.0 0.0
hdisk9 0 0.0 0 0 0.0 0.0
hdisk3 0 0.0 0 0 0.0 0.0
hdisk7 0 0.0 0 0 0.0 0.0
hdisk12 0 0.0 0 0 0.0 0.0
hdisk16 0 0.0 0 0 0.0 0.0
hdisk17 0 0.0 0 0 0.0 0.0
hdisk21 0 0.0 0 0 0.0 0.0
hdisk22 0 0.0 0 0 0.0 0.0
hdisk23 0 0.0 0 0 0.0 0.0
hdisk8 0 0.0 0 0 0.0 0.0
hdisk0 0 0.0 0 0 0.0 0.0
cd0 0 0.0 0 0 0.0 0.0
hdiskpower1 0 0.0 0 0 0.0 0.0
hdiskpower2 0 0.0 0 0 0.0 0.0
hdiskpower3 0 0.0 0 0 0.0 0.0
hdiskpower0 0 0.0 0 0 0.0 0.0
hdisk11 0 0.0 0 0 0.0 0.0
hdisk13 0 0.0 0 0 0.0 0.0
hdisk15 99 0.0 291 77133 1.9 108.7
hdisk19 27 0.0 235 67406 0.9 28.1
hdisk20 0 0.0 17 147 0.0 0.5
hdisk24 0 0.0 0 0 0.0 0.0
hdisk26 0 0.0 0 0 0.0 0.0
hdisk28 99 0.0 283 75663 2.1 108.1
hdisk31 4 0.0 18 154 0.0 2.7
hdisk30 22 0.0 250 66087 0.8 22.7
hdisk32 0 0.0 0 0 0.0 0.0
hdisk27 0 0.0 0 0 0.0 0.0
hdiskpower4 27 0.0 471 133366 0.0 0.0
hdiskpower5 4 0.0 36 301 0.0 0.0
hdiskpower6 99 0.0 593 152924 0.0 0.0
^C[root@dev:/]#
topas 输出:
Topas Monitor for host: dev EVENTS/QUEUES FILE/TTY
Tue Nov 6 12:27:33 2012 Interval: 2 Cswitch 91870 Readch 147.2M
Syscall 112.4K Writech 147.2M
CPU User% Kern% Wait% Idle% Reads 56665 Rawin 0
ALL 2.1 31.5 50.5 16.0 Writes 56682 Ttyout 3217
Forks 0 Igets 0
Network KBPS I-Pack O-Pack KB-In KB-Out Execs 0 Namei 53
Total 10.4 35.1 34.1 3.9 6.5 Runqueue 1.0 Dirblk 0
Waitqueue 19.6
Disk Busy% KBPS TPS KB-Read KB-Writ MEMORY
Total 100.0 194.8K 1090.0 29.1 194.8K PAGING Real,MB 47647
Faults 26 % Comp 77
FileSystem KBPS TPS KB-Read KB-Writ Steals 37589 % Noncomp 22
Total 147.1K 18.4K 145.2 147.0K PgspIn 0 % Client 22
PgspOut 0
Name PID CPU% PgSp Owner PageIn 3 PAGING SPACE
dd 30933554 15.9 0.1 root PageOut 24925 Size,MB 16384
lrud 262160 9.5 0.7 root Sios 23140 % Used 1
dd 7143778 3.9 0.1 root % Free 99
pp2def 1376892 0.3 0.4 root NFS (calls/sec)
java 25428038 0.2 71.6 appluat SerV2 0 WPAR Activ 0
topas 8519688 0.1 15.3 oradg CliV2 0 WPAR Total 0
gil 459320 0.0 0.9 root SerV3 0 Press: "h"-help
java 16450098 0.0 178.7 appltest CliV3 0 "q"-quit
swapper 524 0.0 0.4 root
pilegc 655388 0.0 0.6 root
java 2687394 0.0 32.0 root
oracle 30409470 0.0 8.4 oradg
oracle 20578866 0.0 10.1 oradg
oracle 26280220 0.0 8.5 oratest
sar 15335562 0.0 3.3 root
oracle 10879654 0.0 8.5 oradev
swapper 264 0.0 0.4 root
oracle 6161146 0.0 11.0 oratest
j2pg 524652 0.0 5.1 root
oracle 4981106 0.0 12.1 oradev
收起