(Originally written as notes to myself.)
2 VirtualBox VMs (previously used for Oracle practice; running Oracle Linux 6)
s1: 192.168.1.31
s2: 192.168.1.32
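pureScale/GPFS setup assumes the hosts resolve each other by name and that root can ssh between them without a password. A minimal sketch of the prerequisites (ssh defaults assumed; not part of the original transcript):
# /etc/hosts, identical on both VMs
192.168.1.31   s1
192.168.1.32   s2
# password-less root ssh, set up in both directions
[root@s1 ~]# ssh-keygen -t rsa
[root@s1 ~]# ssh-copy-id root@s2
[root@s1 ~]# ssh s2 date    # should return without prompting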
-- make sure the pre-check passes
[root@s1 stage]# ./db2prereqcheck
-- install on both s1 and s2
[root@s1 stage]# ./db2_install
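To confirm the install landed on both hosts, db2ls (which lists installed DB2 copies) can be run on each; a quick sanity check, assuming the default /usr/local/bin symlink:
[root@s1 stage]# db2ls
[root@s1 stage]# ssh s2 /usr/local/bin/db2ls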
-- create cluster domain
[root@s1 ~]# db2cluster -cfs -create -domain expdomain -host s1
The shared file system cluster has been successfully created.
[root@s1 ~]# db2cluster -cfs -add -host s2
Host 's2' has been successfully added to the shared file system cluster.
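The underlying GPFS cluster can be inspected directly at this point; mmlscluster should list both hosts (a verification step, not in the original notes):
[root@s1 ~]# mmlscluster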
-- add license
[root@s1 ~]# mmchlicense server --accept -N s1,s2
The following nodes will be designated as possessing GPFS server licenses:
s1
s2
mmchlicense: Command successfully completed
mmchlicense: Propagating the cluster configuration data to all
affected nodes. This is an asynchronous process.
[root@s1 ~]# db2cluster -cfs -add -license
The license for the shared file system cluster has been successfully added.
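mmlslicense confirms the designations took effect (again just a check; output omitted):
[root@s1 ~]# mmlslicense -L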
-- create nsd
[root@s1 ~]# mmcrnsd -F ~/nsd.txt
mmcrnsd: Processing disk sdb
mmcrnsd: Propagating the cluster configuration data to all
[root@s1 ~]# mmlsnsd
File system Disk name NSD servers
---------------------------------------------------------------------------
(free disk) expnsd (directly attached)
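The ~/nsd.txt fed to mmcrnsd above is not shown. With the GPFS 3.5 level that ships with DB2 10.5 it would be a stanza file roughly like this (reconstructed to match the output, not the exact file):
%nsd: device=/dev/sdb
  nsd=expnsd
  usage=dataAndMetadata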
-- create FS
[root@s1 ~]# mmstartup -a
Wed Dec 17 20:46:18 GMT 2014: mmstartup: Starting GPFS ...
s1: The GPFS subsystem is already active.
[root@s1 ~]# mmgetstate -a
Node number Node name GPFS state
------------------------------------------
1 s1 active
2 s2 active
[root@s1 ~]# mmcrfs -T /expnsd expnsd expnsd
The following disks of expnsd will be formatted on node s1:
expnsd: size 6291456 KB
Formatting file system ...
Disks up to size 103 GB can be added to storage pool system.
Creating Inode File
Creating Allocation Maps
Creating Log Files
Clearing Inode Allocation Map
Clearing Block Allocation Map
Formatting Allocation Map for storage pool system
Completed creation of file system /dev/expnsd.
mmcrfs: Propagating the cluster configuration data to all
[root@s1 ~]# mmmount all -a
Wed Dec 17 20:55:39 GMT 2014: mmmount: Mounting file systems ...
[root@s1 ~]# df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/vg_ai0-lv_root
44969032 25317712 17366984 60% /
tmpfs 380372 88 380284 1% /dev/shm
/dev/sda1 495844 100408 369836 22% /boot
/dev/expnsd 6291456 172800 6118656 3% /expnsd
[root@s2 ~]# df
44969032 25358984 17325712 60% /
/dev/expnsd 6291456 1221376 5070080 20% /expnsd
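mmlsmount is a handier check than running df on every host, since it reports which nodes have the file system mounted (a verification step I would add here):
[root@s1 ~]# mmlsmount all -L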
## Another way to do it: db2cluster_prepare
[root@s1 ~]# fdisk -l | grep dev
Disk /dev/sda: 51.5 GB, 51539607552 bytes
/dev/sda1 * 1 64 512000 83 Linux
/dev/sda2 64 6267 49818624 8e Linux LVM
Disk /dev/sdb: 11.8 GB, 11811160064 bytes
Disk /dev/mapper/vg_ai0-lv_root: 46.8 GB, 46783266816 bytes
Disk /dev/mapper/vg_ai0-lv_swap: 4227 MB, 4227858432 bytes
[root@s1 ~]# db2cluster_prepare -instance_shared_dev /dev/sdb
DBI1446I The db2cluster_prepare command is running.
DB2 installation is being initialized.
Total number of tasks to be performed: 1
Total estimated time for all tasks to be performed: 60 second(s)
Task #1 start
Description: Creating IBM General Parallel File System (GPFS) Cluster and Filesystem
Estimated time 60 second(s)
Task #1 end
The execution completed successfully.
For more information see the DB2 installation log at
"/tmp/db2cluster_prepare.log".
DBI1070I Program db2cluster_prepare completed successfully.
Fri Dec 19 20:35:53 GMT 2014: mmstartup: Starting GPFS ...
Fri Dec 19 20:36:26 GMT 2014: mmmount: Mounting file systems ...
[root@s1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
43G 24G 18G 58% /
tmpfs 1004M 88K 1004M 1% /dev/shm
/dev/sda1 485M 99M 362M 22% /boot
/dev/db2fs1 11G 613M 11G 6% /db2sd_20141219203116
[root@s1 ~]# ssh s2 df -h
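The file system that db2cluster_prepare created can also be listed from the GPFS side; mmlsfs with 'all' covers every file system in the cluster, and -T shows the default mount point (just a check, output omitted):
[root@s1 ~]# mmlsfs all -T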
### Continued (single-host example on s0)
[root@s0 server_t]# db2cluster -cfs -create -domain mygpfsdomain -host s0
[root@s0 server_t]# mmstartup
Sat Mar 21 12:40:27 GMT 2015: mmstartup: Starting GPFS ...
[root@s0 server_t]# db2cluster -cfs -create -filesystem gpfs_db2data -disk /dev/sdb -mount /gpfs_db2data
File system 'gpfs_db2data' has been successfully created.
[root@s0 server_t]# mmgetstate
1 s0 active
[root@s0 server_t]# df -h
43G 22G 19G 54% /
/dev/gpfs_db2data 12G 612M 12G 5% /gpfs_db2data
Part II -- set up the second pureScale cluster (s2 and s3)
[root@s2 ~]# db2cluster -cfs -create -domain stbydomain -host s2
[root@s2 ~]# db2cluster -cfs -add -host S3
Host 'S3' has been successfully added to the shared file system cluster.
[root@s2 ~]# mmchlicense server --accept -N s2,S3
s3
[root@s2 ~]# db2cluster -cfs -start -all
All specified hosts have been started successfully.
[root@s2 ~]# mmgetstate -a
1 s2 active
2 s3 active
[root@s2 ~]# db2cluster -cfs -create -filesystem gpfs_db2data -disk /dev/sdb -mount /gpfs_db2data
[root@s2 ~]# df -h
43G 8.8G 32G 22% /
/dev/gpfs_db2data 11G 613M 11G 6% /gpfs_db2data
[root@s2 ~]# ssh s3 df -h
43G 12G 30G 29% /
[root@s2 ~]# db2icrt -cf s2 -cfnet s2 -m s3 -mnet s3 -instance_shared_dir /gpfs_db2data -tbdev 192.168.1.1 -u i1 i1
DBI1446I The db2icrt command is running.
Total number of tasks to be performed: 11
Total estimated time for all tasks to be performed: 1104 second(s)
Description: Installing DB2 files on remote hosts
Estimated time 600 second(s)
Task #2 start
Description: Installing or updating DB2 HA scripts for IBM Tivoli System Automation for Multiplatforms (Tivoli SA MP)
Estimated time 40 second(s)
Task #2 end
Task #3 start
Description: Installing or updating DB2 Cluster Scripts for IBM General Parallel File System (GPFS)
Task #3 end
Task #4 start
Description: Registering licenses on remote hosts
Task #4 end
Task #5 start
Description: Compiling GPL
Estimated time 30 second(s)
Task #5 end
Task #6 start
Description: Setting default global profile registry variables
Estimated time 1 second(s)
Task #6 end
Task #7 start
Description: Register NTP
Task #7 end
Task #8 start
Description: Initializing instance list
Estimated time 5 second(s)
Task #8 end
Task #9 start
Description: Initiating the remote host list
Task #9 end
Task #10 start
Description: Configuring DB2 instances
Estimated time 300 second(s)
Task #10 end
Task #11 start
Description: Updating global profile registry
Estimated time 3 second(s)
Task #11 end
For more information see the DB2 installation log at "/tmp/db2icrt.log.10888".
DBI1070I Program db2icrt completed successfully.
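Flag summary for the db2icrt call above: -cf/-cfnet name the cluster caching facility host and its interconnect netname, -m/-mnet the member host and netname, -instance_shared_dir points at the shared GPFS directory, -tbdev sets the tiebreaker (here an IP address), and -u names the fenced user. To confirm the instance exists, db2ilist can be run under the install path (the path below assumes the default V10.5 location):
[root@s2 ~]# /opt/ibm/db2/V10.5/bin/db2ilist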
[i1@s3 ~]$ db2start
03/28/2015 01:19:13 0 0 SQL1063N DB2START processing was successful.
SQL1063N DB2START processing was successful.
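The SAMPLE database connected to below is never created in this transcript; presumably db2sampl (the standard sample-database generator) was run beforehand, something like:
[i1@s3 ~]$ db2sampl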
[i1@s3 ~]$ db2 connect to sample
Database Connection Information
Database server = DB2/LINUXX8664 10.5.5
SQL authorization ID = I1
Local database alias = SAMPLE
[i1@s3 ~]$ db2 list tables | head
Table/View Schema Type Creation time
------------------------------- --------------- ----- --------------------------
ACT I1 T 2015-03-28-01.41.05.289833
ADEFUSR I1 S 2015-03-28-01.41.11.032454
CL_SCHED I1 T 2015-03-28-01.41.00.837140
DEPARTMENT I1 T 2015-03-28-01.41.01.787346
DEPT I1 A 2015-03-28-01.41.02.644292
EMP I1 A 2015-03-28-01.41.03.095928
EMPACT I1 A 2015-03-28-01.41.05.282848
[i1@s3 ~]$ db2instance -list
ID TYPE STATE HOME_HOST CURRENT_HOST ALERT PARTITION_NUMBER LOGICAL_PORT NETNAME
-- ---- ----- --------- ------------ ----- ---------------- ------------ -------
0 MEMBER STARTED s3 s3 NO 0 0 s3
128 CF PRIMARY s2 s2 NO - 0 s2
HOSTNAME STATE INSTANCE_STOPPED ALERT
-------- ----- ---------------- -----
s2 ACTIVE NO NO
s3 ACTIVE NO NO
[i1@s3 ~]$
Part III -- HADR
Reference:
==========
http://www-01.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/com.ibm.db2.luw.admin.ha.doc/doc/c0061088.html
Given Env:
Database name: sample
Instance owner on all hosts: i1
TCP port that is used for HADR primary-standby communication: 54321
TCP port that is used for SQL client/server communication: 50000
Hosts for cluster caching facilities and members on the primary: s0 and s1
Hosts for cluster caching facilities and members on the standby: s2 and s3
Configure HADR:
==============
The DBA takes a backup of the intended primary database sample:
db2 BACKUP DB sample TO /tmp
The DBA restores the backup onto the intended standby cluster:
db2 RESTORE DB sample FROM /tmp
On the primary, the DBA sets the cluster-level HADR parameters:
db2 "UPDATE DB CFG FOR sample USING
HADR_TARGET_LIST {s3:54321}
HADR_REMOTE_HOST {s3:54321}
HADR_REMOTE_INST i1
HADR_SYNCMODE async"
For member 0:
db2 "UPDATE DB CFG FOR sample MEMBER 0 USING
HADR_LOCAL_HOST s1
HADR_LOCAL_SVC 54321"
[root@s1 ~]# su - i1
[i1@s1 ~]$ db2 update db cfg for sample using logindexbuild on
DB20000I The UPDATE DATABASE CONFIGURATION command completed successfully.
[i1@s1 ~]$ db2 update db cfg for sample using indexrec restart
[i1@s1 ~]$ db2 backup db sample to /tmp
Backup successful. The timestamp for this backup image is : 20150328033939
[i1@s1 ~]$
[i1@s1 ~]$ db2 "UPDATE DB CFG FOR sample USING
> HADR_TARGET_LIST {s3:54321}
> HADR_REMOTE_HOST {s3:54321}
> HADR_REMOTE_INST i1
> HADR_SYNCMODE async"
[i1@s1 ~]$ db2 "UPDATE DB CFG FOR sample MEMBER 0 USING
> HADR_LOCAL_HOST s1
> HADR_LOCAL_SVC 54321"
[i1@s1 ~]$ ls /tmp | grep SAM
SAMPLE.0.i1.DBPART000.20150328033939.001
[i1@s1 ~]$ scp /tmp/SAMPLE.0.i1.DBPART000.20150328033939.001 s3:/tmp
On the standby, the DBA sets the cluster-level HADR parameters to point back at the primary:
HADR_TARGET_LIST {s1:54321}
HADR_REMOTE_HOST {s1:54321}
db2 "UPDATE DB CFG FOR sample MEMBER 0 USING HADR_LOCAL_HOST s3 HADR_LOCAL_SVC 54321"
[i1@s3 ~]$ db2 RESTORE DB sample FROM /tmp
DB20000I The RESTORE DATABASE command completed successfully.
[root@s3 ~]# su - i1
[i1@s3 ~]$ db2 "UPDATE DB CFG FOR sample USING
> HADR_TARGET_LIST {s1:54321}
> HADR_REMOTE_HOST {s1:54321}
[i1@s3 ~]$ db2 "UPDATE DB CFG FOR sample MEMBER 0 USING HADR_LOCAL_HOST s3 HADR_LOCAL_SVC 54321"
Starting HADR
As with other HADR environments, the standby database must be started first.
Because the member that the START HADR command is issued from is designated
the preferred replay member, the DBA issues the following commands:
From member 0 on the standby:
db2 START HADR ON DB sample AS STANDBY
From member 0 on the primary:
db2 START HADR ON DB sample AS PRIMARY
[i1@s3 ~]$ db2 START HADR ON DB sample AS STANDBY
DB20000I The START HADR ON DATABASE command completed successfully.
[i1@s1 ~]$ db2 START HADR ON DB sample AS primary
[i1@s1 ~]$ db2 connect to sample
[i1@s1 ~]$ db2 "select LOG_STREAM_ID, PRIMARY_MEMBER, STANDBY_MEMBER,
HADR_STATE from table (mon_get_hadr(-2))"
LOG_STREAM_ID PRIMARY_MEMBER STANDBY_MEMBER HADR_STATE
------------- -------------- -------------- -----------------------
0 0 0 PEER
1 record(s) selected.
Role switch
===========
At site B, the DBA issues the TAKEOVER HADR command on member 0 (hadr_db is the placeholder name from the IBM procedure; here the database is sample):
TAKEOVER HADR ON DB hadr_db
[i1@s3 ~]$ db2 takeover hadr on db sample
DB20000I The TAKEOVER HADR ON DATABASE command completed successfully.
[i1@s3 ~]$ db2 "select LOG_STREAM_ID, PRIMARY_MEMBER, STANDBY_MEMBER,
Failover
========
To simulate a failover, the current primary (s3) is brought down; on s1 the standby loses the HADR connection:
[i1@s1 ~]$ db2pd -d sample -hadr | head -17
Database Member 0 -- Database SAMPLE -- Standby -- Up 0 days 00:06:18 -- Date
2015-03-28-12.51.19.819014
HADR_ROLE = STANDBY
REPLAY_TYPE = PHYSICAL
HADR_SYNCMODE = ASYNC
STANDBY_ID = 0
LOG_STREAM_ID = 0
HADR_STATE = REMOTE_CATCHUP_PENDING
HADR_FLAGS =
PRIMARY_MEMBER_HOST = s3
PRIMARY_INSTANCE = i1
PRIMARY_MEMBER = 0
STANDBY_MEMBER_HOST = s1
STANDBY_INSTANCE = i1
STANDBY_MEMBER = 0
HADR_CONNECT_STATUS = DISCONNECTED
[i1@s1 ~]$ db2 TAKEOVER HADR ON DB sample BY FORCE
After the forced takeover, db2pd on s1 shows the roles reversed:
Database Member 0 -- Database SAMPLE -- Active -- Up 0 days 00:06:33 -- Date
2015-03-28-12.51.34.921518
HADR_ROLE = PRIMARY
STANDBY_ID = 1
HADR_STATE = DISCONNECTED
PRIMARY_MEMBER_HOST = s1
STANDBY_MEMBER_HOST = s3
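After a forced takeover, the usual next step is to reintegrate the old primary (here s3) as the new standby once it is back up; whether reintegration succeeds depends on its log position, so treat this as the expected path rather than a guarantee:
[i1@s3 ~]$ db2 START HADR ON DB sample AS STANDBY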