Add ASM Disks to an Existing Diskgroup Without Any Hassle ::::

####### Step 1 : Check the value of asm_diskstring from GRID (ASM):

sqlplus / as sysasm

SQL> show parameter string

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
asm_diskstring                       string

####### Step 2 : Check the existing disks:

oracleasm listdisks | grep PROD_GIS_FRA | sort
PROD_GIS_FRA_1
PROD_GIS_FRA_2
PROD_GIS_FRA_3
PROD_GIS_FRA_4

####### Step 3 : The below device names will be provided by the SYS admins:

emcpoweraj1
emcpowerak1
emcpoweral1
emcpoweram1
emcpoweran1
emcpowerao1

####### Step 4 : Validate that the below devices exist:

ls -ltr /dev/emcpoweraj1
ls -ltr /dev/emcpowerak1
ls -ltr /dev/emcpoweral1
ls -ltr /dev/emcpoweram1
ls -ltr /dev/emcpoweran1
ls -ltr /dev/emcpowerao1

####### Step 5 : Create labels for the above new disks:

oracleasm createdisk PROD_GIS_FRA_5 /dev/emcpoweraj1
oracleasm createdisk PROD_GIS_FRA_6 /dev/emcpowerak1
oracleasm createdisk PROD_GIS_FRA_7 /dev/emcpoweral1
oracleasm createdisk PROD_GIS_FRA_8 /dev/emcpoweram1
oracleasm createdisk PROD_GIS_FRA_9 /dev/emcpoweran1
oracleasm createdisk PROD_GIS_FRA_10 /dev/emcpowerao1

####### Step 6 : From the other node, scan and check the new disks using oracleasm:

[root@server2 ~]# oracleasm listdisks | grep PROD_GIS_FRA | sort
PROD_GIS_FRA_1
PROD_GIS_FRA_2
PROD_GIS_FRA_3
PROD_GIS_FRA_4

[root@server2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "PROD_GIS_FRA_5"
Instantiating disk "PROD_GIS_FRA_6"
Instantiating disk "PROD_GIS_FRA_7"
Instantiating disk "PROD_GIS_FRA_10"
Instantiating disk "PROD_GIS_FRA_8"
Instantiating disk "PROD_GIS_FRA_9"

[root@server2 ~]# oracleasm listdisks | grep PROD_GIS_FRA | sort
PROD_GIS_FRA_1
PROD_GIS_FRA_10
PROD_GIS_FRA_2
PROD_GIS_FRA_3
PROD_GIS_FRA_4
PROD_GIS_FRA_5
PROD_GIS_FRA_6
PROD_GIS_FRA_7
PROD_GIS_FRA_8
PROD_GIS_FRA_9

####### Step 7 :
==========================================================================
Test, check, and verify the validity of the newly allocated disks so that
they do not cause any issues while being added to the existing PRODUCTION
diskgroup.
==========================================================================

Create a new temporary diskgroup (Node01):
============================================

CREATE DISKGROUP TEST_F EXTERNAL REDUNDANCY DISK
  'ORCL:PROD_GIS_FRA_5', 'ORCL:PROD_GIS_FRA_6', 'ORCL:PROD_GIS_FRA_7',
  'ORCL:PROD_GIS_FRA_8', 'ORCL:PROD_GIS_FRA_9', 'ORCL:PROD_GIS_FRA_10';

####### Step 8 : Check that the diskgroup is created and mounted on Node01:
=========================================================

SELECT STATE, NAME FROM V$ASM_DISKGROUP;

####### Step 9 : Then try mounting it on the rest of the nodes too:
----------------------------------------------------

SQL> ALTER DISKGROUP TEST_F MOUNT;
SQL> SELECT STATE, NAME FROM V$ASM_DISKGROUP;

SQL> ALTER DISKGROUP TEST_F DISMOUNT;
-- (from node02 and node03 only, i.e. from all the instances except one)

SQL> DROP DISKGROUP TEST_F;
-- (from the ASM instance on which the diskgroup is still mounted, i.e. node01)
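After TEST_F is dropped in Step 9, it is worth confirming from the ASM instance that the six disks are visible and available again before Step 10 adds them to the production diskgroup. Below is a minimal sanity-check query (a sketch, assuming the default ASMLib discovery path 'ORCL:*'): after DROP DISKGROUP the HEADER_STATUS normally shows FORMER or PROVISIONED, while MEMBER means the disk still belongs to a diskgroup and must not be reused.

col path for a30
col header_status for a15
SELECT path, header_status, os_mb
FROM   v$asm_disk
WHERE  path LIKE 'ORCL:PROD_GIS_FRA%'
ORDER  BY path;

OS_MB also lets you cross-check that the sizes match what the SYS admins allocated in Step 3.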
####### Step 10 :
=========================================================================
Then the candidate disks are ready to be added to the desired diskgroup:
=========================================================================

ALTER DISKGROUP PROD_GIS_FRA ADD DISK
  'ORCL:PROD_GIS_FRA_5', 'ORCL:PROD_GIS_FRA_6', 'ORCL:PROD_GIS_FRA_7',
  'ORCL:PROD_GIS_FRA_8', 'ORCL:PROD_GIS_FRA_9', 'ORCL:PROD_GIS_FRA_10'
  REBALANCE POWER 11;

SQL> select group_number from v$asm_diskgroup where name='PROD_GIS_FRA';

GROUP_NUMBER
------------
           4

####### Step 11 : Monitor the rebalance operation as follows (a more detailed progress query is sketched after Step 12):
=============================================

SELECT * FROM GV$ASM_OPERATION ORDER BY 1;

##### If you have a value set for asm_diskstring, then follow the steps below: #####

SQL> show parameter string

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
asm_diskstring                       string      /dev/oracleasm/disks/*

CREATE DISKGROUP TEST_D EXTERNAL REDUNDANCY DISK
  '/dev/oracleasm/disks/GIS_QA_DATA_11', '/dev/oracleasm/disks/GIS_QA_DATA_12',
  '/dev/oracleasm/disks/GIS_QA_DATA_13', '/dev/oracleasm/disks/GIS_QA_DATA_14',
  '/dev/oracleasm/disks/GIS_QA_DATA_15';

ALTER DISKGROUP DATA01 ADD DISK '/dev/oracleasm/disks/GIS_QA_DATA_11'
  NAME GIS_QA_DATA_11 REBALANCE POWER 11;

####### Step 12 : Validate the disks' information:

set pagesize 100
set linesize 300
col DISK_GROUP_NAME for a20
col DISK_FILE_PATH for a50
col DISK_FILE_FAIL_GROUP for a40
col DISK_FILE_NAME for a25

SELECT NVL(a.name, '[CANDIDATE]') disk_group_name
     , b.path                     disk_file_path
     , b.name                     disk_file_name
     , b.failgroup                disk_file_fail_group
FROM   v$asm_diskgroup a
       RIGHT OUTER JOIN v$asm_disk b USING (group_number)
ORDER  BY a.name;

SET TERMOUT OFF;
COLUMN current_instance NEW_VALUE current_instance NOPRINT;
SELECT rpad(sys_context('USERENV', 'INSTANCE_NAME'), 17) current_instance FROM dual;
SET TERMOUT ON;

PROMPT
PROMPT +------------------------------------------------------------------------+
PROMPT | Report   : ASM Disk Groups                                             |
PROMPT | Instance : &current_instance                                           |
PROMPT +------------------------------------------------------------------------+

SET ECHO OFF
SET FEEDBACK 6
SET HEADING ON
SET LINESIZE 180
SET PAGESIZE 50000
SET TERMOUT ON
SET TIMING OFF
SET TRIMOUT ON
SET TRIMSPOOL ON
SET VERIFY OFF

CLEAR COLUMNS
CLEAR BREAKS
CLEAR COMPUTES

COLUMN group_name           FORMAT a25           HEAD 'Disk Group|Name'
COLUMN sector_size          FORMAT 99,999        HEAD 'Sector|Size'
COLUMN block_size           FORMAT 99,999        HEAD 'Block|Size'
COLUMN allocation_unit_size FORMAT 999,999,999   HEAD 'Allocation|Unit Size'
COLUMN state                FORMAT a11           HEAD 'State'
COLUMN type                 FORMAT a6            HEAD 'Type'
COLUMN total_mb             FORMAT 999,999,999   HEAD 'Total Size (MB)'
COLUMN used_mb              FORMAT 999,999,999   HEAD 'Used Size (MB)'
COLUMN pct_used             FORMAT 999.99        HEAD 'Pct. Used'

BREAK ON report ON disk_group_name SKIP 1
COMPUTE sum LABEL "Grand Total: " OF total_mb used_mb ON report

SELECT name                                        group_name
     , sector_size                                 sector_size
     , block_size                                  block_size
     , allocation_unit_size                        allocation_unit_size
     , state                                       state
     , type                                        type
     , total_mb                                    total_mb
     , (total_mb - free_mb)                        used_mb
     , ROUND((1 - (free_mb / total_mb)) * 100, 2)  pct_used
FROM   v$asm_diskgroup
WHERE  total_mb != 0
ORDER  BY name
/
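As a complement to the plain SELECT * in Step 11, GV$ASM_OPERATION also carries ASM's own progress estimate. Below is a sketch of a more readable variant (EST_MINUTES is only ASM's rough projection; the rebalance started in Step 10 is finished once this query returns no rows):

SELECT inst_id, operation, state, power, sofar, est_work, est_rate, est_minutes
FROM   gv$asm_operation
ORDER  BY inst_id;

If the rebalance competes too hard with production I/O, the power can be lowered on the fly (ALTER DISKGROUP PROD_GIS_FRA REBALANCE POWER 4;) and raised again later.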
Misc commands which will be useful (be careful while running the commands below):

# fdisk -l > /tmp/diskinfo

$ sudo su - root

[root@awsserver ~]# oracleasm querydisk -d PROD_DATA_29
Disk "PROD_DATA_29" is a valid ASM disk on device [120,865]

[root@awsserver ~]# ls -ltr /dev | grep 120 | grep 865
brw-rw----. 1 root disk 120, 865 Jul 14 02:17 emcpowerbc1

[root@awsserver mapper]# oracleasm createdisk POC_DATA_06 /dev/mapper/3624a937031bda5346db148f900011de9
Writing disk header: done
Instantiating disk: done

[root@awsserver mapper]# oracleasm createdisk POC_DATA_06 /dev/mapper/3624a937031bda5346db148f900011dea
Disk "POC_DATA_06" already exists

[root@awsserver mapper]# oracleasm createdisk POC_DATA_07 /dev/mapper/3624a937031bda5346db148f900011de9
Device "/dev/mapper/3624a937031bda5346db148f900011de9" is already labeled for ASM disk "POC_DATA_06"

[grid@awsserver ~]$ asmcmd lsdsk --candidate
Path
ORCL:GIS_DATA_23
ORCL:GIS_DATA_24
ORCL:GIS_DATA_25
ORCL:GIS_DATA_26
ORCL:GIS_DATA_27
ORCL:GIS_DATA_28
ORCL:GIS_DATA_29
ORCL:GIS_DATA_30
ORCL:GIS_FRA_11
ORCL:GIS_FRA_12
ORCL:GIS_FRA_13
ORCL:GIS_FRA_14

[root@server11 ~]# oracleasm querydisk /dev/mapper/9dd3856b480990001105a
Device "/dev/mapper/9dd3856b480990001105a" is marked an ASM disk with the label "PROD_DATA01_61"

[root@server11 ~]# oracleasm querydisk /dev/mapper/9dd3856b480990001106f
Device "/dev/mapper/9dd3856b480990001106f" is not marked as an ASM disk
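One more command worth knowing for the mislabeling situation shown above (POC_DATA_06 created on the wrong device): an ASMLib label can be removed with oracleasm deletedisk. Below is a sketch of the safe sequence; run it only when the disk is not a member of any mounted diskgroup, since deletedisk clears the ASM header on the device:

# Confirm exactly which device carries the label before touching it.
[root@awsserver ~]# oracleasm querydisk -p POC_DATA_06

# Remove the label (clears the ASMLib header on the underlying device).
[root@awsserver ~]# oracleasm deletedisk POC_DATA_06

# On the other cluster nodes, re-scan so the stale label disappears there too.
[root@server2 ~]# oracleasm scandisks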
Note: Please test these scripts in a non-production environment before running them in production.