/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES,
 * 1621 Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

/*
 * Function prototypes
 */
u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
u_int8_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * map, PLD_LOAD_BALANCE_INFO lbInfo);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map);
static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm,
    MR_DRV_RAID_MAP_ALL * map);
static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
    MR_DRV_RAID_MAP_ALL * map);
static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld,
    MR_DRV_RAID_MAP_ALL * map);
MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
static int MR_PopulateDrvRaidMap(struct mrsas_softc *sc);

/*
 * Spanset related function prototypes. Added for PRL11 configuration
 * (uneven span support).
 */
void	mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo);
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
static u_int64_t
get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t strip, MR_DRV_RAID_MAP_ALL * map);
static u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
static u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld,
    u_int8_t span,
    u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map);

/*
 * Spanset related defines. Added for PRL11 configuration (uneven span
 * support).
 */
#define	SPAN_ROW_SIZE(map, ld, index_)	MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
#define	SPAN_ROW_DATA_SIZE(map_, ld, index_)	\
	MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize
#define	SPAN_INVALID	0xff
#define	SPAN_DEBUG	0

/*
 * Related Defines
 */

typedef u_int64_t REGION_KEY;
typedef u_int32_t REGION_LEN;

#define	MR_LD_STATE_OPTIMAL	3
#define	FALSE			0
#define	TRUE			1

#define	LB_PENDING_CMDS_DEFAULT	4

/*
 * Related Macros
 */

#define	ABS_DIFF(a,b)	( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )

#define	swap32(x) \
	((unsigned int)( \
	(((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
	(((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8)  | \
	(((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8)  | \
	(((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))

/*
 * In-line functions for mod and divide of 64-bit dividend and 32-bit
 * divisor. The caller must guarantee a nonzero divisor; no divide-by-zero
 * check is performed here.
 *
 * @param dividend:	Dividend
 * @param divisor:	Divisor
 * @return remainder
 */

#define	mega_mod64(dividend, divisor) ({ \
	int remainder; \
	remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
	remainder; })

#define	mega_div64_32(dividend, divisor) ({ \
	int quotient; \
	quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
	quotient; })
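
/*
 * Illustrative usage of the helpers above (hypothetical values, compiled
 * out). The strip number and arms-per-row below are made up; this sketch is
 * not part of the driver logic, it only shows how the macros split a strip
 * into a row index and a logical arm.
 */
#if 0
static void
mega_divmod_example(void)
{
	u_int64_t strip = 1000005;	/* hypothetical strip number */
	u_int32_t row_data_size = 3;	/* hypothetical data arms per row */

	/* row holding the strip: 1000005 / 3 = 333335 */
	u_int64_t row = mega_div64_32(strip, row_data_size);
	/* logical arm within the row: 1000005 % 3 = 0 */
	u_int32_t log_arm = mega_mod64(strip, row_data_size);

	printf("row %ju arm %u\n", (uintmax_t)row, log_arm);
}
#endif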

/*
 * Various RAID map access functions. These functions access the various
 * parts of the RAID map and return the appropriate parameters.
 */

MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}

static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u_int8_t
MR_PdInterfaceTypeGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.arMapInfo[ar].pd[arm];
}

static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

u_int8_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}
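
/*
 * Illustrative accessor chain (hypothetical target ID, compiled out): a CAM
 * target ID is first translated to an LD index, which then keys the other
 * map lookups. A sketch only; real callers also bounds-check the LD index,
 * as MR_LdBlockSizeGet() below does.
 */
#if 0
static u_int16_t
map_lookup_example(struct mrsas_softc *sc, u_int32_t tgt_id)
{
	MR_DRV_RAID_MAP_ALL *map = sc->ld_drv_map[sc->map_id & 1];
	u_int32_t ld = MR_TargetIdToLdGet(tgt_id, map);	/* target -> LD index */
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);	/* LD attributes */
	u_int32_t ar = MR_LdSpanArrayGet(ld, 0, map);	/* array of span 0 */
	u_int32_t pd = MR_ArPdGet(ar, 0, map);		/* PD of arm 0 */

	(void)raid;
	return MR_PdDevHandleGet(pd, map);		/* device handle */
}
#endif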
u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/*
	 * Check if logical drive was removed.
	 */
	if (ld >= MAX_LOGICAL_DRIVES)
		return ldBlockSize;

	raid = MR_LdRaidGet(ld, map);
	ldBlockSize = raid->logicalBlockLength;
	if (!ldBlockSize)
		ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	return ldBlockSize;
}

/*
 * This function populates the driver RAID map using the dynamic firmware
 * RAID map.
 */
static int
MR_PopulateDrvRaidMapVentura(struct mrsas_softc *sc)
{
	unsigned int i, j;
	u_int16_t ld_count;

	MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	MR_RAID_MAP_DESC_TABLE *desc_table;
	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	fw_map_dyn = (MR_FW_RAID_MAP_DYNAMIC *) sc->raidmap_mem[(sc->map_id & 1)];

	if (fw_map_dyn == NULL) {
		device_printf(sc->mrsas_dev,
		    "from %s %d map0 %p map1 %p map size %d\n", __func__, __LINE__,
		    sc->raidmap_mem[0], sc->raidmap_mem[1], sc->maxRaidMapSize);
		return 1;
	}
#if VD_EXT_DEBUG
	device_printf(sc->mrsas_dev,
	    " raidMapSize 0x%x, descTableOffset 0x%x, "
	    " descTableSize 0x%x, descTableNumElements 0x%x\n",
	    fw_map_dyn->raidMapSize, fw_map_dyn->descTableOffset,
	    fw_map_dyn->descTableSize, fw_map_dyn->descTableNumElements);
#endif
	desc_table = (MR_RAID_MAP_DESC_TABLE *) ((char *)fw_map_dyn +
	    fw_map_dyn->descTableOffset);
	if (desc_table != fw_map_dyn->raidMapDescTable) {
		device_printf(sc->mrsas_dev,
		    "offsets of desc table are not matching; "
		    "FW raid map has been changed: desc %p original %p\n",
		    desc_table, fw_map_dyn->raidMapDescTable);
	}
	memset(drv_map, 0, sc->drv_map_sz);
	ld_count = fw_map_dyn->ldCount;
	pDrvRaidMap->ldCount = ld_count;
	pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fpPdIoTimeoutSec;
	pDrvRaidMap->totalSize = sizeof(MR_DRV_RAID_MAP_ALL);
	/* point to actual data starting point */
	raid_map_data = (char *)fw_map_dyn +
	    fw_map_dyn->descTableOffset + fw_map_dyn->descTableSize;

	for (i = 0; i < fw_map_dyn->descTableNumElements; ++i) {
		if (!desc_table) {
			device_printf(sc->mrsas_dev,
			    "desc table is null, coming out %p\n", desc_table);
			return 1;
		}
#if VD_EXT_DEBUG
		device_printf(sc->mrsas_dev, "raid_map_data %p\n", raid_map_data);
		device_printf(sc->mrsas_dev,
		    "desc table %p\n", desc_table);
		device_printf(sc->mrsas_dev,
		    "raidmap type %d, raidmapOffset 0x%x, "
		    " raid map number of elements 0x%x, raidmapsize 0x%x\n",
		    desc_table->raidMapDescType, desc_table->raidMapDescOffset,
		    desc_table->raidMapDescElements, desc_table->raidMapDescBufferSize);
#endif
		switch (desc_table->raidMapDescType) {
		case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo = (MR_DEV_HANDLE_INFO *)
			    ((char *)raid_map_data + desc_table->raidMapDescOffset);
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "devHndlInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo);
#endif
			memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo,
			    sizeof(MR_DEV_HANDLE_INFO) * desc_table->raidMapDescElements);
			break;
		case RAID_MAP_DESC_TYPE_TGTID_INFO:
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd = (u_int16_t *)
			    ((char *)raid_map_data + desc_table->raidMapDescOffset);
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "ldTgtIdToLd address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd);
#endif
			for (j = 0; j < desc_table->raidMapDescElements; j++) {
				pDrvRaidMap->ldTgtIdToLd[j] = fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd[j];
#if VD_EXT_DEBUG
				device_printf(sc->mrsas_dev,
				    " %d drv ldTgtIdToLd %d\n", j, pDrvRaidMap->ldTgtIdToLd[j]);
#endif
			}
			break;
		case RAID_MAP_DESC_TYPE_ARRAY_INFO:
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo = (MR_ARRAY_INFO *) ((char *)raid_map_data +
			    desc_table->raidMapDescOffset);
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "arMapInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo);
#endif
			memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo,
			    sizeof(MR_ARRAY_INFO) * desc_table->raidMapDescElements);
			break;
		case RAID_MAP_DESC_TYPE_SPAN_INFO:
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap = (MR_LD_SPAN_MAP *) ((char *)raid_map_data +
			    desc_table->raidMapDescOffset);
			memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap,
			    sizeof(MR_LD_SPAN_MAP) * desc_table->raidMapDescElements);
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "ldSpanMap address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap);
			device_printf(sc->mrsas_dev,
			    "MR_LD_SPAN_MAP size 0x%lx\n", sizeof(MR_LD_SPAN_MAP));
			for (j = 0; j < ld_count; j++) {
				printf("mrsas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x "
				    "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
				    j, j, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.targetId, j,
				    fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.seqNum,
				    (u_int32_t)fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.rowSize);
				printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
				    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
				    j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId, j,
				    pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum,
				    (u_int32_t)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
				printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
				    drv_map, pDrvRaidMap, &fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid,
				    &pDrvRaidMap->ldSpanMap[j].ldRaid);
			}
#endif
			break;
		default:
			device_printf(sc->mrsas_dev,
			    "wrong number of desctableElements %d\n",
			    fw_map_dyn->descTableNumElements);
		}
		++desc_table;
	}
	return 0;
}
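
/*
 * Minimal sketch of the pointer arithmetic the dynamic-map walk above
 * relies on (compiled out, hypothetical descriptor index): the descriptor
 * table sits at descTableOffset from the map base, the payload area starts
 * right after the table, and each descriptor locates its payload via
 * raidMapDescOffset.
 */
#if 0
static void *
desc_payload_example(MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn, unsigned int idx)
{
	MR_RAID_MAP_DESC_TABLE *desc_table = (MR_RAID_MAP_DESC_TABLE *)
	    ((char *)fw_map_dyn + fw_map_dyn->descTableOffset);
	char *payload_base = (char *)fw_map_dyn +
	    fw_map_dyn->descTableOffset + fw_map_dyn->descTableSize;

	/* payload of descriptor idx */
	return payload_base + desc_table[idx].raidMapDescOffset;
}
#endif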
/*
 * This function populates the driver RAID map using the firmware RAID map.
 */
static int
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
	MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	MR_FW_RAID_MAP_EXT *fw_map_ext;
	MR_FW_RAID_MAP *pFwRaidMap = NULL;
	unsigned int i;
	u_int16_t ld_count;

	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (sc->maxRaidMapSize) {
		return MR_PopulateDrvRaidMapVentura(sc);
	} else if (sc->max256vdSupport) {
		fw_map_ext = (MR_FW_RAID_MAP_EXT *) sc->raidmap_mem[(sc->map_id & 1)];
		ld_count = (u_int16_t)(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			device_printf(sc->mrsas_dev,
			    "mrsas: LD count exposed in RAID map is not valid\n");
			return 1;
		}
#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			printf("mrsas : Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n",
			    i, fw_map_ext->ldSpanMap[i].ldRaid.targetId,
			    fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
			    fw_map_ext->ldSpanMap[i].ldRaid.size);
		}
#endif
		memset(drv_map, 0, sc->drv_map_sz);
		pDrvRaidMap->ldCount = ld_count;
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) {
			pDrvRaidMap->ldTgtIdToLd[i] = (u_int16_t)fw_map_ext->ldTgtIdToLd[i];
		}
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
		    sizeof(MR_LD_SPAN_MAP) * ld_count);
#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			printf("mrsas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId, i,
			    fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
			printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
			    &pDrvRaidMap->ldSpanMap[i].ldRaid);
		}
#endif
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);

		pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT);
	} else {
		fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
		for (i = 0; i < pFwRaidMap->ldCount; i++) {
			device_printf(sc->mrsas_dev,
			    "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n", i,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, sc->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;

		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
			pDrvRaidMap->ldTgtIdToLd[i] =
			    (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
		}

		for (i = 0; i < pDrvRaidMap->ldCount; i++) {
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];

#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap,
			    &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}

		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
	return 0;
}

/*
 * MR_ValidateMapInfo:	Validate RAID map
 * input:		Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map. It returns 0 if
 * successful, and 1 otherwise.
 */
u_int8_t
MR_ValidateMapInfo(struct mrsas_softc *sc)
{
	MR_DRV_RAID_MAP_ALL *drv_map;
	MR_DRV_RAID_MAP *pDrvRaidMap;
	PLD_SPAN_INFO ldSpanInfo;
	u_int32_t expected_map_size;

	if (!sc) {
		return 1;
	}
	if (MR_PopulateDrvRaidMap(sc))
		return 0;

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;
	ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;

	if (sc->maxRaidMapSize)
		expected_map_size = sizeof(MR_DRV_RAID_MAP_ALL);
	else if (sc->max256vdSupport)
		expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
	else
		expected_map_size =
		    (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
		    (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount);

	if (pDrvRaidMap->totalSize != expected_map_size) {
		device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size);
		device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
		device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize);
		return 1;
	}
	if (sc->UnevenSpanSupport) {
		mr_update_span_set(drv_map, ldSpanInfo);
	}
	mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info);

	return 0;
}

/*
 * Function to print info about span set created in driver from FW raid map
 *
 * Inputs:	map
 *		ldSpanInfo: ld map span info per HBA instance
 */
#if SPAN_DEBUG
static int
getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{

	u_int8_t span;
	u_int32_t element;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES) {
			continue;
		}
		raid = MR_LdRaidGet(ld, map);
		printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			printf("Span=%x, number of quads=%x\n", span,
			    map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			if (span_set->span_row_data_width == 0)
				break;

			printf("Span Set %x: width=%x, diff=%x\n", element,
			    (unsigned int)span_set->span_row_data_width,
			    (unsigned int)span_set->diff);
			printf("logical LBA start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->log_start_lba,
			    (long unsigned int)span_set->log_end_lba);
			printf("span row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->span_row_start,
			    (long unsigned int)span_set->span_row_end);
			printf("data row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->data_row_start,
			    (long unsigned int)span_set->data_row_end);
			printf("data strip start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->data_strip_start,
			    (long unsigned int)span_set->data_strip_end);

			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements >= element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
					    spanBlock[span].block_span_info.
					    quad[element];
					printf("Span=%x, Quad=%x, diff=%x\n", span,
					    element, quad->diff);
					printf("offset_in_span=0x%08lx\n",
					    (long unsigned int)quad->offsetInSpan);
					printf("logical start=0x%08lx, end=0x%08lx\n",
					    (long unsigned int)quad->logStart,
					    (long unsigned int)quad->logEnd);
				}
			}
		}
	}
	return 0;
}

#endif
/*
 * This routine calculates the span block for the given row using the
 * spanset.
 *
 * Inputs :	HBA instance
 *		ld:	Logical drive number
 *		row:	Row number
 *		map:	LD map
 *
 * Outputs :	span		- Span number
 *		block		- Absolute block number in the physical disk
 *		div_error	- Divide error code
 */

u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	u_int32_t span, info;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].
				    block_span_info.quad[info];
				if (quad->diff == 0) {
					*div_error = 1;
					return span;
				}
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    (mega_mod64(row - quad->logStart,
				    quad->diff)) == 0) {
					if (span_blk != NULL) {
						u_int64_t blk;

						blk = mega_div64_32
						    ((row - quad->logStart),
						    quad->diff);
						blk = (blk + quad->offsetInSpan)
						    << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
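
/*
 * Worked example of the quad test above (hypothetical numbers, compiled
 * out). With logStart=0, logEnd=99, diff=2 and offsetInSpan=10, row 40
 * belongs to this quad because 0 <= 40 <= 99 and (40 - 0) % 2 == 0; the
 * span-relative block is ((40 - 0) / 2 + 10) << stripeShift.
 */
#if 0
static u_int64_t
quad_block_example(MR_QUAD_ELEMENT *quad, u_int8_t stripe_shift)
{
	u_int64_t row = 40;	/* hypothetical row inside the quad */
	u_int64_t blk;

	blk = mega_div64_32(row - quad->logStart, quad->diff);
	blk = (blk + quad->offsetInSpan) << stripe_shift;
	return blk;	/* 30 << stripeShift for the values above */
}
#endif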

/*
 * This routine calculates the row for the given strip using the spanset.
 *
 * Inputs :	HBA instance
 *		ld:	Logical drive number
 *		strip:	Strip
 *		map:	LD map
 *
 * Outputs :	row - row associated with strip
 */

static u_int64_t
get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;
	u_int64_t span_set_Strip, span_set_Row;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
		    span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
		    span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >=
				    span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
		    "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
		    (unsigned long long)span_set_Strip,
		    (unsigned long long)span_set_Row,
		    (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
		    (unsigned long long)span_set->data_row_start +
		    (unsigned long long)span_set_Row + (span_offset - 1));
		return (span_set->data_row_start + span_set_Row + (span_offset - 1));
	}
	return -1LLU;
}

/*
 * This routine calculates the start strip for the given row using the
 * spanset.
 *
 * Inputs :	HBA instance
 *		ld:	Logical drive number
 *		row:	Row number
 *		map:	LD map
 *
 * Outputs :	strip - start strip associated with row
 */

static u_int64_t
get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t span, info;
	u_int64_t strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[info];
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    mega_mod64((row - quad->logStart),
				    quad->diff) == 0) {
					strip = mega_div64_32
					    (((row - span_set->data_row_start)
					    - quad->logStart),
					    quad->diff);
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug - get_strip_from_row: returns invalid "
	    "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
	return -1;
}

/*
 * This routine calculates the physical arm for the given strip using the
 * spanset.
 *
 * Inputs :	HBA instance
 *		Logical drive number
 *		Strip
 *		LD map
 *
 * Outputs :	Phys Arm - Phys Arm associated with strip
 */

static u_int32_t
get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (u_int32_t)mega_mod64
		    ((strip - span_set->data_strip_start),
		    span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >= span_set->strip_offset[span])
					span_offset = span_set->strip_offset[span];
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: "
		    "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
		    (long unsigned int)strip, (strip_offset - span_offset));
		return (strip_offset - span_offset);
	}

	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: - get_arm_from_strip: returns invalid arm"
	    " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

	return -1;
}

/* This function returns the physical arm. */
u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

	/* Need to check correct default value */
	u_int32_t arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(sc, ld, stripe, map);
		arm *= 2;
		break;
	}

	return arm;
}
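
/*
 * Illustrative note on the RAID-1 case above (hypothetical arm index,
 * compiled out, and an inference from this file's code rather than a
 * documented layout): mirror arrays appear to store each data arm and its
 * mirror adjacently, so the logical arm index is doubled to address the
 * data copy, and the mirror sits at physArm + 1 (see the r1_alt_pd lookups
 * below).
 */
#if 0
static u_int32_t
raid1_arm_example(u_int32_t logical_arm)
{
	u_int32_t data_arm = logical_arm * 2;	/* data copy */
	u_int32_t mirror_arm = data_arm + 1;	/* mirror copy */

	(void)mirror_arm;
	return data_arm;
}
#endif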

/*
 * This routine calculates the arm, span, and block for the specified stripe
 * and reference in stripe using the spanset.
 *
 * Inputs :	sc - HBA instance
 *		ld - Logical drive number
 *		stripRow: Stripe number
 *		stripRef: Reference in stripe
 *
 * Outputs :	span - Span number
 *		block - Absolute block number in the physical disk
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef, r1_alt_pd;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int8_t *pPdInterface = &io_info->pdInterface;
	u_int32_t logArm, rowMod, armQ, arm;

	/* Get row and span from io_info for uneven span IO. */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		logArm = get_arm_from_strip(sc, ld, stripRow, map);
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u_int8_t)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(sc, ld, span, stripRow, map);

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((raid->level == 1) && !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		*pDevHandle = MR_DEVHANDLE_INVALID;
		if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
		    (sc->mrsas_gen3_ctrl &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	if (sc->is_ventura || sc->is_aero) {
		((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm =
		    (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->spanArm;
	}
	return retval;
}
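
/*
 * Worked example of the RAID-6 arm rotation above (hypothetical geometry,
 * compiled out). With a 4-arm span row, row 0 places Q on arm 3 (armQ =
 * 4 - 1 - 0), so logical data arm 0 lands on physical arm 0 (armQ + 1 + 0
 * wraps from 4 to 0); on row 1, Q moves to arm 2 and logical arm 0 lands on
 * physical arm 3.
 */
#if 0
static u_int8_t
raid6_arm_example(u_int64_t row, u_int32_t log_arm, u_int32_t row_size)
{
	u_int32_t rowMod = mega_mod64(row, row_size);
	u_int32_t armQ = row_size - 1 - rowMod;	/* Q parity arm this row */
	u_int32_t arm = armQ + 1 + log_arm;	/* data follows Q */

	if (arm >= row_size)			/* wrap around the row */
		arm -= row_size;
	return (u_int8_t)arm;
}
#endif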

/*
 * MR_BuildRaidContext:	Set up fast path RAID context
 *
 * This function initiates command processing: the start/end row and strip
 * information is calculated and the region lock parameters are filled in.
 * It returns FALSE (0) if the span/row lookup fails and the request cannot
 * be built, and TRUE otherwise.
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, stripSize, stripe_mask;
	u_int64_t endLba, endStrip, endRow, start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	u_int8_t num_strips, numRows;
	u_int16_t ref_in_start_stripe, ref_in_end_stripe;
	u_int64_t ldStartBlock;
	u_int32_t numBlocks, ldTgtId;
	u_int8_t isRead, stripIdx;
	u_int8_t retval = 0;
	u_int8_t startlba_span = SPAN_INVALID;
	u_int64_t *pdBlock = &io_info->pdBlock;
	int error_code = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/* check read ahead bit */
	io_info->raCapable = raid->capability.raCapable;

	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (sc->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%x,"
			    " and there is _NO_ UnevenSpanSupport\n",
			    MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}
	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(sc, ld, start_strip, map);
		endRow = get_row_from_strip(sc, ld, endStrip, map);
		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else {
			startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
			    pdBlock, map, &error_code);
			if (error_code == 1) {
				mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n",
				    __func__, __LINE__);
				return FALSE;
			}
		}
		if (startlba_span == SPAN_INVALID) {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d for row 0x%llx, "
			    "start strip 0x%llx endStrip 0x%llx\n", __func__,
			    __LINE__, (unsigned long long)start_row,
			    (unsigned long long)start_strip,
			    (unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: Check span number from %s %d for row 0x%llx, "
		    "start strip 0x%llx endStrip 0x%llx span 0x%x\n",
		    __func__, __LINE__, (unsigned long long)start_row,
		    (unsigned long long)start_strip,
		    (unsigned long long)endStrip, startlba_span);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx start span 0x%x\n",
		    (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}

	numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

	/*
	 * Calculate region info. (Assume region at start of first row, and
	 * assume this IO needs the full row - will adjust if not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For even span region lock optimization. If the start strip
		 * is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
		    SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(sc, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}
	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
	if (sc->mrsas_gen3_ctrl)
		pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (sc->device_id == MRSAS_TBOLT)
		pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
		    mr_spanset_get_phy_params(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map) :
		    MR_GetPhyParams(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = FALSE;
		/*
		 * if FP possible, set the SLUD bit in regLockFlags for
		 * ventura
		 */
		else if ((sc->is_ventura || sc->is_aero) && !isRead &&
		    (raid->writeMode == MR_RL_WRITE_BACK_MODE) && (raid->level <= 1) &&
		    raid->capability.fpCacheBypassCapable) {
			((RAID_CONTEXT_G35 *) pRAID_Context)->routingFlags.bits.sld = 1;
		}

		return retval;
	} else if (isRead) {
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
			    mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map) :
			    MR_GetPhyParams(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}
#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	get_arm_from_strip(sc, ld, start_strip, map);
#endif
	return TRUE;
}
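
/*
 * Worked example of the strip/row arithmetic in MR_BuildRaidContext()
 * (hypothetical geometry, compiled out). With a 64-block strip
 * (stripeShift = 6) and rowDataSize = 3, an IO at LBA 100 for 200 blocks
 * spans strips 1..4 (num_strips = 4) and rows 0..1, and the reference in
 * the start stripe is 100 & 63 = 36.
 */
#if 0
static void
strip_math_example(void)
{
	u_int64_t lba = 100, num_blocks = 200;
	u_int8_t stripe_shift = 6;		/* 64-block strips */
	u_int32_t stripe_mask = (1 << stripe_shift) - 1;
	u_int32_t row_data_size = 3;		/* data arms per row */

	u_int64_t start_strip = lba >> stripe_shift;		/* 1 */
	u_int16_t ref_in_start = lba & stripe_mask;		/* 36 */
	u_int64_t end_lba = lba + num_blocks - 1;		/* 299 */
	u_int64_t end_strip = end_lba >> stripe_shift;		/* 4 */
	u_int64_t start_row = mega_div64_32(start_strip, row_data_size); /* 0 */
	u_int64_t end_row = mega_div64_32(end_strip, row_data_size);	  /* 1 */

	printf("strips %ju..%ju rows %ju..%ju ref %u\n",
	    (uintmax_t)start_strip, (uintmax_t)end_strip,
	    (uintmax_t)start_row, (uintmax_t)end_row, ref_in_start);
}
#endif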

/*
 * This routine prepares the spanset info from the valid RAID map and stores
 * it in the per-instance local copy of ldSpanInfo.
 *
 * Inputs :	LD map
 *		ldSpanInfo per HBA instance
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	u_int8_t span, count;
	u_int32_t element, span_row_width;
	u_int64_t span_row;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements < element + 1)
					continue;
				/* TO-DO */
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[element];

				span_set->diff = quad->diff;

				for (count = 0, span_row_width = 0;
				    count < raid->spanDepth; count++) {
					if (map->raidMap.ldSpanMap[ld].spanBlock[count].
					    block_span_info.noElements >= element + 1) {
						span_set->strip_offset[count] = span_row_width;
						span_row_width +=
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
						printf("AVAGO Debug span %x rowDataSize %x\n", count,
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((quad->logEnd -
				    quad->logStart) + quad->diff), quad->diff);

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end = (span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end = (span_row * quad->diff) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
					span_set->log_start_lba = span_set_prev->log_end_lba + 1;
					span_set->log_end_lba = span_set->log_start_lba +
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					    span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					    span_set_prev->data_strip_end + 1;
					span_set->data_strip_end = span_set->data_strip_start +
					    (span_row * span_row_width) - 1;

					span_set->data_row_start = span_set_prev->data_row_end + 1;
					span_set->data_row_end = span_set->data_row_start +
					    (span_row * quad->diff) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads remain */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);	/* to get span set info */
#endif
}

/*
 * mrsas_update_load_balance_params:	Update load balance params
 * Inputs:
 *	sc - driver softc instance
 *	drv_map - driver RAID map
 *	lbInfo - Load balance info
 *
 * This function updates the load balance parameters for the LD config of a
 * two-drive optimal RAID-1.
 */
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	int ldCount;
	u_int16_t ld;
	MR_LD_RAID *raid;

	if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1)
		sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
		    (raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}
1365 */ 1366 void 1367 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len, 1368 struct IO_REQUEST_INFO *io_info, union ccb *ccb, 1369 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, 1370 u_int32_t ld_block_size) 1371 { 1372 MR_LD_RAID *raid; 1373 u_int32_t ld; 1374 u_int64_t start_blk = io_info->pdBlock; 1375 u_int8_t *cdb = io_request->CDB.CDB32; 1376 u_int32_t num_blocks = io_info->numBlocks; 1377 u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0; 1378 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 1379 1380 /* Check if T10 PI (DIF) is enabled for this LD */ 1381 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 1382 raid = MR_LdRaidGet(ld, local_map_ptr); 1383 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 1384 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1385 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD; 1386 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN; 1387 1388 if (ccb_h->flags == CAM_DIR_OUT) 1389 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32; 1390 else 1391 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32; 1392 cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL; 1393 1394 /* LBA */ 1395 cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff); 1396 cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff); 1397 cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff); 1398 cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff); 1399 cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff); 1400 cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff); 1401 cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff); 1402 cdb[19] = (u_int8_t)(start_blk & 0xff); 1403 1404 /* Logical block reference tag */ 1405 io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag); 1406 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1407 io_request->IoFlags = 32; /* Specify 32-byte cdb */ 1408 1409 /* Transfer length */ 1410 cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff); 1411 cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff); 1412 cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff); 1413 cdb[31] = (u_int8_t)(num_blocks & 0xff); 1414 1415 /* set SCSI IO EEDP Flags */ 1416 if (ccb_h->flags == CAM_DIR_OUT) { 1417 io_request->EEDPFlags = 1418 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1419 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1420 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1421 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1422 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 1423 } else { 1424 io_request->EEDPFlags = 1425 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1426 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 1427 } 1428 io_request->Control |= (0x4 << 26); 1429 io_request->EEDPBlockSize = ld_block_size; 1430 } else { 1431 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 1432 if (((cdb_len == 12) || (cdb_len == 16)) && 1433 (start_blk <= 0xffffffff)) { 1434 if (cdb_len == 16) { 1435 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; 1436 flagvals = cdb[1]; 1437 groupnum = cdb[14]; 1438 control = cdb[15]; 1439 } else { 1440 opcode = cdb[0] == READ_12 ? 
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = 10;	/* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u_int8_t)(num_blocks & 0xff);
			cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
			cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
			cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = 16;	/* Specify 16-byte cdb */
			cdb_len = 16;
		} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
			/* convert to 10 byte CDB */
			opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
			control = cdb[5];

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
			cdb[0] = opcode;
			cdb[9] = control;

			/* Set transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			/* Specify 10-byte cdb */
			cdb_len = 10;
		}
		/* Fall through normal case, just load LBA here */
		u_int8_t val = cdb[1] & 0xE0;

		switch (cdb_len) {
		case 6:
			cdb[3] = (u_int8_t)(start_blk & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
			break;
		case 10:
			cdb[5] = (u_int8_t)(start_blk & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u_int8_t)(start_blk & 0xff);
			cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
			cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
			break;
		}
	}
}
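
/*
 * Worked example of the big-endian LBA packing above (hypothetical values,
 * compiled out): for a 10-byte CDB, start_blk 0x12345678 is stored
 * most-significant byte first in cdb[2..5], i.e. 12 34 56 78.
 */
#if 0
static void
cdb10_lba_example(u_int8_t *cdb)
{
	u_int64_t start_blk = 0x12345678;	/* hypothetical PD LBA */

	cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);	/* 0x12 */
	cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);	/* 0x34 */
	cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);	/* 0x56 */
	cdb[5] = (u_int8_t)(start_blk & 0xff);		/* 0x78 */
}
#endif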

/*
 * mrsas_get_best_arm_pd:	Determine the best spindle arm
 * Inputs:
 *	sc - HBA instance
 *	lbInfo - Load balance info
 *	io_info - IO request info
 *
 * This function determines and returns the best arm by looking at the
 * parameters of the last PD access.
 */
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	MR_LD_RAID *raid;
	MR_DRV_RAID_MAP_ALL *drv_map;
	u_int16_t pd1_devHandle;
	u_int16_t pend0, pend1, ld;
	u_int64_t diff0, diff1;
	u_int8_t bestArm, pd0, pd1, span, arm;
	u_int32_t arRef, span_row_size;

	u_int64_t block = io_info->ldStartBlock;
	u_int32_t count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
	    >> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = sc->UnevenSpanSupport ?
	    SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
	    (arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */
	pd1_devHandle = MR_PdDevHandleGet(pd1, drv_map);
	if (pd1_devHandle == MR_DEVHANDLE_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
		    (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
#if SPAN_DEBUG
	if (arm != bestArm)
		printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x "
		    "io_info->span_arm 0x%x\n",
		    span, arm, bestArm, io_info->span_arm);
#endif

	return io_info->pd_after_lb;
}
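
/*
 * Worked example of the arm selection above (hypothetical counters,
 * compiled out): if the data arm's head is at block 1000 and the mirror's
 * at 9000, a request for block 1200 favors the data arm (diff 200 vs 7800);
 * the choice is overridden only when the favored arm already has more than
 * lb_pending_cmds extra outstanding commands.
 */
#if 0
static int
lb_choice_example(u_int64_t block, u_int64_t last0, u_int64_t last1,
    u_int16_t pend0, u_int16_t pend1, u_int16_t lb_pending_cmds)
{
	int best = (ABS_DIFF(block, last0) <= ABS_DIFF(block, last1)) ? 0 : 1;

	/* back off if the nearer arm is clearly more loaded */
	if ((best == 0 && pend0 > pend1 + lb_pending_cmds) ||
	    (best == 1 && pend1 > pend0 + lb_pending_cmds))
		best ^= 1;
	return best;
}
#endif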

/*
 * mrsas_get_updated_dev_handle:	Get the updated dev handle
 * Inputs:
 *	sc - Adapter instance soft state
 *	lbInfo - Load balance info
 *	io_info - io_info pointer
 *
 * This function determines and returns the updated dev handle.
 */
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	u_int8_t arm_pd;
	u_int16_t devHandle;
	MR_DRV_RAID_MAP_ALL *drv_map;

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];

	/* get best new arm */
	arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	io_info->pdInterface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
	mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}

/*
 * MR_GetPhyParams:	Calculates arm, span, and block
 * Inputs:		Adapter soft state
 *			Logical drive number (LD)
 *			Stripe number (stripRow)
 *			Reference in stripe (stripRef)
 *
 * Outputs:		Absolute block number in the physical disk
 *
 * This routine calculates the arm, span and block for the specified stripe
 * and reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef, r1_alt_pd;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	int error_code = 0;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int8_t *pPdInterface = &io_info->pdInterface;
	u_int32_t rowMod, armQ, arm, logArm;

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		logArm = mega_mod64(stripRow, raid->rowDataSize);
		if (raid->rowSize == 0)
			return FALSE;
		rowMod = mega_mod64(row, raid->rowSize);	/* get logical row mod */
		armQ = raid->rowSize - 1 - rowMod;	/* index of Q drive */
		arm = armQ + 1 + logArm;	/* data always logically follows Q */
		if (arm >= raid->rowSize)	/* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u_int8_t)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
		if (error_code == 1)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);

	pd = MR_ArPdGet(arRef, physArm, map);	/* Get the Pd. */

	if (pd != MR_PD_INVALID) {
		/* Get dev handle from Pd */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((raid->level == 1) && !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		*pDevHandle = MR_DEVHANDLE_INVALID;	/* set dev handle as invalid. */
		if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
		    (sc->mrsas_gen3_ctrl &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID) {
				/* Get dev handle from Pd. */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	if (sc->is_ventura || sc->is_aero) {
		((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm =
		    (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->spanArm;
	}
	return retval;
}

/*
 * MR_GetSpanBlock:	Calculates span block
 * Inputs:		LD
 *			row
 *			span block
 *			RAID map pointer
 *
 * Outputs:		Span number
 *			Error code
 *
 * This routine calculates the span from the span block info.
 */
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT *quad;
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t span, j;
	u_int64_t blk, debugBlk;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			quad = &pSpanBlock->block_span_info.quad[j];
			if (quad->diff == 0) {
				*div_error = 1;
				return span;
			}
			if (quad->logStart <= row && row <= quad->logEnd &&
			    (mega_mod64(row - quad->logStart, quad->diff)) == 0) {
				if (span_blk != NULL) {
					blk = mega_div64_32((row - quad->logStart), quad->diff);
					debugBlk = blk;
					blk = (blk + quad->offsetInSpan) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return span;
}