/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES,
 * 1621 Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

/*
 * Function prototypes
 */
u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
u_int8_t mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map);
u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map);
u_int8_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
u_int16_t mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL *map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
void mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo);
void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL *map);
static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map);
static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm,
    MR_DRV_RAID_MAP_ALL *map);
static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL *map);
static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
    MR_DRV_RAID_MAP_ALL *map);
static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld,
    MR_DRV_RAID_MAP_ALL *map);
MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
static int MR_PopulateDrvRaidMap(struct mrsas_softc *sc);

/*
 * Spanset-related function prototypes. Added for the PRL11 configuration
 * (uneven span support).
 */
void mr_update_span_set(MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map);
static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t strip, MR_DRV_RAID_MAP_ALL *map);
static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL *map, int *div_error);
static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
    u_int64_t stripe, MR_DRV_RAID_MAP_ALL *map);

/*
 * Spanset-related defines. Added for the PRL11 configuration (uneven span
 * support).
 */
#define SPAN_ROW_SIZE(map_, ld, index_) \
    (MR_LdSpanPtrGet(ld, index_, map_)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) \
    (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID 0xff
#define SPAN_DEBUG 0

/*
 * Related defines
 */

typedef u_int64_t REGION_KEY;
typedef u_int32_t REGION_LEN;

#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
#define TRUE 1

#define LB_PENDING_CMDS_DEFAULT 4

/*
 * Related macros
 */

#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

#define swap32(x) \
    ((unsigned int)( \
    (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
    (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
    (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
    (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24)))

/*
 * In-line helpers for mod and divide of a 64-bit dividend by a 32-bit
 * divisor. The divisor must be nonzero; callers are responsible for never
 * passing zero.
 *
 * @param dividend:	Dividend
 * @param divisor:	Divisor
 * @return remainder (or quotient)
 */

#define mega_mod64(dividend, divisor) ({ \
    u_int32_t remainder; \
    remainder = ((u_int64_t)(dividend)) % (u_int32_t)(divisor); \
    remainder; })

/*
 * The quotient is kept in a 64-bit type: the declared prototype returns
 * u_int64_t, and truncating it to int would corrupt row numbers on very
 * large logical drives.
 */
#define mega_div64_32(dividend, divisor) ({ \
    u_int64_t quotient; \
    quotient = ((u_int64_t)(dividend)) / (u_int32_t)(divisor); \
    quotient; })
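
/*
 * Illustrative example (hypothetical values): for an LD with rowDataSize = 3
 * data arms, logical strip 1000 maps to
 *
 *	row    = mega_div64_32(1000, 3) = 333
 *	offset = mega_mod64(1000, 3)    = 1
 *
 * i.e. strip 1000 is the second data strip in row 333. The helpers exist so
 * that the 64-by-32-bit '/' and '%' are confined to one place; callers must
 * guarantee a nonzero divisor.
 */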

/*
 * Various RAID map access functions. These functions access the various
 * parts of the RAID map and return the appropriate parameters.
 */

MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map)
{
    return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map)
{
    return le16toh(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL *map)
{
    return le16toh(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL *map)
{
    return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map)
{
    return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u_int8_t
MR_PdInterfaceTypeGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map)
{
    return map->raidMap.devHndlInfo[pd].interfaceType;
}

static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL *map)
{
    return le16toh(map->raidMap.arMapInfo[ar].pd[arm]);
}

static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL *map)
{
    return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map)
{
    return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

u_int8_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map)
{
    return map->raidMap.ldTgtIdToLd[ldTgtId];
}

u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid;
    u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

    ld = MR_TargetIdToLdGet(ldTgtId, map);

    /*
     * Check if the logical drive was removed.
     */
    if (ld >= MAX_LOGICAL_DRIVES)
        return ldBlockSize;

    raid = MR_LdRaidGet(ld, map);
    ldBlockSize = raid->logicalBlockLength;
    if (!ldBlockSize)
        ldBlockSize = MRSAS_SCSIBLOCKSIZE;

    return ldBlockSize;
}
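
/*
 * The accessors above are typically chained: given a CAM target id, a caller
 * resolves the LD index and then walks span -> array -> physical drive ->
 * device handle. A minimal sketch (hypothetical locals, error checks elided):
 *
 *	ld    = MR_TargetIdToLdGet(ldTgtId, map);
 *	arRef = MR_LdSpanArrayGet(ld, span, map);
 *	pd    = MR_ArPdGet(arRef, physArm, map);
 *	devH  = MR_PdDevHandleGet(pd, map);
 *
 * Every lookup is a table read from the firmware-provided map; nothing here
 * touches hardware.
 */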

/*
 * MR_PopulateDrvRaidMapVentura: Populate the driver RAID map from the
 * dynamic firmware RAID map.
 */
static int
MR_PopulateDrvRaidMapVentura(struct mrsas_softc *sc)
{
    unsigned int i, j;
    u_int16_t ld_count;

    MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
    MR_RAID_MAP_DESC_TABLE *desc_table;
    MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
    void *raid_map_data = NULL;

    fw_map_dyn = (MR_FW_RAID_MAP_DYNAMIC *)sc->raidmap_mem[(sc->map_id & 1)];

    if (fw_map_dyn == NULL) {
        device_printf(sc->mrsas_dev,
            "from %s %d map0 %p map1 %p map size %d\n", __func__, __LINE__,
            sc->raidmap_mem[0], sc->raidmap_mem[1], sc->maxRaidMapSize);
        return 1;
    }
#if VD_EXT_DEBUG
    device_printf(sc->mrsas_dev,
        "raidMapSize 0x%x, descTableOffset 0x%x, "
        "descTableSize 0x%x, descTableNumElements 0x%x\n",
        fw_map_dyn->raidMapSize, le32toh(fw_map_dyn->descTableOffset),
        fw_map_dyn->descTableSize, fw_map_dyn->descTableNumElements);
#endif
    desc_table = (MR_RAID_MAP_DESC_TABLE *)((char *)fw_map_dyn +
        le32toh(fw_map_dyn->descTableOffset));
    if (desc_table != fw_map_dyn->raidMapDescTable) {
        device_printf(sc->mrsas_dev,
            "desc table offsets do not match; the FW raid map layout "
            "may have changed: desc %p original %p\n",
            desc_table, fw_map_dyn->raidMapDescTable);
    }
    memset(drv_map, 0, sc->drv_map_sz);
    ld_count = le16toh(fw_map_dyn->ldCount);
    pDrvRaidMap->ldCount = htole16(ld_count);
    pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fpPdIoTimeoutSec;
    pDrvRaidMap->totalSize = htole32(sizeof(MR_DRV_RAID_MAP_ALL));
    /* point to the actual data starting point */
    raid_map_data = (char *)fw_map_dyn +
        le32toh(fw_map_dyn->descTableOffset) +
        le32toh(fw_map_dyn->descTableSize);

    for (i = 0; i < le32toh(fw_map_dyn->descTableNumElements); ++i) {
        if (!desc_table) {
            device_printf(sc->mrsas_dev,
                "desc table is null, coming out %p\n", desc_table);
            return 1;
        }
#if VD_EXT_DEBUG
        device_printf(sc->mrsas_dev, "raid_map_data %p\n", raid_map_data);
        device_printf(sc->mrsas_dev, "desc table %p\n", desc_table);
        device_printf(sc->mrsas_dev,
            "raidmap type %d, raidmapOffset 0x%x, "
            "raid map number of elements 0x%x, raidmapsize 0x%x\n",
            le32toh(desc_table->raidMapDescType), desc_table->raidMapDescOffset,
            le32toh(desc_table->raidMapDescElements), desc_table->raidMapDescBufferSize);
#endif
        switch (le32toh(desc_table->raidMapDescType)) {
        case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo = (MR_DEV_HANDLE_INFO *)
                ((char *)raid_map_data + le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "devHndlInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo);
#endif
            memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo,
                sizeof(MR_DEV_HANDLE_INFO) * le32toh(desc_table->raidMapDescElements));
            break;
        case RAID_MAP_DESC_TYPE_TGTID_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd = (u_int16_t *)
                ((char *)raid_map_data +
                le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "ldTgtIdToLd address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd);
#endif
            for (j = 0; j < le32toh(desc_table->raidMapDescElements); j++) {
                pDrvRaidMap->ldTgtIdToLd[j] = fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd[j];
#if VD_EXT_DEBUG
                device_printf(sc->mrsas_dev,
                    " %d drv ldTgtIdToLd %d\n", j, pDrvRaidMap->ldTgtIdToLd[j]);
#endif
            }
            break;
        case RAID_MAP_DESC_TYPE_ARRAY_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo = (MR_ARRAY_INFO *)((char *)raid_map_data +
                le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "arMapInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo);
#endif
            memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo,
                sizeof(MR_ARRAY_INFO) * le32toh(desc_table->raidMapDescElements));
            break;
        case RAID_MAP_DESC_TYPE_SPAN_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap = (MR_LD_SPAN_MAP *)((char *)raid_map_data +
                le32toh(desc_table->raidMapDescOffset));
            memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap,
                sizeof(MR_LD_SPAN_MAP) *
                le32toh(desc_table->raidMapDescElements));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "ldSpanMap address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap);
            device_printf(sc->mrsas_dev,
                "MR_LD_SPAN_MAP size 0x%lx\n", sizeof(MR_LD_SPAN_MAP));
            for (j = 0; j < ld_count; j++) {
                printf("mrsas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x "
                    "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                    j, j, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.targetId, j,
                    fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.seqNum,
                    (u_int32_t)fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.rowSize);
                printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                    j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId, j,
                    pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum,
                    (u_int32_t)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
                printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
                    drv_map, pDrvRaidMap, &fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid,
                    &pDrvRaidMap->ldSpanMap[j].ldRaid);
            }
#endif
            break;
        default:
            device_printf(sc->mrsas_dev,
                "unsupported raid map descriptor type %d\n",
                le32toh(desc_table->raidMapDescType));
        }
        ++desc_table;
    }
    return 0;
}
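
/*
 * Layout handled above, as implied by the parsing code: the dynamic map is a
 * header, followed by a descriptor table at descTableOffset, followed by the
 * raw data region. Each descriptor carries a type, an offset relative to the
 * data region, an element count and a buffer size, e.g. (hypothetical
 * numbers):
 *
 *	desc[0] = { DEVHDL_INFO, off 0x0000, 256 elements }
 *	desc[1] = { TGTID_INFO,  off 0x2000, 512 elements }
 *
 * so firmware can reorder or extend sections without breaking the driver.
 */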

/*
 * MR_PopulateDrvRaidMap: Populate the driver RAID map from the firmware
 * RAID map.
 */
static int
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
    MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
    MR_FW_RAID_MAP_EXT *fw_map_ext;
    MR_FW_RAID_MAP *pFwRaidMap = NULL;
    unsigned int i;
    u_int16_t ld_count;

    MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

    if (sc->maxRaidMapSize) {
        return MR_PopulateDrvRaidMapVentura(sc);
    } else if (sc->max256vdSupport) {
        fw_map_ext = (MR_FW_RAID_MAP_EXT *)sc->raidmap_mem[(sc->map_id & 1)];
        ld_count = (u_int16_t)le16toh(fw_map_ext->ldCount);
        if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
            device_printf(sc->mrsas_dev,
                "mrsas: LD count exposed in RAID map is not valid\n");
            return 1;
        }
#if VD_EXT_DEBUG
        for (i = 0; i < ld_count; i++) {
            printf("mrsas : Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n",
                i, fw_map_ext->ldSpanMap[i].ldRaid.targetId,
                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
                fw_map_ext->ldSpanMap[i].ldRaid.size);
        }
#endif
        memset(drv_map, 0, sc->drv_map_sz);
        pDrvRaidMap->ldCount = htole16(ld_count);
        pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
        for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) {
            pDrvRaidMap->ldTgtIdToLd[i] = (u_int16_t)fw_map_ext->ldTgtIdToLd[i];
        }
        memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
            sizeof(MR_LD_SPAN_MAP) * ld_count);
#if VD_EXT_DEBUG
        for (i = 0; i < ld_count; i++) {
            printf("mrsas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId, i,
                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
            printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
                pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, i,
                pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
            printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
                drv_map, pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
                &pDrvRaidMap->ldSpanMap[i].ldRaid);
        }
#endif
        memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
            sizeof(MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
        memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
            sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);

        pDrvRaidMap->totalSize = htole32(sizeof(MR_FW_RAID_MAP_EXT));
    } else {
        fw_map_old = (MR_FW_RAID_MAP_ALL *)sc->raidmap_mem[(sc->map_id & 1)];
        pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
        for (i = 0; i < le32toh(pFwRaidMap->ldCount); i++) {
            device_printf(sc->mrsas_dev,
                "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n", i,
                fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
                fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
                fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
        }
#endif

        memset(drv_map, 0, sc->drv_map_sz);
        pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
        pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
        pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;

        for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
            pDrvRaidMap->ldTgtIdToLd[i] =
                (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
        }

        for (i = 0; i < pDrvRaidMap->ldCount; i++) {
            pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
                pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
            device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
                pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
                pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
            device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
                drv_map, pDrvRaidMap,
                &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
        }

        memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
            sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
        memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
            sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);
    }
    return 0;
}
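
/*
 * Three firmware map formats funnel into the single driver-private
 * MR_DRV_RAID_MAP_ALL layout populated above:
 *
 *	sc->maxRaidMapSize != 0	-> dynamic map (Ventura and newer)
 *	sc->max256vdSupport	-> MR_FW_RAID_MAP_EXT (256 VDs)
 *	otherwise		-> legacy MR_FW_RAID_MAP
 *
 * All later lookups in this file run against the driver copy only.
 */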

/*
 * MR_ValidateMapInfo: Validate the loaded RAID map
 * input:	Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map. It returns 0 if
 * successful, and 1 otherwise.
 */
u_int8_t
MR_ValidateMapInfo(struct mrsas_softc *sc)
{
    MR_DRV_RAID_MAP_ALL *drv_map;
    MR_DRV_RAID_MAP *pDrvRaidMap;
    PLD_SPAN_INFO ldSpanInfo;
    u_int32_t expected_map_size;

    if (!sc)
        return 1;

    /* A populate failure means there is no usable map to validate. */
    if (MR_PopulateDrvRaidMap(sc))
        return 1;

    drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    pDrvRaidMap = &drv_map->raidMap;
    ldSpanInfo = (PLD_SPAN_INFO)&sc->log_to_span;

    if (sc->maxRaidMapSize)
        expected_map_size = sizeof(MR_DRV_RAID_MAP_ALL);
    else if (sc->max256vdSupport)
        expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
    else
        expected_map_size =
            (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
            (sizeof(MR_LD_SPAN_MAP) * le16toh(pDrvRaidMap->ldCount));

    if (le32toh(pDrvRaidMap->totalSize) != expected_map_size) {
        device_printf(sc->mrsas_dev, "map size %x does not match ld count\n",
            expected_map_size);
        device_printf(sc->mrsas_dev, "span map= %x\n",
            (unsigned int)sizeof(MR_LD_SPAN_MAP));
        device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n",
            le32toh(pDrvRaidMap->totalSize));
        return 1;
    }
    if (sc->UnevenSpanSupport) {
        mr_update_span_set(drv_map, ldSpanInfo);
    }
    mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info);

    return 0;
}
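
/*
 * Illustrative size check (hypothetical numbers): for a legacy map with
 * ldCount = 2, the expected size is
 *
 *	sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)
 *	    + 2 * sizeof(MR_LD_SPAN_MAP)
 *
 * i.e. the fixed header plus one span map per LD. A totalSize that disagrees
 * means the driver and firmware disagree on the map format, so the map is
 * rejected rather than parsed.
 */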

/*
 * getSpanInfo: Print info about the span set built in the driver from the
 * FW raid map.
 *
 * Inputs:	map
 *		ldSpanInfo: LD span info per HBA instance
 */
#if SPAN_DEBUG
static int
getSpanInfo(MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{
    u_int8_t span;
    u_int32_t element;
    MR_LD_RAID *raid;
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    int ldCount;
    u_int16_t ld;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
        ld = MR_TargetIdToLdGet(ldCount, map);
        if (ld >= MAX_LOGICAL_DRIVES) {
            continue;
        }
        raid = MR_LdRaidGet(ld, map);
        printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
        for (span = 0; span < raid->spanDepth; span++)
            printf("Span=%x, number of quads=%x\n", span,
                le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements));
        for (element = 0; element < MAX_QUAD_DEPTH; element++) {
            span_set = &(ldSpanInfo[ld].span_set[element]);
            if (span_set->span_row_data_width == 0)
                break;

            printf("Span Set %x: width=%x, diff=%x\n", element,
                (unsigned int)span_set->span_row_data_width,
                (unsigned int)span_set->diff);
            printf("logical LBA start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->log_start_lba,
                (long unsigned int)span_set->log_end_lba);
            printf("span row start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->span_row_start,
                (long unsigned int)span_set->span_row_end);
            printf("data row start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->data_row_start,
                (long unsigned int)span_set->data_row_end);
            printf("data strip start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->data_strip_start,
                (long unsigned int)span_set->data_strip_end);

            for (span = 0; span < raid->spanDepth; span++) {
                if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                    block_span_info.noElements >= element + 1) {
                    quad = &map->raidMap.ldSpanMap[ld].
                        spanBlock[span].block_span_info.
                        quad[element];
                    printf("Span=%x, Quad=%x, diff=%x\n", span,
                        element, le32toh(quad->diff));
                    printf("offset_in_span=0x%08lx\n",
                        (long unsigned int)le64toh(quad->offsetInSpan));
                    printf("logical start=0x%08lx, end=0x%08lx\n",
                        (long unsigned int)le64toh(quad->logStart),
                        (long unsigned int)le64toh(quad->logEnd));
                }
            }
        }
    }
    return 0;
}
#endif

/*
 * mr_spanset_get_span_block: Calculate the span block for a given row using
 * the span set.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		row:	Row number
 *		map:	LD map
 *
 * Outputs:	span	  - Span number
 *		block	  - Absolute block number in the physical disk
 *		div_error - Divide error code
 */

u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL *map, int *div_error)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    u_int32_t span, info;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (row > span_set->data_row_end)
            continue;

        for (span = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].
                    block_span_info.quad[info];
                if (quad->diff == 0) {
                    *div_error = 1;
                    return span;
                }
                if (le64toh(quad->logStart) <= row &&
                    row <= le64toh(quad->logEnd) &&
                    (mega_mod64(row - le64toh(quad->logStart),
                    le32toh(quad->diff))) == 0) {
                    if (span_blk != NULL) {
                        u_int64_t blk;

                        blk = mega_div64_32
                            ((row - le64toh(quad->logStart)),
                            le32toh(quad->diff));
                        blk = (blk + le64toh(quad->offsetInSpan))
                            << raid->stripeShift;
                        *span_blk = blk;
                    }
                    return span;
                }
            }
    }
    return SPAN_INVALID;
}
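
/*
 * Illustrative quad lookup (hypothetical values): a quad with logStart = 0,
 * logEnd = 9, diff = 2 and offsetInSpan = 0 owns rows 0, 2, 4, 6, 8. For
 * row = 6:
 *
 *	(6 - logStart) % diff == 0		-> this quad matches
 *	blk = ((6 - 0) / 2 + 0) << stripeShift
 *
 * so with stripeShift = 3 the span-relative block is 3 << 3 = 24.
 */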

/*
 * get_row_from_strip: Calculate the row for a given strip using the span set.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		strip:	Strip
 *		map:	LD map
 *
 * Outputs:	row - row associated with the strip
 */

static u_int64_t
get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t info, strip_offset, span, span_offset;
    u_int64_t span_set_Strip, span_set_Row;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (strip > span_set->data_strip_end)
            continue;

        span_set_Strip = strip - span_set->data_strip_start;
        strip_offset = mega_mod64(span_set_Strip,
            span_set->span_row_data_width);
        span_set_Row = mega_div64_32(span_set_Strip,
            span_set->span_row_data_width) * span_set->diff;
        for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                if (strip_offset >= span_set->strip_offset[span])
                    span_offset++;
                else
                    break;
            }
        mrsas_dprint(sc, MRSAS_PRL11,
            "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
            "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
            (unsigned long long)span_set_Strip,
            (unsigned long long)span_set_Row,
            (unsigned long long)span_set->span_row_data_width,
            (unsigned long long)span_offset);
        mrsas_dprint(sc, MRSAS_PRL11,
            "AVAGO Debug : For strip 0x%llx row is 0x%llx\n",
            (unsigned long long)strip,
            (unsigned long long)span_set->data_row_start +
            (unsigned long long)span_set_Row + (span_offset - 1));
        return (span_set->data_row_start + span_set_Row + (span_offset - 1));
    }
    return -1LLU;
}
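
/*
 * Illustrative strip-to-row mapping (hypothetical values): a span set with
 * data_strip_start = 100, span_row_data_width = 4 and diff = 1 maps strip
 * 110 as
 *
 *	span_set_Strip = 110 - 100	= 10
 *	strip_offset   = 10 % 4		= 2
 *	span_set_Row   = (10 / 4) * 1	= 2
 *
 * and the returned row is data_row_start + 2 + (span_offset - 1).
 */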

/*
 * get_strip_from_row: Calculate the start strip for a given row using the
 * span set.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		row:	Row number
 *		map:	LD map
 *
 * Outputs:	strip - Start strip associated with the row
 */

static u_int64_t
get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t span, info;
    u_int64_t strip;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (row > span_set->data_row_end)
            continue;

        for (span = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].block_span_info.quad[info];
                if (le64toh(quad->logStart) <= row &&
                    row <= le64toh(quad->logEnd) &&
                    mega_mod64((row - le64toh(quad->logStart)),
                    le32toh(quad->diff)) == 0) {
                    strip = mega_div64_32
                        (((row - span_set->data_row_start)
                        - le64toh(quad->logStart)),
                        le32toh(quad->diff));
                    strip *= span_set->span_row_data_width;
                    strip += span_set->data_strip_start;
                    strip += span_set->strip_offset[span];
                    return strip;
                }
            }
    }
    mrsas_dprint(sc, MRSAS_PRL11,
        "AVAGO Debug - get_strip_from_row: returns invalid "
        "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
    return -1;
}

/*
 * get_arm_from_strip: Calculate the physical arm for a given strip using the
 * span set.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		strip:	Strip
 *		map:	LD map
 *
 * Outputs:	Phys Arm - physical arm associated with the strip
 */

static u_int32_t
get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t info, strip_offset, span, span_offset;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (strip > span_set->data_strip_end)
            continue;

        strip_offset = (u_int32_t)mega_mod64
            ((strip - span_set->data_strip_start),
            span_set->span_row_data_width);

        for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                if (strip_offset >= span_set->strip_offset[span])
                    span_offset = span_set->strip_offset[span];
                else
                    break;
            }
        mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: "
            "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
            (long unsigned int)strip, (strip_offset - span_offset));
        return (strip_offset - span_offset);
    }

    mrsas_dprint(sc, MRSAS_PRL11,
        "AVAGO Debug: get_arm_from_strip: returns invalid arm"
        " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

    return -1;
}

/* This function returns the physical arm. */
u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

    /* Need to check the correct default value */
    u_int32_t arm = 0;

    switch (raid->level) {
    case 0:
    case 5:
    case 6:
        arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
        break;
    case 1:
        /* start with the logical arm */
        arm = get_arm_from_strip(sc, ld, stripe, map);
        arm *= 2;
        break;
    }

    return arm;
}
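
/*
 * For RAID-1 above, each logical arm has its mirror stored next to it in the
 * array, so the physical index of logical arm N is 2 * N (hypothetical
 * example: logical arm 1 -> physical arm 2, with its mirror at physical
 * arm 3). RAID-0/5/6 instead take the strip modulo the span row size.
 */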

/*
 * mr_spanset_get_phy_params: Calculate the arm, span and block for the
 * specified stripe and reference in stripe, using the span set.
 *
 * Inputs:	sc	 - HBA instance
 *		ld	 - Logical drive number
 *		stripRow - Stripe number
 *		stripRef - Reference in stripe
 *
 * Outputs:	span  - Span number
 *		block - Absolute block number in the physical disk
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int32_t pd, arRef, r1_alt_pd;
    u_int8_t physArm, span;
    u_int64_t row;
    u_int8_t retval = TRUE;
    u_int64_t *pdBlock = &io_info->pdBlock;
    u_int16_t *pDevHandle = &io_info->devHandle;
    u_int8_t *pPdInterface = &io_info->pdInterface;
    u_int32_t logArm, rowMod, armQ, arm;

    /* Get the row and span from io_info for uneven span IO. */
    row = io_info->start_row;
    span = io_info->start_span;

    if (raid->level == 6) {
        logArm = get_arm_from_strip(sc, ld, stripRow, map);
        rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
        armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
        arm = armQ + 1 + logArm;
        if (arm >= SPAN_ROW_SIZE(map, ld, span))
            arm -= SPAN_ROW_SIZE(map, ld, span);
        physArm = (u_int8_t)arm;
    } else
        /* Calculate the arm */
        physArm = get_arm(sc, ld, span, stripRow, map);

    arRef = MR_LdSpanArrayGet(ld, span, map);
    pd = MR_ArPdGet(arRef, physArm, map);

    if (pd != MR_PD_INVALID) {
        *pDevHandle = MR_PdDevHandleGet(pd, map);
        *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
        /* get the second pd as well for raid 1/10 fast path writes */
        if ((raid->level == 1) && !io_info->isRead) {
            r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
            if (r1_alt_pd != MR_PD_INVALID)
                io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
        }
    } else {
        *pDevHandle = htole16(MR_DEVHANDLE_INVALID);
        if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
            (sc->mrsas_gen3_ctrl &&
            raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
            pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
        else if (raid->level == 1) {
            pd = MR_ArPdGet(arRef, physArm + 1, map);
            if (pd != MR_PD_INVALID) {
                *pDevHandle = MR_PdDevHandleGet(pd, map);
                *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
            }
        }
    }

    *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk);
    if (sc->is_ventura || sc->is_aero) {
        ((RAID_CONTEXT_G35 *)pRAID_Context)->spanArm =
            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
    } else {
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = pRAID_Context->spanArm;
    }
    return retval;
}
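
/*
 * Illustrative RAID-6 arm rotation as computed above (hypothetical values):
 * with SPAN_ROW_SIZE = 4 arms, row = 5 and logical arm 1,
 *
 *	rowMod = 5 % 4		= 1
 *	armQ   = 4 - 1 - 1	= 2	(Q parity arm for this row)
 *	arm    = 2 + 1 + 1	= 4 -> wraps to 0
 *
 * so the data strip lands on physical arm 0; the rotation spreads the P/Q
 * parity evenly across the arms from row to row.
 */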

/*
 * MR_BuildRaidContext: Set up the fast path RAID context
 *
 * This function initiates command processing: the start/end row and strip
 * information is calculated, and then the region lock parameters are set up.
 * It returns TRUE when the RAID context was built and the IO can proceed,
 * and FALSE when the IO must be sent to the firmware instead.
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid;
    u_int32_t ld, stripSize, stripe_mask;
    u_int64_t endLba, endStrip, endRow, start_row, start_strip;
    REGION_KEY regStart;
    REGION_LEN regSize;
    u_int8_t num_strips, numRows;
    u_int16_t ref_in_start_stripe, ref_in_end_stripe;
    u_int64_t ldStartBlock;
    u_int32_t numBlocks, ldTgtId;
    u_int8_t isRead, stripIdx;
    u_int8_t retval = 0;
    u_int8_t startlba_span = SPAN_INVALID;
    u_int64_t *pdBlock = &io_info->pdBlock;
    int error_code = 0;

    ldStartBlock = io_info->ldStartBlock;
    numBlocks = io_info->numBlocks;
    ldTgtId = io_info->ldTgtId;
    isRead = io_info->isRead;

    io_info->IoforUnevenSpan = 0;
    io_info->start_span = SPAN_INVALID;

    ld = MR_TargetIdToLdGet(ldTgtId, map);
    raid = MR_LdRaidGet(ld, map);

    /* check the read ahead bit */
    io_info->raCapable = raid->capability.raCapable;

    if (raid->rowDataSize == 0) {
        if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
            return FALSE;
        else if (sc->UnevenSpanSupport) {
            io_info->IoforUnevenSpan = 1;
        } else {
            mrsas_dprint(sc, MRSAS_PRL11,
                "AVAGO Debug: raid->rowDataSize is 0, SPAN[0] rowDataSize = 0x%0x,"
                " but UnevenSpanSupport is not enabled\n",
                MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
            return FALSE;
        }
    }
    stripSize = 1 << raid->stripeShift;
    stripe_mask = stripSize - 1;
    /*
     * calculate the starting row and stripe, and the number of strips and
     * rows
     */
    start_strip = ldStartBlock >> raid->stripeShift;
    ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
    endLba = ldStartBlock + numBlocks - 1;
    ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
    endStrip = endLba >> raid->stripeShift;
    num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
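
    /*
     * Illustrative strip math (hypothetical values): stripeShift = 7
     * (128 blocks per strip), ldStartBlock = 300, numBlocks = 100:
     *
     *	start_strip         = 300 >> 7		= 2
     *	ref_in_start_stripe = 300 & 127		= 44
     *	endLba              = 300 + 100 - 1	= 399
     *	endStrip            = 399 >> 7		= 3
     *	num_strips          = 3 - 2 + 1		= 2
     *
     * i.e. the IO touches two strips, entering the first one 44 blocks in.
     */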
    if (io_info->IoforUnevenSpan) {
        start_row = get_row_from_strip(sc, ld, start_strip, map);
        endRow = get_row_from_strip(sc, ld, endStrip, map);
        if (raid->spanDepth == 1) {
            startlba_span = 0;
            *pdBlock = start_row << raid->stripeShift;
        } else {
            startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
                pdBlock, map, &error_code);
            if (error_code == 1) {
                mrsas_dprint(sc, MRSAS_PRL11,
                    "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n",
                    __func__, __LINE__);
                return FALSE;
            }
        }
        if (startlba_span == SPAN_INVALID) {
            mrsas_dprint(sc, MRSAS_PRL11,
                "AVAGO Debug: return from %s %d for row 0x%llx, "
                "start strip 0x%llx endStrip 0x%llx\n", __func__,
                __LINE__, (unsigned long long)start_row,
                (unsigned long long)start_strip,
                (unsigned long long)endStrip);
            return FALSE;
        }
        io_info->start_span = startlba_span;
        io_info->start_row = start_row;
        mrsas_dprint(sc, MRSAS_PRL11,
            "AVAGO Debug: Check span number from %s %d for row 0x%llx, "
            "start strip 0x%llx endStrip 0x%llx span 0x%x\n",
            __func__, __LINE__, (unsigned long long)start_row,
            (unsigned long long)start_strip,
            (unsigned long long)endStrip, startlba_span);
        mrsas_dprint(sc, MRSAS_PRL11,
            "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
            (unsigned long long)start_row, (unsigned long long)endRow,
            startlba_span);
    } else {
        start_row = mega_div64_32(start_strip, raid->rowDataSize);
        endRow = mega_div64_32(endStrip, raid->rowDataSize);
    }

    numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

    /*
     * Calculate the region info. (Assume the region is at the start of the
     * first row, and assume this IO needs the full row - this is adjusted
     * below if not true.)
     */
    regStart = start_row << raid->stripeShift;
    regSize = stripSize;

    /* Check if we can send this I/O via FastPath */
    if (raid->capability.fpCapable) {
        if (isRead)
            io_info->fpOkForIo = (raid->capability.fpReadCapable &&
                ((num_strips == 1) ||
                raid->capability.fpReadAcrossStripe));
        else
            io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
                ((num_strips == 1) ||
                raid->capability.fpWriteAcrossStripe));
    } else
        io_info->fpOkForIo = FALSE;

    if (numRows == 1) {
        if (num_strips == 1) {
            regStart += ref_in_start_stripe;
            regSize = numBlocks;
        }
    } else if (io_info->IoforUnevenSpan == 0) {
        /*
         * Even-span region lock optimization: if the start strip is
         * the last one in the start row
         */
        if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
            regStart += ref_in_start_stripe;
            /*
             * initialize the count to the sectors from startRef to
             * the end of the strip
             */
            regSize = stripSize - ref_in_start_stripe;
        }
        /* add the complete rows in the middle of the transfer */
        if (numRows > 2)
            regSize += (numRows - 2) << raid->stripeShift;

        /* if the IO ends within the first strip of the last row */
        if (endStrip == endRow * raid->rowDataSize)
            regSize += ref_in_end_stripe + 1;
        else
            regSize += stripSize;
    } else {
        if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
            SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
            regStart += ref_in_start_stripe;
            /*
             * initialize the count to the sectors from startRef to
             * the end of the strip
             */
            regSize = stripSize - ref_in_start_stripe;
        }
        /* add the complete rows in the middle of the transfer */
        if (numRows > 2)
            regSize += (numRows - 2) << raid->stripeShift;

        /* if the IO ends within the first strip of the last row */
        if (endStrip == get_strip_from_row(sc, ld, endRow, map))
            regSize += ref_in_end_stripe + 1;
        else
            regSize += stripSize;
    }
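
    /*
     * Illustrative region sizing (hypothetical, even-span case): with
     * stripSize = 128 and an IO spanning numRows = 3 whose start strip is
     * the last strip of its row and whose end strip is the first strip of
     * the last row:
     *
     *	regSize  = 128 - ref_in_start_stripe	(tail of the first row)
     *	regSize += (3 - 2) << stripeShift	(one full middle row)
     *	regSize += ref_in_end_stripe + 1	(head of the last row)
     *
     * which mirrors the even-span branch above.
     */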
    pRAID_Context->timeoutValue = htole16(map->raidMap.fpPdIoTimeoutSec);
    if (sc->mrsas_gen3_ctrl)
        pRAID_Context->regLockFlags = (isRead) ?
            raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
    else if (sc->device_id == MRSAS_TBOLT)
        pRAID_Context->regLockFlags = (isRead) ?
            REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
    pRAID_Context->VirtualDiskTgtId = raid->targetId;
    pRAID_Context->regLockRowLBA = htole64(regStart);
    pRAID_Context->regLockLength = htole32(regSize);
    pRAID_Context->configSeqNum = raid->seqNum;

    /*
     * Get the physical parameters only if FP capable; otherwise leave the
     * calculation to the MR firmware.
     */
    if (io_info->fpOkForIo) {
        retval = io_info->IoforUnevenSpan ?
            mr_spanset_get_phy_params(sc, ld, start_strip,
            ref_in_start_stripe, io_info, pRAID_Context, map) :
            MR_GetPhyParams(sc, ld, start_strip,
            ref_in_start_stripe, io_info, pRAID_Context, map);
        /* If the IO is on an invalid PD, then FP is not possible */
        if (io_info->devHandle == MR_DEVHANDLE_INVALID)
            io_info->fpOkForIo = FALSE;
        /*
         * if FP is possible, set the SLUD bit in regLockFlags for
         * Ventura
         */
        else if ((sc->is_ventura || sc->is_aero) && !isRead &&
            (raid->writeMode == MR_RL_WRITE_BACK_MODE) && (raid->level <= 1) &&
            raid->capability.fpCacheBypassCapable) {
            ((RAID_CONTEXT_G35 *)pRAID_Context)->routingFlags.bits.sld = 1;
        }

        return retval;
    } else if (isRead) {
        for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
            retval = io_info->IoforUnevenSpan ?
                mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
                ref_in_start_stripe, io_info, pRAID_Context, map) :
                MR_GetPhyParams(sc, ld, start_strip + stripIdx,
                ref_in_start_stripe, io_info, pRAID_Context, map);
            if (!retval)
                return TRUE;
        }
    }
#if SPAN_DEBUG
    /* Just for testing what arm we get for the strip. */
    get_arm_from_strip(sc, ld, start_strip, map);
#endif
    return TRUE;
}
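
/*
 * Illustrative span-set bookkeeping for the updater below (hypothetical
 * values): an uneven-span LD whose SPAN 0 holds 4 data arms and SPAN 1 holds
 * 2 gets, for the first quad element,
 *
 *	strip_offset[0] = 0, strip_offset[1] = 4
 *	span_row_data_width = 6
 *
 * so strips 0-3 of each span row live on SPAN 0 and strips 4-5 on SPAN 1.
 * The *_start/*_end fields then delimit which LBAs, strips and rows each
 * quad element covers.
 */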

/*
 * mr_update_span_set: Prepare the span set info from the validated RAID map
 * and store it in the local per-instance copy of ldSpanInfo.
 *
 * Inputs:	map	   - LD map
 *		ldSpanInfo - ldSpanInfo per HBA instance
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{
    u_int8_t span, count;
    u_int32_t element, span_row_width;
    u_int64_t span_row;
    MR_LD_RAID *raid;
    LD_SPAN_SET *span_set, *span_set_prev;
    MR_QUAD_ELEMENT *quad;
    int ldCount;
    u_int16_t ld;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
        ld = MR_TargetIdToLdGet(ldCount, map);
        if (ld >= MAX_LOGICAL_DRIVES)
            continue;
        raid = MR_LdRaidGet(ld, map);
        for (element = 0; element < MAX_QUAD_DEPTH; element++) {
            for (span = 0; span < raid->spanDepth; span++) {
                if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                    block_span_info.noElements) < element + 1)
                    continue;
                /* TO-DO */
                span_set = &(ldSpanInfo[ld].span_set[element]);
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].block_span_info.quad[element];

                span_set->diff = le32toh(quad->diff);

                for (count = 0, span_row_width = 0;
                    count < raid->spanDepth; count++) {
                    if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[count].
                        block_span_info.noElements) >= element + 1) {
                        span_set->strip_offset[count] = span_row_width;
                        span_row_width +=
                            MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
                        printf("AVAGO Debug span %x rowDataSize %x\n", count,
                            MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
                    }
                }

                span_set->span_row_data_width = span_row_width;
                span_row = mega_div64_32(((le64toh(quad->logEnd) -
                    le64toh(quad->logStart)) + le32toh(quad->diff)),
                    le32toh(quad->diff));

                if (element == 0) {
                    span_set->log_start_lba = 0;
                    span_set->log_end_lba =
                        ((span_row << raid->stripeShift) * span_row_width) - 1;

                    span_set->span_row_start = 0;
                    span_set->span_row_end = span_row - 1;

                    span_set->data_strip_start = 0;
                    span_set->data_strip_end = (span_row * span_row_width) - 1;

                    span_set->data_row_start = 0;
                    span_set->data_row_end =
                        (span_row * le32toh(quad->diff)) - 1;
                } else {
                    span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
                    span_set->log_start_lba = span_set_prev->log_end_lba + 1;
                    span_set->log_end_lba = span_set->log_start_lba +
                        ((span_row << raid->stripeShift) * span_row_width) - 1;

                    span_set->span_row_start = span_set_prev->span_row_end + 1;
                    span_set->span_row_end =
                        span_set->span_row_start + span_row - 1;

                    span_set->data_strip_start =
                        span_set_prev->data_strip_end + 1;
                    span_set->data_strip_end = span_set->data_strip_start +
                        (span_row * span_row_width) - 1;

                    span_set->data_row_start = span_set_prev->data_row_end + 1;
                    span_set->data_row_end = span_set->data_row_start +
                        (span_row * le32toh(quad->diff)) - 1;
                }
                break;
            }
            if (span == raid->spanDepth)
                break;	/* no quads remain */
        }
    }
#if SPAN_DEBUG
    getSpanInfo(map, ldSpanInfo);	/* to get the span set info */
#endif
}

/*
 * mrsas_update_load_balance_params: Update the load balance params
 * Inputs:
 *	sc	- driver softc instance
 *	drv_map	- driver RAID map
 *	lbInfo	- load balance info
 *
 * This function updates the load balance parameters for the LD config of a
 * two-drive optimal RAID-1.
 */
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL *drv_map, PLD_LOAD_BALANCE_INFO lbInfo)
{
    int ldCount;
    u_int16_t ld;
    MR_LD_RAID *raid;

    if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1)
        sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
        ld = MR_TargetIdToLdGet(ldCount, drv_map);
        if (ld >= MAX_LOGICAL_DRIVES_EXT) {
            lbInfo[ldCount].loadBalanceFlag = 0;
            continue;
        }
        raid = MR_LdRaidGet(ld, drv_map);
        le32_to_cpus(&raid->capability);
        if ((raid->level != 1) ||
            (raid->ldState != MR_LD_STATE_OPTIMAL)) {
            lbInfo[ldCount].loadBalanceFlag = 0;
            continue;
        }
        lbInfo[ldCount].loadBalanceFlag = 1;
    }
}

/*
 * mrsas_set_pd_lba: Set the PD LBA
 * input:	io_request pointer
 *		CDB length
 *		io_info pointer
 *		Pointer to CCB
 *		Local RAID map pointer
 *		Start block of IO
 *		Block size
 *
 * Used to set the PD logical block address in the CDB for FP IOs.
 */
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
    struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size)
{
    MR_LD_RAID *raid;
    u_int32_t ld;
    u_int64_t start_blk = io_info->pdBlock;
    u_int8_t *cdb = io_request->CDB.CDB32;
    u_int32_t num_blocks = io_info->numBlocks;
    u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);

    /* Check if T10 PI (DIF) is enabled for this LD */
    ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
    raid = MR_LdRaidGet(ld, local_map_ptr);
    if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
        memset(cdb, 0, sizeof(io_request->CDB.CDB32));
        cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
        cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

        if (ccb_h->flags == CAM_DIR_OUT)
            cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
        else
            cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
        cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;

        /* LBA */
        cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
        cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
        cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
        cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
        cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
        cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
        cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
        cdb[19] = (u_int8_t)(start_blk & 0xff);

        /* Logical block reference tag */
        io_request->CDB.EEDP32.PrimaryReferenceTag = htobe32(ref_tag);
        io_request->CDB.EEDP32.PrimaryApplicationTagMask = htobe16(0xffff);
        io_request->IoFlags = htole16(32);	/* Specify a 32-byte CDB */

        /* Transfer length */
        cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
        cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
        cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
        cdb[31] = (u_int8_t)(num_blocks & 0xff);

        /* set the SCSI IO EEDP flags */
        if (ccb_h->flags == CAM_DIR_OUT) {
            io_request->EEDPFlags = htole16(
                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
        } else {
            io_request->EEDPFlags = htole16(
                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
                MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
        }
        io_request->Control |= htole32(0x4 << 26);
        io_request->EEDPBlockSize = htole32(ld_block_size);
    } else {
        /* Some drives don't support 16/12-byte CDBs; convert to 10-byte */
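        /*
         * Conversion rules applied below, with a hypothetical example: a
         * READ_16 at start_blk = 0x1000 (fits in 32 bits) is rewritten as
         * READ_10, preserving the flag/group/control bytes:
         *
         *	cdb[0] = READ_10;    cdb[1] = old cdb[1];
         *	cdb[6] = old cdb[14]; cdb[9] = old cdb[15];
         *
         * Conversely, a short-CDB IO past 0xffffffff is widened to a
         * 16-byte CDB, and a 6-byte CDB past 0x1fffff becomes 10-byte,
         * since READ_6 carries only a 21-bit LBA.
         */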
        if (((cdb_len == 12) || (cdb_len == 16)) &&
            (start_blk <= 0xffffffff)) {
            if (cdb_len == 16) {
                opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
                flagvals = cdb[1];
                groupnum = cdb[14];
                control = cdb[15];
            } else {
                opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
                flagvals = cdb[1];
                groupnum = cdb[10];
                control = cdb[11];
            }

            memset(cdb, 0, sizeof(io_request->CDB.CDB32));

            cdb[0] = opcode;
            cdb[1] = flagvals;
            cdb[6] = groupnum;
            cdb[9] = control;

            /* Transfer length */
            cdb[8] = (u_int8_t)(num_blocks & 0xff);
            cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

            io_request->IoFlags = htole16(10);	/* Specify a 10-byte CDB */
            cdb_len = 10;
        } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
            /* Convert to a 16-byte CDB for large LBAs */
            switch (cdb_len) {
            case 6:
                opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
                control = cdb[5];
                break;
            case 10:
                opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
                flagvals = cdb[1];
                groupnum = cdb[6];
                control = cdb[9];
                break;
            case 12:
                opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
                flagvals = cdb[1];
                groupnum = cdb[10];
                control = cdb[11];
                break;
            }

            memset(cdb, 0, sizeof(io_request->CDB.CDB32));

            cdb[0] = opcode;
            cdb[1] = flagvals;
            cdb[14] = groupnum;
            cdb[15] = control;

            /* Transfer length */
            cdb[13] = (u_int8_t)(num_blocks & 0xff);
            cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
            cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
            cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);

            io_request->IoFlags = htole16(16);	/* Specify a 16-byte CDB */
            cdb_len = 16;
        } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
            /* convert to a 10-byte CDB */
            opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
            control = cdb[5];

            memset(cdb, 0, sizeof(io_request->CDB.CDB32));
            cdb[0] = opcode;
            cdb[9] = control;

            /* Set the transfer length */
            cdb[8] = (u_int8_t)(num_blocks & 0xff);
            cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

            /* Specify a 10-byte CDB */
            cdb_len = 10;
        }
        /* Normal case: just load the LBA here */
        u_int8_t val = cdb[1] & 0xE0;

        switch (cdb_len) {
        case 6:
            cdb[3] = (u_int8_t)(start_blk & 0xff);
            cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
            cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
            break;
        case 10:
            cdb[5] = (u_int8_t)(start_blk & 0xff);
            cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
            cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
            cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
            break;
        case 16:
            cdb[9] = (u_int8_t)(start_blk & 0xff);
            cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
            cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
            cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
            cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
            cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
            cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
            cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
            break;
        }
    }
}

/*
 * mrsas_get_best_arm_pd: Determine the best spindle arm
 * Inputs:
 *	sc	- HBA instance
 *	lbInfo	- load balance info
 *	io_info	- IO request info
 *
 * This function determines and returns the best arm by looking at the
 * parameters of the last PD access.
 */
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
    MR_LD_RAID *raid;
    MR_DRV_RAID_MAP_ALL *drv_map;
    u_int16_t pd1_devHandle;
    u_int16_t pend0, pend1, ld;
    u_int64_t diff0, diff1;
    u_int8_t bestArm, pd0, pd1, span, arm;
    u_int32_t arRef, span_row_size;

    u_int64_t block = io_info->ldStartBlock;
    u_int32_t count = io_info->numBlocks;

    span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
        >> RAID_CTX_SPANARM_SPAN_SHIFT);
    arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

    drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
    raid = MR_LdRaidGet(ld, drv_map);
    span_row_size = sc->UnevenSpanSupport ?
        SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

    arRef = MR_LdSpanArrayGet(ld, span, drv_map);
    pd0 = MR_ArPdGet(arRef, arm, drv_map);
    pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
        (arm + 1 - span_row_size) : arm + 1, drv_map);

    /* Get the PD1 dev handle */
    pd1_devHandle = MR_PdDevHandleGet(pd1, drv_map);
    if (pd1_devHandle == MR_DEVHANDLE_INVALID) {
        bestArm = arm;
    } else {
        /* get the pending cmds for the data and mirror arms */
        pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
        pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

        /* Determine the disk whose head is nearer to the req. block */
        diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
        diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
        bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

        if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
            (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
            bestArm ^= 1;

        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
    }

    /* Update the last accessed block on the chosen pd */
    lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
    /* set unconditionally so the invalid-handle path also returns pd0 */
    io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
#if SPAN_DEBUG
    if (arm != bestArm)
        printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x "
            "io_info->span_arm 0x%x\n",
            span, arm, bestArm, io_info->span_arm);
#endif

    return io_info->pd_after_lb;
}
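
/*
 * Illustrative load-balance decision (hypothetical values): block = 5000,
 * last_accessed_block[pd0] = 4000, last_accessed_block[pd1] = 9000, so
 * diff0 = 1000 < diff1 = 4000 and the data arm wins on locality; but with
 * pend0 = 10, pend1 = 2 and lb_pending_cmds = 4, pend0 > pend1 + 4 flips
 * the choice to the mirror arm. Queue depth trumps seek distance once the
 * imbalance exceeds lb_pending_cmds.
 */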

/*
 * mrsas_get_updated_dev_handle: Get the updated dev handle
 * Inputs:
 *	sc	- Adapter instance soft state
 *	lbInfo	- load balance info
 *	io_info	- io_info pointer
 *
 * This function determines and returns the updated dev handle.
 */
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
    u_int8_t arm_pd;
    u_int16_t devHandle;
    MR_DRV_RAID_MAP_ALL *drv_map;

    drv_map = sc->ld_drv_map[(sc->map_id & 1)];

    /* get the best new arm */
    arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info);
    devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
    io_info->pdInterface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
    mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

    return devHandle;
}

/*
 * MR_GetPhyParams: Calculate the arm, span and block
 * Inputs:	Adapter soft state
 *		Logical drive number (LD)
 *		Stripe number (stripRow)
 *		Reference in stripe (stripRef)
 *
 * Outputs:	Absolute block number in the physical disk
 *
 * This routine calculates the arm, span and block for the specified stripe
 * and reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int32_t pd, arRef, r1_alt_pd;
    u_int8_t physArm, span;
    u_int64_t row;
    u_int8_t retval = TRUE;
    int error_code = 0;
    u_int64_t *pdBlock = &io_info->pdBlock;
    u_int16_t *pDevHandle = &io_info->devHandle;
    u_int8_t *pPdInterface = &io_info->pdInterface;
    u_int32_t rowMod, armQ, arm, logArm;

    row = mega_div64_32(stripRow, raid->rowDataSize);

    if (raid->level == 6) {
        /* logical arm within the row */
        logArm = mega_mod64(stripRow, raid->rowDataSize);
        if (raid->rowSize == 0)
            return FALSE;
        rowMod = mega_mod64(row, raid->rowSize);	/* get the logical row mod */
        armQ = raid->rowSize - 1 - rowMod;	/* index of the Q drive */
        arm = armQ + 1 + logArm;	/* data always logically follows Q */
        if (arm >= raid->rowSize)	/* handle the wrap condition */
            arm -= raid->rowSize;
        physArm = (u_int8_t)arm;
    } else {
        if (raid->modFactor == 0)
            return FALSE;
        physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
    }

    if (raid->spanDepth == 1) {
        span = 0;
        *pdBlock = row << raid->stripeShift;
    } else {
        span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
        if (error_code == 1)
            return FALSE;
    }

    /* Get the array on which this span is present */
    arRef = MR_LdSpanArrayGet(ld, span, map);

    pd = MR_ArPdGet(arRef, physArm, map);	/* Get the PD */

    if (pd != MR_PD_INVALID) {
        /* Get the dev handle from the PD */
        *pDevHandle = MR_PdDevHandleGet(pd, map);
        *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
        /* get the second pd as well for raid 1/10 fast path writes */
        if ((raid->level == 1) && !io_info->isRead) {
            r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
            if (r1_alt_pd != MR_PD_INVALID)
                io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
        }
    } else {
        *pDevHandle = htole16(MR_DEVHANDLE_INVALID);	/* set the dev handle as invalid */
        if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
            (sc->mrsas_gen3_ctrl &&
            raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
            pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
        else if (raid->level == 1) {
            /* Get the alternate PD */
            pd = MR_ArPdGet(arRef, physArm + 1, map);
            if (pd != MR_PD_INVALID) {
                /* Get the dev handle from the PD */
                *pDevHandle = MR_PdDevHandleGet(pd, map);
                *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
            }
        }
    }

    *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk);
    if (sc->is_ventura || sc->is_aero) {
        ((RAID_CONTEXT_G35 *)pRAID_Context)->spanArm =
            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
    } else {
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = pRAID_Context->spanArm;
    }
    return retval;
}

/*
 * MR_GetSpanBlock: Calculate the span block
 * Inputs:	LD
 *		row
 *		span block (out)
 *		RAID map pointer
 *
 * Outputs:	Span number
 *		Error code (via div_error)
 *
 * This routine calculates the span from the span block info.
 */
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL *map, int *div_error)
{
    MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
    MR_QUAD_ELEMENT *quad;
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int32_t span, j;
    u_int64_t blk, debugBlk;

    for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
        for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
            quad = &pSpanBlock->block_span_info.quad[j];
            if (quad->diff == 0) {
                *div_error = 1;
                return span;
            }
            if (quad->logStart <= row && row <= quad->logEnd &&
                (mega_mod64(row - quad->logStart, quad->diff)) == 0) {
                if (span_blk != NULL) {
                    blk = mega_div64_32((row - quad->logStart), quad->diff);
                    debugBlk = blk;
                    blk = (blk + quad->offsetInSpan) << raid->stripeShift;
                    *span_blk = blk;
                }
                return span;
            }
        }
    }
    return span;
}