/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES,
 * 1621 Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
#include <dev/mrsas/mrsas.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

/*
 * Function prototypes
 */
u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
u_int8_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * map, PLD_LOAD_BALANCE_INFO lbInfo);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map);
static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm,
    MR_DRV_RAID_MAP_ALL * map);
static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
    MR_DRV_RAID_MAP_ALL * map);
static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld,
    MR_DRV_RAID_MAP_ALL * map);
MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
static int MR_PopulateDrvRaidMap(struct mrsas_softc *sc);

/*
 * Spanset related function prototypes.  Added for PRL11 configuration
 * (uneven span support).
 */
void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo);
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
static u_int64_t
get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t strip, MR_DRV_RAID_MAP_ALL * map);
static u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
static u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
    u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map);

/*
 * Spanset related defines.  Added for PRL11 configuration (uneven span
 * support).
 */
#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) \
    MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize
#define SPAN_INVALID 0xff
#define SPAN_DEBUG 0

/*
 * Related defines
 */

typedef u_int64_t REGION_KEY;
typedef u_int32_t REGION_LEN;

#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
#define TRUE 1

#define LB_PENDING_CMDS_DEFAULT 4

/*
 * Related macros
 */

#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )

#define swap32(x) \
    ((unsigned int)( \
    (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
    (((unsigned int)(x) & (unsigned int)0x0000ff00UL) <<  8) | \
    (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >>  8) | \
    (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))

/*
 * In-line macros for mod and divide of a 64-bit dividend by a 32-bit
 * divisor.  No zero check is performed here, so the caller must never pass
 * a zero divisor.
 *
 * @param dividend: Dividend
 * @param divisor: Divisor
 * @return remainder (or quotient)
 */

#define mega_mod64(dividend, divisor) ({ \
    u_int32_t remainder; \
    remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
    remainder;})

#define mega_div64_32(dividend, divisor) ({ \
    u_int64_t quotient; \
    quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
    quotient;})
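
/*
 * For example, with a hypothetical LD whose rowDataSize is 3 strips, the
 * two macros above decompose a strip number into its row and its logical
 * arm within that row:
 *
 *     u_int64_t strip = 10;
 *     u_int64_t row   = mega_div64_32(strip, 3);    -> 3
 *     u_int32_t arm   = mega_mod64(strip, 3);       -> 1
 *
 * i.e. strips 9, 10 and 11 share row 3 and land on arms 0, 1 and 2.  This
 * is the shape of most of the mapping arithmetic below.
 */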

/*
 * Various RAID map access functions.  These functions access the various
 * parts of the RAID map and return the appropriate parameters.
 */

MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
    return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
    return le16toh(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
    return le16toh(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
    return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
    return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u_int8_t
MR_PdInterfaceTypeGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map)
{
    return map->raidMap.devHndlInfo[pd].interfaceType;
}

static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
    return le16toh(map->raidMap.arMapInfo[ar].pd[arm]);
}

static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
    return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
    return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

u_int8_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
    return map->raidMap.ldTgtIdToLd[ldTgtId];
}
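
/*
 * A typical lookup chains these accessors: a SCSI target id is first
 * translated to a RAID-map LD index, and the LD index then selects the
 * per-LD geometry.  Hypothetical sketch (ccb_target_id is a placeholder
 * name, not a field in this driver):
 *
 *     u_int32_t ld = MR_TargetIdToLdGet(ccb_target_id, map);
 *     if (ld < MAX_LOGICAL_DRIVES) {
 *         MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
 *         (raid->stripeShift, raid->rowDataSize, ... drive the mapping)
 *     }
 */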

u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid;
    u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

    ld = MR_TargetIdToLdGet(ldTgtId, map);

    /*
     * Check if logical drive was removed.
     */
    if (ld >= MAX_LOGICAL_DRIVES)
        return ldBlockSize;

    raid = MR_LdRaidGet(ld, map);
    ldBlockSize = raid->logicalBlockLength;
    if (!ldBlockSize)
        ldBlockSize = MRSAS_SCSIBLOCKSIZE;

    return ldBlockSize;
}

/*
 * This function populates the driver RAID map from the dynamic firmware
 * RAID map.
 */
static int
MR_PopulateDrvRaidMapVentura(struct mrsas_softc *sc)
{
    unsigned int i, j;
    u_int16_t ld_count;

    MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
    MR_RAID_MAP_DESC_TABLE *desc_table;
    MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
    void *raid_map_data = NULL;

    fw_map_dyn = (MR_FW_RAID_MAP_DYNAMIC *) sc->raidmap_mem[(sc->map_id & 1)];

    if (fw_map_dyn == NULL) {
        device_printf(sc->mrsas_dev,
            "from %s %d map0 %p map1 %p map size %d\n", __func__, __LINE__,
            sc->raidmap_mem[0], sc->raidmap_mem[1], sc->maxRaidMapSize);
        return 1;
    }
#if VD_EXT_DEBUG
    device_printf(sc->mrsas_dev,
        "raidMapSize 0x%x, descTableOffset 0x%x, "
        "descTableSize 0x%x, descTableNumElements 0x%x\n",
        fw_map_dyn->raidMapSize, le32toh(fw_map_dyn->descTableOffset),
        fw_map_dyn->descTableSize, fw_map_dyn->descTableNumElements);
#endif
    desc_table = (MR_RAID_MAP_DESC_TABLE *) ((char *)fw_map_dyn +
        le32toh(fw_map_dyn->descTableOffset));
    if (desc_table != fw_map_dyn->raidMapDescTable) {
        device_printf(sc->mrsas_dev,
            "desc table offsets do not match; the FW raid map layout "
            "has changed: desc %p original %p\n",
            desc_table, fw_map_dyn->raidMapDescTable);
    }
    memset(drv_map, 0, sc->drv_map_sz);
    ld_count = le16toh(fw_map_dyn->ldCount);
    pDrvRaidMap->ldCount = htole16(ld_count);
    pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fpPdIoTimeoutSec;
    pDrvRaidMap->totalSize = htole32(sizeof(MR_DRV_RAID_MAP_ALL));
    /* point to the actual data starting point */
    raid_map_data = (char *)fw_map_dyn +
        le32toh(fw_map_dyn->descTableOffset) +
        le32toh(fw_map_dyn->descTableSize);

    for (i = 0; i < le32toh(fw_map_dyn->descTableNumElements); ++i) {
        if (!desc_table) {
            device_printf(sc->mrsas_dev,
                "desc table is null, coming out %p\n", desc_table);
            return 1;
        }
#if VD_EXT_DEBUG
        device_printf(sc->mrsas_dev, "raid_map_data %p\n", raid_map_data);
        device_printf(sc->mrsas_dev, "desc table %p\n", desc_table);
        device_printf(sc->mrsas_dev,
            "raidmap type %d, raidmapOffset 0x%x, "
            "raid map number of elements 0x%x, raidmapsize 0x%x\n",
            le32toh(desc_table->raidMapDescType), desc_table->raidMapDescOffset,
            le32toh(desc_table->raidMapDescElements), desc_table->raidMapDescBufferSize);
#endif
        switch (le32toh(desc_table->raidMapDescType)) {
        case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo = (MR_DEV_HANDLE_INFO *)
                ((char *)raid_map_data + le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "devHndlInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo);
#endif
            memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo,
                sizeof(MR_DEV_HANDLE_INFO) * le32toh(desc_table->raidMapDescElements));
            break;
        case RAID_MAP_DESC_TYPE_TGTID_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd = (u_int16_t *)
                ((char *)raid_map_data +
                le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "ldTgtIdToLd address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd);
#endif
            for (j = 0; j < le32toh(desc_table->raidMapDescElements); j++) {
                pDrvRaidMap->ldTgtIdToLd[j] = fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd[j];
#if VD_EXT_DEBUG
                device_printf(sc->mrsas_dev,
                    " %d drv ldTgtIdToLd %d\n", j, pDrvRaidMap->ldTgtIdToLd[j]);
#endif
            }
            break;
        case RAID_MAP_DESC_TYPE_ARRAY_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo = (MR_ARRAY_INFO *) ((char *)raid_map_data +
                le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "arMapInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo);
#endif
            memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo,
                sizeof(MR_ARRAY_INFO) * le32toh(desc_table->raidMapDescElements));
            break;
        case RAID_MAP_DESC_TYPE_SPAN_INFO:
            fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap = (MR_LD_SPAN_MAP *) ((char *)raid_map_data +
                le32toh(desc_table->raidMapDescOffset));
            memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap,
                sizeof(MR_LD_SPAN_MAP) *
                le32toh(desc_table->raidMapDescElements));
#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev,
                "ldSpanMap address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap);
            device_printf(sc->mrsas_dev,
                "MR_LD_SPAN_MAP size 0x%lx\n", sizeof(MR_LD_SPAN_MAP));
            for (j = 0; j < ld_count; j++) {
                printf("mrsas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x "
                    "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                    j, j, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.targetId, j,
                    fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.seqNum,
                    (u_int32_t)fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.rowSize);
                printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                    j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId, j,
                    pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum,
                    (u_int32_t)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
                printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
                    drv_map, pDrvRaidMap, &fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid,
                    &pDrvRaidMap->ldSpanMap[j].ldRaid);
            }
#endif
            break;
        default:
            device_printf(sc->mrsas_dev,
                "unsupported raid map descriptor type %d\n",
                le32toh(desc_table->raidMapDescType));
        }
        ++desc_table;
    }
    return 0;
}

/*
 * This function populates the driver RAID map using the firmware RAID map.
 */
static int
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
    MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
    MR_FW_RAID_MAP_EXT *fw_map_ext;
    MR_FW_RAID_MAP *pFwRaidMap = NULL;
    unsigned int i;
    u_int16_t ld_count;

    MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

    if (sc->maxRaidMapSize) {
        return MR_PopulateDrvRaidMapVentura(sc);
    } else if (sc->max256vdSupport) {
        fw_map_ext = (MR_FW_RAID_MAP_EXT *) sc->raidmap_mem[(sc->map_id & 1)];
        ld_count = (u_int16_t)le16toh(fw_map_ext->ldCount);
        if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
            device_printf(sc->mrsas_dev,
                "mrsas: LD count exposed in RAID map is not valid\n");
            return 1;
        }
#if VD_EXT_DEBUG
        for (i = 0; i < ld_count; i++) {
            printf("mrsas : Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n",
                i, fw_map_ext->ldSpanMap[i].ldRaid.targetId,
                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
                fw_map_ext->ldSpanMap[i].ldRaid.size);
        }
#endif
        memset(drv_map, 0, sc->drv_map_sz);
        pDrvRaidMap->ldCount = htole16(ld_count);
        pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
        for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) {
            pDrvRaidMap->ldTgtIdToLd[i] = (u_int16_t)fw_map_ext->ldTgtIdToLd[i];
        }
        memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, sizeof(MR_LD_SPAN_MAP) * ld_count);
#if VD_EXT_DEBUG
        for (i = 0; i < ld_count; i++) {
            printf("mrsas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId, i,
                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
            printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
                pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, i,
                pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
            printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
                drv_map, pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
                &pDrvRaidMap->ldSpanMap[i].ldRaid);
        }
#endif
        memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
            sizeof(MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
        memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
            sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);

        pDrvRaidMap->totalSize = htole32(sizeof(MR_FW_RAID_MAP_EXT));
    } else {
        fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
        pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
        for (i = 0; i < le32toh(pFwRaidMap->ldCount); i++) {
            device_printf(sc->mrsas_dev,
                "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n", i,
                fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
                fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
                fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
        }
#endif

        memset(drv_map, 0, sc->drv_map_sz);
        pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
        pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
        pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;

        for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
            pDrvRaidMap->ldTgtIdToLd[i] =
                (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
        }

        for (i = 0; i < pDrvRaidMap->ldCount; i++) {
            pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];

#if VD_EXT_DEBUG
            device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
                i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
                pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
            device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
                pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
                pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
                (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
            device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
                drv_map, pDrvRaidMap,
                &pFwRaidMap->ldSpanMap[i].ldRaid,
                &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
        }

        memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
            sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
        memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
            sizeof(MR_DEV_HANDLE_INFO) *
            MAX_RAIDMAP_PHYSICAL_DEVICES);
    }
    return 0;
}

/*
 * MR_ValidateMapInfo: Validate RAID map
 * input:              Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map. It returns 0 if
 * successful, and 1 otherwise.
 */
u_int8_t
MR_ValidateMapInfo(struct mrsas_softc *sc)
{
    if (!sc) {
        return 1;
    }
    if (MR_PopulateDrvRaidMap(sc))
        return 1;

    MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
    PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO)&sc->log_to_span;
    u_int32_t expected_map_size;

    if (sc->maxRaidMapSize)
        expected_map_size = sizeof(MR_DRV_RAID_MAP_ALL);
    else if (sc->max256vdSupport)
        expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
    else
        expected_map_size =
            (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
            (sizeof(MR_LD_SPAN_MAP) * le16toh(pDrvRaidMap->ldCount));

    if (le32toh(pDrvRaidMap->totalSize) != expected_map_size) {
        device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size);
        device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
        device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", le32toh(pDrvRaidMap->totalSize));
        return 1;
    }
    if (sc->UnevenSpanSupport) {
        mr_update_span_set(drv_map, ldSpanInfo);
    }
    mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info);

    return 0;
}

/*
 * getSpanInfo: Print info about the span set created in the driver from the
 * FW RAID map.
 *
 * Inputs: map
 *         ldSpanInfo: LD map span info per HBA instance
 */
#if SPAN_DEBUG
static int
getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
    u_int8_t span;
    u_int32_t element;
    MR_LD_RAID *raid;
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    int ldCount;
    u_int16_t ld;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
        ld = MR_TargetIdToLdGet(ldCount, map);
        if (ld >= MAX_LOGICAL_DRIVES) {
            continue;
        }
        raid = MR_LdRaidGet(ld, map);
        printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
        for (span = 0; span < raid->spanDepth; span++)
            printf("Span=%x, number of quads=%x\n", span,
                le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements));
        for (element = 0; element < MAX_QUAD_DEPTH; element++) {
            span_set = &(ldSpanInfo[ld].span_set[element]);
            if (span_set->span_row_data_width == 0)
                break;

            printf("Span Set %x: width=%x, diff=%x\n", element,
                (unsigned int)span_set->span_row_data_width,
                (unsigned int)span_set->diff);
            printf("logical LBA start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->log_start_lba,
                (long unsigned int)span_set->log_end_lba);
            printf("span row start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->span_row_start,
                (long unsigned int)span_set->span_row_end);
            printf("data row start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->data_row_start,
                (long unsigned int)span_set->data_row_end);
            printf("data strip start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->data_strip_start,
                (long unsigned int)span_set->data_strip_end);

            for (span = 0; span < raid->spanDepth; span++) {
                if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                    block_span_info.noElements >= element + 1) {
                    quad = &map->raidMap.ldSpanMap[ld].
                        spanBlock[span].block_span_info.
                        quad[element];
                    printf("Span=%x, Quad=%x, diff=%x\n", span,
                        element, le32toh(quad->diff));
                    printf("offset_in_span=0x%08lx\n",
                        (long unsigned int)le64toh(quad->offsetInSpan));
                    printf("logical start=0x%08lx, end=0x%08lx\n",
                        (long unsigned int)le64toh(quad->logStart),
                        (long unsigned int)le64toh(quad->logEnd));
                }
            }
        }
    }
    return 0;
}
#endif

/*
 * mr_spanset_get_span_block: Calculates the span block for a given row
 * using the span set.
 *
 * Inputs: HBA instance
 *         ld: Logical drive number
 *         row: Row number
 *         map: LD map
 *
 * Outputs: span      - Span number
 *          block     - Absolute block number in the physical disk
 *          div_error - Divide error code
 */
static u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    u_int32_t span, info;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (row > span_set->data_row_end)
            continue;

        for (span = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].
                    block_span_info.quad[info];
                if (quad->diff == 0) {
                    *div_error = 1;
                    return span;
                }
                if (le64toh(quad->logStart) <= row &&
                    row <= le64toh(quad->logEnd) &&
                    (mega_mod64(row - le64toh(quad->logStart),
                    le32toh(quad->diff))) == 0) {
                    if (span_blk != NULL) {
                        u_int64_t blk;

                        blk = mega_div64_32
                            ((row - le64toh(quad->logStart)),
                            le32toh(quad->diff));
                        blk = (blk + le64toh(quad->offsetInSpan))
                            << raid->stripeShift;
                        *span_blk = blk;
                    }
                    return span;
                }
            }
    }
    return SPAN_INVALID;
}
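
/*
 * Worked example of the quad search above, with hypothetical numbers: for
 * a quad with logStart = 0, logEnd = 99, diff = 2 and offsetInSpan = 10,
 * row 40 satisfies logStart <= 40 <= logEnd and (40 - 0) % 2 == 0, so the
 * row belongs to this span.  The block is then
 *
 *     blk = ((40 - 0) / 2 + 10) << stripeShift = 30 << stripeShift,
 *
 * i.e. quads interleave the rows of a span set across spans (every
 * "diff"-th row belongs to a given span), and offsetInSpan re-bases the
 * result inside that span.
 */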

/*
 * get_row_from_strip: Calculates the row for a given strip using the span
 * set.
 *
 * Inputs: HBA instance
 *         ld: Logical drive number
 *         strip: Strip
 *         map: LD map
 *
 * Outputs: row - Row associated with the strip
 */
static u_int64_t
get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t info, strip_offset, span, span_offset;
    u_int64_t span_set_Strip, span_set_Row;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (strip > span_set->data_strip_end)
            continue;

        span_set_Strip = strip - span_set->data_strip_start;
        strip_offset = mega_mod64(span_set_Strip,
            span_set->span_row_data_width);
        span_set_Row = mega_div64_32(span_set_Strip,
            span_set->span_row_data_width) * span_set->diff;
        for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                if (strip_offset >=
                    span_set->strip_offset[span])
                    span_offset++;
                else
                    break;
            }
        mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
            "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
            (unsigned long long)span_set_Strip,
            (unsigned long long)span_set_Row,
            (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
        mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
            (unsigned long long)span_set->data_row_start +
            (unsigned long long)span_set_Row + (span_offset - 1));
        return (span_set->data_row_start + span_set_Row + (span_offset - 1));
    }
    return -1LLU;
}
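
/*
 * Worked example with hypothetical numbers: a span set covering two spans
 * with 3 and 2 data arms has span_row_data_width = 5 and strip_offset[] =
 * {0, 3}.  With data_strip_start = 0 and diff = 1, strip 7 gives
 *
 *     span_set_Strip = 7, strip_offset = 7 % 5 = 2,
 *     span_set_Row   = (7 / 5) * 1 = 1,
 *
 * and strip_offset 2 falls below strip_offset[1] = 3, so span_offset stops
 * at 1 and the row is data_row_start + 1 + (1 - 1) = 1: strips 0-4 fill
 * row 0 across both spans, strips 5-9 fill row 1.
 */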

/*
 * get_strip_from_row: Calculates the start strip for a given row using the
 * span set.
 *
 * Inputs: HBA instance
 *         ld: Logical drive number
 *         row: Row number
 *         map: LD map
 *
 * Outputs: strip - Start strip associated with the row
 */
static u_int64_t
get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t span, info;
    u_int64_t strip;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (row > span_set->data_row_end)
            continue;

        for (span = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].block_span_info.quad[info];
                if (le64toh(quad->logStart) <= row &&
                    row <= le64toh(quad->logEnd) &&
                    mega_mod64((row - le64toh(quad->logStart)),
                    le32toh(quad->diff)) == 0) {
                    strip = mega_div64_32
                        (((row - span_set->data_row_start)
                        - le64toh(quad->logStart)),
                        le32toh(quad->diff));
                    strip *= span_set->span_row_data_width;
                    strip += span_set->data_strip_start;
                    strip += span_set->strip_offset[span];
                    return strip;
                }
            }
    }
    mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug - get_strip_from_row: returns invalid "
        "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
    return -1;
}

/*
 * get_arm_from_strip: Calculates the physical arm for a given strip using
 * the span set.
 *
 * Inputs: HBA instance
 *         ld: Logical drive number
 *         strip: Strip
 *         map: LD map
 *
 * Outputs: phys arm - Physical arm associated with the strip
 */
static u_int32_t
get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t info, strip_offset, span, span_offset;

    for (info = 0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0)
            break;
        if (strip > span_set->data_strip_end)
            continue;

        strip_offset = (u_int32_t)mega_mod64
            ((strip - span_set->data_strip_start),
            span_set->span_row_data_width);

        for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
            if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements) >= info + 1) {
                if (strip_offset >= span_set->strip_offset[span])
                    span_offset = span_set->strip_offset[span];
                else
                    break;
            }
        mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: "
            "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
            (long unsigned int)strip, (strip_offset - span_offset));
        return (strip_offset - span_offset);
    }

    mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: - get_arm_from_strip: returns invalid arm"
        " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

    return -1;
}

/* This function returns the physical arm for the given strip. */
static u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

    /* Need to check correct default value */
    u_int32_t arm = 0;

    switch (raid->level) {
    case 0:
    case 5:
    case 6:
        arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
        break;
    case 1:
        /* start with logical arm */
        arm = get_arm_from_strip(sc, ld, stripe, map);
        arm *= 2;
        break;
    }

    return arm;
}
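
/*
 * Worked example of the RAID-6 arm rotation used below, with hypothetical
 * numbers: with SPAN_ROW_SIZE = 4 arms, row 5 and logical arm 1:
 *
 *     rowMod = 5 % 4 = 1
 *     armQ   = 4 - 1 - 1 = 2    (Q parity sits on arm 2 in this row)
 *     arm    = 2 + 1 + 1 = 4, which wraps to 0
 *
 * so the data that is logically arm 1 of row 5 physically lands on arm 0:
 * parity rotates by one arm per row and data logically follows Q.
 */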

/*
 * mr_spanset_get_phy_params: Calculates the arm, span, and block for the
 * specified stripe and reference in stripe using the span set.
 *
 * Inputs: sc - HBA instance
 *         ld - Logical drive number
 *         stripRow: Stripe number
 *         stripRef: Reference in stripe
 *
 * Outputs: span  - Span number
 *          block - Absolute block number in the physical disk
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int32_t pd, arRef, r1_alt_pd;
    u_int8_t physArm, span;
    u_int64_t row;
    u_int8_t retval = TRUE;
    u_int64_t *pdBlock = &io_info->pdBlock;
    u_int16_t *pDevHandle = &io_info->devHandle;
    u_int8_t *pPdInterface = &io_info->pdInterface;
    u_int32_t logArm, rowMod, armQ, arm;

    /* Get row and span from io_info for uneven span IO. */
    row = io_info->start_row;
    span = io_info->start_span;

    if (raid->level == 6) {
        logArm = get_arm_from_strip(sc, ld, stripRow, map);
        rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
        armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
        arm = armQ + 1 + logArm;
        if (arm >= SPAN_ROW_SIZE(map, ld, span))
            arm -= SPAN_ROW_SIZE(map, ld, span);
        physArm = (u_int8_t)arm;
    } else
        /* Calculate the arm */
        physArm = get_arm(sc, ld, span, stripRow, map);

    arRef = MR_LdSpanArrayGet(ld, span, map);
    pd = MR_ArPdGet(arRef, physArm, map);

    if (pd != MR_PD_INVALID) {
        *pDevHandle = MR_PdDevHandleGet(pd, map);
        *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
        /* get second pd also for raid 1/10 fast path writes */
        if ((raid->level == 1) && !io_info->isRead) {
            r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
            if (r1_alt_pd != MR_PD_INVALID)
                io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
        }
    } else {
        *pDevHandle = htole16(MR_DEVHANDLE_INVALID);
        if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
            (sc->mrsas_gen3_ctrl &&
            raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
            pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
        else if (raid->level == 1) {
            physArm++;
            pd = MR_ArPdGet(arRef, physArm, map);
            if (pd != MR_PD_INVALID) {
                *pDevHandle = MR_PdDevHandleGet(pd, map);
                *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
            }
        }
    }

    *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk);
    if (sc->is_ventura || sc->is_aero) {
        ((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm =
            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
    } else {
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = pRAID_Context->spanArm;
    }
    return retval;
}
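
/*
 * Worked example of the strip/row arithmetic used by MR_BuildRaidContext
 * below, for a hypothetical even-span geometry: stripeShift = 3 (8-block
 * strips), rowDataSize = 2 arms, IO at ldStartBlock = 20 for numBlocks = 6:
 *
 *     start_strip         = 20 >> 3 = 2      endStrip   = 25 >> 3 = 3
 *     ref_in_start_stripe = 20 & 7  = 4      num_strips = 2
 *     start_row           = 2 / 2   = 1      endRow     = 3 / 2 = 1
 *
 * The IO touches strips 2 and 3, both in row 1 (numRows = 1), so
 * regStart = 1 << 3 = 8 and, since more than one strip is spanned, the
 * region lock keeps the default one-strip width of 8 blocks.
 */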

/*
 * MR_BuildRaidContext: Set up fast path RAID context
 *
 * This function initiates command processing.  The start/end row and strip
 * information is calculated, then the region lock parameters are set up.
 * It returns TRUE if the RAID context was built successfully, and FALSE
 * otherwise.
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid;
    u_int32_t ld, stripSize, stripe_mask;
    u_int64_t endLba, endStrip, endRow, start_row, start_strip;
    REGION_KEY regStart;
    REGION_LEN regSize;
    u_int8_t num_strips, numRows;
    u_int16_t ref_in_start_stripe, ref_in_end_stripe;
    u_int64_t ldStartBlock;
    u_int32_t numBlocks, ldTgtId;
    u_int8_t isRead, stripIdx;
    u_int8_t retval = 0;
    u_int8_t startlba_span = SPAN_INVALID;
    u_int64_t *pdBlock = &io_info->pdBlock;
    int error_code = 0;

    ldStartBlock = io_info->ldStartBlock;
    numBlocks = io_info->numBlocks;
    ldTgtId = io_info->ldTgtId;
    isRead = io_info->isRead;

    io_info->IoforUnevenSpan = 0;
    io_info->start_span = SPAN_INVALID;

    ld = MR_TargetIdToLdGet(ldTgtId, map);
    raid = MR_LdRaidGet(ld, map);

    /* check read ahead bit */
    io_info->raCapable = raid->capability.raCapable;

    if (raid->rowDataSize == 0) {
        if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
            return FALSE;
        else if (sc->UnevenSpanSupport) {
            io_info->IoforUnevenSpan = 1;
        } else {
            mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%0x,"
                " and there is _NO_ UnevenSpanSupport\n",
                MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
            return FALSE;
        }
    }
    stripSize = 1 << raid->stripeShift;
    stripe_mask = stripSize - 1;

    /*
     * calculate starting row and stripe, and number of strips and rows
     */
    start_strip = ldStartBlock >> raid->stripeShift;
    ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
    endLba = ldStartBlock + numBlocks - 1;
    ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
    endStrip = endLba >> raid->stripeShift;
    num_strips = (u_int8_t)(endStrip - start_strip + 1);    /* End strip */
    if (io_info->IoforUnevenSpan) {
        start_row = get_row_from_strip(sc, ld, start_strip, map);
        endRow = get_row_from_strip(sc, ld, endStrip, map);
        if (raid->spanDepth == 1) {
            startlba_span = 0;
            *pdBlock = start_row << raid->stripeShift;
        } else {
            startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
                pdBlock, map, &error_code);
            if (error_code == 1) {
                mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n",
                    __func__, __LINE__);
                return FALSE;
            }
        }
        if (startlba_span == SPAN_INVALID) {
            mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d for row 0x%llx, "
                "start strip 0x%llx endStrip 0x%llx\n", __func__,
                __LINE__, (unsigned long long)start_row,
                (unsigned long long)start_strip,
                (unsigned long long)endStrip);
            return FALSE;
        }
        io_info->start_span = startlba_span;
        io_info->start_row = start_row;
        mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: Check span number from %s %d for row 0x%llx, "
            "start strip 0x%llx endStrip 0x%llx span 0x%x\n",
            __func__, __LINE__, (unsigned long long)start_row,
            (unsigned long long)start_strip,
            (unsigned long long)endStrip, startlba_span);
        mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
            (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
    } else {
        start_row = mega_div64_32(start_strip, raid->rowDataSize);
        endRow = mega_div64_32(endStrip, raid->rowDataSize);
    }

    numRows = (u_int8_t)(endRow - start_row + 1);    /* get the row count */

    /*
     * Calculate region info.  (Assume region at start of first row, and
     * assume this IO needs the full row - will adjust if not true.)
     */
    regStart = start_row << raid->stripeShift;
    regSize = stripSize;

    /* Check if we can send this I/O via FastPath */
    if (raid->capability.fpCapable) {
        if (isRead)
            io_info->fpOkForIo = (raid->capability.fpReadCapable &&
                ((num_strips == 1) ||
                raid->capability.fpReadAcrossStripe));
        else
            io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
                ((num_strips == 1) ||
                raid->capability.fpWriteAcrossStripe));
    } else
        io_info->fpOkForIo = FALSE;

    if (numRows == 1) {
        if (num_strips == 1) {
            regStart += ref_in_start_stripe;
            regSize = numBlocks;
        }
    } else if (io_info->IoforUnevenSpan == 0) {
        /*
         * For even span region lock optimization.  If the start strip
         * is the last in the start row
         */
        if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
            regStart += ref_in_start_stripe;
            /*
             * initialize count to sectors from startRef to end
             * of strip
             */
            regSize = stripSize - ref_in_start_stripe;
        }
        /* add complete rows in the middle of the transfer */
        if (numRows > 2)
            regSize += (numRows - 2) << raid->stripeShift;

        /* if IO ends within first strip of last row */
        if (endStrip == endRow * raid->rowDataSize)
            regSize += ref_in_end_stripe + 1;
        else
            regSize += stripSize;
    } else {
        if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
            SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
            regStart += ref_in_start_stripe;
            /*
             * initialize count to sectors from startRef to end
             * of strip
             */
            regSize = stripSize - ref_in_start_stripe;
        }
        /* add complete rows in the middle of the transfer */
        if (numRows > 2)
            regSize += (numRows - 2) << raid->stripeShift;

        /* if IO ends within first strip of last row */
        if (endStrip == get_strip_from_row(sc, ld, endRow, map))
            regSize += ref_in_end_stripe + 1;
        else
            regSize += stripSize;
    }
    pRAID_Context->timeoutValue = htole16(map->raidMap.fpPdIoTimeoutSec);
    if (sc->mrsas_gen3_ctrl)
        pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
    else if (sc->device_id == MRSAS_TBOLT)
        pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
    pRAID_Context->VirtualDiskTgtId = raid->targetId;
    pRAID_Context->regLockRowLBA = htole64(regStart);
    pRAID_Context->regLockLength = htole32(regSize);
    pRAID_Context->configSeqNum = raid->seqNum;

    /*
     * Get Phy Params only if FP capable, or else leave it to MR firmware
     * to do the calculation.
     */
    if (io_info->fpOkForIo) {
        retval = io_info->IoforUnevenSpan ?
            mr_spanset_get_phy_params(sc, ld, start_strip,
            ref_in_start_stripe, io_info, pRAID_Context, map) :
            MR_GetPhyParams(sc, ld, start_strip,
            ref_in_start_stripe, io_info, pRAID_Context, map);
        /* If the IO maps to an invalid PD, then FP is not possible */
        if (io_info->devHandle == MR_DEVHANDLE_INVALID)
            io_info->fpOkForIo = FALSE;
        /*
         * if FP possible, set the SLUD bit in regLockFlags for
         * ventura
         */
        else if ((sc->is_ventura || sc->is_aero) && !isRead &&
            (raid->writeMode == MR_RL_WRITE_BACK_MODE) && (raid->level <= 1) &&
            raid->capability.fpCacheBypassCapable) {
            ((RAID_CONTEXT_G35 *) pRAID_Context)->routingFlags.bits.sld = 1;
        }

        return retval;
    } else if (isRead) {
        for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
            retval = io_info->IoforUnevenSpan ?
                mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
                ref_in_start_stripe, io_info, pRAID_Context, map) :
                MR_GetPhyParams(sc, ld, start_strip + stripIdx,
                ref_in_start_stripe, io_info, pRAID_Context, map);
            if (!retval)
                return TRUE;
        }
    }
#if SPAN_DEBUG
    /* Just for testing what arm we get for strip. */
    get_arm_from_strip(sc, ld, start_strip, map);
#endif
    return TRUE;
}

/*
 * mr_update_span_set: Prepares span set info from the valid RAID map and
 * stores it in the per-instance local copy of ldSpanInfo.
 *
 * Inputs: map - LD map
 *         ldSpanInfo - ldSpanInfo per HBA instance
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
    u_int8_t span, count;
    u_int32_t element, span_row_width;
    u_int64_t span_row;
    MR_LD_RAID *raid;
    LD_SPAN_SET *span_set, *span_set_prev;
    MR_QUAD_ELEMENT *quad;
    int ldCount;
    u_int16_t ld;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
        ld = MR_TargetIdToLdGet(ldCount, map);
        if (ld >= MAX_LOGICAL_DRIVES)
            continue;
        raid = MR_LdRaidGet(ld, map);
        for (element = 0; element < MAX_QUAD_DEPTH; element++) {
            for (span = 0; span < raid->spanDepth; span++) {
                if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
                    block_span_info.noElements) < element + 1)
                    continue;
                /* TO-DO */
                span_set = &(ldSpanInfo[ld].span_set[element]);
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].block_span_info.quad[element];

                span_set->diff = le32toh(quad->diff);

                for (count = 0, span_row_width = 0;
                    count < raid->spanDepth; count++) {
                    if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[count].
                        block_span_info.noElements) >= element + 1) {
                        span_set->strip_offset[count] = span_row_width;
                        span_row_width +=
                            MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
                        printf("AVAGO Debug span %x rowDataSize %x\n", count,
                            MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
                    }
                }

                span_set->span_row_data_width = span_row_width;
                span_row = mega_div64_32(((le64toh(quad->logEnd) -
                    le64toh(quad->logStart)) + le32toh(quad->diff)),
                    le32toh(quad->diff));

                if (element == 0) {
                    span_set->log_start_lba = 0;
                    span_set->log_end_lba =
                        ((span_row << raid->stripeShift) * span_row_width) - 1;

                    span_set->span_row_start = 0;
                    span_set->span_row_end = span_row - 1;

                    span_set->data_strip_start = 0;
                    span_set->data_strip_end = (span_row * span_row_width) - 1;

                    span_set->data_row_start = 0;
                    span_set->data_row_end =
                        (span_row * le32toh(quad->diff)) - 1;
                } else {
                    span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
                    span_set->log_start_lba = span_set_prev->log_end_lba + 1;
                    span_set->log_end_lba = span_set->log_start_lba +
                        ((span_row << raid->stripeShift) * span_row_width) - 1;

                    span_set->span_row_start = span_set_prev->span_row_end + 1;
                    span_set->span_row_end =
                        span_set->span_row_start + span_row - 1;

                    span_set->data_strip_start =
                        span_set_prev->data_strip_end + 1;
                    span_set->data_strip_end = span_set->data_strip_start +
                        (span_row * span_row_width) - 1;

                    span_set->data_row_start = span_set_prev->data_row_end + 1;
                    span_set->data_row_end = span_set->data_row_start +
                        (span_row * le32toh(quad->diff)) - 1;
                }
                break;
            }
            if (span == raid->spanDepth)
                break;    /* no quads remain */
        }
    }
#if SPAN_DEBUG
    getSpanInfo(map, ldSpanInfo);    /* to get span set info */
#endif
}

/*
 * mrsas_update_load_balance_params: Update load balance params
 * Inputs:
 *     sc - driver softc instance
 *     drv_map - driver RAID map
 *     lbInfo - Load balance info
 *
 * This function updates the load balance parameters for the LD config of a
 * two drive optimal RAID-1.
 */
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo)
{
    int ldCount;
    u_int16_t ld;
    MR_LD_RAID *raid;

    if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1)
        sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
        ld = MR_TargetIdToLdGet(ldCount, drv_map);
        if (ld >= MAX_LOGICAL_DRIVES_EXT) {
            lbInfo[ldCount].loadBalanceFlag = 0;
            continue;
        }
        raid = MR_LdRaidGet(ld, drv_map);
        le32_to_cpus(&raid->capability);
        if ((raid->level != 1) ||
            (raid->ldState != MR_LD_STATE_OPTIMAL)) {
            lbInfo[ldCount].loadBalanceFlag = 0;
            continue;
        }
        lbInfo[ldCount].loadBalanceFlag = 1;
    }
}
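
/*
 * Worked example of the CDB rewriting done below, with hypothetical values:
 * a READ(6) with start_blk = 0x250000 exceeds the 21-bit LBA field of a
 * 6-byte CDB (max 0x1fffff), so it is converted to READ(10): the opcode
 * becomes READ_10, the LBA is stored big-endian in cdb[2..5] as
 * 00 25 00 00, and the transfer length moves to cdb[7..8].  Conversely,
 * 12/16-byte CDBs whose LBA fits in 32 bits are folded down to 10-byte
 * CDBs, which more drives support on the fast path.
 */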

/*
 * mrsas_set_pd_lba: Sets PD LBA
 * input: io_request pointer
 *        CDB length
 *        io_info pointer
 *        Pointer to CCB
 *        Local RAID map pointer
 *        Start block of IO
 *        Block size
 *
 * Used to set the PD logical block address in CDB for FP IOs.
 */
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
    struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size)
{
    MR_LD_RAID *raid;
    u_int32_t ld;
    u_int64_t start_blk = io_info->pdBlock;
    u_int8_t *cdb = io_request->CDB.CDB32;
    u_int32_t num_blocks = io_info->numBlocks;
    u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);

    /* Check if T10 PI (DIF) is enabled for this LD */
    ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
    raid = MR_LdRaidGet(ld, local_map_ptr);
    if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
        memset(cdb, 0, sizeof(io_request->CDB.CDB32));
        cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
        cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

        if (ccb_h->flags == CAM_DIR_OUT)
            cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
        else
            cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
        cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;

        /* LBA */
        cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
        cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
        cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
        cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
        cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
        cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
        cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
        cdb[19] = (u_int8_t)(start_blk & 0xff);

        /* Logical block reference tag */
        io_request->CDB.EEDP32.PrimaryReferenceTag = htobe32(ref_tag);
        io_request->CDB.EEDP32.PrimaryApplicationTagMask = htobe16(0xffff);
        io_request->IoFlags = htole16(32);    /* Specify 32-byte cdb */

        /* Transfer length */
        cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
        cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
        cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
        cdb[31] = (u_int8_t)(num_blocks & 0xff);

        /* set SCSI IO EEDP Flags */
        if (ccb_h->flags == CAM_DIR_OUT) {
            io_request->EEDPFlags = htole16(
                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
                MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
        } else {
            io_request->EEDPFlags = htole16(
                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
                MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
        }
        io_request->Control |= htole32(0x4 << 26);
        io_request->EEDPBlockSize = htole32(ld_block_size);
    } else {
        /* Some drives don't support 16/12 byte CDB's, convert to 10 */
        if (((cdb_len == 12) || (cdb_len == 16)) &&
            (start_blk <= 0xffffffff)) {
            if (cdb_len == 16) {
                opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
                flagvals = cdb[1];
                groupnum = cdb[14];
                control = cdb[15];
            } else {
                opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
                flagvals = cdb[1];
                groupnum = cdb[10];
                control = cdb[11];
            }

            memset(cdb, 0, sizeof(io_request->CDB.CDB32));

            cdb[0] = opcode;
            cdb[1] = flagvals;
            cdb[6] = groupnum;
            cdb[9] = control;

            /* Transfer length */
            cdb[8] = (u_int8_t)(num_blocks & 0xff);
            cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

            io_request->IoFlags = htole16(10);    /* Specify 10-byte cdb */
            cdb_len = 10;
        } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
            /* Convert to 16 byte CDB for large LBA's */
            switch (cdb_len) {
            case 6:
                opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
                control = cdb[5];
                break;
            case 10:
                opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
                flagvals = cdb[1];
                groupnum = cdb[6];
                control = cdb[9];
                break;
            case 12:
                opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
                flagvals = cdb[1];
                groupnum = cdb[10];
                control = cdb[11];
                break;
            }

            memset(cdb, 0, sizeof(io_request->CDB.CDB32));

            cdb[0] = opcode;
            cdb[1] = flagvals;
            cdb[14] = groupnum;
            cdb[15] = control;

            /* Transfer length */
            cdb[13] = (u_int8_t)(num_blocks & 0xff);
            cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
            cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
            cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);

            io_request->IoFlags = htole16(16);    /* Specify 16-byte cdb */
            cdb_len = 16;
        } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
            /* convert to 10 byte CDB */
            opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
            control = cdb[5];

            memset(cdb, 0, sizeof(io_request->CDB.CDB32));
            cdb[0] = opcode;
            cdb[9] = control;

            /* Set transfer length */
            cdb[8] = (u_int8_t)(num_blocks & 0xff);
            cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

            io_request->IoFlags = htole16(10);    /* Specify 10-byte cdb */
            cdb_len = 10;
        }
        /* Fall through to the normal case, just load the LBA here */
        u_int8_t val = cdb[1] & 0xE0;

        switch (cdb_len) {
        case 6:
            cdb[3] = (u_int8_t)(start_blk & 0xff);
            cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
            cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
            break;
        case 10:
            cdb[5] = (u_int8_t)(start_blk & 0xff);
            cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
            cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
            cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
            break;
        case 16:
            cdb[9] = (u_int8_t)(start_blk & 0xff);
            cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
            cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
            cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
            cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
            cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
            cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
            cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
            break;
        }
    }
}

/*
 * mrsas_get_best_arm_pd: Determine the best spindle arm
 * Inputs:
 *     sc - HBA instance
 *     lbInfo - Load balance info
 *     io_info - IO request info
 *
 * This function determines and returns the best arm by looking at the
 * parameters of the last PD access.
 */
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
    MR_LD_RAID *raid;
    MR_DRV_RAID_MAP_ALL *drv_map;
    u_int16_t pd1_devHandle;
    u_int16_t pend0, pend1, ld;
    u_int64_t diff0, diff1;
    u_int8_t bestArm, pd0, pd1, span, arm;
    u_int32_t arRef, span_row_size;

    u_int64_t block = io_info->ldStartBlock;
    u_int32_t count = io_info->numBlocks;

    span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
        >> RAID_CTX_SPANARM_SPAN_SHIFT);
    arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

    drv_map = sc->ld_drv_map[(sc->map_id & 1)];
    ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
    raid = MR_LdRaidGet(ld, drv_map);
    span_row_size = sc->UnevenSpanSupport ?
        SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

    arRef = MR_LdSpanArrayGet(ld, span, drv_map);
    pd0 = MR_ArPdGet(arRef, arm, drv_map);
    pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
        (arm + 1 - span_row_size) : arm + 1, drv_map);

    /* Get PD1 Dev Handle */
    pd1_devHandle = MR_PdDevHandleGet(pd1, drv_map);
    if (pd1_devHandle == MR_DEVHANDLE_INVALID) {
        bestArm = arm;
    } else {
        /* get the pending cmds for the data and mirror arms */
        pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
        pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

        /* Determine the disk whose head is nearer to the req. block */
        diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
        diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
        bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

        if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
            (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
            bestArm ^= 1;

        /* Update the last accessed block on the correct pd */
        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
        io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
    }

    lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
#if SPAN_DEBUG
    if (arm != bestArm)
        printf("AVAGO Debug R1 Load balance occurred - span 0x%x arm 0x%x bestArm 0x%x "
            "io_info->span_arm 0x%x\n",
            span, arm, bestArm, io_info->span_arm);
#endif

    return io_info->pd_after_lb;
}
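
/*
 * Worked example of the decision above, with hypothetical numbers: for a
 * RAID-1 read at block 5000 with last_accessed_block[pd0] = 4999 and
 * last_accessed_block[pd1] = 90000, diff0 = 1 and diff1 = 85000, so the
 * data arm wins on locality (bestArm = arm).  If, however, pd0 already had
 * pend0 = 9 outstanding commands and pd1 only pend1 = 2, then with
 * lb_pending_cmds = 4 the test 9 > 2 + 4 flips bestArm to the mirror arm:
 * proximity is overridden when the queue-depth imbalance exceeds
 * lb_pending_cmds.
 */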

/*
 * mrsas_get_updated_dev_handle: Get the updated dev handle
 * Inputs:
 *     sc - Adapter instance soft state
 *     lbInfo - Load balance info
 *     io_info - io_info pointer
 *
 * This function determines and returns the updated dev handle.
 */
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
    u_int8_t arm_pd;
    u_int16_t devHandle;
    MR_DRV_RAID_MAP_ALL *drv_map;

    drv_map = sc->ld_drv_map[(sc->map_id & 1)];

    /* get best new arm */
    arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info);
    devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
    io_info->pdInterface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
    mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

    return devHandle;
}
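
/*
 * Hypothetical usage sketch: a fast-path RAID-1 read that qualified for
 * load balancing would be re-routed roughly like this by the IO build path
 * (variable names follow the io_info/lbInfo conventions above):
 *
 *     if (lbInfo[ldCount].loadBalanceFlag && io_info.isRead) {
 *         io_info.devHandle =
 *             mrsas_get_updated_dev_handle(sc, &lbInfo[ldCount], &io_info);
 *     }
 *
 * mrsas_get_best_arm_pd() picks the arm and bumps the per-PD outstanding
 * command counter, which the completion path is expected to decrement.
 */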

/*
 * MR_GetPhyParams: Calculates arm, span, and block
 * Inputs: Adapter soft state
 *         Logical drive number (LD)
 *         Stripe number (stripRow)
 *         Reference in stripe (stripRef)
 *
 * Outputs: Absolute block number in the physical disk
 *
 * This routine calculates the arm, span, and block for the specified stripe
 * and reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int32_t pd, arRef, r1_alt_pd;
    u_int8_t physArm, span;
    u_int64_t row;
    u_int8_t retval = TRUE;
    int error_code = 0;
    u_int64_t *pdBlock = &io_info->pdBlock;
    u_int16_t *pDevHandle = &io_info->devHandle;
    u_int8_t *pPdInterface = &io_info->pdInterface;
    u_int32_t rowMod, armQ, arm, logArm;

    row = mega_div64_32(stripRow, raid->rowDataSize);

    if (raid->level == 6) {
        /* logical arm within row */
        logArm = mega_mod64(stripRow, raid->rowDataSize);
        if (raid->rowSize == 0)
            return FALSE;
        rowMod = mega_mod64(row, raid->rowSize);    /* get logical row mod */
        armQ = raid->rowSize - 1 - rowMod;    /* index of Q drive */
        arm = armQ + 1 + logArm;    /* data always logically follows Q */
        if (arm >= raid->rowSize)    /* handle wrap condition */
            arm -= raid->rowSize;
        physArm = (u_int8_t)arm;
    } else {
        if (raid->modFactor == 0)
            return FALSE;
        physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
    }

    if (raid->spanDepth == 1) {
        span = 0;
        *pdBlock = row << raid->stripeShift;
    } else {
        span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
        if (error_code == 1)
            return FALSE;
    }

    /* Get the array on which this span is present */
    arRef = MR_LdSpanArrayGet(ld, span, map);

    pd = MR_ArPdGet(arRef, physArm, map);    /* Get the Pd. */

    if (pd != MR_PD_INVALID) {
        /* Get dev handle from Pd */
        *pDevHandle = MR_PdDevHandleGet(pd, map);
        *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
        /* get second pd also for raid 1/10 fast path writes */
        if ((raid->level == 1) && !io_info->isRead) {
            r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
            if (r1_alt_pd != MR_PD_INVALID)
                io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
        }
    } else {
        *pDevHandle = htole16(MR_DEVHANDLE_INVALID);    /* set dev handle as invalid */
        if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
            (sc->mrsas_gen3_ctrl &&
            raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
            pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
        else if (raid->level == 1) {
            /* Get the alternate Pd. */
            physArm++;
            pd = MR_ArPdGet(arRef, physArm, map);
            if (pd != MR_PD_INVALID) {
                /* Get dev handle from Pd. */
                *pDevHandle = MR_PdDevHandleGet(pd, map);
                *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
            }
        }
    }

    *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk);
    if (sc->is_ventura || sc->is_aero) {
        ((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm =
            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
    } else {
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
        io_info->span_arm = pRAID_Context->spanArm;
    }
    return retval;
}

/*
 * MR_GetSpanBlock: Calculates span block
 * Inputs: LD
 *         row
 *         span block
 *         RAID map pointer
 *
 * Outputs: Span number
 *          Error code
 *
 * This routine calculates the span from the span block info.
 */
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
    MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
    MR_QUAD_ELEMENT *quad;
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int32_t span, j;
    u_int64_t blk;

    for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
        for (j = 0; j < le32toh(pSpanBlock->block_span_info.noElements); j++) {
            quad = &pSpanBlock->block_span_info.quad[j];
            if (quad->diff == 0) {
                *div_error = 1;
                return span;
            }
            if (le64toh(quad->logStart) <= row &&
                row <= le64toh(quad->logEnd) &&
                (mega_mod64(row - le64toh(quad->logStart),
                le32toh(quad->diff))) == 0) {
                if (span_blk != NULL) {
                    blk = mega_div64_32((row - le64toh(quad->logStart)),
                        le32toh(quad->diff));
                    blk = (blk + le64toh(quad->offsetInSpan)) << raid->stripeShift;
                    *span_blk = blk;
                }
                return span;
            }
        }
    }
    return span;
}