1 /* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 
34 * 35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES, 1621 36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 37 * 38 */ 39 40 #include <sys/cdefs.h> 41 #include <dev/mrsas/mrsas.h> 42 43 #include <cam/cam.h> 44 #include <cam/cam_ccb.h> 45 #include <cam/cam_sim.h> 46 #include <cam/cam_xpt_sim.h> 47 #include <cam/cam_debug.h> 48 #include <cam/cam_periph.h> 49 #include <cam/cam_xpt_periph.h> 50 51 /* 52 * Function prototypes 53 */ 54 u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 55 u_int8_t 56 mrsas_get_best_arm_pd(struct mrsas_softc *sc, 57 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); 58 u_int8_t 59 MR_BuildRaidContext(struct mrsas_softc *sc, 60 struct IO_REQUEST_INFO *io_info, 61 RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); 62 u_int8_t 63 MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld, 64 u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, 65 RAID_CONTEXT * pRAID_Context, 66 MR_DRV_RAID_MAP_ALL * map); 67 u_int8_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map); 68 u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); 69 u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 70 u_int16_t 71 mrsas_get_updated_dev_handle(struct mrsas_softc *sc, 72 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); 73 u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor); 74 u_int32_t 75 MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk, 76 MR_DRV_RAID_MAP_ALL * map, int *div_error); 77 u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor); 78 void 79 mrsas_update_load_balance_params(struct mrsas_softc *sc, 80 MR_DRV_RAID_MAP_ALL * map, PLD_LOAD_BALANCE_INFO lbInfo); 81 void 82 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, 83 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, 84 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, 85 u_int32_t ld_block_size); 
86 static u_int16_t 87 MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, 88 MR_DRV_RAID_MAP_ALL * map); 89 static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map); 90 static u_int16_t 91 MR_ArPdGet(u_int32_t ar, u_int32_t arm, 92 MR_DRV_RAID_MAP_ALL * map); 93 static MR_LD_SPAN * 94 MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, 95 MR_DRV_RAID_MAP_ALL * map); 96 static u_int8_t 97 MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, 98 MR_DRV_RAID_MAP_ALL * map); 99 static MR_SPAN_BLOCK_INFO * 100 MR_LdSpanInfoGet(u_int32_t ld, 101 MR_DRV_RAID_MAP_ALL * map); 102 MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 103 static int MR_PopulateDrvRaidMap(struct mrsas_softc *sc); 104 105 /* 106 * Spanset related function prototypes Added for PRL11 configuration (Uneven 107 * span support) 108 */ 109 void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo); 110 static u_int8_t 111 mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, 112 u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, 113 RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); 114 static u_int64_t 115 get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld, 116 u_int64_t strip, MR_DRV_RAID_MAP_ALL * map); 117 static u_int32_t 118 mr_spanset_get_span_block(struct mrsas_softc *sc, 119 u_int32_t ld, u_int64_t row, u_int64_t *span_blk, 120 MR_DRV_RAID_MAP_ALL * map, int *div_error); 121 static u_int8_t 122 get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, 123 u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map); 124 125 /* 126 * Spanset related defines Added for PRL11 configuration(Uneven span support) 127 */ 128 #define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize 129 #define SPAN_ROW_DATA_SIZE(map_, ld, index_) \ 130 MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize 131 #define SPAN_INVALID 0xff 132 #define SPAN_DEBUG 0 133 134 /* 135 * Related Defines 136 */ 137 138 typedef u_int64_t 
REGION_KEY; 139 typedef u_int32_t REGION_LEN; 140 141 #define MR_LD_STATE_OPTIMAL 3 142 #define FALSE 0 143 #define TRUE 1 144 145 #define LB_PENDING_CMDS_DEFAULT 4 146 147 /* 148 * Related Macros 149 */ 150 151 #define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) ) 152 153 #define swap32(x) \ 154 ((unsigned int)( \ 155 (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \ 156 (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \ 157 (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \ 158 (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) )) 159 160 /* 161 * In-line functions for mod and divide of 64-bit dividend and 32-bit 162 * divisor. Assumes a check for a divisor of zero is not possible. 163 * 164 * @param dividend: Dividend 165 * @param divisor: Divisor 166 * @return remainder 167 */ 168 169 #define mega_mod64(dividend, divisor) ({ \ 170 int remainder; \ 171 remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \ 172 remainder;}) 173 174 #define mega_div64_32(dividend, divisor) ({ \ 175 int quotient; \ 176 quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \ 177 quotient;}) 178 179 /* 180 * Various RAID map access functions. These functions access the various 181 * parts of the RAID map and returns the appropriate parameters. 
 */

/*
 * MR_LdRaidGet:  Return a pointer to the MR_LD_RAID descriptor of logical
 * drive index 'ld' in the driver RAID map.  No bounds checking is done
 * here; callers validate 'ld' (e.g. against MAX_LOGICAL_DRIVES).
 */
MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

/* Return the host-endian target ID of logical drive index 'ld'. */
u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return le16toh(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

/* Return the host-endian array reference of span 'span' of LD 'ld'. */
static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return le16toh(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

/* Return data-arm map entry 'armIdx' of LD 'ld' (single byte, no byte swap). */
static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

/*
 * Return the current device handle of physical device 'pd'.
 * NOTE(review): no le16toh here -- presumably the handle is handed back to
 * the firmware in wire order; confirm against the consumers before adding
 * a conversion.
 */
static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

/* Return the interface type byte of physical device 'pd'. */
static u_int8_t MR_PdInterfaceTypeGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

/* Return the host-endian physical device index of arm 'arm' in array 'ar'. */
static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
	return le16toh(map->raidMap.arMapInfo[ar].pd[arm]);
}

/* Return a pointer to the MR_LD_SPAN of span 'span' of LD 'ld'. */
static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

/* Return a pointer to the first span block info of LD 'ld'. */
static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

/* Translate a SCSI target ID into the firmware logical drive index. */
u_int8_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

/*
 * MR_LdBlockSizeGet:  Return the logical block length of the LD behind
 * target ID 'ldTgtId', falling back to MRSAS_SCSIBLOCKSIZE when the LD is
 * gone or reports a zero block length.
 */
u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/*
	 * Check if logical drive was removed.
	 */
	if (ld >= MAX_LOGICAL_DRIVES)
		return ldBlockSize;

	raid = MR_LdRaidGet(ld, map);
	/* NOTE(review): logicalBlockLength is read raw (no le*toh) here. */
	ldBlockSize = raid->logicalBlockLength;
	if (!ldBlockSize)
		ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	return ldBlockSize;
}

/*
 * This function will Populate Driver Map using Dynamic firmware raid map
 *
 * The dynamic (Ventura+) firmware map is self-describing: a descriptor
 * table follows the header, and each descriptor names a region type, its
 * offset from the start of the data area, and its element count.  Each
 * region is copied into the fixed-layout driver map.  Returns 0 on
 * success, 1 on failure.
 */
static int
MR_PopulateDrvRaidMapVentura(struct mrsas_softc *sc)
{
	unsigned int i, j;
	u_int16_t ld_count;

	MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	MR_RAID_MAP_DESC_TABLE *desc_table;
	/* Maps are double buffered; map_id selects the current buffer. */
	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	fw_map_dyn = (MR_FW_RAID_MAP_DYNAMIC *) sc->raidmap_mem[(sc->map_id & 1)];

	if (fw_map_dyn == NULL) {
		device_printf(sc->mrsas_dev,
		    "from %s %d map0 %p map1 %p map size %d \n", __func__, __LINE__,
		    sc->raidmap_mem[0], sc->raidmap_mem[1], sc->maxRaidMapSize);
		return 1;
	}
#if VD_EXT_DEBUG
	device_printf(sc->mrsas_dev,
	    " raidMapSize 0x%x, descTableOffset 0x%x, "
	    " descTableSize 0x%x, descTableNumElements 0x%x \n",
	    fw_map_dyn->raidMapSize, le32toh(fw_map_dyn->descTableOffset),
	    fw_map_dyn->descTableSize, fw_map_dyn->descTableNumElements);
#endif
	/* Descriptor table sits at the firmware-reported offset. */
	desc_table = (MR_RAID_MAP_DESC_TABLE *) ((char *)fw_map_dyn +
	    le32toh(fw_map_dyn->descTableOffset));
	if (desc_table != fw_map_dyn->raidMapDescTable) {
		device_printf(sc->mrsas_dev,
		    "offsets of desc table are not matching returning "
		    " FW raid map has been changed: desc %p original %p\n",
		    desc_table, fw_map_dyn->raidMapDescTable);
	}
	memset(drv_map, 0, sc->drv_map_sz);
	ld_count = le16toh(fw_map_dyn->ldCount);
	pDrvRaidMap->ldCount = htole16(ld_count);
	pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fpPdIoTimeoutSec;
	pDrvRaidMap->totalSize = htole32(sizeof(MR_DRV_RAID_MAP_ALL));
	/* point to actual data starting point */
	raid_map_data = (char *)fw_map_dyn +
	    le32toh(fw_map_dyn->descTableOffset) +
	    le32toh(fw_map_dyn->descTableSize);

	/* Walk every descriptor and copy its region into the driver map. */
	for (i = 0; i < le32toh(fw_map_dyn->descTableNumElements); ++i) {
		if (!desc_table) {
			device_printf(sc->mrsas_dev,
			    "desc table is null, coming out %p \n", desc_table);
			return 1;
		}
#if VD_EXT_DEBUG
		device_printf(sc->mrsas_dev, "raid_map_data %p \n", raid_map_data);
		device_printf(sc->mrsas_dev,
		    "desc table %p \n", desc_table);
		device_printf(sc->mrsas_dev,
		    "raidmap type %d, raidmapOffset 0x%x, "
		    " raid map number of elements 0%x, raidmapsize 0x%x\n",
		    le32toh(desc_table->raidMapDescType), desc_table->raidMapDescOffset,
		    le32toh(desc_table->raidMapDescElements), desc_table->raidMapDescBufferSize);
#endif
		switch (le32toh(desc_table->raidMapDescType)) {
		case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
			/* Physical device handle table. */
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo = (MR_DEV_HANDLE_INFO *)
			    ((char *)raid_map_data + le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "devHndlInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo);
#endif
			memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.devHndlInfo,
			    sizeof(MR_DEV_HANDLE_INFO) * le32toh(desc_table->raidMapDescElements));
			break;
		case RAID_MAP_DESC_TYPE_TGTID_INFO:
			/* Target-ID to LD index translation table. */
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd = (u_int16_t *)
			    ((char *)raid_map_data +
			    le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "ldTgtIdToLd address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd);
#endif
			for (j = 0; j < le32toh(desc_table->raidMapDescElements); j++) {
				pDrvRaidMap->ldTgtIdToLd[j] = fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldTgtIdToLd[j];
#if VD_EXT_DEBUG
				device_printf(sc->mrsas_dev,
				    " %d drv ldTgtIdToLd %d\n", j, pDrvRaidMap->ldTgtIdToLd[j]);
#endif
			}
			break;
		case RAID_MAP_DESC_TYPE_ARRAY_INFO:
			/* Array (arm -> physical device) table. */
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo = (MR_ARRAY_INFO *) ((char *)raid_map_data +
			    le32toh(desc_table->raidMapDescOffset));
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "arMapInfo address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo);
#endif
			memcpy(pDrvRaidMap->arMapInfo, fw_map_dyn->RaidMapDescPtrs.ptrStruct.arMapInfo,
			    sizeof(MR_ARRAY_INFO) * le32toh(desc_table->raidMapDescElements));
			break;
		case RAID_MAP_DESC_TYPE_SPAN_INFO:
			/* Per-LD span map (the bulk of the RAID geometry). */
			fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap = (MR_LD_SPAN_MAP *) ((char *)raid_map_data +
			    le32toh(desc_table->raidMapDescOffset));
			memcpy(pDrvRaidMap->ldSpanMap, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap,
			    sizeof(MR_LD_SPAN_MAP) *
			    le32toh(desc_table->raidMapDescElements));
#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "ldSpanMap address %p\n", fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap);
			device_printf(sc->mrsas_dev,
			    "MR_LD_SPAN_MAP size 0x%lx\n", sizeof(MR_LD_SPAN_MAP));
			for (j = 0; j < ld_count; j++) {
				printf("mrsas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x "
				    "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
				    j, j, fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.targetId, j,
				    fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.seqNum,
				    (u_int32_t)fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid.rowSize);
				printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
				    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
				    j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId, j,
				    pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum,
				    (u_int32_t)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
				printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
				    drv_map, pDrvRaidMap, &fw_map_dyn->RaidMapDescPtrs.ptrStruct.ldSpanMap[j].ldRaid,
				    &pDrvRaidMap->ldSpanMap[j].ldRaid);
			}
#endif
			break;
		default:
			/* NOTE(review): descTableNumElements printed raw (LE). */
			device_printf(sc->mrsas_dev,
			    "wrong number of desctableElements %d\n",
			    fw_map_dyn->descTableNumElements);
		}
		++desc_table;
	}
	return 0;
}

/*
 * This function will Populate Driver Map using firmware raid map
 *
 * Dispatches on controller generation: dynamic (Ventura) maps are handled
 * by MR_PopulateDrvRaidMapVentura(); extended (256 VD) maps and legacy
 * maps are converted inline.  Returns 0 on success, 1 on failure.
 */
static int
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
	MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	MR_FW_RAID_MAP_EXT *fw_map_ext;
	MR_FW_RAID_MAP *pFwRaidMap = NULL;
	unsigned int i;
	u_int16_t ld_count;

	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (sc->maxRaidMapSize) {
		/* Dynamic raid map (Ventura and newer). */
		return MR_PopulateDrvRaidMapVentura(sc);
	} else if (sc->max256vdSupport) {
		/* Extended (256 VD) firmware map. */
		fw_map_ext = (MR_FW_RAID_MAP_EXT *) sc->raidmap_mem[(sc->map_id & 1)];
		ld_count = (u_int16_t)le16toh(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			device_printf(sc->mrsas_dev,
			    "mrsas: LD count exposed in RAID map in not valid\n");
			return 1;
		}
#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			printf("mrsas : Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n",
			    i, fw_map_ext->ldSpanMap[i].ldRaid.targetId,
			    fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
			    fw_map_ext->ldSpanMap[i].ldRaid.size);
		}
#endif
		memset(drv_map, 0, sc->drv_map_sz);
		pDrvRaidMap->ldCount = htole16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		/* Widen the 8-bit firmware translation entries to 16 bits. */
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) {
			pDrvRaidMap->ldTgtIdToLd[i] = (u_int16_t)fw_map_ext->ldTgtIdToLd[i];
		}
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, sizeof(MR_LD_SPAN_MAP) * ld_count);
#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			printf("mrsas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId, i,
			    fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
			printf("mrsas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			printf("mrsas : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
			    &pDrvRaidMap->ldSpanMap[i].ldRaid);
		}
#endif
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES);

		pDrvRaidMap->totalSize = htole32(sizeof(MR_FW_RAID_MAP_EXT));
	} else {
		/* Legacy firmware map. */
		fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
		for (i = 0; i < le32toh(pFwRaidMap->ldCount); i++) {
			device_printf(sc->mrsas_dev,
			    "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n", i,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, sc->drv_map_sz);
		/* totalSize/ldCount are copied in wire (little-endian) order. */
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
		pDrvRaidMap->fpPdIoTimeoutSec =
		    pFwRaidMap->fpPdIoTimeoutSec;

		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
			pDrvRaidMap->ldTgtIdToLd[i] =
			    (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
		}

		/*
		 * NOTE(review): loop bound uses ldCount without le16toh,
		 * unlike the le16toh(pDrvRaidMap->ldCount) usage elsewhere
		 * in this file -- harmless on little-endian hosts; confirm
		 * before running on big-endian.
		 */
		for (i = 0; i < pDrvRaidMap->ldCount; i++) {
			pDrvRaidMap->ldSpanMap[i] =
			    pFwRaidMap->ldSpanMap[i];

#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i,
			    pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap,
			    &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}

		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) *
		    MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
	return 0;
}

/*
 * MR_ValidateMapInfo:	Validate RAID map
 * input:		Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map. It returns 0 if
 * successful, and 1 otherwise.
528 */ 529 u_int8_t 530 MR_ValidateMapInfo(struct mrsas_softc *sc) 531 { 532 if (!sc) { 533 return 1; 534 } 535 if (MR_PopulateDrvRaidMap(sc)) 536 return 0; 537 538 MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)]; 539 MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; 540 541 u_int32_t expected_map_size; 542 543 drv_map = sc->ld_drv_map[(sc->map_id & 1)]; 544 pDrvRaidMap = &drv_map->raidMap; 545 PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) & sc->log_to_span; 546 547 if (sc->maxRaidMapSize) 548 expected_map_size = sizeof(MR_DRV_RAID_MAP_ALL); 549 else if (sc->max256vdSupport) 550 expected_map_size = sizeof(MR_FW_RAID_MAP_EXT); 551 else 552 expected_map_size = 553 (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) + 554 (sizeof(MR_LD_SPAN_MAP) * le16toh(pDrvRaidMap->ldCount)); 555 556 if (le32toh(pDrvRaidMap->totalSize) != expected_map_size) { 557 device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size); 558 device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP)); 559 device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", le32toh(pDrvRaidMap->totalSize)); 560 return 1; 561 } 562 if (sc->UnevenSpanSupport) { 563 mr_update_span_set(drv_map, ldSpanInfo); 564 } 565 mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info); 566 567 return 0; 568 } 569 570 /* 571 * 572 * Function to print info about span set created in driver from FW raid map 573 * 574 * Inputs: map 575 * ldSpanInfo: ld map span info per HBA instance 576 * 577 * 578 */ 579 #if SPAN_DEBUG 580 static int 581 getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo) 582 { 583 584 u_int8_t span; 585 u_int32_t element; 586 MR_LD_RAID *raid; 587 LD_SPAN_SET *span_set; 588 MR_QUAD_ELEMENT *quad; 589 int ldCount; 590 u_int16_t ld; 591 592 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { 593 ld = MR_TargetIdToLdGet(ldCount, map); 594 if (ld >= MAX_LOGICAL_DRIVES) { 595 continue; 596 } 597 raid = 
MR_LdRaidGet(ld, map); 598 printf("LD %x: span_depth=%x\n", ld, raid->spanDepth); 599 for (span = 0; span < raid->spanDepth; span++) 600 printf("Span=%x, number of quads=%x\n", span, 601 le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. 602 block_span_info.noElements)); 603 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 604 span_set = &(ldSpanInfo[ld].span_set[element]); 605 if (span_set->span_row_data_width == 0) 606 break; 607 608 printf("Span Set %x: width=%x, diff=%x\n", element, 609 (unsigned int)span_set->span_row_data_width, 610 (unsigned int)span_set->diff); 611 printf("logical LBA start=0x%08lx, end=0x%08lx\n", 612 (long unsigned int)span_set->log_start_lba, 613 (long unsigned int)span_set->log_end_lba); 614 printf("span row start=0x%08lx, end=0x%08lx\n", 615 (long unsigned int)span_set->span_row_start, 616 (long unsigned int)span_set->span_row_end); 617 printf("data row start=0x%08lx, end=0x%08lx\n", 618 (long unsigned int)span_set->data_row_start, 619 (long unsigned int)span_set->data_row_end); 620 printf("data strip start=0x%08lx, end=0x%08lx\n", 621 (long unsigned int)span_set->data_strip_start, 622 (long unsigned int)span_set->data_strip_end); 623 624 for (span = 0; span < raid->spanDepth; span++) { 625 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 626 block_span_info.noElements >= element + 1) { 627 quad = &map->raidMap.ldSpanMap[ld]. 628 spanBlock[span].block_span_info. 629 quad[element]; 630 printf("Span=%x, Quad=%x, diff=%x\n", span, 631 element, le32toh(quad->diff)); 632 printf("offset_in_span=0x%08lx\n", 633 (long unsigned int)le64toh(quad->offsetInSpan)); 634 printf("logical start=0x%08lx, end=0x%08lx\n", 635 (long unsigned int)le64toh(quad->logStart), 636 (long unsigned int)le64toh(quad->logEnd)); 637 } 638 } 639 } 640 } 641 return 0; 642 } 643 644 #endif 645 /* 646 * 647 * This routine calculates the Span block for given row using spanset. 
 *
 * Inputs :	HBA instance
 * ld:		Logical drive number
 * row:		Row number
 * map:		LD map
 *
 * Outputs :	span	- Span number block
 *		- Absolute Block number in the physical disk
 *		div_error - Devide error code.
 */

u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	u_int32_t span, info;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

	/* Walk this LD's span sets until one covers the requested row. */
	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		/* A zero data width terminates the populated span sets. */
		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements) >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].
				    block_span_info.quad[info];
				/*
				 * diff is the quad's row stride and is used
				 * as a divisor below; flag a zero stride
				 * instead of dividing.  (== 0 is endian
				 * neutral, so no le32toh is needed here.)
				 */
				if (quad->diff == 0) {
					*div_error = 1;
					return span;
				}
				/*
				 * The quad covers rows [logStart, logEnd]
				 * in steps of diff; the row must land
				 * exactly on a step.
				 */
				if (le64toh(quad->logStart) <= row &&
				    row <= le64toh(quad->logEnd) &&
				    (mega_mod64(row - le64toh(quad->logStart),
				    le32toh(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u_int64_t blk;

						/*
						 * Block = (row index within
						 * quad + quad offset) scaled
						 * by the stripe size.
						 */
						blk = mega_div64_32
						    ((row - le64toh(quad->logStart)),
						    le32toh(quad->diff));
						blk = (blk + le64toh(quad->offsetInSpan))
						    << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	/* No span set / quad covered the row. */
	return SPAN_INVALID;
}

/*
 *
 * This routine calculates the row for given strip using spanset.
711 * 712 * Inputs : HBA instance 713 * ld: Logical drive number 714 * Strip: Strip 715 * map: LD map 716 * 717 * Outputs : row - row associated with strip 718 */ 719 720 static u_int64_t 721 get_row_from_strip(struct mrsas_softc *sc, 722 u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map) 723 { 724 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 725 LD_SPAN_SET *span_set; 726 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 727 u_int32_t info, strip_offset, span, span_offset; 728 u_int64_t span_set_Strip, span_set_Row; 729 730 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 731 span_set = &(ldSpanInfo[ld].span_set[info]); 732 733 if (span_set->span_row_data_width == 0) 734 break; 735 if (strip > span_set->data_strip_end) 736 continue; 737 738 span_set_Strip = strip - span_set->data_strip_start; 739 strip_offset = mega_mod64(span_set_Strip, 740 span_set->span_row_data_width); 741 span_set_Row = mega_div64_32(span_set_Strip, 742 span_set->span_row_data_width) * span_set->diff; 743 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 744 if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
745 block_span_info.noElements) >= info + 1) { 746 if (strip_offset >= 747 span_set->strip_offset[span]) 748 span_offset++; 749 else 750 break; 751 } 752 mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx " 753 "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip, 754 (unsigned long long)span_set_Strip, 755 (unsigned long long)span_set_Row, 756 (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset); 757 mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip, 758 (unsigned long long)span_set->data_row_start + 759 (unsigned long long)span_set_Row + (span_offset - 1)); 760 return (span_set->data_row_start + span_set_Row + (span_offset - 1)); 761 } 762 return -1LLU; 763 } 764 765 /* 766 * 767 * This routine calculates the Start Strip for given row using spanset. 768 * 769 * Inputs: HBA instance 770 * ld: Logical drive number 771 * row: Row number 772 * map: LD map 773 * 774 * Outputs : Strip - Start strip associated with row 775 */ 776 777 static u_int64_t 778 get_strip_from_row(struct mrsas_softc *sc, 779 u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map) 780 { 781 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 782 LD_SPAN_SET *span_set; 783 MR_QUAD_ELEMENT *quad; 784 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 785 u_int32_t span, info; 786 u_int64_t strip; 787 788 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 789 span_set = &(ldSpanInfo[ld].span_set[info]); 790 791 if (span_set->span_row_data_width == 0) 792 break; 793 if (row > span_set->data_row_end) 794 continue; 795 796 for (span = 0; span < raid->spanDepth; span++) 797 if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. 798 block_span_info.noElements) >= info + 1) { 799 quad = &map->raidMap.ldSpanMap[ld]. 
800 spanBlock[span].block_span_info.quad[info]; 801 if (le64toh(quad->logStart) <= row && 802 row <= le64toh(quad->logEnd) && 803 mega_mod64((row - le64toh(quad->logStart)), 804 le32toh(quad->diff)) == 0) { 805 strip = mega_div64_32 806 (((row - span_set->data_row_start) 807 - le64toh(quad->logStart)), 808 le32toh(quad->diff)); 809 strip *= span_set->span_row_data_width; 810 strip += span_set->data_strip_start; 811 strip += span_set->strip_offset[span]; 812 return strip; 813 } 814 } 815 } 816 mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug - get_strip_from_row: returns invalid " 817 "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row); 818 return -1; 819 } 820 821 /* 822 * ***************************************************************************** 823 * 824 * 825 * This routine calculates the Physical Arm for given strip using spanset. 826 * 827 * Inputs : HBA instance 828 * Logical drive number 829 * Strip 830 * LD map 831 * 832 * Outputs : Phys Arm - Phys Arm associated with strip 833 */ 834 835 static u_int32_t 836 get_arm_from_strip(struct mrsas_softc *sc, 837 u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map) 838 { 839 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 840 LD_SPAN_SET *span_set; 841 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 842 u_int32_t info, strip_offset, span, span_offset; 843 844 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 845 span_set = &(ldSpanInfo[ld].span_set[info]); 846 847 if (span_set->span_row_data_width == 0) 848 break; 849 if (strip > span_set->data_strip_end) 850 continue; 851 852 strip_offset = (u_int32_t)mega_mod64 853 ((strip - span_set->data_strip_start), 854 span_set->span_row_data_width); 855 856 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 857 if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
		    block_span_info.noElements) >= info + 1) {
				if (strip_offset >= span_set->strip_offset[span])
					span_offset = span_set->strip_offset[span];
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: "
		    "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
		    (long unsigned int)strip, (strip_offset - span_offset));
		return (strip_offset - span_offset);
	}

	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: - get_arm_from_strip: returns invalid arm"
	    " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

	return -1;
}

/*
 * This Function will return Phys arm for the given strip.
 *
 * For RAID 0/5/6 the arm is simply the strip modulo the span row size;
 * for RAID 1 the logical arm from the strip is doubled because each
 * logical arm maps to a data/mirror pair.
 */
u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

	/* Need to check correct default value */
	u_int32_t arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(sc, ld, stripe, map);
		arm *= 2;
		break;
	}

	return arm;
}

/*
 *
 * This routine calculates the arm, span and block for the specified stripe and
 * reference in stripe using spanset
 *
 * Inputs :
 *	sc - HBA instance
 *	ld - Logical drive number
 *	stripRow: Stripe number
 *	stripRef: Reference in stripe
 *
 * Outputs :	span - Span number
 *		block - Absolute Block number in the physical disk
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef, r1_alt_pd;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int8_t *pPdInterface = &io_info->pdInterface;

	u_int32_t logArm, rowMod, armQ, arm;

	/* Get row and span from io_info for Uneven Span IO. */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		/* RAID 6: data arm logically follows the rotating Q drive. */
		logArm = get_arm_from_strip(sc, ld, stripRow, map);
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u_int8_t)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(sc, ld, span, stripRow, map);

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((raid->level == 1) && !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		/* Arm's PD is missing: mark handle invalid so FP is rejected. */
		*pDevHandle = htole16(MR_DEVHANDLE_INVALID);
		if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) ||
		    (sc->mrsas_gen3_ctrl &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* For RAID 1, fall back to the mirror arm's PD. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (sc->is_ventura || sc->is_aero) {
		((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm =
		    (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->spanArm;
	}
	return retval;
}

/*
 * MR_BuildRaidContext: Set up Fast path RAID context
 *
 * This function will initiate command processing. The start/end row and strip
 * information is calculated then the lock is acquired. This function will
 * return 0 if region lock was acquired OR return num strips.
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, stripSize, stripe_mask;
	u_int64_t endLba, endStrip, endRow, start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	u_int8_t num_strips, numRows;
	u_int16_t ref_in_start_stripe, ref_in_end_stripe;
	u_int64_t ldStartBlock;
	u_int32_t numBlocks, ldTgtId;
	u_int8_t isRead, stripIdx;
	u_int8_t retval = 0;
	u_int8_t startlba_span = SPAN_INVALID;
	u_int64_t *pdBlock = &io_info->pdBlock;
	int error_code = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/* check read ahead bit */
	io_info->raCapable = raid->capability.raCapable;

	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (sc->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
			    " but there is _NO_ UnevenSpanSupport\n",
			    MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}
	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(sc, ld, start_strip, map);
		endRow = get_row_from_strip(sc, ld, endStrip, map);
		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else {
			startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
			    pdBlock, map, &error_code);
			if (error_code == 1) {
				mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n",
				    __func__, __LINE__);
				return FALSE;
			}
		}
		if (startlba_span == SPAN_INVALID) {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d for row 0x%llx,"
			    "start strip %llx endSrip %llx\n", __func__,
			    __LINE__, (unsigned long long)start_row,
			    (unsigned long long)start_strip,
			    (unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: Check Span number from %s %d for row 0x%llx, "
		    " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
		    __func__, __LINE__, (unsigned long long)start_row,
		    (unsigned long long)start_strip,
		    (unsigned long long)endStrip, startlba_span);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
		    (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}

	numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

	/*
	 * Calculate region info. (Assume region at start of first row, and
	 * assume this IO needs the full row - will adjust if not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* single-row IO: shrink the region to just the blocks touched */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization. If the start strip
		 * is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		/* Uneven span: same optimization using per-span strip geometry. */
		if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
		    SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(sc, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}
	pRAID_Context->timeoutValue = htole16(map->raidMap.fpPdIoTimeoutSec);
	if (sc->mrsas_gen3_ctrl)
		pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (sc->device_id == MRSAS_TBOLT)
		pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = htole64(regStart);
	pRAID_Context->regLockLength = htole32(regSize);
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
		    mr_spanset_get_phy_params(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map) :
		    MR_GetPhyParams(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = FALSE;
		/*
		 * if FP possible, set the SLUD bit in regLockFlags for
		 * ventura
		 */
		else if ((sc->is_ventura || sc->is_aero) && !isRead &&
		    (raid->writeMode == MR_RL_WRITE_BACK_MODE) && (raid->level <= 1) &&
		    raid->capability.fpCacheBypassCapable) {
			((RAID_CONTEXT_G35 *) pRAID_Context)->routingFlags.bits.sld = 1;
		}

		return retval;
	} else if (isRead) {
		/* Non-FP read: probe every strip; bail out on the first failure. */
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
			    mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map) :
			    MR_GetPhyParams(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}
#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	get_arm_from_strip(sc, ld, start_strip, map);
#endif
	return TRUE;
}

/*
 *
 * This routine prepares spanset info from Valid Raid map and store it into local
 * copy of ldSpanInfo per instance data structure.
 *
 * Inputs : LD map
 *	    ldSpanInfo per HBA instance
 *
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	u_int8_t span, count;
	u_int32_t element, span_row_width;
	u_int64_t span_row;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			/* Find the first span that has a quad at this depth. */
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements) < element + 1)
					continue;
				/* TO-DO */
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[element];

				span_set->diff = le32toh(quad->diff);

				/*
				 * Accumulate the row data width across every
				 * span participating at this quad depth and
				 * record each span's strip offset.
				 */
				for (count = 0, span_row_width = 0;
				    count < raid->spanDepth; count++) {
					if (le32toh(map->raidMap.ldSpanMap[ld].spanBlock[count].
					    block_span_info.noElements) >= element + 1) {
						span_set->strip_offset[count] = span_row_width;
						span_row_width +=
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
						printf("AVAGO Debug span %x rowDataSize %x\n", count,
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
					}
				}

				span_set->span_row_data_width = span_row_width;
				/* Number of rows covered by this quad element. */
				span_row = mega_div64_32(((le64toh(quad->logEnd) -
				    le64toh(quad->logStart)) + le32toh(quad->diff)),
				    le32toh(quad->diff));

				if (element == 0) {
					/* First span set starts at LBA/row/strip zero. */
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end = (span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
					    (span_row * le32toh(quad->diff)) - 1;
				} else {
					/* Subsequent sets continue where the previous one ended. */
					span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
					span_set->log_start_lba = span_set_prev->log_end_lba + 1;
					span_set->log_end_lba = span_set->log_start_lba +
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					    span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					    span_set_prev->data_strip_end + 1;
					span_set->data_strip_end = span_set->data_strip_start +
					    (span_row * span_row_width) - 1;

					span_set->data_row_start = span_set_prev->data_row_end + 1;
					span_set->data_row_end = span_set->data_row_start +
					    (span_row * le32toh(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads remain */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);	/* to get span set info */
#endif
}

/*
 *
mrsas_update_load_balance_params: Update load balance parmas 1311 * Inputs: 1312 * sc - driver softc instance 1313 * drv_map - driver RAID map 1314 * lbInfo - Load balance info 1315 * 1316 * This function updates the load balance parameters for the LD config of a two 1317 * drive optimal RAID-1. 1318 */ 1319 void 1320 mrsas_update_load_balance_params(struct mrsas_softc *sc, 1321 MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo) 1322 { 1323 int ldCount; 1324 u_int16_t ld; 1325 MR_LD_RAID *raid; 1326 1327 if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1) 1328 sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; 1329 1330 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { 1331 ld = MR_TargetIdToLdGet(ldCount, drv_map); 1332 if (ld >= MAX_LOGICAL_DRIVES_EXT) { 1333 lbInfo[ldCount].loadBalanceFlag = 0; 1334 continue; 1335 } 1336 raid = MR_LdRaidGet(ld, drv_map); 1337 le32_to_cpus(&raid->capability); 1338 if ((raid->level != 1) || 1339 (raid->ldState != MR_LD_STATE_OPTIMAL)) { 1340 lbInfo[ldCount].loadBalanceFlag = 0; 1341 continue; 1342 } 1343 lbInfo[ldCount].loadBalanceFlag = 1; 1344 } 1345 } 1346 1347 /* 1348 * mrsas_set_pd_lba: Sets PD LBA 1349 * input: io_request pointer 1350 * CDB length 1351 * io_info pointer 1352 * Pointer to CCB 1353 * Local RAID map pointer 1354 * Start block of IO Block Size 1355 * 1356 * Used to set the PD logical block address in CDB for FP IOs. 
 */
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
    struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size)
{
	MR_LD_RAID *raid;
	u_int32_t ld;
	u_int64_t start_blk = io_info->pdBlock;
	u_int8_t *cdb = io_request->CDB.CDB32;
	u_int32_t num_blocks = io_info->numBlocks;
	u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		/* Build a 32-byte variable-length READ(32)/WRITE(32) CDB. */
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

		/*
		 * NOTE(review): this compares the whole flags word to
		 * CAM_DIR_OUT instead of masking with CAM_DIR_MASK —
		 * presumably safe for how this driver builds CCBs; verify.
		 */
		if (ccb_h->flags == CAM_DIR_OUT)
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
		else
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
		cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;

		/* LBA (big-endian byte order per SCSI) */
		cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
		cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
		cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
		cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
		cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
		cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
		cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
		cdb[19] = (u_int8_t)(start_blk & 0xff);

		/* Logical block reference tag */
		io_request->CDB.EEDP32.PrimaryReferenceTag = htobe32(ref_tag);
		io_request->CDB.EEDP32.PrimaryApplicationTagMask = htobe16(0xffff);
		io_request->IoFlags = htole16(32);	/* Specify 32-byte cdb */

		/* Transfer length */
		cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
		cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
		cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
		cdb[31] = (u_int8_t)(num_blocks & 0xff);

		/* set SCSI IO EEDP Flags */
		if (ccb_h->flags == CAM_DIR_OUT) {
			io_request->EEDPFlags = htole16(
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
		} else {
			io_request->EEDPFlags = htole16(
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
		}
		io_request->Control |= htole32(0x4 << 26);
		io_request->EEDPBlockSize = htole32(ld_block_size);
	} else {
		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
		if (((cdb_len == 12) || (cdb_len == 16)) &&
		    (start_blk <= 0xffffffff)) {
			if (cdb_len == 16) {
				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[14];
				control = cdb[15];
			} else {
				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = htole16(10);	/* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u_int8_t)(num_blocks & 0xff);
			cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
			cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
			cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = htole16(16);	/* Specify 16-byte cdb */
			cdb_len = 16;
		} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
			/* convert to 10 byte CDB: 6-byte CDB only addresses 21 LBA bits */
			opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
			control = cdb[5];

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
			cdb[0] = opcode;
			cdb[9] = control;

			/* Set transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			/* Specify 10-byte cdb */
			cdb_len = 10;
		}
		/* Fall through normal case, just load LBA here */
		/* preserve the LUN/flag bits of byte 1 for the 6-byte form */
		u_int8_t val = cdb[1] & 0xE0;

		switch (cdb_len) {
		case 6:
			cdb[3] = (u_int8_t)(start_blk & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
			break;
		case 10:
			cdb[5] = (u_int8_t)(start_blk & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u_int8_t)(start_blk & 0xff);
			cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
			cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
			break;
		}
	}
}

/*
 * mrsas_get_best_arm_pd: Determine the best spindle arm
 * Inputs:
 *	sc - HBA instance
 *	lbInfo - Load balance info
 *	io_info - IO request info
 *
 * This function determines and returns the best arm by looking at the
 * parameters of the last PD access.
 */
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	MR_LD_RAID *raid;
	MR_DRV_RAID_MAP_ALL *drv_map;
	u_int16_t pd1_devHandle;
	u_int16_t pend0, pend1, ld;
	u_int64_t diff0, diff1;
	u_int8_t bestArm, pd0, pd1, span, arm;
	u_int32_t arRef, span_row_size;

	u_int64_t block = io_info->ldStartBlock;
	u_int32_t count = io_info->numBlocks;

	/* Decode span and data arm from the packed span_arm field. */
	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
	    >> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = sc->UnevenSpanSupport ?
	    SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	/* pd0 is the data arm, pd1 its mirror (wrapping within the row). */
	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
	    (arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */
	pd1_devHandle = MR_PdDevHandleGet(pd1, drv_map);
	if (pd1_devHandle == MR_DEVHANDLE_INVALID) {
		/*
		 * Mirror missing: keep the original arm.
		 * NOTE(review): io_info->pd_after_lb is not set on this
		 * path yet is returned below — presumably initialized by
		 * the caller's IO build path; verify.
		 */
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Override seek proximity if the chosen arm is too busy. */
		if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
		    (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
#if SPAN_DEBUG
	if (arm != bestArm)
		printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x "
		    "io_info->span_arm 0x%x\n",
		    span, arm, bestArm, io_info->span_arm);
#endif

	return io_info->pd_after_lb;
}

/*
 * mrsas_get_updated_dev_handle: Get the update dev handle
 * Inputs:
 *	sc - Adapter instance soft state
 *	lbInfo - Load balance info
 *	io_info - io_info pointer
 *
 * This function determines and returns the updated dev handle.
1614 */ 1615 u_int16_t 1616 mrsas_get_updated_dev_handle(struct mrsas_softc *sc, 1617 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info) 1618 { 1619 u_int8_t arm_pd; 1620 u_int16_t devHandle; 1621 MR_DRV_RAID_MAP_ALL *drv_map; 1622 1623 drv_map = sc->ld_drv_map[(sc->map_id & 1)]; 1624 1625 /* get best new arm */ 1626 arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info); 1627 devHandle = MR_PdDevHandleGet(arm_pd, drv_map); 1628 io_info->pdInterface = MR_PdInterfaceTypeGet(arm_pd, drv_map); 1629 mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); 1630 1631 return devHandle; 1632 } 1633 1634 /* 1635 * MR_GetPhyParams: Calculates arm, span, and block 1636 * Inputs: Adapter soft state 1637 * Logical drive number (LD) 1638 * Stripe number(stripRow) 1639 * Reference in stripe (stripRef) 1640 * 1641 * Outputs: Absolute Block number in the physical disk 1642 * 1643 * This routine calculates the arm, span and block for the specified stripe and 1644 * reference in stripe. 1645 */ 1646 u_int8_t 1647 MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld, 1648 u_int64_t stripRow, 1649 u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, 1650 RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map) 1651 { 1652 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 1653 u_int32_t pd, arRef, r1_alt_pd; 1654 u_int8_t physArm, span; 1655 u_int64_t row; 1656 u_int8_t retval = TRUE; 1657 int error_code = 0; 1658 u_int64_t *pdBlock = &io_info->pdBlock; 1659 u_int16_t *pDevHandle = &io_info->devHandle; 1660 u_int8_t *pPdInterface = &io_info->pdInterface; 1661 u_int32_t rowMod, armQ, arm, logArm; 1662 1663 row = mega_div64_32(stripRow, raid->rowDataSize); 1664 1665 if (raid->level == 6) { 1666 /* logical arm within row */ 1667 logArm = mega_mod64(stripRow, raid->rowDataSize); 1668 if (raid->rowSize == 0) 1669 return FALSE; 1670 rowMod = mega_mod64(row, raid->rowSize); /* get logical row mod */ 1671 armQ = raid->rowSize - 1 - rowMod; /* index of Q drive */ 1672 arm = armQ + 1 + logArm;/* 
data always logically follows Q */ 1673 if (arm >= raid->rowSize) /* handle wrap condition */ 1674 arm -= raid->rowSize; 1675 physArm = (u_int8_t)arm; 1676 } else { 1677 if (raid->modFactor == 0) 1678 return FALSE; 1679 physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map); 1680 } 1681 1682 if (raid->spanDepth == 1) { 1683 span = 0; 1684 *pdBlock = row << raid->stripeShift; 1685 } else { 1686 span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code); 1687 if (error_code == 1) 1688 return FALSE; 1689 } 1690 1691 /* Get the array on which this span is present */ 1692 arRef = MR_LdSpanArrayGet(ld, span, map); 1693 1694 pd = MR_ArPdGet(arRef, physArm, map); /* Get the Pd. */ 1695 1696 if (pd != MR_PD_INVALID) { 1697 /* Get dev handle from Pd */ 1698 *pDevHandle = MR_PdDevHandleGet(pd, map); 1699 *pPdInterface = MR_PdInterfaceTypeGet(pd, map); 1700 /* get second pd also for raid 1/10 fast path writes */ 1701 if ((raid->level == 1) && !io_info->isRead) { 1702 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); 1703 if (r1_alt_pd != MR_PD_INVALID) 1704 io_info->r1_alt_dev_handle = MR_PdDevHandleGet(r1_alt_pd, map); 1705 } 1706 } else { 1707 *pDevHandle = htole16(MR_DEVHANDLE_INVALID); /* set dev handle as invalid. */ 1708 if ((raid->level >= 5) && ((sc->device_id == MRSAS_TBOLT) || 1709 (sc->mrsas_gen3_ctrl && 1710 raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) 1711 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 1712 else if (raid->level == 1) { 1713 /* Get Alternate Pd. */ 1714 pd = MR_ArPdGet(arRef, physArm + 1, map); 1715 if (pd != MR_PD_INVALID) { 1716 /* Get dev handle from Pd. 
*/ 1717 *pDevHandle = MR_PdDevHandleGet(pd, map); 1718 *pPdInterface = MR_PdInterfaceTypeGet(pd, map); 1719 } 1720 } 1721 } 1722 1723 *pdBlock += stripRef + le64toh(MR_LdSpanPtrGet(ld, span, map)->startBlk); 1724 if (sc->is_ventura || sc->is_aero) { 1725 ((RAID_CONTEXT_G35 *) pRAID_Context)->spanArm = 1726 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 1727 io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 1728 } else { 1729 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 1730 io_info->span_arm = pRAID_Context->spanArm; 1731 } 1732 return retval; 1733 } 1734 1735 /* 1736 * MR_GetSpanBlock: Calculates span block 1737 * Inputs: LD 1738 * row PD 1739 * span block 1740 * RAID map pointer 1741 * 1742 * Outputs: Span number Error code 1743 * 1744 * This routine calculates the span from the span block info. 1745 */ 1746 u_int32_t 1747 MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk, 1748 MR_DRV_RAID_MAP_ALL * map, int *div_error) 1749 { 1750 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); 1751 MR_QUAD_ELEMENT *quad; 1752 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 1753 u_int32_t span, j; 1754 u_int64_t blk; 1755 1756 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { 1757 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { 1758 quad = &pSpanBlock->block_span_info.quad[j]; 1759 if (quad->diff == 0) { 1760 *div_error = 1; 1761 return span; 1762 } 1763 if (quad->logStart <= row && row <= quad->logEnd && 1764 (mega_mod64(row - quad->logStart, quad->diff)) == 0) { 1765 if (span_blk != NULL) { 1766 blk = mega_div64_32((row - quad->logStart), quad->diff); 1767 blk = (blk + quad->offsetInSpan) << raid->stripeShift; 1768 *span_blk = blk; 1769 } 1770 return span; 1771 } 1772 } 1773 } 1774 return span; 1775 } 1776