/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES, 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>


/*
 * Function prototypes
 */
u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * map, PLD_LOAD_BALANCE_INFO lbInfo);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map);
static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm,
    MR_DRV_RAID_MAP_ALL * map);
static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
    MR_DRV_RAID_MAP_ALL * map);
static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld,
    MR_DRV_RAID_MAP_ALL * map);
MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
void MR_PopulateDrvRaidMap(struct mrsas_softc *sc);


/*
 * Spanset related function prototypes. Added for PRL11 configuration
 * (uneven span support).
 */
void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo);
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
static u_int64_t
get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t strip, MR_DRV_RAID_MAP_ALL * map);
static u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
static u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
    u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map);


/*
 * Spanset related defines. Added for PRL11 configuration (uneven span
 * support).
 */
#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) \
	MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize
#define SPAN_INVALID	0xff
#define SPAN_DEBUG	0

/*
 * Related Defines
 */

typedef u_int64_t REGION_KEY;
typedef u_int32_t REGION_LEN;

#define MR_LD_STATE_OPTIMAL	3
#define FALSE			0
#define TRUE			1

#define LB_PENDING_CMDS_DEFAULT	4


/*
 * Related Macros
 */

#define ABS_DIFF(a,b)	( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )

#define swap32(x) \
  ((unsigned int)( \
    (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
    (((unsigned int)(x) & (unsigned int)0x0000ff00UL) <<  8) | \
    (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >>  8) | \
    (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))


/*
 * In-line functions for mod and divide of a 64-bit dividend by a 32-bit
 * divisor. The divisor is assumed to be non-zero; no zero check is
 * performed.
 *
 * @param dividend:	Dividend
 * @param divisor:	Divisor
 * @return remainder (or quotient, respectively)
 */

#define mega_mod64(dividend, divisor) ({ \
	u_int32_t remainder; \
	remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
	remainder;})

#define mega_div64_32(dividend, divisor) ({ \
	u_int64_t quotient; \
	quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
	quotient;})
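
/*
 * Worked example (illustrative values only, not driver state): for
 * ldStartBlock = 1000005 on an LD with rowDataSize = 3 and
 * stripeShift = 7 (128-block, i.e. 64 KB strips):
 *
 *	strip = 1000005 >> 7           = 7812	(strip number)
 *	row   = mega_div64_32(7812, 3) = 2604	(row on each arm)
 *	arm   = mega_mod64(7812, 3)    = 0	(logical data arm)
 *
 * The same helpers are used throughout this file whenever a 64-bit LBA
 * or strip count must be split by a 32-bit geometry value.
 */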


/*
 * Various RAID map access functions. These functions access the various
 * parts of the RAID map and return the appropriate parameters.
 */

MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}

static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.arMapInfo[ar].pd[arm];
}

static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

u_int16_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/*
	 * Check if the logical drive was removed.
	 */
	if (ld >= MAX_LOGICAL_DRIVES)
		return ldBlockSize;

	raid = MR_LdRaidGet(ld, map);
	ldBlockSize = raid->logicalBlockLength;
	if (!ldBlockSize)
		ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	return ldBlockSize;
}

/*
 * This function populates the driver RAID map using the firmware RAID map.
 */
void
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
	MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	MR_FW_RAID_MAP *pFwRaidMap = NULL;
	unsigned int i;

	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (sc->max256vdSupport) {
		memcpy(sc->ld_drv_map[sc->map_id & 1],
		    sc->raidmap_mem[sc->map_id & 1],
		    sc->current_map_sz);
		/*
		 * The new RAID map will not set totalSize, so keep the
		 * expected value for legacy code in MR_ValidateMapInfo().
		 */
		pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT);
	} else {
		fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
		for (i = 0; i < pFwRaidMap->ldCount; i++) {
			device_printf(sc->mrsas_dev,
			    "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n", i,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, sc->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
		pDrvRaidMap->fpPdIoTimeoutSec =
		    pFwRaidMap->fpPdIoTimeoutSec;

		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
			pDrvRaidMap->ldTgtIdToLd[i] =
			    (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
		}

		for (i = 0; i < pDrvRaidMap->ldCount; i++) {
			pDrvRaidMap->ldSpanMap[i] =
			    pFwRaidMap->ldSpanMap[i];

#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap,
			    &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}

		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) *
		    MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
}
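
/*
 * Note on the (sc->map_id & 1) indexing used above and throughout this
 * file: the driver keeps two copies of the RAID map and ping-pongs
 * between them as map updates arrive, so fast-path decisions are made
 * against the copy matching the current map generation while the other
 * copy may still be in the middle of being rewritten.
 */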

/*
 * MR_ValidateMapInfo:	Validate RAID map
 * input:		Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map. It returns 0 if
 * successful, and 1 otherwise.
 */
u_int8_t
MR_ValidateMapInfo(struct mrsas_softc *sc)
{
	MR_DRV_RAID_MAP_ALL *drv_map;
	MR_DRV_RAID_MAP *pDrvRaidMap;
	PLD_SPAN_INFO ldSpanInfo;
	u_int32_t expected_map_size;

	if (!sc) {
		return 1;
	}
	MR_PopulateDrvRaidMap(sc);

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;
	ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;

	if (sc->max256vdSupport)
		expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
	else
		expected_map_size =
		    (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
		    (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount);

	if (pDrvRaidMap->totalSize != expected_map_size) {
		device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size);
		device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
		device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize);
		return 1;
	}
	if (sc->UnevenSpanSupport) {
		mr_update_span_set(drv_map, ldSpanInfo);
	}
	mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info);

	return 0;
}
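
/*
 * Example (illustrative): a legacy map reporting ldCount = 4 is
 * expected to have totalSize equal to the fixed MR_FW_RAID_MAP header
 * plus four MR_LD_SPAN_MAP entries; any mismatch suggests a stale or
 * truncated map, and the validation above rejects it.
 */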

/*
 * Function to print info about the span set created in the driver from
 * the FW raid map.
 *
 * Inputs:	map
 *		ldSpanInfo: ld map span info per HBA instance
 */
#if SPAN_DEBUG
static int
getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{

	u_int8_t span;
	u_int32_t element;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES) {
			continue;
		}
		raid = MR_LdRaidGet(ld, map);
		printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			printf("Span=%x, number of quads=%x\n", span,
			    map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			if (span_set->span_row_data_width == 0)
				break;

			printf("Span Set %x: width=%x, diff=%x\n", element,
			    (unsigned int)span_set->span_row_data_width,
			    (unsigned int)span_set->diff);
			printf("logical LBA start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->log_start_lba,
			    (long unsigned int)span_set->log_end_lba);
			printf("span row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->span_row_start,
			    (long unsigned int)span_set->span_row_end);
			printf("data row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->data_row_start,
			    (long unsigned int)span_set->data_row_end);
			printf("data strip start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->data_strip_start,
			    (long unsigned int)span_set->data_strip_end);

			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements >= element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
					    spanBlock[span].block_span_info.
					    quad[element];
					printf("Span=%x, Quad=%x, diff=%x\n", span,
					    element, quad->diff);
					printf("offset_in_span=0x%08lx\n",
					    (long unsigned int)quad->offsetInSpan);
					printf("logical start=0x%08lx, end=0x%08lx\n",
					    (long unsigned int)quad->logStart,
					    (long unsigned int)quad->logEnd);
				}
			}
		}
	}
	return 0;
}

#endif
/*
 * This routine calculates the span block for a given row using the spanset.
 *
 * Inputs:	HBA instance
 *		ld: Logical drive number
 *		row: Row number
 *		map: LD map
 *
 * Outputs:	span - Span number
 *		block - Absolute block number in the physical disk
 *		div_error - Divide error code
 */

u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	u_int32_t span, info;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].
				    block_span_info.quad[info];
				if (quad->diff == 0) {
					*div_error = 1;
					return span;
				}
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    (mega_mod64(row - quad->logStart,
				    quad->diff)) == 0) {
					if (span_blk != NULL) {
						u_int64_t blk;

						blk = mega_div64_32
						    ((row - quad->logStart),
						    quad->diff);
						blk = (blk + quad->offsetInSpan)
						    << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
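
/*
 * Quad example (illustrative values): a quad with logStart = 0,
 * logEnd = 9, diff = 2 owns rows 0, 2, 4, 6, 8 of its span.  Row 6
 * matches the test above, and its span block is
 * ((6 - 0) / 2 + offsetInSpan) shifted left by stripeShift to convert
 * a row count into a block address.
 */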

/*
 * This routine calculates the row for a given strip using the spanset.
 *
 * Inputs:	HBA instance
 *		ld: Logical drive number
 *		Strip: Strip
 *		map: LD map
 *
 * Outputs:	row - row associated with strip
 */

static u_int64_t
get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;
	u_int64_t span_set_Strip, span_set_Row;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
		    span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
		    span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >=
				    span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
		    "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
		    (unsigned long long)span_set_Strip,
		    (unsigned long long)span_set_Row,
		    (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
		    (unsigned long long)span_set->data_row_start +
		    (unsigned long long)span_set_Row + (span_offset - 1));
		return (span_set->data_row_start + span_set_Row + (span_offset - 1));
	}
	return -1LLU;
}


/*
 * This routine calculates the start strip for a given row using the spanset.
 *
 * Inputs:	HBA instance
 *		ld: Logical drive number
 *		row: Row number
 *		map: LD map
 *
 * Outputs:	Strip - Start strip associated with row
 */

static u_int64_t
get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t span, info;
	u_int64_t strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[info];
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    mega_mod64((row - quad->logStart),
				    quad->diff) == 0) {
					strip = mega_div64_32
					    (((row - span_set->data_row_start)
					    - quad->logStart),
					    quad->diff);
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug - get_strip_from_row: returns invalid "
	    "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
	return -1;
}
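
/*
 * get_strip_from_row() is the inverse of get_row_from_strip() above: it
 * recovers the first data strip of a row within the matching span set,
 * which lets MR_BuildRaidContext() detect whether an uneven-span I/O
 * starts or ends exactly on a row boundary.
 */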

/*
 * This routine calculates the physical arm for a given strip using the
 * spanset.
 *
 * Inputs:	HBA instance
 *		Logical drive number
 *		Strip
 *		LD map
 *
 * Outputs:	Phys Arm - Phys Arm associated with strip
 */

static u_int32_t
get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (u_int32_t)mega_mod64
		    ((strip - span_set->data_strip_start),
		    span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >= span_set->strip_offset[span])
					span_offset = span_set->strip_offset[span];
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: "
		    "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
		    (long unsigned int)strip, (strip_offset - span_offset));
		return (strip_offset - span_offset);
	}

	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: - get_arm_from_strip: returns invalid arm"
	    " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

	return -1;
}


/* This function returns the physical arm. */
u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

	/* Need to check correct default value */
	u_int32_t arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(sc, ld, stripe, map);
		arm *= 2;
		break;
	}

	return arm;
}
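
/*
 * RAID-1 note: the array entries for a mirrored LD pair each data arm
 * with its mirror, so the logical arm index is doubled above to land on
 * the data copy; mrsas_get_best_arm_pd() below may later steer the I/O
 * to the mirror (arm ^ 1) when load balancing.
 */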

/*
 * This routine calculates the arm, span, and block for the specified
 * stripe and reference in stripe using the spanset.
 *
 * Inputs:	sc - HBA instance
 *		ld - Logical drive number
 *		stripRow: Stripe number
 *		stripRef: Reference in stripe
 *
 * Outputs:	span - Span number
 *		block - Absolute block number in the physical disk
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t logArm, rowMod, armQ, arm;

	/* Get row and span from io_info for uneven span IO. */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		logArm = get_arm_from_strip(sc, ld, stripRow, map);
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u_int8_t)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(sc, ld, span, stripRow, map);

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) && ((!sc->mrsas_gen3_ctrl) || (sc->mrsas_gen3_ctrl &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}
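
/*
 * RAID-6 rotation example for the branch above (illustrative values):
 * on a 4-arm span (SPAN_ROW_SIZE = 4), row 5 gives rowMod = 1, so the
 * Q arm sits at index 2; logical arm 1 then lands on physical arm
 * (2 + 1 + 1) mod 4 = 0, since data arms logically follow Q.
 */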

/*
 * MR_BuildRaidContext:	Set up fast path RAID context
 *
 * This function builds the RAID context for a fast-path I/O. The start/end
 * row and strip information is calculated and the region lock parameters
 * are set up. It returns TRUE if the context (and, for FP-capable I/O, the
 * physical parameters) could be built, and FALSE otherwise.
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, stripSize, stripe_mask;
	u_int64_t endLba, endStrip, endRow, start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	u_int8_t num_strips, numRows;
	u_int16_t ref_in_start_stripe, ref_in_end_stripe;
	u_int64_t ldStartBlock;
	u_int32_t numBlocks, ldTgtId;
	u_int8_t isRead, stripIdx;
	u_int8_t retval = 0;
	u_int8_t startlba_span = SPAN_INVALID;
	u_int64_t *pdBlock = &io_info->pdBlock;
	int error_code = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (sc->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%0x,"
			    " and there is _NO_ UnevenSpanSupport\n",
			    MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}
	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(sc, ld, start_strip, map);
		endRow = get_row_from_strip(sc, ld, endStrip, map);
		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else {
			startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
			    pdBlock, map, &error_code);
			if (error_code == 1) {
				mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n",
				    __func__, __LINE__);
				return FALSE;
			}
		}
		if (startlba_span == SPAN_INVALID) {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d for row 0x%llx, "
			    "start strip 0x%llx endStrip 0x%llx\n", __func__,
			    __LINE__, (unsigned long long)start_row,
			    (unsigned long long)start_strip,
			    (unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: Check Span number from %s %d for row 0x%llx, "
		    "start strip 0x%llx endStrip 0x%llx span 0x%x\n",
		    __func__, __LINE__, (unsigned long long)start_row,
		    (unsigned long long)start_strip,
		    (unsigned long long)endStrip, startlba_span);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
		    (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}

	numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

	/*
	 * Calculate region info. (Assume region at start of first row, and
	 * assume this IO needs the full row - will adjust if not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For even-span region lock optimization: if the start strip
		 * is the last one in the start row.
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
		    SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(sc, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}
	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
	if (sc->mrsas_gen3_ctrl)
		pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
		    mr_spanset_get_phy_params(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map) :
		    MR_GetPhyParams(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
			    mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map) :
			    MR_GetPhyParams(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}
#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	get_arm_from_strip(sc, ld, start_strip, map);
#endif
	return TRUE;
}
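
/*
 * Region-lock example for MR_BuildRaidContext() above (illustrative
 * values): with rowDataSize = 2 and 0x100-block strips, an even-span
 * I/O from the middle of strip 1 (the last strip of row 0) into the
 * middle of strip 4 (the first strip of row 2) covers numRows = 3, so
 * regSize becomes (0x100 - ref_in_start_stripe) + 0x100 +
 * (ref_in_end_stripe + 1): the partial first strip, one full middle
 * row, and the partial last strip.
 */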

/*
 * This routine prepares the span set info from the valid RAID map and
 * stores it in the local per-instance copy, ldSpanInfo.
 *
 * Inputs:	LD map
 *		ldSpanInfo per HBA instance
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	u_int8_t span, count;
	u_int32_t element, span_row_width;
	u_int64_t span_row;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements < element + 1)
					continue;
				/* TO-DO */
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[element];

				span_set->diff = quad->diff;

				for (count = 0, span_row_width = 0;
				    count < raid->spanDepth; count++) {
					if (map->raidMap.ldSpanMap[ld].spanBlock[count].
					    block_span_info.noElements >= element + 1) {
						span_set->strip_offset[count] = span_row_width;
						span_row_width +=
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
						printf("AVAGO Debug span %x rowDataSize %x\n", count,
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((quad->logEnd -
				    quad->logStart) + quad->diff), quad->diff);

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end = (span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end = (span_row * quad->diff) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
					span_set->log_start_lba = span_set_prev->log_end_lba + 1;
					span_set->log_end_lba = span_set->log_start_lba +
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					    span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					    span_set_prev->data_strip_end + 1;
					span_set->data_strip_end = span_set->data_strip_start +
					    (span_row * span_row_width) - 1;

					span_set->data_row_start = span_set_prev->data_row_end + 1;
					span_set->data_row_end = span_set->data_row_start +
					    (span_row * quad->diff) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads remain */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);	/* to get span set info */
#endif
}
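
/*
 * Span-set example (illustrative values): a two-span PRL11 LD whose
 * first quad set covers both spans with spanRowDataSize 3 and 2 gets
 * span_row_data_width = 5 and strip_offset[] = {0, 3}; strips 0-2 of a
 * span row then live on span 0 and strips 3-4 on span 1.
 */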

/*
 * mrsas_update_load_balance_params:	Update load balance params
 * Inputs:
 *	sc - driver softc instance
 *	drv_map - driver RAID map
 *	lbInfo - Load balance info
 *
 * This function updates the load balance parameters for the LD config of a
 * two-drive optimal RAID-1.
 */
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	int ldCount;
	u_int16_t ld;
	MR_LD_RAID *raid;

	if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1)
		sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
		    (raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}
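
/*
 * Only LDs flagged here are candidates for
 * mrsas_get_best_arm_pd()/mrsas_get_updated_dev_handle() below when a
 * fast-path I/O is steered between the two arms of a RAID-1.
 */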


/*
 * mrsas_set_pd_lba:	Sets PD LBA
 * input:	io_request pointer
 *		CDB length
 *		io_info pointer
 *		Pointer to CCB
 *		Local RAID map pointer
 *		Start block of IO
 *		Block size
 *
 * Used to set the PD logical block address in CDB for FP IOs.
 */
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
    struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size)
{
	MR_LD_RAID *raid;
	u_int32_t ld;
	u_int64_t start_blk = io_info->pdBlock;
	u_int8_t *cdb = io_request->CDB.CDB32;
	u_int32_t num_blocks = io_info->numBlocks;
	u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

		if (ccb_h->flags == CAM_DIR_OUT)
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
		else
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
		cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;

		/* LBA */
		cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
		cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
		cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
		cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
		cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
		cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
		cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
		cdb[19] = (u_int8_t)(start_blk & 0xff);

		/* Logical block reference tag */
		io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
		io_request->IoFlags = 32;	/* Specify 32-byte cdb */

		/* Transfer length */
		cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
		cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
		cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
		cdb[31] = (u_int8_t)(num_blocks & 0xff);

		/* set SCSI IO EEDP Flags */
		if (ccb_h->flags == CAM_DIR_OUT) {
			io_request->EEDPFlags =
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		} else {
			io_request->EEDPFlags =
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		}
		io_request->Control |= (0x4 << 26);
		io_request->EEDPBlockSize = ld_block_size;
	} else {
		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
		if (((cdb_len == 12) || (cdb_len == 16)) &&
		    (start_blk <= 0xffffffff)) {
			if (cdb_len == 16) {
				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[14];
				control = cdb[15];
			} else {
				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = 10;	/* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u_int8_t)(num_blocks & 0xff);
			cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
			cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
			cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = 16;	/* Specify 16-byte cdb */
			cdb_len = 16;
		} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
			/* convert to 10 byte CDB */
			opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
			control = cdb[5];

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
			cdb[0] = opcode;
			cdb[9] = control;

			/* Set transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			/* Specify 10-byte cdb */
			cdb_len = 10;
		}
		/* Fall through to the normal case and load the LBA here. */
		u_int8_t val = cdb[1] & 0xE0;

		switch (cdb_len) {
		case 6:
			cdb[3] = (u_int8_t)(start_blk & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
			break;
		case 10:
			cdb[5] = (u_int8_t)(start_blk & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u_int8_t)(start_blk & 0xff);
			cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
			cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
			break;
		}
	}
}
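
/*
 * CDB conversion example (illustrative): a fast-path READ(6) whose
 * physical start block is 0x3fffff no longer fits the 21-bit LBA field
 * of a 6-byte CDB, so mrsas_set_pd_lba() above rebuilds it as a
 * READ(10) and then loads the LBA big-endian in the final switch.
 */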

/*
 * mrsas_get_best_arm_pd:	Determine the best spindle arm
 * Inputs:
 *	sc - HBA instance
 *	lbInfo - Load balance info
 *	io_info - IO request info
 *
 * This function determines and returns the best arm by looking at the
 * parameters of the last PD access.
 */
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	MR_LD_RAID *raid;
	MR_DRV_RAID_MAP_ALL *drv_map;
	u_int16_t pend0, pend1, ld;
	u_int64_t diff0, diff1;
	u_int8_t bestArm, pd0, pd1, span, arm;
	u_int32_t arRef, span_row_size;

	u_int64_t block = io_info->ldStartBlock;
	u_int32_t count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
	    >> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = sc->UnevenSpanSupport ?
	    SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
	    (arm + 1 - span_row_size) : arm + 1, drv_map);

	/* get the pending cmds for the data and mirror arms */
	pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
	pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

	if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
	    (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
		bestArm ^= 1;

	/* Update the last accessed block on the correct pd */
	lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
#if SPAN_DEBUG
	if (arm != bestArm)
		printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x "
		    "io_info->span_arm 0x%x\n",
		    span, arm, bestArm, io_info->span_arm);
#endif

	return io_info->pd_after_lb;
}

/*
 * mrsas_get_updated_dev_handle:	Get the updated dev handle
 * Inputs:
 *	sc - Adapter instance soft state
 *	lbInfo - Load balance info
 *	io_info - io_info pointer
 *
 * This function determines and returns the updated dev handle.
 */
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	u_int8_t arm_pd;
	u_int16_t devHandle;
	MR_DRV_RAID_MAP_ALL *drv_map;

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];

	/* get best new arm */
	arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}
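
/*
 * The scsi_pending_cmds counter incremented above is expected to be
 * decremented by the I/O completion path, which keeps the per-arm
 * queue depths that mrsas_get_best_arm_pd() compares accurate.
 */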

/*
 * MR_GetPhyParams:	Calculates arm, span, and block
 * Inputs:	Adapter soft state
 *		Logical drive number (LD)
 *		Stripe number (stripRow)
 *		Reference in stripe (stripRef)
 *
 * Outputs:	Absolute block number in the physical disk
 *
 * This routine calculates the arm, span, and block for the specified stripe
 * and reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	int error_code = 0;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t rowMod, armQ, arm, logArm;

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		logArm = mega_mod64(stripRow, raid->rowDataSize);
		if (raid->rowSize == 0)
			return FALSE;
		rowMod = mega_mod64(row, raid->rowSize);	/* get logical row mod */
		armQ = raid->rowSize - 1 - rowMod;	/* index of Q drive */
		arm = armQ + 1 + logArm;	/* data always logically follows Q */
		if (arm >= raid->rowSize)	/* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u_int8_t)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
		if (error_code == 1)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);

	pd = MR_ArPdGet(arRef, physArm, map);	/* Get the Pd. */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;	/* set dev handle as invalid */
		if ((raid->level >= 5) && ((!sc->mrsas_gen3_ctrl) || (sc->mrsas_gen3_ctrl &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd. */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}

/*
 * MR_GetSpanBlock:	Calculates span block
 * Inputs:	LD
 *		row
 *		span block pointer
 *		RAID map pointer
 *
 * Outputs:	Span number
 *		Error code
 *
 * This routine calculates the span from the span block info.
 */
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT *quad;
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t span, j;
	u_int64_t blk, debugBlk;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			quad = &pSpanBlock->block_span_info.quad[j];
			if (quad->diff == 0) {
				*div_error = 1;
				return span;
			}
			if (quad->logStart <= row && row <= quad->logEnd &&
			    (mega_mod64(row - quad->logStart, quad->diff)) == 0) {
				if (span_blk != NULL) {
					blk = mega_div64_32((row - quad->logStart), quad->diff);
					debugBlk = blk;
					blk = (blk + quad->offsetInSpan) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return span;
}