/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES, 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

/*
 * Function prototypes
 */
u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * map, PLD_LOAD_BALANCE_INFO lbInfo);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map);
static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm,
    MR_DRV_RAID_MAP_ALL * map);
static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
    MR_DRV_RAID_MAP_ALL * map);
static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld,
    MR_DRV_RAID_MAP_ALL * map);
MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
void MR_PopulateDrvRaidMap(struct mrsas_softc *sc);

/*
 * Spanset related function prototypes.  Added for PRL11 configuration
 * (Uneven span support).
 */
void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo);
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map);
static u_int64_t
get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t strip, MR_DRV_RAID_MAP_ALL * map);
static u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error);
static u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
    u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map);

/*
 * Spanset related defines.  Added for PRL11 configuration (Uneven span
 * support).
 */
#define	SPAN_ROW_SIZE(map, ld, index_)	(MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define	SPAN_ROW_DATA_SIZE(map_, ld, index_) \
	(MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define	SPAN_INVALID	0xff
#define	SPAN_DEBUG	0

/*
 * Related Defines
 */

typedef u_int64_t REGION_KEY;
typedef u_int32_t REGION_LEN;

#define	MR_LD_STATE_OPTIMAL	3
#define	FALSE			0
#define	TRUE			1

#define	LB_PENDING_CMDS_DEFAULT	4

/*
 * Related Macros
 */

#define	ABS_DIFF(a, b)	(((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

#define	swap32(x) \
	((unsigned int)( \
	(((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
	(((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
	(((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
	(((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24)))

/*
 * In-line macros for mod and divide of a 64-bit dividend by a 32-bit
 * divisor.  The caller must guarantee a non-zero divisor; no zero check is
 * performed here.
 *
 * @param dividend:	Dividend
 * @param divisor:	Divisor
 * @return remainder (or quotient)
 */

#define	mega_mod64(dividend, divisor) ({ \
	u_int32_t remainder; \
	remainder = ((u_int64_t)(dividend)) % (u_int32_t)(divisor); \
	remainder; })

#define	mega_div64_32(dividend, divisor) ({ \
	u_int64_t quotient; \
	quotient = ((u_int64_t)(dividend)) / (u_int32_t)(divisor); \
	quotient; })
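
/*
 * Illustrative sketch (comment only, not compiled): how the helpers above
 * behave for a hypothetical stripe calculation.  The values are made up.
 *
 *	u_int64_t strip = 10; u_int32_t width = 3;
 *	mega_mod64(strip, width);	-> 1  (10 % 3)
 *	mega_div64_32(strip, width);	-> 3  (10 / 3)
 *	swap32(0x12345678);		-> 0x78563412 (byte order reversed)
 */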

/*
 * Various RAID map access functions.  These functions access the various
 * parts of the RAID map and return the appropriate parameters.
 */

MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}

static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.arMapInfo[ar].pd[arm];
}

static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

u_int16_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/*
	 * Check if logical drive was removed.
	 */
	if (ld >= MAX_LOGICAL_DRIVES)
		return ldBlockSize;

	raid = MR_LdRaidGet(ld, map);
	ldBlockSize = raid->logicalBlockLength;
	if (!ldBlockSize)
		ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	return ldBlockSize;
}

/*
 * This function populates the driver RAID map using the firmware RAID map.
 */
void
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
	MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	MR_FW_RAID_MAP *pFwRaidMap = NULL;
	unsigned int i;

	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (sc->max256vdSupport) {
		memcpy(sc->ld_drv_map[sc->map_id & 1],
		    sc->raidmap_mem[sc->map_id & 1],
		    sc->current_map_sz);
		/*
		 * New RAID map will not set totalSize, so keep expected
		 * value for legacy code in ValidateMapInfo.
		 */
		pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT);
	} else {
		fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
		for (i = 0; i < pFwRaidMap->ldCount; i++) {
			device_printf(sc->mrsas_dev,
			    "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0x%lx\n", i,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, sc->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
		pDrvRaidMap->fpPdIoTimeoutSec =
		    pFwRaidMap->fpPdIoTimeoutSec;

		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
			pDrvRaidMap->ldTgtIdToLd[i] =
			    (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
		}

		for (i = 0; i < pDrvRaidMap->ldCount; i++) {
			pDrvRaidMap->ldSpanMap[i] =
			    pFwRaidMap->ldSpanMap[i];

#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap,
			    &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}

		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) *
		    MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
}
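
/*
 * Illustrative sketch (comment only): the driver keeps two copies of the
 * map and selects one with (map_id & 1), so a new firmware map can be
 * staged while I/O still references the previous one.  A hypothetical
 * consumer after a firmware map-change event might look like:
 *
 *	sc->map_id++;			// flip to the other buffer
 *	if (MR_ValidateMapInfo(sc))	// populates and checks the new copy
 *		... fall back to non-FastPath I/O ...
 */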

/*
 * MR_ValidateMapInfo:	Validate RAID map
 * input:		Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map.  It returns 0 if
 * successful, and 1 otherwise.
 */
u_int8_t
MR_ValidateMapInfo(struct mrsas_softc *sc)
{
	MR_DRV_RAID_MAP_ALL *drv_map;
	MR_DRV_RAID_MAP *pDrvRaidMap;
	PLD_SPAN_INFO ldSpanInfo;
	u_int32_t expected_map_size;

	if (!sc) {
		return 1;
	}
	MR_PopulateDrvRaidMap(sc);

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;
	ldSpanInfo = (PLD_SPAN_INFO)&sc->log_to_span;

	if (sc->max256vdSupport)
		expected_map_size = sizeof(MR_FW_RAID_MAP_EXT);
	else
		expected_map_size =
		    (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
		    (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount);

	if (pDrvRaidMap->totalSize != expected_map_size) {
		device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size);
		device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
		device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize);
		return 1;
	}
	if (sc->UnevenSpanSupport) {
		mr_update_span_set(drv_map, ldSpanInfo);
	}
	mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info);

	return 0;
}
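
/*
 * Worked example for the legacy size check above (hypothetical sizes): if
 * sizeof(MR_FW_RAID_MAP) were 0x4e0 with one embedded MR_LD_SPAN_MAP of
 * 0x80, then for ldCount == 4:
 *
 *	expected_map_size = (0x4e0 - 0x80) + (0x80 * 4) = 0x660
 *
 * and the map is rejected unless firmware reported the same totalSize.
 */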

/*
 * Function to print info about the span sets created in the driver from
 * the FW raid map.
 *
 * Inputs:	map
 *		ldSpanInfo: ld map span info per HBA instance
 */
#if SPAN_DEBUG
static int
getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{

	u_int8_t span;
	u_int32_t element;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES) {
			continue;
		}
		raid = MR_LdRaidGet(ld, map);
		printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
		for (span = 0; span < raid->spanDepth; span++)
			printf("Span=%x, number of quads=%x\n", span,
			    map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			span_set = &(ldSpanInfo[ld].span_set[element]);
			if (span_set->span_row_data_width == 0)
				break;

			printf("Span Set %x: width=%x, diff=%x\n", element,
			    (unsigned int)span_set->span_row_data_width,
			    (unsigned int)span_set->diff);
			printf("logical LBA start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->log_start_lba,
			    (long unsigned int)span_set->log_end_lba);
			printf("span row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->span_row_start,
			    (long unsigned int)span_set->span_row_end);
			printf("data row start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->data_row_start,
			    (long unsigned int)span_set->data_row_end);
			printf("data strip start=0x%08lx, end=0x%08lx\n",
			    (long unsigned int)span_set->data_strip_start,
			    (long unsigned int)span_set->data_strip_end);

			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements >= element + 1) {
					quad = &map->raidMap.ldSpanMap[ld].
					    spanBlock[span].block_span_info.
					    quad[element];
					printf("Span=%x, Quad=%x, diff=%x\n", span,
					    element, quad->diff);
					printf("offset_in_span=0x%08lx\n",
					    (long unsigned int)quad->offsetInSpan);
					printf("logical start=0x%08lx, end=0x%08lx\n",
					    (long unsigned int)quad->logStart,
					    (long unsigned int)quad->logEnd);
				}
			}
		}
	}
	return 0;
}

#endif

/*
 * This routine calculates the Span block for given row using spanset.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		row:	Row number
 *		map:	LD map
 *
 * Outputs:	span		- Span number
 *		block		- Absolute Block number in the physical disk
 *		div_error	- Divide error code.
 */

static u_int32_t
mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	u_int32_t span, info;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].
				    block_span_info.quad[info];
				if (quad->diff == 0) {
					*div_error = 1;
					return span;
				}
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    (mega_mod64(row - quad->logStart,
				    quad->diff)) == 0) {
					if (span_blk != NULL) {
						u_int64_t blk;

						blk = mega_div64_32
						    ((row - quad->logStart),
						    quad->diff);
						blk = (blk + quad->offsetInSpan)
						    << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}
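
/*
 * Worked example for the quad test above (hypothetical quad): with
 * logStart = 0, logEnd = 9, diff = 2 and offsetInSpan = 0x10, row 6 belongs
 * to this quad because 0 <= 6 <= 9 and (6 - 0) % 2 == 0, and the span block
 * becomes
 *
 *	blk = (6 / 2 + 0x10) << stripeShift = 0x13 << stripeShift
 *
 * For row 5 the modulo test fails and the next quad/span is tried.
 */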

/*
 * This routine calculates the row for given strip using spanset.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		Strip:	Strip
 *		map:	LD map
 *
 * Outputs:	row - row associated with strip
 */

static u_int64_t
get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;
	u_int64_t span_set_Strip, span_set_Row;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
		    span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
		    span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >=
				    span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
		    "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
		    (unsigned long long)span_set_Strip,
		    (unsigned long long)span_set_Row,
		    (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
		    (unsigned long long)span_set->data_row_start +
		    (unsigned long long)span_set_Row + (span_offset - 1));
		return (span_set->data_row_start + span_set_Row + (span_offset - 1));
	}
	return -1LLU;
}

/*
 * This routine calculates the Start Strip for given row using spanset.
 *
 * Inputs:	HBA instance
 *		ld:	Logical drive number
 *		row:	Row number
 *		map:	LD map
 *
 * Outputs:	Strip - Start strip associated with row
 */

static u_int64_t
get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	MR_QUAD_ELEMENT *quad;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t span, info;
	u_int64_t strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[info];
				if (quad->logStart <= row &&
				    row <= quad->logEnd &&
				    mega_mod64((row - quad->logStart),
				    quad->diff) == 0) {
					strip = mega_div64_32
					    (((row - span_set->data_row_start)
					    - quad->logStart),
					    quad->diff);
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug - get_strip_from_row: returns invalid "
	    "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
	return -1;
}
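
/*
 * Worked example for get_row_from_strip() (hypothetical span set): with
 * data_strip_start = 0, span_row_data_width = 3, diff = 1 and
 * data_row_start = 0, strip 10 gives
 *
 *	span_set_Strip = 10, strip_offset = 10 % 3 = 1,
 *	span_set_Row   = (10 / 3) * 1 = 3,
 *
 * and with a single span whose strip_offset[] entry is 0, span_offset ends
 * up 1, so the row returned is 0 + 3 + (1 - 1) = 3.
 */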

/*
 * This routine calculates the Physical Arm for given strip using spanset.
 *
 * Inputs:	HBA instance
 *		Logical drive number
 *		Strip
 *		LD map
 *
 * Outputs:	Phys Arm - Phys Arm associated with strip
 */

static u_int32_t
get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
	u_int32_t info, strip_offset, span, span_offset;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (u_int32_t)mega_mod64
		    ((strip - span_set->data_strip_start),
		    span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
			    block_span_info.noElements >= info + 1) {
				if (strip_offset >= span_set->strip_offset[span])
					span_offset = span_set->strip_offset[span];
				else
					break;
			}
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: "
		    "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
		    (long unsigned int)strip, (strip_offset - span_offset));
		return (strip_offset - span_offset);
	}

	mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: - get_arm_from_strip: returns invalid arm"
	    " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);

	return -1;
}

/* This function returns the physical arm for the given strip. */
static u_int8_t
get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

	/* Need to check correct default value */
	u_int32_t arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(sc, ld, stripe, map);
		arm *= 2;
		break;
	}

	return arm;
}
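
/*
 * Illustrative note (comment only): for RAID-1 each logical arm maps to a
 * data/mirror pair in the array, so the logical arm is doubled to index
 * the data copy.  E.g. a hypothetical strip whose logical arm is 1 yields
 * physical arm 2; its mirror sits at the next arm index (arm + 1), which
 * the load balancer below may pick instead.
 */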

/*
 * This routine calculates the arm, span and block for the specified stripe
 * and reference in stripe using spanset.
 *
 * Inputs:	sc - HBA instance
 *		ld - Logical drive number
 *		stripRow: Stripe number
 *		stripRef: Reference in stripe
 *
 * Outputs:	span - Span number
 *		block - Absolute Block number in the physical disk
 */
static u_int8_t
mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t logArm, rowMod, armQ, arm;
	u_int8_t do_invader = 0;

	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		do_invader = 1;

	/* Get row and span from io_info for Uneven Span IO. */
	row = io_info->start_row;
	span = io_info->start_span;

	if (raid->level == 6) {
		logArm = get_arm_from_strip(sc, ld, stripRow, map);
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u_int8_t)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(sc, ld, span, stripRow, map);

	arRef = MR_LdSpanArrayGet(ld, span, map);
	pd = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID)
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;
		if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}
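
/*
 * Worked example for the RAID-6 arm rotation above (hypothetical 4-arm
 * span row, SPAN_ROW_SIZE == 4): for row 5 and logical arm 1,
 *
 *	rowMod = 5 % 4 = 1
 *	armQ   = 4 - 1 - 1 = 2	(Q drive index for this row)
 *	arm    = 2 + 1 + 1 = 4	-> wraps to physical arm 0
 *
 * i.e. data logically follows the Q drive and wraps around the end of the
 * row.
 */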

/*
 * MR_BuildRaidContext:	Set up Fast path RAID context
 *
 * This function will initiate command processing.  The start/end row and
 * strip information is calculated then the lock is acquired.  This function
 * will return 0 if region lock was acquired OR return num strips.
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, stripSize, stripe_mask;
	u_int64_t endLba, endStrip, endRow, start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	u_int8_t num_strips, numRows;
	u_int16_t ref_in_start_stripe, ref_in_end_stripe;
	u_int64_t ldStartBlock;
	u_int32_t numBlocks, ldTgtId;
	u_int8_t isRead, stripIdx;
	u_int8_t retval = 0;
	u_int8_t startlba_span = SPAN_INVALID;
	u_int64_t *pdBlock = &io_info->pdBlock;
	int error_code = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (sc->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: raid->rowDataSize is 0, but SPAN[0] rowDataSize = 0x%0x,"
			    " and there is _NO_ UnevenSpanSupport\n",
			    MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}
	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
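	/*
	 * Worked example of the strip math above (hypothetical I/O,
	 * stripeShift = 3, i.e. 8-block strips): ldStartBlock = 22,
	 * numBlocks = 20:
	 *
	 *	start_strip         = 22 >> 3 = 2
	 *	ref_in_start_stripe = 22 & 7  = 6
	 *	endLba              = 22 + 20 - 1 = 41
	 *	ref_in_end_stripe   = 41 & 7  = 1
	 *	endStrip            = 41 >> 3 = 5
	 *	num_strips          = 5 - 2 + 1 = 4
	 */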
	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(sc, ld, start_strip, map);
		endRow = get_row_from_strip(sc, ld, endStrip, map);
		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else {
			startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
			    pdBlock, map, &error_code);
			if (error_code == 1) {
				mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n",
				    __func__, __LINE__);
				return FALSE;
			}
		}
		if (startlba_span == SPAN_INVALID) {
			mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d for row 0x%llx, "
			    "start strip %llx endStrip %llx\n", __func__,
			    __LINE__, (unsigned long long)start_row,
			    (unsigned long long)start_strip,
			    (unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: Check Span number from %s %d for row 0x%llx, "
		    "start strip 0x%llx endStrip 0x%llx span 0x%x\n",
		    __func__, __LINE__, (unsigned long long)start_row,
		    (unsigned long long)start_strip,
		    (unsigned long long)endStrip, startlba_span);
		mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
		    (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}

	numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

	/*
	 * Calculate region info.  (Assume region at start of first row, and
	 * assume this IO needs the full row - will adjust if not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization.  If the start
		 * strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
		    SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(sc, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}
	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
		    mr_spanset_get_phy_params(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map) :
		    MR_GetPhyParams(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
			    mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map) :
			    MR_GetPhyParams(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}
#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	get_arm_from_strip(sc, ld, start_strip, map);
#endif
	return TRUE;
}
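
/*
 * Illustrative note on the even-span region lock sizing above (hypothetical
 * values): for a 3-row transfer with 8-block strips (stripSize = 8) that
 * starts on the last strip of its row and ends ref_in_end_stripe = 1 blocks
 * into the first strip of the last row, regSize grows as
 *
 *	regSize = (8 - ref_in_start_stripe)	start row remainder
 *	        + (3 - 2) * 8			one full middle row
 *	        + (1 + 1)			blocks used in the last row
 */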

/*
 * This routine prepares the span set info from the valid RAID map and
 * stores it in the driver's local copy of ldSpanInfo, per instance.
 *
 * Inputs:	LD map
 *		ldSpanInfo per HBA instance
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	u_int8_t span, count;
	u_int32_t element, span_row_width;
	u_int64_t span_row;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements < element + 1)
					continue;
				/* TO-DO */
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[element];

				span_set->diff = quad->diff;

				for (count = 0, span_row_width = 0;
				    count < raid->spanDepth; count++) {
					if (map->raidMap.ldSpanMap[ld].spanBlock[count].
					    block_span_info.noElements >= element + 1) {
						span_set->strip_offset[count] = span_row_width;
						span_row_width +=
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
						printf("AVAGO Debug span %x rowDataSize %x\n", count,
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((quad->logEnd -
				    quad->logStart) + quad->diff), quad->diff);

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end = (span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end = (span_row * quad->diff) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
					span_set->log_start_lba = span_set_prev->log_end_lba + 1;
					span_set->log_end_lba = span_set->log_start_lba +
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					    span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					    span_set_prev->data_strip_end + 1;
					span_set->data_strip_end = span_set->data_strip_start +
					    (span_row * span_row_width) - 1;

					span_set->data_row_start = span_set_prev->data_row_end + 1;
					span_set->data_row_end = span_set->data_row_start +
					    (span_row * quad->diff) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads remain */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);	/* to get span set info */
#endif
}
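
/*
 * Worked example for the span-set bookkeeping above (hypothetical two-span
 * PRL11 LD, diff = 1, stripeShift = 3): if both spans contribute to quad 0
 * with spanRowDataSize 2 and 3, then span_row_data_width = 5, and a quad
 * covering rows 0..9 (span_row = 10) produces
 *
 *	data_strip_start = 0, data_strip_end = 10 * 5 - 1 = 49
 *	log_start_lba    = 0, log_end_lba    = (10 << 3) * 5 - 1 = 399
 *
 * The next span set (element 1) then starts at strip 50 / LBA 400.
 */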

/*
 * mrsas_update_load_balance_params:	Update load balance params
 * Inputs:
 *	sc - driver softc instance
 *	drv_map - driver RAID map
 *	lbInfo - Load balance info
 *
 * This function updates the load balance parameters for the LD config of a
 * two drive optimal RAID-1.
 */
void
mrsas_update_load_balance_params(struct mrsas_softc *sc,
    MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	int ldCount;
	u_int16_t ld;
	MR_LD_RAID *raid;

	if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1)
		sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
		    (raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}

/*
 * mrsas_set_pd_lba:	Sets PD LBA
 * input:	io_request pointer
 *		CDB length
 *		io_info pointer
 *		Pointer to CCB
 *		Local RAID map pointer
 *		Start block of IO
 *		Block Size
 *
 * Used to set the PD logical block address in CDB for FP IOs.
 */
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
    struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size)
{
	MR_LD_RAID *raid;
	u_int32_t ld;
	u_int64_t start_blk = io_info->pdBlock;
	u_int8_t *cdb = io_request->CDB.CDB32;
	u_int32_t num_blocks = io_info->numBlocks;
	u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

		if (ccb_h->flags == CAM_DIR_OUT)
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
		else
			cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
		cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;

		/* LBA */
		cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
		cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
		cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
		cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
		cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
		cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
		cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
		cdb[19] = (u_int8_t)(start_blk & 0xff);

		/* Logical block reference tag */
		io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
		io_request->IoFlags = 32;	/* Specify 32-byte cdb */

		/* Transfer length */
		cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
		cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
		cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
		cdb[31] = (u_int8_t)(num_blocks & 0xff);

		/* set SCSI IO EEDP Flags */
		if (ccb_h->flags == CAM_DIR_OUT) {
			io_request->EEDPFlags =
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		} else {
			io_request->EEDPFlags =
			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		}
		io_request->Control |= (0x4 << 26);
		io_request->EEDPBlockSize = ld_block_size;
	} else {
		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
		if (((cdb_len == 12) || (cdb_len == 16)) &&
		    (start_blk <= 0xffffffff)) {
			if (cdb_len == 16) {
				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[14];
				control = cdb[15];
			} else {
				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = 10;	/* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u_int8_t)(num_blocks & 0xff);
			cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
			cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
			cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = 16;	/* Specify 16-byte cdb */
			cdb_len = 16;
		} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
			/* convert to 10 byte CDB */
			opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
			control = cdb[5];

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
			cdb[0] = opcode;
			cdb[9] = control;

			/* Set transfer length */
			cdb[8] = (u_int8_t)(num_blocks & 0xff);
			cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);

			/* Specify 10-byte cdb */
			cdb_len = 10;
		}
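		/*
		 * Worked example (hypothetical request) for the CDB-10 case
		 * of the LBA load below: start_blk = 0x00123456 and a
		 * transfer length of 0x20 blocks are packed big-endian,
		 * giving cdb[2..5] = 00 12 34 56 and cdb[7..8] = 00 20.
		 */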
		/* Fall through normal case, just load LBA here */
		u_int8_t val = cdb[1] & 0xE0;

		switch (cdb_len) {
		case 6:
			cdb[3] = (u_int8_t)(start_blk & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
			break;
		case 10:
			cdb[5] = (u_int8_t)(start_blk & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
			break;
		case 12:
			cdb[5] = (u_int8_t)(start_blk & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u_int8_t)(start_blk & 0xff);
			cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
			cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
			cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
			cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
			cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
			cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
			cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
			break;
		}
	}
}

/*
 * mrsas_get_best_arm_pd:	Determine the best spindle arm
 * Inputs:
 *	sc - HBA instance
 *	lbInfo - Load balance info
 *	io_info - IO request info
 *
 * This function determines and returns the best arm by looking at the
 * parameters of the last PD access.
 */
u_int8_t
mrsas_get_best_arm_pd(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	MR_LD_RAID *raid;
	MR_DRV_RAID_MAP_ALL *drv_map;
	u_int16_t pend0, pend1, ld;
	u_int64_t diff0, diff1;
	u_int8_t bestArm, pd0, pd1, span, arm;
	u_int32_t arRef, span_row_size;

	u_int64_t block = io_info->ldStartBlock;
	u_int32_t count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
	    >> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = sc->UnevenSpanSupport ?
	    SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
	    (arm + 1 - span_row_size) : arm + 1, drv_map);

	/* get the pending cmds for the data and mirror arms */
	pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
	pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

	if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) ||
	    (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds))
		bestArm ^= 1;

	/* Update the last accessed block on the correct pd */
	lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1;
	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
#if SPAN_DEBUG
	if (arm != bestArm)
		printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x "
		    "io_info->span_arm 0x%x\n",
		    span, arm, bestArm, io_info->span_arm);
#endif

	return io_info->pd_after_lb;
}
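
/*
 * Worked example of the arm selection above (hypothetical state,
 * lb_pending_cmds = 4): a read at block 1000 with
 * last_accessed_block[pd0] = 990 and last_accessed_block[pd1] = 5000 gives
 * diff0 = 10 and diff1 = 4000, so bestArm starts as the data arm.  If the
 * data arm then has pend0 = 9 outstanding commands against pend1 = 2
 * (9 > 2 + 4), the choice is flipped to the mirror arm anyway.
 */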

/*
 * mrsas_get_updated_dev_handle:	Get the updated dev handle
 * Inputs:
 *	sc - Adapter instance soft state
 *	lbInfo - Load balance info
 *	io_info - io_info pointer
 *
 * This function determines and returns the updated dev handle.
 */
u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info)
{
	u_int8_t arm_pd;
	u_int16_t devHandle;
	MR_DRV_RAID_MAP_ALL *drv_map;

	drv_map = sc->ld_drv_map[(sc->map_id & 1)];

	/* get best new arm */
	arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}

/*
 * MR_GetPhyParams:	Calculates arm, span, and block
 * Inputs:	Adapter soft state
 *		Logical drive number (LD)
 *		Stripe number (stripRow)
 *		Reference in stripe (stripRef)
 *
 * Outputs:	Absolute Block number in the physical disk
 *
 * This routine calculates the arm, span and block for the specified stripe
 * and reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	int error_code = 0;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t rowMod, armQ, arm, logArm;
	u_int8_t do_invader = 0;

	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		do_invader = 1;

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		logArm = mega_mod64(stripRow, raid->rowDataSize);
		if (raid->rowSize == 0)
			return FALSE;
		rowMod = mega_mod64(row, raid->rowSize);	/* get logical row mod */
		armQ = raid->rowSize - 1 - rowMod;	/* index of Q drive */
		arm = armQ + 1 + logArm;	/* data always logically follows Q */
		if (arm >= raid->rowSize)	/* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u_int8_t)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
		if (error_code == 1)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);

	pd = MR_ArPdGet(arRef, physArm, map);	/* Get the Pd. */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;	/* set dev handle as invalid. */
		if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd. */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}

/*
 * MR_GetSpanBlock:	Calculates span block
 * Inputs:	LD
 *		row
 *		span block (output pointer)
 *		RAID map pointer
 *
 * Outputs:	Span number
 *		Error code
 *
 * This routine calculates the span from the span block info.
 */
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT *quad;
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t span, j;
	u_int64_t blk, debugBlk;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			quad = &pSpanBlock->block_span_info.quad[j];
			if (quad->diff == 0) {
				*div_error = 1;
				return span;
			}
			if (quad->logStart <= row && row <= quad->logEnd &&
			    (mega_mod64(row - quad->logStart, quad->diff)) == 0) {
				if (span_blk != NULL) {
					blk = mega_div64_32((row - quad->logStart), quad->diff);
					debugBlk = blk;
					blk = (blk + quad->offsetInSpan) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return span;
}