1 /* 2 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 3 * Support: freebsdraid@lsi.com 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: 8 * 9 * 1. Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 2. Redistributions 11 * in binary form must reproduce the above copyright notice, this list of 12 * conditions and the following disclaimer in the documentation and/or other 13 * materials provided with the distribution. 3. Neither the name of the 14 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 15 * promote products derived from this software without specific prior written 16 * permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 29 * 30 * The views and conclusions contained in the software and documentation are 31 * those of the authors and should not be interpreted as representing 32 * official policies,either expressed or implied, of the FreeBSD Project. 
33 * 34 * Send feedback to: <megaraidfbsd@lsi.com> Mail to: LSI Corporation, 1621 35 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 36 * 37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #include <dev/mrsas/mrsas.h> 43 44 #include <cam/cam.h> 45 #include <cam/cam_ccb.h> 46 #include <cam/cam_sim.h> 47 #include <cam/cam_xpt_sim.h> 48 #include <cam/cam_debug.h> 49 #include <cam/cam_periph.h> 50 #include <cam/cam_xpt_periph.h> 51 52 53 /* 54 * Function prototypes 55 */ 56 u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 57 u_int8_t 58 mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm, 59 u_int64_t block, u_int32_t count); 60 u_int8_t 61 MR_BuildRaidContext(struct mrsas_softc *sc, 62 struct IO_REQUEST_INFO *io_info, 63 RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); 64 u_int8_t 65 MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld, 66 u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, 67 RAID_CONTEXT * pRAID_Context, 68 MR_DRV_RAID_MAP_ALL * map); 69 u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); 70 u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); 71 u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 72 u_int16_t 73 mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, 74 struct IO_REQUEST_INFO *io_info); 75 u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor); 76 u_int32_t 77 MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk, 78 MR_DRV_RAID_MAP_ALL * map, int *div_error); 79 u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor); 80 void 81 mrsas_update_load_balance_params(MR_DRV_RAID_MAP_ALL * map, 82 PLD_LOAD_BALANCE_INFO lbInfo); 83 void 84 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, 85 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, 86 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, 87 u_int32_t ld_block_size); 88 static u_int16_t 89 
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, 90 MR_DRV_RAID_MAP_ALL * map); 91 static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map); 92 static u_int16_t 93 MR_ArPdGet(u_int32_t ar, u_int32_t arm, 94 MR_DRV_RAID_MAP_ALL * map); 95 static MR_LD_SPAN * 96 MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, 97 MR_DRV_RAID_MAP_ALL * map); 98 static u_int8_t 99 MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, 100 MR_DRV_RAID_MAP_ALL * map); 101 static MR_SPAN_BLOCK_INFO * 102 MR_LdSpanInfoGet(u_int32_t ld, 103 MR_DRV_RAID_MAP_ALL * map); 104 MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 105 void MR_PopulateDrvRaidMap(struct mrsas_softc *sc); 106 107 108 /* 109 * Spanset related function prototypes Added for PRL11 configuration (Uneven 110 * span support) 111 */ 112 void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo); 113 static u_int8_t 114 mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, 115 u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, 116 RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); 117 static u_int64_t 118 get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld, 119 u_int64_t strip, MR_DRV_RAID_MAP_ALL * map); 120 static u_int32_t 121 mr_spanset_get_span_block(struct mrsas_softc *sc, 122 u_int32_t ld, u_int64_t row, u_int64_t *span_blk, 123 MR_DRV_RAID_MAP_ALL * map, int *div_error); 124 static u_int8_t 125 get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, 126 u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map); 127 128 129 /* 130 * Spanset related defines Added for PRL11 configuration(Uneven span support) 131 */ 132 #define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize 133 #define SPAN_ROW_DATA_SIZE(map_, ld, index_) \ 134 MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize 135 #define SPAN_INVALID 0xff 136 #define SPAN_DEBUG 0 137 138 /* 139 * Related Defines 140 */ 141 142 typedef u_int64_t REGION_KEY; 143 typedef 
u_int32_t REGION_LEN; 144 145 #define MR_LD_STATE_OPTIMAL 3 146 #define FALSE 0 147 #define TRUE 1 148 149 150 /* 151 * Related Macros 152 */ 153 154 #define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) ) 155 156 #define swap32(x) \ 157 ((unsigned int)( \ 158 (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \ 159 (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \ 160 (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \ 161 (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) )) 162 163 164 /* 165 * In-line functions for mod and divide of 64-bit dividend and 32-bit 166 * divisor. Assumes a check for a divisor of zero is not possible. 167 * 168 * @param dividend: Dividend 169 * @param divisor: Divisor 170 * @return remainder 171 */ 172 173 #define mega_mod64(dividend, divisor) ({ \ 174 int remainder; \ 175 remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \ 176 remainder;}) 177 178 #define mega_div64_32(dividend, divisor) ({ \ 179 int quotient; \ 180 quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \ 181 quotient;}) 182 183 184 /* 185 * Various RAID map access functions. These functions access the various 186 * parts of the RAID map and returns the appropriate parameters. 
 */

/* Return a pointer to the MR_LD_RAID descriptor of logical drive 'ld'. */
MR_LD_RAID *
MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

/* Return the SCSI target id that logical drive index 'ld' is exported as. */
u_int16_t
MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

/* Return the array reference of span 'span' of logical drive 'ld'. */
static u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}

/* Return the data arm mapped at index 'armIdx' for logical drive 'ld'. */
static u_int8_t
MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

/* Return the current device handle of physical device 'pd'. */
static u_int16_t
MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

/* Return the physical device index of arm 'arm' within array 'ar'. */
static u_int16_t
MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.arMapInfo[ar].pd[arm];
}

/* Return a pointer to the MR_LD_SPAN of span 'span' of logical drive 'ld'. */
static MR_LD_SPAN *
MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

/* Return a pointer to the first span block of logical drive 'ld'. */
static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

/* Translate an exported target id back to the internal LD index. */
u_int16_t
MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

/*
 * Return the logical block size of the LD exported as 'ldTgtId'.  Falls
 * back to MRSAS_SCSIBLOCKSIZE when the LD is gone (index out of range) or
 * when the map reports a zero block length.
 */
u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/*
	 * Check if logical drive was removed.
	 */
	if (ld >= MAX_LOGICAL_DRIVES)
		return ldBlockSize;

	raid = MR_LdRaidGet(ld, map);
	ldBlockSize = raid->logicalBlockLength;
	if (!ldBlockSize)
		ldBlockSize = MRSAS_SCSIBLOCKSIZE;

	return ldBlockSize;
}

/*
 * This function will Populate Driver Map using firmware raid map.
 *
 * For 256-VD firmware the firmware map is already in driver layout and is
 * copied wholesale; for legacy firmware the fields are copied member by
 * member into the (zeroed) driver map.
 */
void
MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
{
	MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
	MR_FW_RAID_MAP *pFwRaidMap = NULL;
	unsigned int i;

	/* map_id & 1 selects the active half of the double-buffered map. */
	MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
	MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

	if (sc->max256vdSupport) {
		memcpy(sc->ld_drv_map[sc->map_id & 1],
		    sc->raidmap_mem[sc->map_id & 1],
		    sc->current_map_sz);
		/*
		 * New Raid map will not set totalSize, so keep expected
		 * value for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT);
	} else {
		fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;

#if VD_EXT_DEBUG
		for (i = 0; i < pFwRaidMap->ldCount; i++) {
			device_printf(sc->mrsas_dev,
			    "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n", i,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
			    fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
		}
#endif

		memset(drv_map, 0, sc->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
		pDrvRaidMap->fpPdIoTimeoutSec =
		    pFwRaidMap->fpPdIoTimeoutSec;

		/* Copy the full target-id translation table (LDs + views). */
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) {
			pDrvRaidMap->ldTgtIdToLd[i] =
			    (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
		}

		/* Struct-copy each LD's span map from the firmware layout. */
		for (i = 0; i < pDrvRaidMap->ldCount; i++) {
			pDrvRaidMap->ldSpanMap[i] =
			    pFwRaidMap->ldSpanMap[i];

#if VD_EXT_DEBUG
			device_printf(sc->mrsas_dev,
			    "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
			    "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
			    i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
			    "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
			    pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
			    (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
			device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
			    drv_map, pDrvRaidMap,
			    &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
		}

		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
		    sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
		    sizeof(MR_DEV_HANDLE_INFO) *
		    MAX_RAIDMAP_PHYSICAL_DEVICES);
	}
}

/*
 * MR_ValidateMapInfo:	Validate RAID map
 * input:				Adapter instance soft state
 *
 * This function checks and validates the loaded RAID map. It returns 0 if
 * successful, and 1 otherwise.
347 */ 348 u_int8_t 349 MR_ValidateMapInfo(struct mrsas_softc *sc) 350 { 351 if (!sc) { 352 return 1; 353 } 354 MR_PopulateDrvRaidMap(sc); 355 356 MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)]; 357 MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; 358 359 u_int32_t expected_map_size; 360 361 drv_map = sc->ld_drv_map[(sc->map_id & 1)]; 362 pDrvRaidMap = &drv_map->raidMap; 363 PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) & sc->log_to_span; 364 365 if (sc->max256vdSupport) 366 expected_map_size = sizeof(MR_FW_RAID_MAP_EXT); 367 else 368 expected_map_size = 369 (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) + 370 (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount); 371 372 if (pDrvRaidMap->totalSize != expected_map_size) { 373 device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size); 374 device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP)); 375 device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize); 376 return 1; 377 } 378 if (sc->UnevenSpanSupport) { 379 printf("Updating span set\n\n"); 380 mr_update_span_set(drv_map, ldSpanInfo); 381 } 382 mrsas_update_load_balance_params(drv_map, sc->load_balance_info); 383 384 return 0; 385 } 386 387 /* 388 * 389 * Function to print info about span set created in driver from FW raid map 390 * 391 * Inputs: map 392 * ldSpanInfo: ld map span info per HBA instance 393 * 394 * 395 */ 396 #if SPAN_DEBUG 397 static int 398 getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo) 399 { 400 401 u_int8_t span; 402 u_int32_t element; 403 MR_LD_RAID *raid; 404 LD_SPAN_SET *span_set; 405 MR_QUAD_ELEMENT *quad; 406 int ldCount; 407 u_int16_t ld; 408 409 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { 410 ld = MR_TargetIdToLdGet(ldCount, map); 411 if (ld >= MAX_LOGICAL_DRIVES) { 412 continue; 413 } 414 raid = MR_LdRaidGet(ld, map); 415 printf("LD %x: span_depth=%x\n", ld, raid->spanDepth); 416 for (span = 0; span < 
raid->spanDepth; span++) 417 printf("Span=%x, number of quads=%x\n", span, 418 map->raidMap.ldSpanMap[ld].spanBlock[span]. 419 block_span_info.noElements); 420 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 421 span_set = &(ldSpanInfo[ld].span_set[element]); 422 if (span_set->span_row_data_width == 0) 423 break; 424 425 printf("Span Set %x: width=%x, diff=%x\n", element, 426 (unsigned int)span_set->span_row_data_width, 427 (unsigned int)span_set->diff); 428 printf("logical LBA start=0x%08lx, end=0x%08lx\n", 429 (long unsigned int)span_set->log_start_lba, 430 (long unsigned int)span_set->log_end_lba); 431 printf("span row start=0x%08lx, end=0x%08lx\n", 432 (long unsigned int)span_set->span_row_start, 433 (long unsigned int)span_set->span_row_end); 434 printf("data row start=0x%08lx, end=0x%08lx\n", 435 (long unsigned int)span_set->data_row_start, 436 (long unsigned int)span_set->data_row_end); 437 printf("data strip start=0x%08lx, end=0x%08lx\n", 438 (long unsigned int)span_set->data_strip_start, 439 (long unsigned int)span_set->data_strip_end); 440 441 for (span = 0; span < raid->spanDepth; span++) { 442 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 443 block_span_info.noElements >= element + 1) { 444 quad = &map->raidMap.ldSpanMap[ld]. 445 spanBlock[span].block_span_info. 446 quad[element]; 447 printf("Span=%x, Quad=%x, diff=%x\n", span, 448 element, quad->diff); 449 printf("offset_in_span=0x%08lx\n", 450 (long unsigned int)quad->offsetInSpan); 451 printf("logical start=0x%08lx, end=0x%08lx\n", 452 (long unsigned int)quad->logStart, 453 (long unsigned int)quad->logEnd); 454 } 455 } 456 } 457 } 458 return 0; 459 } 460 461 #endif 462 /* 463 * 464 * This routine calculates the Span block for given row using spanset. 465 * 466 * Inputs : HBA instance 467 * ld: Logical drive number 468 * row: Row number 469 * map: LD map 470 * 471 * Outputs : span - Span number block 472 * - Absolute Block number in the physical disk 473 * div_error - Devide error code. 
474 */ 475 476 u_int32_t 477 mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row, 478 u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error) 479 { 480 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 481 LD_SPAN_SET *span_set; 482 MR_QUAD_ELEMENT *quad; 483 u_int32_t span, info; 484 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 485 486 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 487 span_set = &(ldSpanInfo[ld].span_set[info]); 488 489 if (span_set->span_row_data_width == 0) 490 break; 491 if (row > span_set->data_row_end) 492 continue; 493 494 for (span = 0; span < raid->spanDepth; span++) 495 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 496 block_span_info.noElements >= info + 1) { 497 quad = &map->raidMap.ldSpanMap[ld]. 498 spanBlock[span]. 499 block_span_info.quad[info]; 500 if (quad->diff == 0) { 501 *div_error = 1; 502 return span; 503 } 504 if (quad->logStart <= row && 505 row <= quad->logEnd && 506 (mega_mod64(row - quad->logStart, 507 quad->diff)) == 0) { 508 if (span_blk != NULL) { 509 u_int64_t blk; 510 511 blk = mega_div64_32 512 ((row - quad->logStart), 513 quad->diff); 514 blk = (blk + quad->offsetInSpan) 515 << raid->stripeShift; 516 *span_blk = blk; 517 } 518 return span; 519 } 520 } 521 } 522 return SPAN_INVALID; 523 } 524 525 /* 526 * 527 * This routine calculates the row for given strip using spanset. 
528 * 529 * Inputs : HBA instance 530 * ld: Logical drive number 531 * Strip: Strip 532 * map: LD map 533 * 534 * Outputs : row - row associated with strip 535 */ 536 537 static u_int64_t 538 get_row_from_strip(struct mrsas_softc *sc, 539 u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map) 540 { 541 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 542 LD_SPAN_SET *span_set; 543 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 544 u_int32_t info, strip_offset, span, span_offset; 545 u_int64_t span_set_Strip, span_set_Row; 546 547 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 548 span_set = &(ldSpanInfo[ld].span_set[info]); 549 550 if (span_set->span_row_data_width == 0) 551 break; 552 if (strip > span_set->data_strip_end) 553 continue; 554 555 span_set_Strip = strip - span_set->data_strip_start; 556 strip_offset = mega_mod64(span_set_Strip, 557 span_set->span_row_data_width); 558 span_set_Row = mega_div64_32(span_set_Strip, 559 span_set->span_row_data_width) * span_set->diff; 560 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 561 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 
562 block_span_info.noElements >= info + 1) { 563 if (strip_offset >= 564 span_set->strip_offset[span]) 565 span_offset++; 566 else 567 break; 568 } 569 mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx " 570 "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip, 571 (unsigned long long)span_set_Strip, 572 (unsigned long long)span_set_Row, 573 (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset); 574 mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip, 575 (unsigned long long)span_set->data_row_start + 576 (unsigned long long)span_set_Row + (span_offset - 1)); 577 return (span_set->data_row_start + span_set_Row + (span_offset - 1)); 578 } 579 return -1LLU; 580 } 581 582 583 /* 584 * 585 * This routine calculates the Start Strip for given row using spanset. 586 * 587 * Inputs: HBA instance 588 * ld: Logical drive number 589 * row: Row number 590 * map: LD map 591 * 592 * Outputs : Strip - Start strip associated with row 593 */ 594 595 static u_int64_t 596 get_strip_from_row(struct mrsas_softc *sc, 597 u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map) 598 { 599 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 600 LD_SPAN_SET *span_set; 601 MR_QUAD_ELEMENT *quad; 602 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 603 u_int32_t span, info; 604 u_int64_t strip; 605 606 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 607 span_set = &(ldSpanInfo[ld].span_set[info]); 608 609 if (span_set->span_row_data_width == 0) 610 break; 611 if (row > span_set->data_row_end) 612 continue; 613 614 for (span = 0; span < raid->spanDepth; span++) 615 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 616 block_span_info.noElements >= info + 1) { 617 quad = &map->raidMap.ldSpanMap[ld]. 
618 spanBlock[span].block_span_info.quad[info]; 619 if (quad->logStart <= row && 620 row <= quad->logEnd && 621 mega_mod64((row - quad->logStart), 622 quad->diff) == 0) { 623 strip = mega_div64_32 624 (((row - span_set->data_row_start) 625 - quad->logStart), 626 quad->diff); 627 strip *= span_set->span_row_data_width; 628 strip += span_set->data_strip_start; 629 strip += span_set->strip_offset[span]; 630 return strip; 631 } 632 } 633 } 634 mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug - get_strip_from_row: returns invalid " 635 "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row); 636 return -1; 637 } 638 639 /* 640 * ***************************************************************************** 641 * 642 * 643 * This routine calculates the Physical Arm for given strip using spanset. 644 * 645 * Inputs : HBA instance 646 * Logical drive number 647 * Strip 648 * LD map 649 * 650 * Outputs : Phys Arm - Phys Arm associated with strip 651 */ 652 653 static u_int32_t 654 get_arm_from_strip(struct mrsas_softc *sc, 655 u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map) 656 { 657 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 658 LD_SPAN_SET *span_set; 659 PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; 660 u_int32_t info, strip_offset, span, span_offset; 661 662 for (info = 0; info < MAX_QUAD_DEPTH; info++) { 663 span_set = &(ldSpanInfo[ld].span_set[info]); 664 665 if (span_set->span_row_data_width == 0) 666 break; 667 if (strip > span_set->data_strip_end) 668 continue; 669 670 strip_offset = (u_int32_t)mega_mod64 671 ((strip - span_set->data_strip_start), 672 span_set->span_row_data_width); 673 674 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 675 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 
676 block_span_info.noElements >= info + 1) { 677 if (strip_offset >= span_set->strip_offset[span]) 678 span_offset = span_set->strip_offset[span]; 679 else 680 break; 681 } 682 mrsas_dprint(sc, MRSAS_PRL11, "LSI PRL11: get_arm_from_strip: " 683 "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld, 684 (long unsigned int)strip, (strip_offset - span_offset)); 685 return (strip_offset - span_offset); 686 } 687 688 mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: - get_arm_from_strip: returns invalid arm" 689 " for ld=%x strip=%lx\n", ld, (long unsigned int)strip); 690 691 return -1; 692 } 693 694 695 /* This Function will return Phys arm */ 696 u_int8_t 697 get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe, 698 MR_DRV_RAID_MAP_ALL * map) 699 { 700 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 701 702 /* Need to check correct default value */ 703 u_int32_t arm = 0; 704 705 switch (raid->level) { 706 case 0: 707 case 5: 708 case 6: 709 arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); 710 break; 711 case 1: 712 /* start with logical arm */ 713 arm = get_arm_from_strip(sc, ld, stripe, map); 714 arm *= 2; 715 break; 716 } 717 718 return arm; 719 } 720 721 /* 722 * 723 * This routine calculates the arm, span and block for the specified stripe and 724 * reference in stripe using spanset 725 * 726 * Inputs : Logical drive number 727 * stripRow: Stripe number 728 * stripRef: Reference in stripe 729 * 730 * Outputs : span - Span number block - Absolute Block 731 * number in the physical disk 732 */ 733 static u_int8_t 734 mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow, 735 u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, 736 RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map) 737 { 738 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 739 u_int32_t pd, arRef; 740 u_int8_t physArm, span; 741 u_int64_t row; 742 u_int8_t retval = TRUE; 743 u_int64_t *pdBlock = &io_info->pdBlock; 744 u_int16_t *pDevHandle = &io_info->devHandle; 
745 u_int32_t logArm, rowMod, armQ, arm; 746 u_int8_t do_invader = 0; 747 748 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) 749 do_invader = 1; 750 751 /* Get row and span from io_info for Uneven Span IO. */ 752 row = io_info->start_row; 753 span = io_info->start_span; 754 755 756 if (raid->level == 6) { 757 logArm = get_arm_from_strip(sc, ld, stripRow, map); 758 rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); 759 armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; 760 arm = armQ + 1 + logArm; 761 if (arm >= SPAN_ROW_SIZE(map, ld, span)) 762 arm -= SPAN_ROW_SIZE(map, ld, span); 763 physArm = (u_int8_t)arm; 764 } else 765 /* Calculate the arm */ 766 physArm = get_arm(sc, ld, span, stripRow, map); 767 768 769 arRef = MR_LdSpanArrayGet(ld, span, map); 770 pd = MR_ArPdGet(arRef, physArm, map); 771 772 if (pd != MR_PD_INVALID) 773 *pDevHandle = MR_PdDevHandleGet(pd, map); 774 else { 775 *pDevHandle = MR_PD_INVALID; 776 if ((raid->level >= 5) && ((!do_invader) || (do_invader && 777 raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) 778 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 779 else if (raid->level == 1) { 780 pd = MR_ArPdGet(arRef, physArm + 1, map); 781 if (pd != MR_PD_INVALID) 782 *pDevHandle = MR_PdDevHandleGet(pd, map); 783 } 784 } 785 786 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 787 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 788 return retval; 789 } 790 791 /* 792 * MR_BuildRaidContext: Set up Fast path RAID context 793 * 794 * This function will initiate command processing. The start/end row and strip 795 * information is calculated then the lock is acquired. This function will 796 * return 0 if region lock was acquired OR return num strips. 
 */
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid;
	u_int32_t ld, stripSize, stripe_mask;
	u_int64_t endLba, endStrip, endRow, start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	u_int8_t num_strips, numRows;
	u_int16_t ref_in_start_stripe, ref_in_end_stripe;
	u_int64_t ldStartBlock;
	u_int32_t numBlocks, ldTgtId;
	u_int8_t isRead, stripIdx;
	u_int8_t retval = 0;
	u_int8_t startlba_span = SPAN_INVALID;
	u_int64_t *pdBlock = &io_info->pdBlock;
	int error_code = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	io_info->IoforUnevenSpan = 0;
	io_info->start_span = SPAN_INVALID;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);

	/*
	 * A zero rowDataSize marks an uneven-span (PRL11) LD; it is only
	 * serviceable when UnevenSpanSupport is on and span 0 has data.
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return FALSE;
		else if (sc->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
			    " but there is _NO_ UnevenSpanSupport\n",
			    MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return FALSE;
		}
	}
	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (u_int8_t)(endStrip - start_strip + 1);	/* End strip */
	if (io_info->IoforUnevenSpan) {
		/* Uneven span: rows come from the driver span-set tables. */
		start_row = get_row_from_strip(sc, ld, start_strip, map);
		endRow = get_row_from_strip(sc, ld, endStrip, map);
		if (raid->spanDepth == 1) {
			/* Single span: the span block is just the row base. */
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else {
			startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
			    pdBlock, map, &error_code);
			if (error_code == 1) {
				mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
				    __func__, __LINE__);
				return FALSE;
			}
		}
		if (startlba_span == SPAN_INVALID) {
			mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d for row 0x%llx,"
			    "start strip %llx endSrip %llx\n", __func__,
			    __LINE__, (unsigned long long)start_row,
			    (unsigned long long)start_strip,
			    (unsigned long long)endStrip);
			return FALSE;
		}
		io_info->start_span = startlba_span;
		io_info->start_row = start_row;
		mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: Check Span number from %s %d for row 0x%llx, "
		    " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
		    __func__, __LINE__, (unsigned long long)start_row,
		    (unsigned long long)start_strip,
		    (unsigned long long)endStrip, startlba_span);
		mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
		    (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
	} else {
		/* Even span: rows are a simple division by the row width. */
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow = mega_div64_32(endStrip, raid->rowDataSize);
	}

	numRows = (u_int8_t)(endRow - start_row + 1);	/* get the row count */

	/*
	 * Calculate region info. (Assume region at start of first row, and
	 * assume this IO needs the full row - will adjust if not true.)
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = FALSE;

	if (numRows == 1) {
		/* Single-strip IO locks exactly the blocks it touches. */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For Even span region lock optimization. If the start strip
		 * is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		/* Uneven-span equivalent of the region size computation. */
		if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
		    SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/*
			 * initialize count to sectors from startRef to end
			 * of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}
		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(sc, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}
	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable, or else leave it to MR firmware
	 * to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
		    mr_spanset_get_phy_params(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map) :
		    MR_GetPhyParams(sc, ld, start_strip,
		    ref_in_start_stripe, io_info, pRAID_Context, map);
		/* If IO on an invalid Pd, then FP is not possible */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;
		return retval;
	} else if (isRead) {
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
			    mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map) :
			    MR_GetPhyParams(sc, ld, start_strip + stripIdx,
			    ref_in_start_stripe, io_info, pRAID_Context, map);
			if (!retval)
				return TRUE;
		}
	}
#if SPAN_DEBUG
	/* Just for testing what arm we get for strip. */
	get_arm_from_strip(sc, ld, start_strip, map);
#endif
	return TRUE;
}

/*
 *
 * This routine prepares spanset info from Valid Raid map and stores it into
 * the local copy of ldSpanInfo per instance data structure.
 *
 * Inputs : LD map
 *	    ldSpanInfo per HBA instance
 *
 */
void
mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo)
{
	u_int8_t span, count;
	u_int32_t element, span_row_width;
	u_int64_t span_row;
	MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	MR_QUAD_ELEMENT *quad;
	int ldCount;
	u_int16_t ld;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		/* Target IDs with no LD mapping are skipped. */
		if (ld >= MAX_LOGICAL_DRIVES)
			continue;
		raid = MR_LdRaidGet(ld, map);
		/*
		 * Quad element i across all spans of this LD forms
		 * span_set[i] in the per-instance copy.
		 */
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				/* Skip spans that don't have this element. */
				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
				    block_span_info.noElements < element + 1)
					continue;
				/* TO-DO */
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
				    spanBlock[span].block_span_info.quad[element];

				span_set->diff = quad->diff;

				/*
				 * Accumulate the aggregate row data width over
				 * every span participating in this element and
				 * record each span's strip offset within that
				 * aggregate row.
				 */
				for (count = 0, span_row_width = 0;
				    count < raid->spanDepth; count++) {
					if (map->raidMap.ldSpanMap[ld].spanBlock[count].
					    block_span_info.noElements >= element + 1) {
						span_set->strip_offset[count] = span_row_width;
						span_row_width +=
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize;
#if SPAN_DEBUG
						printf("LSI Debug span %x rowDataSize %x\n", count,
						    MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize);
#endif
					}
				}

				span_set->span_row_data_width = span_row_width;
				/*
				 * Rows covered by this quad: inclusive
				 * logStart..logEnd range divided by diff.
				 */
				span_row = mega_div64_32(((quad->logEnd -
				    quad->logStart) + quad->diff), quad->diff);

				if (element == 0) {
					/* First span set starts at zero. */
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end = (span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end = (span_row * quad->diff) - 1;
				} else {
					/*
					 * Subsequent span sets continue
					 * immediately after the previous one.
					 */
					span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]);
					span_set->log_start_lba = span_set_prev->log_end_lba + 1;
					span_set->log_end_lba = span_set->log_start_lba +
					    ((span_row << raid->stripeShift) * span_row_width) - 1;

					span_set->span_row_start = span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					    span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					    span_set_prev->data_strip_end + 1;
					span_set->data_strip_end = span_set->data_strip_start +
					    (span_row * span_row_width) - 1;

					span_set->data_row_start = span_set_prev->data_row_end + 1;
					span_set->data_row_end = span_set->data_row_start +
					    (span_row * quad->diff) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;	/* no quads remain */
		}
	}
#if SPAN_DEBUG
	getSpanInfo(map, ldSpanInfo);	/* to get span set info */
#endif
}

/*
 * mrsas_update_load_balance_params: Update load balance params
 * Inputs:	map pointer
* Load balance info 1102 * 1103 * This function updates the load balance parameters for the LD config of a two 1104 * drive optimal RAID-1. 1105 */ 1106 void 1107 mrsas_update_load_balance_params(MR_DRV_RAID_MAP_ALL * map, 1108 PLD_LOAD_BALANCE_INFO lbInfo) 1109 { 1110 int ldCount; 1111 u_int16_t ld; 1112 u_int32_t pd, arRef; 1113 MR_LD_RAID *raid; 1114 1115 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { 1116 ld = MR_TargetIdToLdGet(ldCount, map); 1117 if (ld >= MAX_LOGICAL_DRIVES) { 1118 lbInfo[ldCount].loadBalanceFlag = 0; 1119 continue; 1120 } 1121 raid = MR_LdRaidGet(ld, map); 1122 1123 /* Two drive Optimal RAID 1 */ 1124 if ((raid->level == 1) && (raid->rowSize == 2) && 1125 (raid->spanDepth == 1) 1126 && raid->ldState == MR_LD_STATE_OPTIMAL) { 1127 lbInfo[ldCount].loadBalanceFlag = 1; 1128 1129 /* Get the array on which this span is present */ 1130 arRef = MR_LdSpanArrayGet(ld, 0, map); 1131 1132 /* Get the PD */ 1133 pd = MR_ArPdGet(arRef, 0, map); 1134 /* Get dev handle from PD */ 1135 lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map); 1136 pd = MR_ArPdGet(arRef, 1, map); 1137 lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map); 1138 } else 1139 lbInfo[ldCount].loadBalanceFlag = 0; 1140 } 1141 } 1142 1143 1144 /* 1145 * mrsas_set_pd_lba: Sets PD LBA 1146 * input: io_request pointer 1147 * CDB length 1148 * io_info pointer 1149 * Pointer to CCB 1150 * Local RAID map pointer 1151 * Start block of IO Block Size 1152 * 1153 * Used to set the PD logical block address in CDB for FP IOs. 
1154 */ 1155 void 1156 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len, 1157 struct IO_REQUEST_INFO *io_info, union ccb *ccb, 1158 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, 1159 u_int32_t ld_block_size) 1160 { 1161 MR_LD_RAID *raid; 1162 u_int32_t ld; 1163 u_int64_t start_blk = io_info->pdBlock; 1164 u_int8_t *cdb = io_request->CDB.CDB32; 1165 u_int32_t num_blocks = io_info->numBlocks; 1166 u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0; 1167 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 1168 1169 /* Check if T10 PI (DIF) is enabled for this LD */ 1170 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 1171 raid = MR_LdRaidGet(ld, local_map_ptr); 1172 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 1173 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1174 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD; 1175 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN; 1176 1177 if (ccb_h->flags == CAM_DIR_OUT) 1178 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32; 1179 else 1180 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32; 1181 cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL; 1182 1183 /* LBA */ 1184 cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff); 1185 cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff); 1186 cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff); 1187 cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff); 1188 cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff); 1189 cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff); 1190 cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff); 1191 cdb[19] = (u_int8_t)(start_blk & 0xff); 1192 1193 /* Logical block reference tag */ 1194 io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag); 1195 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1196 io_request->IoFlags = 32; /* Specify 32-byte cdb */ 1197 1198 /* Transfer length */ 1199 cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff); 1200 cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff); 1201 cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff); 1202 cdb[31] = 
(u_int8_t)(num_blocks & 0xff); 1203 1204 /* set SCSI IO EEDP Flags */ 1205 if (ccb_h->flags == CAM_DIR_OUT) { 1206 io_request->EEDPFlags = 1207 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1208 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1209 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1210 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1211 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 1212 } else { 1213 io_request->EEDPFlags = 1214 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1215 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 1216 } 1217 io_request->Control |= (0x4 << 26); 1218 io_request->EEDPBlockSize = ld_block_size; 1219 } else { 1220 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 1221 if (((cdb_len == 12) || (cdb_len == 16)) && 1222 (start_blk <= 0xffffffff)) { 1223 if (cdb_len == 16) { 1224 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; 1225 flagvals = cdb[1]; 1226 groupnum = cdb[14]; 1227 control = cdb[15]; 1228 } else { 1229 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; 1230 flagvals = cdb[1]; 1231 groupnum = cdb[10]; 1232 control = cdb[11]; 1233 } 1234 1235 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1236 1237 cdb[0] = opcode; 1238 cdb[1] = flagvals; 1239 cdb[6] = groupnum; 1240 cdb[9] = control; 1241 1242 /* Transfer length */ 1243 cdb[8] = (u_int8_t)(num_blocks & 0xff); 1244 cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff); 1245 1246 io_request->IoFlags = 10; /* Specify 10-byte cdb */ 1247 cdb_len = 10; 1248 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 1249 /* Convert to 16 byte CDB for large LBA's */ 1250 switch (cdb_len) { 1251 case 6: 1252 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; 1253 control = cdb[5]; 1254 break; 1255 case 10: 1256 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16; 1257 flagvals = cdb[1]; 1258 groupnum = cdb[6]; 1259 control = cdb[9]; 1260 break; 1261 case 12: 1262 opcode = cdb[0] == READ_12 ? 
READ_16 : WRITE_16; 1263 flagvals = cdb[1]; 1264 groupnum = cdb[10]; 1265 control = cdb[11]; 1266 break; 1267 } 1268 1269 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1270 1271 cdb[0] = opcode; 1272 cdb[1] = flagvals; 1273 cdb[14] = groupnum; 1274 cdb[15] = control; 1275 1276 /* Transfer length */ 1277 cdb[13] = (u_int8_t)(num_blocks & 0xff); 1278 cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff); 1279 cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff); 1280 cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff); 1281 1282 io_request->IoFlags = 16; /* Specify 16-byte cdb */ 1283 cdb_len = 16; 1284 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) { 1285 /* convert to 10 byte CDB */ 1286 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10; 1287 control = cdb[5]; 1288 1289 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1290 cdb[0] = opcode; 1291 cdb[9] = control; 1292 1293 /* Set transfer length */ 1294 cdb[8] = (u_int8_t)(num_blocks & 0xff); 1295 cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff); 1296 1297 /* Specify 10-byte cdb */ 1298 cdb_len = 10; 1299 } 1300 /* Fall through normal case, just load LBA here */ 1301 u_int8_t val = cdb[1] & 0xE0; 1302 switch (cdb_len) { 1303 case 6: 1304 cdb[3] = (u_int8_t)(start_blk & 0xff); 1305 cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff); 1306 cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f); 1307 break; 1308 case 10: 1309 cdb[5] = (u_int8_t)(start_blk & 0xff); 1310 cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff); 1311 cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff); 1312 cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff); 1313 break; 1314 case 12: 1315 cdb[5] = (u_int8_t)(start_blk & 0xff); 1316 cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff); 1317 cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff); 1318 cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff); 1319 break; 1320 case 16: 1321 cdb[9] = (u_int8_t)(start_blk & 0xff); 1322 cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff); 1323 cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff); 1324 cdb[6] = (u_int8_t)((start_blk >> 
24) & 0xff); 1325 cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff); 1326 cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff); 1327 cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff); 1328 cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff); 1329 break; 1330 } 1331 } 1332 } 1333 1334 /* 1335 * mrsas_get_best_arm: Determine the best spindle arm 1336 * Inputs: Load balance info 1337 * 1338 * This function determines and returns the best arm by looking at the 1339 * parameters of the last PD access. 1340 */ 1341 u_int8_t 1342 mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm, 1343 u_int64_t block, u_int32_t count) 1344 { 1345 u_int16_t pend0, pend1; 1346 u_int64_t diff0, diff1; 1347 u_int8_t bestArm; 1348 1349 /* get the pending cmds for the data and mirror arms */ 1350 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]); 1351 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]); 1352 1353 /* Determine the disk whose head is nearer to the req. block */ 1354 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]); 1355 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]); 1356 bestArm = (diff0 <= diff1 ? 0 : 1); 1357 1358 if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16)) 1359 bestArm ^= 1; 1360 1361 /* Update the last accessed block on the correct pd */ 1362 lbInfo->last_accessed_block[bestArm] = block + count - 1; 1363 1364 return bestArm; 1365 } 1366 1367 /* 1368 * mrsas_get_updated_dev_handle: Get the update dev handle 1369 * Inputs: Load balance info io_info pointer 1370 * 1371 * This function determines and returns the updated dev handle. 1372 */ 1373 u_int16_t 1374 mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, 1375 struct IO_REQUEST_INFO *io_info) 1376 { 1377 u_int8_t arm, old_arm; 1378 u_int16_t devHandle; 1379 1380 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 
	    0 : 1;

	/* get best new arm */
	arm = mrsas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
	devHandle = lbInfo->raid1DevHandle[arm];
	/* Account the outstanding command against the chosen arm. */
	atomic_inc(&lbInfo->scsi_pending_cmds[arm]);

	return devHandle;
}

/*
 * MR_GetPhyParams:	Calculates arm, span, and block
 * Inputs:	Adapter soft state
 *		Logical drive number (LD)
 *		Stripe number(stripRow)
 *		Reference in stripe (stripRef)
 *
 * Outputs:	Absolute Block number in the physical disk
 *
 * This routine calculates the arm, span and block for the specified stripe and
 * reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	int error_code = 0;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t rowMod, armQ, arm, logArm;
	u_int8_t do_invader = 0;

	/* Invader/Fury controllers take different region-lock handling below. */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
		do_invader = 1;

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		logArm = mega_mod64(stripRow, raid->rowDataSize);
		/* Guard against division by zero from a corrupt map. */
		if (raid->rowSize == 0)
			return FALSE;
		rowMod = mega_mod64(row, raid->rowSize);	/* get logical row mod */
		armQ = raid->rowSize - 1 - rowMod;	/* index of Q drive */
		arm = armQ + 1 + logArm;/* data always logically follows Q */
		if (arm >= raid->rowSize)	/* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u_int8_t)arm;
	} else {
		/* Guard against division by zero from a corrupt map. */
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
		    raid->modFactor), map);
	}

	if (raid->spanDepth == 1) {
		/* Single span: block offset derives directly from the row. */
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
		if (error_code == 1)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);

	pd = MR_ArPdGet(arRef, physArm, map);	/* Get the Pd. */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;	/* set dev handle as invalid. */
		/*
		 * RAID 5/6 with a missing PD needs an exclusive region lock
		 * (except on Invader/Fury when reads take no region lock).
		 */
		if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd. */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* Absolute PD block = span start + row offset + offset in strip. */
	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	return retval;
}

/*
 * MR_GetSpanBlock:	Calculates span block
 * Inputs:	LD
 *		row PD
 *		span block
 *		RAID map pointer
 *
 * Outputs:	Span number Error code
 *
 * This routine calculates the span from the span block info.
1487 */ 1488 u_int32_t 1489 MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk, 1490 MR_DRV_RAID_MAP_ALL * map, int *div_error) 1491 { 1492 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); 1493 MR_QUAD_ELEMENT *quad; 1494 MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 1495 u_int32_t span, j; 1496 u_int64_t blk, debugBlk; 1497 1498 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { 1499 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { 1500 quad = &pSpanBlock->block_span_info.quad[j]; 1501 if (quad->diff == 0) { 1502 *div_error = 1; 1503 return span; 1504 } 1505 if (quad->logStart <= row && row <= quad->logEnd && 1506 (mega_mod64(row - quad->logStart, quad->diff)) == 0) { 1507 if (span_blk != NULL) { 1508 blk = mega_div64_32((row - quad->logStart), quad->diff); 1509 debugBlk = blk; 1510 blk = (blk + quad->offsetInSpan) << raid->stripeShift; 1511 *span_blk = blk; 1512 } 1513 return span; 1514 } 1515 } 1516 } 1517 return span; 1518 } 1519