1 /*
2 * **********************************************************************
3 *
4 * ld_pd_map.c
5 *
6 * Solaris MegaRAID device driver for SAS2.0 controllers
7 * Copyright (c) 2008-2012, LSI Logic Corporation.
8 * All rights reserved.
9 *
10 * Version:
11 * Author:
12 * Swaminathan K S
13 * Arun Chandrashekhar
14 * Manju R
15 * Rasheed
16 * Shakeel Bukhari
17 *
18 *
19 * This module contains functions for device drivers
20 * to get pd-ld mapping information.
21 *
22 * **********************************************************************
23 */
24
25 #include <sys/scsi/scsi.h>
26 #include "mr_sas.h"
27 #include "ld_pd_map.h"
28
29 /*
30 * This function will check if FAST IO is possible on this logical drive
31 * by checking the EVENT information available in the driver
32 */
33 #define MR_LD_STATE_OPTIMAL 3
34 #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
35
36 static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
37 PLD_LOAD_BALANCE_INFO);
38
39 #define FALSE 0
40 #define TRUE 1
41
42 typedef U64 REGION_KEY;
43 typedef U32 REGION_LEN;
44 extern int debug_level_g;
45
46
47 MR_LD_RAID
MR_LdRaidGet(U32 ld,MR_FW_RAID_MAP_ALL * map)48 *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
49 {
50 return (&map->raidMap.ldSpanMap[ld].ldRaid);
51 }
52
53 U16
MR_GetLDTgtId(U32 ld,MR_FW_RAID_MAP_ALL * map)54 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
55 {
56 return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
57 }
58
59
60 static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(U32 ld,MR_FW_RAID_MAP_ALL * map)61 MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
62 {
63 return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
64 }
65
66 static U8
MR_LdDataArmGet(U32 ld,U32 armIdx,MR_FW_RAID_MAP_ALL * map)67 MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
68 {
69 return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
70 }
71
72 static U16
MR_ArPdGet(U32 ar,U32 arm,MR_FW_RAID_MAP_ALL * map)73 MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
74 {
75 return (map->raidMap.arMapInfo[ar].pd[arm]);
76 }
77
78 static U16
MR_LdSpanArrayGet(U32 ld,U32 span,MR_FW_RAID_MAP_ALL * map)79 MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
80 {
81 return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
82 }
83
84 static U16
MR_PdDevHandleGet(U32 pd,MR_FW_RAID_MAP_ALL * map)85 MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
86 {
87 return (map->raidMap.devHndlInfo[pd].curDevHdl);
88 }
89
90 U16
MR_TargetIdToLdGet(U32 ldTgtId,MR_FW_RAID_MAP_ALL * map)91 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
92 {
93 return (map->raidMap.ldTgtIdToLd[ldTgtId]);
94 }
95
96 U16
MR_CheckDIF(U32 ldTgtId,MR_FW_RAID_MAP_ALL * map)97 MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
98 {
99 MR_LD_RAID *raid;
100 U32 ld;
101
102 ld = MR_TargetIdToLdGet(ldTgtId, map);
103
104 if (ld >= MAX_LOGICAL_DRIVES) {
105 return (FALSE);
106 }
107
108 raid = MR_LdRaidGet(ld, map);
109
110 return (raid->capability.ldPiMode == 0x8);
111 }
112
113 static MR_LD_SPAN *
MR_LdSpanPtrGet(U32 ld,U32 span,MR_FW_RAID_MAP_ALL * map)114 MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
115 {
116 return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
117 }
118
119 /*
120 * This function will validate Map info data provided by FW
121 */
122 U8
MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL * map,PLD_LOAD_BALANCE_INFO lbInfo)123 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
124 {
125 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
126 U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
127 (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);
128
129 if (pFwRaidMap->totalSize != fwsize) {
130
131 con_log(CL_ANN1, (CE_NOTE,
132 "map info structure size 0x%x is "
133 "not matching with ld count\n", fwsize));
134 /* sizeof (foo) returns size_t, which is *LONG*. */
135 con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
136 (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
137
138 return (0);
139 }
140
141 mr_update_load_balance_params(map, lbInfo);
142
143 return (1);
144 }
145
146 U32
MR_GetSpanBlock(U32 ld,U64 row,U64 * span_blk,MR_FW_RAID_MAP_ALL * map,int * div_error)147 MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
148 int *div_error)
149 {
150 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
151 MR_QUAD_ELEMENT *qe;
152 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
153 U32 span, j;
154
155 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
156 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
157 qe = &pSpanBlock->block_span_info.quads[j];
158 if (qe->diff == 0) {
159 *div_error = 1;
160 return (span);
161 }
162 if (qe->logStart <= row && row <= qe->logEnd &&
163 (((row - qe->logStart) % qe->diff)) == 0) {
164 if (span_blk != NULL) {
165 U64 blk;
166 blk = ((row - qe->logStart) /
167 (qe->diff));
168
169 blk = (blk + qe->offsetInSpan) <<
170 raid->stripeShift;
171 *span_blk = blk;
172 }
173 return (span);
174 }
175 }
176 }
177 return (span);
178 }
179
180
181 /*
182 * *************************************************************
183 *
184 * This routine calculates the arm, span and block for
185 * the specified stripe and reference in stripe.
186 *
187 * Inputs :
188 *
189 * ld - Logical drive number
190 * stripRow - Stripe number
191 * stripRef - Reference in stripe
192 *
193 * Outputs :
194 *
195 * span - Span number
196 * block - Absolute Block number in the physical disk
197 */
U8
MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
    U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	U32 pd, arRef;
	U8 physArm, span;
	U64 row;
	int error_code = 0;
	U8 retval = TRUE;
	U32 rowMod;
	U32 armQ;
	U32 arm;

	/* rowDataSize is the divisor below; firmware must never set 0. */
	ASSERT(raid->rowDataSize != 0);

	row = (stripRow / raid->rowDataSize);

	if (raid->level == 6) {
		/*
		 * RAID-6: rotate the arm selection per row so parity (Q)
		 * placement is skewed; logArm is the strip's position
		 * within the row.
		 */
		U32 logArm = (stripRow % (raid->rowDataSize));

		/* Guard the modulo below against a zero row size. */
		if (raid->rowSize == 0) {
			return (FALSE);
		}
		rowMod = (row % (raid->rowSize));
		armQ = raid->rowSize-1-rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= raid->rowSize)
			arm -= raid->rowSize;
		physArm = (U8)arm;
	} else {
		/* Other levels: arm comes from the firmware's data-arm map. */
		if (raid->modFactor == 0)
			return (FALSE);
		physArm = MR_LdDataArmGet(ld,
		    (stripRow % (raid->modFactor)), map);
	}
	if (raid->spanDepth == 1) {
		/* Single span: block offset is just the row in blocks. */
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else
		span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);

	/* MR_GetSpanBlock flags a zero quad->diff as a divide error. */
	if (error_code == 1)
		return (FALSE);

	/* Get the array on which this span is present. */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	/* Get the Pd. */
	pd = MR_ArPdGet(arRef, physArm, map);
	/* Get dev handle from Pd. */
	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	} else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		/*
		 * Degraded parity RAID (level >= 5): take an exclusive
		 * region lock.  On INVADER this is only needed when the
		 * firmware requests a region lock type for reads.
		 */
		if ((raid->level >= 5) &&
		    ((instance->device_id != PCI_DEVICE_ID_LSI_INVADER) ||
		    (instance->device_id == PCI_DEVICE_ID_LSI_INVADER &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) {
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		} else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			/* Get dev handle from Pd. */
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* Absolute block = span start + row offset + offset within strip. */
	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;

	/* Pack span and arm into the single spanArm context byte. */
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
	    physArm;

	return (retval);
}
274
275
276
277 /*
278 * ***********************************************************************
279 *
280 * MR_BuildRaidContext function
281 *
282 * This function will initiate command processing. The start/end row and strip
283 * information is calculated then the lock is acquired.
284 * This function will return 0 if region lock
285 * was acquired OR return num strips ???
286 */
287
U8
MR_BuildRaidContext(struct mrsas_instance *instance,
    struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
    MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID *raid;
	U32 ld, stripSize, stripe_mask;
	U64 endLba, endStrip, endRow;
	U64 start_row, start_strip;
	REGION_KEY regStart;
	REGION_LEN regSize;
	U8 num_strips, numRows;
	U16 ref_in_start_stripe;
	U16 ref_in_end_stripe;

	U64 ldStartBlock;
	U32 numBlocks, ldTgtId;
	U8 isRead;
	U8 retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	/* No RAID map: nothing can be translated, so no fast path. */
	if (map == NULL) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	/* Reject target ids that do not map to a valid logical drive. */
	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);
	/* Check to make sure is not dividing by zero */
	if (raid->rowDataSize == 0)
		return (FALSE);
	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 * The region lock initially covers one full stripe starting at
	 * the first row; it is narrowed or extended below.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead) {
			/*
			 * Multi-strip reads additionally require the
			 * across-stripe read capability.
			 */
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		} else {
			io_info->fpOkForIo =
			    (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
		}
	} else
		io_info->fpOkForIo = FALSE;


	/*
	 * Check for DIF support
	 */
	if (!raid->capability.ldPiMode) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	if (numRows == 1) {
		/* Single-strip I/O: lock exactly the accessed blocks. */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		/*
		 * Multi-row I/O: if the I/O starts in the last strip of
		 * the first row, the lock starts mid-stripe and the first
		 * row contributes only the strip's tail.
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
		}

		/* Interior rows each contribute a full stripe. */
		if (numRows > 2) {
			regSize += (numRows - 2) << raid->stripeShift;
		}

		/*
		 * Last row: partial if the I/O ends on the first strip of
		 * the row, otherwise a full stripe.
		 */
		if (endStrip == endRow * raid->rowDataSize) {
			regSize += ref_in_end_stripe + 1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	/*
	 * INVADER honors the firmware-requested lock type for both reads
	 * and writes; older controllers always use a shared read lock.
	 */
	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	} else {
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	}

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip,
		    ref_in_start_stripe, &io_info->pdBlock,
		    &io_info->devHandle, pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return (retval);

	} else if (isRead) {
		/*
		 * Non-FP read: still walk every strip so region-lock
		 * parameters get computed; bail out on the first failure.
		 */
		uint_t stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle,
			    pRAID_Context, map)) {
				return (TRUE);
			}
		}
	}
	return (TRUE);
}
444
445
446 void
mr_update_load_balance_params(MR_FW_RAID_MAP_ALL * map,PLD_LOAD_BALANCE_INFO lbInfo)447 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
448 PLD_LOAD_BALANCE_INFO lbInfo)
449 {
450 int ldCount;
451 U16 ld;
452 MR_LD_RAID *raid;
453
454 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
455 ld = MR_TargetIdToLdGet(ldCount, map);
456
457 if (ld >= MAX_LOGICAL_DRIVES) {
458 con_log(CL_ANN1,
459 (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
460 continue;
461 }
462
463 raid = MR_LdRaidGet(ld, map);
464
465 /* Two drive Optimal RAID 1 */
466 if ((raid->level == 1) && (raid->rowSize == 2) &&
467 (raid->spanDepth == 1) &&
468 raid->ldState == MR_LD_STATE_OPTIMAL) {
469 U32 pd, arRef;
470
471 lbInfo[ldCount].loadBalanceFlag = 1;
472
473 /* Get the array on which this span is present. */
474 arRef = MR_LdSpanArrayGet(ld, 0, map);
475
476 pd = MR_ArPdGet(arRef, 0, map); /* Get the Pd. */
477 /* Get dev handle from Pd. */
478 lbInfo[ldCount].raid1DevHandle[0] =
479 MR_PdDevHandleGet(pd, map);
480
481 pd = MR_ArPdGet(arRef, 1, map); /* Get the Pd. */
482 /* Get dev handle from Pd. */
483 lbInfo[ldCount].raid1DevHandle[1] =
484 MR_PdDevHandleGet(pd, map);
485 con_log(CL_ANN1, (CE_NOTE,
486 "mrsas: ld=%d load balancing enabled \n", ldCount));
487 } else {
488 lbInfo[ldCount].loadBalanceFlag = 0;
489 }
490 }
491 }
492
493
494 U8
megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo,U8 arm,U64 block,U32 count)495 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
496 U32 count)
497 {
498 U16 pend0, pend1;
499 U64 diff0, diff1;
500 U8 bestArm;
501
502 /* get the pending cmds for the data and mirror arms */
503 pend0 = lbInfo->scsi_pending_cmds[0];
504 pend1 = lbInfo->scsi_pending_cmds[1];
505
506 /* Determine the disk whose head is nearer to the req. block */
507 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
508 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
509 bestArm = (diff0 <= diff1 ? 0 : 1);
510
511 if ((bestArm == arm && pend0 > pend1 + 16) ||
512 (bestArm != arm && pend1 > pend0 + 16)) {
513 bestArm ^= 1;
514 }
515
516 /* Update the last accessed block on the correct pd */
517 lbInfo->last_accessed_block[bestArm] = block + count - 1;
518 return (bestArm);
519 }
520
521 U16
get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,struct IO_REQUEST_INFO * io_info)522 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
523 struct IO_REQUEST_INFO *io_info)
524 {
525 U8 arm, old_arm;
526 U16 devHandle;
527
528 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
529
530 /* get best new arm */
531 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
532 io_info->numBlocks);
533
534 devHandle = lbInfo->raid1DevHandle[arm];
535
536 lbInfo->scsi_pending_cmds[arm]++;
537
538 return (devHandle);
539 }
540