xref: /titanic_51/usr/src/uts/common/io/mr_sas/ld_pd_map.c (revision a1ed883a3c2acc53bbb9a171ef07aeb8eaf78736)
1 /*
2  * **********************************************************************
3  *
4  * ld_pd_map.c
5  *
6  * Solaris MegaRAID device driver for SAS2.0 controllers
7  * Copyright (c) 2008-2012, LSI Logic Corporation.
8  * All rights reserved.
9  *
10  * Version:
11  * Author:
12  *		Swaminathan K S
13  *		Arun Chandrashekhar
14  *		Manju R
15  *		Rasheed
16  *		Shakeel Bukhari
17  *
18  *
19  * This module contains functions for device drivers
20  * to get pd-ld mapping information.
21  *
22  * **********************************************************************
23  */
24 /*
25  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include "mr_sas.h"
30 #include "ld_pd_map.h"
31 
/*
 * NOTE(review): the comment that used to sit here claimed this section
 * "will check if FAST IO is possible ... by checking the EVENT
 * information"; no such check follows, so it appears to be left over
 * from an earlier revision.  What actually follows are the constants,
 * macros, and private declarations shared by the LD/PD mapping code.
 */
/* Firmware logical-drive state value meaning the volume is Optimal. */
#define	MR_LD_STATE_OPTIMAL 3
/* Absolute difference of two unsigned values without underflow. */
#define	ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
    PLD_LOAD_BALANCE_INFO);

#define	FALSE 0
#define	TRUE 1

/* Region-lock start key and length programmed into the RAID context. */
typedef	U64	REGION_KEY;
typedef	U32	REGION_LEN;
extern int 	debug_level_g;
48 
49 
50 MR_LD_RAID
51 *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
52 {
53 	return (&map->raidMap.ldSpanMap[ld].ldRaid);
54 }
55 
56 U16
57 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
58 {
59 	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
60 }
61 
62 
63 static MR_SPAN_BLOCK_INFO *
64 MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
65 {
66 	return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
67 }
68 
69 static U8
70 MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
71 {
72 	return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
73 }
74 
75 static U16
76 MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
77 {
78 	return (map->raidMap.arMapInfo[ar].pd[arm]);
79 }
80 
81 static U16
82 MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
83 {
84 	return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
85 }
86 
87 static U16
88 MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
89 {
90 	return (map->raidMap.devHndlInfo[pd].curDevHdl);
91 }
92 
93 U16
94 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
95 {
96 	return (map->raidMap.ldTgtIdToLd[ldTgtId]);
97 }
98 
99 U16
100 MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
101 {
102 	MR_LD_RAID	*raid;
103 	U32		ld;
104 
105 	ld = MR_TargetIdToLdGet(ldTgtId, map);
106 
107 	if (ld >= MAX_LOGICAL_DRIVES) {
108 		return (FALSE);
109 	}
110 
111 	raid = MR_LdRaidGet(ld, map);
112 
113 	return (raid->capability.ldPiMode == 0x8);
114 }
115 
116 static MR_LD_SPAN *
117 MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
118 {
119 	return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
120 }
121 
122 /*
123  * This function will validate Map info data provided by FW
124  */
125 U8
126 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
127 {
128 	MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
129 	U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
130 	    (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);
131 
132 	if (pFwRaidMap->totalSize != fwsize) {
133 
134 		con_log(CL_ANN1, (CE_NOTE,
135 		    "map info structure size 0x%x is "
136 		    "not matching with ld count\n", fwsize));
137 		/* sizeof (foo) returns size_t, which is *LONG*. */
138 		con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",\
139 		    (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));
140 
141 		return (0);
142 	}
143 
144 	mr_update_load_balance_params(map, lbInfo);
145 
146 	return (1);
147 }
148 
/*
 * Locate the span of logical drive "ld" that contains logical "row",
 * and optionally compute that row's starting block within the span.
 *
 * Each span carries a list of quad elements describing the row ranges
 * it holds (logStart..logEnd, stepped by diff).  On a match, the block
 * offset is (matching step + offsetInSpan) shifted by the LD's stripe
 * size and stored through span_blk (if non-NULL).
 *
 * Returns the matching span index.  If any quad has diff == 0,
 * *div_error is set to 1 and the current span index is returned; the
 * caller must treat the result as invalid.
 *
 * NOTE(review): if no quad matches, the fall-through return yields
 * span == raid->spanDepth, i.e. an out-of-range index with *div_error
 * untouched.  Callers appear to rely on well-formed maps to avoid
 * this; confirm before relying on the return value alone.
 */
U32
MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
    int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT	*qe;
	MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	U32		span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			qe = &pSpanBlock->block_span_info.quads[j];
			/* diff == 0 would divide by zero below; bail out. */
			if (qe->diff == 0) {
				*div_error = 1;
				return (span);
			}
			/* Row must lie in range and on a diff boundary. */
			if (qe->logStart <= row && row <= qe->logEnd &&
			    (((row - qe->logStart) % qe->diff)) == 0) {
				if (span_blk != NULL) {
					U64	blk;
					blk = ((row - qe->logStart) /
					    (qe->diff));

					blk = (blk + qe->offsetInSpan) <<
					    raid->stripeShift;
					*span_blk = blk;
				}
				return (span);
			}
		}
	}
	return (span);
}
182 
183 
184 /*
185  * *************************************************************
186  *
187  * This routine calculates the arm, span and block for
188  * the specified stripe and reference in stripe.
189  *
190  * Inputs :
191  *
192  *    ld   - Logical drive number
193  *    stripRow        - Stripe number
194  *    stripRef    - Reference in stripe
195  *
196  * Outputs :
197  *
198  *    span          - Span number
199  *    block         - Absolute Block number in the physical disk
200  */
/*
 * Returns TRUE on success; FALSE if the map would cause a division by
 * zero (rowSize/modFactor of 0, or a zero-diff quad in the span map).
 */
U8
MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
    U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	U32		pd, arRef;
	U8		physArm, span;
	U64		row;
	int		error_code = 0;
	U8		retval = TRUE;
	U32		rowMod;
	U32		armQ;
	U32		arm;
	U16		devid = instance->device_id;

	ASSERT(raid->rowDataSize != 0);

	/* Row of this strip: one row spans all data arms of the LD. */
	row = (stripRow / raid->rowDataSize);

	if (raid->level == 6) {
		/* Logical arm position of the strip within its row. */
		U32 logArm =  (stripRow % (raid->rowDataSize));

		if (raid->rowSize == 0) {
			return (FALSE);
		}
		/*
		 * RAID-6 rotates parity per row: place the logical arm
		 * just past the Q-parity arm, wrapping at rowSize.
		 */
		rowMod = (row % (raid->rowSize));
		armQ = raid->rowSize-1-rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= raid->rowSize)
			arm -= raid->rowSize;
		physArm = (U8)arm;
	} else {
		if (raid->modFactor == 0)
			return (FALSE);
		/* Other levels use the firmware data-arm lookup table. */
		physArm = MR_LdDataArmGet(ld,
		    (stripRow % (raid->modFactor)), map);
	}
	if (raid->spanDepth == 1) {
		/* Single span: block offset is simply the shifted row. */
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else
		span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);

	/* Zero-diff quad encountered: mapping is unusable. */
	if (error_code == 1)
		return (FALSE);

	/* Get the array on which this span is present. */
	arRef		= MR_LdSpanArrayGet(ld, span, map);
	/* Get the Pd. */
	pd		= MR_ArPdGet(arRef, physArm, map);
	/* Get dev handle from Pd. */
	if (pd != MR_PD_INVALID) {
		*pDevHandle	= MR_PdDevHandleGet(pd, map);
	} else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		/*
		 * NOTE(review): for a FURY controller the first clause
		 * (devid != PCI_DEVICE_ID_LSI_INVADER) is always true, so
		 * the regTypeReqOnRead check is never reached on FURY and
		 * the lock is always EXCLUSIVE for RAID >= 5.  Confirm
		 * against the LSI reference driver whether FURY was meant
		 * to be grouped with INVADER in the first clause.
		 */
		if ((raid->level >= 5) &&
		    ((devid != PCI_DEVICE_ID_LSI_INVADER) ||
		    ((devid == PCI_DEVICE_ID_LSI_INVADER ||
		    (devid == PCI_DEVICE_ID_LSI_FURY)) &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) {
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		} else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			/* Get dev handle from Pd. */
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	/* Absolute block = span start + row offset + offset in strip. */
	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;

	/* Pack span and physical arm into the RAID context field. */
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
	    physArm;

	return (retval);
}
279 
280 
281 
282 /*
283  * ***********************************************************************
284  *
285  * MR_BuildRaidContext function
286  *
287  * This function will initiate command processing.  The start/end row and strip
288  * information is calculated then the lock is acquired.
289  * This function will return 0 if region lock
290  * was acquired OR return num strips ???
291  */
292 
/*
 * Build the RAID context for an I/O: compute start/end strip and row,
 * the region-lock start and length, decide FastPath eligibility, and
 * (for FastPath-capable I/O) resolve the physical parameters.
 *
 * Returns FALSE when the map is absent, the target id is out of range,
 * or rowDataSize is 0; otherwise returns the MR_GetPhyParams() result
 * for FastPath I/O, or TRUE for firmware-path I/O.
 */
U8
MR_BuildRaidContext(struct mrsas_instance *instance,
    struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
    MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid;
	U32		ld, stripSize, stripe_mask;
	U64		endLba, endStrip, endRow;
	U64		start_row, start_strip;
	REGION_KEY	regStart;
	REGION_LEN	regSize;
	U8		num_strips, numRows;
	U16		ref_in_start_stripe;
	U16		ref_in_end_stripe;

	U64		ldStartBlock;
	U32		numBlocks, ldTgtId;
	U8		isRead;
	U8		retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	/* No RAID map yet: firmware must handle the I/O. */
	if (map == NULL) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize-1;
	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip		= ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe	= (U16)(ldStartBlock & stripe_mask);
	endLba			= ldStartBlock + numBlocks - 1;
	ref_in_end_stripe	= (U16)(endLba & stripe_mask);
	endStrip		= endLba >> raid->stripeShift;
	num_strips		= (U8)(endStrip - start_strip + 1);
	/* Check to make sure is not dividing by zero */
	if (raid->rowDataSize == 0)
		return (FALSE);
	start_row		=  (start_strip / raid->rowDataSize);
	endRow			=  (endStrip  / raid->rowDataSize);
	/* get the row count */
	numRows			= (U8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */
	/* Default: lock from the start row, one full stripe long. */
	regStart	= start_row << raid->stripeShift;
	regSize		= stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead) {
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		} else {
			io_info->fpOkForIo =
			    (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
		}
	} else
		io_info->fpOkForIo = FALSE;


	/*
	 * Check for DIF support
	 */
	if (!raid->capability.ldPiMode) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	/* Refine the region lock to cover exactly the touched blocks. */
	if (numRows == 1) {
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		/* I/O starts in the last strip of its row. */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
		regSize = stripSize - ref_in_start_stripe;
		}

		/* Whole rows strictly between the first and last row. */
		if (numRows > 2) {
			regSize += (numRows - 2) << raid->stripeShift;
		}

		/* I/O ends in the first strip of the last row. */
		if (endStrip == endRow * raid->rowDataSize) {
			regSize += ref_in_end_stripe + 1;
		} else {
			regSize += stripSize;
		}
	}

	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	/* INVADER/FURY honor the per-LD lock type for reads too. */
	if ((instance->device_id == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->device_id == PCI_DEVICE_ID_LSI_FURY)) {
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	} else {
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	}

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip,
		    ref_in_start_stripe, &io_info->pdBlock,
		    &io_info->devHandle, pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return (retval);

	} else if (isRead) {
		uint_t stripIdx;

		/*
		 * Firmware-path read: verify every strip maps cleanly;
		 * a failed mapping still returns TRUE (firmware handles it).
		 */
		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle,
			    pRAID_Context, map)) {
				return (TRUE);
			}
		}
	}
	return (TRUE);
}
450 
451 
452 void
453 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
454     PLD_LOAD_BALANCE_INFO lbInfo)
455 {
456 	int ldCount;
457 	U16 ld;
458 	MR_LD_RAID *raid;
459 
460 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
461 		ld = MR_TargetIdToLdGet(ldCount, map);
462 
463 		if (ld >= MAX_LOGICAL_DRIVES) {
464 			con_log(CL_ANN1,
465 			    (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
466 			continue;
467 		}
468 
469 		raid = MR_LdRaidGet(ld, map);
470 
471 		/* Two drive Optimal RAID 1 */
472 		if ((raid->level == 1) && (raid->rowSize == 2) &&
473 		    (raid->spanDepth == 1) &&
474 		    raid->ldState == MR_LD_STATE_OPTIMAL) {
475 			U32 pd, arRef;
476 
477 			lbInfo[ldCount].loadBalanceFlag = 1;
478 
479 			/* Get the array on which this span is present. */
480 			arRef = MR_LdSpanArrayGet(ld, 0, map);
481 
482 			pd = MR_ArPdGet(arRef, 0, map);	    /* Get the Pd. */
483 			/* Get dev handle from Pd. */
484 			lbInfo[ldCount].raid1DevHandle[0] =
485 			    MR_PdDevHandleGet(pd, map);
486 
487 			pd = MR_ArPdGet(arRef, 1, map);	    /* Get the Pd. */
488 			/* Get dev handle from Pd. */
489 			lbInfo[ldCount].raid1DevHandle[1] =
490 			    MR_PdDevHandleGet(pd, map);
491 			con_log(CL_ANN1, (CE_NOTE,
492 			    "mrsas: ld=%d load balancing enabled \n", ldCount));
493 		} else {
494 			lbInfo[ldCount].loadBalanceFlag = 0;
495 		}
496 	}
497 }
498 
499 
500 U8
501 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
502     U32 count)
503 {
504 	U16 pend0, pend1;
505 	U64 diff0, diff1;
506 	U8 bestArm;
507 
508 	/* get the pending cmds for the data and mirror arms */
509 	pend0 = lbInfo->scsi_pending_cmds[0];
510 	pend1 = lbInfo->scsi_pending_cmds[1];
511 
512 	/* Determine the disk whose head is nearer to the req. block */
513 	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
514 	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
515 	bestArm = (diff0 <= diff1 ? 0 : 1);
516 
517 	if ((bestArm == arm && pend0 > pend1 + 16) ||
518 	    (bestArm != arm && pend1 > pend0 + 16)) {
519 		bestArm ^= 1;
520 	}
521 
522 	/* Update the last accessed block on the correct pd */
523 	lbInfo->last_accessed_block[bestArm] = block + count - 1;
524 	return (bestArm);
525 }
526 
527 U16
528 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
529     struct IO_REQUEST_INFO *io_info)
530 {
531 	U8 arm, old_arm;
532 	U16 devHandle;
533 
534 	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
535 
536 	/* get best new arm */
537 	arm  = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
538 	    io_info->numBlocks);
539 
540 	devHandle = lbInfo->raid1DevHandle[arm];
541 
542 	lbInfo->scsi_pending_cmds[arm]++;
543 
544 	return (devHandle);
545 }
546