xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 3642298923e528d795e3a30ec165d2b469e28b40)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <dev/mpt/mpt.h>
39 #include <dev/mpt/mpt_raid.h>
40 
41 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
42 #include "dev/mpt/mpilib/mpi_raid.h"
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_sim.h>
47 #include <cam/cam_xpt_sim.h>
48 
49 #include <cam/cam_periph.h>
50 
51 #include <sys/callout.h>
52 #include <sys/kthread.h>
53 #include <sys/sysctl.h>
54 
55 #include <machine/stdarg.h>
56 
/*
 * Decoded result of a RAID action command.  The reply frame's action
 * data and status are copied here by mpt_raid_reply_frame_handler() so
 * the issuer can inspect them after the reply frame is released.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * The action result is stored in the request's virtual buffer,
 * immediately after the MSG_RAID_ACTION_REQUEST message itself.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Strip the status-flag bits, leaving only the IOC status code. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
71 
72 
/* Personality entry points registered with the core MPT driver. */
static mpt_probe_handler_t	mpt_raid_probe;
static mpt_attach_handler_t	mpt_raid_attach;
static mpt_event_handler_t	mpt_raid_event;
static mpt_shutdown_handler_t	mpt_raid_shutdown;
static mpt_reset_handler_t	mpt_raid_ioc_reset;
static mpt_detach_handler_t	mpt_raid_detach;

static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);

/* Forward declarations for file-local helpers. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static timeout_t mpt_raid_timer;
static timeout_t mpt_raid_quiesce_timeout;
#if UNUSED
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *mpt,
			    struct mpt_raid_volume *mpt_vol);
static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
				   struct mpt_raid_volume *mpt_vol,
				   struct cam_path *path);
static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);

/* Reply-handler id assigned by mpt_register_handler() at attach time. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
114 
115 const char *
116 mpt_vol_type(struct mpt_raid_volume *vol)
117 {
118 	switch (vol->config_page->VolumeType) {
119 	case MPI_RAID_VOL_TYPE_IS:
120 		return ("RAID-0");
121 	case MPI_RAID_VOL_TYPE_IME:
122 		return ("RAID-1E");
123 	case MPI_RAID_VOL_TYPE_IM:
124 		return ("RAID-1");
125 	default:
126 		return ("Unknown");
127 	}
128 }
129 
130 const char *
131 mpt_vol_state(struct mpt_raid_volume *vol)
132 {
133 	switch (vol->config_page->VolumeStatus.State) {
134 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
135 		return ("Optimal");
136 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
137 		return ("Degraded");
138 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
139 		return ("Failed");
140 	default:
141 		return ("Unknown");
142 	}
143 }
144 
145 const char *
146 mpt_disk_state(struct mpt_raid_disk *disk)
147 {
148 	switch (disk->config_page.PhysDiskStatus.State) {
149 	case MPI_PHYSDISK0_STATUS_ONLINE:
150 		return ("Online");
151 	case MPI_PHYSDISK0_STATUS_MISSING:
152 		return ("Missing");
153 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
154 		return ("Incompatible");
155 	case MPI_PHYSDISK0_STATUS_FAILED:
156 		return ("Failed");
157 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
158 		return ("Initializing");
159 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
160 		return ("Offline Requested");
161 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
162 		return ("Failed per Host Request");
163 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
164 		return ("Offline");
165 	default:
166 		return ("Unknown");
167 	}
168 }
169 
/*
 * printf-style console message prefixed with the adapter unit and the
 * volume's index, bus and target id.
 *
 * NOTE(review): the unit name is printed twice -- once bare and once
 * inside the parenthesized tuple -- producing "name:volN(name:bus:id)".
 * Looks intentional, but confirm against existing log output.
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* Volume index is derived from the position in the raid_volumes array. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
183 
/*
 * printf-style console message prefixed with the disk's identity.  If the
 * disk is a member of a volume, the prefix names the volume and member
 * number; otherwise it names the physical bus and target id.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
204 
205 static void
206 mpt_raid_async(void *callback_arg, u_int32_t code,
207 	       struct cam_path *path, void *arg)
208 {
209 	struct mpt_softc *mpt;
210 
211 	mpt = (struct mpt_softc*)callback_arg;
212 	switch (code) {
213 	case AC_FOUND_DEVICE:
214 	{
215 		struct ccb_getdev *cgd;
216 		struct mpt_raid_volume *mpt_vol;
217 
218 		cgd = (struct ccb_getdev *)arg;
219 		if (cgd == NULL)
220 			break;
221 
222 		mpt_lprt(mpt, MPT_PRT_DEBUG, " Callback for %d\n",
223 			 cgd->ccb_h.target_id);
224 
225 		RAID_VOL_FOREACH(mpt, mpt_vol) {
226 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
227 				continue;
228 
229 			if (mpt_vol->config_page->VolumeID
230 			 == cgd->ccb_h.target_id) {
231 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
232 				break;
233 			}
234 		}
235 	}
236 	default:
237 		break;
238 	}
239 }
240 
241 int
242 mpt_raid_probe(struct mpt_softc *mpt)
243 {
244 	if (mpt->ioc_page2 == NULL
245 	 || mpt->ioc_page2->MaxPhysDisks == 0)
246 		return (ENODEV);
247 	return (0);
248 }
249 
/*
 * Attach the RAID personality: register our reply handler, start the
 * RAID monitoring thread, and register for CAM AC_FOUND_DEVICE async
 * events so new volume members get their queue depth adjusted.
 *
 * Returns 0 on success or an errno; on failure mpt_raid_detach() undoes
 * whatever was set up.
 */
int
mpt_raid_attach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t	 handler;
	int		 error;

	mpt_callout_init(&mpt->raid_timer);

	handler.reply_handler = mpt_raid_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &raid_handler_id);
	if (error != 0)
		goto cleanup;

	error = mpt_spawn_raid_thread(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
		goto cleanup;
	}

	/* Ask CAM to call mpt_raid_async() on device-found events. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		/* Non-fatal: queue depths just won't be auto-adjusted. */
		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
			"CAM async handler.\n");
	}

	mpt_raid_sysctl_attach(mpt);
	return (0);
cleanup:
	mpt_raid_detach(mpt);
	return (error);
}
288 
/*
 * Tear down the RAID personality: stop the timer and monitoring thread,
 * deregister the reply handler, and cancel the CAM async callback by
 * re-registering it with an empty event mask.
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	mpt_terminate_raid_thread(mpt);

	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 removes our async registration. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
}
308 
/* IOC reset handler; no RAID-specific recovery is implemented yet. */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
314 
/*
 * Event descriptions, indexed by the MPI_EVENT_RAID_RC_* reason code
 * (0-based).  Codes >= NUM_ELEMENTS(raid_event_txt) are reported as
 * "Unhandled" by mpt_raid_event().
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
330 
331 static int
332 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
333 	       MSG_EVENT_NOTIFY_REPLY *msg)
334 {
335 	EVENT_DATA_RAID *raid_event;
336 	struct mpt_raid_volume *mpt_vol;
337 	struct mpt_raid_disk *mpt_disk;
338 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
339 	int i;
340 	int print_event;
341 
342 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID)
343 		return (/*handled*/0);
344 
345 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
346 
347 	mpt_vol = NULL;
348 	vol_pg = NULL;
349 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
350 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
351 			mpt_vol = &mpt->raid_volumes[i];
352 			vol_pg = mpt_vol->config_page;
353 
354 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
355 				continue;
356 
357 			if (vol_pg->VolumeID == raid_event->VolumeID
358 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
359 				break;
360 		}
361 		if (i >= mpt->ioc_page2->MaxVolumes) {
362 			mpt_vol = NULL;
363 			vol_pg = NULL;
364 		}
365 	}
366 
367 	mpt_disk = NULL;
368 	if (raid_event->PhysDiskNum != 0xFF
369 	 && mpt->raid_disks != NULL) {
370 		mpt_disk = mpt->raid_disks
371 			 + raid_event->PhysDiskNum;
372 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
373 			mpt_disk = NULL;
374 	}
375 
376 	print_event = 1;
377 	switch(raid_event->ReasonCode) {
378 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
379 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
380 		break;
381 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
382 		if (mpt_vol != NULL) {
383 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
384 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
385 			} else {
386 				/*
387 				 * Coalesce status messages into one
388 				 * per background run of our RAID thread.
389 				 * This removes "spurious" status messages
390 				 * from our output.
391 				 */
392 				print_event = 0;
393 			}
394 		}
395 		break;
396 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
397 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
398 		mpt->raid_rescan++;
399 		if (mpt_vol != NULL)
400 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
401 		break;
402 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
403 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
404 		mpt->raid_rescan++;
405 		break;
406 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
407 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
408 		mpt->raid_rescan++;
409 		if (mpt_disk != NULL)
410 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
411 		break;
412 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
413 		mpt->raid_rescan++;
414 		break;
415 	case MPI_EVENT_RAID_RC_SMART_DATA:
416 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
417 		break;
418 	}
419 
420 	if (print_event) {
421 		if (mpt_disk != NULL) {
422 			mpt_disk_prt(mpt, mpt_disk, "");
423 		} else if (mpt_vol != NULL) {
424 			mpt_vol_prt(mpt, mpt_vol, "");
425 		} else {
426 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
427 				raid_event->VolumeID);
428 
429 			if (raid_event->PhysDiskNum != 0xFF)
430 				mpt_prtc(mpt, ":%d): ",
431 					 raid_event->PhysDiskNum);
432 			else
433 				mpt_prtc(mpt, "): ");
434 		}
435 
436 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
437 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
438 				 raid_event->ReasonCode);
439 		else
440 			mpt_prtc(mpt, "%s\n",
441 				 raid_event_txt[raid_event->ReasonCode]);
442 	}
443 
444 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
445 		/* XXX Use CAM's print sense for this... */
446 		if (mpt_disk != NULL)
447 			mpt_disk_prt(mpt, mpt_disk, "");
448 		else
449 			mpt_prt(mpt, "Volume(%d:%d:%d: ");
450 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x\n",
451 			 raid_event->ASC, raid_event->ASCQ);
452 	}
453 
454 	mpt_raid_wakeup(mpt);
455 	return (/*handled*/1);
456 }
457 
/*
 * System shutdown hook.  If member write caching was enabled only for
 * rebuilds, force it off on every volume so caches are clean at halt.
 */
static void
mpt_raid_shutdown(struct mpt_softc *mpt)
{
	struct mpt_raid_volume *mpt_vol;

	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY)
		return;

	/* mpt_verify_mwce() applies the (now OFF) policy to each volume. */
	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
	RAID_VOL_FOREACH(mpt, mpt_vol) {

		mpt_verify_mwce(mpt, mpt_vol);
	}
}
472 
/*
 * Completion handler for RAID action requests.  Parses the reply (if
 * any), marks the request done, and either wakes a synchronous waiter
 * or frees the request.
 *
 * Returns TRUE to tell the core that the reply frame may be freed.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
		       MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (/*free_reply*/TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#if NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request; otherwise release it here. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (/*free_reply*/TRUE);
}
503 
/*
 * Parse additional completion information in the reply frame for RAID
 * I/O requests.  For generic actions the action data and status are
 * copied into the request buffer for the issuer to examine later.
 *
 * Returns TRUE when the request may be freed by the caller.
 */
static int
mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
			     MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_raid_action_result *action_result;
	MSG_RAID_ACTION_REQUEST *rap;

	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
	req->IOCStatus = le16toh(reply->IOCStatus);
	/* Recover the original request to see which action this answers. */
	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;

	switch (rap->Action) {
	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
		/*
		 * Parse result, call mpt_start with ccb,
		 * release device queue.
		 * COWWWWW
		 */
		break;
	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
		/*
		 * Need additional state for transition to enabled to
		 * protect against attempts to disable??
		 */
		break;
	default:
		/* Stash the result after the request message. */
		action_result = REQ_TO_RAID_ACTION_RESULT(req);
		memcpy(&action_result->action_data, &reply->ActionData,
		       sizeof(action_result->action_data));
		action_result->action_status = reply->ActionStatus;
		break;
	}

	return (/*Free Request*/TRUE);
}
544 
545 /*
546  * Utiltity routine to perform a RAID action command;
547  */
548 int
549 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
550 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
551 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
552 		   int write, int wait)
553 {
554 	MSG_RAID_ACTION_REQUEST *rap;
555 	SGE_SIMPLE32 *se;
556 
557 	rap = req->req_vbuf;
558 	memset(rap, 0, sizeof *rap);
559 	rap->Action = Action;
560 	rap->ActionDataWord = ActionDataWord;
561 	rap->Function = MPI_FUNCTION_RAID_ACTION;
562 	rap->VolumeID = vol->config_page->VolumeID;
563 	rap->VolumeBus = vol->config_page->VolumeBus;
564 	if (disk != 0)
565 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
566 	else
567 		rap->PhysDiskNum = 0xFF;
568 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
569 	se->Address = addr;
570 	MPI_pSGE_SET_LENGTH(se, len);
571 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
572 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
573 	    MPI_SGE_FLAGS_END_OF_LIST |
574 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
575 	rap->MsgContext = htole32(req->index | raid_handler_id);
576 
577 	mpt_check_doorbell(mpt);
578 	mpt_send_cmd(mpt, req);
579 
580 	if (wait) {
581 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
582 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
583 	} else {
584 		return (0);
585 	}
586 }
587 
/*************************** RAID Status Monitoring ***************************/
/*
 * Create the per-adapter RAID monitoring kthread.  Returns 0 on success
 * or the mpt_kthread_create() error.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0)
		/* Thread never starts, so release the freeze ourselves. */
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
	return (error);
}
609 
/*
 * Ask the RAID monitoring thread to exit and wait until it has.
 * Lock is not held on entry.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	MPT_LOCK(mpt);
	if (mpt->raid_thread == NULL) {
		MPT_UNLOCK(mpt);
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread sleeps on raid_volumes; see mpt_raid_thread(). */
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
	MPT_UNLOCK(mpt);
}
631 
/*
 * Completion callback for the bus-rescan CCB issued by mpt_raid_thread();
 * releases the path and the CCB allocated there.
 */
static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	free(ccb, M_DEVBUF);
}
638 
/*
 * RAID monitoring kthread.  Sleeps until mpt_raid_wakeup() posts work,
 * then refreshes the RAID configuration pages and, when membership may
 * have changed, kicks off an asynchronous rescan of the physical disk
 * bus.  Exits when shutdwn_raid is set by mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

#if __FreeBSD_version >= 500000
	mtx_lock(&Giant);
#endif
	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			/* Interlocked sleep; wakeup() targets raid_volumes. */
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		mpt_refresh_raid_data(mpt);

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, /*run_queue*/TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;

			/* CCB and path are freed in mpt_cam_rescan_callback(). */
			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
			error = xpt_create_path(&path, xpt_periph,
						cam_sim_path(mpt->phydisk_sim),
						CAM_TARGET_WILDCARD,
						CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				free(ccb, M_DEVBUF);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
#if __FreeBSD_version >= 500000
	mtx_unlock(&Giant);
#endif
	kthread_exit(0);
}
703 
/*
 * Begin quiescing physical I/O to a RAID member disk.
 *
 * Returns CAM_REQ_CMP if the disk is already quiesced, CAM_REQ_INPROG
 * after successfully issuing the (asynchronous) quiesce action,
 * CAM_REQ_CMP_ERR if the action could not be issued, or
 * CAM_REQUEUE_REQ while a previously started quiesce is still pending.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		/* Hold new I/O to the device while the quiesce runs. */
		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		/* Guard against a quiesce that never completes. */
		ccb->ccb_h.timeout_ch =
			timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
753 
754 /* XXX Ignores that there may be multiple busses/IOCs involved. */
755 cam_status
756 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
757 {
758 	struct mpt_raid_disk *mpt_disk;
759 
760 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
761 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
762 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
763 
764 		*tgt = mpt_disk->config_page.PhysDiskID;
765 		return (0);
766 	}
767 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_map_physdisk(%d) - Not Active\n",
768 		 ccb->ccb_h.target_id);
769 	return (-1);
770 }
771 
#if UNUSED
/*
 * Enable or disable a RAID volume via a synchronous RAID action.
 * No-op when the volume is already in the requested state.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed here.  Presumably
		 * deliberate -- the IOC may still complete the request
		 * and write into its buffer -- but it does leak the
		 * request slot; confirm against mpt_wait_req() semantics.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
824 
/*
 * Bring a volume's member write-cache-enable (MWCE) setting into line
 * with the driver's configured policy (on, off, rebuild-only, or no
 * change), issuing a CHANGE_VOLUME_SETTINGS action when they differ.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce)
		 || (!resyncing && !mwce))
			return;

		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/* Toggle the bit to build the new settings word, then restore. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed on this path -- the IOC
		 * may still own it.  Confirm whether this leak is
		 * intentional (same pattern as mpt_enable_vol).
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the new setting to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}

	mpt_free_request(mpt, req);
}
909 
/*
 * Bring a volume's resync rate and resync-priority setting into line
 * with the driver's configured rate.  Rates >= 128 imply high-priority
 * resync; the priority bit is updated via CHANGE_VOLUME_SETTINGS when
 * it disagrees with the configured rate.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): req not freed here; see the same
			 * pattern in mpt_verify_mwce() -- confirm intent.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/* Toggle the priority bit to build the new word, then restore. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req not freed here either. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the new setting to our cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1000 
/*
 * Set the number of simultaneously open transactions (queue depth) for
 * a volume's device path to the driver's configured RAID queue depth.
 */
static void
mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		       struct cam_path *path)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = mpt->raid_queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP)
		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
			    "with CAM status %#x\n", crs.ccb_h.status);
}
1016 
/*
 * Print a volume's settings, hot-spare pool membership, and member
 * disk roster to the console.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit and name the ones that are set. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* HotSparePool is a bitmask of the 8 spare pools. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;

		mpt_disk = mpt->raid_disks
			 + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
			 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
		/* RAID-1 members are Primary/Secondary; others are striped. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_prtc(mpt, "%s\n",
				 mpt_disk->member_number == 0
			       ? "Primary" : "Secondary");
		else
			mpt_prtc(mpt, "Stripe Position %d\n",
				 mpt_disk->member_number);
	}
}
1078 
1079 static void
1080 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1081 {
1082 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1083 	u_int i;
1084 
1085 	disk_pg = &mpt_disk->config_page;
1086 	mpt_disk_prt(mpt, mpt_disk,
1087 		     "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1088 		     device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1089 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1090 		     /*bus*/1, mpt_disk - mpt->raid_disks);
1091 
1092 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1093 		return;
1094 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1095 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1096 		   ? ":" : "s:");
1097 	for (i = 0; i < 8; i++) {
1098 		u_int mask;
1099 
1100 		mask = 0x1 << i;
1101 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1102 			continue;
1103 		mpt_prtc(mpt, " %d", i);
1104 	}
1105 	mpt_prtc(mpt, "\n");
1106 }
1107 
1108 static void
1109 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1110 		      IOC_3_PHYS_DISK *ioc_disk)
1111 {
1112 	int rv;
1113 
1114 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1115 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1116 				 &mpt_disk->config_page.Header,
1117 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1118 	if (rv != 0) {
1119 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1120 			"Failed to read RAID Disk Hdr(%d)\n",
1121 		 	ioc_disk->PhysDiskNum);
1122 		return;
1123 	}
1124 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1125 				   &mpt_disk->config_page.Header,
1126 				   sizeof(mpt_disk->config_page),
1127 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1128 	if (rv != 0)
1129 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1130 			"Failed to read RAID Disk Page(%d)\n",
1131 		 	ioc_disk->PhysDiskNum);
1132 }
1133 
/*
 * Re-read a RAID volume's configuration page and refresh the in-core
 * per-disk membership data.  If a resync is in progress, also fetch
 * the resync progress indicator from the firmware via a RAID action
 * request and cache it in mpt_vol->sync_progress.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Invalidate cached state until the refresh completes. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
				 /*PageNumber*/0, ioc_vol->VolumePageNumber,
				 &vol_pg->Header, /*sleep_ok*/TRUE,
				 /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Hdr(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
				   &vol_pg->Header, mpt->raid_page0_len,
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Page(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/*
		 * For mirrors (IM) the firmware map is 1-based; normalize
		 * to 0 == Primary, 1 == Secondary.
		 */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_disk->member_number--;
	}

	/* Nothing more to do unless a resync is underway. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_INDICATOR_STRUCT,
				/*ActionWord*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is intentionally not freed here —
		 * presumably because the firmware may still complete it
		 * later; confirm against mpt_issue_raid_req's contract.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch timedout!\n");
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	/* Cache the progress data only on a fully successful reply. */
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
	} else {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1211 
1212 /*
1213  * Update in-core information about RAID support.  We update any entries
1214  * that didn't previously exists or have been marked as needing to
1215  * be updated by our event handler.  Interesting changes are displayed
1216  * to the console.
1217  */
1218 void
1219 mpt_refresh_raid_data(struct mpt_softc *mpt)
1220 {
1221 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1222 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1223 	IOC_3_PHYS_DISK *ioc_disk;
1224 	IOC_3_PHYS_DISK *ioc_last_disk;
1225 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1226 	size_t len;
1227 	int rv;
1228 	int i;
1229 
1230 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL)
1231 		return;
1232 
1233 	/*
1234 	 * Mark all items as unreferrened by the configuration.
1235 	 * This allows us to find, report, and discard stale
1236 	 * entries.
1237 	 */
1238 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++)
1239 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1240 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++)
1241 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1242 
1243 	/*
1244 	 * Get Physical Disk information.
1245 	 */
1246 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1247 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1248 				   &mpt->ioc_page3->Header, len,
1249 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1250 	if (rv) {
1251 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1252 			"Failed to read IOC Page 3\n");
1253 		return;
1254 	}
1255 
1256 	ioc_disk = mpt->ioc_page3->PhysDisk;
1257 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1258 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1259 		struct mpt_raid_disk *mpt_disk;
1260 
1261 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1262 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1263 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1264 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1265 
1266 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1267 
1268 		}
1269 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1270 		mpt->raid_rescan++;
1271 	}
1272 
1273 	/*
1274 	 * Refresh volume data.
1275 	 */
1276 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1277 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1278 				   &mpt->ioc_page2->Header, len,
1279 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1280 	if (rv) {
1281 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1282 			"Failed to read IOC Page 2\n");
1283 		return;
1284 	}
1285 
1286 	ioc_vol = mpt->ioc_page2->RaidVolume;
1287 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1288 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1289 		struct mpt_raid_volume *mpt_vol;
1290 
1291 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1292 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1293 		vol_pg = mpt_vol->config_page;
1294 		if (vol_pg == NULL)
1295 			continue;
1296 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1297 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1298 		 || (vol_pg->VolumeStatus.Flags
1299 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1300 
1301 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1302 		}
1303 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1304 	}
1305 
1306 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1307 		struct mpt_raid_volume *mpt_vol;
1308 		uint64_t total;
1309 		uint64_t left;
1310 		int m;
1311 		u_int prio;
1312 
1313 		mpt_vol = &mpt->raid_volumes[i];
1314 
1315 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1316 			continue;
1317 
1318 		vol_pg = mpt_vol->config_page;
1319 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1320 		 == MPT_RVF_ANNOUNCED) {
1321 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1322 			mpt_vol->flags = 0;
1323 			continue;
1324 		}
1325 
1326 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1327 
1328 			mpt_announce_vol(mpt, mpt_vol);
1329 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1330 		}
1331 
1332 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1333 			continue;
1334 
1335 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1336 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1337 			    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1338 		mpt_verify_mwce(mpt, mpt_vol);
1339 
1340 		if (vol_pg->VolumeStatus.Flags == 0)
1341 			continue;
1342 
1343 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1344 		for (m = 1; m <= 0x80; m <<= 1) {
1345 			switch (vol_pg->VolumeStatus.Flags & m) {
1346 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1347 				mpt_prtc(mpt, " Enabled");
1348 				break;
1349 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1350 				mpt_prtc(mpt, " Quiesced");
1351 				break;
1352 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1353 				mpt_prtc(mpt, " Re-Syncing");
1354 				break;
1355 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1356 				mpt_prtc(mpt, " Inactive");
1357 				break;
1358 			default:
1359 				break;
1360 			}
1361 		}
1362 		mpt_prtc(mpt, " )\n");
1363 
1364 		if ((vol_pg->VolumeStatus.Flags
1365 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1366 			continue;
1367 
1368 		mpt_verify_resync_rate(mpt, mpt_vol);
1369 
1370 		left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1371 		total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1372 		if (vol_pg->ResyncRate != 0) {
1373 
1374 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1375 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1376 			    prio / 1000, prio % 1000);
1377 		} else {
1378 			prio = vol_pg->VolumeSettings.Settings
1379 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1380 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1381 			    prio ? "High" : "Low");
1382 		}
1383 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1384 			    "blocks remaining\n", (uintmax_t)left,
1385 			    (uintmax_t)total);
1386 
1387 		/* Periodically report on sync progress. */
1388 		mpt_schedule_raid_refresh(mpt);
1389 	}
1390 
1391 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1392 		struct mpt_raid_disk *mpt_disk;
1393 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1394 		int m;
1395 
1396 		mpt_disk = &mpt->raid_disks[i];
1397 		disk_pg = &mpt_disk->config_page;
1398 
1399 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1400 			continue;
1401 
1402 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1403 		 == MPT_RDF_ANNOUNCED) {
1404 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1405 			mpt_disk->flags = 0;
1406 			mpt->raid_rescan++;
1407 			continue;
1408 		}
1409 
1410 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1411 
1412 			mpt_announce_disk(mpt, mpt_disk);
1413 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1414 		}
1415 
1416 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1417 			continue;
1418 
1419 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1420 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1421 		if (disk_pg->PhysDiskStatus.Flags == 0)
1422 			continue;
1423 
1424 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1425 		for (m = 1; m <= 0x80; m <<= 1) {
1426 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1427 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1428 				mpt_prtc(mpt, " Out-Of-Sync");
1429 				break;
1430 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1431 				mpt_prtc(mpt, " Quiesced");
1432 				break;
1433 			default:
1434 				break;
1435 			}
1436 		}
1437 		mpt_prtc(mpt, " )\n");
1438 	}
1439 }
1440 
/*
 * Callout handler for the RAID status timer: take the softc lock and
 * wake the RAID worker so it refreshes RAID state.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1451 
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/*
	 * XXX Unimplemented stub (marked "COWWWW" by the original
	 * author): a quiesce that times out should complete the
	 * associated CCB with an error status.  TODO: implement.
	 */
}
1458 
/*
 * (Re)arm the RAID status timer so mpt_raid_timer fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL, triggering another refresh/report
 * cycle (used to report resync progress periodically).
 */
void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{
	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1465 
1466 static int
1467 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1468 {
1469 	struct mpt_raid_volume *mpt_vol;
1470 
1471 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1472 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1473 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1474 		return (EINVAL);
1475 
1476 	MPT_LOCK(mpt);
1477 	mpt->raid_resync_rate = rate;
1478 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1479 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1480 			continue;
1481 		mpt_verify_resync_rate(mpt, mpt_vol);
1482 	}
1483 	MPT_UNLOCK(mpt);
1484 	return (0);
1485 }
1486 
1487 static int
1488 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1489 {
1490 	struct mpt_raid_volume *mpt_vol;
1491 
1492 	if (vol_queue_depth > 255
1493 	 || vol_queue_depth < 1)
1494 		return (EINVAL);
1495 
1496 	MPT_LOCK(mpt);
1497 	mpt->raid_queue_depth = vol_queue_depth;
1498 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1499 		struct cam_path *path;
1500 		int error;
1501 
1502 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1503 			continue;
1504 
1505 		mpt->raid_rescan = 0;
1506 
1507 		error = xpt_create_path(&path, xpt_periph,
1508 					cam_sim_path(mpt->sim),
1509 					mpt_vol->config_page->VolumeID,
1510 					/*lun*/0);
1511 		if (error != CAM_REQ_CMP) {
1512 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1513 			continue;
1514 		}
1515 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1516 		xpt_free_path(path);
1517 	}
1518 	MPT_UNLOCK(mpt);
1519 	return (0);
1520 }
1521 
1522 static int
1523 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1524 {
1525 	struct mpt_raid_volume *mpt_vol;
1526 	int force_full_resync;
1527 
1528 	MPT_LOCK(mpt);
1529 	if (mwce == mpt->raid_mwce_setting) {
1530 		MPT_UNLOCK(mpt);
1531 		return (0);
1532 	}
1533 
1534 	/*
1535 	 * Catch MWCE being left on due to a failed shutdown.  Since
1536 	 * sysctls cannot be set by the loader, we treat the first
1537 	 * setting of this varible specially and force a full volume
1538 	 * resync if MWCE is enabled and a resync is in progress.
1539 	 */
1540 	force_full_resync = 0;
1541 	if (mpt->raid_mwce_set == 0
1542 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1543 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1544 		force_full_resync = 1;
1545 
1546 	mpt->raid_mwce_setting = mwce;
1547 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1548 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1549 		int resyncing;
1550 		int mwce;
1551 
1552 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1553 			continue;
1554 
1555 		vol_pg = mpt_vol->config_page;
1556 		resyncing = vol_pg->VolumeStatus.Flags
1557 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1558 		mwce = vol_pg->VolumeSettings.Settings
1559 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1560 		if (force_full_resync && resyncing && mwce) {
1561 
1562 			/*
1563 			 * XXX disable/enable volume should force a resync,
1564 			 *     but we'll need to queice, drain, and restart
1565 			 *     I/O to do that.
1566 			 */
1567 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1568 				    "detected.  Suggest full resync.\n");
1569 		}
1570 		mpt_verify_mwce(mpt, mpt_vol);
1571 	}
1572 	mpt->raid_mwce_set = 1;
1573 	MPT_UNLOCK(mpt);
1574 	return (0);
1575 }
1576 
/*
 * User-visible names for the member WCE settings.  Indexed by the
 * mpt->raid_mwce_setting value and matched back to an index by
 * mpt_raid_sysctl_vol_member_wce(); order must track the
 * mpt_raid_mwce_t values used in mpt_raid_set_vol_mwce().
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1584 
1585 static int
1586 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1587 {
1588 	char inbuf[20];
1589 	struct mpt_softc *mpt;
1590 	const char *str;
1591 	int error;
1592 	u_int size;
1593 	u_int i;
1594 
1595 	GIANT_REQUIRED;
1596 	mpt = (struct mpt_softc *)arg1;
1597 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1598 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1599 	if (error || !req->newptr)
1600 		return (error);
1601 
1602 	size = req->newlen - req->newidx;
1603 	if (size >= sizeof(inbuf))
1604 		return (EINVAL);
1605 
1606 	error = SYSCTL_IN(req, inbuf, size);
1607 	if (error)
1608 		return (error);
1609 	inbuf[size] = '\0';
1610 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1611 
1612 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0)
1613 			return (mpt_raid_set_vol_mwce(mpt, i));
1614 	}
1615 	return (EINVAL);
1616 }
1617 
1618 static int
1619 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1620 {
1621 	struct mpt_softc *mpt;
1622 	u_int raid_resync_rate;
1623 	int error;
1624 
1625 	GIANT_REQUIRED;
1626 	mpt = (struct mpt_softc *)arg1;
1627 	raid_resync_rate = mpt->raid_resync_rate;
1628 
1629 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1630 	if (error || !req->newptr)
1631 		return error;
1632 
1633 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1634 }
1635 
1636 static int
1637 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1638 {
1639 	struct mpt_softc *mpt;
1640 	u_int raid_queue_depth;
1641 	int error;
1642 
1643 	GIANT_REQUIRED;
1644 	mpt = (struct mpt_softc *)arg1;
1645 	raid_queue_depth = mpt->raid_queue_depth;
1646 
1647 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1648 	if (error || !req->newptr)
1649 		return error;
1650 
1651 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1652 }
1653 
/*
 * Register the per-device RAID tunables under the device's sysctl
 * tree: vol_member_wce (string), vol_queue_depth (int), and
 * vol_resync_rate (int).
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
}
1675