xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 43a5ec4eb41567cc92586503212743d89686d78f)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2005, WHEEL Sp. z o.o.
7  * Copyright (c) 2005 Justin T. Gibbs.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon including
18  *    a substantially similar Disclaimer requirement for further binary
19  *    redistribution.
20  * 3. Neither the names of the above listed copyright holders nor the names
21  *    of any contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Some Breakage and Bug Fixing added later.
38  * Copyright (c) 2006, by Matthew Jacob
39  * All Rights Reserved
40  *
41  * Support from LSI-Logic has also gone a great deal toward making this a
42  * workable subsystem and is gratefully acknowledged.
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include <dev/mpt/mpt.h>
49 #include <dev/mpt/mpt_raid.h>
50 
51 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
52 #include "dev/mpt/mpilib/mpi_raid.h"
53 
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 
60 #include <sys/callout.h>
61 #include <sys/kthread.h>
62 #include <sys/sysctl.h>
63 
64 #include <machine/stdarg.h>
65 
/*
 * Decoded completion data for a RAID action request.  An instance of
 * this structure lives in the request buffer immediately behind the
 * MSG_RAID_ACTION_REQUEST message (see REQ_TO_RAID_ACTION_RESULT) and
 * is filled in by mpt_raid_reply_frame_handler().
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};
75 
/*
 * Locate the action result stored directly behind the RAID action
 * request message in a request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code (masking off flag bits) from a request. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
80 
/* Personality entry points, implemented below. */
static mpt_probe_handler_t	mpt_raid_probe;
static mpt_attach_handler_t	mpt_raid_attach;
static mpt_enable_handler_t	mpt_raid_enable;
static mpt_event_handler_t	mpt_raid_event;
static mpt_shutdown_handler_t	mpt_raid_shutdown;
static mpt_reset_handler_t	mpt_raid_ioc_reset;
static mpt_detach_handler_t	mpt_raid_detach;

/*
 * The integrated-RAID personality registered with the core mpt(4)
 * driver; the core invokes these hooks at the appropriate lifecycle
 * points.
 */
static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
/* RAID support requires the CAM personality to be present first. */
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);

/* Forward declarations for the RAID monitoring machinery. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static callout_func_t mpt_raid_timer;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
    struct cam_path *);
static void mpt_raid_sysctl_attach(struct mpt_softc *);

static const char *mpt_vol_type(struct mpt_raid_volume *vol);
static const char *mpt_vol_state(struct mpt_raid_volume *vol);
static const char *mpt_disk_state(struct mpt_raid_disk *disk);
static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
    const char *fmt, ...);
static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
    const char *fmt, ...);

static int mpt_issue_raid_req(struct mpt_softc *mpt,
    struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
    u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
    int write, int wait);

static int mpt_refresh_raid_data(struct mpt_softc *mpt);
static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);

/* Reply-handler cookie assigned by mpt_register_handler() at attach. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
137 
138 static const char *
139 mpt_vol_type(struct mpt_raid_volume *vol)
140 {
141 	switch (vol->config_page->VolumeType) {
142 	case MPI_RAID_VOL_TYPE_IS:
143 		return ("RAID-0");
144 	case MPI_RAID_VOL_TYPE_IME:
145 		return ("RAID-1E");
146 	case MPI_RAID_VOL_TYPE_IM:
147 		return ("RAID-1");
148 	default:
149 		return ("Unknown");
150 	}
151 }
152 
153 static const char *
154 mpt_vol_state(struct mpt_raid_volume *vol)
155 {
156 	switch (vol->config_page->VolumeStatus.State) {
157 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
158 		return ("Optimal");
159 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
160 		return ("Degraded");
161 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
162 		return ("Failed");
163 	default:
164 		return ("Unknown");
165 	}
166 }
167 
168 static const char *
169 mpt_disk_state(struct mpt_raid_disk *disk)
170 {
171 	switch (disk->config_page.PhysDiskStatus.State) {
172 	case MPI_PHYSDISK0_STATUS_ONLINE:
173 		return ("Online");
174 	case MPI_PHYSDISK0_STATUS_MISSING:
175 		return ("Missing");
176 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
177 		return ("Incompatible");
178 	case MPI_PHYSDISK0_STATUS_FAILED:
179 		return ("Failed");
180 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
181 		return ("Initializing");
182 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
183 		return ("Offline Requested");
184 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
185 		return ("Failed per Host Request");
186 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
187 		return ("Offline");
188 	default:
189 		return ("Unknown");
190 	}
191 }
192 
/*
 * printf-style console message prefixed with the unit name and the
 * volume's index, bus and ID so log lines identify which volume they
 * refer to.
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* vol's index is its offset into the raid_volumes array. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
206 
/*
 * printf-style console message prefixed with the disk's identity:
 * volume ID and member number if the disk belongs to a volume,
 * otherwise its physical bus and target ID.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
227 
228 static void
229 mpt_raid_async(void *callback_arg, u_int32_t code,
230 	       struct cam_path *path, void *arg)
231 {
232 	struct mpt_softc *mpt;
233 
234 	mpt = (struct mpt_softc*)callback_arg;
235 	switch (code) {
236 	case AC_FOUND_DEVICE:
237 	{
238 		struct ccb_getdev *cgd;
239 		struct mpt_raid_volume *mpt_vol;
240 
241 		cgd = (struct ccb_getdev *)arg;
242 		if (cgd == NULL) {
243 			break;
244 		}
245 
246 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
247 			 cgd->ccb_h.target_id);
248 
249 		RAID_VOL_FOREACH(mpt, mpt_vol) {
250 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
251 				continue;
252 
253 			if (mpt_vol->config_page->VolumeID
254 			 == cgd->ccb_h.target_id) {
255 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
256 				break;
257 			}
258 		}
259 	}
260 	default:
261 		break;
262 	}
263 }
264 
265 static int
266 mpt_raid_probe(struct mpt_softc *mpt)
267 {
268 
269 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
270 		return (ENODEV);
271 	}
272 	return (0);
273 }
274 
275 static int
276 mpt_raid_attach(struct mpt_softc *mpt)
277 {
278 	struct ccb_setasync csa;
279 	mpt_handler_t	 handler;
280 	int		 error;
281 
282 	mpt_callout_init(mpt, &mpt->raid_timer);
283 
284 	error = mpt_spawn_raid_thread(mpt);
285 	if (error != 0) {
286 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
287 		goto cleanup;
288 	}
289 
290 	MPT_LOCK(mpt);
291 	handler.reply_handler = mpt_raid_reply_handler;
292 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
293 				     &raid_handler_id);
294 	if (error != 0) {
295 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
296 		goto cleanup;
297 	}
298 
299 	memset(&csa, 0, sizeof(csa));
300 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
301 	csa.ccb_h.func_code = XPT_SASYNC_CB;
302 	csa.event_enable = AC_FOUND_DEVICE;
303 	csa.callback = mpt_raid_async;
304 	csa.callback_arg = mpt;
305 	xpt_action((union ccb *)&csa);
306 	if (csa.ccb_h.status != CAM_REQ_CMP) {
307 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
308 			"CAM async handler.\n");
309 	}
310 	MPT_UNLOCK(mpt);
311 
312 	mpt_raid_sysctl_attach(mpt);
313 	return (0);
314 cleanup:
315 	MPT_UNLOCK(mpt);
316 	mpt_raid_detach(mpt);
317 	return (error);
318 }
319 
/*
 * Personality enable hook.  Nothing additional is required for RAID
 * support beyond what attach already did, so this always succeeds.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
326 
/*
 * Personality detach: tear down in the reverse order of attach.
 * Drain the refresh callout first (it must not fire during teardown),
 * then stop the RAID thread, unhook the reply handler and deregister
 * the CAM async callback (event_enable = 0 removes the registration).
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	/* Must happen without the lock held; callout may need to finish. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
349 
/*
 * IOC reset hook.  Intentionally a no-op for now; cached RAID state is
 * refreshed by the RAID thread rather than here.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
356 
/*
 * Human-readable descriptions of RAID events, indexed by the
 * MPI_EVENT_RAID_RC_* reason code (see mpt_raid_event()).
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
372 
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications from the IOC.
 *
 * Locates the affected volume and/or physical disk (when we track
 * them), marks cached configuration state stale and schedules a
 * rescan as appropriate for the reason code, logs the event, then
 * wakes the RAID monitoring thread.  Returns 0 when the event is not
 * a RAID event (so other handlers may claim it), 1 when consumed.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Match the event's bus/ID against our active volumes. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		/* Loop ran off the end: no matching active volume. */
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/*
	 * 0xFF means "no physical disk involved".
	 * NOTE(review): PhysDiskNum is indexed into raid_disks without a
	 * range check against raid_max_disks - presumably the IOC only
	 * reports valid disk numbers; confirm before relying on it.
	 */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/* Invalidate cached state and decide whether to log this event. */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix with the most specific identity we resolved. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Let the RAID thread refresh its view of the world. */
	mpt_raid_wakeup(mpt);
	return (1);
}
503 
/*
 * Personality shutdown hook.  If member-disk write caching was enabled
 * only for rebuilds, force it off on every volume so no dirty data is
 * cached in the drives across the shutdown.
 */
static void
mpt_raid_shutdown(struct mpt_softc *mpt)
{
	struct mpt_raid_volume *mpt_vol;

	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
		return;
	}

	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		mpt_verify_mwce(mpt, mpt_vol);
	}
}
518 
/*
 * Completion handler for RAID action requests.  Parses the reply frame
 * (when present), marks the request done and either wakes a waiter or
 * frees the request.  Returns TRUE to indicate the reply was consumed.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A sleeping waiter owns the request and will free it itself. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
549 
550 /*
551  * Parse additional completion information in the reply
552  * frame for RAID I/O requests.
553  */
554 static int
555 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
556     MSG_DEFAULT_REPLY *reply_frame)
557 {
558 	MSG_RAID_ACTION_REPLY *reply;
559 	struct mpt_raid_action_result *action_result;
560 	MSG_RAID_ACTION_REQUEST *rap;
561 
562 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
563 	req->IOCStatus = le16toh(reply->IOCStatus);
564 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
565 
566 	switch (rap->Action) {
567 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
568 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
569 		break;
570 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
571 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
572 		break;
573 	default:
574 		break;
575 	}
576 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
577 	memcpy(&action_result->action_data, &reply->ActionData,
578 	    sizeof(action_result->action_data));
579 	action_result->action_status = le16toh(reply->ActionStatus);
580 	return (TRUE);
581 }
582 
/*
 * Utility routine to perform a RAID action command.
 *
 * Builds a MSG_RAID_ACTION_REQUEST in the request buffer, attaches a
 * single simple SGE describing the optional data buffer (addr/len,
 * direction chosen by 'write'), and submits it to the IOC.  When
 * 'wait' is set, blocks (busy-wait, 2s timeout) for completion and
 * returns the result of mpt_wait_req(); otherwise returns 0
 * immediately after queuing.
 */
static int
mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
		   int write, int wait)
{
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;

	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = Action;
	rap->ActionDataWord = htole32(ActionDataWord);
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = vol->config_page->VolumeID;
	rap->VolumeBus = vol->config_page->VolumeBus;
	/* 0xFF tells the IOC the action targets the whole volume. */
	if (disk != NULL)
		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
	else
		rap->PhysDiskNum = 0xFF;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	/* Route the reply back to mpt_raid_reply_handler(). */
	rap->MsgContext = htole32(req->index | raid_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	if (wait) {
		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
				     /*sleep_ok*/FALSE, /*time_ms*/2000));
	} else {
		return (0);
	}
}
626 
/*************************** RAID Status Monitoring ***************************/
/*
 * Start the per-controller RAID monitoring kernel thread.  The physical
 * disk SIM queue is frozen until the thread's first successful data
 * refresh (see mpt_raid_thread()); on spawn failure the freeze is
 * released again.  Returns 0 or the kproc_create() errno.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before acception I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = kproc_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
653 
/*
 * Ask the RAID monitoring thread to exit and wait until it has done
 * so.  Called with the mpt lock held (see mpt_raid_detach()).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread idles on &mpt->raid_volumes; kick it awake. */
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
669 
/*
 * Main loop of the RAID monitoring kernel thread.  Sleeps until woken
 * (by events or the refresh timer), refreshes cached RAID data, and
 * triggers CAM bus rescans when membership changes were flagged.  The
 * physical-disk SIM queue, frozen at spawn time, is released after the
 * first successful refresh.  Exits when shutdwn_raid is set.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {
		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock for the (sleeping) CCB allocation. */
			MPT_UNLOCK(mpt);

			ccb = xpt_alloc_ccb();

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, NULL,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				xpt_free_ccb(ccb);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* xpt_rescan() takes ownership of the ccb. */
				xpt_rescan(ccb);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kproc_exit(0);
}
727 
#if 0
/*
 * NOTE: this entire quiesce path is compiled out (#if 0) - unfinished
 * physical-disk quiesce support kept for reference.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
/*
 * Begin quiescing physical I/O to a member disk: freeze its device
 * queue and issue an asynchronous QUIESCE_PHYS_IO RAID action with a
 * 5-second timeout.  Returns CAM_REQ_INPROG while the quiesce is in
 * flight, CAM_REQ_CMP once quiesced, CAM_REQUEUE_REQ while one is
 * already pending.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
787 
788 /* XXX Ignores that there may be multiple buses/IOCs involved. */
789 cam_status
790 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
791 {
792 	struct mpt_raid_disk *mpt_disk;
793 
794 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
795 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
796 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
797 		*tgt = mpt_disk->config_page.PhysDiskID;
798 		return (0);
799 	}
800 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
801 		 ccb->ccb_h.target_id);
802 	return (-1);
803 }
804 
805 /* XXX Ignores that there may be multiple buses/IOCs involved. */
806 int
807 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
808 {
809 	struct mpt_raid_disk *mpt_disk;
810 	int i;
811 
812 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
813 		return (0);
814 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
815 		mpt_disk = &mpt->raid_disks[i];
816 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
817 		    mpt_disk->config_page.PhysDiskID == tgt)
818 			return (1);
819 	}
820 	return (0);
821 
822 }
823 
824 /* XXX Ignores that there may be multiple buses/IOCs involved. */
825 int
826 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
827 {
828 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
829 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
830 
831 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
832 		return (0);
833 	}
834 	ioc_vol = mpt->ioc_page2->RaidVolume;
835 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
836 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
837 		if (ioc_vol->VolumeID == tgt) {
838 			return (1);
839 		}
840 	}
841 	return (0);
842 }
843 
#if 0
/*
 * NOTE: compiled out (#if 0) - unused volume enable/disable support.
 * Synchronously enables or disables a volume via a RAID action.
 * NOTE(review): the ETIMEDOUT path returns without freeing 'req',
 * leaking the request - presumably deliberate since the request may
 * still complete later; confirm before re-enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
896 
/*
 * Reconcile a volume's member-disk write-cache-enable (MWCE) setting
 * with the administrator's policy (mpt->raid_mwce_setting).  When they
 * differ, issue a CHANGE_VOLUME_SETTINGS RAID action and, on success,
 * update the cached config page to match.
 *
 * NOTE(review): the ETIMEDOUT path returns without freeing 'req' -
 * presumably deliberate since the request may still complete later
 * and be freed by the reply handler; confirm before "fixing".
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		/* "No change" policy: never touch the firmware setting. */
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the WCE bit just long enough to snapshot the desired
	 * settings word for the firmware, then restore the cached copy
	 * (it is only updated for real once the action succeeds below).
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the new setting to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
979 
/*
 * Reconcile a volume's resync rate and resync-priority setting with
 * the administrator's configured rate (mpt->raid_resync_rate).  Two
 * separate firmware knobs are handled: the explicit resync rate (via
 * SET_RESYNC_RATE) and the high/low priority bit in the volume
 * settings (rates >= 128 map to high priority).
 *
 * NOTE(review): as in mpt_verify_mwce(), the ETIMEDOUT paths return
 * without freeing 'req' - presumably deliberate; confirm.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave the firmware's rate alone. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			/* Success: commit the new rate to our cached page. */
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit just long enough to build the
		 * desired settings word; the cached page is only updated
		 * for real once the action succeeds below.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1069 
1070 static void
1071 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1072 		       struct cam_path *path)
1073 {
1074 	struct ccb_relsim crs;
1075 
1076 	memset(&crs, 0, sizeof(crs));
1077 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1078 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1079 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1080 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1081 	crs.openings = mpt->raid_queue_depth;
1082 	xpt_action((union ccb *)&crs);
1083 	if (crs.ccb_h.status != CAM_REQ_CMP)
1084 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1085 			    "with CAM status %#x\n", crs.ccb_h.status);
1086 }
1087 
/*
 * Log a human-readable description of a RAID volume to the console:
 * its settings word, any hot spare pool membership, and the role,
 * flags, and state of each member physical disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each bit of the 16-bit Settings word, naming the known ones. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	/* HotSparePool is a bitmask of pool numbers 0-7 this volume draws on. */
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	/* Describe each member disk via its cached Phys Disk Page 0. */
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		/* Mirrors have a primary/secondary role; stripes a position. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1195 
1196 static void
1197 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1198 {
1199 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1200 	int rd_bus = cam_sim_bus(mpt->sim);
1201 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1202 	u_int i;
1203 
1204 	disk_pg = &mpt_disk->config_page;
1205 	mpt_disk_prt(mpt, mpt_disk,
1206 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1207 		     device_get_nameunit(mpt->dev), rd_bus,
1208 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1209 		     pt_bus, mpt_disk - mpt->raid_disks);
1210 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1211 		return;
1212 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1213 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1214 		   ? ":" : "s:");
1215 	for (i = 0; i < 8; i++) {
1216 		u_int mask;
1217 
1218 		mask = 0x1 << i;
1219 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1220 			continue;
1221 		mpt_prtc(mpt, " %d", i);
1222 	}
1223 	mpt_prtc(mpt, "\n");
1224 }
1225 
1226 static void
1227 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1228 		      IOC_3_PHYS_DISK *ioc_disk)
1229 {
1230 	int rv;
1231 
1232 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1233 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1234 				 &mpt_disk->config_page.Header,
1235 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1236 	if (rv != 0) {
1237 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1238 			"Failed to read RAID Disk Hdr(%d)\n",
1239 		 	ioc_disk->PhysDiskNum);
1240 		return;
1241 	}
1242 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1243 				   &mpt_disk->config_page.Header,
1244 				   sizeof(mpt_disk->config_page),
1245 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1246 	if (rv != 0)
1247 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1248 			"Failed to read RAID Disk Page(%d)\n",
1249 		 	ioc_disk->PhysDiskNum);
1250 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1251 }
1252 
/*
 * Re-read RAID Volume Page 0 for one volume into its cached config
 * page, refresh the member-disk bookkeeping derived from it, and, if a
 * resync is in progress, fetch the current progress indicator from the
 * firmware.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Cached data is about to be replaced; drop the up-to-date mark. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/*
		 * Mirror maps are 1-based in firmware; normalize so that
		 * member 0 is the primary (see mpt_announce_vol).
		 */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_disk->member_number--;
		}
	}

	/* Only fetch resync progress while a resync is actually running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	/* Accept the indicator only if every status level reports success. */
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1331 
1332 /*
1333  * Update in-core information about RAID support.  We update any entries
1334  * that didn't previously exists or have been marked as needing to
1335  * be updated by our event handler.  Interesting changes are displayed
1336  * to the console.
1337  */
1338 static int
1339 mpt_refresh_raid_data(struct mpt_softc *mpt)
1340 {
1341 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1342 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1343 	IOC_3_PHYS_DISK *ioc_disk;
1344 	IOC_3_PHYS_DISK *ioc_last_disk;
1345 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1346 	size_t len;
1347 	int rv;
1348 	int i;
1349 	u_int nonopt_volumes;
1350 
1351 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1352 		return (0);
1353 	}
1354 
1355 	/*
1356 	 * Mark all items as unreferenced by the configuration.
1357 	 * This allows us to find, report, and discard stale
1358 	 * entries.
1359 	 */
1360 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1361 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1362 	}
1363 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1364 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1365 	}
1366 
1367 	/*
1368 	 * Get Physical Disk information.
1369 	 */
1370 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1371 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1372 				   &mpt->ioc_page3->Header, len,
1373 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1374 	if (rv) {
1375 		mpt_prt(mpt,
1376 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1377 		return (-1);
1378 	}
1379 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1380 
1381 	ioc_disk = mpt->ioc_page3->PhysDisk;
1382 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1383 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1384 		struct mpt_raid_disk *mpt_disk;
1385 
1386 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1387 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1388 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1389 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1390 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1391 		}
1392 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1393 		mpt->raid_rescan++;
1394 	}
1395 
1396 	/*
1397 	 * Refresh volume data.
1398 	 */
1399 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1400 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1401 				   &mpt->ioc_page2->Header, len,
1402 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1403 	if (rv) {
1404 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1405 			"Failed to read IOC Page 2\n");
1406 		return (-1);
1407 	}
1408 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1409 
1410 	ioc_vol = mpt->ioc_page2->RaidVolume;
1411 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1412 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1413 		struct mpt_raid_volume *mpt_vol;
1414 
1415 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1416 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1417 		vol_pg = mpt_vol->config_page;
1418 		if (vol_pg == NULL)
1419 			continue;
1420 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1421 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1422 		 || (vol_pg->VolumeStatus.Flags
1423 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1424 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1425 		}
1426 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1427 	}
1428 
1429 	nonopt_volumes = 0;
1430 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1431 		struct mpt_raid_volume *mpt_vol;
1432 		uint64_t total;
1433 		uint64_t left;
1434 		int m;
1435 		u_int prio;
1436 
1437 		mpt_vol = &mpt->raid_volumes[i];
1438 
1439 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1440 			continue;
1441 		}
1442 
1443 		vol_pg = mpt_vol->config_page;
1444 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1445 		 == MPT_RVF_ANNOUNCED) {
1446 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1447 			mpt_vol->flags = 0;
1448 			continue;
1449 		}
1450 
1451 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1452 			mpt_announce_vol(mpt, mpt_vol);
1453 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1454 		}
1455 
1456 		if (vol_pg->VolumeStatus.State !=
1457 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1458 			nonopt_volumes++;
1459 
1460 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1461 			continue;
1462 
1463 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1464 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1465 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1466 		mpt_verify_mwce(mpt, mpt_vol);
1467 
1468 		if (vol_pg->VolumeStatus.Flags == 0) {
1469 			continue;
1470 		}
1471 
1472 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1473 		for (m = 1; m <= 0x80; m <<= 1) {
1474 			switch (vol_pg->VolumeStatus.Flags & m) {
1475 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1476 				mpt_prtc(mpt, " Enabled");
1477 				break;
1478 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1479 				mpt_prtc(mpt, " Quiesced");
1480 				break;
1481 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1482 				mpt_prtc(mpt, " Re-Syncing");
1483 				break;
1484 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1485 				mpt_prtc(mpt, " Inactive");
1486 				break;
1487 			default:
1488 				break;
1489 			}
1490 		}
1491 		mpt_prtc(mpt, " )\n");
1492 
1493 		if ((vol_pg->VolumeStatus.Flags
1494 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1495 			continue;
1496 
1497 		mpt_verify_resync_rate(mpt, mpt_vol);
1498 
1499 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1500 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1501 		if (vol_pg->ResyncRate != 0) {
1502 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1503 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1504 			    prio / 1000, prio % 1000);
1505 		} else {
1506 			prio = vol_pg->VolumeSettings.Settings
1507 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1508 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1509 			    prio ? "High" : "Low");
1510 		}
1511 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1512 			    "blocks remaining\n", (uintmax_t)left,
1513 			    (uintmax_t)total);
1514 
1515 		/* Periodically report on sync progress. */
1516 		mpt_schedule_raid_refresh(mpt);
1517 	}
1518 
1519 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1520 		struct mpt_raid_disk *mpt_disk;
1521 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1522 		int m;
1523 
1524 		mpt_disk = &mpt->raid_disks[i];
1525 		disk_pg = &mpt_disk->config_page;
1526 
1527 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1528 			continue;
1529 
1530 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1531 		 == MPT_RDF_ANNOUNCED) {
1532 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1533 			mpt_disk->flags = 0;
1534 			mpt->raid_rescan++;
1535 			continue;
1536 		}
1537 
1538 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1539 			mpt_announce_disk(mpt, mpt_disk);
1540 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1541 		}
1542 
1543 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1544 			continue;
1545 
1546 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1547 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1548 		if (disk_pg->PhysDiskStatus.Flags == 0)
1549 			continue;
1550 
1551 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1552 		for (m = 1; m <= 0x80; m <<= 1) {
1553 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1554 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1555 				mpt_prtc(mpt, " Out-Of-Sync");
1556 				break;
1557 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1558 				mpt_prtc(mpt, " Quiesced");
1559 				break;
1560 			default:
1561 				break;
1562 			}
1563 		}
1564 		mpt_prtc(mpt, " )\n");
1565 	}
1566 
1567 	mpt->raid_nonopt_volumes = nonopt_volumes;
1568 	return (0);
1569 }
1570 
/*
 * Callout handler: wake the RAID monitoring thread so it refreshes its
 * view of the hardware.  Runs with the softc lock held.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = (struct mpt_softc *)arg;

	MPT_LOCK_ASSERT(mpt);
	mpt_raid_wakeup(mpt);
}
1580 
/*
 * Arm (or re-arm) the RAID refresh callout so mpt_raid_timer fires
 * after the periodic sync-report interval.
 */
static void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{

	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1588 
1589 void
1590 mpt_raid_free_mem(struct mpt_softc *mpt)
1591 {
1592 
1593 	if (mpt->raid_volumes) {
1594 		struct mpt_raid_volume *mpt_raid;
1595 		int i;
1596 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1597 			mpt_raid = &mpt->raid_volumes[i];
1598 			if (mpt_raid->config_page) {
1599 				free(mpt_raid->config_page, M_DEVBUF);
1600 				mpt_raid->config_page = NULL;
1601 			}
1602 		}
1603 		free(mpt->raid_volumes, M_DEVBUF);
1604 		mpt->raid_volumes = NULL;
1605 	}
1606 	if (mpt->raid_disks) {
1607 		free(mpt->raid_disks, M_DEVBUF);
1608 		mpt->raid_disks = NULL;
1609 	}
1610 	if (mpt->ioc_page2) {
1611 		free(mpt->ioc_page2, M_DEVBUF);
1612 		mpt->ioc_page2 = NULL;
1613 	}
1614 	if (mpt->ioc_page3) {
1615 		free(mpt->ioc_page3, M_DEVBUF);
1616 		mpt->ioc_page3 = NULL;
1617 	}
1618 	mpt->raid_max_volumes =  0;
1619 	mpt->raid_max_disks =  0;
1620 }
1621 
1622 static int
1623 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1624 {
1625 	struct mpt_raid_volume *mpt_vol;
1626 
1627 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1628 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1629 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1630 		return (EINVAL);
1631 
1632 	MPT_LOCK(mpt);
1633 	mpt->raid_resync_rate = rate;
1634 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1635 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1636 			continue;
1637 		}
1638 		mpt_verify_resync_rate(mpt, mpt_vol);
1639 	}
1640 	MPT_UNLOCK(mpt);
1641 	return (0);
1642 }
1643 
1644 static int
1645 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1646 {
1647 	struct mpt_raid_volume *mpt_vol;
1648 
1649 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1650 		return (EINVAL);
1651 
1652 	MPT_LOCK(mpt);
1653 	mpt->raid_queue_depth = vol_queue_depth;
1654 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1655 		struct cam_path *path;
1656 		int error;
1657 
1658 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1659 			continue;
1660 
1661 		mpt->raid_rescan = 0;
1662 
1663 		error = xpt_create_path(&path, NULL,
1664 					cam_sim_path(mpt->sim),
1665 					mpt_vol->config_page->VolumeID,
1666 					/*lun*/0);
1667 		if (error != CAM_REQ_CMP) {
1668 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1669 			continue;
1670 		}
1671 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1672 		xpt_free_path(path);
1673 	}
1674 	MPT_UNLOCK(mpt);
1675 	return (0);
1676 }
1677 
1678 static int
1679 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1680 {
1681 	struct mpt_raid_volume *mpt_vol;
1682 	int force_full_resync;
1683 
1684 	MPT_LOCK(mpt);
1685 	if (mwce == mpt->raid_mwce_setting) {
1686 		MPT_UNLOCK(mpt);
1687 		return (0);
1688 	}
1689 
1690 	/*
1691 	 * Catch MWCE being left on due to a failed shutdown.  Since
1692 	 * sysctls cannot be set by the loader, we treat the first
1693 	 * setting of this varible specially and force a full volume
1694 	 * resync if MWCE is enabled and a resync is in progress.
1695 	 */
1696 	force_full_resync = 0;
1697 	if (mpt->raid_mwce_set == 0
1698 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1699 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1700 		force_full_resync = 1;
1701 
1702 	mpt->raid_mwce_setting = mwce;
1703 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1704 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1705 		int resyncing;
1706 		int mwce;
1707 
1708 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1709 			continue;
1710 
1711 		vol_pg = mpt_vol->config_page;
1712 		resyncing = vol_pg->VolumeStatus.Flags
1713 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1714 		mwce = vol_pg->VolumeSettings.Settings
1715 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1716 		if (force_full_resync && resyncing && mwce) {
1717 			/*
1718 			 * XXX disable/enable volume should force a resync,
1719 			 *     but we'll need to queice, drain, and restart
1720 			 *     I/O to do that.
1721 			 */
1722 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1723 				    "detected.  Suggest full resync.\n");
1724 		}
1725 		mpt_verify_mwce(mpt, mpt_vol);
1726 	}
1727 	mpt->raid_mwce_set = 1;
1728 	MPT_UNLOCK(mpt);
1729 	return (0);
1730 }
1731 
/*
 * Keyword strings accepted/reported by the vol_member_wce sysctl.
 * Indexed by the mwce setting value, so the order here must stay in
 * step with the mpt_raid_mwce_t values — verify against mpt_raid.h
 * before reordering.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1739 
1740 static int
1741 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1742 {
1743 	char inbuf[20];
1744 	struct mpt_softc *mpt;
1745 	const char *str;
1746 	int error;
1747 	u_int size;
1748 	u_int i;
1749 
1750 	mpt = (struct mpt_softc *)arg1;
1751 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1752 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1753 	if (error || !req->newptr) {
1754 		return (error);
1755 	}
1756 
1757 	size = req->newlen - req->newidx;
1758 	if (size >= sizeof(inbuf)) {
1759 		return (EINVAL);
1760 	}
1761 
1762 	error = SYSCTL_IN(req, inbuf, size);
1763 	if (error) {
1764 		return (error);
1765 	}
1766 	inbuf[size] = '\0';
1767 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1768 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1769 			return (mpt_raid_set_vol_mwce(mpt, i));
1770 		}
1771 	}
1772 	return (EINVAL);
1773 }
1774 
1775 static int
1776 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1777 {
1778 	struct mpt_softc *mpt;
1779 	u_int raid_resync_rate;
1780 	int error;
1781 
1782 	mpt = (struct mpt_softc *)arg1;
1783 	raid_resync_rate = mpt->raid_resync_rate;
1784 
1785 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1786 	if (error || !req->newptr) {
1787 		return error;
1788 	}
1789 
1790 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1791 }
1792 
1793 static int
1794 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1795 {
1796 	struct mpt_softc *mpt;
1797 	u_int raid_queue_depth;
1798 	int error;
1799 
1800 	mpt = (struct mpt_softc *)arg1;
1801 	raid_queue_depth = mpt->raid_queue_depth;
1802 
1803 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1804 	if (error || !req->newptr) {
1805 		return error;
1806 	}
1807 
1808 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1809 }
1810 
/*
 * Register the per-device RAID sysctl knobs under the device's sysctl
 * tree: member WCE policy (string), default queue depth, resync rate,
 * and a read-only count of non-optimal volumes.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    mpt, 0, mpt_raid_sysctl_vol_member_wce, "A",
	    "volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I",
	    "default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I",
	    "volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1836