xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision a9148abd9da5db2f1c682fb17bed791845fc41c9)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_periph.h>
56 #include <cam/cam_xpt_sim.h>
57 
58 #if __FreeBSD_version < 500000
59 #include <sys/devicestat.h>
60 #define	GIANT_REQUIRED
61 #endif
62 #include <cam/cam_periph.h>
63 
64 #include <sys/callout.h>
65 #include <sys/kthread.h>
66 #include <sys/sysctl.h>
67 
68 #include <machine/stdarg.h>
69 
/*
 * Result area filled in by mpt_raid_reply_frame_handler() when a RAID
 * action request completes.  It lives directly behind the request
 * message inside the request's buffer (see REQ_TO_RAID_ACTION_RESULT()).
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * The action result is stored immediately after the RAID action request
 * within the same request buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code from a completed request, masking flag bits. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
84 
85 
/* Forward declarations of this personality's entry points. */
static mpt_probe_handler_t	mpt_raid_probe;
static mpt_attach_handler_t	mpt_raid_attach;
static mpt_enable_handler_t	mpt_raid_enable;
static mpt_event_handler_t	mpt_raid_event;
static mpt_shutdown_handler_t	mpt_raid_shutdown;
static mpt_reset_handler_t	mpt_raid_ioc_reset;
static mpt_detach_handler_t	mpt_raid_detach;

/* Personality registered with the core mpt(4) driver framework. */
static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
/* RAID support depends on the CAM personality being present first. */
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
108 
/* Internal helpers; see definitions below for details. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static timeout_t mpt_raid_timer;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
    struct cam_path *);
#if __FreeBSD_version < 500000
#define	mpt_raid_sysctl_attach(x)	do { } while (0)
#else
static void mpt_raid_sysctl_attach(struct mpt_softc *);
#endif

/* Reply-handler cookie assigned to us by mpt_register_handler(). */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
130 
131 const char *
132 mpt_vol_type(struct mpt_raid_volume *vol)
133 {
134 	switch (vol->config_page->VolumeType) {
135 	case MPI_RAID_VOL_TYPE_IS:
136 		return ("RAID-0");
137 	case MPI_RAID_VOL_TYPE_IME:
138 		return ("RAID-1E");
139 	case MPI_RAID_VOL_TYPE_IM:
140 		return ("RAID-1");
141 	default:
142 		return ("Unknown");
143 	}
144 }
145 
146 const char *
147 mpt_vol_state(struct mpt_raid_volume *vol)
148 {
149 	switch (vol->config_page->VolumeStatus.State) {
150 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
151 		return ("Optimal");
152 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
153 		return ("Degraded");
154 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
155 		return ("Failed");
156 	default:
157 		return ("Unknown");
158 	}
159 }
160 
161 const char *
162 mpt_disk_state(struct mpt_raid_disk *disk)
163 {
164 	switch (disk->config_page.PhysDiskStatus.State) {
165 	case MPI_PHYSDISK0_STATUS_ONLINE:
166 		return ("Online");
167 	case MPI_PHYSDISK0_STATUS_MISSING:
168 		return ("Missing");
169 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
170 		return ("Incompatible");
171 	case MPI_PHYSDISK0_STATUS_FAILED:
172 		return ("Failed");
173 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
174 		return ("Initializing");
175 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
176 		return ("Offline Requested");
177 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
178 		return ("Failed per Host Request");
179 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
180 		return ("Offline");
181 	default:
182 		return ("Unknown");
183 	}
184 }
185 
/*
 * printf(9)-style diagnostic, prefixed with the controller unit and the
 * volume's index, bus and target ID so messages identify their volume.
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* Volume index is derived from the position in the raid_volumes array. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
199 
/*
 * printf(9)-style diagnostic for a physical disk.  If the disk belongs
 * to a volume the prefix names the volume and member number; otherwise
 * it names the disk's own bus and target ID.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
220 
221 static void
222 mpt_raid_async(void *callback_arg, u_int32_t code,
223 	       struct cam_path *path, void *arg)
224 {
225 	struct mpt_softc *mpt;
226 
227 	mpt = (struct mpt_softc*)callback_arg;
228 	switch (code) {
229 	case AC_FOUND_DEVICE:
230 	{
231 		struct ccb_getdev *cgd;
232 		struct mpt_raid_volume *mpt_vol;
233 
234 		cgd = (struct ccb_getdev *)arg;
235 		if (cgd == NULL) {
236 			break;
237 		}
238 
239 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
240 			 cgd->ccb_h.target_id);
241 
242 		RAID_VOL_FOREACH(mpt, mpt_vol) {
243 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
244 				continue;
245 
246 			if (mpt_vol->config_page->VolumeID
247 			 == cgd->ccb_h.target_id) {
248 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
249 				break;
250 			}
251 		}
252 	}
253 	default:
254 		break;
255 	}
256 }
257 
258 int
259 mpt_raid_probe(struct mpt_softc *mpt)
260 {
261 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
262 		return (ENODEV);
263 	}
264 	return (0);
265 }
266 
267 int
268 mpt_raid_attach(struct mpt_softc *mpt)
269 {
270 	struct ccb_setasync csa;
271 	mpt_handler_t	 handler;
272 	int		 error;
273 
274 	mpt_callout_init(&mpt->raid_timer);
275 
276 	error = mpt_spawn_raid_thread(mpt);
277 	if (error != 0) {
278 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
279 		goto cleanup;
280 	}
281 
282 	MPT_LOCK(mpt);
283 	handler.reply_handler = mpt_raid_reply_handler;
284 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
285 				     &raid_handler_id);
286 	if (error != 0) {
287 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
288 		goto cleanup;
289 	}
290 
291 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
292 	csa.ccb_h.func_code = XPT_SASYNC_CB;
293 	csa.event_enable = AC_FOUND_DEVICE;
294 	csa.callback = mpt_raid_async;
295 	csa.callback_arg = mpt;
296 	xpt_action((union ccb *)&csa);
297 	if (csa.ccb_h.status != CAM_REQ_CMP) {
298 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
299 			"CAM async handler.\n");
300 	}
301 	MPT_UNLOCK(mpt);
302 
303 	mpt_raid_sysctl_attach(mpt);
304 	return (0);
305 cleanup:
306 	MPT_UNLOCK(mpt);
307 	mpt_raid_detach(mpt);
308 	return (error);
309 }
310 
/*
 * Nothing extra is required to enable RAID support; always succeeds.
 */
int
mpt_raid_enable(struct mpt_softc *mpt)
{
	return (0);
}
316 
/*
 * Tear down the RAID personality: stop the refresh timer, terminate the
 * monitoring thread, unregister our reply handler, and disable the CAM
 * async callback registered at attach time.
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);

	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 cancels our AC_FOUND_DEVICE registration. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
338 
/*
 * IOC reset notification.  RAID state is rebuilt by the monitoring
 * thread on its next refresh, so no work is required here yet.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
344 
/* Event descriptions, indexed by MPI_EVENT_RAID_RC_* reason code. */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
360 
/*
 * Handle MPI integrated-RAID events.  Returns 0 (unhandled) for any
 * event other than MPI_EVENT_INTEGRATED_RAID.  Otherwise updates the
 * bookkeeping flags on the affected volume/disk, optionally logs the
 * event, wakes the RAID monitoring thread, and returns 1 (handled).
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Resolve the active volume (if any) this event refers to. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/*
	 * Resolve the physical disk; 0xFF means "no disk involved".
	 * NOTE(review): PhysDiskNum indexes raid_disks without a check
	 * against raid_max_disks -- confirm the IOC can never report an
	 * out-of-range disk number.
	 */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		/* Membership/settings changed; force a full re-announce. */
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix the message with whichever object we resolved. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Let the monitoring thread refresh our view of the IOC. */
	mpt_raid_wakeup(mpt);
	return (1);
}
491 
492 static void
493 mpt_raid_shutdown(struct mpt_softc *mpt)
494 {
495 	struct mpt_raid_volume *mpt_vol;
496 
497 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
498 		return;
499 	}
500 
501 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
502 	RAID_VOL_FOREACH(mpt, mpt_vol) {
503 		mpt_verify_mwce(mpt, mpt_vol);
504 	}
505 }
506 
507 static int
508 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
509     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
510 {
511 	int free_req;
512 
513 	if (req == NULL)
514 		return (TRUE);
515 
516 	free_req = TRUE;
517 	if (reply_frame != NULL)
518 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
519 #ifdef NOTYET
520 	else if (req->ccb != NULL) {
521 		/* Complete Quiesce CCB with error... */
522 	}
523 #endif
524 
525 	req->state &= ~REQ_STATE_QUEUED;
526 	req->state |= REQ_STATE_DONE;
527 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
528 
529 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
530 		wakeup(req);
531 	} else if (free_req) {
532 		mpt_free_request(mpt, req);
533 	}
534 
535 	return (TRUE);
536 }
537 
538 /*
539  * Parse additional completion information in the reply
540  * frame for RAID I/O requests.
541  */
542 static int
543 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
544     MSG_DEFAULT_REPLY *reply_frame)
545 {
546 	MSG_RAID_ACTION_REPLY *reply;
547 	struct mpt_raid_action_result *action_result;
548 	MSG_RAID_ACTION_REQUEST *rap;
549 
550 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
551 	req->IOCStatus = le16toh(reply->IOCStatus);
552 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
553 
554 	switch (rap->Action) {
555 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
556 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
557 		break;
558 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
559 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
560 		break;
561 	default:
562 		break;
563 	}
564 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
565 	memcpy(&action_result->action_data, &reply->ActionData,
566 	    sizeof(action_result->action_data));
567 	action_result->action_status = reply->ActionStatus;
568 	return (TRUE);
569 }
570 
571 /*
572  * Utiltity routine to perform a RAID action command;
573  */
574 int
575 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
576 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
577 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
578 		   int write, int wait)
579 {
580 	MSG_RAID_ACTION_REQUEST *rap;
581 	SGE_SIMPLE32 *se;
582 
583 	rap = req->req_vbuf;
584 	memset(rap, 0, sizeof *rap);
585 	rap->Action = Action;
586 	rap->ActionDataWord = ActionDataWord;
587 	rap->Function = MPI_FUNCTION_RAID_ACTION;
588 	rap->VolumeID = vol->config_page->VolumeID;
589 	rap->VolumeBus = vol->config_page->VolumeBus;
590 	if (disk != 0)
591 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
592 	else
593 		rap->PhysDiskNum = 0xFF;
594 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
595 	se->Address = addr;
596 	MPI_pSGE_SET_LENGTH(se, len);
597 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
598 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
599 	    MPI_SGE_FLAGS_END_OF_LIST |
600 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
601 	rap->MsgContext = htole32(req->index | raid_handler_id);
602 
603 	mpt_check_doorbell(mpt);
604 	mpt_send_cmd(mpt, req);
605 
606 	if (wait) {
607 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
608 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
609 	} else {
610 		return (0);
611 	}
612 }
613 
614 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID monitoring kthread.  Returns 0 on success or the
 * errno from mpt_kthread_create().
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		/* No thread will ever release the simq; undo the freeze. */
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
640 
/*
 * Ask the RAID monitoring thread to exit and wait until it has done so.
 * Called with the MPT lock held (see mpt_raid_detach()).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread sleeps on raid_volumes when idle; kick it awake. */
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
656 
/*
 * Completion callback for the bus-rescan CCB issued by the RAID thread;
 * releases the path created for the scan.
 */
static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
}
662 
/*
 * Background thread that refreshes RAID configuration data whenever the
 * event handler signals it (via mpt_raid_wakeup()) and, when flagged,
 * rescans the physical-disk bus.  Runs until mpt->shutdwn_raid is set
 * by mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	union ccb *ccb;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	ccb = xpt_alloc_ccb();
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			/* Nothing pending; wait for the event handler. */
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_release_simq(mpt->phydisk_sim, TRUE);
			CAMLOCK_2_MPTLOCK(mpt);
		}

		if (mpt->raid_rescan != 0) {
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;

			/* Wildcard path: rescan the whole physdisk bus. */
			error = xpt_create_path(&path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* Path is freed in mpt_cam_rescan_callback. */
				xpt_setup_ccb(&ccb->ccb_h, path, 5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_action(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	xpt_free_ccb(ccb);
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	mpt_kthread_exit(0);
}
727 
#if 0
/*
 * Compiled-out prototype support for quiescing physical I/O to a RAID
 * member disk before granting direct access.  NOTE(review): this code
 * has never been enabled; kept for reference only.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
786 
787 /* XXX Ignores that there may be multiple busses/IOCs involved. */
788 cam_status
789 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
790 {
791 	struct mpt_raid_disk *mpt_disk;
792 
793 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
794 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
795 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
796 		*tgt = mpt_disk->config_page.PhysDiskID;
797 		return (0);
798 	}
799 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
800 		 ccb->ccb_h.target_id);
801 	return (-1);
802 }
803 
804 /* XXX Ignores that there may be multiple busses/IOCs involved. */
805 int
806 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
807 {
808 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
809 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
810 
811 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
812 		return (0);
813 	}
814 	ioc_vol = mpt->ioc_page2->RaidVolume;
815 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
816 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
817 		if (ioc_vol->VolumeID == tgt) {
818 			return (1);
819 		}
820 	}
821 	return (0);
822 }
823 
#if 0
/*
 * Compiled-out helper to enable or disable a RAID volume via a RAID
 * action request.  NOTE(review): never built; kept for reference.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/* NOTE(review): req is not freed on timeout. */
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
876 
/*
 * Reconcile a volume's member-disk write-cache-enable (WCE) setting
 * with the driver's configured policy, issuing a volume-settings change
 * to the IOC when they differ.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarily when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the WCE bit just long enough to snapshot the desired
	 * settings word into "data", then restore the cached page; the
	 * page is only updated for real once the IOC accepts the change.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is deliberately not freed here --
		 * presumably because the IOC may still complete it.
		 * Confirm against mpt_wait_req() ownership rules.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* IOC accepted the change; update our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
959 
/*
 * Reconcile a volume's resync rate and resync-priority setting with the
 * driver's configured rate, issuing RAID actions to the IOC as needed.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave whatever the IOC has configured. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req intentionally not freed on timeout. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		/* Rate >= 128 maps to "high priority" resync. */
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Snapshot the settings word with the priority bit
		 * toggled, then restore the cached page (same pattern
		 * as mpt_verify_mwce()).
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req intentionally not freed on timeout. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* IOC accepted the change; update our cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1050 
1051 static void
1052 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1053 		       struct cam_path *path)
1054 {
1055 	struct ccb_relsim crs;
1056 
1057 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1058 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1059 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1060 	crs.openings = mpt->raid_queue_depth;
1061 	xpt_action((union ccb *)&crs);
1062 	if (crs.ccb_h.status != CAM_REQ_CMP)
1063 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1064 			    "with CAM status %#x\n", crs.ccb_h.status);
1065 }
1066 
/*
 * Print a one-time console summary of a RAID volume: its settings,
 * hot spare pool membership, and the identity and state of every
 * member disk (as cached in each disk's config page).
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit and name the ones that are set. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Pluralize "Pool" when more than one pool bit is set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		/* HotSparePool is a bitmask of the 8 possible pools. */
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Mirrors have one primary and one secondary member. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		/* f: physical disk status flags; s: physical disk state. */
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1174 
1175 static void
1176 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1177 {
1178 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1179 	int rd_bus = cam_sim_bus(mpt->sim);
1180 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1181 	u_int i;
1182 
1183 	disk_pg = &mpt_disk->config_page;
1184 	mpt_disk_prt(mpt, mpt_disk,
1185 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1186 		     device_get_nameunit(mpt->dev), rd_bus,
1187 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1188 		     pt_bus, mpt_disk - mpt->raid_disks);
1189 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1190 		return;
1191 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1192 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1193 		   ? ":" : "s:");
1194 	for (i = 0; i < 8; i++) {
1195 		u_int mask;
1196 
1197 		mask = 0x1 << i;
1198 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1199 			continue;
1200 		mpt_prtc(mpt, " %d", i);
1201 	}
1202 	mpt_prtc(mpt, "\n");
1203 }
1204 
/*
 * Re-read the RAID physical disk configuration page for the disk named
 * by the given IOC page 3 entry, refreshing our cached copy in
 * mpt_disk->config_page.  Failures are reported but not propagated.
 */
static void
mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      IOC_3_PHYS_DISK *ioc_disk)
{
	int rv;

	/* Fetch the page header first; it carries the page length/version. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
				 &mpt_disk->config_page.Header,
				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Hdr(%d)\n",
		 	ioc_disk->PhysDiskNum);
		return;
	}
	/* Then read the current page contents into the cached copy. */
	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
				   &mpt_disk->config_page.Header,
				   sizeof(mpt_disk->config_page),
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0)
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Page(%d)\n",
		 	ioc_disk->PhysDiskNum);
}
1230 
/*
 * Re-read a volume's RAID Volume Page 0 into mpt_vol->config_page and,
 * if a resync is in progress, fetch the firmware's progress indicator
 * into mpt_vol->sync_progress.  Marks the volume !UP2DATE on entry and
 * ACTIVE once the page has been read.  Failures are logged only.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	/* Page header first; it supplies the page length/version. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Byte-swap the page from MPI (little-endian) to host order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/* IM (mirror) maps are 1-based; normalize to 0-based. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_disk->member_number--;
		}
	}

	/* Only fetch a progress indicator while a resync is running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		/* Byte-swap the indicator from MPI to host order. */
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1309 
1310 /*
1311  * Update in-core information about RAID support.  We update any entries
1312  * that didn't previously exists or have been marked as needing to
1313  * be updated by our event handler.  Interesting changes are displayed
1314  * to the console.
1315  */
1316 int
1317 mpt_refresh_raid_data(struct mpt_softc *mpt)
1318 {
1319 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1320 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1321 	IOC_3_PHYS_DISK *ioc_disk;
1322 	IOC_3_PHYS_DISK *ioc_last_disk;
1323 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1324 	size_t len;
1325 	int rv;
1326 	int i;
1327 	u_int nonopt_volumes;
1328 
1329 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1330 		return (0);
1331 	}
1332 
1333 	/*
1334 	 * Mark all items as unreferenced by the configuration.
1335 	 * This allows us to find, report, and discard stale
1336 	 * entries.
1337 	 */
1338 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1339 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1340 	}
1341 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1342 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1343 	}
1344 
1345 	/*
1346 	 * Get Physical Disk information.
1347 	 */
1348 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1349 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1350 				   &mpt->ioc_page3->Header, len,
1351 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1352 	if (rv) {
1353 		mpt_prt(mpt,
1354 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1355 		return (-1);
1356 	}
1357 
1358 	ioc_disk = mpt->ioc_page3->PhysDisk;
1359 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1360 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1361 		struct mpt_raid_disk *mpt_disk;
1362 
1363 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1364 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1365 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1366 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1367 
1368 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1369 
1370 		}
1371 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1372 		mpt->raid_rescan++;
1373 	}
1374 
1375 	/*
1376 	 * Refresh volume data.
1377 	 */
1378 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1379 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1380 				   &mpt->ioc_page2->Header, len,
1381 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1382 	if (rv) {
1383 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1384 			"Failed to read IOC Page 2\n");
1385 		return (-1);
1386 	}
1387 
1388 	ioc_vol = mpt->ioc_page2->RaidVolume;
1389 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1390 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1391 		struct mpt_raid_volume *mpt_vol;
1392 
1393 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1394 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1395 		vol_pg = mpt_vol->config_page;
1396 		if (vol_pg == NULL)
1397 			continue;
1398 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1399 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1400 		 || (vol_pg->VolumeStatus.Flags
1401 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1402 
1403 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1404 		}
1405 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1406 	}
1407 
1408 	nonopt_volumes = 0;
1409 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1410 		struct mpt_raid_volume *mpt_vol;
1411 		uint64_t total;
1412 		uint64_t left;
1413 		int m;
1414 		u_int prio;
1415 
1416 		mpt_vol = &mpt->raid_volumes[i];
1417 
1418 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1419 			continue;
1420 		}
1421 
1422 		vol_pg = mpt_vol->config_page;
1423 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1424 		 == MPT_RVF_ANNOUNCED) {
1425 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1426 			mpt_vol->flags = 0;
1427 			continue;
1428 		}
1429 
1430 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1431 			mpt_announce_vol(mpt, mpt_vol);
1432 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1433 		}
1434 
1435 		if (vol_pg->VolumeStatus.State !=
1436 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1437 			nonopt_volumes++;
1438 
1439 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1440 			continue;
1441 
1442 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1443 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1444 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1445 		mpt_verify_mwce(mpt, mpt_vol);
1446 
1447 		if (vol_pg->VolumeStatus.Flags == 0) {
1448 			continue;
1449 		}
1450 
1451 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1452 		for (m = 1; m <= 0x80; m <<= 1) {
1453 			switch (vol_pg->VolumeStatus.Flags & m) {
1454 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1455 				mpt_prtc(mpt, " Enabled");
1456 				break;
1457 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1458 				mpt_prtc(mpt, " Quiesced");
1459 				break;
1460 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1461 				mpt_prtc(mpt, " Re-Syncing");
1462 				break;
1463 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1464 				mpt_prtc(mpt, " Inactive");
1465 				break;
1466 			default:
1467 				break;
1468 			}
1469 		}
1470 		mpt_prtc(mpt, " )\n");
1471 
1472 		if ((vol_pg->VolumeStatus.Flags
1473 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1474 			continue;
1475 
1476 		mpt_verify_resync_rate(mpt, mpt_vol);
1477 
1478 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1479 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1480 		if (vol_pg->ResyncRate != 0) {
1481 
1482 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1483 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1484 			    prio / 1000, prio % 1000);
1485 		} else {
1486 			prio = vol_pg->VolumeSettings.Settings
1487 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1488 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1489 			    prio ? "High" : "Low");
1490 		}
1491 #if __FreeBSD_version >= 500000
1492 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1493 			    "blocks remaining\n", (uintmax_t)left,
1494 			    (uintmax_t)total);
1495 #else
1496 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1497 			    "blocks remaining\n", (uint64_t)left,
1498 			    (uint64_t)total);
1499 #endif
1500 
1501 		/* Periodically report on sync progress. */
1502 		mpt_schedule_raid_refresh(mpt);
1503 	}
1504 
1505 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1506 		struct mpt_raid_disk *mpt_disk;
1507 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1508 		int m;
1509 
1510 		mpt_disk = &mpt->raid_disks[i];
1511 		disk_pg = &mpt_disk->config_page;
1512 
1513 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1514 			continue;
1515 
1516 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1517 		 == MPT_RDF_ANNOUNCED) {
1518 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1519 			mpt_disk->flags = 0;
1520 			mpt->raid_rescan++;
1521 			continue;
1522 		}
1523 
1524 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1525 
1526 			mpt_announce_disk(mpt, mpt_disk);
1527 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1528 		}
1529 
1530 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1531 			continue;
1532 
1533 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1534 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1535 		if (disk_pg->PhysDiskStatus.Flags == 0)
1536 			continue;
1537 
1538 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1539 		for (m = 1; m <= 0x80; m <<= 1) {
1540 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1541 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1542 				mpt_prtc(mpt, " Out-Of-Sync");
1543 				break;
1544 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1545 				mpt_prtc(mpt, " Quiesced");
1546 				break;
1547 			default:
1548 				break;
1549 			}
1550 		}
1551 		mpt_prtc(mpt, " )\n");
1552 	}
1553 
1554 	mpt->raid_nonopt_volumes = nonopt_volumes;
1555 	return (0);
1556 }
1557 
/*
 * Callout handler: wake the RAID monitoring thread so it re-reads
 * RAID state and reports resync progress.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1568 
/*
 * (Re)arm the RAID status callout so mpt_raid_timer() fires after the
 * standard sync-report interval.
 */
void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{
	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1575 
1576 void
1577 mpt_raid_free_mem(struct mpt_softc *mpt)
1578 {
1579 
1580 	if (mpt->raid_volumes) {
1581 		struct mpt_raid_volume *mpt_raid;
1582 		int i;
1583 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1584 			mpt_raid = &mpt->raid_volumes[i];
1585 			if (mpt_raid->config_page) {
1586 				free(mpt_raid->config_page, M_DEVBUF);
1587 				mpt_raid->config_page = NULL;
1588 			}
1589 		}
1590 		free(mpt->raid_volumes, M_DEVBUF);
1591 		mpt->raid_volumes = NULL;
1592 	}
1593 	if (mpt->raid_disks) {
1594 		free(mpt->raid_disks, M_DEVBUF);
1595 		mpt->raid_disks = NULL;
1596 	}
1597 	if (mpt->ioc_page2) {
1598 		free(mpt->ioc_page2, M_DEVBUF);
1599 		mpt->ioc_page2 = NULL;
1600 	}
1601 	if (mpt->ioc_page3) {
1602 		free(mpt->ioc_page3, M_DEVBUF);
1603 		mpt->ioc_page3 = NULL;
1604 	}
1605 	mpt->raid_max_volumes =  0;
1606 	mpt->raid_max_disks =  0;
1607 }
1608 
1609 #if __FreeBSD_version >= 500000
1610 static int
1611 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1612 {
1613 	struct mpt_raid_volume *mpt_vol;
1614 
1615 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1616 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1617 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1618 		return (EINVAL);
1619 
1620 	MPT_LOCK(mpt);
1621 	mpt->raid_resync_rate = rate;
1622 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1623 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1624 			continue;
1625 		}
1626 		mpt_verify_resync_rate(mpt, mpt_vol);
1627 	}
1628 	MPT_UNLOCK(mpt);
1629 	return (0);
1630 }
1631 
/*
 * Validate and apply a new default volume queue depth, then instruct
 * CAM to adjust the openings of every active volume.  Returns EINVAL
 * for depths outside 1-255.
 */
static int
mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
{
	struct mpt_raid_volume *mpt_vol;

	if (vol_queue_depth > 255 || vol_queue_depth < 1)
		return (EINVAL);

	MPT_LOCK(mpt);
	mpt->raid_queue_depth = vol_queue_depth;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		struct cam_path *path;
		int error;

		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
			continue;

		mpt->raid_rescan = 0;

		/* CAM calls must be made under the CAM lock, not ours. */
		MPTLOCK_2_CAMLOCK(mpt);
		error = xpt_create_path(&path, xpt_periph,
					cam_sim_path(mpt->sim),
					mpt_vol->config_page->VolumeID,
					/*lun*/0);
		if (error != CAM_REQ_CMP) {
			CAMLOCK_2_MPTLOCK(mpt);
			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
			continue;
		}
		mpt_adjust_queue_depth(mpt, mpt_vol, path);
		xpt_free_path(path);
		CAMLOCK_2_MPTLOCK(mpt);
	}
	MPT_UNLOCK(mpt);
	return (0);
}
1668 
1669 static int
1670 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1671 {
1672 	struct mpt_raid_volume *mpt_vol;
1673 	int force_full_resync;
1674 
1675 	MPT_LOCK(mpt);
1676 	if (mwce == mpt->raid_mwce_setting) {
1677 		MPT_UNLOCK(mpt);
1678 		return (0);
1679 	}
1680 
1681 	/*
1682 	 * Catch MWCE being left on due to a failed shutdown.  Since
1683 	 * sysctls cannot be set by the loader, we treat the first
1684 	 * setting of this varible specially and force a full volume
1685 	 * resync if MWCE is enabled and a resync is in progress.
1686 	 */
1687 	force_full_resync = 0;
1688 	if (mpt->raid_mwce_set == 0
1689 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1690 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1691 		force_full_resync = 1;
1692 
1693 	mpt->raid_mwce_setting = mwce;
1694 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1695 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1696 		int resyncing;
1697 		int mwce;
1698 
1699 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1700 			continue;
1701 
1702 		vol_pg = mpt_vol->config_page;
1703 		resyncing = vol_pg->VolumeStatus.Flags
1704 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1705 		mwce = vol_pg->VolumeSettings.Settings
1706 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1707 		if (force_full_resync && resyncing && mwce) {
1708 
1709 			/*
1710 			 * XXX disable/enable volume should force a resync,
1711 			 *     but we'll need to queice, drain, and restart
1712 			 *     I/O to do that.
1713 			 */
1714 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1715 				    "detected.  Suggest full resync.\n");
1716 		}
1717 		mpt_verify_mwce(mpt, mpt_vol);
1718 	}
1719 	mpt->raid_mwce_set = 1;
1720 	MPT_UNLOCK(mpt);
1721 	return (0);
1722 }
/* Names for each mpt_raid_mwce_t value, indexed by the setting. */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1730 
1731 static int
1732 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1733 {
1734 	char inbuf[20];
1735 	struct mpt_softc *mpt;
1736 	const char *str;
1737 	int error;
1738 	u_int size;
1739 	u_int i;
1740 
1741 	GIANT_REQUIRED;
1742 
1743 	mpt = (struct mpt_softc *)arg1;
1744 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1745 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1746 	if (error || !req->newptr) {
1747 		return (error);
1748 	}
1749 
1750 	size = req->newlen - req->newidx;
1751 	if (size >= sizeof(inbuf)) {
1752 		return (EINVAL);
1753 	}
1754 
1755 	error = SYSCTL_IN(req, inbuf, size);
1756 	if (error) {
1757 		return (error);
1758 	}
1759 	inbuf[size] = '\0';
1760 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1761 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1762 			return (mpt_raid_set_vol_mwce(mpt, i));
1763 		}
1764 	}
1765 	return (EINVAL);
1766 }
1767 
1768 static int
1769 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1770 {
1771 	struct mpt_softc *mpt;
1772 	u_int raid_resync_rate;
1773 	int error;
1774 
1775 	GIANT_REQUIRED;
1776 
1777 	mpt = (struct mpt_softc *)arg1;
1778 	raid_resync_rate = mpt->raid_resync_rate;
1779 
1780 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1781 	if (error || !req->newptr) {
1782 		return error;
1783 	}
1784 
1785 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1786 }
1787 
1788 static int
1789 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1790 {
1791 	struct mpt_softc *mpt;
1792 	u_int raid_queue_depth;
1793 	int error;
1794 
1795 	GIANT_REQUIRED;
1796 
1797 	mpt = (struct mpt_softc *)arg1;
1798 	raid_queue_depth = mpt->raid_queue_depth;
1799 
1800 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1801 	if (error || !req->newptr) {
1802 		return error;
1803 	}
1804 
1805 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1806 }
1807 
/*
 * Register the per-device RAID sysctl nodes (MWCE policy, default
 * volume queue depth, resync rate, and the read-only count of
 * non-optimal volumes) under the device's sysctl tree.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1833 #endif
1834