xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 9fd69f37d28cfd7438cac3eeb45fe9dd46b4d7dd)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56 
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define	GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62 
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66 
67 #include <machine/stdarg.h>
68 
/*
 * Scratch area holding the outcome of a RAID action request.  The reply's
 * ActionData and ActionStatus are copied here by
 * mpt_raid_reply_frame_handler() so callers can inspect the result after
 * the reply frame has been returned to the IOC.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * The action result is stored directly after the RAID action request
 * message in the request buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Strip the status-flag bits, leaving only the IOC status code. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83 
84 
85 static mpt_probe_handler_t	mpt_raid_probe;
86 static mpt_attach_handler_t	mpt_raid_attach;
87 static mpt_enable_handler_t	mpt_raid_enable;
88 static mpt_event_handler_t	mpt_raid_event;
89 static mpt_shutdown_handler_t	mpt_raid_shutdown;
90 static mpt_reset_handler_t	mpt_raid_ioc_reset;
91 static mpt_detach_handler_t	mpt_raid_detach;
92 
93 static struct mpt_personality mpt_raid_personality =
94 {
95 	.name		= "mpt_raid",
96 	.probe		= mpt_raid_probe,
97 	.attach		= mpt_raid_attach,
98 	.enable		= mpt_raid_enable,
99 	.event		= mpt_raid_event,
100 	.reset		= mpt_raid_ioc_reset,
101 	.shutdown	= mpt_raid_shutdown,
102 	.detach		= mpt_raid_detach,
103 };
104 
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107 
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110 					MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117 			   struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
120 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
121     struct cam_path *);
122 #if __FreeBSD_version < 500000
123 #define	mpt_raid_sysctl_attach(x)	do { } while (0)
124 #else
125 static void mpt_raid_sysctl_attach(struct mpt_softc *);
126 #endif
127 
128 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
129 
130 const char *
131 mpt_vol_type(struct mpt_raid_volume *vol)
132 {
133 	switch (vol->config_page->VolumeType) {
134 	case MPI_RAID_VOL_TYPE_IS:
135 		return ("RAID-0");
136 	case MPI_RAID_VOL_TYPE_IME:
137 		return ("RAID-1E");
138 	case MPI_RAID_VOL_TYPE_IM:
139 		return ("RAID-1");
140 	default:
141 		return ("Unknown");
142 	}
143 }
144 
145 const char *
146 mpt_vol_state(struct mpt_raid_volume *vol)
147 {
148 	switch (vol->config_page->VolumeStatus.State) {
149 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
150 		return ("Optimal");
151 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
152 		return ("Degraded");
153 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
154 		return ("Failed");
155 	default:
156 		return ("Unknown");
157 	}
158 }
159 
160 const char *
161 mpt_disk_state(struct mpt_raid_disk *disk)
162 {
163 	switch (disk->config_page.PhysDiskStatus.State) {
164 	case MPI_PHYSDISK0_STATUS_ONLINE:
165 		return ("Online");
166 	case MPI_PHYSDISK0_STATUS_MISSING:
167 		return ("Missing");
168 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
169 		return ("Incompatible");
170 	case MPI_PHYSDISK0_STATUS_FAILED:
171 		return ("Failed");
172 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
173 		return ("Initializing");
174 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
175 		return ("Offline Requested");
176 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
177 		return ("Failed per Host Request");
178 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
179 		return ("Offline");
180 	default:
181 		return ("Unknown");
182 	}
183 }
184 
/*
 * printf()-style message, prefixed with the unit name, volume index
 * within raid_volumes[], and the volume's bus:ID address.
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
198 
/*
 * printf()-style message, prefixed with the disk's position: its
 * volume/member number when it belongs to a volume, otherwise its
 * raw bus:ID address.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
219 
220 static void
221 mpt_raid_async(void *callback_arg, u_int32_t code,
222 	       struct cam_path *path, void *arg)
223 {
224 	struct mpt_softc *mpt;
225 
226 	mpt = (struct mpt_softc*)callback_arg;
227 	switch (code) {
228 	case AC_FOUND_DEVICE:
229 	{
230 		struct ccb_getdev *cgd;
231 		struct mpt_raid_volume *mpt_vol;
232 
233 		cgd = (struct ccb_getdev *)arg;
234 		if (cgd == NULL) {
235 			break;
236 		}
237 
238 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
239 			 cgd->ccb_h.target_id);
240 
241 		RAID_VOL_FOREACH(mpt, mpt_vol) {
242 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
243 				continue;
244 
245 			if (mpt_vol->config_page->VolumeID
246 			 == cgd->ccb_h.target_id) {
247 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
248 				break;
249 			}
250 		}
251 	}
252 	default:
253 		break;
254 	}
255 }
256 
257 int
258 mpt_raid_probe(struct mpt_softc *mpt)
259 {
260 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
261 		return (ENODEV);
262 	}
263 	return (0);
264 }
265 
266 int
267 mpt_raid_attach(struct mpt_softc *mpt)
268 {
269 	struct ccb_setasync csa;
270 	mpt_handler_t	 handler;
271 	int		 error;
272 
273 	mpt_callout_init(mpt, &mpt->raid_timer);
274 
275 	error = mpt_spawn_raid_thread(mpt);
276 	if (error != 0) {
277 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
278 		goto cleanup;
279 	}
280 
281 	MPT_LOCK(mpt);
282 	handler.reply_handler = mpt_raid_reply_handler;
283 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
284 				     &raid_handler_id);
285 	if (error != 0) {
286 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
287 		goto cleanup;
288 	}
289 
290 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
291 	csa.ccb_h.func_code = XPT_SASYNC_CB;
292 	csa.event_enable = AC_FOUND_DEVICE;
293 	csa.callback = mpt_raid_async;
294 	csa.callback_arg = mpt;
295 	xpt_action((union ccb *)&csa);
296 	if (csa.ccb_h.status != CAM_REQ_CMP) {
297 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298 			"CAM async handler.\n");
299 	}
300 	MPT_UNLOCK(mpt);
301 
302 	mpt_raid_sysctl_attach(mpt);
303 	return (0);
304 cleanup:
305 	MPT_UNLOCK(mpt);
306 	mpt_raid_detach(mpt);
307 	return (error);
308 }
309 
/*
 * RAID personality enable hook.  All setup is done at attach time, so
 * there is nothing further to do here.
 */
int
mpt_raid_enable(struct mpt_softc *mpt)
{
	return (0);
}
315 
/*
 * RAID personality detach: undo mpt_raid_attach() in reverse order —
 * drain the timer, stop the RAID thread, deregister our reply handler,
 * and disable the CAM async callback (event_enable = 0).
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	/* Drain outside the lock; the callout may be in flight. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
337 
/*
 * IOC reset hook for the RAID personality.  Intentionally a no-op for
 * now; cached RAID state is refreshed by the RAID thread.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
343 
/*
 * Event descriptions, indexed by the MPI_EVENT_RAID_RC_* reason code
 * of an integrated RAID event.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
359 
/*
 * Event handler for integrated RAID events.  Locates the volume and/or
 * physical disk the event refers to, invalidates the relevant cached
 * state, logs the event, and wakes the RAID thread to refresh data and
 * rescan as needed.  Returns 1 when the event was consumed, 0 when the
 * event is not a RAID event.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Match the event's bus/ID against our active volumes. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* PhysDiskNum of 0xFF means no physical disk is referenced. */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/*
	 * Invalidate cached configuration and/or schedule a bus rescan
	 * according to the event's reason code.
	 */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	/* Log the event, prefixed with the most specific object we found. */
	if (print_event) {
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Kick the RAID thread to refresh pages / rescan. */
	mpt_raid_wakeup(mpt);
	return (1);
}
490 
491 static void
492 mpt_raid_shutdown(struct mpt_softc *mpt)
493 {
494 	struct mpt_raid_volume *mpt_vol;
495 
496 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
497 		return;
498 	}
499 
500 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
501 	RAID_VOL_FOREACH(mpt, mpt_vol) {
502 		mpt_verify_mwce(mpt, mpt_vol);
503 	}
504 }
505 
/*
 * Reply handler for RAID action requests.  Parses the reply frame (when
 * present), marks the request complete, removes it from the pending list,
 * and either wakes a thread sleeping on the request or frees it.
 * Always returns TRUE so the reply frame is reclaimed.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter (mpt_wait_req) owns the request; don't free it here. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
536 
537 /*
538  * Parse additional completion information in the reply
539  * frame for RAID I/O requests.
540  */
541 static int
542 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
543     MSG_DEFAULT_REPLY *reply_frame)
544 {
545 	MSG_RAID_ACTION_REPLY *reply;
546 	struct mpt_raid_action_result *action_result;
547 	MSG_RAID_ACTION_REQUEST *rap;
548 
549 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
550 	req->IOCStatus = le16toh(reply->IOCStatus);
551 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
552 
553 	switch (rap->Action) {
554 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
555 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
556 		break;
557 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
558 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
559 		break;
560 	default:
561 		break;
562 	}
563 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
564 	memcpy(&action_result->action_data, &reply->ActionData,
565 	    sizeof(action_result->action_data));
566 	action_result->action_status = le16toh(reply->ActionStatus);
567 	return (TRUE);
568 }
569 
/*
 * Utility routine to perform a RAID action command.
 */
573 int
574 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
575 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
576 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
577 		   int write, int wait)
578 {
579 	MSG_RAID_ACTION_REQUEST *rap;
580 	SGE_SIMPLE32 *se;
581 
582 	rap = req->req_vbuf;
583 	memset(rap, 0, sizeof *rap);
584 	rap->Action = Action;
585 	rap->ActionDataWord = htole32(ActionDataWord);
586 	rap->Function = MPI_FUNCTION_RAID_ACTION;
587 	rap->VolumeID = vol->config_page->VolumeID;
588 	rap->VolumeBus = vol->config_page->VolumeBus;
589 	if (disk != 0)
590 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
591 	else
592 		rap->PhysDiskNum = 0xFF;
593 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
594 	se->Address = htole32(addr);
595 	MPI_pSGE_SET_LENGTH(se, len);
596 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
597 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
598 	    MPI_SGE_FLAGS_END_OF_LIST |
599 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
600 	se->FlagsLength = htole32(se->FlagsLength);
601 	rap->MsgContext = htole32(req->index | raid_handler_id);
602 
603 	mpt_check_doorbell(mpt);
604 	mpt_send_cmd(mpt, req);
605 
606 	if (wait) {
607 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
608 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
609 	} else {
610 		return (0);
611 	}
612 }
613 
614 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the per-controller RAID monitoring thread.  The physical disk
 * SIM queue is frozen until the thread's first successful data refresh
 * (released in mpt_raid_thread()); on spawn failure the freeze is undone
 * here.  Returns 0 on success or the kthread creation error.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
640 
/*
 * Ask the RAID thread to exit and wait for it to do so.  The thread
 * sleeps on raid_volumes and signals its exit by clearing raid_thread
 * and waking &mpt->raid_thread.  Called with the MPT lock held.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
656 
/*
 * RAID monitoring thread.  Sleeps until woken (mpt_raid_wakeup / event
 * handler), refreshes cached RAID data, releases the phydisk SIM queue
 * after the first successful refresh, and triggers CAM bus rescans when
 * raid_rescan has been bumped.  Exits when shutdwn_raid is set.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		/* On refresh failure, retry via the RAID timer. */
		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_release_simq(mpt->phydisk_sim, TRUE);
			CAMLOCK_2_MPTLOCK(mpt);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock for the (sleeping) CCB allocation. */
			MPT_UNLOCK(mpt);

			ccb = xpt_alloc_ccb();

			MPT_LOCK(mpt);
			error = xpt_create_path(&path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				xpt_free_ccb(ccb);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* xpt_rescan() takes ownership of the CCB. */
				xpt_rescan(ccb);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	mpt_kthread_exit(0);
}
718 
#if 0
/*
 * NOTE(review): this whole region is compiled out (#if 0) — an
 * unfinished implementation of quiescing physical I/O to a member disk.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
777 
778 /* XXX Ignores that there may be multiple busses/IOCs involved. */
779 cam_status
780 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
781 {
782 	struct mpt_raid_disk *mpt_disk;
783 
784 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
785 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
786 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
787 		*tgt = mpt_disk->config_page.PhysDiskID;
788 		return (0);
789 	}
790 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
791 		 ccb->ccb_h.target_id);
792 	return (-1);
793 }
794 
795 /* XXX Ignores that there may be multiple busses/IOCs involved. */
796 int
797 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
798 {
799 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
800 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
801 
802 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
803 		return (0);
804 	}
805 	ioc_vol = mpt->ioc_page2->RaidVolume;
806 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
807 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
808 		if (ioc_vol->VolumeID == tgt) {
809 			return (1);
810 		}
811 	}
812 	return (0);
813 }
814 
#if 0
/*
 * NOTE(review): compiled out (#if 0) — enables/disables a volume via a
 * RAID action request.  If ever enabled, note that the ETIMEDOUT path
 * below returns without calling mpt_free_request(), leaking the request
 * (possibly deliberate, since the IOC may still own it — confirm).
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
867 
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with the
 * driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS RAID
 * action when they differ.  In REBUILD_ONLY mode, MWCE should be on only
 * while a resync is in progress.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		/* "No change" policy: never touch the firmware setting. */
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * XOR-toggle the WCE bit to snapshot the desired settings word
	 * into 'data', then toggle back so the cached page still mirrors
	 * the hardware until the request succeeds.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed on this path — the IOC
		 * may still own the request after a timeout; confirm
		 * whether this leak is intentional.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the new setting to the cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
950 
/*
 * Reconcile a volume's resync rate and resync-priority setting with the
 * driver's configured rate.  A configured rate >= 128 implies
 * high-priority resync; the priority bit is updated via a
 * CHANGE_VOLUME_SETTINGS action when it disagrees.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave the firmware setting alone. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): req is not freed on this path —
			 * confirm whether the leak is intentional (the IOC
			 * may still own the request).
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * XOR-toggle the priority bit to snapshot the desired
		 * settings into 'data', then toggle back so the cached
		 * page mirrors the hardware until the request succeeds.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req leaked here as well — confirm. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the new priority to the cache. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1041 
1042 static void
1043 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1044 		       struct cam_path *path)
1045 {
1046 	struct ccb_relsim crs;
1047 
1048 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1049 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1050 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1051 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1052 	crs.openings = mpt->raid_queue_depth;
1053 	xpt_action((union ccb *)&crs);
1054 	if (crs.ccb_h.status != CAM_REQ_CMP)
1055 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1056 			    "with CAM status %#x\n", crs.ccb_h.status);
1057 }
1058 
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	/*
	 * Report a volume's settings, hot spare pool membership, and the
	 * status of each member disk to the console.
	 */
	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk all 16 settings bits and name the ones that are set. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Singular vs. plural depending on pool bit count. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		/* Pass-thru bus address of this member disk. */
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Mirror (IM) members are primary/secondary. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		/* Decode the disk's status flags, then its state. */
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1166 
1167 static void
1168 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1169 {
1170 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1171 	int rd_bus = cam_sim_bus(mpt->sim);
1172 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1173 	u_int i;
1174 
1175 	disk_pg = &mpt_disk->config_page;
1176 	mpt_disk_prt(mpt, mpt_disk,
1177 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1178 		     device_get_nameunit(mpt->dev), rd_bus,
1179 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1180 		     pt_bus, mpt_disk - mpt->raid_disks);
1181 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1182 		return;
1183 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1184 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1185 		   ? ":" : "s:");
1186 	for (i = 0; i < 8; i++) {
1187 		u_int mask;
1188 
1189 		mask = 0x1 << i;
1190 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1191 			continue;
1192 		mpt_prtc(mpt, " %d", i);
1193 	}
1194 	mpt_prtc(mpt, "\n");
1195 }
1196 
/*
 * Re-read a physical disk's RAID configuration page (header first, then
 * the current page contents) into the in-core mpt_raid_disk entry.
 */
static void
mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      IOC_3_PHYS_DISK *ioc_disk)
{
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
				 &mpt_disk->config_page.Header,
				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Hdr(%d)\n",
		 	ioc_disk->PhysDiskNum);
		return;
	}
	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
				   &mpt_disk->config_page.Header,
				   sizeof(mpt_disk->config_page),
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0)
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Page(%d)\n",
		 	ioc_disk->PhysDiskNum);
	/*
	 * NOTE(review): the endian conversion below runs even when the page
	 * read above failed, converting whatever data was already in the
	 * in-core page — confirm this is intentional.
	 */
	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
}
1223 
/*
 * Re-read a RAID volume's configuration page, refresh the member disk
 * bookkeeping, and, if a resync is in progress, fetch the progress
 * indicator structure from the IOC.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Data is about to be re-read; clear the up-to-date mark. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/*
			 * NOTE(review): PhysDiskMap is decremented for IM
			 * (mirror) volumes only, presumably because the map
			 * is 1-based there — confirm against the MPI spec.
			 */
			mpt_disk->member_number--;
		}
	}

	/* Nothing more to do unless a resync is in progress. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	/* Fetch the resync progress indicator from the IOC. */
	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1302 
1303 /*
1304  * Update in-core information about RAID support.  We update any entries
1305  * that didn't previously exists or have been marked as needing to
1306  * be updated by our event handler.  Interesting changes are displayed
1307  * to the console.
1308  */
1309 int
1310 mpt_refresh_raid_data(struct mpt_softc *mpt)
1311 {
1312 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1313 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1314 	IOC_3_PHYS_DISK *ioc_disk;
1315 	IOC_3_PHYS_DISK *ioc_last_disk;
1316 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1317 	size_t len;
1318 	int rv;
1319 	int i;
1320 	u_int nonopt_volumes;
1321 
1322 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1323 		return (0);
1324 	}
1325 
1326 	/*
1327 	 * Mark all items as unreferenced by the configuration.
1328 	 * This allows us to find, report, and discard stale
1329 	 * entries.
1330 	 */
1331 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1332 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1333 	}
1334 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1335 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1336 	}
1337 
1338 	/*
1339 	 * Get Physical Disk information.
1340 	 */
1341 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1342 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1343 				   &mpt->ioc_page3->Header, len,
1344 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1345 	if (rv) {
1346 		mpt_prt(mpt,
1347 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1348 		return (-1);
1349 	}
1350 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1351 
1352 	ioc_disk = mpt->ioc_page3->PhysDisk;
1353 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1354 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1355 		struct mpt_raid_disk *mpt_disk;
1356 
1357 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1358 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1359 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1360 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1361 
1362 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1363 
1364 		}
1365 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1366 		mpt->raid_rescan++;
1367 	}
1368 
1369 	/*
1370 	 * Refresh volume data.
1371 	 */
1372 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1373 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1374 				   &mpt->ioc_page2->Header, len,
1375 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1376 	if (rv) {
1377 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1378 			"Failed to read IOC Page 2\n");
1379 		return (-1);
1380 	}
1381 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1382 
1383 	ioc_vol = mpt->ioc_page2->RaidVolume;
1384 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1385 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1386 		struct mpt_raid_volume *mpt_vol;
1387 
1388 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1389 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1390 		vol_pg = mpt_vol->config_page;
1391 		if (vol_pg == NULL)
1392 			continue;
1393 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1394 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1395 		 || (vol_pg->VolumeStatus.Flags
1396 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1397 
1398 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1399 		}
1400 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1401 	}
1402 
1403 	nonopt_volumes = 0;
1404 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1405 		struct mpt_raid_volume *mpt_vol;
1406 		uint64_t total;
1407 		uint64_t left;
1408 		int m;
1409 		u_int prio;
1410 
1411 		mpt_vol = &mpt->raid_volumes[i];
1412 
1413 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1414 			continue;
1415 		}
1416 
1417 		vol_pg = mpt_vol->config_page;
1418 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1419 		 == MPT_RVF_ANNOUNCED) {
1420 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1421 			mpt_vol->flags = 0;
1422 			continue;
1423 		}
1424 
1425 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1426 			mpt_announce_vol(mpt, mpt_vol);
1427 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1428 		}
1429 
1430 		if (vol_pg->VolumeStatus.State !=
1431 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1432 			nonopt_volumes++;
1433 
1434 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1435 			continue;
1436 
1437 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1438 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1439 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1440 		mpt_verify_mwce(mpt, mpt_vol);
1441 
1442 		if (vol_pg->VolumeStatus.Flags == 0) {
1443 			continue;
1444 		}
1445 
1446 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1447 		for (m = 1; m <= 0x80; m <<= 1) {
1448 			switch (vol_pg->VolumeStatus.Flags & m) {
1449 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1450 				mpt_prtc(mpt, " Enabled");
1451 				break;
1452 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1453 				mpt_prtc(mpt, " Quiesced");
1454 				break;
1455 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1456 				mpt_prtc(mpt, " Re-Syncing");
1457 				break;
1458 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1459 				mpt_prtc(mpt, " Inactive");
1460 				break;
1461 			default:
1462 				break;
1463 			}
1464 		}
1465 		mpt_prtc(mpt, " )\n");
1466 
1467 		if ((vol_pg->VolumeStatus.Flags
1468 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1469 			continue;
1470 
1471 		mpt_verify_resync_rate(mpt, mpt_vol);
1472 
1473 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1474 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1475 		if (vol_pg->ResyncRate != 0) {
1476 
1477 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1478 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1479 			    prio / 1000, prio % 1000);
1480 		} else {
1481 			prio = vol_pg->VolumeSettings.Settings
1482 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1483 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1484 			    prio ? "High" : "Low");
1485 		}
1486 #if __FreeBSD_version >= 500000
1487 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1488 			    "blocks remaining\n", (uintmax_t)left,
1489 			    (uintmax_t)total);
1490 #else
1491 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1492 			    "blocks remaining\n", (uint64_t)left,
1493 			    (uint64_t)total);
1494 #endif
1495 
1496 		/* Periodically report on sync progress. */
1497 		mpt_schedule_raid_refresh(mpt);
1498 	}
1499 
1500 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1501 		struct mpt_raid_disk *mpt_disk;
1502 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1503 		int m;
1504 
1505 		mpt_disk = &mpt->raid_disks[i];
1506 		disk_pg = &mpt_disk->config_page;
1507 
1508 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1509 			continue;
1510 
1511 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1512 		 == MPT_RDF_ANNOUNCED) {
1513 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1514 			mpt_disk->flags = 0;
1515 			mpt->raid_rescan++;
1516 			continue;
1517 		}
1518 
1519 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1520 
1521 			mpt_announce_disk(mpt, mpt_disk);
1522 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1523 		}
1524 
1525 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1526 			continue;
1527 
1528 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1529 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1530 		if (disk_pg->PhysDiskStatus.Flags == 0)
1531 			continue;
1532 
1533 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1534 		for (m = 1; m <= 0x80; m <<= 1) {
1535 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1536 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1537 				mpt_prtc(mpt, " Out-Of-Sync");
1538 				break;
1539 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1540 				mpt_prtc(mpt, " Quiesced");
1541 				break;
1542 			default:
1543 				break;
1544 			}
1545 		}
1546 		mpt_prtc(mpt, " )\n");
1547 	}
1548 
1549 	mpt->raid_nonopt_volumes = nonopt_volumes;
1550 	return (0);
1551 }
1552 
/*
 * Callout handler for the periodic RAID refresh timer; wakes the RAID
 * monitoring thread so it can re-read controller state.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	/* Pre-5.x callouts do not run with the softc lock held. */
#if __FreeBSD_version < 500000
	MPT_LOCK(mpt);
#endif
	MPT_LOCK_ASSERT(mpt);
	mpt_raid_wakeup(mpt);
#if __FreeBSD_version < 500000
	MPT_UNLOCK(mpt);
#endif
}
1568 
1569 void
1570 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1571 {
1572 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1573 		      mpt_raid_timer, mpt);
1574 }
1575 
1576 void
1577 mpt_raid_free_mem(struct mpt_softc *mpt)
1578 {
1579 
1580 	if (mpt->raid_volumes) {
1581 		struct mpt_raid_volume *mpt_raid;
1582 		int i;
1583 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1584 			mpt_raid = &mpt->raid_volumes[i];
1585 			if (mpt_raid->config_page) {
1586 				free(mpt_raid->config_page, M_DEVBUF);
1587 				mpt_raid->config_page = NULL;
1588 			}
1589 		}
1590 		free(mpt->raid_volumes, M_DEVBUF);
1591 		mpt->raid_volumes = NULL;
1592 	}
1593 	if (mpt->raid_disks) {
1594 		free(mpt->raid_disks, M_DEVBUF);
1595 		mpt->raid_disks = NULL;
1596 	}
1597 	if (mpt->ioc_page2) {
1598 		free(mpt->ioc_page2, M_DEVBUF);
1599 		mpt->ioc_page2 = NULL;
1600 	}
1601 	if (mpt->ioc_page3) {
1602 		free(mpt->ioc_page3, M_DEVBUF);
1603 		mpt->ioc_page3 = NULL;
1604 	}
1605 	mpt->raid_max_volumes =  0;
1606 	mpt->raid_max_disks =  0;
1607 }
1608 
1609 #if __FreeBSD_version >= 500000
1610 static int
1611 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1612 {
1613 	struct mpt_raid_volume *mpt_vol;
1614 
1615 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1616 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1617 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1618 		return (EINVAL);
1619 
1620 	MPT_LOCK(mpt);
1621 	mpt->raid_resync_rate = rate;
1622 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1623 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1624 			continue;
1625 		}
1626 		mpt_verify_resync_rate(mpt, mpt_vol);
1627 	}
1628 	MPT_UNLOCK(mpt);
1629 	return (0);
1630 }
1631 
/*
 * Set the default volume queue depth and apply it to every active
 * volume via an XPT_REL_SIMQ adjustment.  Returns EINVAL if the depth
 * is outside 1-255.
 */
static int
mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
{
	struct mpt_raid_volume *mpt_vol;

	if (vol_queue_depth > 255 || vol_queue_depth < 1)
		return (EINVAL);

	MPT_LOCK(mpt);
	mpt->raid_queue_depth = vol_queue_depth;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		struct cam_path *path;
		int error;

		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
			continue;

		/*
		 * NOTE(review): raid_rescan is reset inside the loop, once
		 * per active volume — confirm this shouldn't be done once
		 * before the loop instead.
		 */
		mpt->raid_rescan = 0;

		/* Drop to the CAM lock to build a path to this volume. */
		MPTLOCK_2_CAMLOCK(mpt);
		error = xpt_create_path(&path, xpt_periph,
					cam_sim_path(mpt->sim),
					mpt_vol->config_page->VolumeID,
					/*lun*/0);
		if (error != CAM_REQ_CMP) {
			CAMLOCK_2_MPTLOCK(mpt);
			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
			continue;
		}
		mpt_adjust_queue_depth(mpt, mpt_vol, path);
		xpt_free_path(path);
		CAMLOCK_2_MPTLOCK(mpt);
	}
	MPT_UNLOCK(mpt);
	return (0);
}
1668 
1669 static int
1670 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1671 {
1672 	struct mpt_raid_volume *mpt_vol;
1673 	int force_full_resync;
1674 
1675 	MPT_LOCK(mpt);
1676 	if (mwce == mpt->raid_mwce_setting) {
1677 		MPT_UNLOCK(mpt);
1678 		return (0);
1679 	}
1680 
1681 	/*
1682 	 * Catch MWCE being left on due to a failed shutdown.  Since
1683 	 * sysctls cannot be set by the loader, we treat the first
1684 	 * setting of this varible specially and force a full volume
1685 	 * resync if MWCE is enabled and a resync is in progress.
1686 	 */
1687 	force_full_resync = 0;
1688 	if (mpt->raid_mwce_set == 0
1689 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1690 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1691 		force_full_resync = 1;
1692 
1693 	mpt->raid_mwce_setting = mwce;
1694 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1695 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1696 		int resyncing;
1697 		int mwce;
1698 
1699 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1700 			continue;
1701 
1702 		vol_pg = mpt_vol->config_page;
1703 		resyncing = vol_pg->VolumeStatus.Flags
1704 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1705 		mwce = vol_pg->VolumeSettings.Settings
1706 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1707 		if (force_full_resync && resyncing && mwce) {
1708 
1709 			/*
1710 			 * XXX disable/enable volume should force a resync,
1711 			 *     but we'll need to queice, drain, and restart
1712 			 *     I/O to do that.
1713 			 */
1714 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1715 				    "detected.  Suggest full resync.\n");
1716 		}
1717 		mpt_verify_mwce(mpt, mpt_vol);
1718 	}
1719 	mpt->raid_mwce_set = 1;
1720 	MPT_UNLOCK(mpt);
1721 	return (0);
1722 }
/*
 * Human-readable names for the member WCE settings; indexed by the
 * raid_mwce_setting value and matched by name in the vol_member_wce
 * sysctl handler.
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1730 
1731 static int
1732 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1733 {
1734 	char inbuf[20];
1735 	struct mpt_softc *mpt;
1736 	const char *str;
1737 	int error;
1738 	u_int size;
1739 	u_int i;
1740 
1741 	GIANT_REQUIRED;
1742 
1743 	mpt = (struct mpt_softc *)arg1;
1744 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1745 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1746 	if (error || !req->newptr) {
1747 		return (error);
1748 	}
1749 
1750 	size = req->newlen - req->newidx;
1751 	if (size >= sizeof(inbuf)) {
1752 		return (EINVAL);
1753 	}
1754 
1755 	error = SYSCTL_IN(req, inbuf, size);
1756 	if (error) {
1757 		return (error);
1758 	}
1759 	inbuf[size] = '\0';
1760 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1761 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1762 			return (mpt_raid_set_vol_mwce(mpt, i));
1763 		}
1764 	}
1765 	return (EINVAL);
1766 }
1767 
1768 static int
1769 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1770 {
1771 	struct mpt_softc *mpt;
1772 	u_int raid_resync_rate;
1773 	int error;
1774 
1775 	GIANT_REQUIRED;
1776 
1777 	mpt = (struct mpt_softc *)arg1;
1778 	raid_resync_rate = mpt->raid_resync_rate;
1779 
1780 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1781 	if (error || !req->newptr) {
1782 		return error;
1783 	}
1784 
1785 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1786 }
1787 
1788 static int
1789 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1790 {
1791 	struct mpt_softc *mpt;
1792 	u_int raid_queue_depth;
1793 	int error;
1794 
1795 	GIANT_REQUIRED;
1796 
1797 	mpt = (struct mpt_softc *)arg1;
1798 	raid_queue_depth = mpt->raid_queue_depth;
1799 
1800 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1801 	if (error || !req->newptr) {
1802 		return error;
1803 	}
1804 
1805 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1806 }
1807 
1808 static void
1809 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1810 {
1811 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1812 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1813 
1814 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1815 			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1816 			mpt_raid_sysctl_vol_member_wce, "A",
1817 			"volume member WCE(On,Off,On-During-Rebuild,NC)");
1818 
1819 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1820 			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1821 			mpt_raid_sysctl_vol_queue_depth, "I",
1822 			"default volume queue depth");
1823 
1824 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1825 			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1826 			mpt_raid_sysctl_vol_resync_rate, "I",
1827 			"volume resync priority (0 == NC, 1 - 255)");
1828 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 			"nonoptimal_volumes", CTLFLAG_RD,
1830 			&mpt->raid_nonopt_volumes, 0,
1831 			"number of nonoptimal volumes");
1832 }
1833 #endif
1834