xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision bfe691b2f75de2224c7ceb304ebcdef2b42d4179)
1 /*-
2  * Routines for handling the integrated RAID features of LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a long way toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56 
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define	GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62 
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66 
67 #include <machine/stdarg.h>
68 
69 struct mpt_raid_action_result
70 {
71 	union {
72 		MPI_RAID_VOL_INDICATOR	indicator_struct;
73 		uint32_t		new_settings;
74 		uint8_t			phys_disk_num;
75 	} action_data;
76 	uint16_t			action_status;
77 };
78 
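/*
 * The result of a RAID action is stashed in the request's virtual buffer,
 * immediately following the MSG_RAID_ACTION_REQUEST message itself, so no
 * separate allocation is needed to recover it after completion.
 */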
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
81 
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83 
84 
85 static mpt_probe_handler_t	mpt_raid_probe;
86 static mpt_attach_handler_t	mpt_raid_attach;
87 static mpt_enable_handler_t	mpt_raid_enable;
88 static mpt_event_handler_t	mpt_raid_event;
89 static mpt_shutdown_handler_t	mpt_raid_shutdown;
90 static mpt_reset_handler_t	mpt_raid_ioc_reset;
91 static mpt_detach_handler_t	mpt_raid_detach;
92 
93 static struct mpt_personality mpt_raid_personality =
94 {
95 	.name		= "mpt_raid",
96 	.probe		= mpt_raid_probe,
97 	.attach		= mpt_raid_attach,
98 	.enable		= mpt_raid_enable,
99 	.event		= mpt_raid_event,
100 	.reset		= mpt_raid_ioc_reset,
101 	.shutdown	= mpt_raid_shutdown,
102 	.detach		= mpt_raid_detach,
103 };
104 
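/*
 * Register the RAID personality with the core mpt driver.  It layers on
 * top of the CAM personality (mpt_cam), as the dependency declared below
 * records.
 */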
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107 
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110 					MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117 			   struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *mpt,
120 			    struct mpt_raid_volume *mpt_vol);
121 static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
122 				   struct mpt_raid_volume *mpt_vol,
123 				   struct cam_path *path);
124 static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
125 
126 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
127 
128 const char *
129 mpt_vol_type(struct mpt_raid_volume *vol)
130 {
131 	switch (vol->config_page->VolumeType) {
132 	case MPI_RAID_VOL_TYPE_IS:
133 		return ("RAID-0");
134 	case MPI_RAID_VOL_TYPE_IME:
135 		return ("RAID-1E");
136 	case MPI_RAID_VOL_TYPE_IM:
137 		return ("RAID-1");
138 	default:
139 		return ("Unknown");
140 	}
141 }
142 
143 const char *
144 mpt_vol_state(struct mpt_raid_volume *vol)
145 {
146 	switch (vol->config_page->VolumeStatus.State) {
147 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
148 		return ("Optimal");
149 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
150 		return ("Degraded");
151 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
152 		return ("Failed");
153 	default:
154 		return ("Unknown");
155 	}
156 }
157 
158 const char *
159 mpt_disk_state(struct mpt_raid_disk *disk)
160 {
161 	switch (disk->config_page.PhysDiskStatus.State) {
162 	case MPI_PHYSDISK0_STATUS_ONLINE:
163 		return ("Online");
164 	case MPI_PHYSDISK0_STATUS_MISSING:
165 		return ("Missing");
166 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
167 		return ("Incompatible");
168 	case MPI_PHYSDISK0_STATUS_FAILED:
169 		return ("Failed");
170 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
171 		return ("Initializing");
172 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
173 		return ("Offline Requested");
174 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
175 		return ("Failed per Host Request");
176 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
177 		return ("Offline");
178 	default:
179 		return ("Unknown");
180 	}
181 }
182 
183 void
184 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
185 	    const char *fmt, ...)
186 {
187 	va_list ap;
188 
189 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
190 	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
191 	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
192 	va_start(ap, fmt);
193 	vprintf(fmt, ap);
194 	va_end(ap);
195 }
196 
197 void
198 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
199 	     const char *fmt, ...)
200 {
201 	va_list ap;
202 
203 	if (disk->volume != NULL) {
204 		printf("(%s:vol%d:%d): ",
205 		       device_get_nameunit(mpt->dev),
206 		       disk->volume->config_page->VolumeID,
207 		       disk->member_number);
208 	} else {
209 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
210 		       disk->config_page.PhysDiskBus,
211 		       disk->config_page.PhysDiskID);
212 	}
213 	va_start(ap, fmt);
214 	vprintf(fmt, ap);
215 	va_end(ap);
216 }
217 
218 static void
219 mpt_raid_async(void *callback_arg, u_int32_t code,
220 	       struct cam_path *path, void *arg)
221 {
222 	struct mpt_softc *mpt;
223 
224 	mpt = (struct mpt_softc*)callback_arg;
225 	switch (code) {
226 	case AC_FOUND_DEVICE:
227 	{
228 		struct ccb_getdev *cgd;
229 		struct mpt_raid_volume *mpt_vol;
230 
231 		cgd = (struct ccb_getdev *)arg;
232 		if (cgd == NULL) {
233 			break;
234 		}
235 
236 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
237 			 cgd->ccb_h.target_id);
238 
239 		RAID_VOL_FOREACH(mpt, mpt_vol) {
240 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
241 				continue;
242 
243 			if (mpt_vol->config_page->VolumeID
244 			 == cgd->ccb_h.target_id) {
245 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
246 				break;
247 			}
248 		}
249 	}
250 	default:
251 		break;
252 	}
253 }
254 
255 int
256 mpt_raid_probe(struct mpt_softc *mpt)
257 {
258 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
259 		return (ENODEV);
260 	}
261 	return (0);
262 }
263 
264 int
265 mpt_raid_attach(struct mpt_softc *mpt)
266 {
267 	struct ccb_setasync csa;
268 	mpt_handler_t	 handler;
269 	int		 error;
270 
271 	mpt_callout_init(&mpt->raid_timer);
272 
273 	handler.reply_handler = mpt_raid_reply_handler;
274 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
275 				     &raid_handler_id);
276 	if (error != 0) {
277 		mpt_prt(mpt, "Unable to register RAID handler!\n");
278 		goto cleanup;
279 	}
280 
281 	error = mpt_spawn_raid_thread(mpt);
282 	if (error != 0) {
283 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
284 		goto cleanup;
285 	}
286 
287 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
288 	csa.ccb_h.func_code = XPT_SASYNC_CB;
289 	csa.event_enable = AC_FOUND_DEVICE;
290 	csa.callback = mpt_raid_async;
291 	csa.callback_arg = mpt;
292 	MPTLOCK_2_CAMLOCK(mpt);
293 	xpt_action((union ccb *)&csa);
294 	CAMLOCK_2_MPTLOCK(mpt);
295 	if (csa.ccb_h.status != CAM_REQ_CMP) {
296 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
297 			"CAM async handler.\n");
298 	}
299 
300 	mpt_raid_sysctl_attach(mpt);
301 	return (0);
302 cleanup:
303 	mpt_raid_detach(mpt);
304 	return (error);
305 }
306 
307 int
308 mpt_raid_enable(struct mpt_softc *mpt)
309 {
310 	return (0);
311 }
312 
313 void
314 mpt_raid_detach(struct mpt_softc *mpt)
315 {
316 	struct ccb_setasync csa;
317 	mpt_handler_t handler;
318 
319 	callout_stop(&mpt->raid_timer);
320 	mpt_terminate_raid_thread(mpt);
321 
322 	handler.reply_handler = mpt_raid_reply_handler;
323 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
324 			       raid_handler_id);
325 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
326 	csa.ccb_h.func_code = XPT_SASYNC_CB;
327 	csa.event_enable = 0;
328 	csa.callback = mpt_raid_async;
329 	csa.callback_arg = mpt;
330 	MPTLOCK_2_CAMLOCK(mpt);
331 	xpt_action((union ccb *)&csa);
332 	CAMLOCK_2_MPTLOCK(mpt);
333 }
334 
335 static void
336 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
337 {
338 	/* Nothing to do yet. */
339 }
340 
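/* Indexed by the RAID event reason code (MPI_EVENT_RAID_RC_*). */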
341 static const char *raid_event_txt[] =
342 {
343 	"Volume Created",
344 	"Volume Deleted",
345 	"Volume Settings Changed",
346 	"Volume Status Changed",
347 	"Volume Physical Disk Membership Changed",
348 	"Physical Disk Created",
349 	"Physical Disk Deleted",
350 	"Physical Disk Settings Changed",
351 	"Physical Disk Status Changed",
352 	"Domain Validation Required",
353 	"SMART Data Received",
354 	"Replace Action Started",
355 };
356 
357 static int
358 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
359 	       MSG_EVENT_NOTIFY_REPLY *msg)
360 {
361 	EVENT_DATA_RAID *raid_event;
362 	struct mpt_raid_volume *mpt_vol;
363 	struct mpt_raid_disk *mpt_disk;
364 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
365 	int i;
366 	int print_event;
367 
368 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
369 		return (0);
370 	}
371 
372 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
373 
374 	mpt_vol = NULL;
375 	vol_pg = NULL;
376 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
377 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
378 			mpt_vol = &mpt->raid_volumes[i];
379 			vol_pg = mpt_vol->config_page;
380 
381 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
382 				continue;
383 
384 			if (vol_pg->VolumeID == raid_event->VolumeID
385 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
386 				break;
387 		}
388 		if (i >= mpt->ioc_page2->MaxVolumes) {
389 			mpt_vol = NULL;
390 			vol_pg = NULL;
391 		}
392 	}
393 
394 	mpt_disk = NULL;
395 	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
396 		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
397 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
398 			mpt_disk = NULL;
399 		}
400 	}
401 
402 	print_event = 1;
403 	switch(raid_event->ReasonCode) {
404 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
405 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
406 		break;
407 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
408 		if (mpt_vol != NULL) {
409 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
410 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
411 			} else {
412 				/*
413 				 * Coalesce status messages into one
414 				 * per background run of our RAID thread.
415 				 * This removes "spurious" status messages
416 				 * from our output.
417 				 */
418 				print_event = 0;
419 			}
420 		}
421 		break;
422 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
423 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
424 		mpt->raid_rescan++;
425 		if (mpt_vol != NULL) {
426 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
427 		}
428 		break;
429 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
430 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
431 		mpt->raid_rescan++;
432 		break;
433 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
434 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
435 		mpt->raid_rescan++;
436 		if (mpt_disk != NULL) {
437 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
438 		}
439 		break;
440 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
441 		mpt->raid_rescan++;
442 		break;
443 	case MPI_EVENT_RAID_RC_SMART_DATA:
444 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
445 		break;
446 	}
447 
448 	if (print_event) {
449 		if (mpt_disk != NULL) {
450 			mpt_disk_prt(mpt, mpt_disk, "");
451 		} else if (mpt_vol != NULL) {
452 			mpt_vol_prt(mpt, mpt_vol, "");
453 		} else {
454 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
455 				raid_event->VolumeID);
456 
457 			if (raid_event->PhysDiskNum != 0xFF)
458 				mpt_prtc(mpt, ":%d): ",
459 					 raid_event->PhysDiskNum);
460 			else
461 				mpt_prtc(mpt, "): ");
462 		}
463 
464 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
465 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
466 				 raid_event->ReasonCode);
467 		else
468 			mpt_prtc(mpt, "%s\n",
469 				 raid_event_txt[raid_event->ReasonCode]);
470 	}
471 
472 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
473 		/* XXX Use CAM's print sense for this... */
474 		if (mpt_disk != NULL)
475 			mpt_disk_prt(mpt, mpt_disk, "");
476 		else
477 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
478 			    raid_event->VolumeBus, raid_event->VolumeID,
479 			    raid_event->PhysDiskNum);
480 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
481 			 raid_event->ASC, raid_event->ASCQ);
482 	}
483 
484 	mpt_raid_wakeup(mpt);
485 	return (1);
486 }
487 
488 static void
489 mpt_raid_shutdown(struct mpt_softc *mpt)
490 {
491 	struct mpt_raid_volume *mpt_vol;
492 
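	/*
	 * On an orderly shutdown, force member write caches back off for
	 * volumes managed under the rebuild-only WCE policy.
	 */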
493 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
494 		return;
495 	}
496 
497 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
498 	RAID_VOL_FOREACH(mpt, mpt_vol) {
499 		mpt_verify_mwce(mpt, mpt_vol);
500 	}
501 }
502 
503 static int
504 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
505     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
506 {
507 	int free_req;
508 
509 	if (req == NULL)
510 		return (TRUE);
511 
512 	free_req = TRUE;
513 	if (reply_frame != NULL)
514 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
515 #ifdef NOTYET
516 	else if (req->ccb != NULL) {
517 		/* Complete Quiesce CCB with error... */
518 	}
519 #endif
520 
521 	req->state &= ~REQ_STATE_QUEUED;
522 	req->state |= REQ_STATE_DONE;
523 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
524 
525 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
526 		wakeup(req);
527 	} else if (free_req) {
528 		mpt_free_request(mpt, req);
529 	}
530 
531 	return (TRUE);
532 }
533 
534 /*
535  * Parse additional completion information in the reply
536  * frame for RAID I/O requests.
537  */
538 static int
539 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
540     MSG_DEFAULT_REPLY *reply_frame)
541 {
542 	MSG_RAID_ACTION_REPLY *reply;
543 	struct mpt_raid_action_result *action_result;
544 	MSG_RAID_ACTION_REQUEST *rap;
545 
546 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
547 	req->IOCStatus = le16toh(reply->IOCStatus);
548 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
549 
550 	switch (rap->Action) {
551 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
552 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
553 		break;
554 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
555 		mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
556 		break;
557 	default:
558 		break;
559 	}
560 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
561 	memcpy(&action_result->action_data, &reply->ActionData,
562 	    sizeof(action_result->action_data));
563 	action_result->action_status = reply->ActionStatus;
564 	return (TRUE);
565 }
566 
567 /*
568  * Utility routine to perform a RAID action command.
569  */
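/*
 * Typical synchronous use (a sketch; mpt_verify_mwce() below is a real
 * caller): obtain a request with mpt_get_request(), issue the action with
 * wait == TRUE, then check the returned errno, REQ_IOCSTATUS(req), and the
 * action_status from REQ_TO_RAID_ACTION_RESULT(req) before freeing the
 * request with mpt_free_request().
 */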
570 int
571 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
572 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
573 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
574 		   int write, int wait)
575 {
576 	MSG_RAID_ACTION_REQUEST *rap;
577 	SGE_SIMPLE32 *se;
578 
579 	rap = req->req_vbuf;
580 	memset(rap, 0, sizeof *rap);
581 	rap->Action = Action;
582 	rap->ActionDataWord = ActionDataWord;
583 	rap->Function = MPI_FUNCTION_RAID_ACTION;
584 	rap->VolumeID = vol->config_page->VolumeID;
585 	rap->VolumeBus = vol->config_page->VolumeBus;
586 	if (disk != 0)
587 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
588 	else
589 		rap->PhysDiskNum = 0xFF;
590 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
591 	se->Address = addr;
592 	MPI_pSGE_SET_LENGTH(se, len);
593 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
594 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
595 	    MPI_SGE_FLAGS_END_OF_LIST |
596 	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
597 	rap->MsgContext = htole32(req->index | raid_handler_id);
598 
599 	mpt_check_doorbell(mpt);
600 	mpt_send_cmd(mpt, req);
601 
602 	if (wait) {
603 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
604 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
605 	} else {
606 		return (0);
607 	}
608 }
609 
610 /*************************** RAID Status Monitoring ***************************/
611 static int
612 mpt_spawn_raid_thread(struct mpt_softc *mpt)
613 {
614 	int error;
615 
616 	/*
617 	 * Freeze out any CAM transactions until our thread
618 	 * is able to run at least once.  We need to update
619 	 * our RAID pages before accepting I/O or we may
620 	 * reject I/O to an ID we later determine is for a
621 	 * hidden physdisk.
622 	 */
623 	xpt_freeze_simq(mpt->phydisk_sim, 1);
624 	error = mpt_kthread_create(mpt_raid_thread, mpt,
625 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
626 	    "mpt_raid%d", mpt->unit);
627 	if (error != 0)
628 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
629 	return (error);
630 }
631 
632 static void
633 mpt_terminate_raid_thread(struct mpt_softc *mpt)
634 {
635 
636 	if (mpt->raid_thread == NULL) {
637 		return;
638 	}
639 	mpt->shutdwn_raid = 1;
640 	wakeup(mpt->raid_volumes);
641 	/*
642 	 * Sleep on a slightly different location
643 	 * for this interlock just for added safety.
644 	 */
645 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
646 }
647 
648 static void
649 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
650 {
651 	xpt_free_path(ccb->ccb_h.path);
652 	free(ccb, M_DEVBUF);
653 }
654 
655 static void
656 mpt_raid_thread(void *arg)
657 {
658 	struct mpt_softc *mpt;
659 	int firstrun;
660 
661 #if __FreeBSD_version >= 500000
662 	mtx_lock(&Giant);
663 #endif
664 	mpt = (struct mpt_softc *)arg;
665 	firstrun = 1;
666 	MPT_LOCK(mpt);
667 	while (mpt->shutdwn_raid == 0) {
668 
669 		if (mpt->raid_wakeup == 0) {
670 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
671 			continue;
672 		}
673 
674 		mpt->raid_wakeup = 0;
675 
676 		if (mpt_refresh_raid_data(mpt)) {
677 			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
678 			continue;
679 		}
680 
681 		/*
682 		 * Now that we have our first snapshot of RAID data,
683 		 * allow CAM to access our physical disk bus.
684 		 */
685 		if (firstrun) {
686 			firstrun = 0;
687 			MPTLOCK_2_CAMLOCK(mpt);
688 			xpt_release_simq(mpt->phydisk_sim, TRUE);
689 			CAMLOCK_2_MPTLOCK(mpt);
690 		}
691 
692 		if (mpt->raid_rescan != 0) {
693 			union ccb *ccb;
694 			struct cam_path *path;
695 			int error;
696 
697 			mpt->raid_rescan = 0;
698 
699 			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
700 			error = xpt_create_path(&path, xpt_periph,
701 			    cam_sim_path(mpt->phydisk_sim),
702 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
703 			if (error != CAM_REQ_CMP) {
704 				free(ccb, M_DEVBUF);
705 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
706 			} else {
707 				xpt_setup_ccb(&ccb->ccb_h, path, 5);
708 				ccb->ccb_h.func_code = XPT_SCAN_BUS;
709 				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
710 				ccb->crcn.flags = CAM_FLAG_NONE;
711 				MPTLOCK_2_CAMLOCK(mpt);
712 				xpt_action(ccb);
713 				CAMLOCK_2_MPTLOCK(mpt);
714 			}
715 		}
716 	}
717 	mpt->raid_thread = NULL;
718 	wakeup(&mpt->raid_thread);
719 	MPT_UNLOCK(mpt);
720 #if __FreeBSD_version >= 500000
721 	mtx_unlock(&Giant);
722 #endif
723 	kthread_exit(0);
724 }
725 
726 #if 0
727 static void
728 mpt_raid_quiesce_timeout(void *arg)
729 {
730 	/* Complete the CCB with error */
731 	/* COWWWW */
732 }
733 
734 static timeout_t mpt_raid_quiesce_timeout;
735 cam_status
736 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
737 		      request_t *req)
738 {
739 	union ccb *ccb;
740 
741 	ccb = req->ccb;
742 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
743 		return (CAM_REQ_CMP);
744 
745 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
746 		int rv;
747 
748 		mpt_disk->flags |= MPT_RDF_QUIESCING;
749 		xpt_freeze_devq(ccb->ccb_h.path, 1);
750 
751 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
752 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
753 					/*ActionData*/0, /*addr*/0,
754 					/*len*/0, /*write*/FALSE,
755 					/*wait*/FALSE);
756 		if (rv != 0)
757 			return (CAM_REQ_CMP_ERR);
758 
759 		ccb->ccb_h.timeout_ch =
760 			timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
761 #if 0
762 		if (rv == ETIMEDOUT) {
763 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
764 				     "Quiesce Timed-out\n");
765 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
766 			return (CAM_REQ_CMP_ERR);
767 		}
768 
769 		ar = REQ_TO_RAID_ACTION_RESULT(req);
770 		if (rv != 0
771 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
772 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
773 			mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
774 				    "%d:%x:%x\n", rv, req->IOCStatus,
775 				    ar->action_status);
776 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
777 			return (CAM_REQ_CMP_ERR);
778 		}
779 #endif
780 		return (CAM_REQ_INPROG);
781 	}
782 	return (CAM_REQUEUE_REQ);
783 }
784 #endif
785 
786 /* XXX Ignores that there may be multiple busses/IOCs involved. */
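/*
 * Translate a pass-through bus target id into the IOC's physical disk ID,
 * failing if that slot is not an active member disk.
 */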
787 cam_status
788 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
789 {
790 	struct mpt_raid_disk *mpt_disk;
791 
792 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
793 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
794 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
795 		*tgt = mpt_disk->config_page.PhysDiskID;
796 		return (0);
797 	}
798 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
799 		 ccb->ccb_h.target_id);
800 	return (-1);
801 }
802 
803 /* XXX Ignores that there may be multiple busses/IOCs involved. */
804 int
805 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
806 {
807 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
808 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
809 
810 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
811 		return (0);
812 	}
813 	ioc_vol = mpt->ioc_page2->RaidVolume;
814 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
815 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
816 		if (ioc_vol->VolumeID == tgt) {
817 			return (1);
818 		}
819 	}
820 	return (0);
821 }
822 
823 #if 0
824 static void
825 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
826 	       int enable)
827 {
828 	request_t *req;
829 	struct mpt_raid_action_result *ar;
830 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
831 	int enabled;
832 	int rv;
833 
834 	vol_pg = mpt_vol->config_page;
835 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
836 
837 	/*
838 	 * If the setting matches the configuration,
839 	 * there is nothing to do.
840 	 */
841 	if ((enabled && enable)
842 	 || (!enabled && !enable))
843 		return;
844 
845 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
846 	if (req == NULL) {
847 		mpt_vol_prt(mpt, mpt_vol,
848 			    "mpt_enable_vol: Get request failed!\n");
849 		return;
850 	}
851 
852 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
853 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
854 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
855 				/*data*/0, /*addr*/0, /*len*/0,
856 				/*write*/FALSE, /*wait*/TRUE);
857 	if (rv == ETIMEDOUT) {
858 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
859 			    "%s Volume Timed-out\n",
860 			    enable ? "Enable" : "Disable");
861 		return;
862 	}
863 	ar = REQ_TO_RAID_ACTION_RESULT(req);
864 	if (rv != 0
865 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
866 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
867 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
868 			    enable ? "Enable" : "Disable",
869 			    rv, req->IOCStatus, ar->action_status);
870 	}
871 
872 	mpt_free_request(mpt, req);
873 }
874 #endif
875 
876 static void
877 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
878 {
879 	request_t *req;
880 	struct mpt_raid_action_result *ar;
881 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
882 	uint32_t data;
883 	int rv;
884 	int resyncing;
885 	int mwce;
886 
887 	vol_pg = mpt_vol->config_page;
888 	resyncing = vol_pg->VolumeStatus.Flags
889 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
890 	mwce = vol_pg->VolumeSettings.Settings
891 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
892 
893 	/*
894 	 * If the setting matches the configuration,
895 	 * there is nothing to do.
896 	 */
897 	switch (mpt->raid_mwce_setting) {
898 	case MPT_RAID_MWCE_REBUILD_ONLY:
899 		if ((resyncing && mwce) || (!resyncing && !mwce)) {
900 			return;
901 		}
902 		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
903 		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
904 			/*
905 			 * Wait one more status update to see if
906 			 * resyncing gets enabled.  It gets disabled
907 			 * temporarily when WCE is changed.
908 			 */
909 			return;
910 		}
911 		break;
912 	case MPT_RAID_MWCE_ON:
913 		if (mwce)
914 			return;
915 		break;
916 	case MPT_RAID_MWCE_OFF:
917 		if (!mwce)
918 			return;
919 		break;
920 	case MPT_RAID_MWCE_NC:
921 		return;
922 	}
923 
924 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
925 	if (req == NULL) {
926 		mpt_vol_prt(mpt, mpt_vol,
927 			    "mpt_verify_mwce: Get request failed!\n");
928 		return;
929 	}
930 
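	/*
	 * Toggle the WCE bit in the cached settings just long enough to
	 * snapshot the proposed settings word into `data', then restore it;
	 * the cached page is only updated for real once the IOC accepts the
	 * change below.
	 */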
931 	vol_pg->VolumeSettings.Settings ^=
932 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
933 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
934 	vol_pg->VolumeSettings.Settings ^=
935 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
936 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
937 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
938 				data, /*addr*/0, /*len*/0,
939 				/*write*/FALSE, /*wait*/TRUE);
940 	if (rv == ETIMEDOUT) {
941 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
942 			    "Write Cache Enable Timed-out\n");
943 		return;
944 	}
945 	ar = REQ_TO_RAID_ACTION_RESULT(req);
946 	if (rv != 0
947 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
948 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
949 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
950 			    "%d:%x:%x\n", rv, req->IOCStatus,
951 			    ar->action_status);
952 	} else {
953 		vol_pg->VolumeSettings.Settings ^=
954 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
955 	}
956 	mpt_free_request(mpt, req);
957 }
958 
959 static void
960 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
961 {
962 	request_t *req;
963 	struct mpt_raid_action_result *ar;
964 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
965 	u_int prio;
966 	int rv;
967 
968 	vol_pg = mpt_vol->config_page;
969 
970 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
971 		return;
972 
973 	/*
974 	 * If the current RAID resync rate does not
975 	 * match our configured rate, update it.
976 	 */
977 	prio = vol_pg->VolumeSettings.Settings
978 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
979 	if (vol_pg->ResyncRate != 0
980 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
981 
982 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
983 		if (req == NULL) {
984 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
985 				    "Get request failed!\n");
986 			return;
987 		}
988 
989 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
990 					MPI_RAID_ACTION_SET_RESYNC_RATE,
991 					mpt->raid_resync_rate, /*addr*/0,
992 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
993 		if (rv == ETIMEDOUT) {
994 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
995 				    "Resync Rate Setting Timed-out\n");
996 			return;
997 		}
998 
999 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1000 		if (rv != 0
1001 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1002 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1003 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1004 				    "%d:%x:%x\n", rv, req->IOCStatus,
1005 				    ar->action_status);
1006 		} else
1007 			vol_pg->ResyncRate = mpt->raid_resync_rate;
1008 		mpt_free_request(mpt, req);
1009 	} else if ((prio && mpt->raid_resync_rate < 128)
1010 		|| (!prio && mpt->raid_resync_rate >= 128)) {
1011 		uint32_t data;
1012 
1013 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1014 		if (req == NULL) {
1015 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1016 				    "Get request failed!\n");
1017 			return;
1018 		}
1019 
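		/*
		 * Same toggle/snapshot/restore pattern as mpt_verify_mwce():
		 * flip the resync-priority bit only long enough to capture
		 * the proposed settings word.
		 */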
1020 		vol_pg->VolumeSettings.Settings ^=
1021 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1022 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1023 		vol_pg->VolumeSettings.Settings ^=
1024 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1025 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1026 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1027 					data, /*addr*/0, /*len*/0,
1028 					/*write*/FALSE, /*wait*/TRUE);
1029 		if (rv == ETIMEDOUT) {
1030 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1031 				    "Resync Rate Setting Timed-out\n");
1032 			return;
1033 		}
1034 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1035 		if (rv != 0
1036 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1037 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1038 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1039 				    "%d:%x:%x\n", rv, req->IOCStatus,
1040 				    ar->action_status);
1041 		} else {
1042 			vol_pg->VolumeSettings.Settings ^=
1043 			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1044 		}
1045 
1046 		mpt_free_request(mpt, req);
1047 	}
1048 }
1049 
1050 static void
1051 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1052 		       struct cam_path *path)
1053 {
1054 	struct ccb_relsim crs;
1055 
1056 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1057 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1058 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1059 	crs.openings = mpt->raid_queue_depth;
1060 	xpt_action((union ccb *)&crs);
1061 	if (crs.ccb_h.status != CAM_REQ_CMP)
1062 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1063 			    "with CAM status %#x\n", crs.ccb_h.status);
1064 }
1065 
1066 static void
1067 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1068 {
1069 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1070 	u_int i;
1071 
1072 	vol_pg = mpt_vol->config_page;
1073 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
1074 	for (i = 1; i <= 0x8000; i <<= 1) {
1075 		switch (vol_pg->VolumeSettings.Settings & i) {
1076 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1077 			mpt_prtc(mpt, " Member-WCE");
1078 			break;
1079 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1080 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1081 			break;
1082 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1083 			mpt_prtc(mpt, " Hot-Plug-Spares");
1084 			break;
1085 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1086 			mpt_prtc(mpt, " High-Priority-ReSync");
1087 			break;
1088 		default:
1089 			break;
1090 		}
1091 	}
1092 	mpt_prtc(mpt, " )\n");
1093 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1094 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1095 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1096 			  ? ":" : "s:");
1097 		for (i = 0; i < 8; i++) {
1098 			u_int mask;
1099 
1100 			mask = 0x1 << i;
1101 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1102 				continue;
1103 			mpt_prtc(mpt, " %d", i);
1104 		}
1105 		mpt_prtc(mpt, "\n");
1106 	}
1107 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1108 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1109 		struct mpt_raid_disk *mpt_disk;
1110 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1111 		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1112 		U8 f, s;
1113 
1114 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1115 		disk_pg = &mpt_disk->config_page;
1116 		mpt_prtc(mpt, "      ");
1117 		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1118 			 pt_bus, disk_pg->PhysDiskID);
1119 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1120 			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1121 			    "Primary" : "Secondary");
1122 		} else {
1123 			mpt_prtc(mpt, "Stripe Position %d",
1124 				 mpt_disk->member_number);
1125 		}
1126 		f = disk_pg->PhysDiskStatus.Flags;
1127 		s = disk_pg->PhysDiskStatus.State;
1128 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1129 			mpt_prtc(mpt, " Out of Sync");
1130 		}
1131 		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1132 			mpt_prtc(mpt, " Quiesced");
1133 		}
1134 		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1135 			mpt_prtc(mpt, " Inactive");
1136 		}
1137 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1138 			mpt_prtc(mpt, " Was Optimal");
1139 		}
1140 		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1141 			mpt_prtc(mpt, " Was Non-Optimal");
1142 		}
1143 		switch (s) {
1144 		case MPI_PHYSDISK0_STATUS_ONLINE:
1145 			mpt_prtc(mpt, " Online");
1146 			break;
1147 		case MPI_PHYSDISK0_STATUS_MISSING:
1148 			mpt_prtc(mpt, " Missing");
1149 			break;
1150 		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1151 			mpt_prtc(mpt, " Incompatible");
1152 			break;
1153 		case MPI_PHYSDISK0_STATUS_FAILED:
1154 			mpt_prtc(mpt, " Failed");
1155 			break;
1156 		case MPI_PHYSDISK0_STATUS_INITIALIZING:
1157 			mpt_prtc(mpt, " Initializing");
1158 			break;
1159 		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1160 			mpt_prtc(mpt, " Requested Offline");
1161 			break;
1162 		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1163 			mpt_prtc(mpt, " Requested Failed");
1164 			break;
1165 		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1166 		default:
1167 			mpt_prtc(mpt, " Offline Other (%x)", s);
1168 			break;
1169 		}
1170 		mpt_prtc(mpt, "\n");
1171 	}
1172 }
1173 
1174 static void
1175 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1176 {
1177 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1178 	int rd_bus = cam_sim_bus(mpt->sim);
1179 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1180 	u_int i;
1181 
1182 	disk_pg = &mpt_disk->config_page;
1183 	mpt_disk_prt(mpt, mpt_disk,
1184 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1185 		     device_get_nameunit(mpt->dev), rd_bus,
1186 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1187 		     pt_bus, mpt_disk - mpt->raid_disks);
1188 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1189 		return;
1190 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1191 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1192 		   ? ":" : "s:");
1193 	for (i = 0; i < 8; i++) {
1194 		u_int mask;
1195 
1196 		mask = 0x1 << i;
1197 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1198 			continue;
1199 		mpt_prtc(mpt, " %d", i);
1200 	}
1201 	mpt_prtc(mpt, "\n");
1202 }
1203 
1204 static void
1205 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1206 		      IOC_3_PHYS_DISK *ioc_disk)
1207 {
1208 	int rv;
1209 
1210 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1211 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1212 				 &mpt_disk->config_page.Header,
1213 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1214 	if (rv != 0) {
1215 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1216 			"Failed to read RAID Disk Hdr(%d)\n",
1217 		 	ioc_disk->PhysDiskNum);
1218 		return;
1219 	}
1220 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1221 				   &mpt_disk->config_page.Header,
1222 				   sizeof(mpt_disk->config_page),
1223 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1224 	if (rv != 0)
1225 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1226 			"Failed to read RAID Disk Page(%d)\n",
1227 		 	ioc_disk->PhysDiskNum);
1228 }
1229 
1230 static void
1231 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1232     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1233 {
1234 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1235 	struct mpt_raid_action_result *ar;
1236 	request_t *req;
1237 	int rv;
1238 	int i;
1239 
1240 	vol_pg = mpt_vol->config_page;
1241 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1242 
1243 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1244 	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1245 	if (rv != 0) {
1246 		mpt_vol_prt(mpt, mpt_vol,
1247 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1248 		    ioc_vol->VolumePageNumber);
1249 		return;
1250 	}
1251 
1252 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1253 	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1254 	if (rv != 0) {
1255 		mpt_vol_prt(mpt, mpt_vol,
1256 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1257 		    ioc_vol->VolumePageNumber);
1258 		return;
1259 	}
1260 	mpt2host_config_page_raid_vol_0(vol_pg);
1261 
1262 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1263 
1264 	/* Update disk entry array data. */
1265 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1266 		struct mpt_raid_disk *mpt_disk;
1267 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1268 		mpt_disk->volume = mpt_vol;
1269 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1270 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1271 			mpt_disk->member_number--;
1272 		}
1273 	}
1274 
1275 	if ((vol_pg->VolumeStatus.Flags
1276 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1277 		return;
1278 
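	/*
	 * A resync is in progress; fetch the IOC's progress indicator so the
	 * periodic status report can show how many blocks remain.
	 */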
1279 	req = mpt_get_request(mpt, TRUE);
1280 	if (req == NULL) {
1281 		mpt_vol_prt(mpt, mpt_vol,
1282 		    "mpt_refresh_raid_vol: Get request failed!\n");
1283 		return;
1284 	}
1285 	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1286 	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1287 	if (rv == ETIMEDOUT) {
1288 		mpt_vol_prt(mpt, mpt_vol,
1289 		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1290 		mpt_free_request(mpt, req);
1291 		return;
1292 	}
1293 
1294 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1295 	if (rv == 0
1296 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1297 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1298 		memcpy(&mpt_vol->sync_progress,
1299 		       &ar->action_data.indicator_struct,
1300 		       sizeof(mpt_vol->sync_progress));
1301 		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1302 	} else {
1303 		mpt_vol_prt(mpt, mpt_vol,
1304 		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1305 	}
1306 	mpt_free_request(mpt, req);
1307 }
1308 
1309 /*
1310  * Update in-core information about RAID support.  We update any entries
1311  * that didn't previously exist or that have been marked as needing to
1312  * be updated by our event handler.  Interesting changes are displayed
1313  * to the console.
1314  */
1315 int
1316 mpt_refresh_raid_data(struct mpt_softc *mpt)
1317 {
1318 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1319 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1320 	IOC_3_PHYS_DISK *ioc_disk;
1321 	IOC_3_PHYS_DISK *ioc_last_disk;
1322 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1323 	size_t len;
1324 	int rv;
1325 	int i;
1326 	u_int nonopt_volumes;
1327 
1328 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1329 		return (0);
1330 	}
1331 
1332 	/*
1333 	 * Mark all items as unreferenced by the configuration.
1334 	 * This allows us to find, report, and discard stale
1335 	 * entries.
1336 	 */
1337 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1338 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1339 	}
1340 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1341 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1342 	}
1343 
1344 	/*
1345 	 * Get Physical Disk information.
1346 	 */
1347 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1348 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1349 				   &mpt->ioc_page3->Header, len,
1350 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1351 	if (rv) {
1352 		mpt_prt(mpt,
1353 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1354 		return (-1);
1355 	}
1356 
1357 	ioc_disk = mpt->ioc_page3->PhysDisk;
1358 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1359 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1360 		struct mpt_raid_disk *mpt_disk;
1361 
1362 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1363 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1364 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1365 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1366 
1367 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1368 
1369 		}
1370 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1371 		mpt->raid_rescan++;
1372 	}
1373 
1374 	/*
1375 	 * Refresh volume data.
1376 	 */
1377 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1378 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1379 				   &mpt->ioc_page2->Header, len,
1380 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1381 	if (rv) {
1382 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1383 			"Failed to read IOC Page 2\n");
1384 		return (-1);
1385 	}
1386 
1387 	ioc_vol = mpt->ioc_page2->RaidVolume;
1388 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1389 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1390 		struct mpt_raid_volume *mpt_vol;
1391 
1392 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1393 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1394 		vol_pg = mpt_vol->config_page;
1395 		if (vol_pg == NULL)
1396 			continue;
1397 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1398 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1399 		 || (vol_pg->VolumeStatus.Flags
1400 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1401 
1402 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1403 		}
1404 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1405 	}
1406 
1407 	nonopt_volumes = 0;
1408 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1409 		struct mpt_raid_volume *mpt_vol;
1410 		uint64_t total;
1411 		uint64_t left;
1412 		int m;
1413 		u_int prio;
1414 
1415 		mpt_vol = &mpt->raid_volumes[i];
1416 
1417 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1418 			continue;
1419 		}
1420 
1421 		vol_pg = mpt_vol->config_page;
1422 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1423 		 == MPT_RVF_ANNOUNCED) {
1424 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1425 			mpt_vol->flags = 0;
1426 			continue;
1427 		}
1428 
1429 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1430 			mpt_announce_vol(mpt, mpt_vol);
1431 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1432 		}
1433 
1434 		if (vol_pg->VolumeStatus.State !=
1435 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1436 			nonopt_volumes++;
1437 
1438 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1439 			continue;
1440 
1441 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1442 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1443 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1444 		mpt_verify_mwce(mpt, mpt_vol);
1445 
1446 		if (vol_pg->VolumeStatus.Flags == 0) {
1447 			continue;
1448 		}
1449 
1450 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1451 		for (m = 1; m <= 0x80; m <<= 1) {
1452 			switch (vol_pg->VolumeStatus.Flags & m) {
1453 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1454 				mpt_prtc(mpt, " Enabled");
1455 				break;
1456 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1457 				mpt_prtc(mpt, " Quiesced");
1458 				break;
1459 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1460 				mpt_prtc(mpt, " Re-Syncing");
1461 				break;
1462 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1463 				mpt_prtc(mpt, " Inactive");
1464 				break;
1465 			default:
1466 				break;
1467 			}
1468 		}
1469 		mpt_prtc(mpt, " )\n");
1470 
1471 		if ((vol_pg->VolumeStatus.Flags
1472 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1473 			continue;
1474 
1475 		mpt_verify_resync_rate(mpt, mpt_vol);
1476 
1477 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1478 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1479 		if (vol_pg->ResyncRate != 0) {
1480 
1481 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1482 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1483 			    prio / 1000, prio % 1000);
1484 		} else {
1485 			prio = vol_pg->VolumeSettings.Settings
1486 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1487 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1488 			    prio ? "High" : "Low");
1489 		}
1490 #if __FreeBSD_version >= 500000
1491 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1492 			    "blocks remaining\n", (uintmax_t)left,
1493 			    (uintmax_t)total);
1494 #else
1495 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1496 			    "blocks remaining\n", (uint64_t)left,
1497 			    (uint64_t)total);
1498 #endif
1499 
1500 		/* Periodically report on sync progress. */
1501 		mpt_schedule_raid_refresh(mpt);
1502 	}
1503 
1504 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1505 		struct mpt_raid_disk *mpt_disk;
1506 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1507 		int m;
1508 
1509 		mpt_disk = &mpt->raid_disks[i];
1510 		disk_pg = &mpt_disk->config_page;
1511 
1512 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1513 			continue;
1514 
1515 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1516 		 == MPT_RDF_ANNOUNCED) {
1517 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1518 			mpt_disk->flags = 0;
1519 			mpt->raid_rescan++;
1520 			continue;
1521 		}
1522 
1523 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1524 
1525 			mpt_announce_disk(mpt, mpt_disk);
1526 			mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1527 		}
1528 
1529 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1530 			continue;
1531 
1532 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1533 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1534 		if (disk_pg->PhysDiskStatus.Flags == 0)
1535 			continue;
1536 
1537 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1538 		for (m = 1; m <= 0x80; m <<= 1) {
1539 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1540 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1541 				mpt_prtc(mpt, " Out-Of-Sync");
1542 				break;
1543 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1544 				mpt_prtc(mpt, " Quiesced");
1545 				break;
1546 			default:
1547 				break;
1548 			}
1549 		}
1550 		mpt_prtc(mpt, " )\n");
1551 	}
1552 
1553 	mpt->raid_nonopt_volumes = nonopt_volumes;
1554 	return (0);
1555 }
1556 
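/*
 * Callout handler: periodically wake the RAID thread so it can refresh
 * volume state and report resync progress.
 */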
1557 static void
1558 mpt_raid_timer(void *arg)
1559 {
1560 	struct mpt_softc *mpt;
1561 
1562 	mpt = (struct mpt_softc *)arg;
1563 	MPT_LOCK(mpt);
1564 	mpt_raid_wakeup(mpt);
1565 	MPT_UNLOCK(mpt);
1566 }
1567 
1568 void
1569 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1570 {
1571 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1572 		      mpt_raid_timer, mpt);
1573 }
1574 
1575 void
1576 mpt_raid_free_mem(struct mpt_softc *mpt)
1577 {
1578 
1579 	if (mpt->raid_volumes) {
1580 		struct mpt_raid_volume *mpt_raid;
1581 		int i;
1582 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1583 			mpt_raid = &mpt->raid_volumes[i];
1584 			if (mpt_raid->config_page) {
1585 				free(mpt_raid->config_page, M_DEVBUF);
1586 				mpt_raid->config_page = NULL;
1587 			}
1588 		}
1589 		free(mpt->raid_volumes, M_DEVBUF);
1590 		mpt->raid_volumes = NULL;
1591 	}
1592 	if (mpt->raid_disks) {
1593 		free(mpt->raid_disks, M_DEVBUF);
1594 		mpt->raid_disks = NULL;
1595 	}
1596 	if (mpt->ioc_page2) {
1597 		free(mpt->ioc_page2, M_DEVBUF);
1598 		mpt->ioc_page2 = NULL;
1599 	}
1600 	if (mpt->ioc_page3) {
1601 		free(mpt->ioc_page3, M_DEVBUF);
1602 		mpt->ioc_page3 = NULL;
1603 	}
1604 	mpt->raid_max_volumes = 0;
1605 	mpt->raid_max_disks = 0;
1606 }
1607 
1608 static int
1609 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1610 {
1611 	struct mpt_raid_volume *mpt_vol;
1612 
1613 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1614 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1615 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1616 		return (EINVAL);
1617 
1618 	MPT_LOCK(mpt);
1619 	mpt->raid_resync_rate = rate;
1620 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1621 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1622 			continue;
1623 		}
1624 		mpt_verify_resync_rate(mpt, mpt_vol);
1625 	}
1626 	MPT_UNLOCK(mpt);
1627 	return (0);
1628 }
1629 
1630 static int
1631 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1632 {
1633 	struct mpt_raid_volume *mpt_vol;
1634 
1635 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1636 		return (EINVAL);
1637 
1638 	MPT_LOCK(mpt);
1639 	mpt->raid_queue_depth = vol_queue_depth;
1640 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1641 		struct cam_path *path;
1642 		int error;
1643 
1644 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1645 			continue;
1646 
1647 		mpt->raid_rescan = 0;
1648 
1649 		MPTLOCK_2_CAMLOCK(mpt);
1650 		error = xpt_create_path(&path, xpt_periph,
1651 					cam_sim_path(mpt->sim),
1652 					mpt_vol->config_page->VolumeID,
1653 					/*lun*/0);
1654 		if (error != CAM_REQ_CMP) {
1655 			CAMLOCK_2_MPTLOCK(mpt);
1656 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1657 			continue;
1658 		}
1659 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1660 		xpt_free_path(path);
1661 		CAMLOCK_2_MPTLOCK(mpt);
1662 	}
1663 	MPT_UNLOCK(mpt);
1664 	return (0);
1665 }
1666 
1667 static int
1668 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1669 {
1670 	struct mpt_raid_volume *mpt_vol;
1671 	int force_full_resync;
1672 
1673 	MPT_LOCK(mpt);
1674 	if (mwce == mpt->raid_mwce_setting) {
1675 		MPT_UNLOCK(mpt);
1676 		return (0);
1677 	}
1678 
1679 	/*
1680 	 * Catch MWCE being left on due to a failed shutdown.  Since
1681 	 * sysctls cannot be set by the loader, we treat the first
1682 	 * setting of this variable specially and force a full volume
1683 	 * resync if MWCE is enabled and a resync is in progress.
1684 	 */
1685 	force_full_resync = 0;
1686 	if (mpt->raid_mwce_set == 0
1687 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1688 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1689 		force_full_resync = 1;
1690 
1691 	mpt->raid_mwce_setting = mwce;
1692 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1693 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1694 		int resyncing;
1695 		int mwce;
1696 
1697 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1698 			continue;
1699 
1700 		vol_pg = mpt_vol->config_page;
1701 		resyncing = vol_pg->VolumeStatus.Flags
1702 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1703 		mwce = vol_pg->VolumeSettings.Settings
1704 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1705 		if (force_full_resync && resyncing && mwce) {
1706 
1707 			/*
1708 			 * XXX disable/enable volume should force a resync,
1709 			 *     but we'll need to quiesce, drain, and restart
1710 			 *     I/O to do that.
1711 			 */
1712 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1713 				    "detected.  Suggest full resync.\n");
1714 		}
1715 		mpt_verify_mwce(mpt, mpt_vol);
1716 	}
1717 	mpt->raid_mwce_set = 1;
1718 	MPT_UNLOCK(mpt);
1719 	return (0);
1720 }
1721 
1722 const char *mpt_vol_mwce_strs[] =
1723 {
1724 	"On",
1725 	"Off",
1726 	"On-During-Rebuild",
1727 	"NC"
1728 };
1729 
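/*
 * sysctl handler for the volume member write-cache-enable policy.  As a
 * sketch (assuming the default device sysctl tree and unit 0), this is
 * typically driven from userland with something like:
 *
 *	sysctl dev.mpt.0.vol_member_wce=On-During-Rebuild
 */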
1730 static int
1731 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1732 {
1733 	char inbuf[20];
1734 	struct mpt_softc *mpt;
1735 	const char *str;
1736 	int error;
1737 	u_int size;
1738 	u_int i;
1739 
1740 	GIANT_REQUIRED;
1741 
1742 	mpt = (struct mpt_softc *)arg1;
1743 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1744 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1745 	if (error || !req->newptr) {
1746 		return (error);
1747 	}
1748 
1749 	size = req->newlen - req->newidx;
1750 	if (size >= sizeof(inbuf)) {
1751 		return (EINVAL);
1752 	}
1753 
1754 	error = SYSCTL_IN(req, inbuf, size);
1755 	if (error) {
1756 		return (error);
1757 	}
1758 	inbuf[size] = '\0';
1759 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1760 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1761 			return (mpt_raid_set_vol_mwce(mpt, i));
1762 		}
1763 	}
1764 	return (EINVAL);
1765 }
1766 
1767 static int
1768 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1769 {
1770 	struct mpt_softc *mpt;
1771 	u_int raid_resync_rate;
1772 	int error;
1773 
1774 	GIANT_REQUIRED;
1775 
1776 	mpt = (struct mpt_softc *)arg1;
1777 	raid_resync_rate = mpt->raid_resync_rate;
1778 
1779 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1780 	if (error || !req->newptr) {
1781 		return error;
1782 	}
1783 
1784 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1785 }
1786 
1787 static int
1788 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1789 {
1790 	struct mpt_softc *mpt;
1791 	u_int raid_queue_depth;
1792 	int error;
1793 
1794 	GIANT_REQUIRED;
1795 
1796 	mpt = (struct mpt_softc *)arg1;
1797 	raid_queue_depth = mpt->raid_queue_depth;
1798 
1799 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1800 	if (error || !req->newptr) {
1801 		return error;
1802 	}
1803 
1804 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1805 }
1806 
1807 static void
1808 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1809 {
1810 #if __FreeBSD_version >= 500000
1811 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1812 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1813 
1814 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1815 			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1816 			mpt_raid_sysctl_vol_member_wce, "A",
1817 			"volume member WCE(On,Off,On-During-Rebuild,NC)");
1818 
1819 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1820 			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1821 			mpt_raid_sysctl_vol_queue_depth, "I",
1822 			"default volume queue depth");
1823 
1824 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1825 			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1826 			mpt_raid_sysctl_vol_resync_rate, "I",
1827 			"volume resync priority (0 == NC, 1 - 255)");
1828 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 			"nonoptimal_volumes", CTLFLAG_RD,
1830 			&mpt->raid_nonopt_volumes, 0,
1831 			"number of nonoptimal volumes");
1832 #endif
1833 }
1834