xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56 
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define	GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62 
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66 
67 #include <machine/stdarg.h>
68 
/*
 * Per-request scratch area holding the result of a RAID action.
 * It lives in the request's virtual buffer, immediately after the
 * RAID action request message (see REQ_TO_RAID_ACTION_RESULT), so
 * callers can inspect the action status/data after the reply frame
 * itself has been recycled.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};
78 
/*
 * Locate the action result stashed directly after the RAID action
 * request message in the request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code from a completed request, masking flag bits. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83 
84 
/* Personality hooks implemented by this module. */
static mpt_probe_handler_t	mpt_raid_probe;
static mpt_attach_handler_t	mpt_raid_attach;
static mpt_enable_handler_t	mpt_raid_enable;
static mpt_event_handler_t	mpt_raid_event;
static mpt_shutdown_handler_t	mpt_raid_shutdown;
static mpt_reset_handler_t	mpt_raid_ioc_reset;
static mpt_detach_handler_t	mpt_raid_detach;

static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

/* Register after the core personalities; depends on mpt_cam. */
DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);

/* Forward declarations for module-private helpers. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static timeout_t mpt_raid_timer;
static timeout_t mpt_raid_quiesce_timeout;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *mpt,
			    struct mpt_raid_volume *mpt_vol);
static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
				   struct mpt_raid_volume *mpt_vol,
				   struct cam_path *path);
static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);

/* Reply handler id assigned by the core when our handler is registered. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
128 
129 const char *
130 mpt_vol_type(struct mpt_raid_volume *vol)
131 {
132 	switch (vol->config_page->VolumeType) {
133 	case MPI_RAID_VOL_TYPE_IS:
134 		return ("RAID-0");
135 	case MPI_RAID_VOL_TYPE_IME:
136 		return ("RAID-1E");
137 	case MPI_RAID_VOL_TYPE_IM:
138 		return ("RAID-1");
139 	default:
140 		return ("Unknown");
141 	}
142 }
143 
144 const char *
145 mpt_vol_state(struct mpt_raid_volume *vol)
146 {
147 	switch (vol->config_page->VolumeStatus.State) {
148 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
149 		return ("Optimal");
150 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
151 		return ("Degraded");
152 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
153 		return ("Failed");
154 	default:
155 		return ("Unknown");
156 	}
157 }
158 
159 const char *
160 mpt_disk_state(struct mpt_raid_disk *disk)
161 {
162 	switch (disk->config_page.PhysDiskStatus.State) {
163 	case MPI_PHYSDISK0_STATUS_ONLINE:
164 		return ("Online");
165 	case MPI_PHYSDISK0_STATUS_MISSING:
166 		return ("Missing");
167 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
168 		return ("Incompatible");
169 	case MPI_PHYSDISK0_STATUS_FAILED:
170 		return ("Failed");
171 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
172 		return ("Initializing");
173 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
174 		return ("Offline Requested");
175 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
176 		return ("Failed per Host Request");
177 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
178 		return ("Offline");
179 	default:
180 		return ("Unknown");
181 	}
182 }
183 
/*
 * printf-style diagnostic prefixed with the adapter unit and the
 * volume's index, bus, and id.
 * NOTE(review): the device unit name is printed twice by the prefix
 * format; preserved as-is since log scrapers may depend on it.
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
197 
/*
 * printf-style diagnostic prefixed with the disk's identity: volume
 * membership (volume id + member number) when the disk belongs to a
 * volume, otherwise its raw bus/target address.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
218 
219 static void
220 mpt_raid_async(void *callback_arg, u_int32_t code,
221 	       struct cam_path *path, void *arg)
222 {
223 	struct mpt_softc *mpt;
224 
225 	mpt = (struct mpt_softc*)callback_arg;
226 	switch (code) {
227 	case AC_FOUND_DEVICE:
228 	{
229 		struct ccb_getdev *cgd;
230 		struct mpt_raid_volume *mpt_vol;
231 
232 		cgd = (struct ccb_getdev *)arg;
233 		if (cgd == NULL) {
234 			break;
235 		}
236 
237 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
238 			 cgd->ccb_h.target_id);
239 
240 		RAID_VOL_FOREACH(mpt, mpt_vol) {
241 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
242 				continue;
243 
244 			if (mpt_vol->config_page->VolumeID
245 			 == cgd->ccb_h.target_id) {
246 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
247 				break;
248 			}
249 		}
250 	}
251 	default:
252 		break;
253 	}
254 }
255 
256 int
257 mpt_raid_probe(struct mpt_softc *mpt)
258 {
259 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
260 		return (ENODEV);
261 	}
262 	return (0);
263 }
264 
265 int
266 mpt_raid_attach(struct mpt_softc *mpt)
267 {
268 	struct ccb_setasync csa;
269 	mpt_handler_t	 handler;
270 	int		 error;
271 
272 	mpt_callout_init(&mpt->raid_timer);
273 
274 	handler.reply_handler = mpt_raid_reply_handler;
275 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
276 				     &raid_handler_id);
277 	if (error != 0) {
278 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
279 		goto cleanup;
280 	}
281 
282 	error = mpt_spawn_raid_thread(mpt);
283 	if (error != 0) {
284 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
285 		goto cleanup;
286 	}
287 
288 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
289 	csa.ccb_h.func_code = XPT_SASYNC_CB;
290 	csa.event_enable = AC_FOUND_DEVICE;
291 	csa.callback = mpt_raid_async;
292 	csa.callback_arg = mpt;
293 	MPTLOCK_2_CAMLOCK(mpt);
294 	xpt_action((union ccb *)&csa);
295 	CAMLOCK_2_MPTLOCK(mpt);
296 	if (csa.ccb_h.status != CAM_REQ_CMP) {
297 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298 			"CAM async handler.\n");
299 	}
300 
301 	mpt_raid_sysctl_attach(mpt);
302 	return (0);
303 cleanup:
304 	mpt_raid_detach(mpt);
305 	return (error);
306 }
307 
/*
 * Nothing additional to do when the core enables this personality;
 * all setup happened at attach time.
 */
int
mpt_raid_enable(struct mpt_softc *mpt)
{
	return (0);
}
313 
/*
 * Tear down the RAID personality in the reverse order of attach:
 * stop the timer, terminate the monitoring thread, deregister the
 * reply handler, and finally disable the CAM async callback.
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	mpt_terminate_raid_thread(mpt);

	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 de-registers our async callback. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_MPTLOCK(mpt);
}
335 
/*
 * IOC reset notification.  RAID state is refreshed lazily by the
 * monitoring thread, so there is currently nothing to do here.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
341 
/*
 * Human readable descriptions of RAID events, indexed by the
 * MPI_EVENT_RAID_RC_* reason code reported by the firmware.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
357 
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications from the IOC.
 * Identifies the affected volume and/or physical disk, marks cached
 * state stale and/or schedules a bus rescan based on the reason code,
 * logs the event, and wakes the RAID monitoring thread.
 *
 * Returns 0 if the event was not a RAID event (not consumed),
 * 1 once the event has been handled.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Locate the active volume this event refers to, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* Locate the physical disk, if the event names one (0xFF = none). */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/*
	 * Update cached state per reason code.  Unknown codes fall out
	 * of the switch untouched and are reported below as unhandled.
	 */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix with the most specific identity we resolved. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Kick the monitoring thread to refresh RAID data. */
	mpt_raid_wakeup(mpt);
	return (1);
}
488 
489 static void
490 mpt_raid_shutdown(struct mpt_softc *mpt)
491 {
492 	struct mpt_raid_volume *mpt_vol;
493 
494 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
495 		return;
496 	}
497 
498 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
499 	RAID_VOL_FOREACH(mpt, mpt_vol) {
500 		mpt_verify_mwce(mpt, mpt_vol);
501 	}
502 }
503 
/*
 * Reply handler for RAID action requests.  Parses the reply frame (if
 * one was delivered), marks the request complete, and either wakes a
 * synchronous waiter or frees the request.  Always returns TRUE to
 * indicate the reply frame has been consumed.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request; otherwise release it ourselves. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
534 
535 /*
536  * Parse additional completion information in the reply
537  * frame for RAID I/O requests.
538  */
539 static int
540 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
541     MSG_DEFAULT_REPLY *reply_frame)
542 {
543 	MSG_RAID_ACTION_REPLY *reply;
544 	struct mpt_raid_action_result *action_result;
545 	MSG_RAID_ACTION_REQUEST *rap;
546 
547 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
548 	req->IOCStatus = le16toh(reply->IOCStatus);
549 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
550 
551 	switch (rap->Action) {
552 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
553 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
554 		break;
555 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
556 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
557 		break;
558 	default:
559 		break;
560 	}
561 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
562 	memcpy(&action_result->action_data, &reply->ActionData,
563 	    sizeof(action_result->action_data));
564 	action_result->action_status = reply->ActionStatus;
565 	return (TRUE);
566 }
567 
568 /*
569  * Utiltity routine to perform a RAID action command;
570  */
571 int
572 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
573 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
574 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
575 		   int write, int wait)
576 {
577 	MSG_RAID_ACTION_REQUEST *rap;
578 	SGE_SIMPLE32 *se;
579 
580 	rap = req->req_vbuf;
581 	memset(rap, 0, sizeof *rap);
582 	rap->Action = Action;
583 	rap->ActionDataWord = ActionDataWord;
584 	rap->Function = MPI_FUNCTION_RAID_ACTION;
585 	rap->VolumeID = vol->config_page->VolumeID;
586 	rap->VolumeBus = vol->config_page->VolumeBus;
587 	if (disk != 0)
588 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
589 	else
590 		rap->PhysDiskNum = 0xFF;
591 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
592 	se->Address = addr;
593 	MPI_pSGE_SET_LENGTH(se, len);
594 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
595 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
596 	    MPI_SGE_FLAGS_END_OF_LIST |
597 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
598 	rap->MsgContext = htole32(req->index | raid_handler_id);
599 
600 	mpt_check_doorbell(mpt);
601 	mpt_send_cmd(mpt, req);
602 
603 	if (wait) {
604 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
605 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
606 	} else {
607 		return (0);
608 	}
609 }
610 
611 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the background RAID monitoring kthread.  The physical-disk
 * SIM queue is frozen until the thread's first successful data
 * refresh (released in mpt_raid_thread), or released here on spawn
 * failure.  Returns 0 on success or the kthread creation error.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0)
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
	return (error);
}
632 
/*
 * Ask the RAID monitoring thread to exit and wait until it has done
 * so.  The thread clears mpt->raid_thread and wakes us on the
 * &mpt->raid_thread channel as it exits.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
648 
/*
 * Completion callback for the bus rescan CCB issued by the RAID
 * thread; releases the path and CCB allocated for the scan.
 */
static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	free(ccb, M_DEVBUF);
}
655 
/*
 * Background RAID monitoring thread.  Sleeps until woken (event or
 * timer), refreshes RAID configuration data, releases the frozen
 * physical-disk SIM queue after the first successful refresh, and
 * initiates bus rescans when membership changes are pending.  Exits
 * when mpt->shutdwn_raid is set, waking mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

#if __FreeBSD_version >= 500000
	mtx_lock(&Giant);
#endif
	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			/* Wait for mpt_raid_wakeup()/timer to prod us. */
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			/* Refresh failed; retry on the next timer tick. */
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_release_simq(mpt->phydisk_sim, TRUE);
			CAMLOCK_2_MPTLOCK(mpt);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;

			/* CCB and path are freed in the rescan callback. */
			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
			error = xpt_create_path(&path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				free(ccb, M_DEVBUF);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				xpt_setup_ccb(&ccb->ccb_h, path, 5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_action(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
#if __FreeBSD_version >= 500000
	mtx_unlock(&Giant);
#endif
	kthread_exit(0);
}
726 
/*
 * Begin (or report the status of) quiescing physical I/O to a member
 * disk.  Freezes the device queue and issues an asynchronous QUIESCE
 * RAID action; completion is handled via the quiesce timeout/reply
 * path.  Returns CAM_REQ_CMP if already quiesced, CAM_REQ_INPROG when
 * the quiesce was just started, CAM_REQUEUE_REQ while one is pending,
 * or CAM_REQ_CMP_ERR if the action could not be issued.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		/* Guard against the IOC never answering the quiesce. */
		ccb->ccb_h.timeout_ch =
			timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
776 
777 /* XXX Ignores that there may be multiple busses/IOCs involved. */
778 cam_status
779 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
780 {
781 	struct mpt_raid_disk *mpt_disk;
782 
783 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
784 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
785 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
786 
787 		*tgt = mpt_disk->config_page.PhysDiskID;
788 		return (0);
789 	}
790 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
791 		 ccb->ccb_h.target_id);
792 	return (-1);
793 }
794 
795 /* XXX Ignores that there may be multiple busses/IOCs involved. */
796 int
797 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
798 {
799 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
800 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
801 
802 	ioc_vol = mpt->ioc_page2->RaidVolume;
803 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
804 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
805 		if (ioc_vol->VolumeID == tgt) {
806 			return (1);
807 		}
808 	}
809 	return (0);
810 }
811 
#if 0
/*
 * Enable or disable a RAID volume via a synchronous RAID action.
 * Currently compiled out (#if 0) -- unused.
 *
 * NOTE(review): on the ETIMEDOUT path the request is returned without
 * mpt_free_request(); presumably because the IOC may still complete a
 * timed-out request, making it unsafe to recycle -- confirm before
 * re-enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
864 
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with
 * the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * RAID action when they differ.  Under REBUILD_ONLY policy, MWCE
 * should be on only while a resync is in progress; the WCE_CHANGED
 * flag debounces the transient resync-flag drop that accompanies a
 * settings change.
 *
 * NOTE(review): on the ETIMEDOUT path the request is returned without
 * mpt_free_request(); presumably because the IOC may still complete a
 * timed-out request, making it unsafe to recycle -- confirm.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the WCE bit just long enough to snapshot the desired
	 * settings word, then restore the cached copy; it is updated
	 * for real only after the IOC accepts the change below.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
947 
/*
 * Reconcile a volume's resync rate and resync-priority setting with
 * the driver's configured rate.  A non-matching numeric rate is fixed
 * with a SET_RESYNC_RATE action; a mismatched high/low priority bit
 * (threshold 128) is fixed with a CHANGE_VOLUME_SETTINGS action.  The
 * cached config page is updated only after the IOC accepts the change.
 *
 * NOTE(review): as in mpt_verify_mwce, timed-out requests are not
 * freed -- presumably still owned by the IOC; confirm.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit to build the desired settings
		 * word, then restore the cached copy until the IOC
		 * confirms the change.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1038 
1039 static void
1040 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1041 		       struct cam_path *path)
1042 {
1043 	struct ccb_relsim crs;
1044 
1045 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1046 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1047 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1048 	crs.openings = mpt->raid_queue_depth;
1049 	xpt_action((union ccb *)&crs);
1050 	if (crs.ccb_h.status != CAM_REQ_CMP)
1051 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1052 			    "with CAM status %#x\n", crs.ccb_h.status);
1053 }
1054 
1055 static void
1056 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1057 {
1058 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1059 	u_int i;
1060 
1061 	vol_pg = mpt_vol->config_page;
1062 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
1063 	for (i = 1; i <= 0x8000; i <<= 1) {
1064 		switch (vol_pg->VolumeSettings.Settings & i) {
1065 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1066 			mpt_prtc(mpt, " Member-WCE");
1067 			break;
1068 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1069 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1070 			break;
1071 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1072 			mpt_prtc(mpt, " Hot-Plug-Spares");
1073 			break;
1074 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1075 			mpt_prtc(mpt, " High-Priority-ReSync");
1076 			break;
1077 		default:
1078 			break;
1079 		}
1080 	}
1081 	mpt_prtc(mpt, " )\n");
1082 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1083 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1084 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1085 			  ? ":" : "s:");
1086 		for (i = 0; i < 8; i++) {
1087 			u_int mask;
1088 
1089 			mask = 0x1 << i;
1090 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1091 				continue;
1092 			mpt_prtc(mpt, " %d", i);
1093 		}
1094 		mpt_prtc(mpt, "\n");
1095 	}
1096 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1097 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1098 		struct mpt_raid_disk *mpt_disk;
1099 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1100 
1101 		mpt_disk = mpt->raid_disks
1102 			 + vol_pg->PhysDisk[i].PhysDiskNum;
1103 		disk_pg = &mpt_disk->config_page;
1104 		mpt_prtc(mpt, "      ");
1105 		mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
1106 			 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
1107 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1108 			mpt_prtc(mpt, "%s\n",
1109 				 mpt_disk->member_number == 0
1110 			       ? "Primary" : "Secondary");
1111 		else
1112 			mpt_prtc(mpt, "Stripe Position %d\n",
1113 				 mpt_disk->member_number);
1114 	}
1115 }
1116 
1117 static void
1118 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1119 {
1120 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1121 	u_int i;
1122 
1123 	disk_pg = &mpt_disk->config_page;
1124 	mpt_disk_prt(mpt, mpt_disk,
1125 		     "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1126 		     device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1127 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1128 		     /*bus*/1, mpt_disk - mpt->raid_disks);
1129 
1130 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1131 		return;
1132 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1133 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1134 		   ? ":" : "s:");
1135 	for (i = 0; i < 8; i++) {
1136 		u_int mask;
1137 
1138 		mask = 0x1 << i;
1139 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1140 			continue;
1141 		mpt_prtc(mpt, " %d", i);
1142 	}
1143 	mpt_prtc(mpt, "\n");
1144 }
1145 
1146 static void
1147 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1148 		      IOC_3_PHYS_DISK *ioc_disk)
1149 {
1150 	int rv;
1151 
1152 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1153 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1154 				 &mpt_disk->config_page.Header,
1155 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1156 	if (rv != 0) {
1157 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1158 			"Failed to read RAID Disk Hdr(%d)\n",
1159 		 	ioc_disk->PhysDiskNum);
1160 		return;
1161 	}
1162 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1163 				   &mpt_disk->config_page.Header,
1164 				   sizeof(mpt_disk->config_page),
1165 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1166 	if (rv != 0)
1167 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1168 			"Failed to read RAID Disk Page(%d)\n",
1169 		 	ioc_disk->PhysDiskNum);
1170 }
1171 
/*
 * Refresh the cached RAID Volume Page 0 for the given volume from
 * the IOC, update the disk->volume back-pointers and member numbers,
 * and, if a resync is in progress, fetch the firmware's progress
 * indicator into mpt_vol->sync_progress.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* The cached page is about to be replaced; drop the marker. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
				 /*PageNumber*/0, ioc_vol->VolumePageNumber,
				 &vol_pg->Header, /*sleep_ok*/TRUE,
				 /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Hdr(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
				   &vol_pg->Header, mpt->raid_page0_len,
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Page(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/*
		 * NOTE(review): IM (mirror) maps appear to be 1-based and
		 * are normalized to 0-based here — confirm against the MPI
		 * spec's PhysDiskMap definition.
		 */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_disk->member_number--;
	}

	/* Only resyncing volumes have a progress indicator to fetch. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_INDICATOR_STRUCT,
				/*ActionWord*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): the request is deliberately not freed on
		 * timeout — presumably because the firmware may still
		 * complete it later; the same pattern appears in
		 * mpt_verify_resync_rate().  Confirm against
		 * mpt_issue_raid_req()'s timeout semantics.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch timedout!\n");
		return;
	}

	/* Copy the indicator out only if the action fully succeeded. */
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
	} else {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1249 
1250 /*
1251  * Update in-core information about RAID support.  We update any entries
1252  * that didn't previously exists or have been marked as needing to
1253  * be updated by our event handler.  Interesting changes are displayed
1254  * to the console.
1255  */
1256 int
1257 mpt_refresh_raid_data(struct mpt_softc *mpt)
1258 {
1259 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1260 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1261 	IOC_3_PHYS_DISK *ioc_disk;
1262 	IOC_3_PHYS_DISK *ioc_last_disk;
1263 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1264 	size_t len;
1265 	int rv;
1266 	int i;
1267 	u_int nonopt_volumes;
1268 
1269 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1270 		return (0);
1271 	}
1272 
1273 	/*
1274 	 * Mark all items as unreferenced by the configuration.
1275 	 * This allows us to find, report, and discard stale
1276 	 * entries.
1277 	 */
1278 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1279 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1280 	}
1281 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1282 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1283 	}
1284 
1285 	/*
1286 	 * Get Physical Disk information.
1287 	 */
1288 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1289 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1290 				   &mpt->ioc_page3->Header, len,
1291 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1292 	if (rv) {
1293 		mpt_prt(mpt,
1294 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1295 		return (-1);
1296 	}
1297 
1298 	ioc_disk = mpt->ioc_page3->PhysDisk;
1299 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1300 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1301 		struct mpt_raid_disk *mpt_disk;
1302 
1303 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1304 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1305 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1306 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1307 
1308 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1309 
1310 		}
1311 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1312 		mpt->raid_rescan++;
1313 	}
1314 
1315 	/*
1316 	 * Refresh volume data.
1317 	 */
1318 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1319 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1320 				   &mpt->ioc_page2->Header, len,
1321 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1322 	if (rv) {
1323 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1324 			"Failed to read IOC Page 2\n");
1325 		return (-1);
1326 	}
1327 
1328 	ioc_vol = mpt->ioc_page2->RaidVolume;
1329 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1330 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1331 		struct mpt_raid_volume *mpt_vol;
1332 
1333 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1334 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1335 		vol_pg = mpt_vol->config_page;
1336 		if (vol_pg == NULL)
1337 			continue;
1338 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1339 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1340 		 || (vol_pg->VolumeStatus.Flags
1341 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1342 
1343 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1344 		}
1345 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1346 	}
1347 
1348 	nonopt_volumes = 0;
1349 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1350 		struct mpt_raid_volume *mpt_vol;
1351 		uint64_t total;
1352 		uint64_t left;
1353 		int m;
1354 		u_int prio;
1355 
1356 		mpt_vol = &mpt->raid_volumes[i];
1357 
1358 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1359 			continue;
1360 
1361 		vol_pg = mpt_vol->config_page;
1362 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1363 		 == MPT_RVF_ANNOUNCED) {
1364 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1365 			mpt_vol->flags = 0;
1366 			continue;
1367 		}
1368 
1369 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1370 
1371 			mpt_announce_vol(mpt, mpt_vol);
1372 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1373 		}
1374 
1375 		if (vol_pg->VolumeStatus.State !=
1376 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1377 			nonopt_volumes++;
1378 
1379 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1380 			continue;
1381 
1382 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1383 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1384 			    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1385 		mpt_verify_mwce(mpt, mpt_vol);
1386 
1387 		if (vol_pg->VolumeStatus.Flags == 0)
1388 			continue;
1389 
1390 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1391 		for (m = 1; m <= 0x80; m <<= 1) {
1392 			switch (vol_pg->VolumeStatus.Flags & m) {
1393 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1394 				mpt_prtc(mpt, " Enabled");
1395 				break;
1396 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1397 				mpt_prtc(mpt, " Quiesced");
1398 				break;
1399 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1400 				mpt_prtc(mpt, " Re-Syncing");
1401 				break;
1402 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1403 				mpt_prtc(mpt, " Inactive");
1404 				break;
1405 			default:
1406 				break;
1407 			}
1408 		}
1409 		mpt_prtc(mpt, " )\n");
1410 
1411 		if ((vol_pg->VolumeStatus.Flags
1412 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1413 			continue;
1414 
1415 		mpt_verify_resync_rate(mpt, mpt_vol);
1416 
1417 		left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1418 		total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1419 		if (vol_pg->ResyncRate != 0) {
1420 
1421 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1422 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1423 			    prio / 1000, prio % 1000);
1424 		} else {
1425 			prio = vol_pg->VolumeSettings.Settings
1426 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1427 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1428 			    prio ? "High" : "Low");
1429 		}
1430 #if __FreeBSD_version >= 500000
1431 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1432 			    "blocks remaining\n", (uintmax_t)left,
1433 			    (uintmax_t)total);
1434 #else
1435 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1436 			    "blocks remaining\n", (uint64_t)left,
1437 			    (uint64_t)total);
1438 #endif
1439 
1440 		/* Periodically report on sync progress. */
1441 		mpt_schedule_raid_refresh(mpt);
1442 	}
1443 
1444 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1445 		struct mpt_raid_disk *mpt_disk;
1446 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1447 		int m;
1448 
1449 		mpt_disk = &mpt->raid_disks[i];
1450 		disk_pg = &mpt_disk->config_page;
1451 
1452 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1453 			continue;
1454 
1455 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1456 		 == MPT_RDF_ANNOUNCED) {
1457 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1458 			mpt_disk->flags = 0;
1459 			mpt->raid_rescan++;
1460 			continue;
1461 		}
1462 
1463 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1464 
1465 			mpt_announce_disk(mpt, mpt_disk);
1466 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1467 		}
1468 
1469 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1470 			continue;
1471 
1472 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1473 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1474 		if (disk_pg->PhysDiskStatus.Flags == 0)
1475 			continue;
1476 
1477 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1478 		for (m = 1; m <= 0x80; m <<= 1) {
1479 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1480 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1481 				mpt_prtc(mpt, " Out-Of-Sync");
1482 				break;
1483 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1484 				mpt_prtc(mpt, " Quiesced");
1485 				break;
1486 			default:
1487 				break;
1488 			}
1489 		}
1490 		mpt_prtc(mpt, " )\n");
1491 	}
1492 
1493 	mpt->raid_nonopt_volumes = nonopt_volumes;
1494 	return (0);
1495 }
1496 
/*
 * Callout handler: wake the RAID state machine so it can refresh
 * volume status and report resync progress.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1507 
/*
 * Callout handler armed while waiting for a volume to quiesce.
 * XXX Unimplemented stub: when this fires, the pending CCB should be
 * completed with an error status.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/* Complete the CCB with error */
	/* COWWWW */
}
1514 
/*
 * (Re)arm the RAID status timer so that mpt_raid_timer() fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL ticks, driving the periodic resync
 * progress reports issued by mpt_refresh_raid_data().
 */
void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{
	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1521 
1522 void
1523 mpt_raid_free_mem(struct mpt_softc *mpt)
1524 {
1525 
1526 	if (mpt->raid_volumes) {
1527 		struct mpt_raid_volume *mpt_raid;
1528 		int i;
1529 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1530 			mpt_raid = &mpt->raid_volumes[i];
1531 			if (mpt_raid->config_page) {
1532 				free(mpt_raid->config_page, M_DEVBUF);
1533 				mpt_raid->config_page = NULL;
1534 			}
1535 		}
1536 		free(mpt->raid_volumes, M_DEVBUF);
1537 		mpt->raid_volumes = NULL;
1538 	}
1539 	if (mpt->raid_disks) {
1540 		free(mpt->raid_disks, M_DEVBUF);
1541 		mpt->raid_disks = NULL;
1542 	}
1543 	if (mpt->ioc_page2) {
1544 		free(mpt->ioc_page2, M_DEVBUF);
1545 		mpt->ioc_page2 = NULL;
1546 	}
1547 	if (mpt->ioc_page3) {
1548 		free(mpt->ioc_page3, M_DEVBUF);
1549 		mpt->ioc_page3 = NULL;
1550 	}
1551 	mpt->raid_max_volumes =  0;
1552 	mpt->raid_max_disks =  0;
1553 }
1554 
1555 static int
1556 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1557 {
1558 	struct mpt_raid_volume *mpt_vol;
1559 
1560 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1561 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1562 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1563 		return (EINVAL);
1564 
1565 	MPT_LOCK(mpt);
1566 	mpt->raid_resync_rate = rate;
1567 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1568 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1569 			continue;
1570 		}
1571 		mpt_verify_resync_rate(mpt, mpt_vol);
1572 	}
1573 	MPT_UNLOCK(mpt);
1574 	return (0);
1575 }
1576 
1577 static int
1578 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1579 {
1580 	struct mpt_raid_volume *mpt_vol;
1581 
1582 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1583 		return (EINVAL);
1584 
1585 	MPT_LOCK(mpt);
1586 	mpt->raid_queue_depth = vol_queue_depth;
1587 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1588 		struct cam_path *path;
1589 		int error;
1590 
1591 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1592 			continue;
1593 
1594 		mpt->raid_rescan = 0;
1595 
1596 		error = xpt_create_path(&path, xpt_periph,
1597 					cam_sim_path(mpt->sim),
1598 					mpt_vol->config_page->VolumeID,
1599 					/*lun*/0);
1600 		if (error != CAM_REQ_CMP) {
1601 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1602 			continue;
1603 		}
1604 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1605 		xpt_free_path(path);
1606 	}
1607 	MPT_UNLOCK(mpt);
1608 	return (0);
1609 }
1610 
1611 static int
1612 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1613 {
1614 	struct mpt_raid_volume *mpt_vol;
1615 	int force_full_resync;
1616 
1617 	MPT_LOCK(mpt);
1618 	if (mwce == mpt->raid_mwce_setting) {
1619 		MPT_UNLOCK(mpt);
1620 		return (0);
1621 	}
1622 
1623 	/*
1624 	 * Catch MWCE being left on due to a failed shutdown.  Since
1625 	 * sysctls cannot be set by the loader, we treat the first
1626 	 * setting of this varible specially and force a full volume
1627 	 * resync if MWCE is enabled and a resync is in progress.
1628 	 */
1629 	force_full_resync = 0;
1630 	if (mpt->raid_mwce_set == 0
1631 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1632 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1633 		force_full_resync = 1;
1634 
1635 	mpt->raid_mwce_setting = mwce;
1636 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1637 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1638 		int resyncing;
1639 		int mwce;
1640 
1641 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1642 			continue;
1643 
1644 		vol_pg = mpt_vol->config_page;
1645 		resyncing = vol_pg->VolumeStatus.Flags
1646 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1647 		mwce = vol_pg->VolumeSettings.Settings
1648 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1649 		if (force_full_resync && resyncing && mwce) {
1650 
1651 			/*
1652 			 * XXX disable/enable volume should force a resync,
1653 			 *     but we'll need to queice, drain, and restart
1654 			 *     I/O to do that.
1655 			 */
1656 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1657 				    "detected.  Suggest full resync.\n");
1658 		}
1659 		mpt_verify_mwce(mpt, mpt_vol);
1660 	}
1661 	mpt->raid_mwce_set = 1;
1662 	MPT_UNLOCK(mpt);
1663 	return (0);
1664 }
1665 
/*
 * Textual names for the MWCE settings, indexed by mpt_raid_mwce_t
 * value (see mpt_raid_sysctl_vol_member_wce(), which maps strings
 * back to indices with strcmp).
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1673 
1674 static int
1675 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1676 {
1677 	char inbuf[20];
1678 	struct mpt_softc *mpt;
1679 	const char *str;
1680 	int error;
1681 	u_int size;
1682 	u_int i;
1683 
1684 	GIANT_REQUIRED;
1685 
1686 	mpt = (struct mpt_softc *)arg1;
1687 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1688 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1689 	if (error || !req->newptr) {
1690 		return (error);
1691 	}
1692 
1693 	size = req->newlen - req->newidx;
1694 	if (size >= sizeof(inbuf)) {
1695 		return (EINVAL);
1696 	}
1697 
1698 	error = SYSCTL_IN(req, inbuf, size);
1699 	if (error) {
1700 		return (error);
1701 	}
1702 	inbuf[size] = '\0';
1703 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1704 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1705 			return (mpt_raid_set_vol_mwce(mpt, i));
1706 		}
1707 	}
1708 	return (EINVAL);
1709 }
1710 
1711 static int
1712 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1713 {
1714 	struct mpt_softc *mpt;
1715 	u_int raid_resync_rate;
1716 	int error;
1717 
1718 	GIANT_REQUIRED;
1719 
1720 	mpt = (struct mpt_softc *)arg1;
1721 	raid_resync_rate = mpt->raid_resync_rate;
1722 
1723 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1724 	if (error || !req->newptr) {
1725 		return error;
1726 	}
1727 
1728 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1729 }
1730 
1731 static int
1732 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1733 {
1734 	struct mpt_softc *mpt;
1735 	u_int raid_queue_depth;
1736 	int error;
1737 
1738 	GIANT_REQUIRED;
1739 
1740 	mpt = (struct mpt_softc *)arg1;
1741 	raid_queue_depth = mpt->raid_queue_depth;
1742 
1743 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1744 	if (error || !req->newptr) {
1745 		return error;
1746 	}
1747 
1748 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1749 }
1750 
/*
 * Register the RAID sysctl nodes under this device's sysctl tree:
 * the member-WCE policy (string), the default volume queue depth,
 * the resync rate, and a read-only count of non-optimal volumes.
 * Compiled out entirely on pre-5.x FreeBSD.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
#if __FreeBSD_version >= 500000
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
#endif
}
1778