xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 3d11b6c8f01e1fca5936a11d6996448467851a94)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <dev/mpt/mpt.h>
39 #include <dev/mpt/mpt_raid.h>
40 
41 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
42 #include "dev/mpt/mpilib/mpi_raid.h"
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_sim.h>
47 #include <cam/cam_xpt_sim.h>
48 
49 #if __FreeBSD_version < 500000
50 #include <sys/devicestat.h>
51 #define	GIANT_REQUIRED
52 #endif
53 #include <cam/cam_periph.h>
54 
55 #include <sys/callout.h>
56 #include <sys/kthread.h>
57 #include <sys/sysctl.h>
58 
59 #include <machine/stdarg.h>
60 
/*
 * Host-side copy of the result of a RAID action request.  It is stored
 * in the request buffer immediately after the MSG_RAID_ACTION_REQUEST
 * (see REQ_TO_RAID_ACTION_RESULT below) by the reply frame handler.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * The action result lives directly after the request message in the
 * request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Strip status-flag bits, leaving only the IOC status code. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
75 
76 
/* Personality entry points implemented by this module. */
static mpt_probe_handler_t	mpt_raid_probe;
static mpt_attach_handler_t	mpt_raid_attach;
static mpt_event_handler_t	mpt_raid_event;
static mpt_shutdown_handler_t	mpt_raid_shutdown;
static mpt_reset_handler_t	mpt_raid_ioc_reset;
static mpt_detach_handler_t	mpt_raid_detach;

/*
 * Registration record for the mpt(4) personality framework; the core
 * driver dispatches probe/attach/event/reset/shutdown/detach through
 * this table.
 */
static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);

/* Internal helpers. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static timeout_t mpt_raid_timer;
static timeout_t mpt_raid_quiesce_timeout;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *mpt,
			    struct mpt_raid_volume *mpt_vol);
static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
				   struct mpt_raid_volume *mpt_vol,
				   struct cam_path *path);
static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);

/* Reply handler id assigned by mpt_register_handler() at attach time. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
118 
119 const char *
120 mpt_vol_type(struct mpt_raid_volume *vol)
121 {
122 	switch (vol->config_page->VolumeType) {
123 	case MPI_RAID_VOL_TYPE_IS:
124 		return ("RAID-0");
125 	case MPI_RAID_VOL_TYPE_IME:
126 		return ("RAID-1E");
127 	case MPI_RAID_VOL_TYPE_IM:
128 		return ("RAID-1");
129 	default:
130 		return ("Unknown");
131 	}
132 }
133 
134 const char *
135 mpt_vol_state(struct mpt_raid_volume *vol)
136 {
137 	switch (vol->config_page->VolumeStatus.State) {
138 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
139 		return ("Optimal");
140 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
141 		return ("Degraded");
142 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
143 		return ("Failed");
144 	default:
145 		return ("Unknown");
146 	}
147 }
148 
149 const char *
150 mpt_disk_state(struct mpt_raid_disk *disk)
151 {
152 	switch (disk->config_page.PhysDiskStatus.State) {
153 	case MPI_PHYSDISK0_STATUS_ONLINE:
154 		return ("Online");
155 	case MPI_PHYSDISK0_STATUS_MISSING:
156 		return ("Missing");
157 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
158 		return ("Incompatible");
159 	case MPI_PHYSDISK0_STATUS_FAILED:
160 		return ("Failed");
161 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
162 		return ("Initializing");
163 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
164 		return ("Offline Requested");
165 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
166 		return ("Failed per Host Request");
167 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
168 		return ("Offline");
169 	default:
170 		return ("Unknown");
171 	}
172 }
173 
/*
 * printf-style console message prefixed with the adapter unit, the
 * volume's index within raid_volumes[], and the volume's bus/target id.
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* Volume index is its offset within the raid_volumes array. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
187 
/*
 * printf-style console message prefixed with the disk's identity: the
 * owning volume and member number when the disk belongs to a volume,
 * otherwise the disk's physical bus/target id.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		/* Member of a volume: identify by volume id and position. */
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		/* Stand-alone physical disk: identify by bus/target. */
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
208 
209 static void
210 mpt_raid_async(void *callback_arg, u_int32_t code,
211 	       struct cam_path *path, void *arg)
212 {
213 	struct mpt_softc *mpt;
214 
215 	mpt = (struct mpt_softc*)callback_arg;
216 	switch (code) {
217 	case AC_FOUND_DEVICE:
218 	{
219 		struct ccb_getdev *cgd;
220 		struct mpt_raid_volume *mpt_vol;
221 
222 		cgd = (struct ccb_getdev *)arg;
223 		if (cgd == NULL)
224 			break;
225 
226 		mpt_lprt(mpt, MPT_PRT_DEBUG, " Callback for %d\n",
227 			 cgd->ccb_h.target_id);
228 
229 		RAID_VOL_FOREACH(mpt, mpt_vol) {
230 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
231 				continue;
232 
233 			if (mpt_vol->config_page->VolumeID
234 			 == cgd->ccb_h.target_id) {
235 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
236 				break;
237 			}
238 		}
239 	}
240 	default:
241 		break;
242 	}
243 }
244 
245 int
246 mpt_raid_probe(struct mpt_softc *mpt)
247 {
248 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
249 		return (ENODEV);
250 	}
251 	return (0);
252 }
253 
/*
 * Attach the RAID personality: register our reply handler, start the
 * RAID monitoring thread, hook CAM's AC_FOUND_DEVICE async events, and
 * create the sysctl nodes.  On failure everything acquired so far is
 * torn down via mpt_raid_detach().  Returns 0 on success or an errno.
 */
int
mpt_raid_attach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t	 handler;
	int		 error;

	mpt_callout_init(&mpt->raid_timer);

	/* Route RAID action replies to mpt_raid_reply_handler. */
	handler.reply_handler = mpt_raid_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &raid_handler_id);
	if (error != 0)
		goto cleanup;

	error = mpt_spawn_raid_thread(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
		goto cleanup;
	}

	/* Watch for newly found devices so volume queue depths can be set. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		/* Non-fatal: we lose queue-depth tuning, not RAID support. */
		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
			"CAM async handler.\n");
	}

	mpt_raid_sysctl_attach(mpt);
	return (0);
cleanup:
	mpt_raid_detach(mpt);
	return (error);
}
292 
/*
 * Undo mpt_raid_attach(): stop the timer, terminate the RAID thread,
 * deregister our reply handler and disable the CAM async callback.
 * Also used as the error-unwind path of mpt_raid_attach(), so every
 * step must be safe when its counterpart never completed.
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	mpt_terminate_raid_thread(mpt);

	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 removes our async registration. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
}
312 
/*
 * IOC reset notification hook.  No RAID-specific state needs to be
 * rebuilt after a reset yet; present so the personality table has a
 * valid entry point.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
318 
/*
 * Human readable descriptions of RAID event reason codes, indexed
 * directly by EVENT_DATA_RAID.ReasonCode (assumes the
 * MPI_EVENT_RAID_RC_* codes are dense starting at 0 in this order —
 * matches the switch in mpt_raid_event()).  mpt_raid_event() bounds
 * checks the code against NUM_ELEMENTS() before indexing.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
334 
335 static int
336 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
337 	       MSG_EVENT_NOTIFY_REPLY *msg)
338 {
339 	EVENT_DATA_RAID *raid_event;
340 	struct mpt_raid_volume *mpt_vol;
341 	struct mpt_raid_disk *mpt_disk;
342 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
343 	int i;
344 	int print_event;
345 
346 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID)
347 		return (/*handled*/0);
348 
349 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
350 
351 	mpt_vol = NULL;
352 	vol_pg = NULL;
353 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
354 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
355 			mpt_vol = &mpt->raid_volumes[i];
356 			vol_pg = mpt_vol->config_page;
357 
358 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
359 				continue;
360 
361 			if (vol_pg->VolumeID == raid_event->VolumeID
362 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
363 				break;
364 		}
365 		if (i >= mpt->ioc_page2->MaxVolumes) {
366 			mpt_vol = NULL;
367 			vol_pg = NULL;
368 		}
369 	}
370 
371 	mpt_disk = NULL;
372 	if (raid_event->PhysDiskNum != 0xFF
373 	 && mpt->raid_disks != NULL) {
374 		mpt_disk = mpt->raid_disks
375 			 + raid_event->PhysDiskNum;
376 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
377 			mpt_disk = NULL;
378 	}
379 
380 	print_event = 1;
381 	switch(raid_event->ReasonCode) {
382 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
383 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
384 		break;
385 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
386 		if (mpt_vol != NULL) {
387 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
388 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
389 			} else {
390 				/*
391 				 * Coalesce status messages into one
392 				 * per background run of our RAID thread.
393 				 * This removes "spurious" status messages
394 				 * from our output.
395 				 */
396 				print_event = 0;
397 			}
398 		}
399 		break;
400 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
401 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
402 		mpt->raid_rescan++;
403 		if (mpt_vol != NULL)
404 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
405 		break;
406 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
407 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
408 		mpt->raid_rescan++;
409 		break;
410 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
411 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
412 		mpt->raid_rescan++;
413 		if (mpt_disk != NULL)
414 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
415 		break;
416 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
417 		mpt->raid_rescan++;
418 		break;
419 	case MPI_EVENT_RAID_RC_SMART_DATA:
420 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
421 		break;
422 	}
423 
424 	if (print_event) {
425 		if (mpt_disk != NULL) {
426 			mpt_disk_prt(mpt, mpt_disk, "");
427 		} else if (mpt_vol != NULL) {
428 			mpt_vol_prt(mpt, mpt_vol, "");
429 		} else {
430 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
431 				raid_event->VolumeID);
432 
433 			if (raid_event->PhysDiskNum != 0xFF)
434 				mpt_prtc(mpt, ":%d): ",
435 					 raid_event->PhysDiskNum);
436 			else
437 				mpt_prtc(mpt, "): ");
438 		}
439 
440 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
441 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
442 				 raid_event->ReasonCode);
443 		else
444 			mpt_prtc(mpt, "%s\n",
445 				 raid_event_txt[raid_event->ReasonCode]);
446 	}
447 
448 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
449 		/* XXX Use CAM's print sense for this... */
450 		if (mpt_disk != NULL)
451 			mpt_disk_prt(mpt, mpt_disk, "");
452 		else
453 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
454 			    raid_event->VolumeBus, raid_event->VolumeID,
455 			    raid_event->PhysDiskNum);
456 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
457 			 raid_event->ASC, raid_event->ASCQ);
458 	}
459 
460 	mpt_raid_wakeup(mpt);
461 	return (/*handled*/1);
462 }
463 
464 static void
465 mpt_raid_shutdown(struct mpt_softc *mpt)
466 {
467 	struct mpt_raid_volume *mpt_vol;
468 
469 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
470 		return;
471 	}
472 
473 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
474 	RAID_VOL_FOREACH(mpt, mpt_vol) {
475 		mpt_verify_mwce(mpt, mpt_vol);
476 	}
477 }
478 
/*
 * Reply handler for RAID action requests.  Parses the reply frame (if
 * one was delivered), retires the request from the pending queue, and
 * either wakes a sleeping waiter or frees the request.  Always returns
 * TRUE so the caller releases the reply frame.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (/*free_reply*/TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	/* Mark the request completed and take it off the pending list. */
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request and will free it; otherwise free now. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (/*free_reply*/TRUE);
}
509 
510 /*
511  * Parse additional completion information in the reply
512  * frame for RAID I/O requests.
513  */
514 static int
515 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
516 			     MSG_DEFAULT_REPLY *reply_frame)
517 {
518 	MSG_RAID_ACTION_REPLY *reply;
519 	struct mpt_raid_action_result *action_result;
520 	MSG_RAID_ACTION_REQUEST *rap;
521 
522 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
523 	req->IOCStatus = le16toh(reply->IOCStatus);
524 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
525 
526 	switch (rap->Action) {
527 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
528 		/*
529 		 * Parse result, call mpt_start with ccb,
530 		 * release device queue.
531 		 * COWWWWW
532 		 */
533 		break;
534 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
535 		/*
536 		 * Need additional state for transition to enabled to
537 		 * protect against attempts to disable??
538 		 */
539 		break;
540 	default:
541 		action_result = REQ_TO_RAID_ACTION_RESULT(req);
542 		memcpy(&action_result->action_data, &reply->ActionData,
543 		       sizeof(action_result->action_data));
544 		action_result->action_status = reply->ActionStatus;
545 		break;
546 	}
547 
548 	return (/*Free Request*/TRUE);
549 }
550 
551 /*
552  * Utiltity routine to perform a RAID action command;
553  */
554 int
555 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
556 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
557 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
558 		   int write, int wait)
559 {
560 	MSG_RAID_ACTION_REQUEST *rap;
561 	SGE_SIMPLE32 *se;
562 
563 	rap = req->req_vbuf;
564 	memset(rap, 0, sizeof *rap);
565 	rap->Action = Action;
566 	rap->ActionDataWord = ActionDataWord;
567 	rap->Function = MPI_FUNCTION_RAID_ACTION;
568 	rap->VolumeID = vol->config_page->VolumeID;
569 	rap->VolumeBus = vol->config_page->VolumeBus;
570 	if (disk != 0)
571 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
572 	else
573 		rap->PhysDiskNum = 0xFF;
574 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
575 	se->Address = addr;
576 	MPI_pSGE_SET_LENGTH(se, len);
577 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
578 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
579 	    MPI_SGE_FLAGS_END_OF_LIST |
580 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
581 	rap->MsgContext = htole32(req->index | raid_handler_id);
582 
583 	mpt_check_doorbell(mpt);
584 	mpt_send_cmd(mpt, req);
585 
586 	if (wait) {
587 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
588 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
589 	} else {
590 		return (0);
591 	}
592 }
593 
594 /*************************** RAID Status Monitoring ***************************/
/*
 * Start the RAID monitoring kthread.  The physical disk SIM queue is
 * frozen first and only released by the thread after its first RAID
 * data refresh (or here on spawn failure).  Returns 0 or an errno.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0)
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
	return (error);
}
615 
/*
 * Ask the RAID monitoring thread to exit and wait for it to do so.
 * The thread clears mpt->raid_thread and wakes &mpt->raid_thread on
 * its way out; a NULL raid_thread means it was never started.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
631 
632 static void
633 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
634 {
635 	xpt_free_path(ccb->ccb_h.path);
636 	free(ccb, M_DEVBUF);
637 }
638 
/*
 * RAID monitoring kthread.  Sleeps until mpt_raid_wakeup() posts work,
 * then refreshes cached RAID configuration data and, when requested,
 * issues a wildcard bus rescan of the physical disk SIM.  The SIM
 * queue frozen by mpt_spawn_raid_thread() is released after the first
 * successful refresh.  Exits when mpt->shutdwn_raid is set, clearing
 * mpt->raid_thread and waking mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

#if __FreeBSD_version >= 500000
	mtx_lock(&Giant);
#endif
	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		/* Block until an event or timer posts new work. */
		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		mpt_refresh_raid_data(mpt);

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, /*run_queue*/TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;

			/* CCB and path are freed by the rescan callback. */
			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
			error = xpt_create_path(&path, xpt_periph,
						cam_sim_path(mpt->phydisk_sim),
						CAM_TARGET_WILDCARD,
						CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				free(ccb, M_DEVBUF);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);
			}
		}
	}
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
#if __FreeBSD_version >= 500000
	mtx_unlock(&Giant);
#endif
	kthread_exit(0);
}
703 
/*
 * Begin quiescing physical I/O to a RAID member disk.  Returns
 * CAM_REQ_CMP if already quiesced, CAM_REQ_INPROG after issuing the
 * (asynchronous) quiesce action with a 5 second watchdog, and
 * CAM_REQUEUE_REQ while a previously issued quiesce is still pending.
 * The device queue is frozen for the duration of the quiesce attempt.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		/* Fire-and-forget; completion is handled via the reply path. */
		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		/* Watchdog in case the quiesce action never completes. */
		ccb->ccb_h.timeout_ch =
			timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
753 
754 /* XXX Ignores that there may be multiple busses/IOCs involved. */
755 cam_status
756 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
757 {
758 	struct mpt_raid_disk *mpt_disk;
759 
760 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
761 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
762 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
763 
764 		*tgt = mpt_disk->config_page.PhysDiskID;
765 		return (0);
766 	}
767 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_map_physdisk(%d) - Not Active\n",
768 		 ccb->ccb_h.target_id);
769 	return (-1);
770 }
771 
#if 0
/*
 * Enable or disable a RAID volume via a RAID action request, if its
 * current state differs from the requested one.  Currently compiled
 * out.
 *
 * NOTE(review): the ETIMEDOUT path returns without mpt_free_request();
 * presumably because a timed-out request may still complete later and
 * must not be recycled — confirm before re-enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
824 
/*
 * Reconcile a volume's member write cache enable (MWCE) setting with
 * the driver's configured policy (on / off / rebuild-only / no-change),
 * issuing a CHANGE_VOLUME_SETTINGS RAID action when they differ.  The
 * cached config page is only updated after the IOC accepts the change.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		/* Debounce: require the mismatch on two consecutive passes. */
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/* Build the desired settings word by toggling WCE, then restore. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed here; presumably a
		 * timed-out request may still complete and must not be
		 * recycled — confirm this is intentional.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* IOC accepted the change; commit it to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
907 
/*
 * Reconcile a volume's resync rate and resync priority with the
 * driver's configured rate.  A non-matching explicit rate is fixed via
 * SET_RESYNC_RATE; the high/low priority settings bit is kept in step
 * with the rate (>= 128 means low priority) via
 * CHANGE_VOLUME_SETTINGS.  Cached pages are only updated on success.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): req is not freed here; presumably
			 * a timed-out request may still complete and must
			 * not be recycled — confirm this is intentional.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/* Build the desired settings word with the priority toggled. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req not freed — see note above. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* IOC accepted the change; commit to the cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
998 
999 static void
1000 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1001 		       struct cam_path *path)
1002 {
1003 	struct ccb_relsim crs;
1004 
1005 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1006 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1007 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1008 	crs.openings = mpt->raid_queue_depth;
1009 	xpt_action((union ccb *)&crs);
1010 	if (crs.ccb_h.status != CAM_REQ_CMP)
1011 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1012 			    "with CAM status %#x\n", crs.ccb_h.status);
1013 }
1014 
/*
 * Print a volume's settings, hot spare pool membership, and member
 * disk roster to the console at attach/announce time.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit and name the ones that are set. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* HotSparePool is a bitmask of the pools this volume uses. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;

		mpt_disk = mpt->raid_disks
			 + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
			 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
		/* RAID-1 members are primary/secondary; others are striped. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_prtc(mpt, "%s\n",
				 mpt_disk->member_number == 0
			       ? "Primary" : "Secondary");
		else
			mpt_prtc(mpt, "Stripe Position %d\n",
				 mpt_disk->member_number);
	}
}
1076 
1077 static void
1078 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1079 {
1080 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1081 	u_int i;
1082 
1083 	disk_pg = &mpt_disk->config_page;
1084 	mpt_disk_prt(mpt, mpt_disk,
1085 		     "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1086 		     device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1087 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1088 		     /*bus*/1, mpt_disk - mpt->raid_disks);
1089 
1090 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1091 		return;
1092 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1093 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1094 		   ? ":" : "s:");
1095 	for (i = 0; i < 8; i++) {
1096 		u_int mask;
1097 
1098 		mask = 0x1 << i;
1099 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1100 			continue;
1101 		mpt_prtc(mpt, " %d", i);
1102 	}
1103 	mpt_prtc(mpt, "\n");
1104 }
1105 
1106 static void
1107 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1108 		      IOC_3_PHYS_DISK *ioc_disk)
1109 {
1110 	int rv;
1111 
1112 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1113 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1114 				 &mpt_disk->config_page.Header,
1115 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1116 	if (rv != 0) {
1117 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1118 			"Failed to read RAID Disk Hdr(%d)\n",
1119 		 	ioc_disk->PhysDiskNum);
1120 		return;
1121 	}
1122 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1123 				   &mpt_disk->config_page.Header,
1124 				   sizeof(mpt_disk->config_page),
1125 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1126 	if (rv != 0)
1127 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1128 			"Failed to read RAID Disk Page(%d)\n",
1129 		 	ioc_disk->PhysDiskNum);
1130 }
1131 
/*
 * Re-read the configuration header and page for a single RAID volume,
 * mark it active, refresh the per-member-disk bookkeeping, and -- when
 * a resync is in progress -- fetch the resync progress indicator from
 * the IOC into mpt_vol->sync_progress.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Invalidate cached state until the refresh completes. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
				 /*PageNumber*/0, ioc_vol->VolumePageNumber,
				 &vol_pg->Header, /*sleep_ok*/TRUE,
				 /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Hdr(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
				   &vol_pg->Header, mpt->raid_page0_len,
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Page(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/*
		 * NOTE(review): PhysDiskMap appears to be 1-based for
		 * IM (mirror) volumes, so convert to a 0-based member
		 * number (0 == primary) -- confirm against the MPI spec.
		 */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_disk->member_number--;
	}

	/* Nothing more to do unless a resync is underway. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	/* Ask the IOC for the resync progress indicator structure. */
	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_INDICATOR_STRUCT,
				/*ActionWord*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed on the timeout path,
		 * presumably because the firmware may still complete it
		 * later -- confirm against the driver's request
		 * lifecycle rules.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch timedout!\n");
		return;
	}

	/* Accept the result only when both driver and IOC report success. */
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
	} else {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1209 
1210 /*
1211  * Update in-core information about RAID support.  We update any entries
1212  * that didn't previously exists or have been marked as needing to
1213  * be updated by our event handler.  Interesting changes are displayed
1214  * to the console.
1215  */
1216 void
1217 mpt_refresh_raid_data(struct mpt_softc *mpt)
1218 {
1219 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1220 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1221 	IOC_3_PHYS_DISK *ioc_disk;
1222 	IOC_3_PHYS_DISK *ioc_last_disk;
1223 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1224 	size_t len;
1225 	int rv;
1226 	int i;
1227 	u_int nonopt_volumes;
1228 
1229 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1230 		return;
1231 	}
1232 
1233 	/*
1234 	 * Mark all items as unreferenced by the configuration.
1235 	 * This allows us to find, report, and discard stale
1236 	 * entries.
1237 	 */
1238 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1239 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1240 	}
1241 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1242 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1243 	}
1244 
1245 	/*
1246 	 * Get Physical Disk information.
1247 	 */
1248 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1249 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1250 				   &mpt->ioc_page3->Header, len,
1251 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1252 	if (rv) {
1253 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1254 			"Failed to read IOC Page 3\n");
1255 		return;
1256 	}
1257 
1258 	ioc_disk = mpt->ioc_page3->PhysDisk;
1259 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1260 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1261 		struct mpt_raid_disk *mpt_disk;
1262 
1263 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1264 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1265 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1266 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1267 
1268 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1269 
1270 		}
1271 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1272 		mpt->raid_rescan++;
1273 	}
1274 
1275 	/*
1276 	 * Refresh volume data.
1277 	 */
1278 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1279 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1280 				   &mpt->ioc_page2->Header, len,
1281 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1282 	if (rv) {
1283 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1284 			"Failed to read IOC Page 2\n");
1285 		return;
1286 	}
1287 
1288 	ioc_vol = mpt->ioc_page2->RaidVolume;
1289 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1290 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1291 		struct mpt_raid_volume *mpt_vol;
1292 
1293 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1294 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1295 		vol_pg = mpt_vol->config_page;
1296 		if (vol_pg == NULL)
1297 			continue;
1298 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1299 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1300 		 || (vol_pg->VolumeStatus.Flags
1301 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1302 
1303 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1304 		}
1305 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1306 	}
1307 
1308 	nonopt_volumes = 0;
1309 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1310 		struct mpt_raid_volume *mpt_vol;
1311 		uint64_t total;
1312 		uint64_t left;
1313 		int m;
1314 		u_int prio;
1315 
1316 		mpt_vol = &mpt->raid_volumes[i];
1317 
1318 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1319 			continue;
1320 
1321 		vol_pg = mpt_vol->config_page;
1322 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1323 		 == MPT_RVF_ANNOUNCED) {
1324 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1325 			mpt_vol->flags = 0;
1326 			continue;
1327 		}
1328 
1329 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1330 
1331 			mpt_announce_vol(mpt, mpt_vol);
1332 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1333 		}
1334 
1335 		if (vol_pg->VolumeStatus.State !=
1336 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1337 			nonopt_volumes++;
1338 
1339 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1340 			continue;
1341 
1342 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1343 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1344 			    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1345 		mpt_verify_mwce(mpt, mpt_vol);
1346 
1347 		if (vol_pg->VolumeStatus.Flags == 0)
1348 			continue;
1349 
1350 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1351 		for (m = 1; m <= 0x80; m <<= 1) {
1352 			switch (vol_pg->VolumeStatus.Flags & m) {
1353 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1354 				mpt_prtc(mpt, " Enabled");
1355 				break;
1356 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1357 				mpt_prtc(mpt, " Quiesced");
1358 				break;
1359 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1360 				mpt_prtc(mpt, " Re-Syncing");
1361 				break;
1362 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1363 				mpt_prtc(mpt, " Inactive");
1364 				break;
1365 			default:
1366 				break;
1367 			}
1368 		}
1369 		mpt_prtc(mpt, " )\n");
1370 
1371 		if ((vol_pg->VolumeStatus.Flags
1372 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1373 			continue;
1374 
1375 		mpt_verify_resync_rate(mpt, mpt_vol);
1376 
1377 		left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1378 		total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1379 		if (vol_pg->ResyncRate != 0) {
1380 
1381 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1382 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1383 			    prio / 1000, prio % 1000);
1384 		} else {
1385 			prio = vol_pg->VolumeSettings.Settings
1386 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1387 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1388 			    prio ? "High" : "Low");
1389 		}
1390 #if __FreeBSD_version >= 500000
1391 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1392 			    "blocks remaining\n", (uintmax_t)left,
1393 			    (uintmax_t)total);
1394 #else
1395 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1396 			    "blocks remaining\n", (uint64_t)left,
1397 			    (uint64_t)total);
1398 #endif
1399 
1400 		/* Periodically report on sync progress. */
1401 		mpt_schedule_raid_refresh(mpt);
1402 	}
1403 
1404 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1405 		struct mpt_raid_disk *mpt_disk;
1406 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1407 		int m;
1408 
1409 		mpt_disk = &mpt->raid_disks[i];
1410 		disk_pg = &mpt_disk->config_page;
1411 
1412 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1413 			continue;
1414 
1415 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1416 		 == MPT_RDF_ANNOUNCED) {
1417 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1418 			mpt_disk->flags = 0;
1419 			mpt->raid_rescan++;
1420 			continue;
1421 		}
1422 
1423 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1424 
1425 			mpt_announce_disk(mpt, mpt_disk);
1426 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1427 		}
1428 
1429 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1430 			continue;
1431 
1432 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1433 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1434 		if (disk_pg->PhysDiskStatus.Flags == 0)
1435 			continue;
1436 
1437 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1438 		for (m = 1; m <= 0x80; m <<= 1) {
1439 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1440 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1441 				mpt_prtc(mpt, " Out-Of-Sync");
1442 				break;
1443 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1444 				mpt_prtc(mpt, " Quiesced");
1445 				break;
1446 			default:
1447 				break;
1448 			}
1449 		}
1450 		mpt_prtc(mpt, " )\n");
1451 	}
1452 
1453 	mpt->raid_nonopt_volumes = nonopt_volumes;
1454 }
1455 
/*
 * Callout handler: take the driver lock and wake the RAID monitoring
 * logic so it re-examines volume state.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = (struct mpt_softc *)arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1466 
/*
 * Timeout handler for a volume quiesce request that never completed.
 *
 * XXX Unimplemented: the pending CCB should be completed with an error
 * status here (the body below is the original placeholder).
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/* Complete the CCB with error */
	/* COWWWW */
}
1473 
1474 void
1475 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1476 {
1477 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1478 		      mpt_raid_timer, mpt);
1479 }
1480 
1481 static int
1482 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1483 {
1484 	struct mpt_raid_volume *mpt_vol;
1485 
1486 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1487 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1488 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1489 		return (EINVAL);
1490 
1491 	MPT_LOCK(mpt);
1492 	mpt->raid_resync_rate = rate;
1493 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1494 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1495 			continue;
1496 		}
1497 		mpt_verify_resync_rate(mpt, mpt_vol);
1498 	}
1499 	MPT_UNLOCK(mpt);
1500 	return (0);
1501 }
1502 
1503 static int
1504 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1505 {
1506 	struct mpt_raid_volume *mpt_vol;
1507 
1508 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1509 		return (EINVAL);
1510 
1511 	MPT_LOCK(mpt);
1512 	mpt->raid_queue_depth = vol_queue_depth;
1513 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1514 		struct cam_path *path;
1515 		int error;
1516 
1517 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1518 			continue;
1519 
1520 		mpt->raid_rescan = 0;
1521 
1522 		error = xpt_create_path(&path, xpt_periph,
1523 					cam_sim_path(mpt->sim),
1524 					mpt_vol->config_page->VolumeID,
1525 					/*lun*/0);
1526 		if (error != CAM_REQ_CMP) {
1527 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1528 			continue;
1529 		}
1530 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1531 		xpt_free_path(path);
1532 	}
1533 	MPT_UNLOCK(mpt);
1534 	return (0);
1535 }
1536 
1537 static int
1538 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1539 {
1540 	struct mpt_raid_volume *mpt_vol;
1541 	int force_full_resync;
1542 
1543 	MPT_LOCK(mpt);
1544 	if (mwce == mpt->raid_mwce_setting) {
1545 		MPT_UNLOCK(mpt);
1546 		return (0);
1547 	}
1548 
1549 	/*
1550 	 * Catch MWCE being left on due to a failed shutdown.  Since
1551 	 * sysctls cannot be set by the loader, we treat the first
1552 	 * setting of this varible specially and force a full volume
1553 	 * resync if MWCE is enabled and a resync is in progress.
1554 	 */
1555 	force_full_resync = 0;
1556 	if (mpt->raid_mwce_set == 0
1557 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1558 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1559 		force_full_resync = 1;
1560 
1561 	mpt->raid_mwce_setting = mwce;
1562 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1563 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1564 		int resyncing;
1565 		int mwce;
1566 
1567 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1568 			continue;
1569 
1570 		vol_pg = mpt_vol->config_page;
1571 		resyncing = vol_pg->VolumeStatus.Flags
1572 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1573 		mwce = vol_pg->VolumeSettings.Settings
1574 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1575 		if (force_full_resync && resyncing && mwce) {
1576 
1577 			/*
1578 			 * XXX disable/enable volume should force a resync,
1579 			 *     but we'll need to queice, drain, and restart
1580 			 *     I/O to do that.
1581 			 */
1582 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1583 				    "detected.  Suggest full resync.\n");
1584 		}
1585 		mpt_verify_mwce(mpt, mpt_vol);
1586 	}
1587 	mpt->raid_mwce_set = 1;
1588 	MPT_UNLOCK(mpt);
1589 	return (0);
1590 }
1591 
/*
 * Textual names for the MWCE settings.  Order must match the values
 * passed to/returned from mpt_raid_set_vol_mwce(): the sysctl handler
 * indexes this array with mpt->raid_mwce_setting and uses the matching
 * array index as the new setting.
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1599 
1600 static int
1601 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1602 {
1603 	char inbuf[20];
1604 	struct mpt_softc *mpt;
1605 	const char *str;
1606 	int error;
1607 	u_int size;
1608 	u_int i;
1609 
1610 	GIANT_REQUIRED;
1611 	mpt = (struct mpt_softc *)arg1;
1612 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1613 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1614 	if (error || !req->newptr)
1615 		return (error);
1616 
1617 	size = req->newlen - req->newidx;
1618 	if (size >= sizeof(inbuf))
1619 		return (EINVAL);
1620 
1621 	error = SYSCTL_IN(req, inbuf, size);
1622 	if (error)
1623 		return (error);
1624 	inbuf[size] = '\0';
1625 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1626 
1627 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0)
1628 			return (mpt_raid_set_vol_mwce(mpt, i));
1629 	}
1630 	return (EINVAL);
1631 }
1632 
1633 static int
1634 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1635 {
1636 	struct mpt_softc *mpt;
1637 	u_int raid_resync_rate;
1638 	int error;
1639 
1640 	GIANT_REQUIRED;
1641 	mpt = (struct mpt_softc *)arg1;
1642 	raid_resync_rate = mpt->raid_resync_rate;
1643 
1644 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1645 	if (error || !req->newptr)
1646 		return error;
1647 
1648 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1649 }
1650 
1651 static int
1652 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1653 {
1654 	struct mpt_softc *mpt;
1655 	u_int raid_queue_depth;
1656 	int error;
1657 
1658 	GIANT_REQUIRED;
1659 	mpt = (struct mpt_softc *)arg1;
1660 	raid_queue_depth = mpt->raid_queue_depth;
1661 
1662 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1663 	if (error || !req->newptr)
1664 		return error;
1665 
1666 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1667 }
1668 
/*
 * Register the RAID tuning/status sysctls under the device's sysctl
 * tree: member write-cache policy, default volume queue depth, resync
 * priority, and a read-only count of non-optimal volumes.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
#if __FreeBSD_version >= 500000
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
#endif
}
1696