xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 66c14b21d3ab0b18376563ba643ddb49b4fd33dd)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <dev/mpt/mpt.h>
39 #include <dev/mpt/mpt_raid.h>
40 
41 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
42 #include "dev/mpt/mpilib/mpi_raid.h"
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_sim.h>
47 #include <cam/cam_xpt_sim.h>
48 
49 #if __FreeBSD_version < 500000
50 #include <sys/devicestat.h>
51 #define	GIANT_REQUIRED
52 #endif
53 #include <cam/cam_periph.h>
54 
55 #include <sys/callout.h>
56 #include <sys/kthread.h>
57 #include <sys/sysctl.h>
58 
59 #include <machine/stdarg.h>
60 
/*
 * Per-request storage for the result of a RAID action command.
 * This structure is overlaid in the request buffer immediately after
 * the MSG_RAID_ACTION_REQUEST (see REQ_TO_RAID_ACTION_RESULT below).
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};
70 
/*
 * Locate the action result area stored directly after the RAID action
 * request message in a request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code, masking off flag bits (e.g. log info). */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
75 
76 
77 static mpt_probe_handler_t	mpt_raid_probe;
78 static mpt_attach_handler_t	mpt_raid_attach;
79 static mpt_event_handler_t	mpt_raid_event;
80 static mpt_shutdown_handler_t	mpt_raid_shutdown;
81 static mpt_reset_handler_t	mpt_raid_ioc_reset;
82 static mpt_detach_handler_t	mpt_raid_detach;
83 
84 static struct mpt_personality mpt_raid_personality =
85 {
86 	.name		= "mpt_raid",
87 	.probe		= mpt_raid_probe,
88 	.attach		= mpt_raid_attach,
89 	.event		= mpt_raid_event,
90 	.reset		= mpt_raid_ioc_reset,
91 	.shutdown	= mpt_raid_shutdown,
92 	.detach		= mpt_raid_detach,
93 };
94 
95 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
96 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
97 
98 static mpt_reply_handler_t mpt_raid_reply_handler;
99 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
100 					MSG_DEFAULT_REPLY *reply_frame);
101 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
102 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
103 static void mpt_raid_thread(void *arg);
104 static timeout_t mpt_raid_timer;
105 static timeout_t mpt_raid_quiesce_timeout;
106 #if 0
107 static void mpt_enable_vol(struct mpt_softc *mpt,
108 			   struct mpt_raid_volume *mpt_vol, int enable);
109 #endif
110 static void mpt_verify_mwce(struct mpt_softc *mpt,
111 			    struct mpt_raid_volume *mpt_vol);
112 static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
113 				   struct mpt_raid_volume *mpt_vol,
114 				   struct cam_path *path);
115 static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
116 
117 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
118 
119 const char *
120 mpt_vol_type(struct mpt_raid_volume *vol)
121 {
122 	switch (vol->config_page->VolumeType) {
123 	case MPI_RAID_VOL_TYPE_IS:
124 		return ("RAID-0");
125 	case MPI_RAID_VOL_TYPE_IME:
126 		return ("RAID-1E");
127 	case MPI_RAID_VOL_TYPE_IM:
128 		return ("RAID-1");
129 	default:
130 		return ("Unknown");
131 	}
132 }
133 
134 const char *
135 mpt_vol_state(struct mpt_raid_volume *vol)
136 {
137 	switch (vol->config_page->VolumeStatus.State) {
138 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
139 		return ("Optimal");
140 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
141 		return ("Degraded");
142 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
143 		return ("Failed");
144 	default:
145 		return ("Unknown");
146 	}
147 }
148 
149 const char *
150 mpt_disk_state(struct mpt_raid_disk *disk)
151 {
152 	switch (disk->config_page.PhysDiskStatus.State) {
153 	case MPI_PHYSDISK0_STATUS_ONLINE:
154 		return ("Online");
155 	case MPI_PHYSDISK0_STATUS_MISSING:
156 		return ("Missing");
157 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
158 		return ("Incompatible");
159 	case MPI_PHYSDISK0_STATUS_FAILED:
160 		return ("Failed");
161 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
162 		return ("Initializing");
163 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
164 		return ("Offline Requested");
165 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
166 		return ("Failed per Host Request");
167 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
168 		return ("Offline");
169 	default:
170 		return ("Unknown");
171 	}
172 }
173 
/*
 * printf-style logging helper that prefixes messages with the unit
 * name and the volume's array index, bus and target id.
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* The volume index is its position in the raid_volumes array. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
187 
/*
 * printf-style logging helper for physical disks.  Disks belonging to
 * a volume are identified by volume id and member number; bare disks
 * by their physical bus and target id.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
208 
209 static void
210 mpt_raid_async(void *callback_arg, u_int32_t code,
211 	       struct cam_path *path, void *arg)
212 {
213 	struct mpt_softc *mpt;
214 
215 	mpt = (struct mpt_softc*)callback_arg;
216 	switch (code) {
217 	case AC_FOUND_DEVICE:
218 	{
219 		struct ccb_getdev *cgd;
220 		struct mpt_raid_volume *mpt_vol;
221 
222 		cgd = (struct ccb_getdev *)arg;
223 		if (cgd == NULL)
224 			break;
225 
226 		mpt_lprt(mpt, MPT_PRT_DEBUG, " Callback for %d\n",
227 			 cgd->ccb_h.target_id);
228 
229 		RAID_VOL_FOREACH(mpt, mpt_vol) {
230 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
231 				continue;
232 
233 			if (mpt_vol->config_page->VolumeID
234 			 == cgd->ccb_h.target_id) {
235 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
236 				break;
237 			}
238 		}
239 	}
240 	default:
241 		break;
242 	}
243 }
244 
245 int
246 mpt_raid_probe(struct mpt_softc *mpt)
247 {
248 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
249 		return (ENODEV);
250 	}
251 	return (0);
252 }
253 
/*
 * Attach the RAID personality: register our reply handler, start the
 * RAID monitoring thread, hook AC_FOUND_DEVICE async events so volume
 * queue depths can be adjusted, and attach our sysctl nodes.
 *
 * Returns 0 on success or an errno value; on failure, partially
 * acquired resources are released via mpt_raid_detach().
 */
int
mpt_raid_attach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t	 handler;
	int		 error;

	mpt_callout_init(&mpt->raid_timer);

	handler.reply_handler = mpt_raid_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &raid_handler_id);
	if (error != 0)
		goto cleanup;

	error = mpt_spawn_raid_thread(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
		goto cleanup;
	}

	/* Register for new-device notifications on our bus. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_MPTLOCK(mpt);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		/* Non-fatal: we only lose automatic queue-depth tuning. */
		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
			"CAM async handler.\n");
	}

	mpt_raid_sysctl_attach(mpt);
	return (0);
cleanup:
	mpt_raid_detach(mpt);
	return (error);
}
294 
/*
 * Tear down the RAID personality: stop the status timer, terminate the
 * RAID thread, deregister our reply handler, and cancel the CAM async
 * callback registered at attach time.  Also used as the attach-failure
 * cleanup path, so every step must be safe on partial initialization.
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	mpt_terminate_raid_thread(mpt);

	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 disables all async notifications for us. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_action((union ccb *)&csa);
	CAMLOCK_2_MPTLOCK(mpt);
}
316 
/*
 * IOC reset notification.  No RAID-specific recovery is performed;
 * cached RAID state is refreshed by the RAID thread on its next run.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
322 
/*
 * Human readable descriptions of integrated RAID events, indexed by
 * the MPI_EVENT_RAID_RC_* reason code reported by the firmware.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
338 
339 static int
340 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
341 	       MSG_EVENT_NOTIFY_REPLY *msg)
342 {
343 	EVENT_DATA_RAID *raid_event;
344 	struct mpt_raid_volume *mpt_vol;
345 	struct mpt_raid_disk *mpt_disk;
346 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
347 	int i;
348 	int print_event;
349 
350 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID)
351 		return (/*handled*/0);
352 
353 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
354 
355 	mpt_vol = NULL;
356 	vol_pg = NULL;
357 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
358 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
359 			mpt_vol = &mpt->raid_volumes[i];
360 			vol_pg = mpt_vol->config_page;
361 
362 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
363 				continue;
364 
365 			if (vol_pg->VolumeID == raid_event->VolumeID
366 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
367 				break;
368 		}
369 		if (i >= mpt->ioc_page2->MaxVolumes) {
370 			mpt_vol = NULL;
371 			vol_pg = NULL;
372 		}
373 	}
374 
375 	mpt_disk = NULL;
376 	if (raid_event->PhysDiskNum != 0xFF
377 	 && mpt->raid_disks != NULL) {
378 		mpt_disk = mpt->raid_disks
379 			 + raid_event->PhysDiskNum;
380 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
381 			mpt_disk = NULL;
382 	}
383 
384 	print_event = 1;
385 	switch(raid_event->ReasonCode) {
386 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
387 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
388 		break;
389 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
390 		if (mpt_vol != NULL) {
391 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
392 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
393 			} else {
394 				/*
395 				 * Coalesce status messages into one
396 				 * per background run of our RAID thread.
397 				 * This removes "spurious" status messages
398 				 * from our output.
399 				 */
400 				print_event = 0;
401 			}
402 		}
403 		break;
404 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
405 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
406 		mpt->raid_rescan++;
407 		if (mpt_vol != NULL)
408 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
409 		break;
410 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
411 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
412 		mpt->raid_rescan++;
413 		break;
414 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
415 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
416 		mpt->raid_rescan++;
417 		if (mpt_disk != NULL)
418 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
419 		break;
420 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
421 		mpt->raid_rescan++;
422 		break;
423 	case MPI_EVENT_RAID_RC_SMART_DATA:
424 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
425 		break;
426 	}
427 
428 	if (print_event) {
429 		if (mpt_disk != NULL) {
430 			mpt_disk_prt(mpt, mpt_disk, "");
431 		} else if (mpt_vol != NULL) {
432 			mpt_vol_prt(mpt, mpt_vol, "");
433 		} else {
434 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
435 				raid_event->VolumeID);
436 
437 			if (raid_event->PhysDiskNum != 0xFF)
438 				mpt_prtc(mpt, ":%d): ",
439 					 raid_event->PhysDiskNum);
440 			else
441 				mpt_prtc(mpt, "): ");
442 		}
443 
444 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
445 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
446 				 raid_event->ReasonCode);
447 		else
448 			mpt_prtc(mpt, "%s\n",
449 				 raid_event_txt[raid_event->ReasonCode]);
450 	}
451 
452 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
453 		/* XXX Use CAM's print sense for this... */
454 		if (mpt_disk != NULL)
455 			mpt_disk_prt(mpt, mpt_disk, "");
456 		else
457 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
458 			    raid_event->VolumeBus, raid_event->VolumeID,
459 			    raid_event->PhysDiskNum);
460 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
461 			 raid_event->ASC, raid_event->ASCQ);
462 	}
463 
464 	mpt_raid_wakeup(mpt);
465 	return (/*handled*/1);
466 }
467 
468 static void
469 mpt_raid_shutdown(struct mpt_softc *mpt)
470 {
471 	struct mpt_raid_volume *mpt_vol;
472 
473 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
474 		return;
475 	}
476 
477 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
478 	RAID_VOL_FOREACH(mpt, mpt_vol) {
479 		mpt_verify_mwce(mpt, mpt_vol);
480 	}
481 }
482 
/*
 * Reply handler for RAID action requests.
 *
 * Parses any attached reply frame, marks the request done and removes
 * it from the pending list.  If a thread is sleeping on the request it
 * is woken and owns the cleanup; otherwise the request is freed when
 * the frame handler permits it.  Always returns TRUE so the reply
 * frame itself is released.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (/*free_reply*/TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		/* A waiter owns the request; it will free it. */
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (/*free_reply*/TRUE);
}
513 
/*
 * Parse additional completion information in the reply
 * frame for RAID I/O requests.
 *
 * For most actions the action status and data are copied into the
 * mpt_raid_action_result area that trails the request message (see
 * REQ_TO_RAID_ACTION_RESULT).  Quiesce/enable physical I/O replies
 * are not parsed yet.  Returns TRUE when the request may be freed.
 */
static int
mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
			     MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_raid_action_result *action_result;
	MSG_RAID_ACTION_REQUEST *rap;

	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
	req->IOCStatus = le16toh(reply->IOCStatus);
	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;

	switch (rap->Action) {
	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
		/*
		 * Parse result, call mpt_start with ccb,
		 * release device queue.
		 * COWWWWW
		 */
		break;
	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
		/*
		 * Need additional state for transition to enabled to
		 * protect against attempts to disable??
		 */
		break;
	default:
		action_result = REQ_TO_RAID_ACTION_RESULT(req);
		memcpy(&action_result->action_data, &reply->ActionData,
		       sizeof(action_result->action_data));
		action_result->action_status = reply->ActionStatus;
		break;
	}

	return (/*Free Request*/TRUE);
}
554 
/*
 * Utility routine to perform a RAID action command.
 */
558 int
559 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
560 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
561 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
562 		   int write, int wait)
563 {
564 	MSG_RAID_ACTION_REQUEST *rap;
565 	SGE_SIMPLE32 *se;
566 
567 	rap = req->req_vbuf;
568 	memset(rap, 0, sizeof *rap);
569 	rap->Action = Action;
570 	rap->ActionDataWord = ActionDataWord;
571 	rap->Function = MPI_FUNCTION_RAID_ACTION;
572 	rap->VolumeID = vol->config_page->VolumeID;
573 	rap->VolumeBus = vol->config_page->VolumeBus;
574 	if (disk != 0)
575 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
576 	else
577 		rap->PhysDiskNum = 0xFF;
578 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
579 	se->Address = addr;
580 	MPI_pSGE_SET_LENGTH(se, len);
581 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
582 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
583 	    MPI_SGE_FLAGS_END_OF_LIST |
584 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
585 	rap->MsgContext = htole32(req->index | raid_handler_id);
586 
587 	mpt_check_doorbell(mpt);
588 	mpt_send_cmd(mpt, req);
589 
590 	if (wait) {
591 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
592 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
593 	} else {
594 		return (0);
595 	}
596 }
597 
598 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID status monitoring kthread.  Returns 0 on success or
 * the error from mpt_kthread_create(); on failure the SIMQ frozen
 * below is released again.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0)
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
	return (error);
}
619 
/*
 * Ask the RAID thread to exit and wait for it to do so.  A no-op when
 * the thread was never created (e.g. attach failed early).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread sleeps on raid_volumes when idle; kick it awake. */
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
635 
636 static void
637 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
638 {
639 	xpt_free_path(ccb->ccb_h.path);
640 	free(ccb, M_DEVBUF);
641 }
642 
/*
 * Main loop of the RAID status monitoring thread.
 *
 * Sleeps until mpt_raid_wakeup() posts work, then refreshes the cached
 * RAID configuration pages and, when events requested it, initiates a
 * CAM rescan of the physical disk bus.  The SIMQ frozen in
 * mpt_spawn_raid_thread() is released after the first refresh.  Exits
 * when mpt->shutdwn_raid is set by mpt_terminate_raid_thread().
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

#if __FreeBSD_version >= 500000
	mtx_lock(&Giant);
#endif
	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			/* Matches the wakeup(mpt->raid_volumes) callers. */
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		mpt_refresh_raid_data(mpt);

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, /*run_queue*/TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;

			/* CCB and path are freed by the rescan callback. */
			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
			error = xpt_create_path(&path, xpt_periph,
						cam_sim_path(mpt->phydisk_sim),
						CAM_TARGET_WILDCARD,
						CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				free(ccb, M_DEVBUF);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				xpt_action(ccb);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
#if __FreeBSD_version >= 500000
	mtx_unlock(&Giant);
#endif
	kthread_exit(0);
}
707 
/*
 * Begin quiescing I/O to a physical disk ahead of a RAID action.
 *
 * Returns CAM_REQ_CMP if the disk is already quiesced,
 * CAM_REQ_INPROG after successfully issuing an asynchronous quiesce
 * request (the device queue is frozen until it completes or the
 * 5-second timeout fires), CAM_REQUEUE_REQ while a quiesce is still
 * in flight, or CAM_REQ_CMP_ERR if the request could not be issued.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		/* Abort the quiesce if it has not completed in 5 seconds. */
		ccb->ccb_h.timeout_ch =
			timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
757 
758 /* XXX Ignores that there may be multiple busses/IOCs involved. */
759 cam_status
760 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
761 {
762 	struct mpt_raid_disk *mpt_disk;
763 
764 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
765 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
766 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
767 
768 		*tgt = mpt_disk->config_page.PhysDiskID;
769 		return (0);
770 	}
771 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_map_physdisk(%d) - Not Active\n",
772 		 ccb->ccb_h.target_id);
773 	return (-1);
774 }
775 
#if 0
/*
 * Enable or disable a RAID volume via a RAID action request.
 * Currently compiled out.
 *
 * NOTE(review): the ETIMEDOUT path returns without freeing 'req' —
 * presumably intentional because the IOC may still own a timed-out
 * request, but confirm before enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
828 
/*
 * Reconcile a volume's member write cache enable (MWCE) setting with
 * the driver's configured policy (mpt->raid_mwce_setting), issuing a
 * CHANGE_VOLUME_SETTINGS RAID action when they differ.  The cached
 * config page is only updated after the IOC reports success.
 *
 * NOTE(review): the ETIMEDOUT path returns without freeing 'req' —
 * presumably intentional since the IOC may still own a timed-out
 * request; confirm before "fixing".
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarily when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the WCE bit only long enough to snapshot the desired
	 * settings word; the cached page keeps its old value until the
	 * IOC confirms the change below.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* The IOC accepted the change; update our cached copy. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
911 
/*
 * Reconcile a volume's resync rate and resync priority with the
 * driver's configured rate (mpt->raid_resync_rate).  Issues a
 * SET_RESYNC_RATE action when the numeric rate differs, or a
 * CHANGE_VOLUME_SETTINGS action when only the high/low priority bit
 * (rate >= 128 means high priority) is out of sync.
 *
 * NOTE(review): like mpt_verify_mwce(), the ETIMEDOUT paths return
 * without freeing 'req' — presumably intentional; confirm.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit only long enough to snapshot
		 * the desired settings word; the cached page is updated
		 * after the IOC confirms the change below.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1002 
1003 static void
1004 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1005 		       struct cam_path *path)
1006 {
1007 	struct ccb_relsim crs;
1008 
1009 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1010 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1011 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1012 	crs.openings = mpt->raid_queue_depth;
1013 	xpt_action((union ccb *)&crs);
1014 	if (crs.ccb_h.status != CAM_REQ_CMP)
1015 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1016 			    "with CAM status %#x\n", crs.ccb_h.status);
1017 }
1018 
/*
 * Log a summary of a RAID volume: its settings flags, hot spare pool
 * membership, and each member disk with its role (RAID-1 primary/
 * secondary) or stripe position.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each bit of the 16-bit settings word. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Singular/plural depends on how many pool bits are set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;

		mpt_disk = mpt->raid_disks
			 + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
			 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_prtc(mpt, "%s\n",
				 mpt_disk->member_number == 0
			       ? "Primary" : "Secondary");
		else
			mpt_prtc(mpt, "Stripe Position %d\n",
				 mpt_disk->member_number);
	}
}
1080 
1081 static void
1082 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1083 {
1084 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1085 	u_int i;
1086 
1087 	disk_pg = &mpt_disk->config_page;
1088 	mpt_disk_prt(mpt, mpt_disk,
1089 		     "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1090 		     device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1091 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1092 		     /*bus*/1, mpt_disk - mpt->raid_disks);
1093 
1094 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1095 		return;
1096 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1097 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1098 		   ? ":" : "s:");
1099 	for (i = 0; i < 8; i++) {
1100 		u_int mask;
1101 
1102 		mask = 0x1 << i;
1103 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1104 			continue;
1105 		mpt_prtc(mpt, " %d", i);
1106 	}
1107 	mpt_prtc(mpt, "\n");
1108 }
1109 
1110 static void
1111 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1112 		      IOC_3_PHYS_DISK *ioc_disk)
1113 {
1114 	int rv;
1115 
1116 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1117 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1118 				 &mpt_disk->config_page.Header,
1119 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1120 	if (rv != 0) {
1121 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1122 			"Failed to read RAID Disk Hdr(%d)\n",
1123 		 	ioc_disk->PhysDiskNum);
1124 		return;
1125 	}
1126 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1127 				   &mpt_disk->config_page.Header,
1128 				   sizeof(mpt_disk->config_page),
1129 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1130 	if (rv != 0)
1131 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1132 			"Failed to read RAID Disk Page(%d)\n",
1133 		 	ioc_disk->PhysDiskNum);
1134 }
1135 
/*
 * Re-read the RAID volume configuration page for one volume into its
 * cached in-core copy, refresh the member-disk mapping, and, if a
 * resync is in progress, fetch the resync progress indicator from
 * the firmware via a RAID action request.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* Data is stale until both reads below succeed. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
				 /*PageNumber*/0, ioc_vol->VolumePageNumber,
				 &vol_pg->Header, /*sleep_ok*/TRUE,
				 /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Hdr(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
				   &vol_pg->Header, mpt->raid_page0_len,
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Failed to read RAID Vol Page(%d)\n",
			    ioc_vol->VolumePageNumber);
		return;
	}
	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/* IM (mirror) volumes report 1-based map entries; rebase to 0. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
			mpt_disk->member_number--;
	}

	/* Only fetch the progress indicator while a resync is running. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_INDICATOR_STRUCT,
				/*ActionWord*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed on timeout — presumably
		 * because the firmware may still complete it later; confirm
		 * against mpt_issue_raid_req's completion handling.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch timedout!\n");
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
	} else {
		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
			    "Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1213 
1214 /*
1215  * Update in-core information about RAID support.  We update any entries
1216  * that didn't previously exists or have been marked as needing to
1217  * be updated by our event handler.  Interesting changes are displayed
1218  * to the console.
1219  */
1220 void
1221 mpt_refresh_raid_data(struct mpt_softc *mpt)
1222 {
1223 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1224 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1225 	IOC_3_PHYS_DISK *ioc_disk;
1226 	IOC_3_PHYS_DISK *ioc_last_disk;
1227 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1228 	size_t len;
1229 	int rv;
1230 	int i;
1231 	u_int nonopt_volumes;
1232 
1233 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1234 		return;
1235 	}
1236 
1237 	/*
1238 	 * Mark all items as unreferenced by the configuration.
1239 	 * This allows us to find, report, and discard stale
1240 	 * entries.
1241 	 */
1242 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1243 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1244 	}
1245 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1246 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1247 	}
1248 
1249 	/*
1250 	 * Get Physical Disk information.
1251 	 */
1252 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1253 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1254 				   &mpt->ioc_page3->Header, len,
1255 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1256 	if (rv) {
1257 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1258 			"Failed to read IOC Page 3\n");
1259 		return;
1260 	}
1261 
1262 	ioc_disk = mpt->ioc_page3->PhysDisk;
1263 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1264 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1265 		struct mpt_raid_disk *mpt_disk;
1266 
1267 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1268 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1269 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1270 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1271 
1272 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1273 
1274 		}
1275 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1276 		mpt->raid_rescan++;
1277 	}
1278 
1279 	/*
1280 	 * Refresh volume data.
1281 	 */
1282 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1283 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1284 				   &mpt->ioc_page2->Header, len,
1285 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1286 	if (rv) {
1287 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1288 			"Failed to read IOC Page 2\n");
1289 		return;
1290 	}
1291 
1292 	ioc_vol = mpt->ioc_page2->RaidVolume;
1293 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1294 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1295 		struct mpt_raid_volume *mpt_vol;
1296 
1297 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1298 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1299 		vol_pg = mpt_vol->config_page;
1300 		if (vol_pg == NULL)
1301 			continue;
1302 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1303 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1304 		 || (vol_pg->VolumeStatus.Flags
1305 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1306 
1307 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1308 		}
1309 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1310 	}
1311 
1312 	nonopt_volumes = 0;
1313 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1314 		struct mpt_raid_volume *mpt_vol;
1315 		uint64_t total;
1316 		uint64_t left;
1317 		int m;
1318 		u_int prio;
1319 
1320 		mpt_vol = &mpt->raid_volumes[i];
1321 
1322 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1323 			continue;
1324 
1325 		vol_pg = mpt_vol->config_page;
1326 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1327 		 == MPT_RVF_ANNOUNCED) {
1328 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1329 			mpt_vol->flags = 0;
1330 			continue;
1331 		}
1332 
1333 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1334 
1335 			mpt_announce_vol(mpt, mpt_vol);
1336 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1337 		}
1338 
1339 		if (vol_pg->VolumeStatus.State !=
1340 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1341 			nonopt_volumes++;
1342 
1343 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1344 			continue;
1345 
1346 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1347 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1348 			    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1349 		mpt_verify_mwce(mpt, mpt_vol);
1350 
1351 		if (vol_pg->VolumeStatus.Flags == 0)
1352 			continue;
1353 
1354 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1355 		for (m = 1; m <= 0x80; m <<= 1) {
1356 			switch (vol_pg->VolumeStatus.Flags & m) {
1357 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1358 				mpt_prtc(mpt, " Enabled");
1359 				break;
1360 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1361 				mpt_prtc(mpt, " Quiesced");
1362 				break;
1363 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1364 				mpt_prtc(mpt, " Re-Syncing");
1365 				break;
1366 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1367 				mpt_prtc(mpt, " Inactive");
1368 				break;
1369 			default:
1370 				break;
1371 			}
1372 		}
1373 		mpt_prtc(mpt, " )\n");
1374 
1375 		if ((vol_pg->VolumeStatus.Flags
1376 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1377 			continue;
1378 
1379 		mpt_verify_resync_rate(mpt, mpt_vol);
1380 
1381 		left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1382 		total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1383 		if (vol_pg->ResyncRate != 0) {
1384 
1385 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1386 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1387 			    prio / 1000, prio % 1000);
1388 		} else {
1389 			prio = vol_pg->VolumeSettings.Settings
1390 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1391 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1392 			    prio ? "High" : "Low");
1393 		}
1394 #if __FreeBSD_version >= 500000
1395 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1396 			    "blocks remaining\n", (uintmax_t)left,
1397 			    (uintmax_t)total);
1398 #else
1399 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1400 			    "blocks remaining\n", (uint64_t)left,
1401 			    (uint64_t)total);
1402 #endif
1403 
1404 		/* Periodically report on sync progress. */
1405 		mpt_schedule_raid_refresh(mpt);
1406 	}
1407 
1408 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1409 		struct mpt_raid_disk *mpt_disk;
1410 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1411 		int m;
1412 
1413 		mpt_disk = &mpt->raid_disks[i];
1414 		disk_pg = &mpt_disk->config_page;
1415 
1416 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1417 			continue;
1418 
1419 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1420 		 == MPT_RDF_ANNOUNCED) {
1421 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1422 			mpt_disk->flags = 0;
1423 			mpt->raid_rescan++;
1424 			continue;
1425 		}
1426 
1427 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1428 
1429 			mpt_announce_disk(mpt, mpt_disk);
1430 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1431 		}
1432 
1433 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1434 			continue;
1435 
1436 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1437 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1438 		if (disk_pg->PhysDiskStatus.Flags == 0)
1439 			continue;
1440 
1441 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1442 		for (m = 1; m <= 0x80; m <<= 1) {
1443 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1444 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1445 				mpt_prtc(mpt, " Out-Of-Sync");
1446 				break;
1447 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1448 				mpt_prtc(mpt, " Quiesced");
1449 				break;
1450 			default:
1451 				break;
1452 			}
1453 		}
1454 		mpt_prtc(mpt, " )\n");
1455 	}
1456 
1457 	mpt->raid_nonopt_volumes = nonopt_volumes;
1458 }
1459 
/*
 * Callout handler for the periodic RAID status timer; wakes the
 * RAID worker thread under the softc lock.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = (struct mpt_softc *)arg;

	MPT_LOCK(mpt);
	mpt_raid_wakeup(mpt);
	MPT_UNLOCK(mpt);
}
1470 
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/*
	 * XXX Unimplemented stub: on quiesce timeout this should
	 * complete the pending CCB with an error status.
	 */
}
1477 
/*
 * (Re)arm the RAID status timer so that mpt_raid_timer fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL ticks, triggering another refresh.
 */
void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{
	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1484 
1485 static int
1486 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1487 {
1488 	struct mpt_raid_volume *mpt_vol;
1489 
1490 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1491 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1492 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1493 		return (EINVAL);
1494 
1495 	MPT_LOCK(mpt);
1496 	mpt->raid_resync_rate = rate;
1497 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1498 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1499 			continue;
1500 		}
1501 		mpt_verify_resync_rate(mpt, mpt_vol);
1502 	}
1503 	MPT_UNLOCK(mpt);
1504 	return (0);
1505 }
1506 
1507 static int
1508 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1509 {
1510 	struct mpt_raid_volume *mpt_vol;
1511 
1512 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1513 		return (EINVAL);
1514 
1515 	MPT_LOCK(mpt);
1516 	mpt->raid_queue_depth = vol_queue_depth;
1517 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1518 		struct cam_path *path;
1519 		int error;
1520 
1521 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1522 			continue;
1523 
1524 		mpt->raid_rescan = 0;
1525 
1526 		error = xpt_create_path(&path, xpt_periph,
1527 					cam_sim_path(mpt->sim),
1528 					mpt_vol->config_page->VolumeID,
1529 					/*lun*/0);
1530 		if (error != CAM_REQ_CMP) {
1531 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1532 			continue;
1533 		}
1534 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1535 		xpt_free_path(path);
1536 	}
1537 	MPT_UNLOCK(mpt);
1538 	return (0);
1539 }
1540 
1541 static int
1542 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1543 {
1544 	struct mpt_raid_volume *mpt_vol;
1545 	int force_full_resync;
1546 
1547 	MPT_LOCK(mpt);
1548 	if (mwce == mpt->raid_mwce_setting) {
1549 		MPT_UNLOCK(mpt);
1550 		return (0);
1551 	}
1552 
1553 	/*
1554 	 * Catch MWCE being left on due to a failed shutdown.  Since
1555 	 * sysctls cannot be set by the loader, we treat the first
1556 	 * setting of this varible specially and force a full volume
1557 	 * resync if MWCE is enabled and a resync is in progress.
1558 	 */
1559 	force_full_resync = 0;
1560 	if (mpt->raid_mwce_set == 0
1561 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1562 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1563 		force_full_resync = 1;
1564 
1565 	mpt->raid_mwce_setting = mwce;
1566 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1567 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1568 		int resyncing;
1569 		int mwce;
1570 
1571 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1572 			continue;
1573 
1574 		vol_pg = mpt_vol->config_page;
1575 		resyncing = vol_pg->VolumeStatus.Flags
1576 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1577 		mwce = vol_pg->VolumeSettings.Settings
1578 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1579 		if (force_full_resync && resyncing && mwce) {
1580 
1581 			/*
1582 			 * XXX disable/enable volume should force a resync,
1583 			 *     but we'll need to queice, drain, and restart
1584 			 *     I/O to do that.
1585 			 */
1586 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1587 				    "detected.  Suggest full resync.\n");
1588 		}
1589 		mpt_verify_mwce(mpt, mpt_vol);
1590 	}
1591 	mpt->raid_mwce_set = 1;
1592 	MPT_UNLOCK(mpt);
1593 	return (0);
1594 }
1595 
/*
 * Printable names for the MWCE settings; indexed by
 * mpt->raid_mwce_setting in the vol_member_wce sysctl handler.
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1603 
1604 static int
1605 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1606 {
1607 	char inbuf[20];
1608 	struct mpt_softc *mpt;
1609 	const char *str;
1610 	int error;
1611 	u_int size;
1612 	u_int i;
1613 
1614 	GIANT_REQUIRED;
1615 	mpt = (struct mpt_softc *)arg1;
1616 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1617 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1618 	if (error || !req->newptr)
1619 		return (error);
1620 
1621 	size = req->newlen - req->newidx;
1622 	if (size >= sizeof(inbuf))
1623 		return (EINVAL);
1624 
1625 	error = SYSCTL_IN(req, inbuf, size);
1626 	if (error)
1627 		return (error);
1628 	inbuf[size] = '\0';
1629 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1630 
1631 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0)
1632 			return (mpt_raid_set_vol_mwce(mpt, i));
1633 	}
1634 	return (EINVAL);
1635 }
1636 
1637 static int
1638 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1639 {
1640 	struct mpt_softc *mpt;
1641 	u_int raid_resync_rate;
1642 	int error;
1643 
1644 	GIANT_REQUIRED;
1645 	mpt = (struct mpt_softc *)arg1;
1646 	raid_resync_rate = mpt->raid_resync_rate;
1647 
1648 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1649 	if (error || !req->newptr)
1650 		return error;
1651 
1652 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1653 }
1654 
1655 static int
1656 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1657 {
1658 	struct mpt_softc *mpt;
1659 	u_int raid_queue_depth;
1660 	int error;
1661 
1662 	GIANT_REQUIRED;
1663 	mpt = (struct mpt_softc *)arg1;
1664 	raid_queue_depth = mpt->raid_queue_depth;
1665 
1666 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1667 	if (error || !req->newptr)
1668 		return error;
1669 
1670 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1671 }
1672 
/*
 * Register this adapter's RAID tuning sysctls under its device
 * sysctl tree: vol_member_wce, vol_queue_depth, vol_resync_rate,
 * and the read-only nonoptimal_volumes count.  No-op on pre-5.x
 * FreeBSD, which lacks the device sysctl context API.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
#if __FreeBSD_version >= 500000
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
#endif
}
1700