xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 1aaed33edb24c98a09130cd66667d6a795b6b2a8)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56 
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define	GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62 
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66 
67 #include <machine/stdarg.h>
68 
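/*
 * Completion data for a RAID action request.  The reply handler copies the
 * IOC's ActionData and ActionStatus here; the structure lives directly
 * behind the MSG_RAID_ACTION_REQUEST in the request buffer (see
 * REQ_TO_RAID_ACTION_RESULT below).
 */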
69 struct mpt_raid_action_result
70 {
71 	union {
72 		MPI_RAID_VOL_INDICATOR	indicator_struct;
73 		uint32_t		new_settings;
74 		uint8_t			phys_disk_num;
75 	} action_data;
76 	uint16_t			action_status;
77 };
78 
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
81 
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83 
84 static mpt_probe_handler_t	mpt_raid_probe;
85 static mpt_attach_handler_t	mpt_raid_attach;
86 static mpt_enable_handler_t	mpt_raid_enable;
87 static mpt_event_handler_t	mpt_raid_event;
88 static mpt_shutdown_handler_t	mpt_raid_shutdown;
89 static mpt_reset_handler_t	mpt_raid_ioc_reset;
90 static mpt_detach_handler_t	mpt_raid_detach;
91 
92 static struct mpt_personality mpt_raid_personality =
93 {
94 	.name		= "mpt_raid",
95 	.probe		= mpt_raid_probe,
96 	.attach		= mpt_raid_attach,
97 	.enable		= mpt_raid_enable,
98 	.event		= mpt_raid_event,
99 	.reset		= mpt_raid_ioc_reset,
100 	.shutdown	= mpt_raid_shutdown,
101 	.detach		= mpt_raid_detach,
102 };
103 
104 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
105 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
106 
107 static mpt_reply_handler_t mpt_raid_reply_handler;
108 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
109 					MSG_DEFAULT_REPLY *reply_frame);
110 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
111 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
112 static void mpt_raid_thread(void *arg);
113 static timeout_t mpt_raid_timer;
114 #if 0
115 static void mpt_enable_vol(struct mpt_softc *mpt,
116 			   struct mpt_raid_volume *mpt_vol, int enable);
117 #endif
118 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
119 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
120     struct cam_path *);
121 #if __FreeBSD_version < 500000
122 #define	mpt_raid_sysctl_attach(x)	do { } while (0)
123 #else
124 static void mpt_raid_sysctl_attach(struct mpt_softc *);
125 #endif
126 
127 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
128 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
129 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
130 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
131     const char *fmt, ...);
132 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
133     const char *fmt, ...);
134 
135 static int mpt_issue_raid_req(struct mpt_softc *mpt,
136     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
137     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
138     int write, int wait);
139 
140 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
141 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
142 
143 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
144 
145 static const char *
146 mpt_vol_type(struct mpt_raid_volume *vol)
147 {
148 	switch (vol->config_page->VolumeType) {
149 	case MPI_RAID_VOL_TYPE_IS:
150 		return ("RAID-0");
151 	case MPI_RAID_VOL_TYPE_IME:
152 		return ("RAID-1E");
153 	case MPI_RAID_VOL_TYPE_IM:
154 		return ("RAID-1");
155 	default:
156 		return ("Unknown");
157 	}
158 }
159 
160 static const char *
161 mpt_vol_state(struct mpt_raid_volume *vol)
162 {
163 	switch (vol->config_page->VolumeStatus.State) {
164 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
165 		return ("Optimal");
166 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
167 		return ("Degraded");
168 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
169 		return ("Failed");
170 	default:
171 		return ("Unknown");
172 	}
173 }
174 
175 static const char *
176 mpt_disk_state(struct mpt_raid_disk *disk)
177 {
178 	switch (disk->config_page.PhysDiskStatus.State) {
179 	case MPI_PHYSDISK0_STATUS_ONLINE:
180 		return ("Online");
181 	case MPI_PHYSDISK0_STATUS_MISSING:
182 		return ("Missing");
183 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
184 		return ("Incompatible");
185 	case MPI_PHYSDISK0_STATUS_FAILED:
186 		return ("Failed");
187 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
188 		return ("Initializing");
189 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
190 		return ("Offline Requested");
191 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
192 		return ("Failed per Host Request");
193 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
194 		return ("Offline");
195 	default:
196 		return ("Unknown");
197 	}
198 }
199 
200 static void
201 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
202 	    const char *fmt, ...)
203 {
204 	va_list ap;
205 
206 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
207 	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
208 	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
209 	va_start(ap, fmt);
210 	vprintf(fmt, ap);
211 	va_end(ap);
212 }
213 
214 static void
215 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
216 	     const char *fmt, ...)
217 {
218 	va_list ap;
219 
220 	if (disk->volume != NULL) {
221 		printf("(%s:vol%d:%d): ",
222 		       device_get_nameunit(mpt->dev),
223 		       disk->volume->config_page->VolumeID,
224 		       disk->member_number);
225 	} else {
226 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
227 		       disk->config_page.PhysDiskBus,
228 		       disk->config_page.PhysDiskID);
229 	}
230 	va_start(ap, fmt);
231 	vprintf(fmt, ap);
232 	va_end(ap);
233 }
234 
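/*
 * CAM async callback.  When CAM reports a newly found device whose target
 * id matches an active RAID volume's VolumeID, adjust that volume's queue
 * depth to the driver's configured raid_queue_depth.
 */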
235 static void
236 mpt_raid_async(void *callback_arg, u_int32_t code,
237 	       struct cam_path *path, void *arg)
238 {
239 	struct mpt_softc *mpt;
240 
241 	mpt = (struct mpt_softc*)callback_arg;
242 	switch (code) {
243 	case AC_FOUND_DEVICE:
244 	{
245 		struct ccb_getdev *cgd;
246 		struct mpt_raid_volume *mpt_vol;
247 
248 		cgd = (struct ccb_getdev *)arg;
249 		if (cgd == NULL) {
250 			break;
251 		}
252 
253 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
254 			 cgd->ccb_h.target_id);
255 
256 		RAID_VOL_FOREACH(mpt, mpt_vol) {
257 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
258 				continue;
259 
260 			if (mpt_vol->config_page->VolumeID
261 			 == cgd->ccb_h.target_id) {
262 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
263 				break;
264 			}
265 		}
266 	}
267 	default:
268 		break;
269 	}
270 }
271 
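/*
 * A controller participates in the RAID personality only if IOC Page 2
 * reports a non-zero number of supported physical disks.
 */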
272 static int
273 mpt_raid_probe(struct mpt_softc *mpt)
274 {
275 
276 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
277 		return (ENODEV);
278 	}
279 	return (0);
280 }
281 
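/*
 * Attach the RAID personality: start the RAID monitoring thread, register
 * our reply handler, hook AC_FOUND_DEVICE async events from CAM, and attach
 * the sysctl knobs.  Failures unwind through mpt_raid_detach().
 */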
282 static int
283 mpt_raid_attach(struct mpt_softc *mpt)
284 {
285 	struct ccb_setasync csa;
286 	mpt_handler_t	 handler;
287 	int		 error;
288 
289 	mpt_callout_init(mpt, &mpt->raid_timer);
290 
291 	error = mpt_spawn_raid_thread(mpt);
292 	if (error != 0) {
293 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
294 		goto cleanup;
295 	}
296 
297 	MPT_LOCK(mpt);
298 	handler.reply_handler = mpt_raid_reply_handler;
299 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
300 				     &raid_handler_id);
301 	if (error != 0) {
302 		mpt_prt(mpt, "Unable to register RAID handler!\n");
303 		goto cleanup;
304 	}
305 
306 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
307 	csa.ccb_h.func_code = XPT_SASYNC_CB;
308 	csa.event_enable = AC_FOUND_DEVICE;
309 	csa.callback = mpt_raid_async;
310 	csa.callback_arg = mpt;
311 	xpt_action((union ccb *)&csa);
312 	if (csa.ccb_h.status != CAM_REQ_CMP) {
313 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
314 			"CAM async handler.\n");
315 	}
316 	MPT_UNLOCK(mpt);
317 
318 	mpt_raid_sysctl_attach(mpt);
319 	return (0);
320 cleanup:
321 	MPT_UNLOCK(mpt);
322 	mpt_raid_detach(mpt);
323 	return (error);
324 }
325 
326 static int
327 mpt_raid_enable(struct mpt_softc *mpt)
328 {
329 
330 	return (0);
331 }
332 
333 static void
334 mpt_raid_detach(struct mpt_softc *mpt)
335 {
336 	struct ccb_setasync csa;
337 	mpt_handler_t handler;
338 
339 	mpt_callout_drain(mpt, &mpt->raid_timer);
340 
341 	MPT_LOCK(mpt);
342 	mpt_terminate_raid_thread(mpt);
343 	handler.reply_handler = mpt_raid_reply_handler;
344 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
345 			       raid_handler_id);
346 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
347 	csa.ccb_h.func_code = XPT_SASYNC_CB;
348 	csa.event_enable = 0;
349 	csa.callback = mpt_raid_async;
350 	csa.callback_arg = mpt;
351 	xpt_action((union ccb *)&csa);
352 	MPT_UNLOCK(mpt);
353 }
354 
355 static void
356 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
357 {
358 
359 	/* Nothing to do yet. */
360 }
361 
362 static const char *raid_event_txt[] =
363 {
364 	"Volume Created",
365 	"Volume Deleted",
366 	"Volume Settings Changed",
367 	"Volume Status Changed",
368 	"Volume Physical Disk Membership Changed",
369 	"Physical Disk Created",
370 	"Physical Disk Deleted",
371 	"Physical Disk Settings Changed",
372 	"Physical Disk Status Changed",
373 	"Domain Validation Required",
374 	"SMART Data Received",
375 	"Replace Action Started",
376 };
377 
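/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications.  Map the event to the
 * affected volume and/or physical disk, mark the cached configuration
 * pages stale, request a rescan where membership may have changed, log
 * the event, and wake the RAID thread to refresh its data.
 */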
378 static int
379 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
380 	       MSG_EVENT_NOTIFY_REPLY *msg)
381 {
382 	EVENT_DATA_RAID *raid_event;
383 	struct mpt_raid_volume *mpt_vol;
384 	struct mpt_raid_disk *mpt_disk;
385 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
386 	int i;
387 	int print_event;
388 
389 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
390 		return (0);
391 	}
392 
393 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
394 
395 	mpt_vol = NULL;
396 	vol_pg = NULL;
397 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
398 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
399 			mpt_vol = &mpt->raid_volumes[i];
400 			vol_pg = mpt_vol->config_page;
401 
402 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
403 				continue;
404 
405 			if (vol_pg->VolumeID == raid_event->VolumeID
406 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
407 				break;
408 		}
409 		if (i >= mpt->ioc_page2->MaxVolumes) {
410 			mpt_vol = NULL;
411 			vol_pg = NULL;
412 		}
413 	}
414 
415 	mpt_disk = NULL;
416 	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
417 		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
418 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
419 			mpt_disk = NULL;
420 		}
421 	}
422 
423 	print_event = 1;
424 	switch(raid_event->ReasonCode) {
425 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
426 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
427 		break;
428 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
429 		if (mpt_vol != NULL) {
430 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
431 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
432 			} else {
433 				/*
434 				 * Coalesce status messages into one
435 				 * per background run of our RAID thread.
436 				 * This removes "spurious" status messages
437 				 * from our output.
438 				 */
439 				print_event = 0;
440 			}
441 		}
442 		break;
443 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
444 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
445 		mpt->raid_rescan++;
446 		if (mpt_vol != NULL) {
447 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
448 		}
449 		break;
450 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
451 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
452 		mpt->raid_rescan++;
453 		break;
454 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
455 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
456 		mpt->raid_rescan++;
457 		if (mpt_disk != NULL) {
458 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
459 		}
460 		break;
461 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
462 		mpt->raid_rescan++;
463 		break;
464 	case MPI_EVENT_RAID_RC_SMART_DATA:
465 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
466 		break;
467 	}
468 
469 	if (print_event) {
470 		if (mpt_disk != NULL) {
471 			mpt_disk_prt(mpt, mpt_disk, "");
472 		} else if (mpt_vol != NULL) {
473 			mpt_vol_prt(mpt, mpt_vol, "");
474 		} else {
475 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
476 				raid_event->VolumeID);
477 
478 			if (raid_event->PhysDiskNum != 0xFF)
479 				mpt_prtc(mpt, ":%d): ",
480 					 raid_event->PhysDiskNum);
481 			else
482 				mpt_prtc(mpt, "): ");
483 		}
484 
485 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
486 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
487 				 raid_event->ReasonCode);
488 		else
489 			mpt_prtc(mpt, "%s\n",
490 				 raid_event_txt[raid_event->ReasonCode]);
491 	}
492 
493 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
494 		/* XXX Use CAM's print sense for this... */
495 		if (mpt_disk != NULL)
496 			mpt_disk_prt(mpt, mpt_disk, "");
497 		else
498 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
499 			    raid_event->VolumeBus, raid_event->VolumeID,
500 			    raid_event->PhysDiskNum);
501 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
502 			 raid_event->ASC, raid_event->ASCQ);
503 	}
504 
505 	mpt_raid_wakeup(mpt);
506 	return (1);
507 }
508 
509 static void
510 mpt_raid_shutdown(struct mpt_softc *mpt)
511 {
512 	struct mpt_raid_volume *mpt_vol;
513 
514 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
515 		return;
516 	}
517 
518 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
519 	RAID_VOL_FOREACH(mpt, mpt_vol) {
520 		mpt_verify_mwce(mpt, mpt_vol);
521 	}
522 }
523 
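/*
 * Generic completion path for RAID action requests.  Parse the reply (if
 * any), mark the request done, and either wake a waiter or free the
 * request.  Always returns TRUE to indicate the reply was consumed.
 */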
524 static int
525 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
526     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
527 {
528 	int free_req;
529 
530 	if (req == NULL)
531 		return (TRUE);
532 
533 	free_req = TRUE;
534 	if (reply_frame != NULL)
535 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
536 #ifdef NOTYET
537 	else if (req->ccb != NULL) {
538 		/* Complete Quiesce CCB with error... */
539 	}
540 #endif
541 
542 	req->state &= ~REQ_STATE_QUEUED;
543 	req->state |= REQ_STATE_DONE;
544 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
545 
546 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
547 		wakeup(req);
548 	} else if (free_req) {
549 		mpt_free_request(mpt, req);
550 	}
551 
552 	return (TRUE);
553 }
554 
555 /*
556  * Parse additional completion information in the reply
557  * frame for RAID I/O requests.
558  */
559 static int
560 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
561     MSG_DEFAULT_REPLY *reply_frame)
562 {
563 	MSG_RAID_ACTION_REPLY *reply;
564 	struct mpt_raid_action_result *action_result;
565 	MSG_RAID_ACTION_REQUEST *rap;
566 
567 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
568 	req->IOCStatus = le16toh(reply->IOCStatus);
569 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
570 
571 	switch (rap->Action) {
572 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
573 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
574 		break;
575 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
576 		mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
577 		break;
578 	default:
579 		break;
580 	}
581 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
582 	memcpy(&action_result->action_data, &reply->ActionData,
583 	    sizeof(action_result->action_data));
584 	action_result->action_status = le16toh(reply->ActionStatus);
585 	return (TRUE);
586 }
587 
588 /*
589  * Utility routine to perform a RAID action command.
590  */
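/*
 * Build a MSG_RAID_ACTION_REQUEST with a single simple SGE describing
 * (addr, len), target it at the given volume (and optional physical disk),
 * and send it to the IOC.  With "wait" set the call polls up to two seconds
 * for completion, after which the result is available via
 * REQ_TO_RAID_ACTION_RESULT(req).  A typical call, as used by
 * mpt_verify_mwce() below, looks roughly like:
 *
 *	req = mpt_get_request(mpt, TRUE);
 *	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
 *	    MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS, data,
 *	    0, 0, FALSE, TRUE);
 */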
591 static int
592 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
593 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
594 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
595 		   int write, int wait)
596 {
597 	MSG_RAID_ACTION_REQUEST *rap;
598 	SGE_SIMPLE32 *se;
599 
600 	rap = req->req_vbuf;
601 	memset(rap, 0, sizeof *rap);
602 	rap->Action = Action;
603 	rap->ActionDataWord = htole32(ActionDataWord);
604 	rap->Function = MPI_FUNCTION_RAID_ACTION;
605 	rap->VolumeID = vol->config_page->VolumeID;
606 	rap->VolumeBus = vol->config_page->VolumeBus;
607 	if (disk != NULL)
608 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
609 	else
610 		rap->PhysDiskNum = 0xFF;
611 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
612 	se->Address = htole32(addr);
613 	MPI_pSGE_SET_LENGTH(se, len);
614 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
615 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
616 	    MPI_SGE_FLAGS_END_OF_LIST |
617 	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
618 	se->FlagsLength = htole32(se->FlagsLength);
619 	rap->MsgContext = htole32(req->index | raid_handler_id);
620 
621 	mpt_check_doorbell(mpt);
622 	mpt_send_cmd(mpt, req);
623 
624 	if (wait) {
625 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
626 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
627 	} else {
628 		return (0);
629 	}
630 }
631 
632 /*************************** RAID Status Monitoring ***************************/
633 static int
634 mpt_spawn_raid_thread(struct mpt_softc *mpt)
635 {
636 	int error;
637 
638 	/*
639 	 * Freeze out any CAM transactions until our thread
640 	 * is able to run at least once.  We need to update
641 	 * our RAID pages before accepting I/O or we may
642 	 * reject I/O to an ID we later determine is for a
643 	 * hidden physdisk.
644 	 */
645 	MPT_LOCK(mpt);
646 	xpt_freeze_simq(mpt->phydisk_sim, 1);
647 	MPT_UNLOCK(mpt);
648 	error = mpt_kthread_create(mpt_raid_thread, mpt,
649 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
650 	    "mpt_raid%d", mpt->unit);
651 	if (error != 0) {
652 		MPT_LOCK(mpt);
653 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
654 		MPT_UNLOCK(mpt);
655 	}
656 	return (error);
657 }
658 
659 static void
660 mpt_terminate_raid_thread(struct mpt_softc *mpt)
661 {
662 
663 	if (mpt->raid_thread == NULL) {
664 		return;
665 	}
666 	mpt->shutdwn_raid = 1;
667 	wakeup(&mpt->raid_volumes);
668 	/*
669 	 * Sleep on a slightly different location
670 	 * for this interlock just for added safety.
671 	 */
672 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
673 }
674 
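/*
 * RAID monitoring thread.  Sleeps until woken (by events or the periodic
 * timer), refreshes the cached RAID configuration, releases the physical
 * disk SIMQ after the first successful refresh, and issues a CAM rescan of
 * the pass-through bus whenever raid_rescan has been bumped.
 */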
675 static void
676 mpt_raid_thread(void *arg)
677 {
678 	struct mpt_softc *mpt;
679 	int firstrun;
680 
681 	mpt = (struct mpt_softc *)arg;
682 	firstrun = 1;
683 	MPT_LOCK(mpt);
684 	while (mpt->shutdwn_raid == 0) {
685 
686 		if (mpt->raid_wakeup == 0) {
687 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
688 			continue;
689 		}
690 
691 		mpt->raid_wakeup = 0;
692 
693 		if (mpt_refresh_raid_data(mpt)) {
694 			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
695 			continue;
696 		}
697 
698 		/*
699 		 * Now that we have our first snapshot of RAID data,
700 		 * allow CAM to access our physical disk bus.
701 		 */
702 		if (firstrun) {
703 			firstrun = 0;
704 			MPTLOCK_2_CAMLOCK(mpt);
705 			xpt_release_simq(mpt->phydisk_sim, TRUE);
706 			CAMLOCK_2_MPTLOCK(mpt);
707 		}
708 
709 		if (mpt->raid_rescan != 0) {
710 			union ccb *ccb;
711 			int error;
712 
713 			mpt->raid_rescan = 0;
714 			MPT_UNLOCK(mpt);
715 
716 			ccb = xpt_alloc_ccb();
717 
718 			MPT_LOCK(mpt);
719 			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
720 			    cam_sim_path(mpt->phydisk_sim),
721 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
722 			if (error != CAM_REQ_CMP) {
723 				xpt_free_ccb(ccb);
724 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
725 			} else {
726 				xpt_rescan(ccb);
727 			}
728 		}
729 	}
730 	mpt->raid_thread = NULL;
731 	wakeup(&mpt->raid_thread);
732 	MPT_UNLOCK(mpt);
733 	mpt_kthread_exit(0);
734 }
735 
736 #if 0
737 static void
738 mpt_raid_quiesce_timeout(void *arg)
739 {
740 
741 	/* Complete the CCB with error */
742 	/* COWWWW */
743 }
744 
745 static timeout_t mpt_raid_quiesce_timeout;
746 cam_status
747 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
748 		      request_t *req)
749 {
750 	union ccb *ccb;
751 
752 	ccb = req->ccb;
753 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
754 		return (CAM_REQ_CMP);
755 
756 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
757 		int rv;
758 
759 		mpt_disk->flags |= MPT_RDF_QUIESCING;
760 		xpt_freeze_devq(ccb->ccb_h.path, 1);
761 
762 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
763 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
764 					/*ActionData*/0, /*addr*/0,
765 					/*len*/0, /*write*/FALSE,
766 					/*wait*/FALSE);
767 		if (rv != 0)
768 			return (CAM_REQ_CMP_ERR);
769 
770 		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
771 #if 0
772 		if (rv == ETIMEDOUT) {
773 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
774 				     "Quiesce Timed-out\n");
775 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
776 			return (CAM_REQ_CMP_ERR);
777 		}
778 
779 		ar = REQ_TO_RAID_ACTION_RESULT(req);
780 		if (rv != 0
781 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
782 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
783 			mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
784 				    "%d:%x:%x\n", rv, req->IOCStatus,
785 				    ar->action_status);
786 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
787 			return (CAM_REQ_CMP_ERR);
788 		}
789 #endif
790 		return (CAM_REQ_INPROG);
791 	}
792 	return (CAM_REQUEUE_REQ);
793 }
794 #endif
795 
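/*
 * Translate a pass-through bus target id (an index into mpt->raid_disks)
 * into the physical SCSI id of the underlying disk, failing if that slot
 * is not an active RAID member.
 */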
796 /* XXX Ignores that there may be multiple busses/IOCs involved. */
797 cam_status
798 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
799 {
800 	struct mpt_raid_disk *mpt_disk;
801 
802 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
803 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
804 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
805 		*tgt = mpt_disk->config_page.PhysDiskID;
806 		return (0);
807 	}
808 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
809 		 ccb->ccb_h.target_id);
810 	return (-1);
811 }
812 
813 /* XXX Ignores that there may be multiple busses/IOCs involved. */
814 int
815 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
816 {
817 	struct mpt_raid_disk *mpt_disk;
818 	int i;
819 
820 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
821 		return (0);
822 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
823 		mpt_disk = &mpt->raid_disks[i];
824 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
825 		    mpt_disk->config_page.PhysDiskID == tgt)
826 			return (1);
827 	}
828 	return (0);
829 
830 }
831 
832 /* XXX Ignores that there may be multiple busses/IOCs involved. */
833 int
834 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
835 {
836 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
837 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
838 
839 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
840 		return (0);
841 	}
842 	ioc_vol = mpt->ioc_page2->RaidVolume;
843 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
844 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
845 		if (ioc_vol->VolumeID == tgt) {
846 			return (1);
847 		}
848 	}
849 	return (0);
850 }
851 
852 #if 0
853 static void
854 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
855 	       int enable)
856 {
857 	request_t *req;
858 	struct mpt_raid_action_result *ar;
859 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
860 	int enabled;
861 	int rv;
862 
863 	vol_pg = mpt_vol->config_page;
864 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
865 
866 	/*
867 	 * If the setting matches the configuration,
868 	 * there is nothing to do.
869 	 */
870 	if ((enabled && enable)
871 	 || (!enabled && !enable))
872 		return;
873 
874 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
875 	if (req == NULL) {
876 		mpt_vol_prt(mpt, mpt_vol,
877 			    "mpt_enable_vol: Get request failed!\n");
878 		return;
879 	}
880 
881 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
882 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
883 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
884 				/*data*/0, /*addr*/0, /*len*/0,
885 				/*write*/FALSE, /*wait*/TRUE);
886 	if (rv == ETIMEDOUT) {
887 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
888 			    "%s Volume Timed-out\n",
889 			    enable ? "Enable" : "Disable");
890 		return;
891 	}
892 	ar = REQ_TO_RAID_ACTION_RESULT(req);
893 	if (rv != 0
894 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
895 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
896 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
897 			    enable ? "Enable" : "Disable",
898 			    rv, req->IOCStatus, ar->action_status);
899 	}
900 
901 	mpt_free_request(mpt, req);
902 }
903 #endif
904 
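/*
 * Bring a volume's member Write Cache Enable setting in line with the
 * driver policy in mpt->raid_mwce_setting.  When they disagree, the WCE bit
 * is toggled with a CHANGE_VOLUME_SETTINGS RAID action; the REBUILD_ONLY
 * policy additionally tracks the transient resync-state change that follows
 * a WCE update (see the inline comment below).
 */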
905 static void
906 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
907 {
908 	request_t *req;
909 	struct mpt_raid_action_result *ar;
910 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
911 	uint32_t data;
912 	int rv;
913 	int resyncing;
914 	int mwce;
915 
916 	vol_pg = mpt_vol->config_page;
917 	resyncing = vol_pg->VolumeStatus.Flags
918 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
919 	mwce = vol_pg->VolumeSettings.Settings
920 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
921 
922 	/*
923 	 * If the setting matches the configuration,
924 	 * there is nothing to do.
925 	 */
926 	switch (mpt->raid_mwce_setting) {
927 	case MPT_RAID_MWCE_REBUILD_ONLY:
928 		if ((resyncing && mwce) || (!resyncing && !mwce)) {
929 			return;
930 		}
931 		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
932 		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
933 			/*
934 			 * Wait one more status update to see if
935 			 * resyncing gets enabled.  It gets disabled
936 			 * temporarily when WCE is changed.
937 			 */
938 			return;
939 		}
940 		break;
941 	case MPT_RAID_MWCE_ON:
942 		if (mwce)
943 			return;
944 		break;
945 	case MPT_RAID_MWCE_OFF:
946 		if (!mwce)
947 			return;
948 		break;
949 	case MPT_RAID_MWCE_NC:
950 		return;
951 	}
952 
953 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
954 	if (req == NULL) {
955 		mpt_vol_prt(mpt, mpt_vol,
956 			    "mpt_verify_mwce: Get request failed!\n");
957 		return;
958 	}
959 
960 	vol_pg->VolumeSettings.Settings ^=
961 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
962 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
963 	vol_pg->VolumeSettings.Settings ^=
964 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
965 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
966 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
967 				data, /*addr*/0, /*len*/0,
968 				/*write*/FALSE, /*wait*/TRUE);
969 	if (rv == ETIMEDOUT) {
970 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
971 			    "Write Cache Enable Timed-out\n");
972 		return;
973 	}
974 	ar = REQ_TO_RAID_ACTION_RESULT(req);
975 	if (rv != 0
976 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
977 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
978 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
979 			    "%d:%x:%x\n", rv, req->IOCStatus,
980 			    ar->action_status);
981 	} else {
982 		vol_pg->VolumeSettings.Settings ^=
983 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
984 	}
985 	mpt_free_request(mpt, req);
986 }
987 
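/*
 * Push the administratively configured resync rate to the volume: either a
 * SET_RESYNC_RATE action when the explicit rate differs, or a toggle of the
 * high/low resync priority bit via CHANGE_VOLUME_SETTINGS when only the
 * priority (a rate of 128 or more means high) is out of sync.
 */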
988 static void
989 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
990 {
991 	request_t *req;
992 	struct mpt_raid_action_result *ar;
993 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
994 	u_int prio;
995 	int rv;
996 
997 	vol_pg = mpt_vol->config_page;
998 
999 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
1000 		return;
1001 
1002 	/*
1003 	 * If the current RAID resync rate does not
1004 	 * match our configured rate, update it.
1005 	 */
1006 	prio = vol_pg->VolumeSettings.Settings
1007 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1008 	if (vol_pg->ResyncRate != 0
1009 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1010 
1011 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1012 		if (req == NULL) {
1013 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1014 				    "Get request failed!\n");
1015 			return;
1016 		}
1017 
1018 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1019 					MPI_RAID_ACTION_SET_RESYNC_RATE,
1020 					mpt->raid_resync_rate, /*addr*/0,
1021 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
1022 		if (rv == ETIMEDOUT) {
1023 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1024 				    "Resync Rate Setting Timed-out\n");
1025 			return;
1026 		}
1027 
1028 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1029 		if (rv != 0
1030 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1031 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1032 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1033 				    "%d:%x:%x\n", rv, req->IOCStatus,
1034 				    ar->action_status);
1035 		} else
1036 			vol_pg->ResyncRate = mpt->raid_resync_rate;
1037 		mpt_free_request(mpt, req);
1038 	} else if ((prio && mpt->raid_resync_rate < 128)
1039 		|| (!prio && mpt->raid_resync_rate >= 128)) {
1040 		uint32_t data;
1041 
1042 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1043 		if (req == NULL) {
1044 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1045 				    "Get request failed!\n");
1046 			return;
1047 		}
1048 
1049 		vol_pg->VolumeSettings.Settings ^=
1050 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1051 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1052 		vol_pg->VolumeSettings.Settings ^=
1053 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1054 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1055 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1056 					data, /*addr*/0, /*len*/0,
1057 					/*write*/FALSE, /*wait*/TRUE);
1058 		if (rv == ETIMEDOUT) {
1059 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1060 				    "Resync Rate Setting Timed-out\n");
1061 			return;
1062 		}
1063 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1064 		if (rv != 0
1065 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1066 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1067 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1068 				    "%d:%x:%x\n", rv, req->IOCStatus,
1069 				    ar->action_status);
1070 		} else {
1071 			vol_pg->VolumeSettings.Settings ^=
1072 			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1073 		}
1074 
1075 		mpt_free_request(mpt, req);
1076 	}
1077 }
1078 
1079 static void
1080 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1081 		       struct cam_path *path)
1082 {
1083 	struct ccb_relsim crs;
1084 
1085 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1086 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1087 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1088 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1089 	crs.openings = mpt->raid_queue_depth;
1090 	xpt_action((union ccb *)&crs);
1091 	if (crs.ccb_h.status != CAM_REQ_CMP)
1092 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1093 			    "with CAM status %#x\n", crs.ccb_h.status);
1094 }
1095 
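/*
 * Log a one-time description of a RAID volume: its settings flags, any hot
 * spare pool membership, and the identity and state of each member disk on
 * the pass-through bus.
 */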
1096 static void
1097 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1098 {
1099 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1100 	u_int i;
1101 
1102 	vol_pg = mpt_vol->config_page;
1103 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
1104 	for (i = 1; i <= 0x8000; i <<= 1) {
1105 		switch (vol_pg->VolumeSettings.Settings & i) {
1106 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1107 			mpt_prtc(mpt, " Member-WCE");
1108 			break;
1109 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1110 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1111 			break;
1112 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1113 			mpt_prtc(mpt, " Hot-Plug-Spares");
1114 			break;
1115 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1116 			mpt_prtc(mpt, " High-Priority-ReSync");
1117 			break;
1118 		default:
1119 			break;
1120 		}
1121 	}
1122 	mpt_prtc(mpt, " )\n");
1123 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1124 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1125 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1126 			  ? ":" : "s:");
1127 		for (i = 0; i < 8; i++) {
1128 			u_int mask;
1129 
1130 			mask = 0x1 << i;
1131 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1132 				continue;
1133 			mpt_prtc(mpt, " %d", i);
1134 		}
1135 		mpt_prtc(mpt, "\n");
1136 	}
1137 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1138 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1139 		struct mpt_raid_disk *mpt_disk;
1140 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1141 		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1142 		U8 f, s;
1143 
1144 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1145 		disk_pg = &mpt_disk->config_page;
1146 		mpt_prtc(mpt, "      ");
1147 		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1148 			 pt_bus, disk_pg->PhysDiskID);
1149 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1150 			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1151 			    "Primary" : "Secondary");
1152 		} else {
1153 			mpt_prtc(mpt, "Stripe Position %d",
1154 				 mpt_disk->member_number);
1155 		}
1156 		f = disk_pg->PhysDiskStatus.Flags;
1157 		s = disk_pg->PhysDiskStatus.State;
1158 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1159 			mpt_prtc(mpt, " Out of Sync");
1160 		}
1161 		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1162 			mpt_prtc(mpt, " Quiesced");
1163 		}
1164 		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1165 			mpt_prtc(mpt, " Inactive");
1166 		}
1167 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1168 			mpt_prtc(mpt, " Was Optimal");
1169 		}
1170 		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1171 			mpt_prtc(mpt, " Was Non-Optimal");
1172 		}
1173 		switch (s) {
1174 		case MPI_PHYSDISK0_STATUS_ONLINE:
1175 			mpt_prtc(mpt, " Online");
1176 			break;
1177 		case MPI_PHYSDISK0_STATUS_MISSING:
1178 			mpt_prtc(mpt, " Missing");
1179 			break;
1180 		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1181 			mpt_prtc(mpt, " Incompatible");
1182 			break;
1183 		case MPI_PHYSDISK0_STATUS_FAILED:
1184 			mpt_prtc(mpt, " Failed");
1185 			break;
1186 		case MPI_PHYSDISK0_STATUS_INITIALIZING:
1187 			mpt_prtc(mpt, " Initializing");
1188 			break;
1189 		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1190 			mpt_prtc(mpt, " Requested Offline");
1191 			break;
1192 		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1193 			mpt_prtc(mpt, " Requested Failed");
1194 			break;
1195 		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1196 		default:
1197 			mpt_prtc(mpt, " Offline Other (%x)", s);
1198 			break;
1199 		}
1200 		mpt_prtc(mpt, "\n");
1201 	}
1202 }
1203 
1204 static void
1205 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1206 {
1207 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1208 	int rd_bus = cam_sim_bus(mpt->sim);
1209 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1210 	u_int i;
1211 
1212 	disk_pg = &mpt_disk->config_page;
1213 	mpt_disk_prt(mpt, mpt_disk,
1214 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1215 		     device_get_nameunit(mpt->dev), rd_bus,
1216 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1217 		     pt_bus, mpt_disk - mpt->raid_disks);
1218 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1219 		return;
1220 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1221 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1222 		   ? ":" : "s:");
1223 	for (i = 0; i < 8; i++) {
1224 		u_int mask;
1225 
1226 		mask = 0x1 << i;
1227 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1228 			continue;
1229 		mpt_prtc(mpt, " %d", i);
1230 	}
1231 	mpt_prtc(mpt, "\n");
1232 }
1233 
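/*
 * Re-read RAID Physical Disk Page 0 for a member disk listed in IOC Page 3
 * and convert it to host byte order.
 */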
1234 static void
1235 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1236 		      IOC_3_PHYS_DISK *ioc_disk)
1237 {
1238 	int rv;
1239 
1240 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1241 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1242 				 &mpt_disk->config_page.Header,
1243 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1244 	if (rv != 0) {
1245 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1246 			"Failed to read RAID Disk Hdr(%d)\n",
1247 		 	ioc_disk->PhysDiskNum);
1248 		return;
1249 	}
1250 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1251 				   &mpt_disk->config_page.Header,
1252 				   sizeof(mpt_disk->config_page),
1253 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1254 	if (rv != 0)
1255 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1256 			"Failed to read RAID Disk Page(%d)\n",
1257 		 	ioc_disk->PhysDiskNum);
1258 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1259 }
1260 
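/*
 * Re-read RAID Volume Page 0 for a volume listed in IOC Page 2, rebuild the
 * member-disk back pointers, and, if a resync is in progress, fetch the
 * progress indicator with an INDICATOR_STRUCT RAID action.
 */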
1261 static void
1262 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1263     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1264 {
1265 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1266 	struct mpt_raid_action_result *ar;
1267 	request_t *req;
1268 	int rv;
1269 	int i;
1270 
1271 	vol_pg = mpt_vol->config_page;
1272 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1273 
1274 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1275 	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1276 	if (rv != 0) {
1277 		mpt_vol_prt(mpt, mpt_vol,
1278 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1279 		    ioc_vol->VolumePageNumber);
1280 		return;
1281 	}
1282 
1283 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1284 	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1285 	if (rv != 0) {
1286 		mpt_vol_prt(mpt, mpt_vol,
1287 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1288 		    ioc_vol->VolumePageNumber);
1289 		return;
1290 	}
1291 	mpt2host_config_page_raid_vol_0(vol_pg);
1292 
1293 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1294 
1295 	/* Update disk entry array data. */
1296 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1297 		struct mpt_raid_disk *mpt_disk;
1298 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1299 		mpt_disk->volume = mpt_vol;
1300 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1301 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1302 			mpt_disk->member_number--;
1303 		}
1304 	}
1305 
1306 	if ((vol_pg->VolumeStatus.Flags
1307 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1308 		return;
1309 
1310 	req = mpt_get_request(mpt, TRUE);
1311 	if (req == NULL) {
1312 		mpt_vol_prt(mpt, mpt_vol,
1313 		    "mpt_refresh_raid_vol: Get request failed!\n");
1314 		return;
1315 	}
1316 	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1317 	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1318 	if (rv == ETIMEDOUT) {
1319 		mpt_vol_prt(mpt, mpt_vol,
1320 		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1321 		mpt_free_request(mpt, req);
1322 		return;
1323 	}
1324 
1325 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1326 	if (rv == 0
1327 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1328 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1329 		memcpy(&mpt_vol->sync_progress,
1330 		       &ar->action_data.indicator_struct,
1331 		       sizeof(mpt_vol->sync_progress));
1332 		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1333 	} else {
1334 		mpt_vol_prt(mpt, mpt_vol,
1335 		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1336 	}
1337 	mpt_free_request(mpt, req);
1338 }
1339 
1340 /*
1341  * Update in-core information about RAID support.  We update any entries
1342  * that didn't previously exist or have been marked as needing to
1343  * be updated by our event handler.  Interesting changes are displayed
1344  * to the console.
1345  */
1346 static int
1347 mpt_refresh_raid_data(struct mpt_softc *mpt)
1348 {
1349 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1350 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1351 	IOC_3_PHYS_DISK *ioc_disk;
1352 	IOC_3_PHYS_DISK *ioc_last_disk;
1353 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1354 	size_t len;
1355 	int rv;
1356 	int i;
1357 	u_int nonopt_volumes;
1358 
1359 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1360 		return (0);
1361 	}
1362 
1363 	/*
1364 	 * Mark all items as unreferenced by the configuration.
1365 	 * This allows us to find, report, and discard stale
1366 	 * entries.
1367 	 */
1368 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1369 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1370 	}
1371 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1372 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1373 	}
1374 
1375 	/*
1376 	 * Get Physical Disk information.
1377 	 */
1378 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1379 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1380 				   &mpt->ioc_page3->Header, len,
1381 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1382 	if (rv) {
1383 		mpt_prt(mpt,
1384 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1385 		return (-1);
1386 	}
1387 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1388 
1389 	ioc_disk = mpt->ioc_page3->PhysDisk;
1390 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1391 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1392 		struct mpt_raid_disk *mpt_disk;
1393 
1394 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1395 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1396 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1397 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1398 
1399 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1400 
1401 		}
1402 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1403 		mpt->raid_rescan++;
1404 	}
1405 
1406 	/*
1407 	 * Refresh volume data.
1408 	 */
1409 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1410 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1411 				   &mpt->ioc_page2->Header, len,
1412 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1413 	if (rv) {
1414 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1415 			"Failed to read IOC Page 2\n");
1416 		return (-1);
1417 	}
1418 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1419 
1420 	ioc_vol = mpt->ioc_page2->RaidVolume;
1421 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1422 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1423 		struct mpt_raid_volume *mpt_vol;
1424 
1425 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1426 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1427 		vol_pg = mpt_vol->config_page;
1428 		if (vol_pg == NULL)
1429 			continue;
1430 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1431 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1432 		 || (vol_pg->VolumeStatus.Flags
1433 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1434 
1435 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1436 		}
1437 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1438 	}
1439 
1440 	nonopt_volumes = 0;
1441 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1442 		struct mpt_raid_volume *mpt_vol;
1443 		uint64_t total;
1444 		uint64_t left;
1445 		int m;
1446 		u_int prio;
1447 
1448 		mpt_vol = &mpt->raid_volumes[i];
1449 
1450 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1451 			continue;
1452 		}
1453 
1454 		vol_pg = mpt_vol->config_page;
1455 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1456 		 == MPT_RVF_ANNOUNCED) {
1457 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1458 			mpt_vol->flags = 0;
1459 			continue;
1460 		}
1461 
1462 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1463 			mpt_announce_vol(mpt, mpt_vol);
1464 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1465 		}
1466 
1467 		if (vol_pg->VolumeStatus.State !=
1468 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1469 			nonopt_volumes++;
1470 
1471 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1472 			continue;
1473 
1474 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1475 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1476 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1477 		mpt_verify_mwce(mpt, mpt_vol);
1478 
1479 		if (vol_pg->VolumeStatus.Flags == 0) {
1480 			continue;
1481 		}
1482 
1483 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1484 		for (m = 1; m <= 0x80; m <<= 1) {
1485 			switch (vol_pg->VolumeStatus.Flags & m) {
1486 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1487 				mpt_prtc(mpt, " Enabled");
1488 				break;
1489 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1490 				mpt_prtc(mpt, " Quiesced");
1491 				break;
1492 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1493 				mpt_prtc(mpt, " Re-Syncing");
1494 				break;
1495 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1496 				mpt_prtc(mpt, " Inactive");
1497 				break;
1498 			default:
1499 				break;
1500 			}
1501 		}
1502 		mpt_prtc(mpt, " )\n");
1503 
1504 		if ((vol_pg->VolumeStatus.Flags
1505 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1506 			continue;
1507 
1508 		mpt_verify_resync_rate(mpt, mpt_vol);
1509 
1510 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1511 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1512 		if (vol_pg->ResyncRate != 0) {
1513 
1514 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1515 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1516 			    prio / 1000, prio % 1000);
1517 		} else {
1518 			prio = vol_pg->VolumeSettings.Settings
1519 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1520 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1521 			    prio ? "High" : "Low");
1522 		}
1523 #if __FreeBSD_version >= 500000
1524 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1525 			    "blocks remaining\n", (uintmax_t)left,
1526 			    (uintmax_t)total);
1527 #else
1528 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1529 			    "blocks remaining\n", (uint64_t)left,
1530 			    (uint64_t)total);
1531 #endif
1532 
1533 		/* Periodically report on sync progress. */
1534 		mpt_schedule_raid_refresh(mpt);
1535 	}
1536 
1537 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1538 		struct mpt_raid_disk *mpt_disk;
1539 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1540 		int m;
1541 
1542 		mpt_disk = &mpt->raid_disks[i];
1543 		disk_pg = &mpt_disk->config_page;
1544 
1545 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1546 			continue;
1547 
1548 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1549 		 == MPT_RDF_ANNOUNCED) {
1550 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1551 			mpt_disk->flags = 0;
1552 			mpt->raid_rescan++;
1553 			continue;
1554 		}
1555 
1556 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1557 
1558 			mpt_announce_disk(mpt, mpt_disk);
1559 			mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1560 		}
1561 
1562 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1563 			continue;
1564 
1565 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1566 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1567 		if (disk_pg->PhysDiskStatus.Flags == 0)
1568 			continue;
1569 
1570 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1571 		for (m = 1; m <= 0x80; m <<= 1) {
1572 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1573 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1574 				mpt_prtc(mpt, " Out-Of-Sync");
1575 				break;
1576 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1577 				mpt_prtc(mpt, " Quiesced");
1578 				break;
1579 			default:
1580 				break;
1581 			}
1582 		}
1583 		mpt_prtc(mpt, " )\n");
1584 	}
1585 
1586 	mpt->raid_nonopt_volumes = nonopt_volumes;
1587 	return (0);
1588 }
1589 
1590 static void
1591 mpt_raid_timer(void *arg)
1592 {
1593 	struct mpt_softc *mpt;
1594 
1595 	mpt = (struct mpt_softc *)arg;
1596 #if __FreeBSD_version < 500000
1597 	MPT_LOCK(mpt);
1598 #endif
1599 	MPT_LOCK_ASSERT(mpt);
1600 	mpt_raid_wakeup(mpt);
1601 #if __FreeBSD_version < 500000
1602 	MPT_UNLOCK(mpt);
1603 #endif
1604 }
1605 
1606 static void
1607 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1608 {
1609 
1610 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1611 		      mpt_raid_timer, mpt);
1612 }
1613 
1614 void
1615 mpt_raid_free_mem(struct mpt_softc *mpt)
1616 {
1617 
1618 	if (mpt->raid_volumes) {
1619 		struct mpt_raid_volume *mpt_raid;
1620 		int i;
1621 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1622 			mpt_raid = &mpt->raid_volumes[i];
1623 			if (mpt_raid->config_page) {
1624 				free(mpt_raid->config_page, M_DEVBUF);
1625 				mpt_raid->config_page = NULL;
1626 			}
1627 		}
1628 		free(mpt->raid_volumes, M_DEVBUF);
1629 		mpt->raid_volumes = NULL;
1630 	}
1631 	if (mpt->raid_disks) {
1632 		free(mpt->raid_disks, M_DEVBUF);
1633 		mpt->raid_disks = NULL;
1634 	}
1635 	if (mpt->ioc_page2) {
1636 		free(mpt->ioc_page2, M_DEVBUF);
1637 		mpt->ioc_page2 = NULL;
1638 	}
1639 	if (mpt->ioc_page3) {
1640 		free(mpt->ioc_page3, M_DEVBUF);
1641 		mpt->ioc_page3 = NULL;
1642 	}
1643 	mpt->raid_max_volumes = 0;
1644 	mpt->raid_max_disks = 0;
1645 }
1646 
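/*
 * Sysctl plumbing (FreeBSD 5.x and later): exposes vol_member_wce,
 * vol_queue_depth and vol_resync_rate as writable knobs under the device's
 * sysctl tree, plus a read-only count of non-optimal volumes.
 */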
1647 #if __FreeBSD_version >= 500000
1648 static int
1649 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1650 {
1651 	struct mpt_raid_volume *mpt_vol;
1652 
1653 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1654 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1655 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1656 		return (EINVAL);
1657 
1658 	MPT_LOCK(mpt);
1659 	mpt->raid_resync_rate = rate;
1660 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1661 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1662 			continue;
1663 		}
1664 		mpt_verify_resync_rate(mpt, mpt_vol);
1665 	}
1666 	MPT_UNLOCK(mpt);
1667 	return (0);
1668 }
1669 
1670 static int
1671 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1672 {
1673 	struct mpt_raid_volume *mpt_vol;
1674 
1675 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1676 		return (EINVAL);
1677 
1678 	MPT_LOCK(mpt);
1679 	mpt->raid_queue_depth = vol_queue_depth;
1680 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1681 		struct cam_path *path;
1682 		int error;
1683 
1684 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1685 			continue;
1686 
1687 		mpt->raid_rescan = 0;
1688 
1689 		MPTLOCK_2_CAMLOCK(mpt);
1690 		error = xpt_create_path(&path, xpt_periph,
1691 					cam_sim_path(mpt->sim),
1692 					mpt_vol->config_page->VolumeID,
1693 					/*lun*/0);
1694 		if (error != CAM_REQ_CMP) {
1695 			CAMLOCK_2_MPTLOCK(mpt);
1696 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1697 			continue;
1698 		}
1699 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1700 		xpt_free_path(path);
1701 		CAMLOCK_2_MPTLOCK(mpt);
1702 	}
1703 	MPT_UNLOCK(mpt);
1704 	return (0);
1705 }
1706 
1707 static int
1708 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1709 {
1710 	struct mpt_raid_volume *mpt_vol;
1711 	int force_full_resync;
1712 
1713 	MPT_LOCK(mpt);
1714 	if (mwce == mpt->raid_mwce_setting) {
1715 		MPT_UNLOCK(mpt);
1716 		return (0);
1717 	}
1718 
1719 	/*
1720 	 * Catch MWCE being left on due to a failed shutdown.  Since
1721 	 * sysctls cannot be set by the loader, we treat the first
1722 	 * setting of this variable specially and force a full volume
1723 	 * resync if MWCE is enabled and a resync is in progress.
1724 	 */
1725 	force_full_resync = 0;
1726 	if (mpt->raid_mwce_set == 0
1727 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1728 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1729 		force_full_resync = 1;
1730 
1731 	mpt->raid_mwce_setting = mwce;
1732 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1733 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1734 		int resyncing;
1735 		int mwce;
1736 
1737 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1738 			continue;
1739 
1740 		vol_pg = mpt_vol->config_page;
1741 		resyncing = vol_pg->VolumeStatus.Flags
1742 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1743 		mwce = vol_pg->VolumeSettings.Settings
1744 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1745 		if (force_full_resync && resyncing && mwce) {
1746 
1747 			/*
1748 			 * XXX disable/enable volume should force a resync,
1749 	 *     but we'll need to quiesce, drain, and restart
1750 			 *     I/O to do that.
1751 			 */
1752 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1753 				    "detected.  Suggest full resync.\n");
1754 		}
1755 		mpt_verify_mwce(mpt, mpt_vol);
1756 	}
1757 	mpt->raid_mwce_set = 1;
1758 	MPT_UNLOCK(mpt);
1759 	return (0);
1760 }
1761 
1762 static const char *mpt_vol_mwce_strs[] =
1763 {
1764 	"On",
1765 	"Off",
1766 	"On-During-Rebuild",
1767 	"NC"
1768 };
1769 
1770 static int
1771 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1772 {
1773 	char inbuf[20];
1774 	struct mpt_softc *mpt;
1775 	const char *str;
1776 	int error;
1777 	u_int size;
1778 	u_int i;
1779 
1780 	GIANT_REQUIRED;
1781 
1782 	mpt = (struct mpt_softc *)arg1;
1783 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1784 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1785 	if (error || !req->newptr) {
1786 		return (error);
1787 	}
1788 
1789 	size = req->newlen - req->newidx;
1790 	if (size >= sizeof(inbuf)) {
1791 		return (EINVAL);
1792 	}
1793 
1794 	error = SYSCTL_IN(req, inbuf, size);
1795 	if (error) {
1796 		return (error);
1797 	}
1798 	inbuf[size] = '\0';
1799 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1800 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1801 			return (mpt_raid_set_vol_mwce(mpt, i));
1802 		}
1803 	}
1804 	return (EINVAL);
1805 }
1806 
1807 static int
1808 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1809 {
1810 	struct mpt_softc *mpt;
1811 	u_int raid_resync_rate;
1812 	int error;
1813 
1814 	GIANT_REQUIRED;
1815 
1816 	mpt = (struct mpt_softc *)arg1;
1817 	raid_resync_rate = mpt->raid_resync_rate;
1818 
1819 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1820 	if (error || !req->newptr) {
1821 		return (error);
1822 	}
1823 
1824 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1825 }
1826 
1827 static int
1828 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1829 {
1830 	struct mpt_softc *mpt;
1831 	u_int raid_queue_depth;
1832 	int error;
1833 
1834 	GIANT_REQUIRED;
1835 
1836 	mpt = (struct mpt_softc *)arg1;
1837 	raid_queue_depth = mpt->raid_queue_depth;
1838 
1839 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1840 	if (error || !req->newptr) {
1841 		return (error);
1842 	}
1843 
1844 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1845 }
1846 
1847 static void
1848 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1849 {
1850 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1851 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1852 
1853 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1854 			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1855 			mpt_raid_sysctl_vol_member_wce, "A",
1856 			"volume member WCE(On,Off,On-During-Rebuild,NC)");
1857 
1858 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1859 			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1860 			mpt_raid_sysctl_vol_queue_depth, "I",
1861 			"default volume queue depth");
1862 
1863 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1864 			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1865 			mpt_raid_sysctl_vol_resync_rate, "I",
1866 			"volume resync priority (0 == NC, 1 - 255)");
1867 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1868 			"nonoptimal_volumes", CTLFLAG_RD,
1869 			&mpt->raid_nonopt_volumes, 0,
1870 			"number of nonoptimal volumes");
1871 }
1872 #endif
1873