xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision ce6a89e27cd190313be39bb479880aeda4778436)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2005, WHEEL Sp. z o.o.
7  * Copyright (c) 2005 Justin T. Gibbs.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon including
18  *    a substantially similar Disclaimer requirement for further binary
19  *    redistribution.
20  * 3. Neither the names of the above listed copyright holders nor the names
21  *    of any contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Some Breakage and Bug Fixing added later.
38  * Copyright (c) 2006, by Matthew Jacob
39  * All Rights Reserved
40  *
41  * Support from LSI-Logic has also gone a great deal toward making this a
42  * workable subsystem and is gratefully acknowledged.
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include <dev/mpt/mpt.h>
49 #include <dev/mpt/mpt_raid.h>
50 
51 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
52 #include "dev/mpt/mpilib/mpi_raid.h"
53 
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 
60 #include <sys/callout.h>
61 #include <sys/kthread.h>
62 #include <sys/sysctl.h>
63 
64 #include <machine/stdarg.h>
65 
/*
 * Result data for a RAID action request.  It is stored in the request
 * buffer immediately after the message frame and filled in by
 * mpt_raid_reply_frame_handler() when the reply arrives.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/* Locate the action result stored just past the request's message frame. */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Strip the status-flag bits, leaving only the IOC status code. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
80 
81 static mpt_probe_handler_t	mpt_raid_probe;
82 static mpt_attach_handler_t	mpt_raid_attach;
83 static mpt_enable_handler_t	mpt_raid_enable;
84 static mpt_event_handler_t	mpt_raid_event;
85 static mpt_shutdown_handler_t	mpt_raid_shutdown;
86 static mpt_reset_handler_t	mpt_raid_ioc_reset;
87 static mpt_detach_handler_t	mpt_raid_detach;
88 
89 static struct mpt_personality mpt_raid_personality =
90 {
91 	.name		= "mpt_raid",
92 	.probe		= mpt_raid_probe,
93 	.attach		= mpt_raid_attach,
94 	.enable		= mpt_raid_enable,
95 	.event		= mpt_raid_event,
96 	.reset		= mpt_raid_ioc_reset,
97 	.shutdown	= mpt_raid_shutdown,
98 	.detach		= mpt_raid_detach,
99 };
100 
101 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
102 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
103 
104 static mpt_reply_handler_t mpt_raid_reply_handler;
105 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
106 					MSG_DEFAULT_REPLY *reply_frame);
107 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
108 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
109 static void mpt_raid_thread(void *arg);
110 static callout_func_t mpt_raid_timer;
111 #if 0
112 static void mpt_enable_vol(struct mpt_softc *mpt,
113 			   struct mpt_raid_volume *mpt_vol, int enable);
114 #endif
115 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
116 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
117     struct cam_path *);
118 static void mpt_raid_sysctl_attach(struct mpt_softc *);
119 
120 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
121 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
122 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
123 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
124     const char *fmt, ...);
125 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
126     const char *fmt, ...);
127 
128 static int mpt_issue_raid_req(struct mpt_softc *mpt,
129     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
130     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
131     int write, int wait);
132 
133 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
134 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
135 
136 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
137 
138 static const char *
139 mpt_vol_type(struct mpt_raid_volume *vol)
140 {
141 	switch (vol->config_page->VolumeType) {
142 	case MPI_RAID_VOL_TYPE_IS:
143 		return ("RAID-0");
144 	case MPI_RAID_VOL_TYPE_IME:
145 		return ("RAID-1E");
146 	case MPI_RAID_VOL_TYPE_IM:
147 		return ("RAID-1");
148 	default:
149 		return ("Unknown");
150 	}
151 }
152 
153 static const char *
154 mpt_vol_state(struct mpt_raid_volume *vol)
155 {
156 	switch (vol->config_page->VolumeStatus.State) {
157 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
158 		return ("Optimal");
159 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
160 		return ("Degraded");
161 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
162 		return ("Failed");
163 	default:
164 		return ("Unknown");
165 	}
166 }
167 
168 static const char *
169 mpt_disk_state(struct mpt_raid_disk *disk)
170 {
171 	switch (disk->config_page.PhysDiskStatus.State) {
172 	case MPI_PHYSDISK0_STATUS_ONLINE:
173 		return ("Online");
174 	case MPI_PHYSDISK0_STATUS_MISSING:
175 		return ("Missing");
176 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
177 		return ("Incompatible");
178 	case MPI_PHYSDISK0_STATUS_FAILED:
179 		return ("Failed");
180 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
181 		return ("Initializing");
182 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
183 		return ("Offline Requested");
184 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
185 		return ("Failed per Host Request");
186 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
187 		return ("Offline");
188 	default:
189 		return ("Unknown");
190 	}
191 }
192 
/*
 * printf-style message prefixed with the adapter name and the volume's
 * array index, bus, and target ID.
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* The volume index is derived from its offset in raid_volumes. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
206 
/*
 * printf-style message prefixed with the adapter name and either the
 * disk's volume membership (volume ID / member number) or, for a disk
 * not belonging to a volume, its physical bus and target ID.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
227 
/*
 * CAM async event callback.  When a new device is discovered on an
 * active RAID volume, adjust that device's queue depth to the driver's
 * configured RAID queue depth.
 */
static void
mpt_raid_async(void *callback_arg, u_int32_t code,
	       struct cam_path *path, void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc*)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		struct mpt_raid_volume *mpt_vol;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL) {
			break;
		}

		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
			 cgd->ccb_h.target_id);

		/* Find the active volume whose ID matches the new target. */
		RAID_VOL_FOREACH(mpt, mpt_vol) {
			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (mpt_vol->config_page->VolumeID
			 == cgd->ccb_h.target_id) {
				mpt_adjust_queue_depth(mpt, mpt_vol, path);
				break;
			}
		}
	}
	/* FALLTHROUGH */
	default:
		break;
	}
}
264 
265 static int
266 mpt_raid_probe(struct mpt_softc *mpt)
267 {
268 
269 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
270 		return (ENODEV);
271 	}
272 	return (0);
273 }
274 
275 static int
276 mpt_raid_attach(struct mpt_softc *mpt)
277 {
278 	struct ccb_setasync csa;
279 	mpt_handler_t	 handler;
280 	int		 error;
281 
282 	mpt_callout_init(mpt, &mpt->raid_timer);
283 
284 	error = mpt_spawn_raid_thread(mpt);
285 	if (error != 0) {
286 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
287 		goto cleanup;
288 	}
289 
290 	MPT_LOCK(mpt);
291 	handler.reply_handler = mpt_raid_reply_handler;
292 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
293 				     &raid_handler_id);
294 	if (error != 0) {
295 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
296 		goto cleanup;
297 	}
298 
299 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
300 	csa.ccb_h.func_code = XPT_SASYNC_CB;
301 	csa.event_enable = AC_FOUND_DEVICE;
302 	csa.callback = mpt_raid_async;
303 	csa.callback_arg = mpt;
304 	xpt_action((union ccb *)&csa);
305 	if (csa.ccb_h.status != CAM_REQ_CMP) {
306 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
307 			"CAM async handler.\n");
308 	}
309 	MPT_UNLOCK(mpt);
310 
311 	mpt_raid_sysctl_attach(mpt);
312 	return (0);
313 cleanup:
314 	MPT_UNLOCK(mpt);
315 	mpt_raid_detach(mpt);
316 	return (error);
317 }
318 
/*
 * Personality "enable" hook.  Integrated RAID requires no additional
 * setup beyond what attach already did, so always report success.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	(void)mpt;		/* No per-adapter enable work needed. */
	return (0);
}
325 
/*
 * Tear down the RAID personality: stop the refresh timer, terminate
 * the monitoring thread, deregister our reply handler, and disable
 * the CAM async callback registered at attach time.
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	/* Drain first so the timer cannot fire mid-teardown. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 removes the async registration. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
347 
/*
 * IOC reset notification hook.  No RAID-specific state needs to be
 * rebuilt after a reset yet.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
354 
/*
 * Human-readable names for RAID events, indexed by the
 * MPI_EVENT_RAID_RC_* reason code in the event data.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
370 
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications from the IOC.
 *
 * Decodes the RAID event data, marks affected volume/disk cached
 * state stale and/or schedules a bus rescan, logs the event, and
 * wakes the RAID monitoring thread.  Returns 0 for events that are
 * not integrated-RAID events (so other handlers may run), 1 otherwise.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Locate the active volume this event refers to, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/*
	 * Locate the physical disk; 0xFF means "no disk involved".
	 * NOTE(review): PhysDiskNum is not range-checked against
	 * MaxPhysDisks before indexing raid_disks — presumably the IOC
	 * only reports valid numbers; confirm before trusting firmware.
	 */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix with the most specific object we resolved. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Let the RAID thread refresh its view of the configuration. */
	mpt_raid_wakeup(mpt);
	return (1);
}
501 
502 static void
503 mpt_raid_shutdown(struct mpt_softc *mpt)
504 {
505 	struct mpt_raid_volume *mpt_vol;
506 
507 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
508 		return;
509 	}
510 
511 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
512 	RAID_VOL_FOREACH(mpt, mpt_vol) {
513 		mpt_verify_mwce(mpt, mpt_vol);
514 	}
515 }
516 
/*
 * Completion handler for RAID action requests.
 *
 * Parses the reply frame (if present), marks the request done and off
 * the pending list, then either wakes a synchronous waiter or frees
 * the request.  Always returns TRUE (reply frame consumed).
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter owns the request; otherwise free it if allowed. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
547 
548 /*
549  * Parse additional completion information in the reply
550  * frame for RAID I/O requests.
551  */
552 static int
553 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
554     MSG_DEFAULT_REPLY *reply_frame)
555 {
556 	MSG_RAID_ACTION_REPLY *reply;
557 	struct mpt_raid_action_result *action_result;
558 	MSG_RAID_ACTION_REQUEST *rap;
559 
560 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
561 	req->IOCStatus = le16toh(reply->IOCStatus);
562 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
563 
564 	switch (rap->Action) {
565 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
566 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
567 		break;
568 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
569 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
570 		break;
571 	default:
572 		break;
573 	}
574 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
575 	memcpy(&action_result->action_data, &reply->ActionData,
576 	    sizeof(action_result->action_data));
577 	action_result->action_status = le16toh(reply->ActionStatus);
578 	return (TRUE);
579 }
580 
/*
 * Utility routine to perform a RAID action command.
 *
 * Builds and submits a MSG_RAID_ACTION_REQUEST for the given volume
 * (and optional physical disk, 0xFF when disk == NULL), attaching a
 * single simple SGE describing addr/len for any action data transfer.
 * When 'wait' is set, blocks (non-sleeping poll) up to 2 seconds for
 * completion and returns mpt_wait_req()'s status; otherwise returns 0
 * immediately after queuing.
 *
 * NOTE(review): 'vol' is dereferenced unconditionally, so callers must
 * always pass a valid volume even for disk-targeted actions.
 */
static int
mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
		   int write, int wait)
{
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;

	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = Action;
	rap->ActionDataWord = htole32(ActionDataWord);
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = vol->config_page->VolumeID;
	rap->VolumeBus = vol->config_page->VolumeBus;
	if (disk != NULL)
		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
	else
		rap->PhysDiskNum = 0xFF;
	/* Single 32-bit simple SGE: last element, end of buffer and list. */
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	/* Route the reply back to our handler via the context field. */
	rap->MsgContext = htole32(req->index | raid_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	if (wait) {
		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
				     /*sleep_ok*/FALSE, /*time_ms*/2000));
	} else {
		return (0);
	}
}
624 
/*************************** RAID Status Monitoring ***************************/
/*
 * Start the background RAID monitoring kernel process.
 *
 * Returns 0 on success or the kproc_create() errno; on failure the
 * SIM queue frozen below is released again.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = kproc_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
651 
/*
 * Ask the RAID monitoring thread to exit and wait until it has.
 * Caller holds the MPT lock (mpt_sleep drops and reacquires it).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
667 
/*
 * Main loop of the RAID monitoring kernel process.
 *
 * Sleeps until woken (event handler or timer), refreshes the cached
 * RAID configuration pages, releases the phydisk SIM queue after the
 * first successful refresh, and triggers a CAM bus rescan when events
 * have flagged one.  Exits when shutdwn_raid is set, signalling the
 * terminator sleeping on &mpt->raid_thread.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		/* On refresh failure, retry later via the timer. */
		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock across the sleeping allocation. */
			MPT_UNLOCK(mpt);

			ccb = xpt_alloc_ccb();

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, NULL,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				xpt_free_ccb(ccb);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* xpt_rescan() takes ownership of the ccb. */
				xpt_rescan(ccb);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kproc_exit(0);
}
726 
#if 0
/*
 * Disabled draft code: quiesce physical I/O to a RAID member disk
 * before servicing pass-through requests.  Kept for reference.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
786 
787 /* XXX Ignores that there may be multiple buses/IOCs involved. */
788 cam_status
789 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
790 {
791 	struct mpt_raid_disk *mpt_disk;
792 
793 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
794 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
795 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
796 		*tgt = mpt_disk->config_page.PhysDiskID;
797 		return (0);
798 	}
799 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
800 		 ccb->ccb_h.target_id);
801 	return (-1);
802 }
803 
804 /* XXX Ignores that there may be multiple buses/IOCs involved. */
805 int
806 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
807 {
808 	struct mpt_raid_disk *mpt_disk;
809 	int i;
810 
811 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
812 		return (0);
813 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
814 		mpt_disk = &mpt->raid_disks[i];
815 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
816 		    mpt_disk->config_page.PhysDiskID == tgt)
817 			return (1);
818 	}
819 	return (0);
820 
821 }
822 
823 /* XXX Ignores that there may be multiple buses/IOCs involved. */
824 int
825 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
826 {
827 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
828 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
829 
830 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
831 		return (0);
832 	}
833 	ioc_vol = mpt->ioc_page2->RaidVolume;
834 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
835 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
836 		if (ioc_vol->VolumeID == tgt) {
837 			return (1);
838 		}
839 	}
840 	return (0);
841 }
842 
#if 0
/*
 * Disabled draft code: enable or disable a RAID volume via a RAID
 * action request.  Kept for reference.
 *
 * NOTE(review): the ETIMEDOUT path returns without freeing 'req' —
 * presumably deliberate since the IOC may still complete it; confirm
 * before re-enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
895 
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with
 * the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * RAID action when they differ.  The cached config page is only
 * updated after the IOC confirms the change.
 *
 * NOTE(review): the ETIMEDOUT path returns without freeing 'req' —
 * presumably deliberate since the IOC may still complete the request;
 * confirm against the request lifecycle rules in mpt(4).
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		/* "No change" policy: never touch the setting. */
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the MWCE bit just long enough to snapshot the desired
	 * settings word, then restore the cached copy; the cache is only
	 * committed below once the IOC accepts the change.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the new setting to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
978 
/*
 * Reconcile a volume's resync rate and resync-priority setting with
 * the driver's configured rate.  A rate mismatch is fixed with a
 * SET_RESYNC_RATE action; a priority mismatch (rate >= 128 means
 * high priority) with a CHANGE_VOLUME_SETTINGS action.  Cached pages
 * are only updated after the IOC confirms the change.
 *
 * NOTE(review): both ETIMEDOUT paths return without freeing 'req' —
 * presumably deliberate since the IOC may still complete the request;
 * confirm against the request lifecycle rules in mpt(4).
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave whatever the IOC has configured. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			/* Success: commit the new rate to our cached page. */
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit just long enough to snapshot
		 * the desired settings word; the cache is only committed
		 * below once the IOC accepts the change.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the new priority bit. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1069 
static void
mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		       struct cam_path *path)
{
	struct ccb_relsim crs;

	/*
	 * Ask CAM to adjust the number of openings (concurrently queued
	 * commands) for the device at 'path' to the driver's configured
	 * RAID volume queue depth, via an XPT_REL_SIMQ CCB.
	 *
	 * NOTE(review): crs is not zeroed before xpt_setup_ccb(); this
	 * presumably relies on XPT_REL_SIMQ consuming only the fields
	 * set below — confirm against cam_ccb.h for this branch.
	 */
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = mpt->raid_queue_depth;
	xpt_action((union ccb *)&crs);
	/* Action is synchronous for XPT_REL_SIMQ; report any CAM error. */
	if (crs.ccb_h.status != CAM_REQ_CMP)
		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
			    "with CAM status %#x\n", crs.ccb_h.status);
}
1086 
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	/*
	 * Print a console summary of one RAID volume: its settings bits,
	 * hot spare pool membership, and the position/status of every
	 * member disk.
	 */
	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each of the 16 settings bits and name the ones we know. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Use singular "Pool" when exactly one pool bit is set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		/* Report each of the (up to 8) pool bits that are set. */
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		/* Look up our cached state for this member disk. */
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		/* Mirrors have primary/secondary roles; stripes have slots. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		/* Flags are independent bits; report every one set. */
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		/* State is an enumeration; print exactly one name. */
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1194 
static void
mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
{
	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
	int rd_bus = cam_sim_bus(mpt->sim);
	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
	u_int i;

	/*
	 * Print a console summary of one physical disk: its address on
	 * both the RAID bus and the pass-through bus, and any hot spare
	 * pools it belongs to.
	 */
	disk_pg = &mpt_disk->config_page;
	mpt_disk_prt(mpt, mpt_disk,
		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
		     device_get_nameunit(mpt->dev), rd_bus,
		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
		     pt_bus, mpt_disk - mpt->raid_disks);
	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
		return;
	/* Use singular "Pool" when exactly one pool bit is set. */
	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
		   ? ":" : "s:");
	/* Report each of the (up to 8) pool bits that are set. */
	for (i = 0; i < 8; i++) {
		u_int mask;

		mask = 0x1 << i;
		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
			continue;
		mpt_prtc(mpt, " %d", i);
	}
	mpt_prtc(mpt, "\n");
}
1224 
1225 static void
1226 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1227 		      IOC_3_PHYS_DISK *ioc_disk)
1228 {
1229 	int rv;
1230 
1231 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1232 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1233 				 &mpt_disk->config_page.Header,
1234 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1235 	if (rv != 0) {
1236 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1237 			"Failed to read RAID Disk Hdr(%d)\n",
1238 		 	ioc_disk->PhysDiskNum);
1239 		return;
1240 	}
1241 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1242 				   &mpt_disk->config_page.Header,
1243 				   sizeof(mpt_disk->config_page),
1244 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1245 	if (rv != 0)
1246 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1247 			"Failed to read RAID Disk Page(%d)\n",
1248 		 	ioc_disk->PhysDiskNum);
1249 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1250 }
1251 
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	/*
	 * Re-read this volume's RAID configuration page from the IOC,
	 * refresh the cached membership data for its disks, and, when a
	 * resync is in progress, fetch the resync progress indicator.
	 */
	vol_pg = mpt_vol->config_page;
	/* Mark stale until the reads below succeed. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	/* Fetch the page header first, then the current page contents. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Convert the freshly read page to host byte order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/*
		 * For IM (mirror) volumes the map value is apparently
		 * offset by one; normalize to a zero-based member number.
		 */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_disk->member_number--;
		}
	}

	/* Nothing further to do unless a resync is in progress. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	/* Ask the IOC for the resync progress indicator structure. */
	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		/* Cache the progress data in host byte order. */
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1330 
1331 /*
1332  * Update in-core information about RAID support.  We update any entries
1333  * that didn't previously exists or have been marked as needing to
1334  * be updated by our event handler.  Interesting changes are displayed
1335  * to the console.
1336  */
1337 static int
1338 mpt_refresh_raid_data(struct mpt_softc *mpt)
1339 {
1340 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1341 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1342 	IOC_3_PHYS_DISK *ioc_disk;
1343 	IOC_3_PHYS_DISK *ioc_last_disk;
1344 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1345 	size_t len;
1346 	int rv;
1347 	int i;
1348 	u_int nonopt_volumes;
1349 
1350 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1351 		return (0);
1352 	}
1353 
1354 	/*
1355 	 * Mark all items as unreferenced by the configuration.
1356 	 * This allows us to find, report, and discard stale
1357 	 * entries.
1358 	 */
1359 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1360 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1361 	}
1362 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1363 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1364 	}
1365 
1366 	/*
1367 	 * Get Physical Disk information.
1368 	 */
1369 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1370 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1371 				   &mpt->ioc_page3->Header, len,
1372 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1373 	if (rv) {
1374 		mpt_prt(mpt,
1375 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1376 		return (-1);
1377 	}
1378 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1379 
1380 	ioc_disk = mpt->ioc_page3->PhysDisk;
1381 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1382 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1383 		struct mpt_raid_disk *mpt_disk;
1384 
1385 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1386 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1387 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1388 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1389 
1390 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1391 
1392 		}
1393 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1394 		mpt->raid_rescan++;
1395 	}
1396 
1397 	/*
1398 	 * Refresh volume data.
1399 	 */
1400 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1401 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1402 				   &mpt->ioc_page2->Header, len,
1403 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1404 	if (rv) {
1405 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1406 			"Failed to read IOC Page 2\n");
1407 		return (-1);
1408 	}
1409 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1410 
1411 	ioc_vol = mpt->ioc_page2->RaidVolume;
1412 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1413 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1414 		struct mpt_raid_volume *mpt_vol;
1415 
1416 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1417 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1418 		vol_pg = mpt_vol->config_page;
1419 		if (vol_pg == NULL)
1420 			continue;
1421 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1422 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1423 		 || (vol_pg->VolumeStatus.Flags
1424 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1425 
1426 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1427 		}
1428 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1429 	}
1430 
1431 	nonopt_volumes = 0;
1432 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1433 		struct mpt_raid_volume *mpt_vol;
1434 		uint64_t total;
1435 		uint64_t left;
1436 		int m;
1437 		u_int prio;
1438 
1439 		mpt_vol = &mpt->raid_volumes[i];
1440 
1441 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1442 			continue;
1443 		}
1444 
1445 		vol_pg = mpt_vol->config_page;
1446 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1447 		 == MPT_RVF_ANNOUNCED) {
1448 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1449 			mpt_vol->flags = 0;
1450 			continue;
1451 		}
1452 
1453 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1454 			mpt_announce_vol(mpt, mpt_vol);
1455 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1456 		}
1457 
1458 		if (vol_pg->VolumeStatus.State !=
1459 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1460 			nonopt_volumes++;
1461 
1462 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1463 			continue;
1464 
1465 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1466 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1467 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1468 		mpt_verify_mwce(mpt, mpt_vol);
1469 
1470 		if (vol_pg->VolumeStatus.Flags == 0) {
1471 			continue;
1472 		}
1473 
1474 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1475 		for (m = 1; m <= 0x80; m <<= 1) {
1476 			switch (vol_pg->VolumeStatus.Flags & m) {
1477 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1478 				mpt_prtc(mpt, " Enabled");
1479 				break;
1480 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1481 				mpt_prtc(mpt, " Quiesced");
1482 				break;
1483 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1484 				mpt_prtc(mpt, " Re-Syncing");
1485 				break;
1486 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1487 				mpt_prtc(mpt, " Inactive");
1488 				break;
1489 			default:
1490 				break;
1491 			}
1492 		}
1493 		mpt_prtc(mpt, " )\n");
1494 
1495 		if ((vol_pg->VolumeStatus.Flags
1496 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1497 			continue;
1498 
1499 		mpt_verify_resync_rate(mpt, mpt_vol);
1500 
1501 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1502 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1503 		if (vol_pg->ResyncRate != 0) {
1504 
1505 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1506 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1507 			    prio / 1000, prio % 1000);
1508 		} else {
1509 			prio = vol_pg->VolumeSettings.Settings
1510 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1511 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1512 			    prio ? "High" : "Low");
1513 		}
1514 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1515 			    "blocks remaining\n", (uintmax_t)left,
1516 			    (uintmax_t)total);
1517 
1518 		/* Periodically report on sync progress. */
1519 		mpt_schedule_raid_refresh(mpt);
1520 	}
1521 
1522 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1523 		struct mpt_raid_disk *mpt_disk;
1524 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1525 		int m;
1526 
1527 		mpt_disk = &mpt->raid_disks[i];
1528 		disk_pg = &mpt_disk->config_page;
1529 
1530 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1531 			continue;
1532 
1533 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1534 		 == MPT_RDF_ANNOUNCED) {
1535 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1536 			mpt_disk->flags = 0;
1537 			mpt->raid_rescan++;
1538 			continue;
1539 		}
1540 
1541 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1542 
1543 			mpt_announce_disk(mpt, mpt_disk);
1544 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1545 		}
1546 
1547 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1548 			continue;
1549 
1550 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1551 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1552 		if (disk_pg->PhysDiskStatus.Flags == 0)
1553 			continue;
1554 
1555 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1556 		for (m = 1; m <= 0x80; m <<= 1) {
1557 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1558 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1559 				mpt_prtc(mpt, " Out-Of-Sync");
1560 				break;
1561 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1562 				mpt_prtc(mpt, " Quiesced");
1563 				break;
1564 			default:
1565 				break;
1566 			}
1567 		}
1568 		mpt_prtc(mpt, " )\n");
1569 	}
1570 
1571 	mpt->raid_nonopt_volumes = nonopt_volumes;
1572 	return (0);
1573 }
1574 
/*
 * Callout handler for the periodic RAID refresh timer; runs with the
 * MPT lock held and simply wakes the RAID worker.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = arg;

	MPT_LOCK_ASSERT(mpt);
	mpt_raid_wakeup(mpt);
}
1584 
1585 static void
1586 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1587 {
1588 
1589 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1590 		      mpt_raid_timer, mpt);
1591 }
1592 
1593 void
1594 mpt_raid_free_mem(struct mpt_softc *mpt)
1595 {
1596 
1597 	if (mpt->raid_volumes) {
1598 		struct mpt_raid_volume *mpt_raid;
1599 		int i;
1600 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1601 			mpt_raid = &mpt->raid_volumes[i];
1602 			if (mpt_raid->config_page) {
1603 				free(mpt_raid->config_page, M_DEVBUF);
1604 				mpt_raid->config_page = NULL;
1605 			}
1606 		}
1607 		free(mpt->raid_volumes, M_DEVBUF);
1608 		mpt->raid_volumes = NULL;
1609 	}
1610 	if (mpt->raid_disks) {
1611 		free(mpt->raid_disks, M_DEVBUF);
1612 		mpt->raid_disks = NULL;
1613 	}
1614 	if (mpt->ioc_page2) {
1615 		free(mpt->ioc_page2, M_DEVBUF);
1616 		mpt->ioc_page2 = NULL;
1617 	}
1618 	if (mpt->ioc_page3) {
1619 		free(mpt->ioc_page3, M_DEVBUF);
1620 		mpt->ioc_page3 = NULL;
1621 	}
1622 	mpt->raid_max_volumes =  0;
1623 	mpt->raid_max_disks =  0;
1624 }
1625 
1626 static int
1627 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1628 {
1629 	struct mpt_raid_volume *mpt_vol;
1630 
1631 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1632 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1633 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1634 		return (EINVAL);
1635 
1636 	MPT_LOCK(mpt);
1637 	mpt->raid_resync_rate = rate;
1638 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1639 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1640 			continue;
1641 		}
1642 		mpt_verify_resync_rate(mpt, mpt_vol);
1643 	}
1644 	MPT_UNLOCK(mpt);
1645 	return (0);
1646 }
1647 
static int
mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
{
	struct mpt_raid_volume *mpt_vol;

	/*
	 * Record a new default volume queue depth and apply it to every
	 * active volume via CAM.  Accepted range is 1-255.
	 */
	if (vol_queue_depth > 255 || vol_queue_depth < 1)
		return (EINVAL);

	MPT_LOCK(mpt);
	mpt->raid_queue_depth = vol_queue_depth;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		struct cam_path *path;
		int error;

		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
			continue;

		/*
		 * NOTE(review): clearing raid_rescan inside the loop looks
		 * odd — presumably it cancels a pending rescan for volumes
		 * we are about to touch directly; confirm intent.
		 */
		mpt->raid_rescan = 0;

		/* Build a CAM path to the volume on the RAID SIM. */
		error = xpt_create_path(&path, NULL,
					cam_sim_path(mpt->sim),
					mpt_vol->config_page->VolumeID,
					/*lun*/0);
		if (error != CAM_REQ_CMP) {
			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
			continue;
		}
		/* Apply the new depth with an XPT_REL_SIMQ CCB. */
		mpt_adjust_queue_depth(mpt, mpt_vol, path);
		xpt_free_path(path);
	}
	MPT_UNLOCK(mpt);
	return (0);
}
1681 
1682 static int
1683 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1684 {
1685 	struct mpt_raid_volume *mpt_vol;
1686 	int force_full_resync;
1687 
1688 	MPT_LOCK(mpt);
1689 	if (mwce == mpt->raid_mwce_setting) {
1690 		MPT_UNLOCK(mpt);
1691 		return (0);
1692 	}
1693 
1694 	/*
1695 	 * Catch MWCE being left on due to a failed shutdown.  Since
1696 	 * sysctls cannot be set by the loader, we treat the first
1697 	 * setting of this varible specially and force a full volume
1698 	 * resync if MWCE is enabled and a resync is in progress.
1699 	 */
1700 	force_full_resync = 0;
1701 	if (mpt->raid_mwce_set == 0
1702 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1703 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1704 		force_full_resync = 1;
1705 
1706 	mpt->raid_mwce_setting = mwce;
1707 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1708 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1709 		int resyncing;
1710 		int mwce;
1711 
1712 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1713 			continue;
1714 
1715 		vol_pg = mpt_vol->config_page;
1716 		resyncing = vol_pg->VolumeStatus.Flags
1717 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1718 		mwce = vol_pg->VolumeSettings.Settings
1719 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1720 		if (force_full_resync && resyncing && mwce) {
1721 
1722 			/*
1723 			 * XXX disable/enable volume should force a resync,
1724 			 *     but we'll need to queice, drain, and restart
1725 			 *     I/O to do that.
1726 			 */
1727 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1728 				    "detected.  Suggest full resync.\n");
1729 		}
1730 		mpt_verify_mwce(mpt, mpt_vol);
1731 	}
1732 	mpt->raid_mwce_set = 1;
1733 	MPT_UNLOCK(mpt);
1734 	return (0);
1735 }
1736 
/*
 * Human-readable names for the member write-cache settings, indexed by
 * the mpt_raid_mwce_t value; consumed by the vol_member_wce sysctl
 * handler for both reporting and parsing.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1744 
static int
mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
{
	char inbuf[20];
	struct mpt_softc *mpt;
	const char *str;
	int error;
	u_int size;
	u_int i;

	GIANT_REQUIRED;

	mpt = (struct mpt_softc *)arg1;
	/* Report the current setting by its string name. */
	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
	error = SYSCTL_OUT(req, str, strlen(str) + 1);
	/* Done unless this is a write (a new value was supplied). */
	if (error || !req->newptr) {
		return (error);
	}

	/* Bound the supplied string to our local buffer. */
	size = req->newlen - req->newidx;
	if (size >= sizeof(inbuf)) {
		return (EINVAL);
	}

	error = SYSCTL_IN(req, inbuf, size);
	if (error) {
		return (error);
	}
	inbuf[size] = '\0';
	/*
	 * Match against the known names; the table index doubles as the
	 * mpt_raid_mwce_t value to apply.
	 */
	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
			return (mpt_raid_set_vol_mwce(mpt, i));
		}
	}
	return (EINVAL);
}
1781 
1782 static int
1783 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1784 {
1785 	struct mpt_softc *mpt;
1786 	u_int raid_resync_rate;
1787 	int error;
1788 
1789 	GIANT_REQUIRED;
1790 
1791 	mpt = (struct mpt_softc *)arg1;
1792 	raid_resync_rate = mpt->raid_resync_rate;
1793 
1794 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1795 	if (error || !req->newptr) {
1796 		return error;
1797 	}
1798 
1799 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1800 }
1801 
1802 static int
1803 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1804 {
1805 	struct mpt_softc *mpt;
1806 	u_int raid_queue_depth;
1807 	int error;
1808 
1809 	GIANT_REQUIRED;
1810 
1811 	mpt = (struct mpt_softc *)arg1;
1812 	raid_queue_depth = mpt->raid_queue_depth;
1813 
1814 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1815 	if (error || !req->newptr) {
1816 		return error;
1817 	}
1818 
1819 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1820 }
1821 
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	/*
	 * Attach the RAID tuning knobs under this device's sysctl tree:
	 * member write-cache policy, default volume queue depth, resync
	 * rate, and a read-only count of non-optimal volumes.
	 */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    mpt, 0, mpt_raid_sysctl_vol_member_wce, "A",
	    "volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I",
	    "default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I",
	    "volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1847