xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56 
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define	GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62 
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66 
67 #include <machine/stdarg.h>
68 
/*
 * Per-request scratch area used to stash the interesting fields of a
 * RAID ACTION reply (action-specific data plus the action status).
 * It lives immediately after the MSG_RAID_ACTION_REQUEST in the
 * request's virtual buffer -- see REQ_TO_RAID_ACTION_RESULT below.
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;
	uint16_t			action_status;
};

/*
 * Locate the action-result scratch area: it is laid out directly
 * behind the RAID ACTION request message within req->req_vbuf.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Strip the log-info and other modifier bits from a request's IOCStatus. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83 
84 
85 static mpt_probe_handler_t	mpt_raid_probe;
86 static mpt_attach_handler_t	mpt_raid_attach;
87 static mpt_enable_handler_t	mpt_raid_enable;
88 static mpt_event_handler_t	mpt_raid_event;
89 static mpt_shutdown_handler_t	mpt_raid_shutdown;
90 static mpt_reset_handler_t	mpt_raid_ioc_reset;
91 static mpt_detach_handler_t	mpt_raid_detach;
92 
93 static struct mpt_personality mpt_raid_personality =
94 {
95 	.name		= "mpt_raid",
96 	.probe		= mpt_raid_probe,
97 	.attach		= mpt_raid_attach,
98 	.enable		= mpt_raid_enable,
99 	.event		= mpt_raid_event,
100 	.reset		= mpt_raid_ioc_reset,
101 	.shutdown	= mpt_raid_shutdown,
102 	.detach		= mpt_raid_detach,
103 };
104 
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107 
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110 					MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117 			   struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
120 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
121     struct cam_path *);
122 #if __FreeBSD_version < 500000
123 #define	mpt_raid_sysctl_attach(x)	do { } while (0)
124 #else
125 static void mpt_raid_sysctl_attach(struct mpt_softc *);
126 #endif
127 
128 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
129 
130 const char *
131 mpt_vol_type(struct mpt_raid_volume *vol)
132 {
133 	switch (vol->config_page->VolumeType) {
134 	case MPI_RAID_VOL_TYPE_IS:
135 		return ("RAID-0");
136 	case MPI_RAID_VOL_TYPE_IME:
137 		return ("RAID-1E");
138 	case MPI_RAID_VOL_TYPE_IM:
139 		return ("RAID-1");
140 	default:
141 		return ("Unknown");
142 	}
143 }
144 
145 const char *
146 mpt_vol_state(struct mpt_raid_volume *vol)
147 {
148 	switch (vol->config_page->VolumeStatus.State) {
149 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
150 		return ("Optimal");
151 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
152 		return ("Degraded");
153 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
154 		return ("Failed");
155 	default:
156 		return ("Unknown");
157 	}
158 }
159 
160 const char *
161 mpt_disk_state(struct mpt_raid_disk *disk)
162 {
163 	switch (disk->config_page.PhysDiskStatus.State) {
164 	case MPI_PHYSDISK0_STATUS_ONLINE:
165 		return ("Online");
166 	case MPI_PHYSDISK0_STATUS_MISSING:
167 		return ("Missing");
168 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
169 		return ("Incompatible");
170 	case MPI_PHYSDISK0_STATUS_FAILED:
171 		return ("Failed");
172 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
173 		return ("Initializing");
174 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
175 		return ("Offline Requested");
176 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
177 		return ("Failed per Host Request");
178 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
179 		return ("Offline");
180 	default:
181 		return ("Unknown");
182 	}
183 }
184 
/*
 * printf-style diagnostic helper that prefixes the message with the
 * adapter unit, the volume's index within mpt->raid_volumes, and the
 * volume's bus/ID ("unitN:volM(unitN:bus:id): ").
 */
void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
198 
/*
 * printf-style diagnostic helper for a physical disk.  If the disk is a
 * member of a volume the prefix names the volume ID and member number;
 * otherwise it names the disk's own bus/ID from its config page.
 */
void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
219 
220 static void
221 mpt_raid_async(void *callback_arg, u_int32_t code,
222 	       struct cam_path *path, void *arg)
223 {
224 	struct mpt_softc *mpt;
225 
226 	mpt = (struct mpt_softc*)callback_arg;
227 	switch (code) {
228 	case AC_FOUND_DEVICE:
229 	{
230 		struct ccb_getdev *cgd;
231 		struct mpt_raid_volume *mpt_vol;
232 
233 		cgd = (struct ccb_getdev *)arg;
234 		if (cgd == NULL) {
235 			break;
236 		}
237 
238 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
239 			 cgd->ccb_h.target_id);
240 
241 		RAID_VOL_FOREACH(mpt, mpt_vol) {
242 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
243 				continue;
244 
245 			if (mpt_vol->config_page->VolumeID
246 			 == cgd->ccb_h.target_id) {
247 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
248 				break;
249 			}
250 		}
251 	}
252 	default:
253 		break;
254 	}
255 }
256 
257 int
258 mpt_raid_probe(struct mpt_softc *mpt)
259 {
260 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
261 		return (ENODEV);
262 	}
263 	return (0);
264 }
265 
266 int
267 mpt_raid_attach(struct mpt_softc *mpt)
268 {
269 	struct ccb_setasync csa;
270 	mpt_handler_t	 handler;
271 	int		 error;
272 
273 	mpt_callout_init(&mpt->raid_timer);
274 
275 	error = mpt_spawn_raid_thread(mpt);
276 	if (error != 0) {
277 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
278 		goto cleanup;
279 	}
280 
281 	MPT_LOCK(mpt);
282 	handler.reply_handler = mpt_raid_reply_handler;
283 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
284 				     &raid_handler_id);
285 	if (error != 0) {
286 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
287 		goto cleanup;
288 	}
289 
290 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
291 	csa.ccb_h.func_code = XPT_SASYNC_CB;
292 	csa.event_enable = AC_FOUND_DEVICE;
293 	csa.callback = mpt_raid_async;
294 	csa.callback_arg = mpt;
295 	xpt_action((union ccb *)&csa);
296 	if (csa.ccb_h.status != CAM_REQ_CMP) {
297 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298 			"CAM async handler.\n");
299 	}
300 	MPT_UNLOCK(mpt);
301 
302 	mpt_raid_sysctl_attach(mpt);
303 	return (0);
304 cleanup:
305 	MPT_UNLOCK(mpt);
306 	mpt_raid_detach(mpt);
307 	return (error);
308 }
309 
/*
 * Personality enable hook.  Nothing to do for RAID beyond what attach
 * already set up; always reports success.
 */
int
mpt_raid_enable(struct mpt_softc *mpt)
{
	return (0);
}
315 
/*
 * Tear down the RAID personality: stop the refresh timer, terminate the
 * monitor thread, deregister our reply handler, and disable the CAM
 * async callback registered at attach time (event_enable = 0).
 *
 * Note the ordering: the callout is stopped before taking the lock, and
 * thread termination blocks until the RAID thread has exited.
 */
void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);

	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* Deregister the async callback by re-registering with no events. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
337 
/*
 * IOC reset notification hook.  RAID state is refreshed by the monitor
 * thread, so there is currently nothing to do here.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{
	/* Nothing to do yet. */
}
343 
/*
 * Text for RAID event reason codes, indexed by the MPI_EVENT_RAID_RC_*
 * value.  Entry order must match the reason-code numbering; codes at or
 * beyond NUM_ELEMENTS(raid_event_txt) are reported as unhandled.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
359 
/*
 * Handle an MPI_EVENT_INTEGRATED_RAID event notification.
 *
 * Resolves the event's volume/physdisk references against our cached
 * tables, updates the UP2DATE/ANNOUNCED flags and the rescan counter
 * according to the reason code, optionally prints a one-line summary,
 * and finally wakes the RAID monitor thread.
 *
 * Returns 0 if the event was not a RAID event (so other handlers may
 * run), 1 once consumed.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Find the active volume matching the event's bus/ID, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/*
	 * Resolve the physical disk reference (0xFF means "none").
	 * NOTE(review): PhysDiskNum is used to index raid_disks without a
	 * check against raid_max_disks -- presumably the IOC only reports
	 * in-range numbers; verify against the firmware spec.
	 */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	/* Update cached state / rescan bookkeeping per reason code. */
	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	/* Emit the event, prefixed by disk, volume, or raw bus/ID. */
	if (print_event) {
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Kick the monitor thread to refresh RAID data / rescan. */
	mpt_raid_wakeup(mpt);
	return (1);
}
490 
491 static void
492 mpt_raid_shutdown(struct mpt_softc *mpt)
493 {
494 	struct mpt_raid_volume *mpt_vol;
495 
496 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
497 		return;
498 	}
499 
500 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
501 	RAID_VOL_FOREACH(mpt, mpt_vol) {
502 		mpt_verify_mwce(mpt, mpt_vol);
503 	}
504 }
505 
/*
 * Reply handler for RAID ACTION requests.
 *
 * Parses the reply frame (if one arrived), marks the request DONE, and
 * either wakes a sleeping waiter or frees the request, depending on
 * whether the issuer is blocked in mpt_wait_req().
 *
 * Always returns TRUE (reply consumed).
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	/* Transition the request out of the pending queue. */
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	/* A waiter (mpt_wait_req) owns the request; otherwise free it. */
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
536 
537 /*
538  * Parse additional completion information in the reply
539  * frame for RAID I/O requests.
540  */
541 static int
542 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
543     MSG_DEFAULT_REPLY *reply_frame)
544 {
545 	MSG_RAID_ACTION_REPLY *reply;
546 	struct mpt_raid_action_result *action_result;
547 	MSG_RAID_ACTION_REQUEST *rap;
548 
549 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
550 	req->IOCStatus = le16toh(reply->IOCStatus);
551 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
552 
553 	switch (rap->Action) {
554 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
555 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
556 		break;
557 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
558 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
559 		break;
560 	default:
561 		break;
562 	}
563 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
564 	memcpy(&action_result->action_data, &reply->ActionData,
565 	    sizeof(action_result->action_data));
566 	action_result->action_status = reply->ActionStatus;
567 	return (TRUE);
568 }
569 
570 /*
571  * Utiltity routine to perform a RAID action command;
572  */
573 int
574 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
575 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
576 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
577 		   int write, int wait)
578 {
579 	MSG_RAID_ACTION_REQUEST *rap;
580 	SGE_SIMPLE32 *se;
581 
582 	rap = req->req_vbuf;
583 	memset(rap, 0, sizeof *rap);
584 	rap->Action = Action;
585 	rap->ActionDataWord = ActionDataWord;
586 	rap->Function = MPI_FUNCTION_RAID_ACTION;
587 	rap->VolumeID = vol->config_page->VolumeID;
588 	rap->VolumeBus = vol->config_page->VolumeBus;
589 	if (disk != 0)
590 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
591 	else
592 		rap->PhysDiskNum = 0xFF;
593 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
594 	se->Address = addr;
595 	MPI_pSGE_SET_LENGTH(se, len);
596 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
597 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
598 	    MPI_SGE_FLAGS_END_OF_LIST |
599 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
600 	rap->MsgContext = htole32(req->index | raid_handler_id);
601 
602 	mpt_check_doorbell(mpt);
603 	mpt_send_cmd(mpt, req);
604 
605 	if (wait) {
606 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
607 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
608 	} else {
609 		return (0);
610 	}
611 }
612 
/*************************** RAID Status Monitoring ***************************/
/*
 * Create the per-adapter RAID monitor kthread.
 *
 * The physical-disk SIM queue is frozen first and released either by the
 * thread's first successful data refresh or, on spawn failure, here.
 * Returns 0 or the kthread-creation errno.
 */
static int
mpt_spawn_raid_thread(struct mpt_softc *mpt)
{
	int error;

	/*
	 * Freeze out any CAM transactions until our thread
	 * is able to run at least once.  We need to update
	 * our RAID pages before accepting I/O or we may
	 * reject I/O to an ID we later determine is for a
	 * hidden physdisk.
	 */
	MPT_LOCK(mpt);
	xpt_freeze_simq(mpt->phydisk_sim, 1);
	MPT_UNLOCK(mpt);
	error = mpt_kthread_create(mpt_raid_thread, mpt,
	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
	    "mpt_raid%d", mpt->unit);
	if (error != 0) {
		/* Undo the freeze; the thread will never run to release it. */
		MPT_LOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
		MPT_UNLOCK(mpt);
	}
	return (error);
}
639 
/*
 * Ask the RAID monitor thread to exit and wait until it has.
 *
 * The thread sleeps on mpt->raid_volumes; we set the shutdown flag, wake
 * it there, then sleep on &mpt->raid_thread, which the exiting thread
 * signals last.  Caller holds the MPT lock.
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	wakeup(mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
655 
/*
 * Completion callback for the bus-rescan CCB issued by the RAID thread:
 * release the path created for the scan, then free the CCB itself.
 */
static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	free(ccb, M_DEVBUF);
}
662 
/*
 * Per-adapter RAID monitor thread.
 *
 * Sleeps until mpt_raid_wakeup() raises raid_wakeup, then refreshes the
 * cached RAID configuration pages.  After the first successful refresh
 * it releases the phydisk SIM queue frozen by mpt_spawn_raid_thread().
 * When events flagged a topology change (raid_rescan), it kicks off an
 * asynchronous wildcard bus scan.  Exits when shutdwn_raid is set,
 * signalling mpt_terminate_raid_thread() via &mpt->raid_thread.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			/* Nothing pending; wait for an event or shutdown. */
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			/* Refresh failed; retry via the periodic timer. */
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_release_simq(mpt->phydisk_sim, TRUE);
			CAMLOCK_2_MPTLOCK(mpt);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			struct cam_path *path;
			int error;

			mpt->raid_rescan = 0;

			/* CCB and path are freed by the rescan callback. */
			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
			error = xpt_create_path(&path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				free(ccb, M_DEVBUF);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				xpt_setup_ccb(&ccb->ccb_h, path, 5);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_action(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
		}
	}
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	kproc_exit(0);
}
727 
#if 0
/*
 * Disabled (unfinished) physical-disk quiesce support.  Kept for
 * reference; none of this is compiled in.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{
	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
/*
 * Begin quiescing I/O to a physical disk: freeze its device queue,
 * issue an asynchronous QUIESCE_PHYS_IO RAID action, and arm a 5-second
 * timeout.  Returns CAM_REQ_CMP if already quiesced, CAM_REQ_INPROG
 * once the quiesce is started, CAM_REQUEUE_REQ while one is pending,
 * or CAM_REQ_CMP_ERR on failure to issue.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
786 
787 /* XXX Ignores that there may be multiple busses/IOCs involved. */
788 cam_status
789 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
790 {
791 	struct mpt_raid_disk *mpt_disk;
792 
793 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
794 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
795 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
796 		*tgt = mpt_disk->config_page.PhysDiskID;
797 		return (0);
798 	}
799 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
800 		 ccb->ccb_h.target_id);
801 	return (-1);
802 }
803 
804 /* XXX Ignores that there may be multiple busses/IOCs involved. */
805 int
806 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
807 {
808 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
809 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
810 
811 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
812 		return (0);
813 	}
814 	ioc_vol = mpt->ioc_page2->RaidVolume;
815 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
816 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
817 		if (ioc_vol->VolumeID == tgt) {
818 			return (1);
819 		}
820 	}
821 	return (0);
822 }
823 
#if 0
/*
 * Disabled helper: enable or disable a RAID volume via a synchronous
 * RAID ACTION, skipping the request entirely when the volume is already
 * in the desired state.  Not compiled in.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		/*
		 * NOTE(review): req is not freed on this path -- presumably
		 * because the firmware may still complete it; confirm
		 * whether this is a deliberate leak before re-enabling.
		 */
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
876 
/*
 * Reconcile a volume's member-disk write-cache-enable (MWCE) setting
 * with the administrator's policy (mpt->raid_mwce_setting) and, for the
 * REBUILD_ONLY policy, with whether a resync is currently in progress.
 * Issues a synchronous CHANGE_VOLUME_SETTINGS RAID action when the
 * hardware state needs to be toggled.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		/* "No change" policy: never touch the hardware setting. */
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Build the desired settings word by toggling the WCE bit in the
	 * cached page, snapshotting it, then toggling it back so the
	 * cache is only updated once the action succeeds (below).
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		/*
		 * NOTE(review): req is not freed on the timeout path --
		 * presumably because the firmware may still complete it;
		 * confirm this is intentional.
		 */
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Success: commit the toggled WCE bit to the cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
959 
/*
 * Reconcile a volume's resync rate and resync-priority setting with the
 * configured mpt->raid_resync_rate.  Two separate RAID actions may be
 * issued: SET_RESYNC_RATE when the rate value itself differs, or
 * CHANGE_VOLUME_SETTINGS when only the high/low priority bit (rate
 * >= 128 means high priority) disagrees.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave whatever the firmware has. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): req is not freed on this path --
			 * presumably because the firmware may still
			 * complete it; confirm this is intentional.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit to build the desired settings
		 * word, snapshot it, then toggle back; the cached page is
		 * only updated once the action succeeds (below).
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Success: commit the toggled priority bit. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1050 
1051 static void
1052 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1053 		       struct cam_path *path)
1054 {
1055 	struct ccb_relsim crs;
1056 
1057 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1058 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1059 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1060 	crs.openings = mpt->raid_queue_depth;
1061 	xpt_action((union ccb *)&crs);
1062 	if (crs.ccb_h.status != CAM_REQ_CMP)
1063 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1064 			    "with CAM status %#x\n", crs.ccb_h.status);
1065 }
1066 
/*
 * Dump a human-readable summary of a RAID volume to the console:
 * its settings word, any hot spare pool membership, and the role,
 * status flags, and state of each member physical disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each bit of the 16-bit settings word, naming known bits. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Pluralize "Pool" when more than one pool bit is set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		/* List the indices of the (up to 8) pools we belong to. */
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Mirrored (IM) members have a primary/secondary role. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		/* Decode the member disk's status flags and state. */
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1174 
1175 static void
1176 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1177 {
1178 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1179 	int rd_bus = cam_sim_bus(mpt->sim);
1180 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1181 	u_int i;
1182 
1183 	disk_pg = &mpt_disk->config_page;
1184 	mpt_disk_prt(mpt, mpt_disk,
1185 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1186 		     device_get_nameunit(mpt->dev), rd_bus,
1187 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1188 		     pt_bus, mpt_disk - mpt->raid_disks);
1189 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1190 		return;
1191 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1192 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1193 		   ? ":" : "s:");
1194 	for (i = 0; i < 8; i++) {
1195 		u_int mask;
1196 
1197 		mask = 0x1 << i;
1198 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1199 			continue;
1200 		mpt_prtc(mpt, " %d", i);
1201 	}
1202 	mpt_prtc(mpt, "\n");
1203 }
1204 
1205 static void
1206 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1207 		      IOC_3_PHYS_DISK *ioc_disk)
1208 {
1209 	int rv;
1210 
1211 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1212 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1213 				 &mpt_disk->config_page.Header,
1214 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1215 	if (rv != 0) {
1216 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1217 			"Failed to read RAID Disk Hdr(%d)\n",
1218 		 	ioc_disk->PhysDiskNum);
1219 		return;
1220 	}
1221 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1222 				   &mpt_disk->config_page.Header,
1223 				   sizeof(mpt_disk->config_page),
1224 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1225 	if (rv != 0)
1226 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1227 			"Failed to read RAID Disk Page(%d)\n",
1228 		 	ioc_disk->PhysDiskNum);
1229 }
1230 
/*
 * Re-read the RAID volume page 0 for mpt_vol, refresh the member-disk
 * bookkeeping, and, when a resync is in progress, fetch the resync
 * progress indicator into mpt_vol->sync_progress.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	/* Refresh the cached page header before reading the page body. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Convert the freshly read page to host byte order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* IM volumes appear to report a 1-based PhysDiskMap,
			 * hence the decrement -- TODO confirm vs. MPI spec. */
			mpt_disk->member_number--;
		}
	}

	/* The progress indicator is only meaningful during a resync. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	/* Ask the IOC for the resync progress indicator structure. */
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	/* Copy the indicator out only if the action fully succeeded. */
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1309 
1310 /*
1311  * Update in-core information about RAID support.  We update any entries
1312  * that didn't previously exists or have been marked as needing to
1313  * be updated by our event handler.  Interesting changes are displayed
1314  * to the console.
1315  */
1316 int
1317 mpt_refresh_raid_data(struct mpt_softc *mpt)
1318 {
1319 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1320 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1321 	IOC_3_PHYS_DISK *ioc_disk;
1322 	IOC_3_PHYS_DISK *ioc_last_disk;
1323 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1324 	size_t len;
1325 	int rv;
1326 	int i;
1327 	u_int nonopt_volumes;
1328 
1329 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1330 		return (0);
1331 	}
1332 
1333 	/*
1334 	 * Mark all items as unreferenced by the configuration.
1335 	 * This allows us to find, report, and discard stale
1336 	 * entries.
1337 	 */
1338 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1339 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1340 	}
1341 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1342 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1343 	}
1344 
1345 	/*
1346 	 * Get Physical Disk information.
1347 	 */
1348 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1349 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1350 				   &mpt->ioc_page3->Header, len,
1351 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1352 	if (rv) {
1353 		mpt_prt(mpt,
1354 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1355 		return (-1);
1356 	}
1357 
1358 	ioc_disk = mpt->ioc_page3->PhysDisk;
1359 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1360 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1361 		struct mpt_raid_disk *mpt_disk;
1362 
1363 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1364 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1365 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1366 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1367 
1368 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1369 
1370 		}
1371 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1372 		mpt->raid_rescan++;
1373 	}
1374 
1375 	/*
1376 	 * Refresh volume data.
1377 	 */
1378 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1379 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1380 				   &mpt->ioc_page2->Header, len,
1381 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1382 	if (rv) {
1383 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1384 			"Failed to read IOC Page 2\n");
1385 		return (-1);
1386 	}
1387 
1388 	ioc_vol = mpt->ioc_page2->RaidVolume;
1389 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1390 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1391 		struct mpt_raid_volume *mpt_vol;
1392 
1393 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1394 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1395 		vol_pg = mpt_vol->config_page;
1396 		if (vol_pg == NULL)
1397 			continue;
1398 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1399 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1400 		 || (vol_pg->VolumeStatus.Flags
1401 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1402 
1403 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1404 		}
1405 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1406 	}
1407 
1408 	nonopt_volumes = 0;
1409 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1410 		struct mpt_raid_volume *mpt_vol;
1411 		uint64_t total;
1412 		uint64_t left;
1413 		int m;
1414 		u_int prio;
1415 
1416 		mpt_vol = &mpt->raid_volumes[i];
1417 
1418 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1419 			continue;
1420 		}
1421 
1422 		vol_pg = mpt_vol->config_page;
1423 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1424 		 == MPT_RVF_ANNOUNCED) {
1425 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1426 			mpt_vol->flags = 0;
1427 			continue;
1428 		}
1429 
1430 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1431 			mpt_announce_vol(mpt, mpt_vol);
1432 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1433 		}
1434 
1435 		if (vol_pg->VolumeStatus.State !=
1436 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1437 			nonopt_volumes++;
1438 
1439 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1440 			continue;
1441 
1442 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1443 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1444 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1445 		mpt_verify_mwce(mpt, mpt_vol);
1446 
1447 		if (vol_pg->VolumeStatus.Flags == 0) {
1448 			continue;
1449 		}
1450 
1451 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1452 		for (m = 1; m <= 0x80; m <<= 1) {
1453 			switch (vol_pg->VolumeStatus.Flags & m) {
1454 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1455 				mpt_prtc(mpt, " Enabled");
1456 				break;
1457 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1458 				mpt_prtc(mpt, " Quiesced");
1459 				break;
1460 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1461 				mpt_prtc(mpt, " Re-Syncing");
1462 				break;
1463 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1464 				mpt_prtc(mpt, " Inactive");
1465 				break;
1466 			default:
1467 				break;
1468 			}
1469 		}
1470 		mpt_prtc(mpt, " )\n");
1471 
1472 		if ((vol_pg->VolumeStatus.Flags
1473 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1474 			continue;
1475 
1476 		mpt_verify_resync_rate(mpt, mpt_vol);
1477 
1478 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1479 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1480 		if (vol_pg->ResyncRate != 0) {
1481 
1482 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1483 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1484 			    prio / 1000, prio % 1000);
1485 		} else {
1486 			prio = vol_pg->VolumeSettings.Settings
1487 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1488 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1489 			    prio ? "High" : "Low");
1490 		}
1491 #if __FreeBSD_version >= 500000
1492 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1493 			    "blocks remaining\n", (uintmax_t)left,
1494 			    (uintmax_t)total);
1495 #else
1496 		mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1497 			    "blocks remaining\n", (uint64_t)left,
1498 			    (uint64_t)total);
1499 #endif
1500 
1501 		/* Periodically report on sync progress. */
1502 		mpt_schedule_raid_refresh(mpt);
1503 	}
1504 
1505 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1506 		struct mpt_raid_disk *mpt_disk;
1507 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1508 		int m;
1509 
1510 		mpt_disk = &mpt->raid_disks[i];
1511 		disk_pg = &mpt_disk->config_page;
1512 
1513 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1514 			continue;
1515 
1516 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1517 		 == MPT_RDF_ANNOUNCED) {
1518 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1519 			mpt_disk->flags = 0;
1520 			mpt->raid_rescan++;
1521 			continue;
1522 		}
1523 
1524 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1525 
1526 			mpt_announce_disk(mpt, mpt_disk);
1527 			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1528 		}
1529 
1530 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1531 			continue;
1532 
1533 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1534 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1535 		if (disk_pg->PhysDiskStatus.Flags == 0)
1536 			continue;
1537 
1538 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1539 		for (m = 1; m <= 0x80; m <<= 1) {
1540 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1541 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1542 				mpt_prtc(mpt, " Out-Of-Sync");
1543 				break;
1544 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1545 				mpt_prtc(mpt, " Quiesced");
1546 				break;
1547 			default:
1548 				break;
1549 			}
1550 		}
1551 		mpt_prtc(mpt, " )\n");
1552 	}
1553 
1554 	mpt->raid_nonopt_volumes = nonopt_volumes;
1555 	return (0);
1556 }
1557 
/*
 * callout(9) handler for the periodic RAID status timer; wakes the
 * RAID thread under the MPT lock.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *softc;

	softc = (struct mpt_softc *)arg;
	MPT_LOCK(softc);
	mpt_raid_wakeup(softc);
	MPT_UNLOCK(softc);
}
1568 
/*
 * Arm (or re-arm) the periodic RAID status timer; mpt_raid_timer()
 * wakes the RAID thread when it fires.
 */
void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{
	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1575 
1576 void
1577 mpt_raid_free_mem(struct mpt_softc *mpt)
1578 {
1579 
1580 	if (mpt->raid_volumes) {
1581 		struct mpt_raid_volume *mpt_raid;
1582 		int i;
1583 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1584 			mpt_raid = &mpt->raid_volumes[i];
1585 			if (mpt_raid->config_page) {
1586 				free(mpt_raid->config_page, M_DEVBUF);
1587 				mpt_raid->config_page = NULL;
1588 			}
1589 		}
1590 		free(mpt->raid_volumes, M_DEVBUF);
1591 		mpt->raid_volumes = NULL;
1592 	}
1593 	if (mpt->raid_disks) {
1594 		free(mpt->raid_disks, M_DEVBUF);
1595 		mpt->raid_disks = NULL;
1596 	}
1597 	if (mpt->ioc_page2) {
1598 		free(mpt->ioc_page2, M_DEVBUF);
1599 		mpt->ioc_page2 = NULL;
1600 	}
1601 	if (mpt->ioc_page3) {
1602 		free(mpt->ioc_page3, M_DEVBUF);
1603 		mpt->ioc_page3 = NULL;
1604 	}
1605 	mpt->raid_max_volumes =  0;
1606 	mpt->raid_max_disks =  0;
1607 }
1608 
1609 #if __FreeBSD_version >= 500000
1610 static int
1611 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1612 {
1613 	struct mpt_raid_volume *mpt_vol;
1614 
1615 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1616 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1617 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1618 		return (EINVAL);
1619 
1620 	MPT_LOCK(mpt);
1621 	mpt->raid_resync_rate = rate;
1622 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1623 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1624 			continue;
1625 		}
1626 		mpt_verify_resync_rate(mpt, mpt_vol);
1627 	}
1628 	MPT_UNLOCK(mpt);
1629 	return (0);
1630 }
1631 
/*
 * Set the default number of concurrent openings (1-255) for RAID
 * volumes and apply it to every currently active volume.
 * Returns 0 on success or EINVAL for an out-of-range depth.
 */
static int
mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
{
	struct mpt_raid_volume *mpt_vol;

	if (vol_queue_depth > 255 || vol_queue_depth < 1)
		return (EINVAL);

	MPT_LOCK(mpt);
	mpt->raid_queue_depth = vol_queue_depth;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		struct cam_path *path;
		int error;

		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
			continue;

		/* NOTE(review): resets the pending-rescan counter on each
		 * active volume iteration -- confirm this belongs inside
		 * the loop rather than before it. */
		mpt->raid_rescan = 0;

		/* Build a path to the volume while holding the CAM lock. */
		MPTLOCK_2_CAMLOCK(mpt);
		error = xpt_create_path(&path, xpt_periph,
					cam_sim_path(mpt->sim),
					mpt_vol->config_page->VolumeID,
					/*lun*/0);
		if (error != CAM_REQ_CMP) {
			CAMLOCK_2_MPTLOCK(mpt);
			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
			continue;
		}
		mpt_adjust_queue_depth(mpt, mpt_vol, path);
		xpt_free_path(path);
		CAMLOCK_2_MPTLOCK(mpt);
	}
	MPT_UNLOCK(mpt);
	return (0);
}
1668 
1669 static int
1670 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1671 {
1672 	struct mpt_raid_volume *mpt_vol;
1673 	int force_full_resync;
1674 
1675 	MPT_LOCK(mpt);
1676 	if (mwce == mpt->raid_mwce_setting) {
1677 		MPT_UNLOCK(mpt);
1678 		return (0);
1679 	}
1680 
1681 	/*
1682 	 * Catch MWCE being left on due to a failed shutdown.  Since
1683 	 * sysctls cannot be set by the loader, we treat the first
1684 	 * setting of this varible specially and force a full volume
1685 	 * resync if MWCE is enabled and a resync is in progress.
1686 	 */
1687 	force_full_resync = 0;
1688 	if (mpt->raid_mwce_set == 0
1689 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1690 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1691 		force_full_resync = 1;
1692 
1693 	mpt->raid_mwce_setting = mwce;
1694 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1695 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1696 		int resyncing;
1697 		int mwce;
1698 
1699 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1700 			continue;
1701 
1702 		vol_pg = mpt_vol->config_page;
1703 		resyncing = vol_pg->VolumeStatus.Flags
1704 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1705 		mwce = vol_pg->VolumeSettings.Settings
1706 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1707 		if (force_full_resync && resyncing && mwce) {
1708 
1709 			/*
1710 			 * XXX disable/enable volume should force a resync,
1711 			 *     but we'll need to queice, drain, and restart
1712 			 *     I/O to do that.
1713 			 */
1714 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1715 				    "detected.  Suggest full resync.\n");
1716 		}
1717 		mpt_verify_mwce(mpt, mpt_vol);
1718 	}
1719 	mpt->raid_mwce_set = 1;
1720 	MPT_UNLOCK(mpt);
1721 	return (0);
1722 }
/*
 * Textual names for the mpt_raid_mwce_t policy values, indexed by the
 * enum value; used by the "vol_member_wce" sysctl handler below.
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1730 
/*
 * Sysctl handler for "vol_member_wce".  Reports the current member
 * write-cache-enable policy as a string and accepts any of the
 * keywords in mpt_vol_mwce_strs[] to change it.
 */
static int
mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
{
	char inbuf[20];
	struct mpt_softc *mpt;
	const char *str;
	int error;
	u_int size;
	u_int i;

	GIANT_REQUIRED;

	mpt = (struct mpt_softc *)arg1;
	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
	/* Report the current setting, including its NUL terminator. */
	error = SYSCTL_OUT(req, str, strlen(str) + 1);
	if (error || !req->newptr) {
		/* Read-only request or copyout failure; we're done. */
		return (error);
	}

	/* Reject new values too long to be one of our keywords. */
	size = req->newlen - req->newidx;
	if (size >= sizeof(inbuf)) {
		return (EINVAL);
	}

	error = SYSCTL_IN(req, inbuf, size);
	if (error) {
		return (error);
	}
	inbuf[size] = '\0';
	/* Match the input against the keyword table; index == setting. */
	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
			return (mpt_raid_set_vol_mwce(mpt, i));
		}
	}
	return (EINVAL);
}
1767 
1768 static int
1769 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1770 {
1771 	struct mpt_softc *mpt;
1772 	u_int raid_resync_rate;
1773 	int error;
1774 
1775 	GIANT_REQUIRED;
1776 
1777 	mpt = (struct mpt_softc *)arg1;
1778 	raid_resync_rate = mpt->raid_resync_rate;
1779 
1780 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1781 	if (error || !req->newptr) {
1782 		return error;
1783 	}
1784 
1785 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1786 }
1787 
1788 static int
1789 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1790 {
1791 	struct mpt_softc *mpt;
1792 	u_int raid_queue_depth;
1793 	int error;
1794 
1795 	GIANT_REQUIRED;
1796 
1797 	mpt = (struct mpt_softc *)arg1;
1798 	raid_queue_depth = mpt->raid_queue_depth;
1799 
1800 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1801 	if (error || !req->newptr) {
1802 		return error;
1803 	}
1804 
1805 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1806 }
1807 
/*
 * Register the per-controller RAID tuning sysctls under the device's
 * sysctl tree: member WCE policy, default volume queue depth, volume
 * resync rate, and a read-only count of non-optimal volumes.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1833 #endif
1834