xref: /freebsd/sys/dev/mpt/mpt_raid.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2005, WHEEL Sp. z o.o.
7  * Copyright (c) 2005 Justin T. Gibbs.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon including
18  *    a substantially similar Disclaimer requirement for further binary
19  *    redistribution.
20  * 3. Neither the names of the above listed copyright holders nor the names
21  *    of any contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Some Breakage and Bug Fixing added later.
38  * Copyright (c) 2006, by Matthew Jacob
39  * All Rights Reserved
40  *
41  * Support from LSI-Logic has also gone a great deal toward making this a
42  * workable subsystem and is gratefully acknowledged.
43  */
44 
45 #include <sys/cdefs.h>
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_xpt_sim.h>
57 
58 #include <sys/callout.h>
59 #include <sys/kthread.h>
60 #include <sys/sysctl.h>
61 
62 #include <machine/stdarg.h>
63 
64 struct mpt_raid_action_result
65 {
66 	union {
67 		MPI_RAID_VOL_INDICATOR	indicator_struct;
68 		uint32_t		new_settings;
69 		uint8_t			phys_disk_num;
70 	} action_data;
71 	uint16_t			action_status;
72 };
73 
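/*
 * mpt_raid_reply_frame_handler() stashes the parsed action status and
 * action data just past the RAID action request in the request's
 * virtual buffer; REQ_TO_RAID_ACTION_RESULT() recovers that area from a
 * completed request.  REQ_IOCSTATUS() masks off the log-info-available
 * flag so the cached IOCStatus can be compared directly against
 * MPI_IOCSTATUS_* codes.
 */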
74 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
75 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
76 
77 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
78 
79 static mpt_probe_handler_t	mpt_raid_probe;
80 static mpt_attach_handler_t	mpt_raid_attach;
81 static mpt_enable_handler_t	mpt_raid_enable;
82 static mpt_event_handler_t	mpt_raid_event;
83 static mpt_shutdown_handler_t	mpt_raid_shutdown;
84 static mpt_reset_handler_t	mpt_raid_ioc_reset;
85 static mpt_detach_handler_t	mpt_raid_detach;
86 
87 static struct mpt_personality mpt_raid_personality =
88 {
89 	.name		= "mpt_raid",
90 	.probe		= mpt_raid_probe,
91 	.attach		= mpt_raid_attach,
92 	.enable		= mpt_raid_enable,
93 	.event		= mpt_raid_event,
94 	.reset		= mpt_raid_ioc_reset,
95 	.shutdown	= mpt_raid_shutdown,
96 	.detach		= mpt_raid_detach,
97 };
98 
99 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
100 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
101 
102 static mpt_reply_handler_t mpt_raid_reply_handler;
103 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
104 					MSG_DEFAULT_REPLY *reply_frame);
105 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
106 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
107 static void mpt_raid_thread(void *arg);
108 static callout_func_t mpt_raid_timer;
109 #if 0
110 static void mpt_enable_vol(struct mpt_softc *mpt,
111 			   struct mpt_raid_volume *mpt_vol, int enable);
112 #endif
113 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
114 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
115     struct cam_path *);
116 static void mpt_raid_sysctl_attach(struct mpt_softc *);
117 
118 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
119 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
120 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
121 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
122     const char *fmt, ...);
123 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
124     const char *fmt, ...);
125 
126 static int mpt_issue_raid_req(struct mpt_softc *mpt,
127     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
128     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
129     int write, int wait);
130 
131 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
132 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
133 
134 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
135 
136 static const char *
137 mpt_vol_type(struct mpt_raid_volume *vol)
138 {
139 	switch (vol->config_page->VolumeType) {
140 	case MPI_RAID_VOL_TYPE_IS:
141 		return ("RAID-0");
142 	case MPI_RAID_VOL_TYPE_IME:
143 		return ("RAID-1E");
144 	case MPI_RAID_VOL_TYPE_IM:
145 		return ("RAID-1");
146 	default:
147 		return ("Unknown");
148 	}
149 }
150 
151 static const char *
152 mpt_vol_state(struct mpt_raid_volume *vol)
153 {
154 	switch (vol->config_page->VolumeStatus.State) {
155 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
156 		return ("Optimal");
157 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
158 		return ("Degraded");
159 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
160 		return ("Failed");
161 	default:
162 		return ("Unknown");
163 	}
164 }
165 
166 static const char *
167 mpt_disk_state(struct mpt_raid_disk *disk)
168 {
169 	switch (disk->config_page.PhysDiskStatus.State) {
170 	case MPI_PHYSDISK0_STATUS_ONLINE:
171 		return ("Online");
172 	case MPI_PHYSDISK0_STATUS_MISSING:
173 		return ("Missing");
174 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
175 		return ("Incompatible");
176 	case MPI_PHYSDISK0_STATUS_FAILED:
177 		return ("Failed");
178 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
179 		return ("Initializing");
180 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
181 		return ("Offline Requested");
182 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
183 		return ("Failed per Host Request");
184 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
185 		return ("Offline");
186 	default:
187 		return ("Unknown");
188 	}
189 }
190 
191 static void
192 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
193 	    const char *fmt, ...)
194 {
195 	va_list ap;
196 
197 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
198 	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
199 	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
200 	va_start(ap, fmt);
201 	vprintf(fmt, ap);
202 	va_end(ap);
203 }
204 
205 static void
206 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
207 	     const char *fmt, ...)
208 {
209 	va_list ap;
210 
211 	if (disk->volume != NULL) {
212 		printf("(%s:vol%d:%d): ",
213 		       device_get_nameunit(mpt->dev),
214 		       disk->volume->config_page->VolumeID,
215 		       disk->member_number);
216 	} else {
217 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
218 		       disk->config_page.PhysDiskBus,
219 		       disk->config_page.PhysDiskID);
220 	}
221 	va_start(ap, fmt);
222 	vprintf(fmt, ap);
223 	va_end(ap);
224 }
225 
226 static void
227 mpt_raid_async(void *callback_arg, u_int32_t code,
228 	       struct cam_path *path, void *arg)
229 {
230 	struct mpt_softc *mpt;
231 
232 	mpt = (struct mpt_softc*)callback_arg;
233 	switch (code) {
234 	case AC_FOUND_DEVICE:
235 	{
236 		struct ccb_getdev *cgd;
237 		struct mpt_raid_volume *mpt_vol;
238 
239 		cgd = (struct ccb_getdev *)arg;
240 		if (cgd == NULL) {
241 			break;
242 		}
243 
244 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
245 			 cgd->ccb_h.target_id);
246 
247 		RAID_VOL_FOREACH(mpt, mpt_vol) {
248 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
249 				continue;
250 
251 			if (mpt_vol->config_page->VolumeID
252 			 == cgd->ccb_h.target_id) {
253 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
254 				break;
255 			}
256 		}
257 	}
258 	default:
259 		break;
260 	}
261 }
262 
263 static int
264 mpt_raid_probe(struct mpt_softc *mpt)
265 {
266 
267 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
268 		return (ENODEV);
269 	}
270 	return (0);
271 }
272 
273 static int
274 mpt_raid_attach(struct mpt_softc *mpt)
275 {
276 	struct ccb_setasync csa;
277 	mpt_handler_t	 handler;
278 	int		 error;
279 
280 	mpt_callout_init(mpt, &mpt->raid_timer);
281 
282 	error = mpt_spawn_raid_thread(mpt);
283 	if (error != 0) {
284 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
285 		goto cleanup;
286 	}
287 
288 	MPT_LOCK(mpt);
289 	handler.reply_handler = mpt_raid_reply_handler;
290 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
291 				     &raid_handler_id);
292 	if (error != 0) {
293 		mpt_prt(mpt, "Unable to register RAID handler!\n");
294 		goto cleanup;
295 	}
296 
297 	memset(&csa, 0, sizeof(csa));
298 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
299 	csa.ccb_h.func_code = XPT_SASYNC_CB;
300 	csa.event_enable = AC_FOUND_DEVICE;
301 	csa.callback = mpt_raid_async;
302 	csa.callback_arg = mpt;
303 	xpt_action((union ccb *)&csa);
304 	if (csa.ccb_h.status != CAM_REQ_CMP) {
305 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
306 			"CAM async handler.\n");
307 	}
308 	MPT_UNLOCK(mpt);
309 
310 	mpt_raid_sysctl_attach(mpt);
311 	return (0);
312 cleanup:
313 	MPT_UNLOCK(mpt);
314 	mpt_raid_detach(mpt);
315 	return (error);
316 }
317 
318 static int
319 mpt_raid_enable(struct mpt_softc *mpt)
320 {
321 
322 	return (0);
323 }
324 
325 static void
326 mpt_raid_detach(struct mpt_softc *mpt)
327 {
328 	struct ccb_setasync csa;
329 	mpt_handler_t handler;
330 
331 	mpt_callout_drain(mpt, &mpt->raid_timer);
332 
333 	MPT_LOCK(mpt);
334 	mpt_terminate_raid_thread(mpt);
335 	handler.reply_handler = mpt_raid_reply_handler;
336 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
337 			       raid_handler_id);
338 	memset(&csa, 0, sizeof(csa));
339 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
340 	csa.ccb_h.func_code = XPT_SASYNC_CB;
341 	csa.event_enable = 0;
342 	csa.callback = mpt_raid_async;
343 	csa.callback_arg = mpt;
344 	xpt_action((union ccb *)&csa);
345 	MPT_UNLOCK(mpt);
346 }
347 
348 static void
349 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
350 {
351 
352 	/* Nothing to do yet. */
353 }
354 
355 static const char *raid_event_txt[] =
356 {
357 	"Volume Created",
358 	"Volume Deleted",
359 	"Volume Settings Changed",
360 	"Volume Status Changed",
361 	"Volume Physical Disk Membership Changed",
362 	"Physical Disk Created",
363 	"Physical Disk Deleted",
364 	"Physical Disk Settings Changed",
365 	"Physical Disk Status Changed",
366 	"Domain Validation Required",
367 	"SMART Data Received",
368 	"Replace Action Started",
369 };
370 
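/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications from the IOC.  The
 * affected volume and/or physical disk is located, its cached state is
 * marked stale and a bus rescan is requested as appropriate, the event
 * is logged, and the RAID thread is woken to refresh our data.
 */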
371 static int
372 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
373 	       MSG_EVENT_NOTIFY_REPLY *msg)
374 {
375 	EVENT_DATA_RAID *raid_event;
376 	struct mpt_raid_volume *mpt_vol;
377 	struct mpt_raid_disk *mpt_disk;
378 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
379 	int i;
380 	int print_event;
381 
382 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
383 		return (0);
384 	}
385 
386 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
387 
388 	mpt_vol = NULL;
389 	vol_pg = NULL;
390 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
391 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
392 			mpt_vol = &mpt->raid_volumes[i];
393 			vol_pg = mpt_vol->config_page;
394 
395 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
396 				continue;
397 
398 			if (vol_pg->VolumeID == raid_event->VolumeID
399 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
400 				break;
401 		}
402 		if (i >= mpt->ioc_page2->MaxVolumes) {
403 			mpt_vol = NULL;
404 			vol_pg = NULL;
405 		}
406 	}
407 
408 	mpt_disk = NULL;
409 	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
410 		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
411 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
412 			mpt_disk = NULL;
413 		}
414 	}
415 
416 	print_event = 1;
417 	switch(raid_event->ReasonCode) {
418 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
419 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
420 		break;
421 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
422 		if (mpt_vol != NULL) {
423 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
424 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
425 			} else {
426 				/*
427 				 * Coalesce status messages into one
428 				 * per background run of our RAID thread.
429 				 * This removes "spurious" status messages
430 				 * from our output.
431 				 */
432 				print_event = 0;
433 			}
434 		}
435 		break;
436 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
437 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
438 		mpt->raid_rescan++;
439 		if (mpt_vol != NULL) {
440 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
441 		}
442 		break;
443 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
444 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
445 		mpt->raid_rescan++;
446 		break;
447 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
448 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
449 		mpt->raid_rescan++;
450 		if (mpt_disk != NULL) {
451 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
452 		}
453 		break;
454 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
455 		mpt->raid_rescan++;
456 		break;
457 	case MPI_EVENT_RAID_RC_SMART_DATA:
458 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
459 		break;
460 	}
461 
462 	if (print_event) {
463 		if (mpt_disk != NULL) {
464 			mpt_disk_prt(mpt, mpt_disk, "");
465 		} else if (mpt_vol != NULL) {
466 			mpt_vol_prt(mpt, mpt_vol, "");
467 		} else {
468 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
469 				raid_event->VolumeID);
470 
471 			if (raid_event->PhysDiskNum != 0xFF)
472 				mpt_prtc(mpt, ":%d): ",
473 					 raid_event->PhysDiskNum);
474 			else
475 				mpt_prtc(mpt, "): ");
476 		}
477 
478 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
479 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
480 				 raid_event->ReasonCode);
481 		else
482 			mpt_prtc(mpt, "%s\n",
483 				 raid_event_txt[raid_event->ReasonCode]);
484 	}
485 
486 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
487 		/* XXX Use CAM's print sense for this... */
488 		if (mpt_disk != NULL)
489 			mpt_disk_prt(mpt, mpt_disk, "");
490 		else
491 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
492 			    raid_event->VolumeBus, raid_event->VolumeID,
493 			    raid_event->PhysDiskNum);
494 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
495 			 raid_event->ASC, raid_event->ASCQ);
496 	}
497 
498 	mpt_raid_wakeup(mpt);
499 	return (1);
500 }
501 
502 static void
503 mpt_raid_shutdown(struct mpt_softc *mpt)
504 {
505 	struct mpt_raid_volume *mpt_vol;
506 
507 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
508 		return;
509 	}
510 
511 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
512 	RAID_VOL_FOREACH(mpt, mpt_vol) {
513 		mpt_verify_mwce(mpt, mpt_vol);
514 	}
515 }
516 
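/*
 * Completion handler for requests issued with the RAID handler id.
 * Parse any reply frame, mark the request done, and either wake a
 * waiter or free the request.
 */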
517 static int
518 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
519     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
520 {
521 	int free_req;
522 
523 	if (req == NULL)
524 		return (TRUE);
525 
526 	free_req = TRUE;
527 	if (reply_frame != NULL)
528 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
529 #ifdef NOTYET
530 	else if (req->ccb != NULL) {
531 		/* Complete Quiesce CCB with error... */
532 	}
533 #endif
534 
535 	req->state &= ~REQ_STATE_QUEUED;
536 	req->state |= REQ_STATE_DONE;
537 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
538 
539 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
540 		wakeup(req);
541 	} else if (free_req) {
542 		mpt_free_request(mpt, req);
543 	}
544 
545 	return (TRUE);
546 }
547 
548 /*
549  * Parse additional completion information in the reply
550  * frame for RAID I/O requests.
551  */
552 static int
553 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
554     MSG_DEFAULT_REPLY *reply_frame)
555 {
556 	MSG_RAID_ACTION_REPLY *reply;
557 	struct mpt_raid_action_result *action_result;
558 	MSG_RAID_ACTION_REQUEST *rap;
559 
560 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
561 	req->IOCStatus = le16toh(reply->IOCStatus);
562 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
563 
564 	switch (rap->Action) {
565 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
566 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
567 		break;
568 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
569 		mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
570 		break;
571 	default:
572 		break;
573 	}
574 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
575 	memcpy(&action_result->action_data, &reply->ActionData,
576 	    sizeof(action_result->action_data));
577 	action_result->action_status = le16toh(reply->ActionStatus);
578 	return (TRUE);
579 }
580 
581 /*
582  * Utility routine to perform a RAID action command.
583  */
584 static int
585 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
586 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
587 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
588 		   int write, int wait)
589 {
590 	MSG_RAID_ACTION_REQUEST *rap;
591 	SGE_SIMPLE32 *se;
592 
593 	rap = req->req_vbuf;
594 	memset(rap, 0, sizeof *rap);
595 	rap->Action = Action;
596 	rap->ActionDataWord = htole32(ActionDataWord);
597 	rap->Function = MPI_FUNCTION_RAID_ACTION;
598 	rap->VolumeID = vol->config_page->VolumeID;
599 	rap->VolumeBus = vol->config_page->VolumeBus;
600 	if (disk != NULL)
601 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
602 	else
603 		rap->PhysDiskNum = 0xFF;
604 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
605 	se->Address = htole32(addr);
606 	MPI_pSGE_SET_LENGTH(se, len);
607 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
608 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
609 	    MPI_SGE_FLAGS_END_OF_LIST |
610 	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
611 	se->FlagsLength = htole32(se->FlagsLength);
612 	rap->MsgContext = htole32(req->index | raid_handler_id);
613 
614 	mpt_check_doorbell(mpt);
615 	mpt_send_cmd(mpt, req);
616 
617 	if (wait) {
618 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
619 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
620 	} else {
621 		return (0);
622 	}
623 }
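
/*
 * Typical calling pattern for mpt_issue_raid_req() (a sketch mirroring
 * mpt_verify_mwce() and mpt_verify_resync_rate() below, not additional
 * driver code): allocate a request, issue the action with wait set,
 * then check both the IOC status and the action status before freeing
 * the request.
 *
 *	req = mpt_get_request(mpt, TRUE);
 *	if (req == NULL)
 *		return;
 *	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
 *	    MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS, data,
 *	    0, 0, FALSE, TRUE);
 *	ar = REQ_TO_RAID_ACTION_RESULT(req);
 *	if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS ||
 *	    ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)
 *		mpt_vol_prt(mpt, mpt_vol, "action failed\n");
 *	mpt_free_request(mpt, req);
 */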
624 
625 /*************************** RAID Status Monitoring ***************************/
626 static int
627 mpt_spawn_raid_thread(struct mpt_softc *mpt)
628 {
629 	int error;
630 
631 	/*
632 	 * Freeze out any CAM transactions until our thread
633 	 * is able to run at least once.  We need to update
634 	 * our RAID pages before accepting I/O or we may
635 	 * reject I/O to an ID we later determine is for a
636 	 * hidden physdisk.
637 	 */
638 	MPT_LOCK(mpt);
639 	xpt_freeze_simq(mpt->phydisk_sim, 1);
640 	MPT_UNLOCK(mpt);
641 	error = kproc_create(mpt_raid_thread, mpt,
642 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
643 	    "mpt_raid%d", mpt->unit);
644 	if (error != 0) {
645 		MPT_LOCK(mpt);
646 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
647 		MPT_UNLOCK(mpt);
648 	}
649 	return (error);
650 }
651 
652 static void
653 mpt_terminate_raid_thread(struct mpt_softc *mpt)
654 {
655 
656 	if (mpt->raid_thread == NULL) {
657 		return;
658 	}
659 	mpt->shutdwn_raid = 1;
660 	wakeup(&mpt->raid_volumes);
661 	/*
662 	 * Sleep on a slightly different location
663 	 * for this interlock just for added safety.
664 	 */
665 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
666 }
667 
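/*
 * Main loop of the mpt_raid%d kthread: sleep until woken, refresh the
 * cached RAID data, release the frozen physical-disk SIM queue after
 * the first successful refresh, and rescan the physical disk bus when
 * a configuration change has been noted.
 */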
668 static void
669 mpt_raid_thread(void *arg)
670 {
671 	struct mpt_softc *mpt;
672 	int firstrun;
673 
674 	mpt = (struct mpt_softc *)arg;
675 	firstrun = 1;
676 	MPT_LOCK(mpt);
677 	while (mpt->shutdwn_raid == 0) {
678 		if (mpt->raid_wakeup == 0) {
679 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
680 			continue;
681 		}
682 
683 		mpt->raid_wakeup = 0;
684 
685 		if (mpt_refresh_raid_data(mpt)) {
686 			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
687 			continue;
688 		}
689 
690 		/*
691 		 * Now that we have our first snapshot of RAID data,
692 		 * allow CAM to access our physical disk bus.
693 		 */
694 		if (firstrun) {
695 			firstrun = 0;
696 			xpt_release_simq(mpt->phydisk_sim, TRUE);
697 		}
698 
699 		if (mpt->raid_rescan != 0) {
700 			union ccb *ccb;
701 			int error;
702 
703 			mpt->raid_rescan = 0;
704 			MPT_UNLOCK(mpt);
705 
706 			ccb = xpt_alloc_ccb();
707 
708 			MPT_LOCK(mpt);
709 			error = xpt_create_path(&ccb->ccb_h.path, NULL,
710 			    cam_sim_path(mpt->phydisk_sim),
711 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
712 			if (error != CAM_REQ_CMP) {
713 				xpt_free_ccb(ccb);
714 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
715 			} else {
716 				xpt_rescan(ccb);
717 			}
718 		}
719 	}
720 	mpt->raid_thread = NULL;
721 	wakeup(&mpt->raid_thread);
722 	MPT_UNLOCK(mpt);
723 	kproc_exit(0);
724 }
725 
726 #if 0
727 static void
728 mpt_raid_quiesce_timeout(void *arg)
729 {
730 
731 	/* Complete the CCB with error */
732 	/* COWWWW */
733 }
734 
735 static timeout_t mpt_raid_quiesce_timeout;
736 cam_status
737 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
738 		      request_t *req)
739 {
740 	union ccb *ccb;
741 
742 	ccb = req->ccb;
743 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
744 		return (CAM_REQ_CMP);
745 
746 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
747 		int rv;
748 
749 		mpt_disk->flags |= MPT_RDF_QUIESCING;
750 		xpt_freeze_devq(ccb->ccb_h.path, 1);
751 
752 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
753 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
754 					/*ActionData*/0, /*addr*/0,
755 					/*len*/0, /*write*/FALSE,
756 					/*wait*/FALSE);
757 		if (rv != 0)
758 			return (CAM_REQ_CMP_ERR);
759 
760 		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
761 #if 0
762 		if (rv == ETIMEDOUT) {
763 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
764 				     "Quiesce Timed-out\n");
765 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
766 			return (CAM_REQ_CMP_ERR);
767 		}
768 
769 		ar = REQ_TO_RAID_ACTION_RESULT(req);
770 		if (rv != 0
771 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
772 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
773 			mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
774 				    "%d:%x:%x\n", rv, req->IOCStatus,
775 				    ar->action_status);
776 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
777 			return (CAM_REQ_CMP_ERR);
778 		}
779 #endif
780 		return (CAM_REQ_INPROG);
781 	}
782 	return (CAM_REQUEUE_REQ);
783 }
784 #endif
785 
786 /* XXX Ignores that there may be multiple buses/IOCs involved. */
787 cam_status
788 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
789 {
790 	struct mpt_raid_disk *mpt_disk;
791 
792 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
793 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
794 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
795 		*tgt = mpt_disk->config_page.PhysDiskID;
796 		return (0);
797 	}
798 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
799 		 ccb->ccb_h.target_id);
800 	return (-1);
801 }
802 
803 /* XXX Ignores that there may be multiple buses/IOCs involved. */
804 int
805 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
806 {
807 	struct mpt_raid_disk *mpt_disk;
808 	int i;
809 
810 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
811 		return (0);
812 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
813 		mpt_disk = &mpt->raid_disks[i];
814 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
815 		    mpt_disk->config_page.PhysDiskID == tgt)
816 			return (1);
817 	}
818 	return (0);
819 
820 }
821 
822 /* XXX Ignores that there may be multiple buses/IOCs involved. */
823 int
824 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
825 {
826 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
827 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
828 
829 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
830 		return (0);
831 	}
832 	ioc_vol = mpt->ioc_page2->RaidVolume;
833 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
834 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
835 		if (ioc_vol->VolumeID == tgt) {
836 			return (1);
837 		}
838 	}
839 	return (0);
840 }
841 
842 #if 0
843 static void
844 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
845 	       int enable)
846 {
847 	request_t *req;
848 	struct mpt_raid_action_result *ar;
849 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
850 	int enabled;
851 	int rv;
852 
853 	vol_pg = mpt_vol->config_page;
854 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
855 
856 	/*
857 	 * If the setting matches the configuration,
858 	 * there is nothing to do.
859 	 */
860 	if ((enabled && enable)
861 	 || (!enabled && !enable))
862 		return;
863 
864 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
865 	if (req == NULL) {
866 		mpt_vol_prt(mpt, mpt_vol,
867 			    "mpt_enable_vol: Get request failed!\n");
868 		return;
869 	}
870 
871 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
872 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
873 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
874 				/*data*/0, /*addr*/0, /*len*/0,
875 				/*write*/FALSE, /*wait*/TRUE);
876 	if (rv == ETIMEDOUT) {
877 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
878 			    "%s Volume Timed-out\n",
879 			    enable ? "Enable" : "Disable");
880 		return;
881 	}
882 	ar = REQ_TO_RAID_ACTION_RESULT(req);
883 	if (rv != 0
884 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
885 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
886 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
887 			    enable ? "Enable" : "Disable",
888 			    rv, req->IOCStatus, ar->action_status);
889 	}
890 
891 	mpt_free_request(mpt, req);
892 }
893 #endif
894 
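/*
 * Reconcile a volume's member write cache enable (MWCE) setting with
 * the policy selected via the vol_member_wce sysctl, issuing a
 * CHANGE_VOLUME_SETTINGS RAID action when they differ.
 */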
895 static void
896 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
897 {
898 	request_t *req;
899 	struct mpt_raid_action_result *ar;
900 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
901 	uint32_t data;
902 	int rv;
903 	int resyncing;
904 	int mwce;
905 
906 	vol_pg = mpt_vol->config_page;
907 	resyncing = vol_pg->VolumeStatus.Flags
908 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
909 	mwce = vol_pg->VolumeSettings.Settings
910 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
911 
912 	/*
913 	 * If the setting matches the configuration,
914 	 * there is nothing to do.
915 	 */
916 	switch (mpt->raid_mwce_setting) {
917 	case MPT_RAID_MWCE_REBUILD_ONLY:
918 		if ((resyncing && mwce) || (!resyncing && !mwce)) {
919 			return;
920 		}
921 		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
922 		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
923 			/*
924 			 * Wait one more status update to see if
925 			 * resyncing gets enabled.  It gets disabled
926 			 * temporarily when WCE is changed.
927 			 */
928 			return;
929 		}
930 		break;
931 	case MPT_RAID_MWCE_ON:
932 		if (mwce)
933 			return;
934 		break;
935 	case MPT_RAID_MWCE_OFF:
936 		if (!mwce)
937 			return;
938 		break;
939 	case MPT_RAID_MWCE_NC:
940 		return;
941 	}
942 
943 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
944 	if (req == NULL) {
945 		mpt_vol_prt(mpt, mpt_vol,
946 			    "mpt_verify_mwce: Get request failed!\n");
947 		return;
948 	}
949 
950 	vol_pg->VolumeSettings.Settings ^=
951 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
952 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
953 	vol_pg->VolumeSettings.Settings ^=
954 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
955 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
956 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
957 				data, /*addr*/0, /*len*/0,
958 				/*write*/FALSE, /*wait*/TRUE);
959 	if (rv == ETIMEDOUT) {
960 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
961 			    "Write Cache Enable Timed-out\n");
962 		return;
963 	}
964 	ar = REQ_TO_RAID_ACTION_RESULT(req);
965 	if (rv != 0
966 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
967 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
968 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
969 			    "%d:%x:%x\n", rv, req->IOCStatus,
970 			    ar->action_status);
971 	} else {
972 		vol_pg->VolumeSettings.Settings ^=
973 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
974 	}
975 	mpt_free_request(mpt, req);
976 }
977 
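/*
 * Reconcile a volume's resync rate (or, failing that, its resync
 * priority setting) with the rate selected via the vol_resync_rate
 * sysctl, issuing the appropriate RAID action when they differ.
 */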
978 static void
979 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
980 {
981 	request_t *req;
982 	struct mpt_raid_action_result *ar;
983 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
984 	u_int prio;
985 	int rv;
986 
987 	vol_pg = mpt_vol->config_page;
988 
989 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
990 		return;
991 
992 	/*
993 	 * If the current RAID resync rate does not
994 	 * match our configured rate, update it.
995 	 */
996 	prio = vol_pg->VolumeSettings.Settings
997 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
998 	if (vol_pg->ResyncRate != 0
999 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1000 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1001 		if (req == NULL) {
1002 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1003 				    "Get request failed!\n");
1004 			return;
1005 		}
1006 
1007 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1008 					MPI_RAID_ACTION_SET_RESYNC_RATE,
1009 					mpt->raid_resync_rate, /*addr*/0,
1010 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
1011 		if (rv == ETIMEDOUT) {
1012 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1013 				    "Resync Rate Setting Timed-out\n");
1014 			return;
1015 		}
1016 
1017 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1018 		if (rv != 0
1019 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1020 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1021 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1022 				    "%d:%x:%x\n", rv, req->IOCStatus,
1023 				    ar->action_status);
1024 		} else
1025 			vol_pg->ResyncRate = mpt->raid_resync_rate;
1026 		mpt_free_request(mpt, req);
1027 	} else if ((prio && mpt->raid_resync_rate < 128)
1028 		|| (!prio && mpt->raid_resync_rate >= 128)) {
1029 		uint32_t data;
1030 
1031 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1032 		if (req == NULL) {
1033 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1034 				    "Get request failed!\n");
1035 			return;
1036 		}
1037 
1038 		vol_pg->VolumeSettings.Settings ^=
1039 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1040 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1041 		vol_pg->VolumeSettings.Settings ^=
1042 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1043 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1044 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1045 					data, /*addr*/0, /*len*/0,
1046 					/*write*/FALSE, /*wait*/TRUE);
1047 		if (rv == ETIMEDOUT) {
1048 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1049 				    "Resync Rate Setting Timed-out\n");
1050 			return;
1051 		}
1052 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1053 		if (rv != 0
1054 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1055 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1056 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1057 				    "%d:%x:%x\n", rv, req->IOCStatus,
1058 				    ar->action_status);
1059 		} else {
1060 			vol_pg->VolumeSettings.Settings ^=
1061 			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1062 		}
1063 
1064 		mpt_free_request(mpt, req);
1065 	}
1066 }
1067 
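/*
 * Clamp the number of openings for a volume's device to the configured
 * RAID queue depth via an XPT_REL_SIMQ request.
 */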
1068 static void
1069 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1070 		       struct cam_path *path)
1071 {
1072 	struct ccb_relsim crs;
1073 
1074 	memset(&crs, 0, sizeof(crs));
1075 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1076 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1077 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1078 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1079 	crs.openings = mpt->raid_queue_depth;
1080 	xpt_action((union ccb *)&crs);
1081 	if (crs.ccb_h.status != CAM_REQ_CMP)
1082 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1083 			    "with CAM status %#x\n", crs.ccb_h.status);
1084 }
1085 
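/*
 * Print a one-time summary of a volume's settings, hot spare pools,
 * and member disk states to the console.
 */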
1086 static void
1087 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1088 {
1089 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1090 	u_int i;
1091 
1092 	vol_pg = mpt_vol->config_page;
1093 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
1094 	for (i = 1; i <= 0x8000; i <<= 1) {
1095 		switch (vol_pg->VolumeSettings.Settings & i) {
1096 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1097 			mpt_prtc(mpt, " Member-WCE");
1098 			break;
1099 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1100 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1101 			break;
1102 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1103 			mpt_prtc(mpt, " Hot-Plug-Spares");
1104 			break;
1105 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1106 			mpt_prtc(mpt, " High-Priority-ReSync");
1107 			break;
1108 		default:
1109 			break;
1110 		}
1111 	}
1112 	mpt_prtc(mpt, " )\n");
1113 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1114 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1115 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1116 			  ? ":" : "s:");
1117 		for (i = 0; i < 8; i++) {
1118 			u_int mask;
1119 
1120 			mask = 0x1 << i;
1121 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1122 				continue;
1123 			mpt_prtc(mpt, " %d", i);
1124 		}
1125 		mpt_prtc(mpt, "\n");
1126 	}
1127 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1128 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1129 		struct mpt_raid_disk *mpt_disk;
1130 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1131 		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1132 		U8 f, s;
1133 
1134 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1135 		disk_pg = &mpt_disk->config_page;
1136 		mpt_prtc(mpt, "      ");
1137 		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1138 			 pt_bus, disk_pg->PhysDiskID);
1139 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1140 			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1141 			    "Primary" : "Secondary");
1142 		} else {
1143 			mpt_prtc(mpt, "Stripe Position %d",
1144 				 mpt_disk->member_number);
1145 		}
1146 		f = disk_pg->PhysDiskStatus.Flags;
1147 		s = disk_pg->PhysDiskStatus.State;
1148 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1149 			mpt_prtc(mpt, " Out of Sync");
1150 		}
1151 		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1152 			mpt_prtc(mpt, " Quiesced");
1153 		}
1154 		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1155 			mpt_prtc(mpt, " Inactive");
1156 		}
1157 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1158 			mpt_prtc(mpt, " Was Optimal");
1159 		}
1160 		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1161 			mpt_prtc(mpt, " Was Non-Optimal");
1162 		}
1163 		switch (s) {
1164 		case MPI_PHYSDISK0_STATUS_ONLINE:
1165 			mpt_prtc(mpt, " Online");
1166 			break;
1167 		case MPI_PHYSDISK0_STATUS_MISSING:
1168 			mpt_prtc(mpt, " Missing");
1169 			break;
1170 		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1171 			mpt_prtc(mpt, " Incompatible");
1172 			break;
1173 		case MPI_PHYSDISK0_STATUS_FAILED:
1174 			mpt_prtc(mpt, " Failed");
1175 			break;
1176 		case MPI_PHYSDISK0_STATUS_INITIALIZING:
1177 			mpt_prtc(mpt, " Initializing");
1178 			break;
1179 		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1180 			mpt_prtc(mpt, " Requested Offline");
1181 			break;
1182 		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1183 			mpt_prtc(mpt, " Requested Failed");
1184 			break;
1185 		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1186 		default:
1187 			mpt_prtc(mpt, " Offline Other (%x)", s);
1188 			break;
1189 		}
1190 		mpt_prtc(mpt, "\n");
1191 	}
1192 }
1193 
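/*
 * Print the physical and pass-through addresses of a member disk and
 * any hot spare pools it belongs to.
 */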
1194 static void
1195 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1196 {
1197 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1198 	int rd_bus = cam_sim_bus(mpt->sim);
1199 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1200 	u_int i;
1201 
1202 	disk_pg = &mpt_disk->config_page;
1203 	mpt_disk_prt(mpt, mpt_disk,
1204 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1205 		     device_get_nameunit(mpt->dev), rd_bus,
1206 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1207 		     pt_bus, mpt_disk - mpt->raid_disks);
1208 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1209 		return;
1210 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1211 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1212 		   ? ":" : "s:");
1213 	for (i = 0; i < 8; i++) {
1214 		u_int mask;
1215 
1216 		mask = 0x1 << i;
1217 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1218 			continue;
1219 		mpt_prtc(mpt, " %d", i);
1220 	}
1221 	mpt_prtc(mpt, "\n");
1222 }
1223 
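/*
 * Re-read RAID physical disk page 0 for the given disk into our cached
 * copy and convert it to host byte order.
 */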
1224 static void
1225 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1226 		      IOC_3_PHYS_DISK *ioc_disk)
1227 {
1228 	int rv;
1229 
1230 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1231 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1232 				 &mpt_disk->config_page.Header,
1233 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1234 	if (rv != 0) {
1235 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1236 			"Failed to read RAID Disk Hdr(%d)\n",
1237 		 	ioc_disk->PhysDiskNum);
1238 		return;
1239 	}
1240 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1241 				   &mpt_disk->config_page.Header,
1242 				   sizeof(mpt_disk->config_page),
1243 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1244 	if (rv != 0)
1245 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1246 			"Failed to read RAID Disk Page(%d)\n",
1247 		 	ioc_disk->PhysDiskNum);
1248 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1249 }
1250 
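/*
 * Re-read RAID volume page 0 for the given volume, update the member
 * disk back-pointers, and, if a resync is in progress, fetch the
 * progress indicator via an INDICATOR_STRUCT RAID action.
 */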
1251 static void
1252 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1253     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1254 {
1255 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1256 	struct mpt_raid_action_result *ar;
1257 	request_t *req;
1258 	int rv;
1259 	int i;
1260 
1261 	vol_pg = mpt_vol->config_page;
1262 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1263 
1264 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1265 	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1266 	if (rv != 0) {
1267 		mpt_vol_prt(mpt, mpt_vol,
1268 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1269 		    ioc_vol->VolumePageNumber);
1270 		return;
1271 	}
1272 
1273 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1274 	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1275 	if (rv != 0) {
1276 		mpt_vol_prt(mpt, mpt_vol,
1277 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1278 		    ioc_vol->VolumePageNumber);
1279 		return;
1280 	}
1281 	mpt2host_config_page_raid_vol_0(vol_pg);
1282 
1283 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1284 
1285 	/* Update disk entry array data. */
1286 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1287 		struct mpt_raid_disk *mpt_disk;
1288 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1289 		mpt_disk->volume = mpt_vol;
1290 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1291 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1292 			mpt_disk->member_number--;
1293 		}
1294 	}
1295 
1296 	if ((vol_pg->VolumeStatus.Flags
1297 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1298 		return;
1299 
1300 	req = mpt_get_request(mpt, TRUE);
1301 	if (req == NULL) {
1302 		mpt_vol_prt(mpt, mpt_vol,
1303 		    "mpt_refresh_raid_vol: Get request failed!\n");
1304 		return;
1305 	}
1306 	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1307 	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1308 	if (rv == ETIMEDOUT) {
1309 		mpt_vol_prt(mpt, mpt_vol,
1310 		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1311 		mpt_free_request(mpt, req);
1312 		return;
1313 	}
1314 
1315 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1316 	if (rv == 0
1317 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1318 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1319 		memcpy(&mpt_vol->sync_progress,
1320 		       &ar->action_data.indicator_struct,
1321 		       sizeof(mpt_vol->sync_progress));
1322 		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1323 	} else {
1324 		mpt_vol_prt(mpt, mpt_vol,
1325 		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1326 	}
1327 	mpt_free_request(mpt, req);
1328 }
1329 
1330 /*
1331  * Update in-core information about RAID support.  We update any entries
1332  * that didn't previously exist or have been marked as needing to
1333  * be updated by our event handler.  Interesting changes are displayed
1334  * to the console.
1335  */
1336 static int
1337 mpt_refresh_raid_data(struct mpt_softc *mpt)
1338 {
1339 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1340 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1341 	IOC_3_PHYS_DISK *ioc_disk;
1342 	IOC_3_PHYS_DISK *ioc_last_disk;
1343 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1344 	size_t len;
1345 	int rv;
1346 	int i;
1347 	u_int nonopt_volumes;
1348 
1349 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1350 		return (0);
1351 	}
1352 
1353 	/*
1354 	 * Mark all items as unreferenced by the configuration.
1355 	 * This allows us to find, report, and discard stale
1356 	 * entries.
1357 	 */
1358 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1359 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1360 	}
1361 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1362 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1363 	}
1364 
1365 	/*
1366 	 * Get Physical Disk information.
1367 	 */
1368 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1369 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1370 				   &mpt->ioc_page3->Header, len,
1371 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1372 	if (rv) {
1373 		mpt_prt(mpt,
1374 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1375 		return (-1);
1376 	}
1377 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1378 
1379 	ioc_disk = mpt->ioc_page3->PhysDisk;
1380 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1381 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1382 		struct mpt_raid_disk *mpt_disk;
1383 
1384 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1385 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1386 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1387 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1388 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1389 		}
1390 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1391 		mpt->raid_rescan++;
1392 	}
1393 
1394 	/*
1395 	 * Refresh volume data.
1396 	 */
1397 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1398 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1399 				   &mpt->ioc_page2->Header, len,
1400 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1401 	if (rv) {
1402 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1403 			"Failed to read IOC Page 2\n");
1404 		return (-1);
1405 	}
1406 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1407 
1408 	ioc_vol = mpt->ioc_page2->RaidVolume;
1409 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1410 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1411 		struct mpt_raid_volume *mpt_vol;
1412 
1413 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1414 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1415 		vol_pg = mpt_vol->config_page;
1416 		if (vol_pg == NULL)
1417 			continue;
1418 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1419 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1420 		 || (vol_pg->VolumeStatus.Flags
1421 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1422 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1423 		}
1424 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1425 	}
1426 
1427 	nonopt_volumes = 0;
1428 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1429 		struct mpt_raid_volume *mpt_vol;
1430 		uint64_t total;
1431 		uint64_t left;
1432 		int m;
1433 		u_int prio;
1434 
1435 		mpt_vol = &mpt->raid_volumes[i];
1436 
1437 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1438 			continue;
1439 		}
1440 
1441 		vol_pg = mpt_vol->config_page;
1442 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1443 		 == MPT_RVF_ANNOUNCED) {
1444 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1445 			mpt_vol->flags = 0;
1446 			continue;
1447 		}
1448 
1449 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1450 			mpt_announce_vol(mpt, mpt_vol);
1451 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1452 		}
1453 
1454 		if (vol_pg->VolumeStatus.State !=
1455 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1456 			nonopt_volumes++;
1457 
1458 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1459 			continue;
1460 
1461 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1462 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1463 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1464 		mpt_verify_mwce(mpt, mpt_vol);
1465 
1466 		if (vol_pg->VolumeStatus.Flags == 0) {
1467 			continue;
1468 		}
1469 
1470 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1471 		for (m = 1; m <= 0x80; m <<= 1) {
1472 			switch (vol_pg->VolumeStatus.Flags & m) {
1473 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1474 				mpt_prtc(mpt, " Enabled");
1475 				break;
1476 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1477 				mpt_prtc(mpt, " Quiesced");
1478 				break;
1479 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1480 				mpt_prtc(mpt, " Re-Syncing");
1481 				break;
1482 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1483 				mpt_prtc(mpt, " Inactive");
1484 				break;
1485 			default:
1486 				break;
1487 			}
1488 		}
1489 		mpt_prtc(mpt, " )\n");
1490 
1491 		if ((vol_pg->VolumeStatus.Flags
1492 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1493 			continue;
1494 
1495 		mpt_verify_resync_rate(mpt, mpt_vol);
1496 
1497 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1498 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1499 		if (vol_pg->ResyncRate != 0) {
1500 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1501 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1502 			    prio / 1000, prio % 1000);
1503 		} else {
1504 			prio = vol_pg->VolumeSettings.Settings
1505 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1506 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1507 			    prio ? "High" : "Low");
1508 		}
1509 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1510 			    "blocks remaining\n", (uintmax_t)left,
1511 			    (uintmax_t)total);
1512 
1513 		/* Periodically report on sync progress. */
1514 		mpt_schedule_raid_refresh(mpt);
1515 	}
1516 
1517 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1518 		struct mpt_raid_disk *mpt_disk;
1519 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1520 		int m;
1521 
1522 		mpt_disk = &mpt->raid_disks[i];
1523 		disk_pg = &mpt_disk->config_page;
1524 
1525 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1526 			continue;
1527 
1528 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1529 		 == MPT_RDF_ANNOUNCED) {
1530 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1531 			mpt_disk->flags = 0;
1532 			mpt->raid_rescan++;
1533 			continue;
1534 		}
1535 
1536 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1537 			mpt_announce_disk(mpt, mpt_disk);
1538 			mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1539 		}
1540 
1541 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1542 			continue;
1543 
1544 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1545 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1546 		if (disk_pg->PhysDiskStatus.Flags == 0)
1547 			continue;
1548 
1549 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1550 		for (m = 1; m <= 0x80; m <<= 1) {
1551 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1552 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1553 				mpt_prtc(mpt, " Out-Of-Sync");
1554 				break;
1555 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1556 				mpt_prtc(mpt, " Quiesced");
1557 				break;
1558 			default:
1559 				break;
1560 			}
1561 		}
1562 		mpt_prtc(mpt, " )\n");
1563 	}
1564 
1565 	mpt->raid_nonopt_volumes = nonopt_volumes;
1566 	return (0);
1567 }
1568 
1569 static void
1570 mpt_raid_timer(void *arg)
1571 {
1572 	struct mpt_softc *mpt;
1573 
1574 	mpt = (struct mpt_softc *)arg;
1575 	MPT_LOCK_ASSERT(mpt);
1576 	mpt_raid_wakeup(mpt);
1577 }
1578 
1579 static void
1580 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1581 {
1582 
1583 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1584 		      mpt_raid_timer, mpt);
1585 }
1586 
1587 void
1588 mpt_raid_free_mem(struct mpt_softc *mpt)
1589 {
1590 
1591 	if (mpt->raid_volumes) {
1592 		struct mpt_raid_volume *mpt_raid;
1593 		int i;
1594 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1595 			mpt_raid = &mpt->raid_volumes[i];
1596 			if (mpt_raid->config_page) {
1597 				free(mpt_raid->config_page, M_DEVBUF);
1598 				mpt_raid->config_page = NULL;
1599 			}
1600 		}
1601 		free(mpt->raid_volumes, M_DEVBUF);
1602 		mpt->raid_volumes = NULL;
1603 	}
1604 	if (mpt->raid_disks) {
1605 		free(mpt->raid_disks, M_DEVBUF);
1606 		mpt->raid_disks = NULL;
1607 	}
1608 	if (mpt->ioc_page2) {
1609 		free(mpt->ioc_page2, M_DEVBUF);
1610 		mpt->ioc_page2 = NULL;
1611 	}
1612 	if (mpt->ioc_page3) {
1613 		free(mpt->ioc_page3, M_DEVBUF);
1614 		mpt->ioc_page3 = NULL;
1615 	}
1616 	mpt->raid_max_volumes = 0;
1617 	mpt->raid_max_disks = 0;
1618 }
1619 
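/*
 * Setters backing the RAID sysctls below.  Each validates its argument,
 * records the new setting in the softc, and pushes it out to all active
 * volumes.
 */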
1620 static int
1621 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1622 {
1623 	struct mpt_raid_volume *mpt_vol;
1624 
1625 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1626 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1627 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1628 		return (EINVAL);
1629 
1630 	MPT_LOCK(mpt);
1631 	mpt->raid_resync_rate = rate;
1632 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1633 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1634 			continue;
1635 		}
1636 		mpt_verify_resync_rate(mpt, mpt_vol);
1637 	}
1638 	MPT_UNLOCK(mpt);
1639 	return (0);
1640 }
1641 
1642 static int
1643 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1644 {
1645 	struct mpt_raid_volume *mpt_vol;
1646 
1647 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1648 		return (EINVAL);
1649 
1650 	MPT_LOCK(mpt);
1651 	mpt->raid_queue_depth = vol_queue_depth;
1652 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1653 		struct cam_path *path;
1654 		int error;
1655 
1656 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1657 			continue;
1658 
1659 		mpt->raid_rescan = 0;
1660 
1661 		error = xpt_create_path(&path, NULL,
1662 					cam_sim_path(mpt->sim),
1663 					mpt_vol->config_page->VolumeID,
1664 					/*lun*/0);
1665 		if (error != CAM_REQ_CMP) {
1666 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1667 			continue;
1668 		}
1669 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1670 		xpt_free_path(path);
1671 	}
1672 	MPT_UNLOCK(mpt);
1673 	return (0);
1674 }
1675 
1676 static int
1677 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1678 {
1679 	struct mpt_raid_volume *mpt_vol;
1680 	int force_full_resync;
1681 
1682 	MPT_LOCK(mpt);
1683 	if (mwce == mpt->raid_mwce_setting) {
1684 		MPT_UNLOCK(mpt);
1685 		return (0);
1686 	}
1687 
1688 	/*
1689 	 * Catch MWCE being left on due to a failed shutdown.  Since
1690 	 * sysctls cannot be set by the loader, we treat the first
1691 	 * setting of this variable specially and force a full volume
1692 	 * resync if MWCE is enabled and a resync is in progress.
1693 	 */
1694 	force_full_resync = 0;
1695 	if (mpt->raid_mwce_set == 0
1696 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1697 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1698 		force_full_resync = 1;
1699 
1700 	mpt->raid_mwce_setting = mwce;
1701 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1702 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1703 		int resyncing;
1704 		int mwce;
1705 
1706 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1707 			continue;
1708 
1709 		vol_pg = mpt_vol->config_page;
1710 		resyncing = vol_pg->VolumeStatus.Flags
1711 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1712 		mwce = vol_pg->VolumeSettings.Settings
1713 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1714 		if (force_full_resync && resyncing && mwce) {
1715 			/*
1716 			 * XXX disable/enable volume should force a resync,
1717 			 *     but we'll need to quiesce, drain, and restart
1718 			 *     I/O to do that.
1719 			 */
1720 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1721 				    "detected.  Suggest full resync.\n");
1722 		}
1723 		mpt_verify_mwce(mpt, mpt_vol);
1724 	}
1725 	mpt->raid_mwce_set = 1;
1726 	MPT_UNLOCK(mpt);
1727 	return (0);
1728 }
1729 
1730 static const char *mpt_vol_mwce_strs[] =
1731 {
1732 	"On",
1733 	"Off",
1734 	"On-During-Rebuild",
1735 	"NC"
1736 };
1737 
1738 static int
1739 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1740 {
1741 	char inbuf[20];
1742 	struct mpt_softc *mpt;
1743 	const char *str;
1744 	int error;
1745 	u_int size;
1746 	u_int i;
1747 
1748 	mpt = (struct mpt_softc *)arg1;
1749 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1750 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1751 	if (error || !req->newptr) {
1752 		return (error);
1753 	}
1754 
1755 	size = req->newlen - req->newidx;
1756 	if (size >= sizeof(inbuf)) {
1757 		return (EINVAL);
1758 	}
1759 
1760 	error = SYSCTL_IN(req, inbuf, size);
1761 	if (error) {
1762 		return (error);
1763 	}
1764 	inbuf[size] = '\0';
1765 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1766 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1767 			return (mpt_raid_set_vol_mwce(mpt, i));
1768 		}
1769 	}
1770 	return (EINVAL);
1771 }
1772 
1773 static int
1774 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1775 {
1776 	struct mpt_softc *mpt;
1777 	u_int raid_resync_rate;
1778 	int error;
1779 
1780 	mpt = (struct mpt_softc *)arg1;
1781 	raid_resync_rate = mpt->raid_resync_rate;
1782 
1783 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1784 	if (error || !req->newptr) {
1785 		return (error);
1786 	}
1787 
1788 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1789 }
1790 
1791 static int
1792 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1793 {
1794 	struct mpt_softc *mpt;
1795 	u_int raid_queue_depth;
1796 	int error;
1797 
1798 	mpt = (struct mpt_softc *)arg1;
1799 	raid_queue_depth = mpt->raid_queue_depth;
1800 
1801 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1802 	if (error || !req->newptr) {
1803 		return (error);
1804 	}
1805 
1806 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1807 }
1808 
1809 static void
1810 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1811 {
1812 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1813 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1814 
1815 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1816 	    "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1817 	    mpt, 0, mpt_raid_sysctl_vol_member_wce, "A",
1818 	    "volume member WCE(On,Off,On-During-Rebuild,NC)");
1819 
1820 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1821 	    "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1822 	    mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I",
1823 	    "default volume queue depth");
1824 
1825 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1826 	    "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1827 	    mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I",
1828 	    "volume resync priority (0 == NC, 1 - 255)");
1829 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1830 			"nonoptimal_volumes", CTLFLAG_RD,
1831 			&mpt->raid_nonopt_volumes, 0,
1832 			"number of nonoptimal volumes");
1833 }
1834