1 /*-
2  * Routines for handling the integrated RAID features of LSI MPT Fusion adapters.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2005, WHEEL Sp. z o.o.
7  * Copyright (c) 2005 Justin T. Gibbs.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon including
18  *    a substantially similar Disclaimer requirement for further binary
19  *    redistribution.
20  * 3. Neither the names of the above listed copyright holders nor the names
21  *    of any contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 /*-
37  * Some Breakage and Bug Fixing added later.
38  * Copyright (c) 2006, by Matthew Jacob
39  * All Rights Reserved
40  *
41  * Support from LSI-Logic has also gone a long way toward making this a
42  * workable subsystem and is gratefully acknowledged.
43  */
44 
45 #include <sys/cdefs.h>
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48 
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_xpt_sim.h>
57 
58 #include <sys/callout.h>
59 #include <sys/kthread.h>
60 #include <sys/stdarg.h>
61 #include <sys/sysctl.h>
62 
63 struct mpt_raid_action_result
64 {
65 	union {
66 		MPI_RAID_VOL_INDICATOR	indicator_struct;
67 		uint32_t		new_settings;
68 		uint8_t			phys_disk_num;
69 	} action_data;
70 	uint16_t			action_status;
71 };
72 
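/*
 * The RAID action result is stashed in the request buffer, immediately
 * following the RAID action request message itself.
 * REQ_TO_RAID_ACTION_RESULT() recovers that result and REQ_IOCSTATUS()
 * extracts the masked IOC status from a completed request.
 */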
73 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
74 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
75 
76 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
77 
78 static mpt_probe_handler_t	mpt_raid_probe;
79 static mpt_attach_handler_t	mpt_raid_attach;
80 static mpt_enable_handler_t	mpt_raid_enable;
81 static mpt_event_handler_t	mpt_raid_event;
82 static mpt_shutdown_handler_t	mpt_raid_shutdown;
83 static mpt_reset_handler_t	mpt_raid_ioc_reset;
84 static mpt_detach_handler_t	mpt_raid_detach;
85 
86 static struct mpt_personality mpt_raid_personality =
87 {
88 	.name		= "mpt_raid",
89 	.probe		= mpt_raid_probe,
90 	.attach		= mpt_raid_attach,
91 	.enable		= mpt_raid_enable,
92 	.event		= mpt_raid_event,
93 	.reset		= mpt_raid_ioc_reset,
94 	.shutdown	= mpt_raid_shutdown,
95 	.detach		= mpt_raid_detach,
96 };
97 
98 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
99 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
100 
101 static mpt_reply_handler_t mpt_raid_reply_handler;
102 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
103 					MSG_DEFAULT_REPLY *reply_frame);
104 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
105 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
106 static void mpt_raid_thread(void *arg);
107 static callout_func_t mpt_raid_timer;
108 #if 0
109 static void mpt_enable_vol(struct mpt_softc *mpt,
110 			   struct mpt_raid_volume *mpt_vol, int enable);
111 #endif
112 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
113 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
114     struct cam_path *);
115 static void mpt_raid_sysctl_attach(struct mpt_softc *);
116 
117 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
118 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
119 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
120 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
121     const char *fmt, ...);
122 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
123     const char *fmt, ...);
124 
125 static int mpt_issue_raid_req(struct mpt_softc *mpt,
126     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
127     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
128     int write, int wait);
129 
130 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
131 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
132 
133 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
134 
135 static const char *
136 mpt_vol_type(struct mpt_raid_volume *vol)
137 {
138 	switch (vol->config_page->VolumeType) {
139 	case MPI_RAID_VOL_TYPE_IS:
140 		return ("RAID-0");
141 	case MPI_RAID_VOL_TYPE_IME:
142 		return ("RAID-1E");
143 	case MPI_RAID_VOL_TYPE_IM:
144 		return ("RAID-1");
145 	default:
146 		return ("Unknown");
147 	}
148 }
149 
150 static const char *
151 mpt_vol_state(struct mpt_raid_volume *vol)
152 {
153 	switch (vol->config_page->VolumeStatus.State) {
154 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
155 		return ("Optimal");
156 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
157 		return ("Degraded");
158 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
159 		return ("Failed");
160 	default:
161 		return ("Unknown");
162 	}
163 }
164 
165 static const char *
166 mpt_disk_state(struct mpt_raid_disk *disk)
167 {
168 	switch (disk->config_page.PhysDiskStatus.State) {
169 	case MPI_PHYSDISK0_STATUS_ONLINE:
170 		return ("Online");
171 	case MPI_PHYSDISK0_STATUS_MISSING:
172 		return ("Missing");
173 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
174 		return ("Incompatible");
175 	case MPI_PHYSDISK0_STATUS_FAILED:
176 		return ("Failed");
177 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
178 		return ("Initializing");
179 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
180 		return ("Offline Requested");
181 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
182 		return ("Failed per Host Request");
183 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
184 		return ("Offline");
185 	default:
186 		return ("Unknown");
187 	}
188 }
189 
190 static void
191 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
192 	    const char *fmt, ...)
193 {
194 	va_list ap;
195 
196 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
197 	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
198 	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
199 	va_start(ap, fmt);
200 	vprintf(fmt, ap);
201 	va_end(ap);
202 }
203 
204 static void
205 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
206 	     const char *fmt, ...)
207 {
208 	va_list ap;
209 
210 	if (disk->volume != NULL) {
211 		printf("(%s:vol%d:%d): ",
212 		       device_get_nameunit(mpt->dev),
213 		       disk->volume->config_page->VolumeID,
214 		       disk->member_number);
215 	} else {
216 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
217 		       disk->config_page.PhysDiskBus,
218 		       disk->config_page.PhysDiskID);
219 	}
220 	va_start(ap, fmt);
221 	vprintf(fmt, ap);
222 	va_end(ap);
223 }
224 
225 static void
226 mpt_raid_async(void *callback_arg, u_int32_t code,
227 	       struct cam_path *path, void *arg)
228 {
229 	struct mpt_softc *mpt;
230 
231 	mpt = (struct mpt_softc*)callback_arg;
232 	switch (code) {
233 	case AC_FOUND_DEVICE:
234 	{
235 		struct ccb_getdev *cgd;
236 		struct mpt_raid_volume *mpt_vol;
237 
238 		cgd = (struct ccb_getdev *)arg;
239 		if (cgd == NULL) {
240 			break;
241 		}
242 
243 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
244 			 cgd->ccb_h.target_id);
245 
246 		RAID_VOL_FOREACH(mpt, mpt_vol) {
247 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
248 				continue;
249 
250 			if (mpt_vol->config_page->VolumeID
251 			 == cgd->ccb_h.target_id) {
252 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
253 				break;
254 			}
255 		}
256 	}
257 	default:
258 		break;
259 	}
260 }
261 
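/*
 * Only attach the RAID personality when the IOC reports integrated RAID
 * support (a non-zero physical disk pool in IOC Page 2).
 */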
262 static int
263 mpt_raid_probe(struct mpt_softc *mpt)
264 {
265 
266 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
267 		return (ENODEV);
268 	}
269 	return (0);
270 }
271 
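/*
 * Attach the RAID personality: start the RAID monitoring thread, register
 * our reply handler, hook AC_FOUND_DEVICE async events so volume queue
 * depths can be adjusted as devices appear, and expose the RAID tuning
 * sysctls.
 */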
272 static int
273 mpt_raid_attach(struct mpt_softc *mpt)
274 {
275 	struct ccb_setasync csa;
276 	mpt_handler_t	 handler;
277 	int		 error;
278 
279 	mpt_callout_init(mpt, &mpt->raid_timer);
280 
281 	error = mpt_spawn_raid_thread(mpt);
282 	if (error != 0) {
283 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
284 		goto cleanup;
285 	}
286 
287 	MPT_LOCK(mpt);
288 	handler.reply_handler = mpt_raid_reply_handler;
289 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
290 				     &raid_handler_id);
291 	if (error != 0) {
292 		mpt_prt(mpt, "Unable to register RAID haandler!\n");
293 		goto cleanup;
294 	}
295 
296 	memset(&csa, 0, sizeof(csa));
297 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
298 	csa.ccb_h.func_code = XPT_SASYNC_CB;
299 	csa.event_enable = AC_FOUND_DEVICE;
300 	csa.callback = mpt_raid_async;
301 	csa.callback_arg = mpt;
302 	xpt_action((union ccb *)&csa);
303 	if (csa.ccb_h.status != CAM_REQ_CMP) {
304 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
305 			"CAM async handler.\n");
306 	}
307 	MPT_UNLOCK(mpt);
308 
309 	mpt_raid_sysctl_attach(mpt);
310 	return (0);
311 cleanup:
312 	MPT_UNLOCK(mpt);
313 	mpt_raid_detach(mpt);
314 	return (error);
315 }
316 
317 static int
318 mpt_raid_enable(struct mpt_softc *mpt)
319 {
320 
321 	return (0);
322 }
323 
324 static void
325 mpt_raid_detach(struct mpt_softc *mpt)
326 {
327 	struct ccb_setasync csa;
328 	mpt_handler_t handler;
329 
330 	mpt_callout_drain(mpt, &mpt->raid_timer);
331 
332 	MPT_LOCK(mpt);
333 	mpt_terminate_raid_thread(mpt);
334 	handler.reply_handler = mpt_raid_reply_handler;
335 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
336 			       raid_handler_id);
337 	memset(&csa, 0, sizeof(csa));
338 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
339 	csa.ccb_h.func_code = XPT_SASYNC_CB;
340 	csa.event_enable = 0;
341 	csa.callback = mpt_raid_async;
342 	csa.callback_arg = mpt;
343 	xpt_action((union ccb *)&csa);
344 	MPT_UNLOCK(mpt);
345 }
346 
347 static void
348 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
349 {
350 
351 	/* Nothing to do yet. */
352 }
353 
354 static const char *raid_event_txt[] =
355 {
356 	"Volume Created",
357 	"Volume Deleted",
358 	"Volume Settings Changed",
359 	"Volume Status Changed",
360 	"Volume Physical Disk Membership Changed",
361 	"Physical Disk Created",
362 	"Physical Disk Deleted",
363 	"Physical Disk Settings Changed",
364 	"Physical Disk Status Changed",
365 	"Domain Validation Required",
366 	"SMART Data Received",
367 	"Replace Action Started",
368 };
369 
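/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications.  The affected volume
 * and/or physical disk is marked stale (and a bus rescan scheduled when
 * membership may have changed), the event is reported to the console, and
 * the RAID thread is woken to refresh our cached configuration pages.
 */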
370 static int
371 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
372 	       MSG_EVENT_NOTIFY_REPLY *msg)
373 {
374 	EVENT_DATA_RAID *raid_event;
375 	struct mpt_raid_volume *mpt_vol;
376 	struct mpt_raid_disk *mpt_disk;
377 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
378 	int i;
379 	int print_event;
380 
381 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
382 		return (0);
383 	}
384 
385 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
386 
387 	mpt_vol = NULL;
388 	vol_pg = NULL;
389 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
390 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
391 			mpt_vol = &mpt->raid_volumes[i];
392 			vol_pg = mpt_vol->config_page;
393 
394 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
395 				continue;
396 
397 			if (vol_pg->VolumeID == raid_event->VolumeID
398 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
399 				break;
400 		}
401 		if (i >= mpt->ioc_page2->MaxVolumes) {
402 			mpt_vol = NULL;
403 			vol_pg = NULL;
404 		}
405 	}
406 
407 	mpt_disk = NULL;
408 	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
409 		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
410 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
411 			mpt_disk = NULL;
412 		}
413 	}
414 
415 	print_event = 1;
416 	switch(raid_event->ReasonCode) {
417 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
418 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
419 		break;
420 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
421 		if (mpt_vol != NULL) {
422 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
423 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
424 			} else {
425 				/*
426 				 * Coalesce status messages into one
427 				 * per background run of our RAID thread.
428 				 * This removes "spurious" status messages
429 				 * from our output.
430 				 */
431 				print_event = 0;
432 			}
433 		}
434 		break;
435 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
436 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
437 		mpt->raid_rescan++;
438 		if (mpt_vol != NULL) {
439 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
440 		}
441 		break;
442 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
443 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
444 		mpt->raid_rescan++;
445 		break;
446 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
447 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
448 		mpt->raid_rescan++;
449 		if (mpt_disk != NULL) {
450 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
451 		}
452 		break;
453 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
454 		mpt->raid_rescan++;
455 		break;
456 	case MPI_EVENT_RAID_RC_SMART_DATA:
457 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
458 		break;
459 	}
460 
461 	if (print_event) {
462 		if (mpt_disk != NULL) {
463 			mpt_disk_prt(mpt, mpt_disk, "");
464 		} else if (mpt_vol != NULL) {
465 			mpt_vol_prt(mpt, mpt_vol, "");
466 		} else {
467 			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
468 				raid_event->VolumeID);
469 
470 			if (raid_event->PhysDiskNum != 0xFF)
471 				mpt_prtc(mpt, ":%d): ",
472 					 raid_event->PhysDiskNum);
473 			else
474 				mpt_prtc(mpt, "): ");
475 		}
476 
477 		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
478 			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
479 				 raid_event->ReasonCode);
480 		else
481 			mpt_prtc(mpt, "%s\n",
482 				 raid_event_txt[raid_event->ReasonCode]);
483 	}
484 
485 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
486 		/* XXX Use CAM's print sense for this... */
487 		if (mpt_disk != NULL)
488 			mpt_disk_prt(mpt, mpt_disk, "");
489 		else
490 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
491 			    raid_event->VolumeBus, raid_event->VolumeID,
492 			    raid_event->PhysDiskNum);
493 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
494 			 raid_event->ASC, raid_event->ASCQ);
495 	}
496 
497 	mpt_raid_wakeup(mpt);
498 	return (1);
499 }
500 
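/*
 * On shutdown, if member write caching was only enabled to speed up
 * rebuilds, turn it back off so volumes are not left with write caching
 * enabled across the reboot.
 */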
501 static void
502 mpt_raid_shutdown(struct mpt_softc *mpt)
503 {
504 	struct mpt_raid_volume *mpt_vol;
505 
506 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
507 		return;
508 	}
509 
510 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
511 	RAID_VOL_FOREACH(mpt, mpt_vol) {
512 		mpt_verify_mwce(mpt, mpt_vol);
513 	}
514 }
515 
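/*
 * Completion handler for RAID action requests.  Parse the reply (if any),
 * mark the request done, and either wake a waiting thread or free the
 * request ourselves.
 */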
516 static int
517 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
518     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
519 {
520 	int free_req;
521 
522 	if (req == NULL)
523 		return (TRUE);
524 
525 	free_req = TRUE;
526 	if (reply_frame != NULL)
527 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
528 #ifdef NOTYET
529 	else if (req->ccb != NULL) {
530 		/* Complete Quiesce CCB with error... */
531 	}
532 #endif
533 
534 	req->state &= ~REQ_STATE_QUEUED;
535 	req->state |= REQ_STATE_DONE;
536 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
537 
538 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
539 		wakeup(req);
540 	} else if (free_req) {
541 		mpt_free_request(mpt, req);
542 	}
543 
544 	return (TRUE);
545 }
546 
547 /*
548  * Parse additional completion information in the reply
549  * frame for RAID I/O requests.
550  */
551 static int
552 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
553     MSG_DEFAULT_REPLY *reply_frame)
554 {
555 	MSG_RAID_ACTION_REPLY *reply;
556 	struct mpt_raid_action_result *action_result;
557 	MSG_RAID_ACTION_REQUEST *rap;
558 
559 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
560 	req->IOCStatus = le16toh(reply->IOCStatus);
561 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
562 
563 	switch (rap->Action) {
564 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
565 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
566 		break;
567 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
568 		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
569 		break;
570 	default:
571 		break;
572 	}
573 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
574 	memcpy(&action_result->action_data, &reply->ActionData,
575 	    sizeof(action_result->action_data));
576 	action_result->action_status = le16toh(reply->ActionStatus);
577 	return (TRUE);
578 }
579 
580 /*
581  * Utility routine to perform a RAID action command.
582  */
583 static int
584 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
585 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
586 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
587 		   int write, int wait)
588 {
589 	MSG_RAID_ACTION_REQUEST *rap;
590 	SGE_SIMPLE32 *se;
591 
592 	rap = req->req_vbuf;
593 	memset(rap, 0, sizeof *rap);
594 	rap->Action = Action;
595 	rap->ActionDataWord = htole32(ActionDataWord);
596 	rap->Function = MPI_FUNCTION_RAID_ACTION;
597 	rap->VolumeID = vol->config_page->VolumeID;
598 	rap->VolumeBus = vol->config_page->VolumeBus;
599 	if (disk != NULL)
600 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
601 	else
602 		rap->PhysDiskNum = 0xFF;
603 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
604 	se->Address = htole32(addr);
605 	MPI_pSGE_SET_LENGTH(se, len);
606 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
607 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
608 	    MPI_SGE_FLAGS_END_OF_LIST |
609 	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
610 	se->FlagsLength = htole32(se->FlagsLength);
611 	rap->MsgContext = htole32(req->index | raid_handler_id);
612 
613 	mpt_check_doorbell(mpt);
614 	mpt_send_cmd(mpt, req);
615 
616 	if (wait) {
617 		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
618 				     /*sleep_ok*/FALSE, /*time_ms*/2000));
619 	} else {
620 		return (0);
621 	}
622 }
623 
624 /*************************** RAID Status Monitoring ***************************/
625 static int
626 mpt_spawn_raid_thread(struct mpt_softc *mpt)
627 {
628 	int error;
629 
630 	/*
631 	 * Freeze out any CAM transactions until our thread
632 	 * is able to run at least once.  We need to update
633 	 * our RAID pages before accepting I/O or we may
634 	 * reject I/O to an ID we later determine is for a
635 	 * hidden physdisk.
636 	 */
637 	MPT_LOCK(mpt);
638 	xpt_freeze_simq(mpt->phydisk_sim, 1);
639 	MPT_UNLOCK(mpt);
640 	error = kproc_create(mpt_raid_thread, mpt,
641 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
642 	    "mpt_raid%d", mpt->unit);
643 	if (error != 0) {
644 		MPT_LOCK(mpt);
645 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
646 		MPT_UNLOCK(mpt);
647 	}
648 	return (error);
649 }
650 
651 static void
652 mpt_terminate_raid_thread(struct mpt_softc *mpt)
653 {
654 
655 	if (mpt->raid_thread == NULL) {
656 		return;
657 	}
658 	mpt->shutdwn_raid = 1;
659 	wakeup(&mpt->raid_volumes);
660 	/*
661 	 * Sleep on a slightly different location
662 	 * for this interlock just for added safety.
663 	 */
664 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
665 }
666 
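/*
 * The RAID monitoring thread.  It refreshes the cached RAID pages each
 * time it is woken, releases the frozen physical disk SIM queue once the
 * first refresh has completed, and kicks off CAM rescans whenever the
 * RAID membership may have changed.
 */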
667 static void
668 mpt_raid_thread(void *arg)
669 {
670 	struct mpt_softc *mpt;
671 	int firstrun;
672 
673 	mpt = (struct mpt_softc *)arg;
674 	firstrun = 1;
675 	MPT_LOCK(mpt);
676 	while (mpt->shutdwn_raid == 0) {
677 		if (mpt->raid_wakeup == 0) {
678 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
679 			continue;
680 		}
681 
682 		mpt->raid_wakeup = 0;
683 
684 		if (mpt_refresh_raid_data(mpt)) {
685 			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
686 			continue;
687 		}
688 
689 		/*
690 		 * Now that we have our first snapshot of RAID data,
691 		 * allow CAM to access our physical disk bus.
692 		 */
693 		if (firstrun) {
694 			firstrun = 0;
695 			xpt_release_simq(mpt->phydisk_sim, TRUE);
696 		}
697 
698 		if (mpt->raid_rescan != 0) {
699 			union ccb *ccb;
700 			int error;
701 
702 			mpt->raid_rescan = 0;
703 			MPT_UNLOCK(mpt);
704 
705 			ccb = xpt_alloc_ccb();
706 
707 			MPT_LOCK(mpt);
708 			error = xpt_create_path(&ccb->ccb_h.path, NULL,
709 			    cam_sim_path(mpt->phydisk_sim),
710 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
711 			if (error != CAM_REQ_CMP) {
712 				xpt_free_ccb(ccb);
713 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
714 			} else {
715 				xpt_rescan(ccb);
716 			}
717 		}
718 	}
719 	mpt->raid_thread = NULL;
720 	wakeup(&mpt->raid_thread);
721 	MPT_UNLOCK(mpt);
722 	kproc_exit(0);
723 }
724 
725 #if 0
726 static void
727 mpt_raid_quiesce_timeout(void *arg)
728 {
729 
730 	/* Complete the CCB with error */
731 	/* COWWWW */
732 }
733 
734 static timeout_t mpt_raid_quiesce_timeout;
735 cam_status
736 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
737 		      request_t *req)
738 {
739 	union ccb *ccb;
740 
741 	ccb = req->ccb;
742 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
743 		return (CAM_REQ_CMP);
744 
745 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
746 		int rv;
747 
748 		mpt_disk->flags |= MPT_RDF_QUIESCING;
749 		xpt_freeze_devq(ccb->ccb_h.path, 1);
750 
751 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
752 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
753 					/*ActionData*/0, /*addr*/0,
754 					/*len*/0, /*write*/FALSE,
755 					/*wait*/FALSE);
756 		if (rv != 0)
757 			return (CAM_REQ_CMP_ERR);
758 
759 		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
760 #if 0
761 		if (rv == ETIMEDOUT) {
762 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
763 				     "Quiece Timed-out\n");
764 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
765 			return (CAM_REQ_CMP_ERR);
766 		}
767 
768 		ar = REQ_TO_RAID_ACTION_RESULT(req);
769 		if (rv != 0
770 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
771 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
772 			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
773 				    "%d:%x:%x\n", rv, req->IOCStatus,
774 				    ar->action_status);
775 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
776 			return (CAM_REQ_CMP_ERR);
777 		}
778 #endif
779 		return (CAM_REQ_INPROG);
780 	}
781 	return (CAM_REQUEUE_REQ);
782 }
783 #endif
784 
785 /* XXX Ignores that there may be multiple buses/IOCs involved. */
786 cam_status
787 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
788 {
789 	struct mpt_raid_disk *mpt_disk;
790 
791 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
792 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
793 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
794 		*tgt = mpt_disk->config_page.PhysDiskID;
795 		return (0);
796 	}
797 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
798 		 ccb->ccb_h.target_id);
799 	return (-1);
800 }
801 
802 /* XXX Ignores that there may be multiple buses/IOCs involved. */
803 int
804 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
805 {
806 	struct mpt_raid_disk *mpt_disk;
807 	int i;
808 
809 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
810 		return (0);
811 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
812 		mpt_disk = &mpt->raid_disks[i];
813 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
814 		    mpt_disk->config_page.PhysDiskID == tgt)
815 			return (1);
816 	}
817 	return (0);
818 
819 }
820 
821 /* XXX Ignores that there may be multiple buses/IOCs involved. */
822 int
823 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
824 {
825 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
826 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
827 
828 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
829 		return (0);
830 	}
831 	ioc_vol = mpt->ioc_page2->RaidVolume;
832 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
833 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
834 		if (ioc_vol->VolumeID == tgt) {
835 			return (1);
836 		}
837 	}
838 	return (0);
839 }
840 
841 #if 0
842 static void
843 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
844 	       int enable)
845 {
846 	request_t *req;
847 	struct mpt_raid_action_result *ar;
848 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
849 	int enabled;
850 	int rv;
851 
852 	vol_pg = mpt_vol->config_page;
853 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
854 
855 	/*
856 	 * If the setting matches the configuration,
857 	 * there is nothing to do.
858 	 */
859 	if ((enabled && enable)
860 	 || (!enabled && !enable))
861 		return;
862 
863 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
864 	if (req == NULL) {
865 		mpt_vol_prt(mpt, mpt_vol,
866 			    "mpt_enable_vol: Get request failed!\n");
867 		return;
868 	}
869 
870 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
871 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
872 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
873 				/*data*/0, /*addr*/0, /*len*/0,
874 				/*write*/FALSE, /*wait*/TRUE);
875 	if (rv == ETIMEDOUT) {
876 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
877 			    "%s Volume Timed-out\n",
878 			    enable ? "Enable" : "Disable");
879 		return;
880 	}
881 	ar = REQ_TO_RAID_ACTION_RESULT(req);
882 	if (rv != 0
883 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
884 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
885 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
886 			    enable ? "Enable" : "Disable",
887 			    rv, req->IOCStatus, ar->action_status);
888 	}
889 
890 	mpt_free_request(mpt, req);
891 }
892 #endif
893 
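/*
 * Reconcile a volume's member Write Cache Enable setting with the
 * administratively configured policy, issuing a
 * MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS request when they disagree.
 */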
894 static void
895 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
896 {
897 	request_t *req;
898 	struct mpt_raid_action_result *ar;
899 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
900 	uint32_t data;
901 	int rv;
902 	int resyncing;
903 	int mwce;
904 
905 	vol_pg = mpt_vol->config_page;
906 	resyncing = vol_pg->VolumeStatus.Flags
907 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
908 	mwce = vol_pg->VolumeSettings.Settings
909 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
910 
911 	/*
912 	 * If the setting matches the configuration,
913 	 * there is nothing to do.
914 	 */
915 	switch (mpt->raid_mwce_setting) {
916 	case MPT_RAID_MWCE_REBUILD_ONLY:
917 		if ((resyncing && mwce) || (!resyncing && !mwce)) {
918 			return;
919 		}
920 		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
921 		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
922 			/*
923 			 * Wait one more status update to see if
924 			 * resyncing gets enabled.  It gets disabled
925 			 * temporarily when WCE is changed.
926 			 */
927 			return;
928 		}
929 		break;
930 	case MPT_RAID_MWCE_ON:
931 		if (mwce)
932 			return;
933 		break;
934 	case MPT_RAID_MWCE_OFF:
935 		if (!mwce)
936 			return;
937 		break;
938 	case MPT_RAID_MWCE_NC:
939 		return;
940 	}
941 
942 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
943 	if (req == NULL) {
944 		mpt_vol_prt(mpt, mpt_vol,
945 			    "mpt_verify_mwce: Get request failed!\n");
946 		return;
947 	}
948 
949 	vol_pg->VolumeSettings.Settings ^=
950 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
951 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
952 	vol_pg->VolumeSettings.Settings ^=
953 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
954 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
955 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
956 				data, /*addr*/0, /*len*/0,
957 				/*write*/FALSE, /*wait*/TRUE);
958 	if (rv == ETIMEDOUT) {
959 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
960 			    "Write Cache Enable Timed-out\n");
961 		return;
962 	}
963 	ar = REQ_TO_RAID_ACTION_RESULT(req);
964 	if (rv != 0
965 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
966 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
967 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
968 			    "%d:%x:%x\n", rv, req->IOCStatus,
969 			    ar->action_status);
970 	} else {
971 		vol_pg->VolumeSettings.Settings ^=
972 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
973 	}
974 	mpt_free_request(mpt, req);
975 }
976 
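/*
 * Reconcile a volume's resync rate (or high/low resync priority) with the
 * configured raid_resync_rate, updating the firmware setting when they
 * differ.
 */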
977 static void
978 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
979 {
980 	request_t *req;
981 	struct mpt_raid_action_result *ar;
982 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
983 	u_int prio;
984 	int rv;
985 
986 	vol_pg = mpt_vol->config_page;
987 
988 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
989 		return;
990 
991 	/*
992 	 * If the current RAID resync rate does not
993 	 * match our configured rate, update it.
994 	 */
995 	prio = vol_pg->VolumeSettings.Settings
996 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
997 	if (vol_pg->ResyncRate != 0
998 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
999 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1000 		if (req == NULL) {
1001 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1002 				    "Get request failed!\n");
1003 			return;
1004 		}
1005 
1006 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1007 					MPI_RAID_ACTION_SET_RESYNC_RATE,
1008 					mpt->raid_resync_rate, /*addr*/0,
1009 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
1010 		if (rv == ETIMEDOUT) {
1011 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1012 				    "Resync Rate Setting Timed-out\n");
1013 			return;
1014 		}
1015 
1016 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1017 		if (rv != 0
1018 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1019 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1020 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1021 				    "%d:%x:%x\n", rv, req->IOCStatus,
1022 				    ar->action_status);
1023 		} else
1024 			vol_pg->ResyncRate = mpt->raid_resync_rate;
1025 		mpt_free_request(mpt, req);
1026 	} else if ((prio && mpt->raid_resync_rate < 128)
1027 		|| (!prio && mpt->raid_resync_rate >= 128)) {
1028 		uint32_t data;
1029 
1030 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1031 		if (req == NULL) {
1032 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1033 				    "Get request failed!\n");
1034 			return;
1035 		}
1036 
1037 		vol_pg->VolumeSettings.Settings ^=
1038 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1039 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1040 		vol_pg->VolumeSettings.Settings ^=
1041 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1042 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1043 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1044 					data, /*addr*/0, /*len*/0,
1045 					/*write*/FALSE, /*wait*/TRUE);
1046 		if (rv == ETIMEDOUT) {
1047 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1048 				    "Resync Rate Setting Timed-out\n");
1049 			return;
1050 		}
1051 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1052 		if (rv != 0
1053 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1054 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1055 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1056 				    "%d:%x:%x\n", rv, req->IOCStatus,
1057 				    ar->action_status);
1058 		} else {
1059 			vol_pg->VolumeSettings.Settings ^=
1060 			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1061 		}
1062 
1063 		mpt_free_request(mpt, req);
1064 	}
1065 }
1066 
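/*
 * Inform CAM of the configured per-volume queue depth by issuing an
 * XPT_REL_SIMQ request with RELSIM_ADJUST_OPENINGS.
 */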
1067 static void
1068 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1069 		       struct cam_path *path)
1070 {
1071 	struct ccb_relsim crs;
1072 
1073 	memset(&crs, 0, sizeof(crs));
1074 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1075 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1076 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1077 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1078 	crs.openings = mpt->raid_queue_depth;
1079 	xpt_action((union ccb *)&crs);
1080 	if (crs.ccb_h.status != CAM_REQ_CMP)
1081 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1082 			    "with CAM status %#x\n", crs.ccb_h.status);
1083 }
1084 
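/*
 * Report a volume's settings, hot spare pool membership, and the state of
 * each member disk to the console.
 */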
1085 static void
1086 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1087 {
1088 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1089 	u_int i;
1090 
1091 	vol_pg = mpt_vol->config_page;
1092 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
1093 	for (i = 1; i <= 0x8000; i <<= 1) {
1094 		switch (vol_pg->VolumeSettings.Settings & i) {
1095 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1096 			mpt_prtc(mpt, " Member-WCE");
1097 			break;
1098 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1099 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1100 			break;
1101 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1102 			mpt_prtc(mpt, " Hot-Plug-Spares");
1103 			break;
1104 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1105 			mpt_prtc(mpt, " High-Priority-ReSync");
1106 			break;
1107 		default:
1108 			break;
1109 		}
1110 	}
1111 	mpt_prtc(mpt, " )\n");
1112 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1113 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1114 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1115 			  ? ":" : "s:");
1116 		for (i = 0; i < 8; i++) {
1117 			u_int mask;
1118 
1119 			mask = 0x1 << i;
1120 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1121 				continue;
1122 			mpt_prtc(mpt, " %d", i);
1123 		}
1124 		mpt_prtc(mpt, "\n");
1125 	}
1126 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1127 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1128 		struct mpt_raid_disk *mpt_disk;
1129 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1130 		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1131 		U8 f, s;
1132 
1133 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1134 		disk_pg = &mpt_disk->config_page;
1135 		mpt_prtc(mpt, "      ");
1136 		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1137 			 pt_bus, disk_pg->PhysDiskID);
1138 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1139 			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1140 			    "Primary" : "Secondary");
1141 		} else {
1142 			mpt_prtc(mpt, "Stripe Position %d",
1143 				 mpt_disk->member_number);
1144 		}
1145 		f = disk_pg->PhysDiskStatus.Flags;
1146 		s = disk_pg->PhysDiskStatus.State;
1147 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1148 			mpt_prtc(mpt, " Out of Sync");
1149 		}
1150 		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1151 			mpt_prtc(mpt, " Quiesced");
1152 		}
1153 		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1154 			mpt_prtc(mpt, " Inactive");
1155 		}
1156 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1157 			mpt_prtc(mpt, " Was Optimal");
1158 		}
1159 		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1160 			mpt_prtc(mpt, " Was Non-Optimal");
1161 		}
1162 		switch (s) {
1163 		case MPI_PHYSDISK0_STATUS_ONLINE:
1164 			mpt_prtc(mpt, " Online");
1165 			break;
1166 		case MPI_PHYSDISK0_STATUS_MISSING:
1167 			mpt_prtc(mpt, " Missing");
1168 			break;
1169 		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1170 			mpt_prtc(mpt, " Incompatible");
1171 			break;
1172 		case MPI_PHYSDISK0_STATUS_FAILED:
1173 			mpt_prtc(mpt, " Failed");
1174 			break;
1175 		case MPI_PHYSDISK0_STATUS_INITIALIZING:
1176 			mpt_prtc(mpt, " Initializing");
1177 			break;
1178 		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1179 			mpt_prtc(mpt, " Requested Offline");
1180 			break;
1181 		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1182 			mpt_prtc(mpt, " Requested Failed");
1183 			break;
1184 		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1185 		default:
1186 			mpt_prtc(mpt, " Offline Other (%x)", s);
1187 			break;
1188 		}
1189 		mpt_prtc(mpt, "\n");
1190 	}
1191 }
1192 
1193 static void
1194 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1195 {
1196 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1197 	int rd_bus = cam_sim_bus(mpt->sim);
1198 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1199 	u_int i;
1200 
1201 	disk_pg = &mpt_disk->config_page;
1202 	mpt_disk_prt(mpt, mpt_disk,
1203 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1204 		     device_get_nameunit(mpt->dev), rd_bus,
1205 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1206 		     pt_bus, mpt_disk - mpt->raid_disks);
1207 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1208 		return;
1209 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1210 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1211 		   ? ":" : "s:");
1212 	for (i = 0; i < 8; i++) {
1213 		u_int mask;
1214 
1215 		mask = 0x1 << i;
1216 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1217 			continue;
1218 		mpt_prtc(mpt, " %d", i);
1219 	}
1220 	mpt_prtc(mpt, "\n");
1221 }
1222 
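/*
 * Re-read RAID Physical Disk Page 0 for a member disk and convert it to
 * host byte order.
 */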
1223 static void
1224 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1225 		      IOC_3_PHYS_DISK *ioc_disk)
1226 {
1227 	int rv;
1228 
1229 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1230 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1231 				 &mpt_disk->config_page.Header,
1232 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1233 	if (rv != 0) {
1234 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1235 			"Failed to read RAID Disk Hdr(%d)\n",
1236 		 	ioc_disk->PhysDiskNum);
1237 		return;
1238 	}
1239 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1240 				   &mpt_disk->config_page.Header,
1241 				   sizeof(mpt_disk->config_page),
1242 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1243 	if (rv != 0)
1244 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1245 			"Failed to read RAID Disk Page(%d)\n",
1246 		 	ioc_disk->PhysDiskNum);
1247 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1248 }
1249 
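/*
 * Re-read RAID Volume Page 0 for a volume, update the member disk
 * mapping, and, if a resync is in progress, fetch the progress indicator
 * via a RAID action request.
 */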
1250 static void
1251 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1252     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1253 {
1254 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1255 	struct mpt_raid_action_result *ar;
1256 	request_t *req;
1257 	int rv;
1258 	int i;
1259 
1260 	vol_pg = mpt_vol->config_page;
1261 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1262 
1263 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1264 	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1265 	if (rv != 0) {
1266 		mpt_vol_prt(mpt, mpt_vol,
1267 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1268 		    ioc_vol->VolumePageNumber);
1269 		return;
1270 	}
1271 
1272 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1273 	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1274 	if (rv != 0) {
1275 		mpt_vol_prt(mpt, mpt_vol,
1276 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1277 		    ioc_vol->VolumePageNumber);
1278 		return;
1279 	}
1280 	mpt2host_config_page_raid_vol_0(vol_pg);
1281 
1282 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1283 
1284 	/* Update disk entry array data. */
1285 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1286 		struct mpt_raid_disk *mpt_disk;
1287 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1288 		mpt_disk->volume = mpt_vol;
1289 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1290 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1291 			mpt_disk->member_number--;
1292 		}
1293 	}
1294 
1295 	if ((vol_pg->VolumeStatus.Flags
1296 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1297 		return;
1298 
1299 	req = mpt_get_request(mpt, TRUE);
1300 	if (req == NULL) {
1301 		mpt_vol_prt(mpt, mpt_vol,
1302 		    "mpt_refresh_raid_vol: Get request failed!\n");
1303 		return;
1304 	}
1305 	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1306 	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1307 	if (rv == ETIMEDOUT) {
1308 		mpt_vol_prt(mpt, mpt_vol,
1309 		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1310 		mpt_free_request(mpt, req);
1311 		return;
1312 	}
1313 
1314 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1315 	if (rv == 0
1316 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1317 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1318 		memcpy(&mpt_vol->sync_progress,
1319 		       &ar->action_data.indicator_struct,
1320 		       sizeof(mpt_vol->sync_progress));
1321 		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1322 	} else {
1323 		mpt_vol_prt(mpt, mpt_vol,
1324 		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1325 	}
1326 	mpt_free_request(mpt, req);
1327 }
1328 
1329 /*
1330  * Update in-core information about RAID support.  We update any entries
1331  * that didn't previously exist or that have been marked as needing to
1332  * be updated by our event handler.  Interesting changes are displayed
1333  * to the console.
1334  */
1335 static int
1336 mpt_refresh_raid_data(struct mpt_softc *mpt)
1337 {
1338 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1339 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1340 	IOC_3_PHYS_DISK *ioc_disk;
1341 	IOC_3_PHYS_DISK *ioc_last_disk;
1342 	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1343 	size_t len;
1344 	int rv;
1345 	int i;
1346 	u_int nonopt_volumes;
1347 
1348 	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1349 		return (0);
1350 	}
1351 
1352 	/*
1353 	 * Mark all items as unreferenced by the configuration.
1354 	 * This allows us to find, report, and discard stale
1355 	 * entries.
1356 	 */
1357 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1358 		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1359 	}
1360 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1361 		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1362 	}
1363 
1364 	/*
1365 	 * Get Physical Disk information.
1366 	 */
1367 	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1368 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1369 				   &mpt->ioc_page3->Header, len,
1370 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1371 	if (rv) {
1372 		mpt_prt(mpt,
1373 		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1374 		return (-1);
1375 	}
1376 	mpt2host_config_page_ioc3(mpt->ioc_page3);
1377 
1378 	ioc_disk = mpt->ioc_page3->PhysDisk;
1379 	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1380 	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1381 		struct mpt_raid_disk *mpt_disk;
1382 
1383 		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1384 		mpt_disk->flags |= MPT_RDF_REFERENCED;
1385 		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1386 		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1387 			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1388 		}
1389 		mpt_disk->flags |= MPT_RDF_ACTIVE;
1390 		mpt->raid_rescan++;
1391 	}
1392 
1393 	/*
1394 	 * Refresh volume data.
1395 	 */
1396 	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1397 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1398 				   &mpt->ioc_page2->Header, len,
1399 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1400 	if (rv) {
1401 		mpt_prt(mpt, "mpt_refresh_raid_data: "
1402 			"Failed to read IOC Page 2\n");
1403 		return (-1);
1404 	}
1405 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1406 
1407 	ioc_vol = mpt->ioc_page2->RaidVolume;
1408 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1409 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1410 		struct mpt_raid_volume *mpt_vol;
1411 
1412 		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1413 		mpt_vol->flags |= MPT_RVF_REFERENCED;
1414 		vol_pg = mpt_vol->config_page;
1415 		if (vol_pg == NULL)
1416 			continue;
1417 		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1418 		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1419 		 || (vol_pg->VolumeStatus.Flags
1420 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1421 			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1422 		}
1423 		mpt_vol->flags |= MPT_RVF_ACTIVE;
1424 	}
1425 
1426 	nonopt_volumes = 0;
1427 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1428 		struct mpt_raid_volume *mpt_vol;
1429 		uint64_t total;
1430 		uint64_t left;
1431 		int m;
1432 		u_int prio;
1433 
1434 		mpt_vol = &mpt->raid_volumes[i];
1435 
1436 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1437 			continue;
1438 		}
1439 
1440 		vol_pg = mpt_vol->config_page;
1441 		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1442 		 == MPT_RVF_ANNOUNCED) {
1443 			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1444 			mpt_vol->flags = 0;
1445 			continue;
1446 		}
1447 
1448 		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1449 			mpt_announce_vol(mpt, mpt_vol);
1450 			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1451 		}
1452 
1453 		if (vol_pg->VolumeStatus.State !=
1454 		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1455 			nonopt_volumes++;
1456 
1457 		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1458 			continue;
1459 
1460 		mpt_vol->flags |= MPT_RVF_UP2DATE;
1461 		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1462 		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1463 		mpt_verify_mwce(mpt, mpt_vol);
1464 
1465 		if (vol_pg->VolumeStatus.Flags == 0) {
1466 			continue;
1467 		}
1468 
1469 		mpt_vol_prt(mpt, mpt_vol, "Status (");
1470 		for (m = 1; m <= 0x80; m <<= 1) {
1471 			switch (vol_pg->VolumeStatus.Flags & m) {
1472 			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1473 				mpt_prtc(mpt, " Enabled");
1474 				break;
1475 			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1476 				mpt_prtc(mpt, " Quiesced");
1477 				break;
1478 			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1479 				mpt_prtc(mpt, " Re-Syncing");
1480 				break;
1481 			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1482 				mpt_prtc(mpt, " Inactive");
1483 				break;
1484 			default:
1485 				break;
1486 			}
1487 		}
1488 		mpt_prtc(mpt, " )\n");
1489 
1490 		if ((vol_pg->VolumeStatus.Flags
1491 		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1492 			continue;
1493 
1494 		mpt_verify_resync_rate(mpt, mpt_vol);
1495 
1496 		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1497 		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1498 		if (vol_pg->ResyncRate != 0) {
1499 			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1500 			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1501 			    prio / 1000, prio % 1000);
1502 		} else {
1503 			prio = vol_pg->VolumeSettings.Settings
1504 			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1505 			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1506 			    prio ? "High" : "Low");
1507 		}
1508 		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1509 			    "blocks remaining\n", (uintmax_t)left,
1510 			    (uintmax_t)total);
1511 
1512 		/* Periodically report on sync progress. */
1513 		mpt_schedule_raid_refresh(mpt);
1514 	}
1515 
1516 	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1517 		struct mpt_raid_disk *mpt_disk;
1518 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1519 		int m;
1520 
1521 		mpt_disk = &mpt->raid_disks[i];
1522 		disk_pg = &mpt_disk->config_page;
1523 
1524 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1525 			continue;
1526 
1527 		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1528 		 == MPT_RDF_ANNOUNCED) {
1529 			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1530 			mpt_disk->flags = 0;
1531 			mpt->raid_rescan++;
1532 			continue;
1533 		}
1534 
1535 		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1536 			mpt_announce_disk(mpt, mpt_disk);
1537 			mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1538 		}
1539 
1540 		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1541 			continue;
1542 
1543 		mpt_disk->flags |= MPT_RDF_UP2DATE;
1544 		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1545 		if (disk_pg->PhysDiskStatus.Flags == 0)
1546 			continue;
1547 
1548 		mpt_disk_prt(mpt, mpt_disk, "Status (");
1549 		for (m = 1; m <= 0x80; m <<= 1) {
1550 			switch (disk_pg->PhysDiskStatus.Flags & m) {
1551 			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1552 				mpt_prtc(mpt, " Out-Of-Sync");
1553 				break;
1554 			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1555 				mpt_prtc(mpt, " Quiesced");
1556 				break;
1557 			default:
1558 				break;
1559 			}
1560 		}
1561 		mpt_prtc(mpt, " )\n");
1562 	}
1563 
1564 	mpt->raid_nonopt_volumes = nonopt_volumes;
1565 	return (0);
1566 }
1567 
1568 static void
1569 mpt_raid_timer(void *arg)
1570 {
1571 	struct mpt_softc *mpt;
1572 
1573 	mpt = (struct mpt_softc *)arg;
1574 	MPT_LOCK_ASSERT(mpt);
1575 	mpt_raid_wakeup(mpt);
1576 }
1577 
1578 static void
1579 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1580 {
1581 
1582 	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1583 		      mpt_raid_timer, mpt);
1584 }
1585 
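/*
 * Release all RAID related allocations (volume config pages, the volume
 * and disk arrays, and IOC Pages 2 and 3) at detach time.
 */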
1586 void
1587 mpt_raid_free_mem(struct mpt_softc *mpt)
1588 {
1589 
1590 	if (mpt->raid_volumes) {
1591 		struct mpt_raid_volume *mpt_raid;
1592 		int i;
1593 		for (i = 0; i < mpt->raid_max_volumes; i++) {
1594 			mpt_raid = &mpt->raid_volumes[i];
1595 			if (mpt_raid->config_page) {
1596 				free(mpt_raid->config_page, M_DEVBUF);
1597 				mpt_raid->config_page = NULL;
1598 			}
1599 		}
1600 		free(mpt->raid_volumes, M_DEVBUF);
1601 		mpt->raid_volumes = NULL;
1602 	}
1603 	if (mpt->raid_disks) {
1604 		free(mpt->raid_disks, M_DEVBUF);
1605 		mpt->raid_disks = NULL;
1606 	}
1607 	if (mpt->ioc_page2) {
1608 		free(mpt->ioc_page2, M_DEVBUF);
1609 		mpt->ioc_page2 = NULL;
1610 	}
1611 	if (mpt->ioc_page3) {
1612 		free(mpt->ioc_page3, M_DEVBUF);
1613 		mpt->ioc_page3 = NULL;
1614 	}
1615 	mpt->raid_max_volumes = 0;
1616 	mpt->raid_max_disks = 0;
1617 }
1618 
1619 static int
1620 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1621 {
1622 	struct mpt_raid_volume *mpt_vol;
1623 
1624 	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1625 	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1626 	 && rate != MPT_RAID_RESYNC_RATE_NC)
1627 		return (EINVAL);
1628 
1629 	MPT_LOCK(mpt);
1630 	mpt->raid_resync_rate = rate;
1631 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1632 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1633 			continue;
1634 		}
1635 		mpt_verify_resync_rate(mpt, mpt_vol);
1636 	}
1637 	MPT_UNLOCK(mpt);
1638 	return (0);
1639 }
1640 
1641 static int
1642 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1643 {
1644 	struct mpt_raid_volume *mpt_vol;
1645 
1646 	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1647 		return (EINVAL);
1648 
1649 	MPT_LOCK(mpt);
1650 	mpt->raid_queue_depth = vol_queue_depth;
1651 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1652 		struct cam_path *path;
1653 		int error;
1654 
1655 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1656 			continue;
1657 
1658 		mpt->raid_rescan = 0;
1659 
1660 		error = xpt_create_path(&path, NULL,
1661 					cam_sim_path(mpt->sim),
1662 					mpt_vol->config_page->VolumeID,
1663 					/*lun*/0);
1664 		if (error != CAM_REQ_CMP) {
1665 			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1666 			continue;
1667 		}
1668 		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1669 		xpt_free_path(path);
1670 	}
1671 	MPT_UNLOCK(mpt);
1672 	return (0);
1673 }
1674 
1675 static int
1676 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1677 {
1678 	struct mpt_raid_volume *mpt_vol;
1679 	int force_full_resync;
1680 
1681 	MPT_LOCK(mpt);
1682 	if (mwce == mpt->raid_mwce_setting) {
1683 		MPT_UNLOCK(mpt);
1684 		return (0);
1685 	}
1686 
1687 	/*
1688 	 * Catch MWCE being left on due to a failed shutdown.  Since
1689 	 * sysctls cannot be set by the loader, we treat the first
1690 	 * setting of this variable specially and force a full volume
1691 	 * resync if MWCE is enabled and a resync is in progress.
1692 	 */
1693 	force_full_resync = 0;
1694 	if (mpt->raid_mwce_set == 0
1695 	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1696 	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1697 		force_full_resync = 1;
1698 
1699 	mpt->raid_mwce_setting = mwce;
1700 	RAID_VOL_FOREACH(mpt, mpt_vol) {
1701 		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1702 		int resyncing;
1703 		int mwce;
1704 
1705 		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1706 			continue;
1707 
1708 		vol_pg = mpt_vol->config_page;
1709 		resyncing = vol_pg->VolumeStatus.Flags
1710 			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1711 		mwce = vol_pg->VolumeSettings.Settings
1712 		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1713 		if (force_full_resync && resyncing && mwce) {
1714 			/*
1715 			 * XXX disable/enable volume should force a resync,
1716 	 *     but we'll need to quiesce, drain, and restart
1717 			 *     I/O to do that.
1718 			 */
1719 			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1720 				    "detected.  Suggest full resync.\n");
1721 		}
1722 		mpt_verify_mwce(mpt, mpt_vol);
1723 	}
1724 	mpt->raid_mwce_set = 1;
1725 	MPT_UNLOCK(mpt);
1726 	return (0);
1727 }
1728 
1729 static const char *mpt_vol_mwce_strs[] =
1730 {
1731 	"On",
1732 	"Off",
1733 	"On-During-Rebuild",
1734 	"NC"
1735 };
1736 
1737 static int
1738 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1739 {
1740 	char inbuf[20];
1741 	struct mpt_softc *mpt;
1742 	const char *str;
1743 	int error;
1744 	u_int size;
1745 	u_int i;
1746 
1747 	mpt = (struct mpt_softc *)arg1;
1748 	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1749 	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1750 	if (error || !req->newptr) {
1751 		return (error);
1752 	}
1753 
1754 	size = req->newlen - req->newidx;
1755 	if (size >= sizeof(inbuf)) {
1756 		return (EINVAL);
1757 	}
1758 
1759 	error = SYSCTL_IN(req, inbuf, size);
1760 	if (error) {
1761 		return (error);
1762 	}
1763 	inbuf[size] = '\0';
1764 	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1765 		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1766 			return (mpt_raid_set_vol_mwce(mpt, i));
1767 		}
1768 	}
1769 	return (EINVAL);
1770 }
1771 
1772 static int
1773 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1774 {
1775 	struct mpt_softc *mpt;
1776 	u_int raid_resync_rate;
1777 	int error;
1778 
1779 	mpt = (struct mpt_softc *)arg1;
1780 	raid_resync_rate = mpt->raid_resync_rate;
1781 
1782 	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1783 	if (error || !req->newptr) {
1784 		return error;
1785 	}
1786 
1787 	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1788 }
1789 
1790 static int
1791 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1792 {
1793 	struct mpt_softc *mpt;
1794 	u_int raid_queue_depth;
1795 	int error;
1796 
1797 	mpt = (struct mpt_softc *)arg1;
1798 	raid_queue_depth = mpt->raid_queue_depth;
1799 
1800 	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1801 	if (error || !req->newptr) {
1802 		return error;
1803 	}
1804 
1805 	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1806 }
1807 
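/*
 * Create the RAID tuning sysctls under the device's sysctl tree (typically
 * dev.mpt.<unit>.vol_member_wce, vol_queue_depth, and vol_resync_rate,
 * plus a read-only nonoptimal_volumes count).
 */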
1808 static void
1809 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1810 {
1811 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1812 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1813 
1814 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1815 	    "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1816 	    mpt, 0, mpt_raid_sysctl_vol_member_wce, "A",
1817 	    "volume member WCE(On,Off,On-During-Rebuild,NC)");
1818 
1819 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1820 	    "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1821 	    mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I",
1822 	    "default volume queue depth");
1823 
1824 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1825 	    "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1826 	    mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I",
1827 	    "volume resync priority (0 == NC, 1 - 255)");
1828 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 			"nonoptimal_volumes", CTLFLAG_RD,
1830 			&mpt->raid_nonopt_volumes, 0,
1831 			"number of nonoptimal volumes");
1832 }
1833