xref: /freebsd/sys/dev/mpt/mpt_user.c (revision 721351876cd4d3a8a700f62d2061331fa951a488)
1 /*-
2  * Copyright (c) 2008 Yahoo!, Inc.
3  * All rights reserved.
4  * Written by: John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of the author nor the names of any co-contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * LSI MPT-Fusion Host Adapter FreeBSD userland interface
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/conf.h>
38 #include <sys/errno.h>
39 #include <sys/ioccom.h>
40 #include <sys/mpt_ioctl.h>
41 
42 #include <dev/mpt/mpt.h>
43 
/*
 * Result of a RAID_ACTION firmware command, as captured by
 * mpt_user_reply_handler().  It is stored in the request buffer
 * immediately after the request frame (at req_vbuf + MPT_RQSL(mpt))
 * so mpt_user_raid_action() can retrieve it after the reply arrives.
 */
struct mpt_user_raid_action_result {
	uint32_t	volume_status;	/* VolumeStatus from the reply frame */
	uint32_t	action_data[4];	/* ActionData words from the reply */
	uint16_t	action_status;	/* ActionStatus from the reply */
};
49 
/* Personality hook implementations (typedefs declared in dev/mpt/mpt.h). */
static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

/*
 * The "mpt_user" personality: attaches to every controller and exposes
 * a character device for userland configuration/RAID ioctls.
 */
static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

/* Reply handler for requests issued on behalf of userland. */
static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

/* Character device switch for /dev/mpt%d. */
static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

/*
 * Reply-handler id assigned by mpt_register_handler() at attach time;
 * OR'ed into MsgContext so replies are routed back to this personality.
 */
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Personality probe: always succeed so the user interface is
 * available on every controller.
 */
int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}
97 
/*
 * Personality attach: register our reply handler and create the
 * /dev/mpt%d character device (root:operator, mode 0640).  The softc
 * is hung off si_drv1 for retrieval in the cdev entry points.
 *
 * Returns 0 on success, the mpt_register_handler() error, or ENOMEM
 * if the device node could not be created (handler is deregistered
 * again in that case).
 */
int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		/* Undo the handler registration before failing. */
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}
126 
/* Personality enable: nothing to do for the user interface. */
int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}
133 
/* Personality ready notification: nothing to do. */
void
mpt_user_ready(struct mpt_softc *mpt)
{
}
138 
/*
 * Personality event hook: events are currently ignored.  Returning 0
 * indicates this personality did not consume the event.
 */
int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}
147 
/* Personality reset notification: nothing to do. */
void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}
152 
/*
 * Personality detach: destroy the character device and deregister the
 * reply handler registered in mpt_user_attach().
 */
void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}
167 
/* d_open: no per-open state; permission checks are done by the cdev mode. */
static int
mpt_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{

	return (0);
}
174 
/* d_close: nothing to tear down. */
static int
mpt_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{

	return (0);
}
181 
182 static int
183 mpt_user_read_cfg_header(struct mpt_softc *mpt,
184     struct mpt_cfg_page_req *page_req)
185 {
186 	request_t  *req;
187 	cfgparms_t params;
188 	MSG_CONFIG *cfgp;
189 	int	    error;
190 
191 	req = mpt_get_request(mpt, TRUE);
192 	if (req == NULL) {
193 		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
194 		return (ENOMEM);
195 	}
196 
197 	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
198 	params.PageVersion = 0;
199 	params.PageLength = 0;
200 	params.PageNumber = page_req->header.PageNumber;
201 	params.PageType = page_req->header.PageType;
202 	params.PageAddress = page_req->page_address;
203 	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
204 				  TRUE, 5000);
205 	if (error != 0) {
206 		/*
207 		 * Leave the request. Without resetting the chip, it's
208 		 * still owned by it and we'll just get into trouble
209 		 * freeing it now. Mark it as abandoned so that if it
210 		 * shows up later it can be freed.
211 		 */
212 		mpt_prt(mpt, "read_cfg_header timed out\n");
213 		return (ETIMEDOUT);
214 	}
215 
216 	page_req->ioc_status = req->IOCStatus;
217 	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
218 		cfgp = req->req_vbuf;
219 		bcopy(&cfgp->Header, &page_req->header,
220 		    sizeof(page_req->header));
221 	}
222 	mpt_free_request(mpt, req);
223 	return (0);
224 }
225 
/*
 * Read the current values of a configuration page into mpt_page on
 * behalf of an MPTIO_READ_CFG_PAGE ioctl.  mpt_page must already
 * contain a valid CONFIG_PAGE_HEADER (copied in by the caller) that
 * identifies the page; the page data is DMA'd into the second half of
 * the request buffer and then copied over the caller's buffer.
 *
 * Returns ENOMEM, ETIMEDOUT (request deliberately leaked, see comment
 * in mpt_user_read_cfg_header()), or 0 with page_req->ioc_status set.
 */
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    void *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = page_req->page_address;
	/* DMA target: the area after the request frame in the same buffer. */
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  page_req->len, TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		/* Make the device-written page data visible to the CPU. */
		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD);
		memcpy(mpt_page, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt),
		    page_req->len);
	}
	mpt_free_request(mpt, req);
	return (0);
}
266 
/*
 * Fetch an extended configuration page header from the IOC on behalf
 * of an MPTIO_READ_EXT_CFG_HEADER ioctl.  On success the header
 * fields from the reply (including ExtPageLength/ExtPageType) are
 * written back into ext_page_req->header.
 *
 * Returns ENOMEM, ETIMEDOUT (request deliberately leaked), or 0 with
 * ext_page_req->ioc_status set for the caller to inspect.
 */
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = ext_page_req->page_address;
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}
315 
/*
 * Read the current values of an extended configuration page into
 * mpt_page on behalf of an MPTIO_READ_EXT_CFG_PAGE ioctl.  mpt_page
 * must already contain a valid CONFIG_EXTENDED_PAGE_HEADER.  The page
 * data is DMA'd into the second half of the request buffer and then
 * copied over the caller's buffer.
 *
 * Returns ENOMEM, ETIMEDOUT (request deliberately leaked), or 0 with
 * ext_page_req->ioc_status set.
 */
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, void *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = ext_page_req->page_address;
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	/* DMA target: the area after the request frame in the same buffer. */
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  ext_page_req->len, TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		/* Make the device-written page data visible to the CPU. */
		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD);
		memcpy(mpt_page, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt),
		    ext_page_req->len);
	}
	mpt_free_request(mpt, req);
	return (0);
}
358 
/*
 * Write the current values of a configuration page on behalf of an
 * MPTIO_WRITE_CFG_PAGE ioctl.  mpt_page holds the complete page
 * (header included) supplied by userland; it is only accepted when
 * the page's attribute bits mark it changeable or persistent.
 *
 * Returns EINVAL for read-only pages, ENOMEM, ETIMEDOUT (request
 * deliberately leaked), or 0 with page_req->ioc_status set.
 */
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, void *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	/* Stage the page data after the request frame for the DMA write. */
	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), mpt_page,
	    page_req->len);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = page_req->page_address;
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  page_req->len, TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = req->IOCStatus;
	mpt_free_request(mpt, req);
	return (0);
}
421 
/*
 * Reply handler for RAID_ACTION requests issued by
 * mpt_user_raid_action().  Captures the reply's status and action
 * data into a struct mpt_user_raid_action_result placed after the
 * request frame, marks the request done, and wakes any waiter.  A
 * request that had already timed out (REQ_STATE_TIMEDOUT) is freed
 * here instead, since nobody is waiting for it any more.
 *
 * Always returns TRUE (reply consumed).
 */
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	/* reply_frame may be NULL; presumably for context-only replies —
	 * in that case only the request state is updated. */
	if (reply_frame != NULL) {
		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD);
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
460 
461 /*
462  * We use the first part of the request buffer after the request frame
463  * to hold the action data and action status from the RAID reply.  The
464  * rest of the request buffer is used to hold the buffer for the
465  * action SGE.
466  */
467 static int
468 mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
469 	void *buf)
470 {
471 	request_t *req;
472 	struct mpt_user_raid_action_result *res;
473 	MSG_RAID_ACTION_REQUEST *rap;
474 	SGE_SIMPLE32 *se;
475 	int error;
476 
477 	req = mpt_get_request(mpt, TRUE);
478 	if (req == NULL)
479 		return (ENOMEM);
480 	rap = req->req_vbuf;
481 	memset(rap, 0, sizeof *rap);
482 	rap->Action = raid_act->action;
483 	rap->ActionDataWord = raid_act->action_data_word;
484 	rap->Function = MPI_FUNCTION_RAID_ACTION;
485 	rap->VolumeID = raid_act->volume_id;
486 	rap->VolumeBus = raid_act->volume_bus;
487 	rap->PhysDiskNum = raid_act->phys_disk_num;
488 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
489 	if (buf != 0 && raid_act->len != 0) {
490 		memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt) +
491 		    sizeof(struct mpt_user_raid_action_result), buf,
492 		    raid_act->len);
493 		se->Address = req->req_pbuf + MPT_RQSL(mpt) +
494 		    sizeof(struct mpt_user_raid_action_result);
495 		MPI_pSGE_SET_LENGTH(se, raid_act->len);
496 		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
497 		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
498 		    MPI_SGE_FLAGS_END_OF_LIST |
499 		    raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
500 		    MPI_SGE_FLAGS_IOC_TO_HOST));
501 	}
502 	rap->MsgContext = htole32(req->index | user_handler_id);
503 
504 	mpt_check_doorbell(mpt);
505 	mpt_send_cmd(mpt, req);
506 
507 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
508 	    2000);
509 	if (error != 0) {
510 		/*
511 		 * Leave request so it can be cleaned up later.
512 		 */
513 		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
514 		return (error);
515 	}
516 
517 	raid_act->ioc_status = req->IOCStatus;
518 	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
519 		mpt_free_request(mpt, req);
520 		return (0);
521 	}
522 
523 	res = (struct mpt_user_raid_action_result *)
524 	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
525 	raid_act->volume_status = res->volume_status;
526 	raid_act->action_status = res->action_status;
527 	bcopy(res->action_data, raid_act->action_data,
528 	    sizeof(res->action_data));
529 	if (buf != NULL)
530 		memcpy(buf, ((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt) +
531 		    sizeof(struct mpt_user_raid_action_result), raid_act->len);
532 	mpt_free_request(mpt, req);
533 	return (0);
534 }
535 
#ifdef __amd64__
/* Convert between 32-bit compat pointers and native kernel pointers. */
#define	PTRIN(p)		((void *)(uintptr_t)(p))
#define PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif
540 
541 static int
542 mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
543 {
544 	struct mpt_softc *mpt;
545 	struct mpt_cfg_page_req *page_req;
546 	struct mpt_ext_cfg_page_req *ext_page_req;
547 	struct mpt_raid_action *raid_act;
548 #ifdef __amd64__
549 	struct mpt_cfg_page_req32 *page_req32;
550 	struct mpt_cfg_page_req page_req_swab;
551 	struct mpt_ext_cfg_page_req32 *ext_page_req32;
552 	struct mpt_ext_cfg_page_req ext_page_req_swab;
553 	struct mpt_raid_action32 *raid_act32;
554 	struct mpt_raid_action raid_act_swab;
555 #endif
556 	void *mpt_page;
557 	int error;
558 
559 	mpt = dev->si_drv1;
560 	page_req = (void *)arg;
561 	ext_page_req = (void *)arg;
562 	raid_act = (void *)arg;
563 	mpt_page = NULL;
564 
565 #ifdef __amd64__
566 	/* Convert 32-bit structs to native ones. */
567 	page_req32 = (void *)arg;
568 	ext_page_req32 = (void *)arg;
569 	raid_act32 = (void *)arg;
570 	switch (cmd) {
571 	case MPTIO_READ_CFG_HEADER32:
572 	case MPTIO_READ_CFG_PAGE32:
573 	case MPTIO_WRITE_CFG_PAGE32:
574 		page_req = &page_req_swab;
575 		page_req->header = page_req32->header;
576 		page_req->page_address = page_req32->page_address;
577 		page_req->buf = PTRIN(page_req32->buf);
578 		page_req->len = page_req32->len;
579 		page_req->ioc_status = page_req32->ioc_status;
580 		break;
581 	case MPTIO_READ_EXT_CFG_HEADER32:
582 	case MPTIO_READ_EXT_CFG_PAGE32:
583 		ext_page_req = &ext_page_req_swab;
584 		ext_page_req->header = ext_page_req32->header;
585 		ext_page_req->page_address = ext_page_req32->page_address;
586 		ext_page_req->buf = PTRIN(ext_page_req32->buf);
587 		ext_page_req->len = ext_page_req32->len;
588 		ext_page_req->ioc_status = ext_page_req32->ioc_status;
589 		break;
590 	case MPTIO_RAID_ACTION32:
591 		raid_act = &raid_act_swab;
592 		raid_act->action = raid_act32->action;
593 		raid_act->volume_bus = raid_act32->volume_bus;
594 		raid_act->volume_id = raid_act32->volume_id;
595 		raid_act->phys_disk_num = raid_act32->phys_disk_num;
596 		raid_act->action_data_word = raid_act32->action_data_word;
597 		raid_act->buf = PTRIN(raid_act32->buf);
598 		raid_act->len = raid_act32->len;
599 		raid_act->volume_status = raid_act32->volume_status;
600 		bcopy(raid_act32->action_data, raid_act->action_data,
601 		    sizeof(raid_act->action_data));
602 		raid_act->action_status = raid_act32->action_status;
603 		raid_act->ioc_status = raid_act32->ioc_status;
604 		raid_act->write = raid_act32->write;
605 		break;
606 	}
607 #endif
608 
609 	switch (cmd) {
610 #ifdef __amd64__
611 	case MPTIO_READ_CFG_HEADER32:
612 #endif
613 	case MPTIO_READ_CFG_HEADER:
614 		MPT_LOCK(mpt);
615 		error = mpt_user_read_cfg_header(mpt, page_req);
616 		MPT_UNLOCK(mpt);
617 		break;
618 #ifdef __amd64__
619 	case MPTIO_READ_CFG_PAGE32:
620 #endif
621 	case MPTIO_READ_CFG_PAGE:
622 		if (page_req->len > (MPT_REQUEST_AREA - MPT_RQSL(mpt))) {
623 			error = EINVAL;
624 			break;
625 		}
626 		mpt_page = malloc(page_req->len, M_MPTUSER, M_WAITOK);
627 		error = copyin(page_req->buf, mpt_page,
628 		    sizeof(CONFIG_PAGE_HEADER));
629 		if (error)
630 			break;
631 		MPT_LOCK(mpt);
632 		error = mpt_user_read_cfg_page(mpt, page_req, mpt_page);
633 		MPT_UNLOCK(mpt);
634 		if (error)
635 			break;
636 		error = copyout(mpt_page, page_req->buf, page_req->len);
637 		break;
638 #ifdef __amd64__
639 	case MPTIO_READ_EXT_CFG_HEADER32:
640 #endif
641 	case MPTIO_READ_EXT_CFG_HEADER:
642 		MPT_LOCK(mpt);
643 		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
644 		MPT_UNLOCK(mpt);
645 		break;
646 #ifdef __amd64__
647 	case MPTIO_READ_EXT_CFG_PAGE32:
648 #endif
649 	case MPTIO_READ_EXT_CFG_PAGE:
650 		if (ext_page_req->len > (MPT_REQUEST_AREA - MPT_RQSL(mpt))) {
651 			error = EINVAL;
652 			break;
653 		}
654 		mpt_page = malloc(ext_page_req->len, M_MPTUSER, M_WAITOK);
655 		error = copyin(ext_page_req->buf, mpt_page,
656 		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
657 		if (error)
658 			break;
659 		MPT_LOCK(mpt);
660 		error = mpt_user_read_extcfg_page(mpt, ext_page_req, mpt_page);
661 		MPT_UNLOCK(mpt);
662 		if (error)
663 			break;
664 		error = copyout(mpt_page, ext_page_req->buf, ext_page_req->len);
665 		break;
666 #ifdef __amd64__
667 	case MPTIO_WRITE_CFG_PAGE32:
668 #endif
669 	case MPTIO_WRITE_CFG_PAGE:
670 		if (page_req->len > (MPT_REQUEST_AREA - MPT_RQSL(mpt))) {
671 			error = EINVAL;
672 			break;
673 		}
674 		mpt_page = malloc(page_req->len, M_MPTUSER, M_WAITOK);
675 		error = copyin(page_req->buf, mpt_page, page_req->len);
676 		if (error)
677 			break;
678 		MPT_LOCK(mpt);
679 		error = mpt_user_write_cfg_page(mpt, page_req, mpt_page);
680 		MPT_UNLOCK(mpt);
681 		break;
682 #ifdef __amd64__
683 	case MPTIO_RAID_ACTION32:
684 #endif
685 	case MPTIO_RAID_ACTION:
686 		if (raid_act->buf != NULL) {
687 			if (raid_act->len >
688 			    (MPT_REQUEST_AREA - MPT_RQSL(mpt) -
689 			    sizeof(struct mpt_user_raid_action_result))) {
690 				error = EINVAL;
691 				break;
692 			}
693 			mpt_page = malloc(raid_act->len, M_MPTUSER, M_WAITOK);
694 			error = copyin(raid_act->buf, mpt_page, raid_act->len);
695 			if (error)
696 				break;
697 		}
698 		MPT_LOCK(mpt);
699 		error = mpt_user_raid_action(mpt, raid_act, mpt_page);
700 		MPT_UNLOCK(mpt);
701 		if (error)
702 			break;
703 		error = copyout(mpt_page, raid_act->buf, raid_act->len);
704 		break;
705 	default:
706 		error = ENOIOCTL;
707 		break;
708 	}
709 
710 	if (mpt_page != NULL)
711 		free(mpt_page, M_MPTUSER);
712 
713 	if (error)
714 		return (error);
715 
716 #ifdef __amd64__
717 	/* Convert native structs to 32-bit ones. */
718 	switch (cmd) {
719 	case MPTIO_READ_CFG_HEADER32:
720 	case MPTIO_READ_CFG_PAGE32:
721 	case MPTIO_WRITE_CFG_PAGE32:
722 		page_req32->header = page_req->header;
723 		page_req32->page_address = page_req->page_address;
724 		page_req32->buf = PTROUT(page_req->buf);
725 		page_req32->len = page_req->len;
726 		page_req32->ioc_status = page_req->ioc_status;
727 		break;
728 	case MPTIO_READ_EXT_CFG_HEADER32:
729 	case MPTIO_READ_EXT_CFG_PAGE32:
730 		ext_page_req32->header = ext_page_req->header;
731 		ext_page_req32->page_address = ext_page_req->page_address;
732 		ext_page_req32->buf = PTROUT(ext_page_req->buf);
733 		ext_page_req32->len = ext_page_req->len;
734 		ext_page_req32->ioc_status = ext_page_req->ioc_status;
735 		break;
736 	case MPTIO_RAID_ACTION32:
737 		raid_act32->action = raid_act->action;
738 		raid_act32->volume_bus = raid_act->volume_bus;
739 		raid_act32->volume_id = raid_act->volume_id;
740 		raid_act32->phys_disk_num = raid_act->phys_disk_num;
741 		raid_act32->action_data_word = raid_act->action_data_word;
742 		raid_act32->buf = PTROUT(raid_act->buf);
743 		raid_act32->len = raid_act->len;
744 		raid_act32->volume_status = raid_act->volume_status;
745 		bcopy(raid_act->action_data, raid_act32->action_data,
746 		    sizeof(raid_act->action_data));
747 		raid_act32->action_status = raid_act->action_status;
748 		raid_act32->ioc_status = raid_act->ioc_status;
749 		raid_act32->write = raid_act->write;
750 		break;
751 	}
752 #endif
753 
754 	return (0);
755 }
756