xref: /freebsd/sys/dev/mpt/mpt_user.c (revision 5e3190f700637fcfc1a52daeaa4a031fdd2557c7)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2008 Yahoo!, Inc.
5  * All rights reserved.
6  * Written by: John Baldwin <jhb@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * LSI MPT-Fusion Host Adapter FreeBSD userland interface
33  */
34 
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #ifdef __amd64__
38 #include <sys/abi_compat.h>
39 #endif
40 #include <sys/conf.h>
41 #include <sys/errno.h>
42 #include <sys/ioccom.h>
43 #include <sys/mpt_ioctl.h>
44 
45 #include <dev/mpt/mpt.h>
46 
/*
 * RAID action results staged in the request buffer just past the request
 * frame: mpt_user_reply_handler() fills this in from the RAID action
 * reply, and mpt_user_raid_action() copies it back out to the user's
 * struct mpt_raid_action.
 */
struct mpt_user_raid_action_result {
	uint32_t	volume_status;	/* VolumeStatus from the reply */
	uint32_t	action_data[4];	/* ActionData words from the reply */
	uint16_t	action_status;	/* ActionStatus from the reply */
};
52 
/*
 * A single DMA-able buffer used to exchange config pages and RAID action
 * data with the controller.  Managed by mpt_alloc_buffer()/mpt_free_buffer();
 * vaddr is NULL whenever no buffer is currently allocated.
 */
struct mpt_page_memory {
	bus_dma_tag_t	tag;	/* tag the map and memory were created from */
	bus_dmamap_t	map;	/* load/unload and sync operations use this */
	bus_addr_t	paddr;	/* bus address handed to the controller */
	void		*vaddr;	/* kernel virtual address; NULL if unallocated */
};
59 
/* Personality hooks implemented below. */
static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

/*
 * The "mpt_user" personality; its probe accepts every controller, so the
 * userland interface is attached to all mpt(4) instances.
 */
static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);
80 
static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

/* Character device switch backing the /dev/mpt%d control nodes. */
static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

/*
 * Reply handler id assigned by mpt_register_handler() in mpt_user_attach();
 * OR'd into MsgContext so replies are routed to mpt_user_reply_handler().
 */
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;
99 
static int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* The user personality is interested in every controller. */
	return (0);
}
107 
/*
 * Attach the user personality to a controller: register our reply
 * handler and create the /dev/mpt%d node through which ioctls arrive.
 * On make_dev() failure the reply handler registration is undone.
 */
static int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	/* Handler (de)registration requires the softc lock. */
	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	/* Node is root:operator 0640, matching the controller unit number. */
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	/* Stash the softc so mpt_ioctl() can find it via si_drv1. */
	mpt->cdev->si_drv1 = mpt;
	return (0);
}
136 
static int
mpt_user_enable(struct mpt_softc *mpt)
{

	/* Nothing to do when the controller is enabled; always succeed. */
	return (0);
}
143 
static void
mpt_user_ready(struct mpt_softc *mpt)
{

	/* No per-controller state to set up once the IOC is ready. */
}
149 
150 static int
151 mpt_user_event(struct mpt_softc *mpt, request_t *req,
152     MSG_EVENT_NOTIFY_REPLY *msg)
153 {
154 
155 	/* Someday we may want to let a user daemon listen for events? */
156 	return (0);
157 }
158 
static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{

	/* No user-personality state needs resetting. */
}
164 
/*
 * Detach the user personality: remove the device node first so no new
 * ioctls can arrive, then deregister the reply handler under the lock.
 */
static void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}
179 
static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/* No per-open state; access control comes from the node's mode. */
	return (0);
}
186 
static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/* Nothing was acquired at open time, so nothing to release. */
	return (0);
}
193 
/*
 * Allocate a 'len'-byte DMA buffer for exchanging data with the
 * controller.  On success page_mem is fully initialized; on failure an
 * errno is returned and page_mem->vaddr is left NULL so mpt_free_buffer()
 * is a no-op.  Requests larger than 16MB are refused with ENOSPC.
 */
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	/* One contiguous segment, addressable below 4GB. */
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		/* Unwind the tag created above. */
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	/* mpt_map_rquest() records the bus address (and any error) in mi. */
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		/* Unwind in reverse order and flag the buffer as unused. */
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}
231 
232 static void
233 mpt_free_buffer(struct mpt_page_memory *page_mem)
234 {
235 
236 	if (page_mem->vaddr == NULL)
237 		return;
238 	bus_dmamap_unload(page_mem->tag, page_mem->map);
239 	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
240 	bus_dma_tag_destroy(page_mem->tag);
241 	page_mem->vaddr = NULL;
242 }
243 
/*
 * Issue a CONFIG page-header request for the page identified by
 * page_req->header/page_address and, on IOC success, copy the returned
 * header back into page_req->header.  The raw IOC status is always
 * returned in page_req->ioc_status.  Called with the softc locked.
 */
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	/* No data transfer for a header request; wait up to 5 seconds. */
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}
287 
/*
 * Read the current values of a config page.  The caller has copied the
 * user's CONFIG_PAGE_HEADER to the start of mpt_page's buffer; the
 * controller DMAs the full page into that same buffer.  Called with the
 * softc locked.
 */
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	/* Identify the page from the header the user supplied. */
	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		/*
		 * The request is deliberately not freed: the chip still
		 * owns it (see the comment in mpt_user_read_cfg_header).
		 */
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
326 
/*
 * Issue a page-header request for an extended config page and, on IOC
 * success, copy the returned (extended) header fields back into
 * ext_page_req->header.  Called with the softc locked.
 */
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	/* No data transfer for a header request; wait up to 5 seconds. */
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}
375 
/*
 * Read the current values of an extended config page.  The caller has
 * copied the user's CONFIG_EXTENDED_PAGE_HEADER to the start of
 * mpt_page's buffer; the page data is DMAed into that buffer.  Called
 * with the softc locked.
 */
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	/* Identify the extended page from the header the user supplied. */
	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		/*
		 * The request is deliberately not freed: the chip still
		 * owns it (see the comment in mpt_user_read_cfg_header).
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
416 
/*
 * Write the current values of a config page.  The full page, including
 * its header, has already been copied from user space into mpt_page's
 * buffer.  Only pages whose header attributes mark them changeable or
 * persistent may be written.  Called with the softc locked.
 */
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		/*
		 * The request is deliberately not freed: the chip still
		 * owns it (see the comment in mpt_user_read_cfg_header).
		 */
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
480 
/*
 * Reply handler for requests tagged with user_handler_id.  Records the
 * IOC status and RAID action results in the request, marks the request
 * done, and either wakes the sleeping submitter or frees a request that
 * had already timed out (late completion).
 */
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	/* Nothing to do for a reply with no associated request. */
	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		/*
		 * Stash the action results past the request frame, in the
		 * spot where mpt_user_raid_action() expects to find them.
		 */
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
517 
518 /*
519  * We use the first part of the request buffer after the request frame
520  * to hold the action data and action status from the RAID reply.  The
521  * rest of the request buffer is used to hold the buffer for the
522  * action SGE.
523  */
/*
 * Submit a RAID action request on behalf of the user.  The optional
 * data buffer in mpt_page (already filled from user space when this is
 * a write) is attached via a single simple SGE.  Results are fetched
 * from the staging area filled in by mpt_user_reply_handler().  Called
 * with the softc locked.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
	struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		/* Point the action data SGE at the DMA buffer. */
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	/* Route the reply to mpt_user_reply_handler(). */
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	/* Sleep until the reply handler marks the request done (2s max). */
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	/* Copy the results staged by the reply handler back to the user. */
	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
591 
/*
 * ioctl entry point for /dev/mpt%d.  Dispatches config page reads and
 * writes and RAID actions.  On amd64, the *32 variants of each command
 * are thunked: the 32-bit struct is converted to the native layout
 * before dispatch and converted back afterwards on success.
 */
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	/* si_drv1 was set to the softc by mpt_user_attach(). */
	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		/* The buffer must hold at least the page header. */
		if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		/* Copy in only the header; the chip supplies the rest. */
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		/* The buffer must hold at least the extended page header. */
		if (ext_page_req->len <
		    (int)sizeof(CONFIG_EXTENDED_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		/* Copy in only the header; the chip supplies the rest. */
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		/* A write supplies the whole page, not just the header. */
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		/* The data buffer is optional for RAID actions. */
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	/* No-op when no DMA buffer was allocated above. */
	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}
813