/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/mpt/mpt.h>

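/*
 * Layout used to stash the interesting pieces of a RAID action reply
 * (status words and action data) in the request buffer, just past the
 * request frame, so they remain available after the reply frame has
 * been handed back to the IOC.  Filled in by mpt_user_reply_handler()
 * and consumed by mpt_user_raid_action().
 */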
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

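/*
 * A single DMA-able buffer, allocated per ioctl invocation, that the
 * IOC reads configuration page or RAID action data from (or writes it
 * to) on behalf of a user request.
 */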
struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

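/*
 * The "mpt_user" personality attaches to every controller and exposes
 * a /dev/mpt<unit> character device through which the ioctls handled
 * below are issued.
 */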
static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

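/*
 * Reply-handler id assigned by mpt_register_handler() at attach time.
 * It is OR'ed into the MsgContext of requests built here (see
 * mpt_user_raid_action()) so that the core driver routes the IOC's
 * reply back to mpt_user_reply_handler().
 */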
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

void
mpt_user_ready(struct mpt_softc *mpt)
{
}

int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}

void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{

	return (0);
}

static int
mpt_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{

	return (0);
}

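/*
 * Allocate a physically contiguous, 32-bit addressable DMA buffer of
 * at most 16MB for the IOC to transfer page or action data through.
 */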
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}

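/*
 * Fetch the header of a configuration page.  This is a CONFIG request
 * with the PAGE_HEADER action and no data transfer; the IOC returns
 * the header in the reply frame, from which it is copied back into the
 * caller's request structure.
 */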
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = page_req->page_address;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}

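/*
 * Read the current values of a configuration page into the DMA buffer.
 * The buffer must already contain the page header (mpt_ioctl() copies
 * it in from userland) since the header supplies the version, length
 * and number used to build the request.
 */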
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = page_req->page_address;
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
				  page_req->len, TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}

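/*
 * Extended configuration pages use a separate header format; the two
 * functions below mirror the header and page reads above for that
 * case.
 */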
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = ext_page_req->page_address;
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = ext_page_req->page_address;
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
				  ext_page_req->len, TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}

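/*
 * Write the current values of a configuration page from the DMA
 * buffer.  Only pages whose attribute marks them as changeable or
 * persistent may be written.
 */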
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = page_req->page_address;
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
				  page_req->len, TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = req->IOCStatus;
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD);
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
	struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREWRITE);
		se->Address = mpt_page->paddr;
		MPI_pSGE_SET_LENGTH(se, raid_act->len);
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = req->IOCStatus;
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD);
	mpt_free_request(mpt, req);
	return (0);
}

#ifdef __amd64__
#define	PTRIN(p)		((void *)(uintptr_t)(p))
#define PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif

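/*
 * Illustrative sketch (not part of the driver) of how a userland
 * consumer, e.g. mptutil(8), might drive this interface to fetch a
 * configuration page header.  The device unit, the chosen page (IOC
 * page 2) and the include paths are assumptions made for the example
 * only, and error handling is elided:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mpt_ioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	struct mpt_cfg_page_req req;
 *	int fd, error;
 *
 *	fd = open("/dev/mpt0", O_RDWR);
 *	memset(&req, 0, sizeof(req));
 *	req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *	req.header.PageNumber = 2;
 *	req.page_address = 0;
 *	error = ioctl(fd, MPTIO_READ_CFG_HEADER, &req);
 *
 * On success (error == 0 and (req.ioc_status & MPI_IOCSTATUS_MASK) ==
 * MPI_IOCSTATUS_SUCCESS), req.header holds the complete page header; a
 * follow-up MPTIO_READ_CFG_PAGE with buf/len sized from
 * req.header.PageLength (a count of 32-bit words) reads the page data.
 */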
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		/* The buffer must be large enough to hold the page header. */
		if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		if (ext_page_req->len <
		    (int)sizeof(CONFIG_EXTENDED_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}