1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2008 Yahoo!, Inc.
5  * All rights reserved.
6  * Written by: John Baldwin <jhb@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD userland interface
33  */
34 /*-
35  * Copyright (c) 2011-2015 LSI Corp.
36  * Copyright (c) 2013-2015 Avago Technologies
37  * All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD userland interface
61  *
62  * $FreeBSD$
63  */
64 
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67 
68 /* TODO Move headers to mpsvar */
69 #include <sys/types.h>
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/selinfo.h>
74 #include <sys/module.h>
75 #include <sys/bus.h>
76 #include <sys/conf.h>
77 #include <sys/bio.h>
78 #include <sys/abi_compat.h>
79 #include <sys/malloc.h>
80 #include <sys/uio.h>
81 #include <sys/sysctl.h>
82 #include <sys/ioccom.h>
83 #include <sys/endian.h>
84 #include <sys/queue.h>
85 #include <sys/kthread.h>
86 #include <sys/taskqueue.h>
87 #include <sys/proc.h>
88 #include <sys/sysent.h>
89 
90 #include <machine/bus.h>
91 #include <machine/resource.h>
92 #include <sys/rman.h>
93 
94 #include <cam/cam.h>
95 #include <cam/cam_ccb.h>
96 #include <cam/scsi/scsi_all.h>
97 
98 #include <dev/mps/mpi/mpi2_type.h>
99 #include <dev/mps/mpi/mpi2.h>
100 #include <dev/mps/mpi/mpi2_ioc.h>
101 #include <dev/mps/mpi/mpi2_cnfg.h>
102 #include <dev/mps/mpi/mpi2_init.h>
103 #include <dev/mps/mpi/mpi2_tool.h>
104 #include <dev/mps/mps_ioctl.h>
105 #include <dev/mps/mpsvar.h>
106 #include <dev/mps/mps_table.h>
107 #include <dev/mps/mps_sas.h>
108 #include <dev/pci/pcivar.h>
109 #include <dev/pci/pcireg.h>
110 
111 static d_open_t		mps_open;
112 static d_close_t	mps_close;
113 static d_ioctl_t	mps_ioctl_devsw;
114 
115 static struct cdevsw mps_cdevsw = {
116 	.d_version =	D_VERSION,
117 	.d_flags =	0,
118 	.d_open =	mps_open,
119 	.d_close =	mps_close,
120 	.d_ioctl =	mps_ioctl_devsw,
121 	.d_name =	"mps",
122 };
123 
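/*
 * Each supported MPI function has a pre-processing handler that validates the
 * user-supplied request and reply lengths and points the command at its SGL.
 */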
124 typedef int (mps_user_f)(struct mps_command *, struct mps_usr_command *);
125 static mps_user_f	mpi_pre_ioc_facts;
126 static mps_user_f	mpi_pre_port_facts;
127 static mps_user_f	mpi_pre_fw_download;
128 static mps_user_f	mpi_pre_fw_upload;
129 static mps_user_f	mpi_pre_sata_passthrough;
130 static mps_user_f	mpi_pre_smp_passthrough;
131 static mps_user_f	mpi_pre_config;
132 static mps_user_f	mpi_pre_sas_io_unit_control;
133 
134 static int mps_user_read_cfg_header(struct mps_softc *,
135 				    struct mps_cfg_page_req *);
136 static int mps_user_read_cfg_page(struct mps_softc *,
137 				  struct mps_cfg_page_req *, void *);
138 static int mps_user_read_extcfg_header(struct mps_softc *,
139 				     struct mps_ext_cfg_page_req *);
140 static int mps_user_read_extcfg_page(struct mps_softc *,
141 				     struct mps_ext_cfg_page_req *, void *);
142 static int mps_user_write_cfg_page(struct mps_softc *,
143 				   struct mps_cfg_page_req *, void *);
144 static int mps_user_setup_request(struct mps_command *,
145 				  struct mps_usr_command *);
146 static int mps_user_command(struct mps_softc *, struct mps_usr_command *);
147 
148 static int mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data);
149 static void mps_user_get_adapter_data(struct mps_softc *sc,
150     mps_adapter_data_t *data);
151 static void mps_user_read_pci_info(struct mps_softc *sc,
152     mps_pci_info_t *data);
153 static uint8_t mps_get_fw_diag_buffer_number(struct mps_softc *sc,
154     uint32_t unique_id);
155 static int mps_post_fw_diag_buffer(struct mps_softc *sc,
156     mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
157 static int mps_release_fw_diag_buffer(struct mps_softc *sc,
158     mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
159     uint32_t diag_type);
160 static int mps_diag_register(struct mps_softc *sc,
161     mps_fw_diag_register_t *diag_register, uint32_t *return_code);
162 static int mps_diag_unregister(struct mps_softc *sc,
163     mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
164 static int mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query,
165     uint32_t *return_code);
166 static int mps_diag_read_buffer(struct mps_softc *sc,
167     mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
168     uint32_t *return_code);
169 static int mps_diag_release(struct mps_softc *sc,
170     mps_fw_diag_release_t *diag_release, uint32_t *return_code);
171 static int mps_do_diag_action(struct mps_softc *sc, uint32_t action,
172     uint8_t *diag_action, uint32_t length, uint32_t *return_code);
173 static int mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data);
174 static void mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data);
175 static void mps_user_event_enable(struct mps_softc *sc,
176     mps_event_enable_t *data);
177 static int mps_user_event_report(struct mps_softc *sc,
178     mps_event_report_t *data);
179 static int mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data);
180 static int mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data);
181 
182 MALLOC_DEFINE(M_MPSUSER, "mps_user", "Buffers for mps(4) ioctls");
183 
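/*
 * Create the character device (/dev/mps<unit>) that exposes the ioctl
 * interface for this adapter.
 */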
184 int
185 mps_attach_user(struct mps_softc *sc)
186 {
187 	int unit;
188 
189 	unit = device_get_unit(sc->mps_dev);
190 	sc->mps_cdev = make_dev(&mps_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
191 	    "mps%d", unit);
192 	if (sc->mps_cdev == NULL) {
193 		return (ENOMEM);
194 	}
195 	sc->mps_cdev->si_drv1 = sc;
196 	return (0);
197 }
198 
199 void
200 mps_detach_user(struct mps_softc *sc)
201 {
202 
203 	/* XXX: do a purge of pending requests? */
204 	if (sc->mps_cdev != NULL)
205 		destroy_dev(sc->mps_cdev);
206 }
207 
208 static int
209 mps_open(struct cdev *dev, int flags, int fmt, struct thread *td)
210 {
211 
212 	return (0);
213 }
214 
215 static int
216 mps_close(struct cdev *dev, int flags, int fmt, struct thread *td)
217 {
218 
219 	return (0);
220 }
221 
222 static int
223 mps_user_read_cfg_header(struct mps_softc *sc,
224     struct mps_cfg_page_req *page_req)
225 {
226 	MPI2_CONFIG_PAGE_HEADER *hdr;
227 	struct mps_config_params params;
228 	int	    error;
229 
230 	hdr = &params.hdr.Struct;
231 	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
232 	params.page_address = le32toh(page_req->page_address);
233 	hdr->PageVersion = 0;
234 	hdr->PageLength = 0;
235 	hdr->PageNumber = page_req->header.PageNumber;
236 	hdr->PageType = page_req->header.PageType;
237 	params.buffer = NULL;
238 	params.length = 0;
239 	params.callback = NULL;
240 
241 	if ((error = mps_read_config_page(sc, &params)) != 0) {
242 		/*
243 		 * Leave the request. Without resetting the chip, it's
244 		 * still owned by it and we'll just get into trouble
245 		 * freeing it now. Mark it as abandoned so that if it
246 		 * shows up later it can be freed.
247 		 */
248 		mps_printf(sc, "read_cfg_header timed out\n");
249 		return (ETIMEDOUT);
250 	}
251 
252 	page_req->ioc_status = htole16(params.status);
253 	if ((page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
254 	    MPI2_IOCSTATUS_SUCCESS) {
255 		bcopy(hdr, &page_req->header, sizeof(page_req->header));
256 	}
257 
258 	return (0);
259 }
260 
261 static int
262 mps_user_read_cfg_page(struct mps_softc *sc, struct mps_cfg_page_req *page_req,
263     void *buf)
264 {
265 	MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
266 	struct mps_config_params params;
267 	int	      error;
268 
269 	reqhdr = buf;
270 	hdr = &params.hdr.Struct;
271 	hdr->PageVersion = reqhdr->PageVersion;
272 	hdr->PageLength = reqhdr->PageLength;
273 	hdr->PageNumber = reqhdr->PageNumber;
274 	hdr->PageType = reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK;
275 	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
276 	params.page_address = le32toh(page_req->page_address);
277 	params.buffer = buf;
278 	params.length = le32toh(page_req->len);
279 	params.callback = NULL;
280 
281 	if ((error = mps_read_config_page(sc, &params)) != 0) {
282 		mps_printf(sc, "mps_user_read_cfg_page timed out\n");
283 		return (ETIMEDOUT);
284 	}
285 
286 	page_req->ioc_status = htole16(params.status);
287 	return (0);
288 }
289 
290 static int
291 mps_user_read_extcfg_header(struct mps_softc *sc,
292     struct mps_ext_cfg_page_req *ext_page_req)
293 {
294 	MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
295 	struct mps_config_params params;
296 	int	    error;
297 
298 	hdr = &params.hdr.Ext;
299 	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
300 	hdr->PageVersion = ext_page_req->header.PageVersion;
301 	hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
302 	hdr->ExtPageLength = 0;
303 	hdr->PageNumber = ext_page_req->header.PageNumber;
304 	hdr->ExtPageType = ext_page_req->header.ExtPageType;
305 	params.page_address = le32toh(ext_page_req->page_address);
306 	params.buffer = NULL;
307 	params.length = 0;
308 	params.callback = NULL;
309 
310 	if ((error = mps_read_config_page(sc, &params)) != 0) {
311 		/*
312 		 * Leave the request. Without resetting the chip, it's
313 		 * still owned by it and we'll just get into trouble
314 		 * freeing it now. Mark it as abandoned so that if it
315 		 * shows up later it can be freed.
316 		 */
317 		mps_printf(sc, "mps_user_read_extcfg_header timed out\n");
318 		return (ETIMEDOUT);
319 	}
320 
321 	ext_page_req->ioc_status = htole16(params.status);
322 	if ((ext_page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
323 	    MPI2_IOCSTATUS_SUCCESS) {
324 		ext_page_req->header.PageVersion = hdr->PageVersion;
325 		ext_page_req->header.PageNumber = hdr->PageNumber;
326 		ext_page_req->header.PageType = hdr->PageType;
327 		ext_page_req->header.ExtPageLength = hdr->ExtPageLength;
328 		ext_page_req->header.ExtPageType = hdr->ExtPageType;
329 	}
330 
331 	return (0);
332 }
333 
334 static int
335 mps_user_read_extcfg_page(struct mps_softc *sc,
336     struct mps_ext_cfg_page_req *ext_page_req, void *buf)
337 {
338 	MPI2_CONFIG_EXTENDED_PAGE_HEADER *reqhdr, *hdr;
339 	struct mps_config_params params;
340 	int error;
341 
342 	reqhdr = buf;
343 	hdr = &params.hdr.Ext;
344 	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
345 	params.page_address = le32toh(ext_page_req->page_address);
346 	hdr->PageVersion = reqhdr->PageVersion;
347 	hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
348 	hdr->PageNumber = reqhdr->PageNumber;
349 	hdr->ExtPageType = reqhdr->ExtPageType;
350 	hdr->ExtPageLength = reqhdr->ExtPageLength;
351 	params.buffer = buf;
352 	params.length = le32toh(ext_page_req->len);
353 	params.callback = NULL;
354 
355 	if ((error = mps_read_config_page(sc, &params)) != 0) {
356 		mps_printf(sc, "mps_user_read_extcfg_page timed out\n");
357 		return (ETIMEDOUT);
358 	}
359 
360 	ext_page_req->ioc_status = htole16(params.status);
361 	return (0);
362 }
363 
364 static int
365 mps_user_write_cfg_page(struct mps_softc *sc,
366     struct mps_cfg_page_req *page_req, void *buf)
367 {
368 	MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
369 	struct mps_config_params params;
370 	u_int	      hdr_attr;
371 	int	      error;
372 
373 	reqhdr = buf;
374 	hdr = &params.hdr.Struct;
375 	hdr_attr = reqhdr->PageType & MPI2_CONFIG_PAGEATTR_MASK;
376 	if (hdr_attr != MPI2_CONFIG_PAGEATTR_CHANGEABLE &&
377 	    hdr_attr != MPI2_CONFIG_PAGEATTR_PERSISTENT) {
378 		mps_printf(sc, "page type 0x%x not changeable\n",
379 			reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK);
380 		return (EINVAL);
381 	}
382 
383 	/*
384 	 * There isn't any point in restoring stripped out attributes
385 	 * if you then mask them going down to issue the request.
386 	 */
387 
388 	hdr->PageVersion = reqhdr->PageVersion;
389 	hdr->PageLength = reqhdr->PageLength;
390 	hdr->PageNumber = reqhdr->PageNumber;
391 	hdr->PageType = reqhdr->PageType;
392 	params.action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
393 	params.page_address = le32toh(page_req->page_address);
394 	params.buffer = buf;
395 	params.length = le32toh(page_req->len);
396 	params.callback = NULL;
397 
398 	if ((error = mps_write_config_page(sc, &params)) != 0) {
399 		mps_printf(sc, "mps_write_cfg_page timed out\n");
400 		return (ETIMEDOUT);
401 	}
402 
403 	page_req->ioc_status = htole16(params.status);
404 	return (0);
405 }
406 
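/*
 * Record where the SGL begins within the request frame and how many bytes
 * remain in the frame for SGEs after the fixed portion of the request.
 */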
407 void
408 mpi_init_sge(struct mps_command *cm, void *req, void *sge)
409 {
410 	int off, space;
411 
412 	space = (int)cm->cm_sc->reqframesz;
413 	off = (uintptr_t)sge - (uintptr_t)req;
414 
415 	KASSERT(off < space, ("bad pointers %p %p, off %d, space %d",
416             req, sge, off, space));
417 
418 	cm->cm_sge = sge;
419 	cm->cm_sglsize = space - off;
420 }
421 
422 /*
423  * Prepare the mps_command for an IOC_FACTS request.
424  */
425 static int
426 mpi_pre_ioc_facts(struct mps_command *cm, struct mps_usr_command *cmd)
427 {
428 	MPI2_IOC_FACTS_REQUEST *req = (void *)cm->cm_req;
429 	MPI2_IOC_FACTS_REPLY *rpl;
430 
431 	if (cmd->req_len != sizeof *req)
432 		return (EINVAL);
433 	if (cmd->rpl_len != sizeof *rpl)
434 		return (EINVAL);
435 
436 	cm->cm_sge = NULL;
437 	cm->cm_sglsize = 0;
438 	return (0);
439 }
440 
441 /*
442  * Prepare the mps_command for a PORT_FACTS request.
443  */
444 static int
445 mpi_pre_port_facts(struct mps_command *cm, struct mps_usr_command *cmd)
446 {
447 	MPI2_PORT_FACTS_REQUEST *req = (void *)cm->cm_req;
448 	MPI2_PORT_FACTS_REPLY *rpl;
449 
450 	if (cmd->req_len != sizeof *req)
451 		return (EINVAL);
452 	if (cmd->rpl_len != sizeof *rpl)
453 		return (EINVAL);
454 
455 	cm->cm_sge = NULL;
456 	cm->cm_sglsize = 0;
457 	return (0);
458 }
459 
460 /*
461  * Prepare the mps_command for a FW_DOWNLOAD request.
462  */
463 static int
464 mpi_pre_fw_download(struct mps_command *cm, struct mps_usr_command *cmd)
465 {
466 	MPI2_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req;
467 	MPI2_FW_DOWNLOAD_REPLY *rpl;
468 	MPI2_FW_DOWNLOAD_TCSGE tc;
469 	int error;
470 
471 	/*
472 	 * This code assumes there is room in the request's SGL for
473 	 * the TransactionContext plus at least a SGL chain element.
474 	 */
475 	CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE);
476 
477 	if (cmd->req_len != sizeof *req)
478 		return (EINVAL);
479 	if (cmd->rpl_len != sizeof *rpl)
480 		return (EINVAL);
481 
482 	if (cmd->len == 0)
483 		return (EINVAL);
484 
485 	error = copyin(cmd->buf, cm->cm_data, cmd->len);
486 	if (error != 0)
487 		return (error);
488 
489 	mpi_init_sge(cm, req, &req->SGL);
490 	bzero(&tc, sizeof tc);
491 
492 	/*
493 	 * For now, the F/W image must be provided in a single request.
494 	 */
495 	if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0)
496 		return (EINVAL);
497 	if (req->TotalImageSize != cmd->len)
498 		return (EINVAL);
499 
500 	/*
501 	 * The value of the first two elements is specified in the
502 	 * Fusion-MPT Message Passing Interface document.
503 	 */
504 	tc.ContextSize = 0;
505 	tc.DetailsLength = 12;
506 	tc.ImageOffset = 0;
507 	tc.ImageSize = cmd->len;
508 
509 	cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
510 
511 	return (mps_push_sge(cm, &tc, sizeof tc, 0));
512 }
513 
514 /*
515  * Prepare the mps_command for a FW_UPLOAD request.
516  */
517 static int
518 mpi_pre_fw_upload(struct mps_command *cm, struct mps_usr_command *cmd)
519 {
520 	MPI2_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req;
521 	MPI2_FW_UPLOAD_REPLY *rpl;
522 	MPI2_FW_UPLOAD_TCSGE tc;
523 
524 	/*
525 	 * This code assumes there is room in the request's SGL for
526 	 * the TransactionContext plus at least a SGL chain element.
527 	 */
528 	CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE);
529 
530 	if (cmd->req_len != sizeof *req)
531 		return (EINVAL);
532 	if (cmd->rpl_len != sizeof *rpl)
533 		return (EINVAL);
534 
535 	mpi_init_sge(cm, req, &req->SGL);
536 	bzero(&tc, sizeof tc);
537 
538 	/*
539 	 * The value of the first two elements is specified in the
540 	 * Fusion-MPT Message Passing Interface document.
541 	 */
542 	tc.ContextSize = 0;
543 	tc.DetailsLength = 12;
544 	/*
545 	 * XXX Is there any reason to fetch a partial image?  I.e. to
546 	 * set ImageOffset to something other than 0?
547 	 */
548 	tc.ImageOffset = 0;
549 	tc.ImageSize = cmd->len;
550 
551 	cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
552 
553 	return (mps_push_sge(cm, &tc, sizeof tc, 0));
554 }
555 
556 /*
557  * Prepare the mps_command for a SATA_PASSTHROUGH request.
558  */
559 static int
560 mpi_pre_sata_passthrough(struct mps_command *cm, struct mps_usr_command *cmd)
561 {
562 	MPI2_SATA_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
563 	MPI2_SATA_PASSTHROUGH_REPLY *rpl;
564 
565 	if (cmd->req_len != sizeof *req)
566 		return (EINVAL);
567 	if (cmd->rpl_len != sizeof *rpl)
568 		return (EINVAL);
569 
570 	mpi_init_sge(cm, req, &req->SGL);
571 	return (0);
572 }
573 
574 /*
575  * Prepare the mps_command for a SMP_PASSTHROUGH request.
576  */
577 static int
578 mpi_pre_smp_passthrough(struct mps_command *cm, struct mps_usr_command *cmd)
579 {
580 	MPI2_SMP_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
581 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
582 
583 	if (cmd->req_len != sizeof *req)
584 		return (EINVAL);
585 	if (cmd->rpl_len != sizeof *rpl)
586 		return (EINVAL);
587 
588 	mpi_init_sge(cm, req, &req->SGL);
589 	return (0);
590 }
591 
592 /*
593  * Prepare the mps_command for a CONFIG request.
594  */
595 static int
596 mpi_pre_config(struct mps_command *cm, struct mps_usr_command *cmd)
597 {
598 	MPI2_CONFIG_REQUEST *req = (void *)cm->cm_req;
599 	MPI2_CONFIG_REPLY *rpl;
600 
601 	if (cmd->req_len != sizeof *req)
602 		return (EINVAL);
603 	if (cmd->rpl_len != sizeof *rpl)
604 		return (EINVAL);
605 
606 	mpi_init_sge(cm, req, &req->PageBufferSGE);
607 	return (0);
608 }
609 
610 /*
611  * Prepare the mps_command for a SAS_IO_UNIT_CONTROL request.
612  */
613 static int
614 mpi_pre_sas_io_unit_control(struct mps_command *cm,
615 			     struct mps_usr_command *cmd)
616 {
617 
618 	cm->cm_sge = NULL;
619 	cm->cm_sglsize = 0;
620 	return (0);
621 }
622 
623 /*
624  * A set of functions to prepare an mps_command for the various
625  * supported requests.
626  */
627 struct mps_user_func {
628 	U8		Function;
629 	mps_user_f	*f_pre;
630 } mps_user_func_list[] = {
631 	{ MPI2_FUNCTION_IOC_FACTS,		mpi_pre_ioc_facts },
632 	{ MPI2_FUNCTION_PORT_FACTS,		mpi_pre_port_facts },
633 	{ MPI2_FUNCTION_FW_DOWNLOAD, 		mpi_pre_fw_download },
634 	{ MPI2_FUNCTION_FW_UPLOAD,		mpi_pre_fw_upload },
635 	{ MPI2_FUNCTION_SATA_PASSTHROUGH,	mpi_pre_sata_passthrough },
636 	{ MPI2_FUNCTION_SMP_PASSTHROUGH,	mpi_pre_smp_passthrough},
637 	{ MPI2_FUNCTION_CONFIG,			mpi_pre_config},
638 	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL,	mpi_pre_sas_io_unit_control },
639 	{ 0xFF,					NULL } /* list end */
640 };
641 
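/*
 * Match the MPI function code in the request header to its pre-processing
 * handler and let that handler finish setting up the command.
 */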
642 static int
643 mps_user_setup_request(struct mps_command *cm, struct mps_usr_command *cmd)
644 {
645 	MPI2_REQUEST_HEADER *hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
646 	struct mps_user_func *f;
647 
648 	for (f = mps_user_func_list; f->f_pre != NULL; f++) {
649 		if (hdr->Function == f->Function)
650 			return (f->f_pre(cm, cmd));
651 	}
652 	return (EINVAL);
653 }
654 
655 static int
656 mps_user_command(struct mps_softc *sc, struct mps_usr_command *cmd)
657 {
658 	MPI2_REQUEST_HEADER *hdr;
659 	MPI2_DEFAULT_REPLY *rpl;
660 	void *buf = NULL;
661 	struct mps_command *cm = NULL;
662 	int err = 0;
663 	int sz;
664 
665 	mps_lock(sc);
666 	cm = mps_alloc_command(sc);
667 
668 	if (cm == NULL) {
669 		mps_printf(sc, "%s: no mps requests\n", __func__);
670 		err = ENOMEM;
671 		goto RetFree;
672 	}
673 	mps_unlock(sc);
674 
675 	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
676 
677 	mps_dprint(sc, MPS_USER, "%s: req %p %d  rpl %p %d\n", __func__,
678 	    cmd->req, cmd->req_len, cmd->rpl, cmd->rpl_len);
679 
680 	if (cmd->req_len > (int)sc->reqframesz) {
681 		err = EINVAL;
682 		goto RetFreeUnlocked;
683 	}
684 	err = copyin(cmd->req, hdr, cmd->req_len);
685 	if (err != 0)
686 		goto RetFreeUnlocked;
687 
688 	mps_dprint(sc, MPS_USER, "%s: Function %02X MsgFlags %02X\n", __func__,
689 	    hdr->Function, hdr->MsgFlags);
690 
691 	if (cmd->len > 0) {
692 		buf = malloc(cmd->len, M_MPSUSER, M_WAITOK|M_ZERO);
693 		cm->cm_data = buf;
694 		cm->cm_length = cmd->len;
695 	} else {
696 		cm->cm_data = NULL;
697 		cm->cm_length = 0;
698 	}
699 
700 	cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE;
701 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
702 
703 	err = mps_user_setup_request(cm, cmd);
704 	if (err == EINVAL) {
705 		mps_printf(sc, "%s: unsupported parameter or unsupported "
706 		    "function in request (function = 0x%X)\n", __func__,
707 		    hdr->Function);
708 	}
709 	if (err != 0)
710 		goto RetFreeUnlocked;
711 
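	/* Send the request and wait up to 60 seconds for it to complete. */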
712 	mps_lock(sc);
713 	err = mps_wait_command(sc, &cm, 60, CAN_SLEEP);
714 
715 	if (err || (cm == NULL)) {
716 		mps_printf(sc, "%s: invalid request: error %d\n",
717 		    __func__, err);
718 		goto RetFree;
719 	}
720 
721 	rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
722 	if (rpl != NULL)
723 		sz = rpl->MsgLength * 4;
724 	else
725 		sz = 0;
726 
727 	if (sz > cmd->rpl_len) {
728 		mps_printf(sc, "%s: user reply buffer (%d) smaller than "
729 		    "returned buffer (%d)\n", __func__, cmd->rpl_len, sz);
730 		sz = cmd->rpl_len;
731 	}
732 
733 	mps_unlock(sc);
734 	copyout(rpl, cmd->rpl, sz);
735 	if (buf != NULL)
736 		copyout(buf, cmd->buf, cmd->len);
737 	mps_dprint(sc, MPS_USER, "%s: reply size %d\n", __func__, sz);
738 
739 RetFreeUnlocked:
740 	mps_lock(sc);
741 RetFree:
742 	if (cm != NULL)
743 		mps_free_command(sc, cm);
744 	mps_unlock(sc);
745 	if (buf != NULL)
746 		free(buf, M_MPSUSER);
747 	return (err);
748 }
749 
750 static int
751 mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data)
752 {
753 	MPI2_REQUEST_HEADER	*hdr, tmphdr;
754 	MPI2_DEFAULT_REPLY	*rpl = NULL;
755 	struct mps_command	*cm = NULL;
756 	int			err = 0, dir = 0, sz;
757 	uint8_t			function = 0;
758 	u_int			sense_len;
759 	struct mpssas_target	*targ = NULL;
760 
761 	/*
762 	 * Only allow one passthru command at a time.  Use the MPS_FLAGS_BUSY
763 	 * bit to denote that a passthru is being processed.
764 	 */
765 	mps_lock(sc);
766 	if (sc->mps_flags & MPS_FLAGS_BUSY) {
767 		mps_dprint(sc, MPS_USER, "%s: Only one passthru command "
768 		    "allowed at a time.", __func__);
769 		mps_unlock(sc);
770 		return (EBUSY);
771 	}
772 	sc->mps_flags |= MPS_FLAGS_BUSY;
773 	mps_unlock(sc);
774 
775 	/*
776 	 * Do some validation on data direction.  Valid cases are:
777 	 *    1) DataSize is 0 and direction is NONE
778 	 *    2) DataSize is non-zero and one of:
779 	 *        a) direction is READ or
780 	 *        b) direction is WRITE or
781 	 *        c) direction is BOTH and DataOutSize is non-zero
782 	 * If valid and the direction is BOTH, change the direction to READ.
783 	 * if valid and the direction is not BOTH, make sure DataOutSize is 0.
784 	 */
785 	if (((data->DataSize == 0) &&
786 	    (data->DataDirection == MPS_PASS_THRU_DIRECTION_NONE)) ||
787 	    ((data->DataSize != 0) &&
788 	    ((data->DataDirection == MPS_PASS_THRU_DIRECTION_READ) ||
789 	    (data->DataDirection == MPS_PASS_THRU_DIRECTION_WRITE) ||
790 	    ((data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH) &&
791 	    (data->DataOutSize != 0))))) {
792 		if (data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH)
793 			data->DataDirection = MPS_PASS_THRU_DIRECTION_READ;
794 		else
795 			data->DataOutSize = 0;
796 	} else {
797 		err = EINVAL;
798 		goto RetFreeUnlocked;
799 	}
800 
801 	mps_dprint(sc, MPS_USER, "%s: req 0x%jx %d  rpl 0x%jx %d "
802 	    "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__,
803 	    data->PtrRequest, data->RequestSize, data->PtrReply,
804 	    data->ReplySize, data->PtrData, data->DataSize,
805 	    data->PtrDataOut, data->DataOutSize, data->DataDirection);
806 
807 	/*
808 	 * copy in the header so we know what we're dealing with before we
809 	 * commit to allocating a command for it.
810 	 */
811 	err = copyin(PTRIN(data->PtrRequest), &tmphdr, data->RequestSize);
812 	if (err != 0)
813 		goto RetFreeUnlocked;
814 
815 	if (data->RequestSize > (int)sc->reqframesz) {
816 		err = EINVAL;
817 		goto RetFreeUnlocked;
818 	}
819 
820 	function = tmphdr.Function;
821 	mps_dprint(sc, MPS_USER, "%s: Function %02X MsgFlags %02X\n", __func__,
822 	    function, tmphdr.MsgFlags);
823 
824 	/*
825 	 * Handle a passthru TM request.
826 	 */
827 	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
828 		MPI2_SCSI_TASK_MANAGE_REQUEST	*task;
829 
830 		mps_lock(sc);
831 		cm = mpssas_alloc_tm(sc);
832 		if (cm == NULL) {
833 			err = EINVAL;
834 			goto Ret;
835 		}
836 
837 		/* Copy the header in.  Only a small fixup is needed. */
838 		task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
839 		bcopy(&tmphdr, task, data->RequestSize);
840 		task->TaskMID = cm->cm_desc.Default.SMID;
841 
842 		cm->cm_data = NULL;
843 		cm->cm_complete = NULL;
844 		cm->cm_complete_data = NULL;
845 
846 		targ = mpssas_find_target_by_handle(sc->sassc, 0,
847 		    task->DevHandle);
848 		if (targ == NULL) {
849 			mps_dprint(sc, MPS_INFO,
850 			   "%s %d: invalid handle for requested TM 0x%x\n",
851 			   __func__, __LINE__, task->DevHandle);
852 			err = 1;
853 		} else {
854 			mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
855 			err = mps_wait_command(sc, &cm, 30, CAN_SLEEP);
856 		}
857 
858 		if (err != 0) {
859 			err = EIO;
860 			mps_dprint(sc, MPS_FAULT, "%s: task management failed",
861 			    __func__);
862 		}
863 		/*
864 		 * Copy the reply data and sense data to user space.
865 		 */
866 		if ((cm != NULL) && (cm->cm_reply != NULL)) {
867 			rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
868 			sz = rpl->MsgLength * 4;
869 
870 			if (sz > data->ReplySize) {
871 				mps_printf(sc, "%s: user reply buffer (%d) "
872 				    "smaller than returned buffer (%d)\n",
873 				    __func__, data->ReplySize, sz);
874 			}
875 			mps_unlock(sc);
876 			copyout(cm->cm_reply, PTRIN(data->PtrReply),
877 			    data->ReplySize);
878 			mps_lock(sc);
879 		}
880 		mpssas_free_tm(sc, cm);
881 		goto Ret;
882 	}
883 
884 	mps_lock(sc);
885 	cm = mps_alloc_command(sc);
886 
887 	if (cm == NULL) {
888 		mps_printf(sc, "%s: no mps requests\n", __func__);
889 		err = ENOMEM;
890 		goto Ret;
891 	}
892 	mps_unlock(sc);
893 
894 	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
895 	bcopy(&tmphdr, hdr, data->RequestSize);
896 
897 	/*
898 	 * Do some checking to make sure the IOCTL request is valid, then set
899 	 * the SGL info.
900 	 */
901 	mpi_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize));
902 
903 	/*
904 	 * Set up for read, write or both.  From check above, DataOutSize will
905 	 * be 0 if direction is READ or WRITE, but it will have some non-zero
906 	 * value if the direction is BOTH.  So, just use the biggest size to get
907 	 * the cm_data buffer size.  If direction is BOTH, 2 SGLs need to be set
908 	 * up; the first is for the request and the second will contain the
909 	 * response data. cm_out_len needs to be set here and this will be used
910 	 * when the SGLs are set up.
911 	 */
912 	cm->cm_data = NULL;
913 	cm->cm_length = MAX(data->DataSize, data->DataOutSize);
914 	cm->cm_out_len = data->DataOutSize;
915 	cm->cm_flags = 0;
916 	if (cm->cm_length != 0) {
917 		cm->cm_data = malloc(cm->cm_length, M_MPSUSER, M_WAITOK |
918 		    M_ZERO);
919 		cm->cm_flags = MPS_CM_FLAGS_DATAIN;
920 		if (data->DataOutSize) {
921 			cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
922 			err = copyin(PTRIN(data->PtrDataOut),
923 			    cm->cm_data, data->DataOutSize);
924 		} else if (data->DataDirection ==
925 		    MPS_PASS_THRU_DIRECTION_WRITE) {
926 			cm->cm_flags = MPS_CM_FLAGS_DATAOUT;
927 			err = copyin(PTRIN(data->PtrData),
928 			    cm->cm_data, data->DataSize);
929 		}
930 		if (err != 0)
931 			mps_dprint(sc, MPS_FAULT, "%s: failed to copy "
932 			    "IOCTL data from user space\n", __func__);
933 	}
934 	cm->cm_flags |= MPS_CM_FLAGS_SGE_SIMPLE;
935 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
936 
937 	/*
938 	 * Set up Sense buffer and SGL offset for IO passthru.  SCSI IO request
939 	 * uses SCSI IO descriptor.
940 	 */
941 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
942 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
943 		MPI2_SCSI_IO_REQUEST	*scsi_io_req;
944 
945 		scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr;
946 		/*
947 		 * The SGEs for the data and data_out buffers go at the end of
948 		 * the scsi_io_request message header (64 bytes in total).
949 		 * The residual space following those SGEs is used for sense
950 		 * data.
951 		 */
952 		scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize -
953 		    64);
954 		scsi_io_req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
955 
956 		/*
957 		 * Set SGLOffset0 value.  This is the number of dwords that SGL
958 		 * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct.
959 		 */
960 		scsi_io_req->SGLOffset0 = 24;
961 
962 		/*
963 		 * Setup descriptor info.  RAID passthrough must use the
964 		 * default request descriptor which is already set, so if this
965 		 * is a SCSI IO request, change the descriptor to SCSI IO.
966 		 * Also, if this is a SCSI IO request, handle the reply in the
967 		 * mpssas_scsio_complete function.
968 		 */
969 		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
970 			cm->cm_desc.SCSIIO.RequestFlags =
971 			    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
972 			cm->cm_desc.SCSIIO.DevHandle = scsi_io_req->DevHandle;
973 
974 			/*
975 			 * Make sure the DevHandle is not 0 because this is a
976 			 * likely error.
977 			 */
978 			if (scsi_io_req->DevHandle == 0) {
979 				err = EINVAL;
980 				goto RetFreeUnlocked;
981 			}
982 		}
983 	}
984 
985 	mps_lock(sc);
986 
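	/* Send the request and wait up to 30 seconds for it to complete. */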
987 	err = mps_wait_command(sc, &cm, 30, CAN_SLEEP);
988 
989 	if (err || (cm == NULL)) {
990 		mps_printf(sc, "%s: invalid request: error %d\n", __func__,
991 		    err);
992 		mps_unlock(sc);
993 		goto RetFreeUnlocked;
994 	}
995 
996 	/*
997 	 * Sync the DMA data, if any.  Then copy the data to user space.
998 	 */
999 	if (cm->cm_data != NULL) {
1000 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1001 			dir = BUS_DMASYNC_POSTREAD;
1002 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1003 			dir = BUS_DMASYNC_POSTWRITE;
1004 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1005 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1006 
1007 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
1008 			mps_unlock(sc);
1009 			err = copyout(cm->cm_data,
1010 			    PTRIN(data->PtrData), data->DataSize);
1011 			mps_lock(sc);
1012 			if (err != 0)
1013 				mps_dprint(sc, MPS_FAULT, "%s: failed to copy "
1014 				    "IOCTL data to user space\n", __func__);
1015 		}
1016 	}
1017 
1018 	/*
1019 	 * Copy the reply data and sense data to user space.
1020 	 */
1021 	if (cm->cm_reply != NULL) {
1022 		rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
1023 		sz = rpl->MsgLength * 4;
1024 
1025 		if (sz > data->ReplySize) {
1026 			mps_printf(sc, "%s: user reply buffer (%d) smaller "
1027 			    "than returned buffer (%d)\n", __func__,
1028 			    data->ReplySize, sz);
1029 		}
1030 		mps_unlock(sc);
1031 		copyout(cm->cm_reply, PTRIN(data->PtrReply), data->ReplySize);
1032 		mps_lock(sc);
1033 
1034 		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
1035 		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
1036 			if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState &
1037 			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1038 				sense_len =
1039 				    MIN((le32toh(((MPI2_SCSI_IO_REPLY *)rpl)->
1040 				    SenseCount)), sizeof(struct
1041 				    scsi_sense_data));
1042 				mps_unlock(sc);
1043 				copyout(cm->cm_sense, (PTRIN(data->PtrReply +
1044 				    sizeof(MPI2_SCSI_IO_REPLY))), sense_len);
1045 				mps_lock(sc);
1046 			}
1047 		}
1048 	}
1049 	mps_unlock(sc);
1050 
1051 RetFreeUnlocked:
1052 	mps_lock(sc);
1053 
1054 	if (cm != NULL) {
1055 		if (cm->cm_data)
1056 			free(cm->cm_data, M_MPSUSER);
1057 		mps_free_command(sc, cm);
1058 	}
1059 Ret:
1060 	sc->mps_flags &= ~MPS_FLAGS_BUSY;
1061 	mps_unlock(sc);
1062 
1063 	return (err);
1064 }
1065 
1066 static void
1067 mps_user_get_adapter_data(struct mps_softc *sc, mps_adapter_data_t *data)
1068 {
1069 	Mpi2ConfigReply_t	mpi_reply;
1070 	Mpi2BiosPage3_t		config_page;
1071 
1072 	/*
1073 	 * Use the PCI interface functions to get the Bus, Device, and Function
1074 	 * information.
1075 	 */
1076 	data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mps_dev);
1077 	data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mps_dev);
1078 	data->PciInformation.u.bits.FunctionNumber =
1079 	    pci_get_function(sc->mps_dev);
1080 
1081 	/*
1082 	 * Get the FW version that should already be saved in IOC Facts.
1083 	 */
1084 	data->MpiFirmwareVersion = sc->facts->FWVersion.Word;
1085 
1086 	/*
1087 	 * General device info.
1088 	 */
1089 	data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2;
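	/* WarpDrive (SSS6200) controllers are reported with their own type. */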
1090 	if (sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
1091 		data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2_SSS6200;
1092 	data->PCIDeviceHwId = pci_get_device(sc->mps_dev);
1093 	data->PCIDeviceHwRev = pci_read_config(sc->mps_dev, PCIR_REVID, 1);
1094 	data->SubSystemId = pci_get_subdevice(sc->mps_dev);
1095 	data->SubsystemVendorId = pci_get_subvendor(sc->mps_dev);
1096 
1097 	/*
1098 	 * Get the driver version.
1099 	 */
1100 	strcpy((char *)&data->DriverVersion[0], MPS_DRIVER_VERSION);
1101 
1102 	/*
1103 	 * Need to get BIOS Config Page 3 for the BIOS Version.
1104 	 */
1105 	data->BiosVersion = 0;
1106 	mps_lock(sc);
1107 	if (mps_config_get_bios_pg3(sc, &mpi_reply, &config_page))
1108 		printf("%s: Error while retrieving BIOS Version\n", __func__);
1109 	else
1110 		data->BiosVersion = config_page.BiosVersion;
1111 	mps_unlock(sc);
1112 }
1113 
1114 static void
1115 mps_user_read_pci_info(struct mps_softc *sc, mps_pci_info_t *data)
1116 {
1117 	int	i;
1118 
1119 	/*
1120 	 * Use the PCI interface functions to get the Bus, Device, and Function
1121 	 * information.
1122 	 */
1123 	data->BusNumber = pci_get_bus(sc->mps_dev);
1124 	data->DeviceNumber = pci_get_slot(sc->mps_dev);
1125 	data->FunctionNumber = pci_get_function(sc->mps_dev);
1126 
1127 	/*
1128 	 * Now get the interrupt vector and the pci header.  The vector can
1129 	 * only be 0 right now.  The header is the first 256 bytes of config
1130 	 * space.
1131 	 */
1132 	data->InterruptVector = 0;
1133 	for (i = 0; i < sizeof (data->PciHeader); i++) {
1134 		data->PciHeader[i] = pci_read_config(sc->mps_dev, i, 1);
1135 	}
1136 }
1137 
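/*
 * Look up a FW diag buffer by its unique ID.  Returns the buffer index, or
 * MPS_FW_DIAGNOSTIC_UID_NOT_FOUND if no buffer uses that ID.
 */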
1138 static uint8_t
1139 mps_get_fw_diag_buffer_number(struct mps_softc *sc, uint32_t unique_id)
1140 {
1141 	uint8_t	index;
1142 
1143 	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
1144 		if (sc->fw_diag_buffer_list[index].unique_id == unique_id) {
1145 			return (index);
1146 		}
1147 	}
1148 
1149 	return (MPS_FW_DIAGNOSTIC_UID_NOT_FOUND);
1150 }
1151 
1152 static int
1153 mps_post_fw_diag_buffer(struct mps_softc *sc,
1154     mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
1155 {
1156 	MPI2_DIAG_BUFFER_POST_REQUEST	*req;
1157 	MPI2_DIAG_BUFFER_POST_REPLY	*reply = NULL;
1158 	struct mps_command		*cm = NULL;
1159 	int				i, status;
1160 
1161 	/*
1162 	 * If buffer is not enabled, just leave.
1163 	 */
1164 	*return_code = MPS_FW_DIAG_ERROR_POST_FAILED;
1165 	if (!pBuffer->enabled) {
1166 		return (MPS_DIAG_FAILURE);
1167 	}
1168 
1169 	/*
1170 	 * Clear some flags initially.
1171 	 */
1172 	pBuffer->force_release = FALSE;
1173 	pBuffer->valid_data = FALSE;
1174 	pBuffer->owned_by_firmware = FALSE;
1175 
1176 	/*
1177 	 * Get a command.
1178 	 */
1179 	cm = mps_alloc_command(sc);
1180 	if (cm == NULL) {
1181 		mps_printf(sc, "%s: no mps requests\n", __func__);
1182 		return (MPS_DIAG_FAILURE);
1183 	}
1184 
1185 	/*
1186 	 * Build the request for posting the FW Diag Buffer and send it.
1187 	 */
1188 	req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req;
1189 	req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1190 	req->BufferType = pBuffer->buffer_type;
1191 	req->ExtendedType = pBuffer->extended_type;
1192 	req->BufferLength = pBuffer->size;
1193 	for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++)
1194 		req->ProductSpecific[i] = pBuffer->product_specific[i];
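	/* Hand the bus address of the host diag buffer to the IOC. */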
1195 	mps_from_u64(sc->fw_diag_busaddr, &req->BufferAddress);
1196 	cm->cm_data = NULL;
1197 	cm->cm_length = 0;
1198 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1199 	cm->cm_complete_data = NULL;
1200 
1201 	/*
1202 	 * Send command synchronously.
1203 	 */
1204 	status = mps_wait_command(sc, &cm, 30, CAN_SLEEP);
1205 	if (status || (cm == NULL)) {
1206 		mps_printf(sc, "%s: invalid request: error %d\n", __func__,
1207 		    status);
1208 		status = MPS_DIAG_FAILURE;
1209 		goto done;
1210 	}
1211 
1212 	/*
1213 	 * Process POST reply.
1214 	 */
1215 	reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply;
1216 	if (reply == NULL) {
1217 		mps_printf(sc, "%s: reply is NULL, probably due to "
1218 		    "reinitialization\n", __func__);
1219 		status = MPS_DIAG_FAILURE;
1220 		goto done;
1221 	}
1222 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
1223 	    MPI2_IOCSTATUS_SUCCESS) {
1224 		status = MPS_DIAG_FAILURE;
1225 		mps_dprint(sc, MPS_FAULT, "%s: post of FW Diag Buffer failed "
1226 		    "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and "
1227 		    "TransferLength = 0x%x\n", __func__,
1228 		    le16toh(reply->IOCStatus), le32toh(reply->IOCLogInfo),
1229 		    le32toh(reply->TransferLength));
1230 		goto done;
1231 	}
1232 
1233 	/*
1234 	 * Post was successful.
1235 	 */
1236 	pBuffer->valid_data = TRUE;
1237 	pBuffer->owned_by_firmware = TRUE;
1238 	*return_code = MPS_FW_DIAG_ERROR_SUCCESS;
1239 	status = MPS_DIAG_SUCCESS;
1240 
1241 done:
1242 	if (cm != NULL)
1243 		mps_free_command(sc, cm);
1244 	return (status);
1245 }
1246 
1247 static int
1248 mps_release_fw_diag_buffer(struct mps_softc *sc,
1249     mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
1250     uint32_t diag_type)
1251 {
1252 	MPI2_DIAG_RELEASE_REQUEST	*req;
1253 	MPI2_DIAG_RELEASE_REPLY		*reply = NULL;
1254 	struct mps_command		*cm = NULL;
1255 	int				status;
1256 
1257 	/*
1258 	 * If buffer is not enabled, just leave.
1259 	 */
1260 	*return_code = MPS_FW_DIAG_ERROR_RELEASE_FAILED;
1261 	if (!pBuffer->enabled) {
1262 		mps_dprint(sc, MPS_USER, "%s: This buffer type is not "
1263 		    "supported by the IOC", __func__);
1264 		return (MPS_DIAG_FAILURE);
1265 	}
1266 
1267 	/*
1268 	 * Clear some flags initially.
1269 	 */
1270 	pBuffer->force_release = FALSE;
1271 	pBuffer->valid_data = FALSE;
1272 	pBuffer->owned_by_firmware = FALSE;
1273 
1274 	/*
1275 	 * Get a command.
1276 	 */
1277 	cm = mps_alloc_command(sc);
1278 	if (cm == NULL) {
1279 		mps_printf(sc, "%s: no mps requests\n", __func__);
1280 		return (MPS_DIAG_FAILURE);
1281 	}
1282 
1283 	/*
1284 	 * Build the request for releasing the FW Diag Buffer and send it.
1285 	 */
1286 	req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req;
1287 	req->Function = MPI2_FUNCTION_DIAG_RELEASE;
1288 	req->BufferType = pBuffer->buffer_type;
1289 	cm->cm_data = NULL;
1290 	cm->cm_length = 0;
1291 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1292 	cm->cm_complete_data = NULL;
1293 
1294 	/*
1295 	 * Send command synchronously.
1296 	 */
1297 	status = mps_wait_command(sc, &cm, 30, CAN_SLEEP);
1298 	if (status || (cm == NULL)) {
1299 		mps_printf(sc, "%s: invalid request: error %d\n", __func__,
1300 		    status);
1301 		status = MPS_DIAG_FAILURE;
1302 		goto done;
1303 	}
1304 
1305 	/*
1306 	 * Process RELEASE reply.
1307 	 */
1308 	reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply;
1309 	if (reply == NULL) {
1310 		mps_printf(sc, "%s: reply is NULL, probably due to "
1311 		    "reinitialization\n", __func__);
1312 		status = MPS_DIAG_FAILURE;
1313 		goto done;
1314 	}
1315 	if (((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
1316 	    MPI2_IOCSTATUS_SUCCESS) || pBuffer->owned_by_firmware) {
1317 		status = MPS_DIAG_FAILURE;
1318 		mps_dprint(sc, MPS_FAULT, "%s: release of FW Diag Buffer "
1319 		    "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n",
1320 		    __func__, le16toh(reply->IOCStatus),
1321 		    le32toh(reply->IOCLogInfo));
1322 		goto done;
1323 	}
1324 
1325 	/*
1326 	 * Release was successful.
1327 	 */
1328 	*return_code = MPS_FW_DIAG_ERROR_SUCCESS;
1329 	status = MPS_DIAG_SUCCESS;
1330 
1331 	/*
1332 	 * If this was for an UNREGISTER diag type command, clear the unique ID.
1333 	 */
1334 	if (diag_type == MPS_FW_DIAG_TYPE_UNREGISTER) {
1335 		pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID;
1336 	}
1337 
1338 done:
1339 	if (cm != NULL)
1340 		mps_free_command(sc, cm);
1341 
1342 	return (status);
1343 }
1344 
1345 static int
1346 mps_diag_register(struct mps_softc *sc, mps_fw_diag_register_t *diag_register,
1347     uint32_t *return_code)
1348 {
1349 	mps_fw_diagnostic_buffer_t	*pBuffer;
1350 	struct mps_busdma_context	*ctx;
1351 	uint8_t				extended_type, buffer_type, i;
1352 	uint32_t			buffer_size;
1353 	uint32_t			unique_id;
1354 	int				status;
1355 	int				error;
1356 
1357 	extended_type = diag_register->ExtendedType;
1358 	buffer_type = diag_register->BufferType;
1359 	buffer_size = diag_register->RequestedBufferSize;
1360 	unique_id = diag_register->UniqueId;
1361 	ctx = NULL;
1362 	error = 0;
1363 
1364 	/*
1365 	 * Check for valid buffer type
1366 	 */
1367 	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
1368 		*return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1369 		return (MPS_DIAG_FAILURE);
1370 	}
1371 
1372 	/*
1373 	 * Get the current buffer and look up the unique ID.  The unique ID
1374 	 * should not be found.  If it is, the ID is already in use.
1375 	 */
1376 	i = mps_get_fw_diag_buffer_number(sc, unique_id);
1377 	pBuffer = &sc->fw_diag_buffer_list[buffer_type];
1378 	if (i != MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1379 		*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1380 		return (MPS_DIAG_FAILURE);
1381 	}
1382 
1383 	/*
1384 	 * The buffer's unique ID should not be registered yet, and the given
1385 	 * unique ID cannot be 0.
1386 	 */
1387 	if ((pBuffer->unique_id != MPS_FW_DIAG_INVALID_UID) ||
1388 	    (unique_id == MPS_FW_DIAG_INVALID_UID)) {
1389 		*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1390 		return (MPS_DIAG_FAILURE);
1391 	}
1392 
1393 	/*
1394 	 * If this buffer is already posted as immediate, just change owner.
1395 	 */
1396 	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
1397 	    (pBuffer->unique_id == MPS_FW_DIAG_INVALID_UID)) {
1398 		pBuffer->immediate = FALSE;
1399 		pBuffer->unique_id = unique_id;
1400 		return (MPS_DIAG_SUCCESS);
1401 	}
1402 
1403 	/*
1404 	 * Post a new buffer after checking if it's enabled.  The DMA buffer
1405 	 * that is allocated will be contiguous (nsegments = 1).
1406 	 */
1407 	if (!pBuffer->enabled) {
1408 		*return_code = MPS_FW_DIAG_ERROR_NO_BUFFER;
1409 		return (MPS_DIAG_FAILURE);
1410 	}
1411 	if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
1412 				1, 0,			/* algnmnt, boundary */
1413 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1414 				BUS_SPACE_MAXADDR,	/* highaddr */
1415 				NULL, NULL,		/* filter, filterarg */
1416 				buffer_size,		/* maxsize */
1417 				1,			/* nsegments */
1418 				buffer_size,		/* maxsegsize */
1419 				0,			/* flags */
1420 				NULL, NULL,		/* lockfunc, lockarg */
1421 				&sc->fw_diag_dmat)) {
1422 		mps_dprint(sc, MPS_ERROR,
1423 		    "Cannot allocate FW diag buffer DMA tag\n");
1424 		*return_code = MPS_FW_DIAG_ERROR_NO_BUFFER;
1425 		status = MPS_DIAG_FAILURE;
1426 		goto bailout;
1427 	}
1428 	if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer,
1429 	    BUS_DMA_NOWAIT, &sc->fw_diag_map)) {
1430 		mps_dprint(sc, MPS_ERROR,
1431 		    "Cannot allocate FW diag buffer memory\n");
1432 		*return_code = MPS_FW_DIAG_ERROR_NO_BUFFER;
1433 		status = MPS_DIAG_FAILURE;
1434 		goto bailout;
1435 	}
1436 	bzero(sc->fw_diag_buffer, buffer_size);
1437 
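	/*
	 * Set up a busdma context so the deferred load callback can record the
	 * buffer's bus address and clean up if we give up waiting below.
	 */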
1438 	ctx = malloc(sizeof(*ctx), M_MPSUSER, M_WAITOK | M_ZERO);
1439 	ctx->addr = &sc->fw_diag_busaddr;
1440 	ctx->buffer_dmat = sc->fw_diag_dmat;
1441 	ctx->buffer_dmamap = sc->fw_diag_map;
1442 	ctx->softc = sc;
1443 	error = bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map,
1444 	    sc->fw_diag_buffer, buffer_size, mps_memaddr_wait_cb,
1445 	    ctx, 0);
1446 
1447 	if (error == EINPROGRESS) {
1448 		/* XXX KDM */
1449 		device_printf(sc->mps_dev, "%s: Deferred bus_dmamap_load\n",
1450 		    __func__);
1451 		/*
1452 		 * Wait for the load to complete.  If we're interrupted,
1453 		 * bail out.
1454 		 */
1455 		mps_lock(sc);
1456 		if (ctx->completed == 0) {
1457 			error = msleep(ctx, &sc->mps_mtx, PCATCH, "mpswait", 0);
1458 			if (error != 0) {
1459 				/*
1460 				 * We got an error from msleep(9).  This is
1461 				 * most likely due to a signal.  Tell
1462 				 * mps_memaddr_wait_cb() that we've abandoned
1463 				 * the context, so it needs to clean up when
1464 				 * it is called.
1465 				 */
1466 				ctx->abandoned = 1;
1467 
1468 				/* The callback will free this memory */
1469 				ctx = NULL;
1470 				mps_unlock(sc);
1471 
1472 				device_printf(sc->mps_dev, "Cannot "
1473 				    "bus_dmamap_load FW diag buffer, error = "
1474 				    "%d returned from msleep\n", error);
1475 				*return_code = MPS_FW_DIAG_ERROR_NO_BUFFER;
1476 				status = MPS_DIAG_FAILURE;
1477 				goto bailout;
1478 			}
1479 		}
1480 		mps_unlock(sc);
1481 	}
1482 
1483 	if ((error != 0) || (ctx->error != 0)) {
1484 		device_printf(sc->mps_dev, "Cannot bus_dmamap_load FW diag "
1485 		    "buffer, %serror = %d\n", error ? "" : "callback ",
1486 		    error ? error : ctx->error);
1487 		*return_code = MPS_FW_DIAG_ERROR_NO_BUFFER;
1488 		status = MPS_DIAG_FAILURE;
1489 		goto bailout;
1490 	}
1491 
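	/* Make the buffer ready for the firmware to DMA diagnostic data into. */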
1492 	bus_dmamap_sync(sc->fw_diag_dmat, sc->fw_diag_map, BUS_DMASYNC_PREREAD);
1493 
1494 	pBuffer->size = buffer_size;
1495 
1496 	/*
1497 	 * Copy the given info to the diag buffer and post the buffer.
1498 	 */
1499 	pBuffer->buffer_type = buffer_type;
1500 	pBuffer->immediate = FALSE;
1501 	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
1502 		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
1503 		    i++) {
1504 			pBuffer->product_specific[i] =
1505 			    diag_register->ProductSpecific[i];
1506 		}
1507 	}
1508 	pBuffer->extended_type = extended_type;
1509 	pBuffer->unique_id = unique_id;
1510 	status = mps_post_fw_diag_buffer(sc, pBuffer, return_code);
1511 
1512 bailout:
1513 	/*
1514 	 * In case there was a failure, free the DMA buffer.
1515 	 */
1516 	if (status == MPS_DIAG_FAILURE) {
1517 		if (sc->fw_diag_busaddr != 0) {
1518 			bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
1519 			sc->fw_diag_busaddr = 0;
1520 		}
1521 		if (sc->fw_diag_buffer != NULL) {
1522 			bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
1523 			    sc->fw_diag_map);
1524 			sc->fw_diag_buffer = NULL;
1525 		}
1526 		if (sc->fw_diag_dmat != NULL) {
1527 			bus_dma_tag_destroy(sc->fw_diag_dmat);
1528 			sc->fw_diag_dmat = NULL;
1529 		}
1530 	}
1531 
1532 	if (ctx != NULL)
1533 		free(ctx, M_MPSUSER);
1534 
1535 	return (status);
1536 }
1537 
1538 static int
1539 mps_diag_unregister(struct mps_softc *sc,
1540     mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
1541 {
1542 	mps_fw_diagnostic_buffer_t	*pBuffer;
1543 	uint8_t				i;
1544 	uint32_t			unique_id;
1545 	int				status;
1546 
1547 	unique_id = diag_unregister->UniqueId;
1548 
1549 	/*
1550 	 * Get the current buffer and look up the unique ID.  The unique ID
1551 	 * should be there.
1552 	 */
1553 	i = mps_get_fw_diag_buffer_number(sc, unique_id);
1554 	if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1555 		*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1556 		return (MPS_DIAG_FAILURE);
1557 	}
1558 
1559 	pBuffer = &sc->fw_diag_buffer_list[i];
1560 
1561 	/*
1562 	 * Try to release the buffer from FW before freeing it.  If release
1563 	 * fails, don't free the DMA buffer in case FW tries to access it
1564 	 * later.  If buffer is not owned by firmware, can't release it.
1565 	 */
1566 	if (!pBuffer->owned_by_firmware) {
1567 		status = MPS_DIAG_SUCCESS;
1568 	} else {
1569 		status = mps_release_fw_diag_buffer(sc, pBuffer, return_code,
1570 		    MPS_FW_DIAG_TYPE_UNREGISTER);
1571 	}
1572 
1573 	/*
1574 	 * At this point, return the current status no matter what happens with
1575 	 * the DMA buffer.
1576 	 */
1577 	pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID;
1578 	if (status == MPS_DIAG_SUCCESS) {
1579 		if (sc->fw_diag_busaddr != 0) {
1580 			bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
1581 			sc->fw_diag_busaddr = 0;
1582 		}
1583 		if (sc->fw_diag_buffer != NULL) {
1584 			bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
1585 			    sc->fw_diag_map);
1586 			sc->fw_diag_buffer = NULL;
1587 		}
1588 		if (sc->fw_diag_dmat != NULL) {
1589 			bus_dma_tag_destroy(sc->fw_diag_dmat);
1590 			sc->fw_diag_dmat = NULL;
1591 		}
1592 	}
1593 
1594 	return (status);
1595 }
1596 
1597 static int
1598 mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query,
1599     uint32_t *return_code)
1600 {
1601 	mps_fw_diagnostic_buffer_t	*pBuffer;
1602 	uint8_t				i;
1603 	uint32_t			unique_id;
1604 
1605 	unique_id = diag_query->UniqueId;
1606 
1607 	/*
1608 	 * If ID is valid, query on ID.
1609 	 * If ID is invalid, query on buffer type.
1610 	 */
1611 	if (unique_id == MPS_FW_DIAG_INVALID_UID) {
1612 		i = diag_query->BufferType;
1613 		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
1614 			*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1615 			return (MPS_DIAG_FAILURE);
1616 		}
1617 	} else {
1618 		i = mps_get_fw_diag_buffer_number(sc, unique_id);
1619 		if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1620 			*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1621 			return (MPS_DIAG_FAILURE);
1622 		}
1623 	}
1624 
1625 	/*
1626 	 * Fill query structure with the diag buffer info.
1627 	 */
1628 	pBuffer = &sc->fw_diag_buffer_list[i];
1629 	diag_query->BufferType = pBuffer->buffer_type;
1630 	diag_query->ExtendedType = pBuffer->extended_type;
1631 	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
1632 		for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4);
1633 		    i++) {
1634 			diag_query->ProductSpecific[i] =
1635 			    pBuffer->product_specific[i];
1636 		}
1637 	}
1638 	diag_query->TotalBufferSize = pBuffer->size;
1639 	diag_query->DriverAddedBufferSize = 0;
1640 	diag_query->UniqueId = pBuffer->unique_id;
1641 	diag_query->ApplicationFlags = 0;
1642 	diag_query->DiagnosticFlags = 0;
1643 
1644 	/*
1645 	 * Set/Clear application flags
1646 	 */
1647 	if (pBuffer->immediate) {
1648 		diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_APP_OWNED;
1649 	} else {
1650 		diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_APP_OWNED;
1651 	}
1652 	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
1653 		diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_BUFFER_VALID;
1654 	} else {
1655 		diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_BUFFER_VALID;
1656 	}
1657 	if (pBuffer->owned_by_firmware) {
1658 		diag_query->ApplicationFlags |=
1659 		    MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
1660 	} else {
1661 		diag_query->ApplicationFlags &=
1662 		    ~MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
1663 	}
1664 
1665 	return (MPS_DIAG_SUCCESS);
1666 }
1667 
1668 static int
1669 mps_diag_read_buffer(struct mps_softc *sc,
1670     mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
1671     uint32_t *return_code)
1672 {
1673 	mps_fw_diagnostic_buffer_t	*pBuffer;
1674 	uint8_t				i, *pData;
1675 	uint32_t			unique_id;
1676 	int				status;
1677 
1678 	unique_id = diag_read_buffer->UniqueId;
1679 
1680 	/*
1681 	 * Get the current buffer and look up the unique ID.  The unique ID
1682 	 * should be there.
1683 	 */
1684 	i = mps_get_fw_diag_buffer_number(sc, unique_id);
1685 	if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1686 		*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1687 		return (MPS_DIAG_FAILURE);
1688 	}
1689 
1690 	pBuffer = &sc->fw_diag_buffer_list[i];
1691 
1692 	/*
1693 	 * Make sure requested read is within limits
1694 	 */
1695 	if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
1696 	    pBuffer->size) {
1697 		*return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1698 		return (MPS_DIAG_FAILURE);
1699 	}
1700 
1701 	/* Sync the DMA map before we copy to userland. */
1702 	bus_dmamap_sync(sc->fw_diag_dmat, sc->fw_diag_map,
1703 	    BUS_DMASYNC_POSTREAD);
1704 
1705 	/*
1706 	 * Copy the requested data from DMA to the diag_read_buffer.  The DMA
1707 	 * buffer that was allocated is one contiguous buffer.
1708 	 */
1709 	pData = (uint8_t *)(sc->fw_diag_buffer +
1710 	    diag_read_buffer->StartingOffset);
1711 	if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0)
1712 		return (MPS_DIAG_FAILURE);
1713 	diag_read_buffer->Status = 0;
1714 
1715 	/*
1716 	 * Set or clear the Force Release flag.
1717 	 */
1718 	if (pBuffer->force_release) {
1719 		diag_read_buffer->Flags |= MPS_FW_DIAG_FLAG_FORCE_RELEASE;
1720 	} else {
1721 		diag_read_buffer->Flags &= ~MPS_FW_DIAG_FLAG_FORCE_RELEASE;
1722 	}
1723 
1724 	/*
1725 	 * If buffer is to be reregistered, make sure it's not already owned by
1726 	 * firmware first.
1727 	 */
1728 	status = MPS_DIAG_SUCCESS;
1729 	if (!pBuffer->owned_by_firmware) {
1730 		if (diag_read_buffer->Flags & MPS_FW_DIAG_FLAG_REREGISTER) {
1731 			status = mps_post_fw_diag_buffer(sc, pBuffer,
1732 			    return_code);
1733 		}
1734 	}
1735 
1736 	return (status);
1737 }
1738 
1739 static int
1740 mps_diag_release(struct mps_softc *sc, mps_fw_diag_release_t *diag_release,
1741     uint32_t *return_code)
1742 {
1743 	mps_fw_diagnostic_buffer_t	*pBuffer;
1744 	uint8_t				i;
1745 	uint32_t			unique_id;
1746 	int				status;
1747 
1748 	unique_id = diag_release->UniqueId;
1749 
1750 	/*
1751 	 * Look up the unique ID to find the corresponding buffer.  The ID
1752 	 * should already be registered.
1753 	 */
1754 	i = mps_get_fw_diag_buffer_number(sc, unique_id);
1755 	if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1756 		*return_code = MPS_FW_DIAG_ERROR_INVALID_UID;
1757 		return (MPS_DIAG_FAILURE);
1758 	}
1759 
1760 	pBuffer = &sc->fw_diag_buffer_list[i];
1761 
1762 	/*
1763 	 * If buffer is not owned by firmware, it's already been released.
1764 	 */
1765 	if (!pBuffer->owned_by_firmware) {
1766 		*return_code = MPS_FW_DIAG_ERROR_ALREADY_RELEASED;
1767 		return (MPS_DIAG_FAILURE);
1768 	}
1769 
1770 	/*
1771 	 * Release the buffer.
1772 	 */
1773 	status = mps_release_fw_diag_buffer(sc, pBuffer, return_code,
1774 	    MPS_FW_DIAG_TYPE_RELEASE);
1775 	return (status);
1776 }
1777 
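/*
 * Dispatch a single diag action.  The user's buffer is copied in as the
 * structure matching the requested action (register, unregister, query, read
 * buffer, or release), the corresponding handler is called, and for the query
 * and read buffer actions the updated structure is copied back out.
 */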
1778 static int
1779 mps_do_diag_action(struct mps_softc *sc, uint32_t action, uint8_t *diag_action,
1780     uint32_t length, uint32_t *return_code)
1781 {
1782 	mps_fw_diag_register_t		diag_register;
1783 	mps_fw_diag_unregister_t	diag_unregister;
1784 	mps_fw_diag_query_t		diag_query;
1785 	mps_diag_read_buffer_t		diag_read_buffer;
1786 	mps_fw_diag_release_t		diag_release;
1787 	int				status = MPS_DIAG_SUCCESS;
1788 	uint32_t			original_return_code;
1789 
1790 	original_return_code = *return_code;
1791 	*return_code = MPS_FW_DIAG_ERROR_SUCCESS;
1792 
1793 	switch (action) {
1794 		case MPS_FW_DIAG_TYPE_REGISTER:
1795 			if (!length) {
1796 				*return_code =
1797 				    MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1798 				status = MPS_DIAG_FAILURE;
1799 				break;
1800 			}
1801 			if (copyin(diag_action, &diag_register,
1802 			    sizeof(diag_register)) != 0)
1803 				return (MPS_DIAG_FAILURE);
1804 			status = mps_diag_register(sc, &diag_register,
1805 			    return_code);
1806 			break;
1807 
1808 		case MPS_FW_DIAG_TYPE_UNREGISTER:
1809 			if (length < sizeof(diag_unregister)) {
1810 				*return_code =
1811 				    MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1812 				status = MPS_DIAG_FAILURE;
1813 				break;
1814 			}
1815 			if (copyin(diag_action, &diag_unregister,
1816 			    sizeof(diag_unregister)) != 0)
1817 				return (MPS_DIAG_FAILURE);
1818 			status = mps_diag_unregister(sc, &diag_unregister,
1819 			    return_code);
1820 			break;
1821 
1822 		case MPS_FW_DIAG_TYPE_QUERY:
1823 			if (length < sizeof(diag_query)) {
1824 				*return_code =
1825 				    MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1826 				status = MPS_DIAG_FAILURE;
1827 				break;
1828 			}
1829 			if (copyin(diag_action, &diag_query, sizeof(diag_query))
1830 			    != 0)
1831 				return (MPS_DIAG_FAILURE);
1832 			status = mps_diag_query(sc, &diag_query, return_code);
1833 			if (status == MPS_DIAG_SUCCESS)
1834 				if (copyout(&diag_query, diag_action,
1835 				    sizeof(diag_query)) != 0)
1836 					return (MPS_DIAG_FAILURE);
1837 			break;
1838 
1839 		case MPS_FW_DIAG_TYPE_READ_BUFFER:
1840 			if (copyin(diag_action, &diag_read_buffer,
1841 			    sizeof(diag_read_buffer)) != 0)
1842 				return (MPS_DIAG_FAILURE);
1843 			if (length < diag_read_buffer.BytesToRead) {
1844 				*return_code =
1845 				    MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1846 				status = MPS_DIAG_FAILURE;
1847 				break;
1848 			}
1849 			status = mps_diag_read_buffer(sc, &diag_read_buffer,
1850 			    PTRIN(diag_read_buffer.PtrDataBuffer),
1851 			    return_code);
1852 			if (status == MPS_DIAG_SUCCESS) {
1853 				if (copyout(&diag_read_buffer, diag_action,
1854 				    sizeof(diag_read_buffer) -
1855 				    sizeof(diag_read_buffer.PtrDataBuffer)) !=
1856 				    0)
1857 					return (MPS_DIAG_FAILURE);
1858 			}
1859 			break;
1860 
1861 		case MPS_FW_DIAG_TYPE_RELEASE:
1862 			if (length < sizeof(diag_release)) {
1863 				*return_code =
1864 				    MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1865 				status = MPS_DIAG_FAILURE;
1866 				break;
1867 			}
1868 			if (copyin(diag_action, &diag_release,
1869 			    sizeof(diag_release)) != 0)
1870 				return (MPS_DIAG_FAILURE);
1871 			status = mps_diag_release(sc, &diag_release,
1872 			    return_code);
1873 			break;
1874 
1875 		default:
1876 			*return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER;
1877 			status = MPS_DIAG_FAILURE;
1878 			break;
1879 	}
1880 
1881 	if ((status == MPS_DIAG_FAILURE) &&
1882 	    (original_return_code == MPS_FW_DIAG_NEW) &&
1883 	    (*return_code != MPS_FW_DIAG_ERROR_SUCCESS))
1884 		status = MPS_DIAG_SUCCESS;
1885 
1886 	return (status);
1887 }
1888 
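/*
 * Entry point for MPTIOCTL_DIAG_ACTION.  Marks the softc busy so that only
 * one diag action runs at a time, validates the requested action, and hands
 * valid actions off to mps_do_diag_action().
 */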
1889 static int
1890 mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data)
1891 {
1892 	int			status;
1893 
1894 	/*
1895 	 * Only allow one diag action at a time.
1896 	 */
1897 	if (sc->mps_flags & MPS_FLAGS_BUSY) {
1898 		mps_dprint(sc, MPS_USER, "%s: Only one FW diag command "
1899 		    "allowed at a time.\n", __func__);
1900 		return (EBUSY);
1901 	}
1902 	sc->mps_flags |= MPS_FLAGS_BUSY;
1903 
1904 	/*
1905 	 * Send diag action request
1906 	 */
1907 	if (data->Action == MPS_FW_DIAG_TYPE_REGISTER ||
1908 	    data->Action == MPS_FW_DIAG_TYPE_UNREGISTER ||
1909 	    data->Action == MPS_FW_DIAG_TYPE_QUERY ||
1910 	    data->Action == MPS_FW_DIAG_TYPE_READ_BUFFER ||
1911 	    data->Action == MPS_FW_DIAG_TYPE_RELEASE) {
1912 		status = mps_do_diag_action(sc, data->Action,
1913 		    PTRIN(data->PtrDiagAction), data->Length,
1914 		    &data->ReturnCode);
1915 	} else
1916 		status = EINVAL;
1917 
1918 	sc->mps_flags &= ~MPS_FLAGS_BUSY;
1919 	return (status);
1920 }
1921 
1922 /*
1923  * Copy the event recording mask and the event queue size out.  For
1924  * clarification, the event recording mask (events_to_record) is not the same
1925  * thing as the event mask (event_mask).  events_to_record has a bit set for
1926  * every event type that is to be recorded by the driver, and event_mask has a
1927  * bit cleared for every event that is allowed into the driver from the IOC.
1928  * The two masks are independent of each other.
1929  */
1930 static void
1931 mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data)
1932 {
1933 	uint8_t	i;
1934 
1935 	mps_lock(sc);
1936 	data->Entries = MPS_EVENT_QUEUE_SIZE;
1937 
1938 	for (i = 0; i < 4; i++) {
1939 		data->Types[i] = sc->events_to_record[i];
1940 	}
1941 	mps_unlock(sc);
1942 }
1943 
1944 /*
1945  * Set the driver's event recording mask to what's been given.  See
1946  * mps_user_event_query for explanation of the event recording mask and the IOC
1947  * event mask.  It's the app's responsibility to enable event logging by setting
1948  * the bits in events_to_record.  Initially, no events will be logged.
1949  */
1950 static void
1951 mps_user_event_enable(struct mps_softc *sc, mps_event_enable_t *data)
1952 {
1953 	uint8_t	i;
1954 
1955 	mps_lock(sc);
1956 	for (i = 0; i < 4; i++) {
1957 		sc->events_to_record[i] = data->Types[i];
1958 	}
1959 	mps_unlock(sc);
1960 }
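
/*
 * For illustration only (not part of the driver): assuming a device node such
 * as /dev/mps0 is open as 'fd', a userland tool could enable recording of
 * every event type roughly as follows, using the mps_event_enable_t layout
 * consumed above:
 *
 *	mps_event_enable_t enable;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		enable.Types[i] = 0xFFFFFFFF;
 *	ioctl(fd, MPTIOCTL_EVENT_ENABLE, &enable);
 *
 * MPTIOCTL_EVENT_QUERY returns the same four mask words plus the queue depth,
 * and MPTIOCTL_EVENT_REPORT copies the recorded events out, provided the
 * caller's buffer is at least as large as the driver's recorded_events array.
 */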
1961 
1962 /*
1963  * Copy out the events that have been recorded, up to the max events allowed.
1964  */
1965 static int
1966 mps_user_event_report(struct mps_softc *sc, mps_event_report_t *data)
1967 {
1968 	int		status = 0;
1969 	uint32_t	size;
1970 
1971 	mps_lock(sc);
1972 	size = data->Size;
1973 	if ((size >= sizeof(sc->recorded_events)) && (status == 0)) {
1974 		mps_unlock(sc);
1975 		if (copyout((void *)sc->recorded_events,
1976 		    PTRIN(data->PtrEvents), size) != 0)
1977 			status = EFAULT;
1978 		mps_lock(sc);
1979 	} else {
1980 		/*
1981 		 * data->Size value is not large enough to copy event data.
1982 		 */
1983 		status = EFAULT;
1984 	}
1985 
1986 	/*
1987 	 * Change size value to match the number of bytes that were copied.
1988 	 */
1989 	if (status == 0)
1990 		data->Size = sizeof(sc->recorded_events);
1991 	mps_unlock(sc);
1992 
1993 	return (status);
1994 }
1995 
1996 /*
1997  * Record events into the driver from the IOC if they are not masked.
1998  */
1999 void
2000 mpssas_record_event(struct mps_softc *sc,
2001     MPI2_EVENT_NOTIFICATION_REPLY *event_reply)
2002 {
2003 	uint32_t	event;
2004 	int		i, j;
2005 	uint16_t	event_data_len;
2006 	boolean_t	sendAEN = FALSE;
2007 
2008 	event = event_reply->Event;
2009 
2010 	/*
2011 	 * Generate a system event to let anyone who cares know that a
2012 	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
2013 	 * event mask is set to.
2014 	 */
2015 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
2016 		sendAEN = TRUE;
2017 	}
2018 
2019 	/*
2020 	 * Record the event only if its corresponding bit is set in
2021 	 * events_to_record.  event_index is the index into recorded_events and
2022 	 * event_number is the overall number of an event being recorded since
2023 	 * start-of-day.  event_index will roll over; event_number will never
2024 	 * roll over.
2025 	 */
2026 	i = (uint8_t)(event / 32);
2027 	j = (uint8_t)(event % 32);
2028 	if ((i < 4) && ((1 << j) & sc->events_to_record[i])) {
2029 		i = sc->event_index;
2030 		sc->recorded_events[i].Type = event;
2031 		sc->recorded_events[i].Number = ++sc->event_number;
2032 		bzero(sc->recorded_events[i].Data, MPS_MAX_EVENT_DATA_LENGTH *
2033 		    4);
2034 		event_data_len = event_reply->EventDataLength;
2035 
2036 		if (event_data_len > 0) {
2037 			/*
2038 			 * Limit data to the size of a recorded_events entry
2039 			 */
2040 			if (event_data_len > MPS_MAX_EVENT_DATA_LENGTH) {
2041 				event_data_len = MPS_MAX_EVENT_DATA_LENGTH;
2042 			}
2043 			for (j = 0; j < event_data_len; j++) {
2044 				sc->recorded_events[i].Data[j] =
2045 				    event_reply->EventData[j];
2046 			}
2047 
2048 			/*
2049 			 * check for index wrap-around
2050 			 */
2051 			if (++i == MPS_EVENT_QUEUE_SIZE) {
2052 				i = 0;
2053 			}
2054 			sc->event_index = (uint8_t)i;
2055 
2056 			/*
2057 			 * Set flag to send the event.
2058 			 */
2059 			sendAEN = TRUE;
2060 		}
2061 	}
2062 
2063 	/*
2064 	 * Generate a system event if flag is set to let anyone who cares know
2065 	 * that an event has occurred.
2066 	 */
2067 	if (sendAEN) {
2068 //SLM-how to send a system event (see kqueue, kevent)
2069 //		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
2070 //		    "SAS", NULL, NULL, DDI_NOSLEEP);
2071 	}
2072 }
2073 
2074 static int
2075 mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data)
2076 {
2077 	int	status = 0;
2078 
2079 	switch (data->Command) {
2080 		/*
2081 		 * IO access is not supported.
2082 		 */
2083 		case REG_IO_READ:
2084 		case REG_IO_WRITE:
2085 			mps_dprint(sc, MPS_USER, "IO access is not supported. "
2086 			    "Use memory access.");
2087 			status = EINVAL;
2088 			break;
2089 
2090 		case REG_MEM_READ:
2091 			data->RegData = mps_regread(sc, data->RegOffset);
2092 			break;
2093 
2094 		case REG_MEM_WRITE:
2095 			mps_regwrite(sc, data->RegOffset, data->RegData);
2096 			break;
2097 
2098 		default:
2099 			status = EINVAL;
2100 			break;
2101 	}
2102 
2103 	return (status);
2104 }
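
/*
 * For illustration only (not part of the driver): assuming an open descriptor
 * 'fd' for the controller's device node, a memory-mapped register could be
 * read through MPTIOCTL_REG_ACCESS roughly as follows, using the
 * mps_reg_access_t fields referenced above ('offset' is a caller-chosen
 * register offset):
 *
 *	mps_reg_access_t reg;
 *
 *	reg.Command = REG_MEM_READ;
 *	reg.RegOffset = offset;
 *	if (ioctl(fd, MPTIOCTL_REG_ACCESS, &reg) == 0)
 *		printf("0x%x\n", reg.RegData);
 *
 * REG_IO_READ and REG_IO_WRITE are rejected with EINVAL; only memory access
 * is supported.
 */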
2105 
2106 static int
2107 mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data)
2108 {
2109 	uint8_t		bt2dh = FALSE;
2110 	uint8_t		dh2bt = FALSE;
2111 	uint16_t	dev_handle, bus, target;
2112 
2113 	bus = data->Bus;
2114 	target = data->TargetID;
2115 	dev_handle = data->DevHandle;
2116 
2117 	/*
2118 	 * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/
2119 	 * Target to get DevHandle.  When Bus/Target are 0xFFFF and DevHandle is
2120 	 * not 0xFFFF, use DevHandle to get Bus/Target.  Anything else is
2121 	 * invalid.
2122 	 */
2123 	if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF))
2124 		dh2bt = TRUE;
2125 	if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF))
2126 		bt2dh = TRUE;
2127 	if (!dh2bt && !bt2dh)
2128 		return (EINVAL);
2129 
2130 	/*
2131 	 * Only handle bus of 0.  Make sure target is within range.
2132 	 */
2133 	if (bt2dh) {
2134 		if (bus != 0)
2135 			return (EINVAL);
2136 
2137 		if (target > sc->max_devices) {
2138 			mps_dprint(sc, MPS_FAULT, "Target ID is out of range "
2139 			   "for Bus/Target to DevHandle mapping.");
2140 			return (EINVAL);
2141 		}
2142 		dev_handle = sc->mapping_table[target].dev_handle;
2143 		if (dev_handle)
2144 			data->DevHandle = dev_handle;
2145 	} else {
2146 		bus = 0;
2147 		target = mps_mapping_get_tid_from_handle(sc, dev_handle);
2148 		data->Bus = bus;
2149 		data->TargetID = target;
2150 	}
2151 
2152 	return (0);
2153 }
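
/*
 * For illustration only (not part of the driver): assuming an open descriptor
 * 'fd', a Bus/Target pair could be translated to a DevHandle roughly as
 * follows, using the mps_btdh_mapping_t fields referenced above ('target' is
 * a caller-chosen target ID):
 *
 *	mps_btdh_mapping_t map;
 *
 *	memset(&map, 0, sizeof(map));
 *	map.Bus = 0;
 *	map.TargetID = target;
 *	map.DevHandle = 0xFFFF;
 *	if (ioctl(fd, MPTIOCTL_BTDH_MAPPING, &map) == 0)
 *		printf("DevHandle 0x%x\n", map.DevHandle);
 *
 * Setting Bus and TargetID to 0xFFFF and DevHandle to a valid handle performs
 * the reverse translation.
 */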
2154 
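/*
 * Main ioctl dispatcher for the mps control device.  For illustration only
 * (not part of the driver), a configuration page header could be fetched from
 * userland roughly as follows, assuming the device node /dev/mps0 is open as
 * 'fd':
 *
 *	struct mps_cfg_page_req req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
 *	req.header.PageNumber = 0;
 *	ioctl(fd, MPSIO_READ_CFG_HEADER, &req);
 *
 * On success the header is filled in and req.ioc_status carries the IOC's
 * completion status.
 */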
2155 static int
2156 mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
2157     struct thread *td)
2158 {
2159 	struct mps_softc *sc;
2160 	struct mps_cfg_page_req *page_req;
2161 	struct mps_ext_cfg_page_req *ext_page_req;
2162 	void *mps_page;
2163 	int error, msleep_ret;
2164 
2165 	mps_page = NULL;
2166 	sc = dev->si_drv1;
2167 	page_req = (void *)arg;
2168 	ext_page_req = (void *)arg;
2169 
2170 	switch (cmd) {
2171 	case MPSIO_READ_CFG_HEADER:
2172 		mps_lock(sc);
2173 		error = mps_user_read_cfg_header(sc, page_req);
2174 		mps_unlock(sc);
2175 		break;
2176 	case MPSIO_READ_CFG_PAGE:
2177 		mps_page = malloc(page_req->len, M_MPSUSER, M_WAITOK | M_ZERO);
2178 		error = copyin(page_req->buf, mps_page,
2179 		    sizeof(MPI2_CONFIG_PAGE_HEADER));
2180 		if (error)
2181 			break;
2182 		mps_lock(sc);
2183 		error = mps_user_read_cfg_page(sc, page_req, mps_page);
2184 		mps_unlock(sc);
2185 		if (error)
2186 			break;
2187 		error = copyout(mps_page, page_req->buf, page_req->len);
2188 		break;
2189 	case MPSIO_READ_EXT_CFG_HEADER:
2190 		mps_lock(sc);
2191 		error = mps_user_read_extcfg_header(sc, ext_page_req);
2192 		mps_unlock(sc);
2193 		break;
2194 	case MPSIO_READ_EXT_CFG_PAGE:
2195 		mps_page = malloc(ext_page_req->len, M_MPSUSER, M_WAITOK|M_ZERO);
2196 		error = copyin(ext_page_req->buf, mps_page,
2197 		    sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
2198 		if (error)
2199 			break;
2200 		mps_lock(sc);
2201 		error = mps_user_read_extcfg_page(sc, ext_page_req, mps_page);
2202 		mps_unlock(sc);
2203 		if (error)
2204 			break;
2205 		error = copyout(mps_page, ext_page_req->buf, ext_page_req->len);
2206 		break;
2207 	case MPSIO_WRITE_CFG_PAGE:
2208 		mps_page = malloc(page_req->len, M_MPSUSER, M_WAITOK|M_ZERO);
2209 		error = copyin(page_req->buf, mps_page, page_req->len);
2210 		if (error)
2211 			break;
2212 		mps_lock(sc);
2213 		error = mps_user_write_cfg_page(sc, page_req, mps_page);
2214 		mps_unlock(sc);
2215 		break;
2216 	case MPSIO_MPS_COMMAND:
2217 		error = mps_user_command(sc, (struct mps_usr_command *)arg);
2218 		break;
2219 	case MPTIOCTL_PASS_THRU:
2220 		/*
2221 		 * The user has requested to pass through a command to be
2222 		 * executed by the MPT firmware.  Call our routine which does
2223 		 * this.  Only allow one passthru IOCTL at one time.
2224 		 */
2225 		error = mps_user_pass_thru(sc, (mps_pass_thru_t *)arg);
2226 		break;
2227 	case MPTIOCTL_GET_ADAPTER_DATA:
2228 		/*
2229 		 * The user has requested to read adapter data.  Call our
2230 		 * routine which does this.
2231 		 */
2232 		error = 0;
2233 		mps_user_get_adapter_data(sc, (mps_adapter_data_t *)arg);
2234 		break;
2235 	case MPTIOCTL_GET_PCI_INFO:
2236 		/*
2237 		 * The user has requested to read pci info.  Call
2238 		 * our routine which does this.
2239 		 */
2240 		mps_lock(sc);
2241 		error = 0;
2242 		mps_user_read_pci_info(sc, (mps_pci_info_t *)arg);
2243 		mps_unlock(sc);
2244 		break;
2245 	case MPTIOCTL_RESET_ADAPTER:
2246 		mps_lock(sc);
2247 		sc->port_enable_complete = 0;
2248 		uint32_t reinit_start = time_uptime;
2249 		error = mps_reinit(sc);
2250 		/* Wait up to 300 seconds for port enable to complete. */
2251 		msleep_ret = msleep(&sc->port_enable_complete, &sc->mps_mtx, PRIBIO,
2252 		       "mps_porten", 300 * hz);
2253 		mps_unlock(sc);
2254 		if (msleep_ret)
2255 			printf("Port Enable did not complete after Diag "
2256 			    "Reset; msleep error %d.\n", msleep_ret);
2257 		else
2258 			mps_dprint(sc, MPS_USER,
2259 				"Hard Reset with Port Enable completed in %d seconds.\n",
2260 				 (uint32_t) (time_uptime - reinit_start));
2261 		break;
2262 	case MPTIOCTL_DIAG_ACTION:
2263 		/*
2264 		 * The user has done a diag buffer action.  Call our routine
2265 		 * which does this.  Only allow one diag action at a time.
2266 		 */
2267 		mps_lock(sc);
2268 		error = mps_user_diag_action(sc, (mps_diag_action_t *)arg);
2269 		mps_unlock(sc);
2270 		break;
2271 	case MPTIOCTL_EVENT_QUERY:
2272 		/*
2273 		 * The user has done an event query. Call our routine which does
2274 		 * this.
2275 		 */
2276 		error = 0;
2277 		mps_user_event_query(sc, (mps_event_query_t *)arg);
2278 		break;
2279 	case MPTIOCTL_EVENT_ENABLE:
2280 		/*
2281 		 * The user has done an event enable. Call our routine which
2282 		 * does this.
2283 		 */
2284 		error = 0;
2285 		mps_user_event_enable(sc, (mps_event_enable_t *)arg);
2286 		break;
2287 	case MPTIOCTL_EVENT_REPORT:
2288 		/*
2289 		 * The user has done an event report. Call our routine which
2290 		 * does this.
2291 		 */
2292 		error = mps_user_event_report(sc, (mps_event_report_t *)arg);
2293 		break;
2294 	case MPTIOCTL_REG_ACCESS:
2295 		/*
2296 		 * The user has requested register access.  Call our routine
2297 		 * which does this.
2298 		 */
2299 		mps_lock(sc);
2300 		error = mps_user_reg_access(sc, (mps_reg_access_t *)arg);
2301 		mps_unlock(sc);
2302 		break;
2303 	case MPTIOCTL_BTDH_MAPPING:
2304 		/*
2305 		 * The user has requested to translate a bus/target to a
2306 		 * DevHandle or a DevHandle to a bus/target.  Call our routine
2307 		 * which does this.
2308 		 */
2309 		error = mps_user_btdh(sc, (mps_btdh_mapping_t *)arg);
2310 		break;
2311 	default:
2312 		error = ENOIOCTL;
2313 		break;
2314 	}
2315 
2316 	if (mps_page != NULL)
2317 		free(mps_page, M_MPSUSER);
2318 
2319 	return (error);
2320 }
2321 
2322 #ifdef COMPAT_FREEBSD32
2323 
2324 struct mps_cfg_page_req32 {
2325 	MPI2_CONFIG_PAGE_HEADER header;
2326 	uint32_t page_address;
2327 	uint32_t buf;
2328 	int	len;
2329 	uint16_t ioc_status;
2330 };
2331 
2332 struct mps_ext_cfg_page_req32 {
2333 	MPI2_CONFIG_EXTENDED_PAGE_HEADER header;
2334 	uint32_t page_address;
2335 	uint32_t buf;
2336 	int	len;
2337 	uint16_t ioc_status;
2338 };
2339 
2340 struct mps_raid_action32 {
2341 	uint8_t action;
2342 	uint8_t volume_bus;
2343 	uint8_t volume_id;
2344 	uint8_t phys_disk_num;
2345 	uint32_t action_data_word;
2346 	uint32_t buf;
2347 	int len;
2348 	uint32_t volume_status;
2349 	uint32_t action_data[4];
2350 	uint16_t action_status;
2351 	uint16_t ioc_status;
2352 	uint8_t write;
2353 };
2354 
2355 struct mps_usr_command32 {
2356 	uint32_t req;
2357 	uint32_t req_len;
2358 	uint32_t rpl;
2359 	uint32_t rpl_len;
2360 	uint32_t buf;
2361 	int len;
2362 	uint32_t flags;
2363 };
2364 
2365 #define	MPSIO_READ_CFG_HEADER32	_IOWR('M', 200, struct mps_cfg_page_req32)
2366 #define	MPSIO_READ_CFG_PAGE32	_IOWR('M', 201, struct mps_cfg_page_req32)
2367 #define	MPSIO_READ_EXT_CFG_HEADER32 _IOWR('M', 202, struct mps_ext_cfg_page_req32)
2368 #define	MPSIO_READ_EXT_CFG_PAGE32 _IOWR('M', 203, struct mps_ext_cfg_page_req32)
2369 #define	MPSIO_WRITE_CFG_PAGE32	_IOWR('M', 204, struct mps_cfg_page_req32)
2370 #define	MPSIO_RAID_ACTION32	_IOWR('M', 205, struct mps_raid_action32)
2371 #define	MPSIO_MPS_COMMAND32	_IOWR('M', 210, struct mps_usr_command32)
2372 
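/*
 * 32-bit compatibility shim: translate the 32-bit ioctl structures, which
 * carry 32-bit user pointers, into their native counterparts, call
 * mps_ioctl(), and copy any output fields back into the 32-bit layout.
 */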
2373 static int
2374 mps_ioctl32(struct cdev *dev, u_long cmd32, void *_arg, int flag,
2375     struct thread *td)
2376 {
2377 	struct mps_cfg_page_req32 *page32 = _arg;
2378 	struct mps_ext_cfg_page_req32 *ext32 = _arg;
2379 	struct mps_raid_action32 *raid32 = _arg;
2380 	struct mps_usr_command32 *user32 = _arg;
2381 	union {
2382 		struct mps_cfg_page_req page;
2383 		struct mps_ext_cfg_page_req ext;
2384 		struct mps_raid_action raid;
2385 		struct mps_usr_command user;
2386 	} arg;
2387 	u_long cmd;
2388 	int error;
2389 
2390 	switch (cmd32) {
2391 	case MPSIO_READ_CFG_HEADER32:
2392 	case MPSIO_READ_CFG_PAGE32:
2393 	case MPSIO_WRITE_CFG_PAGE32:
2394 		if (cmd32 == MPSIO_READ_CFG_HEADER32)
2395 			cmd = MPSIO_READ_CFG_HEADER;
2396 		else if (cmd32 == MPSIO_READ_CFG_PAGE32)
2397 			cmd = MPSIO_READ_CFG_PAGE;
2398 		else
2399 			cmd = MPSIO_WRITE_CFG_PAGE;
2400 		CP(*page32, arg.page, header);
2401 		CP(*page32, arg.page, page_address);
2402 		PTRIN_CP(*page32, arg.page, buf);
2403 		CP(*page32, arg.page, len);
2404 		CP(*page32, arg.page, ioc_status);
2405 		break;
2406 
2407 	case MPSIO_READ_EXT_CFG_HEADER32:
2408 	case MPSIO_READ_EXT_CFG_PAGE32:
2409 		if (cmd32 == MPSIO_READ_EXT_CFG_HEADER32)
2410 			cmd = MPSIO_READ_EXT_CFG_HEADER;
2411 		else
2412 			cmd = MPSIO_READ_EXT_CFG_PAGE;
2413 		CP(*ext32, arg.ext, header);
2414 		CP(*ext32, arg.ext, page_address);
2415 		PTRIN_CP(*ext32, arg.ext, buf);
2416 		CP(*ext32, arg.ext, len);
2417 		CP(*ext32, arg.ext, ioc_status);
2418 		break;
2419 
2420 	case MPSIO_RAID_ACTION32:
2421 		cmd = MPSIO_RAID_ACTION;
2422 		CP(*raid32, arg.raid, action);
2423 		CP(*raid32, arg.raid, volume_bus);
2424 		CP(*raid32, arg.raid, volume_id);
2425 		CP(*raid32, arg.raid, phys_disk_num);
2426 		CP(*raid32, arg.raid, action_data_word);
2427 		PTRIN_CP(*raid32, arg.raid, buf);
2428 		CP(*raid32, arg.raid, len);
2429 		CP(*raid32, arg.raid, volume_status);
2430 		bcopy(raid32->action_data, arg.raid.action_data,
2431 		    sizeof arg.raid.action_data);
2432 		CP(*raid32, arg.raid, ioc_status);
2433 		CP(*raid32, arg.raid, write);
2434 		break;
2435 
2436 	case MPSIO_MPS_COMMAND32:
2437 		cmd = MPSIO_MPS_COMMAND;
2438 		PTRIN_CP(*user32, arg.user, req);
2439 		CP(*user32, arg.user, req_len);
2440 		PTRIN_CP(*user32, arg.user, rpl);
2441 		CP(*user32, arg.user, rpl_len);
2442 		PTRIN_CP(*user32, arg.user, buf);
2443 		CP(*user32, arg.user, len);
2444 		CP(*user32, arg.user, flags);
2445 		break;
2446 	default:
2447 		return (ENOIOCTL);
2448 	}
2449 
2450 	error = mps_ioctl(dev, cmd, &arg, flag, td);
2451 	if (error == 0 && (cmd32 & IOC_OUT) != 0) {
2452 		switch (cmd32) {
2453 		case MPSIO_READ_CFG_HEADER32:
2454 		case MPSIO_READ_CFG_PAGE32:
2455 		case MPSIO_WRITE_CFG_PAGE32:
2456 			CP(arg.page, *page32, header);
2457 			CP(arg.page, *page32, page_address);
2458 			PTROUT_CP(arg.page, *page32, buf);
2459 			CP(arg.page, *page32, len);
2460 			CP(arg.page, *page32, ioc_status);
2461 			break;
2462 
2463 		case MPSIO_READ_EXT_CFG_HEADER32:
2464 		case MPSIO_READ_EXT_CFG_PAGE32:
2465 			CP(arg.ext, *ext32, header);
2466 			CP(arg.ext, *ext32, page_address);
2467 			PTROUT_CP(arg.ext, *ext32, buf);
2468 			CP(arg.ext, *ext32, len);
2469 			CP(arg.ext, *ext32, ioc_status);
2470 			break;
2471 
2472 		case MPSIO_RAID_ACTION32:
2473 			CP(arg.raid, *raid32, action);
2474 			CP(arg.raid, *raid32, volume_bus);
2475 			CP(arg.raid, *raid32, volume_id);
2476 			CP(arg.raid, *raid32, phys_disk_num);
2477 			CP(arg.raid, *raid32, action_data_word);
2478 			PTROUT_CP(arg.raid, *raid32, buf);
2479 			CP(arg.raid, *raid32, len);
2480 			CP(arg.raid, *raid32, volume_status);
2481 			bcopy(arg.raid.action_data, raid32->action_data,
2482 			    sizeof arg.raid.action_data);
2483 			CP(arg.raid, *raid32, ioc_status);
2484 			CP(arg.raid, *raid32, write);
2485 			break;
2486 
2487 		case MPSIO_MPS_COMMAND32:
2488 			PTROUT_CP(arg.user, *user32, req);
2489 			CP(arg.user, *user32, req_len);
2490 			PTROUT_CP(arg.user, *user32, rpl);
2491 			CP(arg.user, *user32, rpl_len);
2492 			PTROUT_CP(arg.user, *user32, buf);
2493 			CP(arg.user, *user32, len);
2494 			CP(arg.user, *user32, flags);
2495 			break;
2496 		}
2497 	}
2498 
2499 	return (error);
2500 }
2501 #endif /* COMPAT_FREEBSD32 */
2502 
2503 static int
2504 mps_ioctl_devsw(struct cdev *dev, u_long com, caddr_t arg, int flag,
2505     struct thread *td)
2506 {
2507 #ifdef COMPAT_FREEBSD32
2508 	if (SV_CURPROC_FLAG(SV_ILP32))
2509 		return (mps_ioctl32(dev, com, arg, flag, td));
2510 #endif
2511 	return (mps_ioctl(dev, com, arg, flag, td));
2512 }
2513