1 /*- 2 * Copyright (c) 2008 Yahoo!, Inc. 3 * All rights reserved. 4 * Written by: John Baldwin <jhb@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the name of the author nor the names of any co-contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * LSI MPT-Fusion Host Adapter FreeBSD userland interface 31 */ 32 /*- 33 * Copyright (c) 2011, 2012 LSI Corp. 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * SUCH DAMAGE. 56 * 57 * LSI MPT-Fusion Host Adapter FreeBSD 58 * 59 * $FreeBSD$ 60 */ 61 62 #include <sys/cdefs.h> 63 __FBSDID("$FreeBSD$"); 64 65 #include "opt_compat.h" 66 67 /* TODO Move headers to mpsvar */ 68 #include <sys/types.h> 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/kernel.h> 72 #include <sys/selinfo.h> 73 #include <sys/module.h> 74 #include <sys/bus.h> 75 #include <sys/conf.h> 76 #include <sys/bio.h> 77 #include <sys/malloc.h> 78 #include <sys/uio.h> 79 #include <sys/sysctl.h> 80 #include <sys/ioccom.h> 81 #include <sys/endian.h> 82 #include <sys/queue.h> 83 #include <sys/kthread.h> 84 #include <sys/taskqueue.h> 85 #include <sys/proc.h> 86 #include <sys/sysent.h> 87 88 #include <machine/bus.h> 89 #include <machine/resource.h> 90 #include <sys/rman.h> 91 92 #include <cam/cam.h> 93 #include <cam/scsi/scsi_all.h> 94 95 #include <dev/mps/mpi/mpi2_type.h> 96 #include <dev/mps/mpi/mpi2.h> 97 #include <dev/mps/mpi/mpi2_ioc.h> 98 #include <dev/mps/mpi/mpi2_cnfg.h> 99 #include <dev/mps/mpi/mpi2_init.h> 100 #include <dev/mps/mpi/mpi2_tool.h> 101 #include <dev/mps/mps_ioctl.h> 102 #include <dev/mps/mpsvar.h> 103 #include <dev/mps/mps_table.h> 104 #include <dev/pci/pcivar.h> 105 #include <dev/pci/pcireg.h> 106 107 static d_open_t mps_open; 108 static d_close_t mps_close; 109 static d_ioctl_t mps_ioctl_devsw; 110 111 static struct cdevsw mps_cdevsw = { 112 .d_version = D_VERSION, 113 .d_flags = 0, 114 .d_open = mps_open, 115 .d_close = mps_close, 116 .d_ioctl = mps_ioctl_devsw, 117 .d_name = "mps", 118 }; 119 120 typedef int (mps_user_f)(struct mps_command *, struct mps_usr_command *); 121 static mps_user_f mpi_pre_ioc_facts; 122 static mps_user_f mpi_pre_port_facts; 123 static mps_user_f mpi_pre_fw_download; 124 static mps_user_f mpi_pre_fw_upload; 125 static mps_user_f mpi_pre_sata_passthrough; 126 static mps_user_f mpi_pre_smp_passthrough; 127 static mps_user_f mpi_pre_config; 128 static mps_user_f mpi_pre_sas_io_unit_control; 129 130 static int mps_user_read_cfg_header(struct mps_softc *, 131 struct mps_cfg_page_req *); 132 static int mps_user_read_cfg_page(struct mps_softc *, 133 struct mps_cfg_page_req *, void *); 134 static int mps_user_read_extcfg_header(struct mps_softc *, 135 struct mps_ext_cfg_page_req *); 136 static int mps_user_read_extcfg_page(struct mps_softc *, 137 struct mps_ext_cfg_page_req *, void *); 138 static int mps_user_write_cfg_page(struct mps_softc *, 139 struct mps_cfg_page_req *, void *); 140 static int mps_user_setup_request(struct mps_command *, 141 struct mps_usr_command *); 142 static int mps_user_command(struct mps_softc *, struct mps_usr_command *); 143 144 static int mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data); 145 static void mps_user_get_adapter_data(struct mps_softc *sc, 146 mps_adapter_data_t *data); 147 static void mps_user_read_pci_info(struct mps_softc *sc, 148 mps_pci_info_t *data); 149 static uint8_t mps_get_fw_diag_buffer_number(struct mps_softc *sc, 150 uint32_t unique_id); 151 static int mps_post_fw_diag_buffer(struct mps_softc *sc, 152 mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code); 153 static int mps_release_fw_diag_buffer(struct mps_softc *sc, 154 mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code, 155 uint32_t diag_type); 156 static int mps_diag_register(struct mps_softc *sc, 157 mps_fw_diag_register_t *diag_register, uint32_t *return_code); 158 static int mps_diag_unregister(struct mps_softc *sc, 159 mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code); 160 static int mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query, 161 uint32_t *return_code); 162 static int mps_diag_read_buffer(struct mps_softc *sc, 163 mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, 164 uint32_t *return_code); 165 static int mps_diag_release(struct mps_softc *sc, 166 mps_fw_diag_release_t *diag_release, uint32_t *return_code); 167 static int mps_do_diag_action(struct mps_softc *sc, uint32_t action, 168 uint8_t *diag_action, uint32_t length, uint32_t *return_code); 169 static int mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data); 170 static void mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data); 171 static void mps_user_event_enable(struct mps_softc *sc, 172 mps_event_enable_t *data); 173 static int mps_user_event_report(struct mps_softc *sc, 174 mps_event_report_t *data); 175 static int mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data); 176 static int mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data); 177 178 static MALLOC_DEFINE(M_MPSUSER, "mps_user", "Buffers for mps(4) ioctls"); 179 180 /* Macros from compat/freebsd32/freebsd32.h */ 181 #define PTRIN(v) (void *)(uintptr_t)(v) 182 #define PTROUT(v) (uint32_t)(uintptr_t)(v) 183 184 #define CP(src,dst,fld) do { (dst).fld = (src).fld; } while (0) 185 #define PTRIN_CP(src,dst,fld) \ 186 do { (dst).fld = PTRIN((src).fld); } while (0) 187 #define PTROUT_CP(src,dst,fld) \ 188 do { (dst).fld = PTROUT((src).fld); } while (0) 189 190 int 191 mps_attach_user(struct mps_softc *sc) 192 { 193 int unit; 194 195 unit = device_get_unit(sc->mps_dev); 196 sc->mps_cdev = make_dev(&mps_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, 197 "mps%d", unit); 198 if (sc->mps_cdev == NULL) { 199 return (ENOMEM); 200 } 201 sc->mps_cdev->si_drv1 = sc; 202 return (0); 203 } 204 205 void 206 mps_detach_user(struct mps_softc *sc) 207 { 208 209 /* XXX: do a purge of pending requests? */ 210 if (sc->mps_cdev != NULL) 211 destroy_dev(sc->mps_cdev); 212 } 213 214 static int 215 mps_open(struct cdev *dev, int flags, int fmt, struct thread *td) 216 { 217 218 return (0); 219 } 220 221 static int 222 mps_close(struct cdev *dev, int flags, int fmt, struct thread *td) 223 { 224 225 return (0); 226 } 227 228 static int 229 mps_user_read_cfg_header(struct mps_softc *sc, 230 struct mps_cfg_page_req *page_req) 231 { 232 MPI2_CONFIG_PAGE_HEADER *hdr; 233 struct mps_config_params params; 234 int error; 235 236 hdr = ¶ms.hdr.Struct; 237 params.action = MPI2_CONFIG_ACTION_PAGE_HEADER; 238 params.page_address = le32toh(page_req->page_address); 239 hdr->PageVersion = 0; 240 hdr->PageLength = 0; 241 hdr->PageNumber = page_req->header.PageNumber; 242 hdr->PageType = page_req->header.PageType; 243 params.buffer = NULL; 244 params.length = 0; 245 params.callback = NULL; 246 247 if ((error = mps_read_config_page(sc, ¶ms)) != 0) { 248 /* 249 * Leave the request. Without resetting the chip, it's 250 * still owned by it and we'll just get into trouble 251 * freeing it now. Mark it as abandoned so that if it 252 * shows up later it can be freed. 253 */ 254 mps_printf(sc, "read_cfg_header timed out\n"); 255 return (ETIMEDOUT); 256 } 257 258 page_req->ioc_status = htole16(params.status); 259 if ((page_req->ioc_status & MPI2_IOCSTATUS_MASK) == 260 MPI2_IOCSTATUS_SUCCESS) { 261 bcopy(hdr, &page_req->header, sizeof(page_req->header)); 262 } 263 264 return (0); 265 } 266 267 static int 268 mps_user_read_cfg_page(struct mps_softc *sc, struct mps_cfg_page_req *page_req, 269 void *buf) 270 { 271 MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr; 272 struct mps_config_params params; 273 int error; 274 275 reqhdr = buf; 276 hdr = ¶ms.hdr.Struct; 277 hdr->PageVersion = reqhdr->PageVersion; 278 hdr->PageLength = reqhdr->PageLength; 279 hdr->PageNumber = reqhdr->PageNumber; 280 hdr->PageType = reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK; 281 params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 282 params.page_address = le32toh(page_req->page_address); 283 params.buffer = buf; 284 params.length = le32toh(page_req->len); 285 params.callback = NULL; 286 287 if ((error = mps_read_config_page(sc, ¶ms)) != 0) { 288 mps_printf(sc, "mps_user_read_cfg_page timed out\n"); 289 return (ETIMEDOUT); 290 } 291 292 page_req->ioc_status = htole16(params.status); 293 return (0); 294 } 295 296 static int 297 mps_user_read_extcfg_header(struct mps_softc *sc, 298 struct mps_ext_cfg_page_req *ext_page_req) 299 { 300 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; 301 struct mps_config_params params; 302 int error; 303 304 hdr = ¶ms.hdr.Ext; 305 params.action = MPI2_CONFIG_ACTION_PAGE_HEADER; 306 hdr->PageVersion = ext_page_req->header.PageVersion; 307 hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 308 hdr->ExtPageLength = 0; 309 hdr->PageNumber = ext_page_req->header.PageNumber; 310 hdr->ExtPageType = ext_page_req->header.ExtPageType; 311 params.page_address = le32toh(ext_page_req->page_address); 312 if ((error = mps_read_config_page(sc, ¶ms)) != 0) { 313 /* 314 * Leave the request. Without resetting the chip, it's 315 * still owned by it and we'll just get into trouble 316 * freeing it now. Mark it as abandoned so that if it 317 * shows up later it can be freed. 318 */ 319 mps_printf(sc, "mps_user_read_extcfg_header timed out\n"); 320 return (ETIMEDOUT); 321 } 322 323 ext_page_req->ioc_status = htole16(params.status); 324 if ((ext_page_req->ioc_status & MPI2_IOCSTATUS_MASK) == 325 MPI2_IOCSTATUS_SUCCESS) { 326 ext_page_req->header.PageVersion = hdr->PageVersion; 327 ext_page_req->header.PageNumber = hdr->PageNumber; 328 ext_page_req->header.PageType = hdr->PageType; 329 ext_page_req->header.ExtPageLength = hdr->ExtPageLength; 330 ext_page_req->header.ExtPageType = hdr->ExtPageType; 331 } 332 333 return (0); 334 } 335 336 static int 337 mps_user_read_extcfg_page(struct mps_softc *sc, 338 struct mps_ext_cfg_page_req *ext_page_req, void *buf) 339 { 340 MPI2_CONFIG_EXTENDED_PAGE_HEADER *reqhdr, *hdr; 341 struct mps_config_params params; 342 int error; 343 344 reqhdr = buf; 345 hdr = ¶ms.hdr.Ext; 346 params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 347 params.page_address = le32toh(ext_page_req->page_address); 348 hdr->PageVersion = reqhdr->PageVersion; 349 hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 350 hdr->PageNumber = reqhdr->PageNumber; 351 hdr->ExtPageType = reqhdr->ExtPageType; 352 hdr->ExtPageLength = reqhdr->ExtPageLength; 353 params.buffer = buf; 354 params.length = le32toh(ext_page_req->len); 355 params.callback = NULL; 356 357 if ((error = mps_read_config_page(sc, ¶ms)) != 0) { 358 mps_printf(sc, "mps_user_read_extcfg_page timed out\n"); 359 return (ETIMEDOUT); 360 } 361 362 ext_page_req->ioc_status = htole16(params.status); 363 return (0); 364 } 365 366 static int 367 mps_user_write_cfg_page(struct mps_softc *sc, 368 struct mps_cfg_page_req *page_req, void *buf) 369 { 370 MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr; 371 struct mps_config_params params; 372 u_int hdr_attr; 373 int error; 374 375 reqhdr = buf; 376 hdr = ¶ms.hdr.Struct; 377 hdr_attr = reqhdr->PageType & MPI2_CONFIG_PAGEATTR_MASK; 378 if (hdr_attr != MPI2_CONFIG_PAGEATTR_CHANGEABLE && 379 hdr_attr != MPI2_CONFIG_PAGEATTR_PERSISTENT) { 380 mps_printf(sc, "page type 0x%x not changeable\n", 381 reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK); 382 return (EINVAL); 383 } 384 385 /* 386 * There isn't any point in restoring stripped out attributes 387 * if you then mask them going down to issue the request. 388 */ 389 390 hdr->PageVersion = reqhdr->PageVersion; 391 hdr->PageLength = reqhdr->PageLength; 392 hdr->PageNumber = reqhdr->PageNumber; 393 hdr->PageType = reqhdr->PageType; 394 params.action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; 395 params.page_address = le32toh(page_req->page_address); 396 params.buffer = buf; 397 params.length = le32toh(page_req->len); 398 params.callback = NULL; 399 400 if ((error = mps_write_config_page(sc, ¶ms)) != 0) { 401 mps_printf(sc, "mps_write_cfg_page timed out\n"); 402 return (ETIMEDOUT); 403 } 404 405 page_req->ioc_status = htole16(params.status); 406 return (0); 407 } 408 409 void 410 mpi_init_sge(struct mps_command *cm, void *req, void *sge) 411 { 412 int off, space; 413 414 space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; 415 off = (uintptr_t)sge - (uintptr_t)req; 416 417 KASSERT(off < space, ("bad pointers %p %p, off %d, space %d", 418 req, sge, off, space)); 419 420 cm->cm_sge = sge; 421 cm->cm_sglsize = space - off; 422 } 423 424 /* 425 * Prepare the mps_command for an IOC_FACTS request. 426 */ 427 static int 428 mpi_pre_ioc_facts(struct mps_command *cm, struct mps_usr_command *cmd) 429 { 430 MPI2_IOC_FACTS_REQUEST *req = (void *)cm->cm_req; 431 MPI2_IOC_FACTS_REPLY *rpl; 432 433 if (cmd->req_len != sizeof *req) 434 return (EINVAL); 435 if (cmd->rpl_len != sizeof *rpl) 436 return (EINVAL); 437 438 cm->cm_sge = NULL; 439 cm->cm_sglsize = 0; 440 return (0); 441 } 442 443 /* 444 * Prepare the mps_command for a PORT_FACTS request. 445 */ 446 static int 447 mpi_pre_port_facts(struct mps_command *cm, struct mps_usr_command *cmd) 448 { 449 MPI2_PORT_FACTS_REQUEST *req = (void *)cm->cm_req; 450 MPI2_PORT_FACTS_REPLY *rpl; 451 452 if (cmd->req_len != sizeof *req) 453 return (EINVAL); 454 if (cmd->rpl_len != sizeof *rpl) 455 return (EINVAL); 456 457 cm->cm_sge = NULL; 458 cm->cm_sglsize = 0; 459 return (0); 460 } 461 462 /* 463 * Prepare the mps_command for a FW_DOWNLOAD request. 464 */ 465 static int 466 mpi_pre_fw_download(struct mps_command *cm, struct mps_usr_command *cmd) 467 { 468 MPI2_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req; 469 MPI2_FW_DOWNLOAD_REPLY *rpl; 470 MPI2_FW_DOWNLOAD_TCSGE tc; 471 int error; 472 473 /* 474 * This code assumes there is room in the request's SGL for 475 * the TransactionContext plus at least a SGL chain element. 476 */ 477 CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE); 478 479 if (cmd->req_len != sizeof *req) 480 return (EINVAL); 481 if (cmd->rpl_len != sizeof *rpl) 482 return (EINVAL); 483 484 if (cmd->len == 0) 485 return (EINVAL); 486 487 error = copyin(cmd->buf, cm->cm_data, cmd->len); 488 if (error != 0) 489 return (error); 490 491 mpi_init_sge(cm, req, &req->SGL); 492 bzero(&tc, sizeof tc); 493 494 /* 495 * For now, the F/W image must be provided in a single request. 496 */ 497 if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0) 498 return (EINVAL); 499 if (req->TotalImageSize != cmd->len) 500 return (EINVAL); 501 502 /* 503 * The value of the first two elements is specified in the 504 * Fusion-MPT Message Passing Interface document. 505 */ 506 tc.ContextSize = 0; 507 tc.DetailsLength = 12; 508 tc.ImageOffset = 0; 509 tc.ImageSize = cmd->len; 510 511 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT; 512 513 return (mps_push_sge(cm, &tc, sizeof tc, 0)); 514 } 515 516 /* 517 * Prepare the mps_command for a FW_UPLOAD request. 518 */ 519 static int 520 mpi_pre_fw_upload(struct mps_command *cm, struct mps_usr_command *cmd) 521 { 522 MPI2_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req; 523 MPI2_FW_UPLOAD_REPLY *rpl; 524 MPI2_FW_UPLOAD_TCSGE tc; 525 526 /* 527 * This code assumes there is room in the request's SGL for 528 * the TransactionContext plus at least a SGL chain element. 529 */ 530 CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE); 531 532 if (cmd->req_len != sizeof *req) 533 return (EINVAL); 534 if (cmd->rpl_len != sizeof *rpl) 535 return (EINVAL); 536 537 mpi_init_sge(cm, req, &req->SGL); 538 bzero(&tc, sizeof tc); 539 540 /* 541 * The value of the first two elements is specified in the 542 * Fusion-MPT Message Passing Interface document. 543 */ 544 tc.ContextSize = 0; 545 tc.DetailsLength = 12; 546 /* 547 * XXX Is there any reason to fetch a partial image? I.e. to 548 * set ImageOffset to something other than 0? 549 */ 550 tc.ImageOffset = 0; 551 tc.ImageSize = cmd->len; 552 553 cm->cm_flags |= MPS_CM_FLAGS_DATAIN; 554 555 return (mps_push_sge(cm, &tc, sizeof tc, 0)); 556 } 557 558 /* 559 * Prepare the mps_command for a SATA_PASSTHROUGH request. 560 */ 561 static int 562 mpi_pre_sata_passthrough(struct mps_command *cm, struct mps_usr_command *cmd) 563 { 564 MPI2_SATA_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req; 565 MPI2_SATA_PASSTHROUGH_REPLY *rpl; 566 567 if (cmd->req_len != sizeof *req) 568 return (EINVAL); 569 if (cmd->rpl_len != sizeof *rpl) 570 return (EINVAL); 571 572 mpi_init_sge(cm, req, &req->SGL); 573 return (0); 574 } 575 576 /* 577 * Prepare the mps_command for a SMP_PASSTHROUGH request. 578 */ 579 static int 580 mpi_pre_smp_passthrough(struct mps_command *cm, struct mps_usr_command *cmd) 581 { 582 MPI2_SMP_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req; 583 MPI2_SMP_PASSTHROUGH_REPLY *rpl; 584 585 if (cmd->req_len != sizeof *req) 586 return (EINVAL); 587 if (cmd->rpl_len != sizeof *rpl) 588 return (EINVAL); 589 590 mpi_init_sge(cm, req, &req->SGL); 591 return (0); 592 } 593 594 /* 595 * Prepare the mps_command for a CONFIG request. 596 */ 597 static int 598 mpi_pre_config(struct mps_command *cm, struct mps_usr_command *cmd) 599 { 600 MPI2_CONFIG_REQUEST *req = (void *)cm->cm_req; 601 MPI2_CONFIG_REPLY *rpl; 602 603 if (cmd->req_len != sizeof *req) 604 return (EINVAL); 605 if (cmd->rpl_len != sizeof *rpl) 606 return (EINVAL); 607 608 mpi_init_sge(cm, req, &req->PageBufferSGE); 609 return (0); 610 } 611 612 /* 613 * Prepare the mps_command for a SAS_IO_UNIT_CONTROL request. 614 */ 615 static int 616 mpi_pre_sas_io_unit_control(struct mps_command *cm, 617 struct mps_usr_command *cmd) 618 { 619 620 cm->cm_sge = NULL; 621 cm->cm_sglsize = 0; 622 return (0); 623 } 624 625 /* 626 * A set of functions to prepare an mps_command for the various 627 * supported requests. 628 */ 629 struct mps_user_func { 630 U8 Function; 631 mps_user_f *f_pre; 632 } mps_user_func_list[] = { 633 { MPI2_FUNCTION_IOC_FACTS, mpi_pre_ioc_facts }, 634 { MPI2_FUNCTION_PORT_FACTS, mpi_pre_port_facts }, 635 { MPI2_FUNCTION_FW_DOWNLOAD, mpi_pre_fw_download }, 636 { MPI2_FUNCTION_FW_UPLOAD, mpi_pre_fw_upload }, 637 { MPI2_FUNCTION_SATA_PASSTHROUGH, mpi_pre_sata_passthrough }, 638 { MPI2_FUNCTION_SMP_PASSTHROUGH, mpi_pre_smp_passthrough}, 639 { MPI2_FUNCTION_CONFIG, mpi_pre_config}, 640 { MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, mpi_pre_sas_io_unit_control }, 641 { 0xFF, NULL } /* list end */ 642 }; 643 644 static int 645 mps_user_setup_request(struct mps_command *cm, struct mps_usr_command *cmd) 646 { 647 MPI2_REQUEST_HEADER *hdr = (MPI2_REQUEST_HEADER *)cm->cm_req; 648 struct mps_user_func *f; 649 650 for (f = mps_user_func_list; f->f_pre != NULL; f++) { 651 if (hdr->Function == f->Function) 652 return (f->f_pre(cm, cmd)); 653 } 654 return (EINVAL); 655 } 656 657 static int 658 mps_user_command(struct mps_softc *sc, struct mps_usr_command *cmd) 659 { 660 MPI2_REQUEST_HEADER *hdr; 661 MPI2_DEFAULT_REPLY *rpl; 662 void *buf = NULL; 663 struct mps_command *cm = NULL; 664 int err = 0; 665 int sz; 666 667 mps_lock(sc); 668 cm = mps_alloc_command(sc); 669 670 if (cm == NULL) { 671 mps_printf(sc, "%s: no mps requests\n", __func__); 672 err = ENOMEM; 673 goto Ret; 674 } 675 mps_unlock(sc); 676 677 hdr = (MPI2_REQUEST_HEADER *)cm->cm_req; 678 679 mps_dprint(sc, MPS_USER, "%s: req %p %d rpl %p %d\n", __func__, 680 cmd->req, cmd->req_len, cmd->rpl, cmd->rpl_len); 681 682 if (cmd->req_len > (int)sc->facts->IOCRequestFrameSize * 4) { 683 err = EINVAL; 684 goto RetFreeUnlocked; 685 } 686 err = copyin(cmd->req, hdr, cmd->req_len); 687 if (err != 0) 688 goto RetFreeUnlocked; 689 690 mps_dprint(sc, MPS_USER, "%s: Function %02X MsgFlags %02X\n", __func__, 691 hdr->Function, hdr->MsgFlags); 692 693 if (cmd->len > 0) { 694 buf = malloc(cmd->len, M_MPSUSER, M_WAITOK|M_ZERO); 695 if(!buf) { 696 mps_printf(sc, "Cannot allocate memory %s %d\n", 697 __func__, __LINE__); 698 return (ENOMEM); 699 } 700 cm->cm_data = buf; 701 cm->cm_length = cmd->len; 702 } else { 703 cm->cm_data = NULL; 704 cm->cm_length = 0; 705 } 706 707 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE; 708 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 709 710 err = mps_user_setup_request(cm, cmd); 711 if (err == EINVAL) { 712 mps_printf(sc, "%s: unsupported parameter or unsupported " 713 "function in request (function = 0x%X)\n", __func__, 714 hdr->Function); 715 } 716 if (err != 0) 717 goto RetFreeUnlocked; 718 719 mps_lock(sc); 720 err = mps_wait_command(sc, cm, 60, CAN_SLEEP); 721 722 if (err) { 723 mps_printf(sc, "%s: invalid request: error %d\n", 724 __func__, err); 725 goto Ret; 726 } 727 728 rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; 729 if (rpl != NULL) 730 sz = rpl->MsgLength * 4; 731 else 732 sz = 0; 733 734 if (sz > cmd->rpl_len) { 735 mps_printf(sc, "%s: user reply buffer (%d) smaller than " 736 "returned buffer (%d)\n", __func__, cmd->rpl_len, sz); 737 sz = cmd->rpl_len; 738 } 739 740 mps_unlock(sc); 741 copyout(rpl, cmd->rpl, sz); 742 if (buf != NULL) 743 copyout(buf, cmd->buf, cmd->len); 744 mps_dprint(sc, MPS_USER, "%s: reply size %d\n", __func__, sz); 745 746 RetFreeUnlocked: 747 mps_lock(sc); 748 if (cm != NULL) 749 mps_free_command(sc, cm); 750 Ret: 751 mps_unlock(sc); 752 if (buf != NULL) 753 free(buf, M_MPSUSER); 754 return (err); 755 } 756 757 static int 758 mps_user_pass_thru(struct mps_softc *sc, mps_pass_thru_t *data) 759 { 760 MPI2_REQUEST_HEADER *hdr, tmphdr; 761 MPI2_DEFAULT_REPLY *rpl; 762 struct mps_command *cm = NULL; 763 int err = 0, dir = 0, sz; 764 uint8_t function = 0; 765 u_int sense_len; 766 767 /* 768 * Only allow one passthru command at a time. Use the MPS_FLAGS_BUSY 769 * bit to denote that a passthru is being processed. 770 */ 771 mps_lock(sc); 772 if (sc->mps_flags & MPS_FLAGS_BUSY) { 773 mps_dprint(sc, MPS_USER, "%s: Only one passthru command " 774 "allowed at a single time.", __func__); 775 mps_unlock(sc); 776 return (EBUSY); 777 } 778 sc->mps_flags |= MPS_FLAGS_BUSY; 779 mps_unlock(sc); 780 781 /* 782 * Do some validation on data direction. Valid cases are: 783 * 1) DataSize is 0 and direction is NONE 784 * 2) DataSize is non-zero and one of: 785 * a) direction is READ or 786 * b) direction is WRITE or 787 * c) direction is BOTH and DataOutSize is non-zero 788 * If valid and the direction is BOTH, change the direction to READ. 789 * if valid and the direction is not BOTH, make sure DataOutSize is 0. 790 */ 791 if (((data->DataSize == 0) && 792 (data->DataDirection == MPS_PASS_THRU_DIRECTION_NONE)) || 793 ((data->DataSize != 0) && 794 ((data->DataDirection == MPS_PASS_THRU_DIRECTION_READ) || 795 (data->DataDirection == MPS_PASS_THRU_DIRECTION_WRITE) || 796 ((data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH) && 797 (data->DataOutSize != 0))))) { 798 if (data->DataDirection == MPS_PASS_THRU_DIRECTION_BOTH) 799 data->DataDirection = MPS_PASS_THRU_DIRECTION_READ; 800 else 801 data->DataOutSize = 0; 802 } else 803 return (EINVAL); 804 805 mps_dprint(sc, MPS_USER, "%s: req 0x%jx %d rpl 0x%jx %d " 806 "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__, 807 data->PtrRequest, data->RequestSize, data->PtrReply, 808 data->ReplySize, data->PtrData, data->DataSize, 809 data->PtrDataOut, data->DataOutSize, data->DataDirection); 810 811 /* 812 * copy in the header so we know what we're dealing with before we 813 * commit to allocating a command for it. 814 */ 815 err = copyin(PTRIN(data->PtrRequest), &tmphdr, data->RequestSize); 816 if (err != 0) 817 goto RetFreeUnlocked; 818 819 if (data->RequestSize > (int)sc->facts->IOCRequestFrameSize * 4) { 820 err = EINVAL; 821 goto RetFreeUnlocked; 822 } 823 824 function = tmphdr.Function; 825 mps_dprint(sc, MPS_USER, "%s: Function %02X MsgFlags %02X\n", __func__, 826 function, tmphdr.MsgFlags); 827 828 /* 829 * Handle a passthru TM request. 830 */ 831 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 832 MPI2_SCSI_TASK_MANAGE_REQUEST *task; 833 834 mps_lock(sc); 835 cm = mpssas_alloc_tm(sc); 836 if (cm == NULL) { 837 err = EINVAL; 838 goto Ret; 839 } 840 841 /* Copy the header in. Only a small fixup is needed. */ 842 task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 843 bcopy(&tmphdr, task, data->RequestSize); 844 task->TaskMID = cm->cm_desc.Default.SMID; 845 846 cm->cm_data = NULL; 847 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 848 cm->cm_complete = NULL; 849 cm->cm_complete_data = NULL; 850 851 err = mps_wait_command(sc, cm, 30, CAN_SLEEP); 852 853 if (err != 0) { 854 err = EIO; 855 mps_dprint(sc, MPS_FAULT, "%s: task management failed", 856 __func__); 857 } 858 /* 859 * Copy the reply data and sense data to user space. 860 */ 861 if (cm->cm_reply != NULL) { 862 rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; 863 sz = rpl->MsgLength * 4; 864 865 if (sz > data->ReplySize) { 866 mps_printf(sc, "%s: user reply buffer (%d) " 867 "smaller than returned buffer (%d)\n", 868 __func__, data->ReplySize, sz); 869 } 870 mps_unlock(sc); 871 copyout(cm->cm_reply, PTRIN(data->PtrReply), 872 data->ReplySize); 873 mps_lock(sc); 874 } 875 mpssas_free_tm(sc, cm); 876 goto Ret; 877 } 878 879 mps_lock(sc); 880 cm = mps_alloc_command(sc); 881 882 if (cm == NULL) { 883 mps_printf(sc, "%s: no mps requests\n", __func__); 884 err = ENOMEM; 885 goto Ret; 886 } 887 mps_unlock(sc); 888 889 hdr = (MPI2_REQUEST_HEADER *)cm->cm_req; 890 bcopy(&tmphdr, hdr, data->RequestSize); 891 892 /* 893 * Do some checking to make sure the IOCTL request contains a valid 894 * request. Then set the SGL info. 895 */ 896 mpi_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize)); 897 898 /* 899 * Set up for read, write or both. From check above, DataOutSize will 900 * be 0 if direction is READ or WRITE, but it will have some non-zero 901 * value if the direction is BOTH. So, just use the biggest size to get 902 * the cm_data buffer size. If direction is BOTH, 2 SGLs need to be set 903 * up; the first is for the request and the second will contain the 904 * response data. cm_out_len needs to be set here and this will be used 905 * when the SGLs are set up. 906 */ 907 cm->cm_data = NULL; 908 cm->cm_length = MAX(data->DataSize, data->DataOutSize); 909 cm->cm_out_len = data->DataOutSize; 910 cm->cm_flags = 0; 911 if (cm->cm_length != 0) { 912 cm->cm_data = malloc(cm->cm_length, M_MPSUSER, M_WAITOK | 913 M_ZERO); 914 if (cm->cm_data == NULL) { 915 mps_dprint(sc, MPS_FAULT, "%s: alloc failed for IOCTL " 916 "passthru length %d\n", __func__, cm->cm_length); 917 } else { 918 cm->cm_flags = MPS_CM_FLAGS_DATAIN; 919 if (data->DataOutSize) { 920 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT; 921 err = copyin(PTRIN(data->PtrDataOut), 922 cm->cm_data, data->DataOutSize); 923 } else if (data->DataDirection == 924 MPS_PASS_THRU_DIRECTION_WRITE) { 925 cm->cm_flags = MPS_CM_FLAGS_DATAOUT; 926 err = copyin(PTRIN(data->PtrData), 927 cm->cm_data, data->DataSize); 928 } 929 if (err != 0) 930 mps_dprint(sc, MPS_FAULT, "%s: failed to copy " 931 "IOCTL data from user space\n", __func__); 932 } 933 } 934 cm->cm_flags |= MPS_CM_FLAGS_SGE_SIMPLE; 935 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 936 937 /* 938 * Set up Sense buffer and SGL offset for IO passthru. SCSI IO request 939 * uses SCSI IO descriptor. 940 */ 941 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) || 942 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 943 MPI2_SCSI_IO_REQUEST *scsi_io_req; 944 945 scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr; 946 /* 947 * Put SGE for data and data_out buffer at the end of 948 * scsi_io_request message header (64 bytes in total). 949 * Following above SGEs, the residual space will be used by 950 * sense data. 951 */ 952 scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize - 953 64); 954 scsi_io_req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); 955 956 /* 957 * Set SGLOffset0 value. This is the number of dwords that SGL 958 * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct. 959 */ 960 scsi_io_req->SGLOffset0 = 24; 961 962 /* 963 * Setup descriptor info. RAID passthrough must use the 964 * default request descriptor which is already set, so if this 965 * is a SCSI IO request, change the descriptor to SCSI IO. 966 * Also, if this is a SCSI IO request, handle the reply in the 967 * mpssas_scsio_complete function. 968 */ 969 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) { 970 cm->cm_desc.SCSIIO.RequestFlags = 971 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 972 cm->cm_desc.SCSIIO.DevHandle = scsi_io_req->DevHandle; 973 974 /* 975 * Make sure the DevHandle is not 0 because this is a 976 * likely error. 977 */ 978 if (scsi_io_req->DevHandle == 0) { 979 err = EINVAL; 980 goto RetFreeUnlocked; 981 } 982 } 983 } 984 985 mps_lock(sc); 986 987 err = mps_wait_command(sc, cm, 30, CAN_SLEEP); 988 989 if (err) { 990 mps_printf(sc, "%s: invalid request: error %d\n", __func__, 991 err); 992 mps_unlock(sc); 993 goto RetFreeUnlocked; 994 } 995 996 /* 997 * Sync the DMA data, if any. Then copy the data to user space. 998 */ 999 if (cm->cm_data != NULL) { 1000 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) 1001 dir = BUS_DMASYNC_POSTREAD; 1002 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) 1003 dir = BUS_DMASYNC_POSTWRITE; 1004 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 1005 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 1006 1007 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) { 1008 mps_unlock(sc); 1009 err = copyout(cm->cm_data, 1010 PTRIN(data->PtrData), data->DataSize); 1011 mps_lock(sc); 1012 if (err != 0) 1013 mps_dprint(sc, MPS_FAULT, "%s: failed to copy " 1014 "IOCTL data to user space\n", __func__); 1015 } 1016 } 1017 1018 /* 1019 * Copy the reply data and sense data to user space. 1020 */ 1021 if (cm->cm_reply != NULL) { 1022 rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; 1023 sz = rpl->MsgLength * 4; 1024 1025 if (sz > data->ReplySize) { 1026 mps_printf(sc, "%s: user reply buffer (%d) smaller " 1027 "than returned buffer (%d)\n", __func__, 1028 data->ReplySize, sz); 1029 } 1030 mps_unlock(sc); 1031 copyout(cm->cm_reply, PTRIN(data->PtrReply), data->ReplySize); 1032 mps_lock(sc); 1033 1034 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) || 1035 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 1036 if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState & 1037 MPI2_SCSI_STATE_AUTOSENSE_VALID) { 1038 sense_len = 1039 MIN((le32toh(((MPI2_SCSI_IO_REPLY *)rpl)->SenseCount)), 1040 sizeof(struct scsi_sense_data)); 1041 mps_unlock(sc); 1042 copyout(cm->cm_sense, cm->cm_req + 64, sense_len); 1043 mps_lock(sc); 1044 } 1045 } 1046 } 1047 mps_unlock(sc); 1048 1049 RetFreeUnlocked: 1050 mps_lock(sc); 1051 1052 if (cm != NULL) { 1053 if (cm->cm_data) 1054 free(cm->cm_data, M_MPSUSER); 1055 mps_free_command(sc, cm); 1056 } 1057 Ret: 1058 sc->mps_flags &= ~MPS_FLAGS_BUSY; 1059 mps_unlock(sc); 1060 1061 return (err); 1062 } 1063 1064 static void 1065 mps_user_get_adapter_data(struct mps_softc *sc, mps_adapter_data_t *data) 1066 { 1067 Mpi2ConfigReply_t mpi_reply; 1068 Mpi2BiosPage3_t config_page; 1069 1070 /* 1071 * Use the PCI interface functions to get the Bus, Device, and Function 1072 * information. 1073 */ 1074 data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mps_dev); 1075 data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mps_dev); 1076 data->PciInformation.u.bits.FunctionNumber = 1077 pci_get_function(sc->mps_dev); 1078 1079 /* 1080 * Get the FW version that should already be saved in IOC Facts. 1081 */ 1082 data->MpiFirmwareVersion = sc->facts->FWVersion.Word; 1083 1084 /* 1085 * General device info. 1086 */ 1087 data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2; 1088 if (sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) 1089 data->AdapterType = MPSIOCTL_ADAPTER_TYPE_SAS2_SSS6200; 1090 data->PCIDeviceHwId = pci_get_device(sc->mps_dev); 1091 data->PCIDeviceHwRev = pci_read_config(sc->mps_dev, PCIR_REVID, 1); 1092 data->SubSystemId = pci_get_subdevice(sc->mps_dev); 1093 data->SubsystemVendorId = pci_get_subvendor(sc->mps_dev); 1094 1095 /* 1096 * Get the driver version. 1097 */ 1098 strcpy((char *)&data->DriverVersion[0], MPS_DRIVER_VERSION); 1099 1100 /* 1101 * Need to get BIOS Config Page 3 for the BIOS Version. 1102 */ 1103 data->BiosVersion = 0; 1104 mps_lock(sc); 1105 if (mps_config_get_bios_pg3(sc, &mpi_reply, &config_page)) 1106 printf("%s: Error while retrieving BIOS Version\n", __func__); 1107 else 1108 data->BiosVersion = config_page.BiosVersion; 1109 mps_unlock(sc); 1110 } 1111 1112 static void 1113 mps_user_read_pci_info(struct mps_softc *sc, mps_pci_info_t *data) 1114 { 1115 int i; 1116 1117 /* 1118 * Use the PCI interface functions to get the Bus, Device, and Function 1119 * information. 1120 */ 1121 data->BusNumber = pci_get_bus(sc->mps_dev); 1122 data->DeviceNumber = pci_get_slot(sc->mps_dev); 1123 data->FunctionNumber = pci_get_function(sc->mps_dev); 1124 1125 /* 1126 * Now get the interrupt vector and the pci header. The vector can 1127 * only be 0 right now. The header is the first 256 bytes of config 1128 * space. 1129 */ 1130 data->InterruptVector = 0; 1131 for (i = 0; i < sizeof (data->PciHeader); i++) { 1132 data->PciHeader[i] = pci_read_config(sc->mps_dev, i, 1); 1133 } 1134 } 1135 1136 static uint8_t 1137 mps_get_fw_diag_buffer_number(struct mps_softc *sc, uint32_t unique_id) 1138 { 1139 uint8_t index; 1140 1141 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) { 1142 if (sc->fw_diag_buffer_list[index].unique_id == unique_id) { 1143 return (index); 1144 } 1145 } 1146 1147 return (MPS_FW_DIAGNOSTIC_UID_NOT_FOUND); 1148 } 1149 1150 static int 1151 mps_post_fw_diag_buffer(struct mps_softc *sc, 1152 mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code) 1153 { 1154 MPI2_DIAG_BUFFER_POST_REQUEST *req; 1155 MPI2_DIAG_BUFFER_POST_REPLY *reply; 1156 struct mps_command *cm = NULL; 1157 int i, status; 1158 1159 /* 1160 * If buffer is not enabled, just leave. 1161 */ 1162 *return_code = MPS_FW_DIAG_ERROR_POST_FAILED; 1163 if (!pBuffer->enabled) { 1164 return (MPS_DIAG_FAILURE); 1165 } 1166 1167 /* 1168 * Clear some flags initially. 1169 */ 1170 pBuffer->force_release = FALSE; 1171 pBuffer->valid_data = FALSE; 1172 pBuffer->owned_by_firmware = FALSE; 1173 1174 /* 1175 * Get a command. 1176 */ 1177 cm = mps_alloc_command(sc); 1178 if (cm == NULL) { 1179 mps_printf(sc, "%s: no mps requests\n", __func__); 1180 return (MPS_DIAG_FAILURE); 1181 } 1182 1183 /* 1184 * Build the request for releasing the FW Diag Buffer and send it. 1185 */ 1186 req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req; 1187 req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 1188 req->BufferType = pBuffer->buffer_type; 1189 req->ExtendedType = pBuffer->extended_type; 1190 req->BufferLength = pBuffer->size; 1191 for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++) 1192 req->ProductSpecific[i] = pBuffer->product_specific[i]; 1193 mps_from_u64(sc->fw_diag_busaddr, &req->BufferAddress); 1194 cm->cm_data = NULL; 1195 cm->cm_length = 0; 1196 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1197 cm->cm_complete_data = NULL; 1198 1199 /* 1200 * Send command synchronously. 1201 */ 1202 status = mps_wait_command(sc, cm, 30, CAN_SLEEP); 1203 if (status) { 1204 mps_printf(sc, "%s: invalid request: error %d\n", __func__, 1205 status); 1206 status = MPS_DIAG_FAILURE; 1207 goto done; 1208 } 1209 1210 /* 1211 * Process POST reply. 1212 */ 1213 reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply; 1214 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { 1215 status = MPS_DIAG_FAILURE; 1216 mps_dprint(sc, MPS_FAULT, "%s: post of FW Diag Buffer failed " 1217 "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and " 1218 "TransferLength = 0x%x\n", __func__, reply->IOCStatus, 1219 reply->IOCLogInfo, reply->TransferLength); 1220 goto done; 1221 } 1222 1223 /* 1224 * Post was successful. 1225 */ 1226 pBuffer->valid_data = TRUE; 1227 pBuffer->owned_by_firmware = TRUE; 1228 *return_code = MPS_FW_DIAG_ERROR_SUCCESS; 1229 status = MPS_DIAG_SUCCESS; 1230 1231 done: 1232 mps_free_command(sc, cm); 1233 return (status); 1234 } 1235 1236 static int 1237 mps_release_fw_diag_buffer(struct mps_softc *sc, 1238 mps_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code, 1239 uint32_t diag_type) 1240 { 1241 MPI2_DIAG_RELEASE_REQUEST *req; 1242 MPI2_DIAG_RELEASE_REPLY *reply; 1243 struct mps_command *cm = NULL; 1244 int status; 1245 1246 /* 1247 * If buffer is not enabled, just leave. 1248 */ 1249 *return_code = MPS_FW_DIAG_ERROR_RELEASE_FAILED; 1250 if (!pBuffer->enabled) { 1251 mps_dprint(sc, MPS_USER, "%s: This buffer type is not " 1252 "supported by the IOC", __func__); 1253 return (MPS_DIAG_FAILURE); 1254 } 1255 1256 /* 1257 * Clear some flags initially. 1258 */ 1259 pBuffer->force_release = FALSE; 1260 pBuffer->valid_data = FALSE; 1261 pBuffer->owned_by_firmware = FALSE; 1262 1263 /* 1264 * Get a command. 1265 */ 1266 cm = mps_alloc_command(sc); 1267 if (cm == NULL) { 1268 mps_printf(sc, "%s: no mps requests\n", __func__); 1269 return (MPS_DIAG_FAILURE); 1270 } 1271 1272 /* 1273 * Build the request for releasing the FW Diag Buffer and send it. 1274 */ 1275 req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req; 1276 req->Function = MPI2_FUNCTION_DIAG_RELEASE; 1277 req->BufferType = pBuffer->buffer_type; 1278 cm->cm_data = NULL; 1279 cm->cm_length = 0; 1280 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1281 cm->cm_complete_data = NULL; 1282 1283 /* 1284 * Send command synchronously. 1285 */ 1286 status = mps_wait_command(sc, cm, 30, CAN_SLEEP); 1287 if (status) { 1288 mps_printf(sc, "%s: invalid request: error %d\n", __func__, 1289 status); 1290 status = MPS_DIAG_FAILURE; 1291 goto done; 1292 } 1293 1294 /* 1295 * Process RELEASE reply. 1296 */ 1297 reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply; 1298 if ((reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) || 1299 pBuffer->owned_by_firmware) { 1300 status = MPS_DIAG_FAILURE; 1301 mps_dprint(sc, MPS_FAULT, "%s: release of FW Diag Buffer " 1302 "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n", 1303 __func__, reply->IOCStatus, reply->IOCLogInfo); 1304 goto done; 1305 } 1306 1307 /* 1308 * Release was successful. 1309 */ 1310 *return_code = MPS_FW_DIAG_ERROR_SUCCESS; 1311 status = MPS_DIAG_SUCCESS; 1312 1313 /* 1314 * If this was for an UNREGISTER diag type command, clear the unique ID. 1315 */ 1316 if (diag_type == MPS_FW_DIAG_TYPE_UNREGISTER) { 1317 pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID; 1318 } 1319 1320 done: 1321 return (status); 1322 } 1323 1324 static int 1325 mps_diag_register(struct mps_softc *sc, mps_fw_diag_register_t *diag_register, 1326 uint32_t *return_code) 1327 { 1328 mps_fw_diagnostic_buffer_t *pBuffer; 1329 uint8_t extended_type, buffer_type, i; 1330 uint32_t buffer_size; 1331 uint32_t unique_id; 1332 int status; 1333 1334 extended_type = diag_register->ExtendedType; 1335 buffer_type = diag_register->BufferType; 1336 buffer_size = diag_register->RequestedBufferSize; 1337 unique_id = diag_register->UniqueId; 1338 1339 /* 1340 * Check for valid buffer type 1341 */ 1342 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) { 1343 *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1344 return (MPS_DIAG_FAILURE); 1345 } 1346 1347 /* 1348 * Get the current buffer and look up the unique ID. The unique ID 1349 * should not be found. If it is, the ID is already in use. 1350 */ 1351 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1352 pBuffer = &sc->fw_diag_buffer_list[buffer_type]; 1353 if (i != MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1354 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1355 return (MPS_DIAG_FAILURE); 1356 } 1357 1358 /* 1359 * The buffer's unique ID should not be registered yet, and the given 1360 * unique ID cannot be 0. 1361 */ 1362 if ((pBuffer->unique_id != MPS_FW_DIAG_INVALID_UID) || 1363 (unique_id == MPS_FW_DIAG_INVALID_UID)) { 1364 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1365 return (MPS_DIAG_FAILURE); 1366 } 1367 1368 /* 1369 * If this buffer is already posted as immediate, just change owner. 1370 */ 1371 if (pBuffer->immediate && pBuffer->owned_by_firmware && 1372 (pBuffer->unique_id == MPS_FW_DIAG_INVALID_UID)) { 1373 pBuffer->immediate = FALSE; 1374 pBuffer->unique_id = unique_id; 1375 return (MPS_DIAG_SUCCESS); 1376 } 1377 1378 /* 1379 * Post a new buffer after checking if it's enabled. The DMA buffer 1380 * that is allocated will be contiguous (nsegments = 1). 1381 */ 1382 if (!pBuffer->enabled) { 1383 *return_code = MPS_FW_DIAG_ERROR_NO_BUFFER; 1384 return (MPS_DIAG_FAILURE); 1385 } 1386 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 1387 1, 0, /* algnmnt, boundary */ 1388 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1389 BUS_SPACE_MAXADDR, /* highaddr */ 1390 NULL, NULL, /* filter, filterarg */ 1391 buffer_size, /* maxsize */ 1392 1, /* nsegments */ 1393 buffer_size, /* maxsegsize */ 1394 0, /* flags */ 1395 NULL, NULL, /* lockfunc, lockarg */ 1396 &sc->fw_diag_dmat)) { 1397 device_printf(sc->mps_dev, "Cannot allocate FW diag buffer DMA " 1398 "tag\n"); 1399 return (ENOMEM); 1400 } 1401 if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer, 1402 BUS_DMA_NOWAIT, &sc->fw_diag_map)) { 1403 device_printf(sc->mps_dev, "Cannot allocate FW diag buffer " 1404 "memory\n"); 1405 return (ENOMEM); 1406 } 1407 bzero(sc->fw_diag_buffer, buffer_size); 1408 bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map, sc->fw_diag_buffer, 1409 buffer_size, mps_memaddr_cb, &sc->fw_diag_busaddr, 0); 1410 pBuffer->size = buffer_size; 1411 1412 /* 1413 * Copy the given info to the diag buffer and post the buffer. 1414 */ 1415 pBuffer->buffer_type = buffer_type; 1416 pBuffer->immediate = FALSE; 1417 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) { 1418 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4); 1419 i++) { 1420 pBuffer->product_specific[i] = 1421 diag_register->ProductSpecific[i]; 1422 } 1423 } 1424 pBuffer->extended_type = extended_type; 1425 pBuffer->unique_id = unique_id; 1426 status = mps_post_fw_diag_buffer(sc, pBuffer, return_code); 1427 1428 /* 1429 * In case there was a failure, free the DMA buffer. 1430 */ 1431 if (status == MPS_DIAG_FAILURE) { 1432 if (sc->fw_diag_busaddr != 0) 1433 bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map); 1434 if (sc->fw_diag_buffer != NULL) 1435 bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer, 1436 sc->fw_diag_map); 1437 if (sc->fw_diag_dmat != NULL) 1438 bus_dma_tag_destroy(sc->fw_diag_dmat); 1439 } 1440 1441 return (status); 1442 } 1443 1444 static int 1445 mps_diag_unregister(struct mps_softc *sc, 1446 mps_fw_diag_unregister_t *diag_unregister, uint32_t *return_code) 1447 { 1448 mps_fw_diagnostic_buffer_t *pBuffer; 1449 uint8_t i; 1450 uint32_t unique_id; 1451 int status; 1452 1453 unique_id = diag_unregister->UniqueId; 1454 1455 /* 1456 * Get the current buffer and look up the unique ID. The unique ID 1457 * should be there. 1458 */ 1459 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1460 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1461 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1462 return (MPS_DIAG_FAILURE); 1463 } 1464 1465 pBuffer = &sc->fw_diag_buffer_list[i]; 1466 1467 /* 1468 * Try to release the buffer from FW before freeing it. If release 1469 * fails, don't free the DMA buffer in case FW tries to access it 1470 * later. If buffer is not owned by firmware, can't release it. 1471 */ 1472 if (!pBuffer->owned_by_firmware) { 1473 status = MPS_DIAG_SUCCESS; 1474 } else { 1475 status = mps_release_fw_diag_buffer(sc, pBuffer, return_code, 1476 MPS_FW_DIAG_TYPE_UNREGISTER); 1477 } 1478 1479 /* 1480 * At this point, return the current status no matter what happens with 1481 * the DMA buffer. 1482 */ 1483 pBuffer->unique_id = MPS_FW_DIAG_INVALID_UID; 1484 if (status == MPS_DIAG_SUCCESS) { 1485 if (sc->fw_diag_busaddr != 0) 1486 bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map); 1487 if (sc->fw_diag_buffer != NULL) 1488 bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer, 1489 sc->fw_diag_map); 1490 if (sc->fw_diag_dmat != NULL) 1491 bus_dma_tag_destroy(sc->fw_diag_dmat); 1492 } 1493 1494 return (status); 1495 } 1496 1497 static int 1498 mps_diag_query(struct mps_softc *sc, mps_fw_diag_query_t *diag_query, 1499 uint32_t *return_code) 1500 { 1501 mps_fw_diagnostic_buffer_t *pBuffer; 1502 uint8_t i; 1503 uint32_t unique_id; 1504 1505 unique_id = diag_query->UniqueId; 1506 1507 /* 1508 * If ID is valid, query on ID. 1509 * If ID is invalid, query on buffer type. 1510 */ 1511 if (unique_id == MPS_FW_DIAG_INVALID_UID) { 1512 i = diag_query->BufferType; 1513 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) { 1514 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1515 return (MPS_DIAG_FAILURE); 1516 } 1517 } else { 1518 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1519 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1520 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1521 return (MPS_DIAG_FAILURE); 1522 } 1523 } 1524 1525 /* 1526 * Fill query structure with the diag buffer info. 1527 */ 1528 pBuffer = &sc->fw_diag_buffer_list[i]; 1529 diag_query->BufferType = pBuffer->buffer_type; 1530 diag_query->ExtendedType = pBuffer->extended_type; 1531 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) { 1532 for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4); 1533 i++) { 1534 diag_query->ProductSpecific[i] = 1535 pBuffer->product_specific[i]; 1536 } 1537 } 1538 diag_query->TotalBufferSize = pBuffer->size; 1539 diag_query->DriverAddedBufferSize = 0; 1540 diag_query->UniqueId = pBuffer->unique_id; 1541 diag_query->ApplicationFlags = 0; 1542 diag_query->DiagnosticFlags = 0; 1543 1544 /* 1545 * Set/Clear application flags 1546 */ 1547 if (pBuffer->immediate) { 1548 diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_APP_OWNED; 1549 } else { 1550 diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_APP_OWNED; 1551 } 1552 if (pBuffer->valid_data || pBuffer->owned_by_firmware) { 1553 diag_query->ApplicationFlags |= MPS_FW_DIAG_FLAG_BUFFER_VALID; 1554 } else { 1555 diag_query->ApplicationFlags &= ~MPS_FW_DIAG_FLAG_BUFFER_VALID; 1556 } 1557 if (pBuffer->owned_by_firmware) { 1558 diag_query->ApplicationFlags |= 1559 MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS; 1560 } else { 1561 diag_query->ApplicationFlags &= 1562 ~MPS_FW_DIAG_FLAG_FW_BUFFER_ACCESS; 1563 } 1564 1565 return (MPS_DIAG_SUCCESS); 1566 } 1567 1568 static int 1569 mps_diag_read_buffer(struct mps_softc *sc, 1570 mps_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, 1571 uint32_t *return_code) 1572 { 1573 mps_fw_diagnostic_buffer_t *pBuffer; 1574 uint8_t i, *pData; 1575 uint32_t unique_id; 1576 int status; 1577 1578 unique_id = diag_read_buffer->UniqueId; 1579 1580 /* 1581 * Get the current buffer and look up the unique ID. The unique ID 1582 * should be there. 1583 */ 1584 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1585 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1586 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1587 return (MPS_DIAG_FAILURE); 1588 } 1589 1590 pBuffer = &sc->fw_diag_buffer_list[i]; 1591 1592 /* 1593 * Make sure requested read is within limits 1594 */ 1595 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead > 1596 pBuffer->size) { 1597 *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1598 return (MPS_DIAG_FAILURE); 1599 } 1600 1601 /* 1602 * Copy the requested data from DMA to the diag_read_buffer. The DMA 1603 * buffer that was allocated is one contiguous buffer. 1604 */ 1605 pData = (uint8_t *)(sc->fw_diag_buffer + 1606 diag_read_buffer->StartingOffset); 1607 if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0) 1608 return (MPS_DIAG_FAILURE); 1609 diag_read_buffer->Status = 0; 1610 1611 /* 1612 * Set or clear the Force Release flag. 1613 */ 1614 if (pBuffer->force_release) { 1615 diag_read_buffer->Flags |= MPS_FW_DIAG_FLAG_FORCE_RELEASE; 1616 } else { 1617 diag_read_buffer->Flags &= ~MPS_FW_DIAG_FLAG_FORCE_RELEASE; 1618 } 1619 1620 /* 1621 * If buffer is to be reregistered, make sure it's not already owned by 1622 * firmware first. 1623 */ 1624 status = MPS_DIAG_SUCCESS; 1625 if (!pBuffer->owned_by_firmware) { 1626 if (diag_read_buffer->Flags & MPS_FW_DIAG_FLAG_REREGISTER) { 1627 status = mps_post_fw_diag_buffer(sc, pBuffer, 1628 return_code); 1629 } 1630 } 1631 1632 return (status); 1633 } 1634 1635 static int 1636 mps_diag_release(struct mps_softc *sc, mps_fw_diag_release_t *diag_release, 1637 uint32_t *return_code) 1638 { 1639 mps_fw_diagnostic_buffer_t *pBuffer; 1640 uint8_t i; 1641 uint32_t unique_id; 1642 int status; 1643 1644 unique_id = diag_release->UniqueId; 1645 1646 /* 1647 * Get the current buffer and look up the unique ID. The unique ID 1648 * should be there. 1649 */ 1650 i = mps_get_fw_diag_buffer_number(sc, unique_id); 1651 if (i == MPS_FW_DIAGNOSTIC_UID_NOT_FOUND) { 1652 *return_code = MPS_FW_DIAG_ERROR_INVALID_UID; 1653 return (MPS_DIAG_FAILURE); 1654 } 1655 1656 pBuffer = &sc->fw_diag_buffer_list[i]; 1657 1658 /* 1659 * If buffer is not owned by firmware, it's already been released. 1660 */ 1661 if (!pBuffer->owned_by_firmware) { 1662 *return_code = MPS_FW_DIAG_ERROR_ALREADY_RELEASED; 1663 return (MPS_DIAG_FAILURE); 1664 } 1665 1666 /* 1667 * Release the buffer. 1668 */ 1669 status = mps_release_fw_diag_buffer(sc, pBuffer, return_code, 1670 MPS_FW_DIAG_TYPE_RELEASE); 1671 return (status); 1672 } 1673 1674 static int 1675 mps_do_diag_action(struct mps_softc *sc, uint32_t action, uint8_t *diag_action, 1676 uint32_t length, uint32_t *return_code) 1677 { 1678 mps_fw_diag_register_t diag_register; 1679 mps_fw_diag_unregister_t diag_unregister; 1680 mps_fw_diag_query_t diag_query; 1681 mps_diag_read_buffer_t diag_read_buffer; 1682 mps_fw_diag_release_t diag_release; 1683 int status = MPS_DIAG_SUCCESS; 1684 uint32_t original_return_code; 1685 1686 original_return_code = *return_code; 1687 *return_code = MPS_FW_DIAG_ERROR_SUCCESS; 1688 1689 switch (action) { 1690 case MPS_FW_DIAG_TYPE_REGISTER: 1691 if (!length) { 1692 *return_code = 1693 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1694 status = MPS_DIAG_FAILURE; 1695 break; 1696 } 1697 if (copyin(diag_action, &diag_register, 1698 sizeof(diag_register)) != 0) 1699 return (MPS_DIAG_FAILURE); 1700 status = mps_diag_register(sc, &diag_register, 1701 return_code); 1702 break; 1703 1704 case MPS_FW_DIAG_TYPE_UNREGISTER: 1705 if (length < sizeof(diag_unregister)) { 1706 *return_code = 1707 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1708 status = MPS_DIAG_FAILURE; 1709 break; 1710 } 1711 if (copyin(diag_action, &diag_unregister, 1712 sizeof(diag_unregister)) != 0) 1713 return (MPS_DIAG_FAILURE); 1714 status = mps_diag_unregister(sc, &diag_unregister, 1715 return_code); 1716 break; 1717 1718 case MPS_FW_DIAG_TYPE_QUERY: 1719 if (length < sizeof (diag_query)) { 1720 *return_code = 1721 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1722 status = MPS_DIAG_FAILURE; 1723 break; 1724 } 1725 if (copyin(diag_action, &diag_query, sizeof(diag_query)) 1726 != 0) 1727 return (MPS_DIAG_FAILURE); 1728 status = mps_diag_query(sc, &diag_query, return_code); 1729 if (status == MPS_DIAG_SUCCESS) 1730 if (copyout(&diag_query, diag_action, 1731 sizeof (diag_query)) != 0) 1732 return (MPS_DIAG_FAILURE); 1733 break; 1734 1735 case MPS_FW_DIAG_TYPE_READ_BUFFER: 1736 if (copyin(diag_action, &diag_read_buffer, 1737 sizeof(diag_read_buffer)) != 0) 1738 return (MPS_DIAG_FAILURE); 1739 if (length < diag_read_buffer.BytesToRead) { 1740 *return_code = 1741 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1742 status = MPS_DIAG_FAILURE; 1743 break; 1744 } 1745 status = mps_diag_read_buffer(sc, &diag_read_buffer, 1746 PTRIN(diag_read_buffer.PtrDataBuffer), 1747 return_code); 1748 if (status == MPS_DIAG_SUCCESS) { 1749 if (copyout(&diag_read_buffer, diag_action, 1750 sizeof(diag_read_buffer) - 1751 sizeof(diag_read_buffer.PtrDataBuffer)) != 1752 0) 1753 return (MPS_DIAG_FAILURE); 1754 } 1755 break; 1756 1757 case MPS_FW_DIAG_TYPE_RELEASE: 1758 if (length < sizeof(diag_release)) { 1759 *return_code = 1760 MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1761 status = MPS_DIAG_FAILURE; 1762 break; 1763 } 1764 if (copyin(diag_action, &diag_release, 1765 sizeof(diag_release)) != 0) 1766 return (MPS_DIAG_FAILURE); 1767 status = mps_diag_release(sc, &diag_release, 1768 return_code); 1769 break; 1770 1771 default: 1772 *return_code = MPS_FW_DIAG_ERROR_INVALID_PARAMETER; 1773 status = MPS_DIAG_FAILURE; 1774 break; 1775 } 1776 1777 if ((status == MPS_DIAG_FAILURE) && 1778 (original_return_code == MPS_FW_DIAG_NEW) && 1779 (*return_code != MPS_FW_DIAG_ERROR_SUCCESS)) 1780 status = MPS_DIAG_SUCCESS; 1781 1782 return (status); 1783 } 1784 1785 static int 1786 mps_user_diag_action(struct mps_softc *sc, mps_diag_action_t *data) 1787 { 1788 int status; 1789 1790 /* 1791 * Only allow one diag action at one time. 1792 */ 1793 if (sc->mps_flags & MPS_FLAGS_BUSY) { 1794 mps_dprint(sc, MPS_USER, "%s: Only one FW diag command " 1795 "allowed at a single time.", __func__); 1796 return (EBUSY); 1797 } 1798 sc->mps_flags |= MPS_FLAGS_BUSY; 1799 1800 /* 1801 * Send diag action request 1802 */ 1803 if (data->Action == MPS_FW_DIAG_TYPE_REGISTER || 1804 data->Action == MPS_FW_DIAG_TYPE_UNREGISTER || 1805 data->Action == MPS_FW_DIAG_TYPE_QUERY || 1806 data->Action == MPS_FW_DIAG_TYPE_READ_BUFFER || 1807 data->Action == MPS_FW_DIAG_TYPE_RELEASE) { 1808 status = mps_do_diag_action(sc, data->Action, 1809 PTRIN(data->PtrDiagAction), data->Length, 1810 &data->ReturnCode); 1811 } else 1812 status = EINVAL; 1813 1814 sc->mps_flags &= ~MPS_FLAGS_BUSY; 1815 return (status); 1816 } 1817 1818 /* 1819 * Copy the event recording mask and the event queue size out. For 1820 * clarification, the event recording mask (events_to_record) is not the same 1821 * thing as the event mask (event_mask). events_to_record has a bit set for 1822 * every event type that is to be recorded by the driver, and event_mask has a 1823 * bit cleared for every event that is allowed into the driver from the IOC. 1824 * They really have nothing to do with each other. 1825 */ 1826 static void 1827 mps_user_event_query(struct mps_softc *sc, mps_event_query_t *data) 1828 { 1829 uint8_t i; 1830 1831 mps_lock(sc); 1832 data->Entries = MPS_EVENT_QUEUE_SIZE; 1833 1834 for (i = 0; i < 4; i++) { 1835 data->Types[i] = sc->events_to_record[i]; 1836 } 1837 mps_unlock(sc); 1838 } 1839 1840 /* 1841 * Set the driver's event mask according to what's been given. See 1842 * mps_user_event_query for explanation of the event recording mask and the IOC 1843 * event mask. It's the app's responsibility to enable event logging by setting 1844 * the bits in events_to_record. Initially, no events will be logged. 1845 */ 1846 static void 1847 mps_user_event_enable(struct mps_softc *sc, mps_event_enable_t *data) 1848 { 1849 uint8_t i; 1850 1851 mps_lock(sc); 1852 for (i = 0; i < 4; i++) { 1853 sc->events_to_record[i] = data->Types[i]; 1854 } 1855 mps_unlock(sc); 1856 } 1857 1858 /* 1859 * Copy out the events that have been recorded, up to the max events allowed. 1860 */ 1861 static int 1862 mps_user_event_report(struct mps_softc *sc, mps_event_report_t *data) 1863 { 1864 int status = 0; 1865 uint32_t size; 1866 1867 mps_lock(sc); 1868 size = data->Size; 1869 if ((size >= sizeof(sc->recorded_events)) && (status == 0)) { 1870 mps_unlock(sc); 1871 if (copyout((void *)sc->recorded_events, 1872 PTRIN(data->PtrEvents), size) != 0) 1873 status = EFAULT; 1874 mps_lock(sc); 1875 } else { 1876 /* 1877 * data->Size value is not large enough to copy event data. 1878 */ 1879 status = EFAULT; 1880 } 1881 1882 /* 1883 * Change size value to match the number of bytes that were copied. 1884 */ 1885 if (status == 0) 1886 data->Size = sizeof(sc->recorded_events); 1887 mps_unlock(sc); 1888 1889 return (status); 1890 } 1891 1892 /* 1893 * Record events into the driver from the IOC if they are not masked. 1894 */ 1895 void 1896 mpssas_record_event(struct mps_softc *sc, 1897 MPI2_EVENT_NOTIFICATION_REPLY *event_reply) 1898 { 1899 uint32_t event; 1900 int i, j; 1901 uint16_t event_data_len; 1902 boolean_t sendAEN = FALSE; 1903 1904 event = event_reply->Event; 1905 1906 /* 1907 * Generate a system event to let anyone who cares know that a 1908 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the 1909 * event mask is set to. 1910 */ 1911 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { 1912 sendAEN = TRUE; 1913 } 1914 1915 /* 1916 * Record the event only if its corresponding bit is set in 1917 * events_to_record. event_index is the index into recorded_events and 1918 * event_number is the overall number of an event being recorded since 1919 * start-of-day. event_index will roll over; event_number will never 1920 * roll over. 1921 */ 1922 i = (uint8_t)(event / 32); 1923 j = (uint8_t)(event % 32); 1924 if ((i < 4) && ((1 << j) & sc->events_to_record[i])) { 1925 i = sc->event_index; 1926 sc->recorded_events[i].Type = event; 1927 sc->recorded_events[i].Number = ++sc->event_number; 1928 bzero(sc->recorded_events[i].Data, MPS_MAX_EVENT_DATA_LENGTH * 1929 4); 1930 event_data_len = event_reply->EventDataLength; 1931 1932 if (event_data_len > 0) { 1933 /* 1934 * Limit data to size in m_event entry 1935 */ 1936 if (event_data_len > MPS_MAX_EVENT_DATA_LENGTH) { 1937 event_data_len = MPS_MAX_EVENT_DATA_LENGTH; 1938 } 1939 for (j = 0; j < event_data_len; j++) { 1940 sc->recorded_events[i].Data[j] = 1941 event_reply->EventData[j]; 1942 } 1943 1944 /* 1945 * check for index wrap-around 1946 */ 1947 if (++i == MPS_EVENT_QUEUE_SIZE) { 1948 i = 0; 1949 } 1950 sc->event_index = (uint8_t)i; 1951 1952 /* 1953 * Set flag to send the event. 1954 */ 1955 sendAEN = TRUE; 1956 } 1957 } 1958 1959 /* 1960 * Generate a system event if flag is set to let anyone who cares know 1961 * that an event has occurred. 1962 */ 1963 if (sendAEN) { 1964 //SLM-how to send a system event (see kqueue, kevent) 1965 // (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS", 1966 // "SAS", NULL, NULL, DDI_NOSLEEP); 1967 } 1968 } 1969 1970 static int 1971 mps_user_reg_access(struct mps_softc *sc, mps_reg_access_t *data) 1972 { 1973 int status = 0; 1974 1975 switch (data->Command) { 1976 /* 1977 * IO access is not supported. 1978 */ 1979 case REG_IO_READ: 1980 case REG_IO_WRITE: 1981 mps_dprint(sc, MPS_USER, "IO access is not supported. " 1982 "Use memory access."); 1983 status = EINVAL; 1984 break; 1985 1986 case REG_MEM_READ: 1987 data->RegData = mps_regread(sc, data->RegOffset); 1988 break; 1989 1990 case REG_MEM_WRITE: 1991 mps_regwrite(sc, data->RegOffset, data->RegData); 1992 break; 1993 1994 default: 1995 status = EINVAL; 1996 break; 1997 } 1998 1999 return (status); 2000 } 2001 2002 static int 2003 mps_user_btdh(struct mps_softc *sc, mps_btdh_mapping_t *data) 2004 { 2005 uint8_t bt2dh = FALSE; 2006 uint8_t dh2bt = FALSE; 2007 uint16_t dev_handle, bus, target; 2008 2009 bus = data->Bus; 2010 target = data->TargetID; 2011 dev_handle = data->DevHandle; 2012 2013 /* 2014 * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/ 2015 * Target to get DevHandle. When Bus/Target are 0xFFFF and DevHandle is 2016 * not 0xFFFF, use DevHandle to get Bus/Target. Anything else is 2017 * invalid. 2018 */ 2019 if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF)) 2020 dh2bt = TRUE; 2021 if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF)) 2022 bt2dh = TRUE; 2023 if (!dh2bt && !bt2dh) 2024 return (EINVAL); 2025 2026 /* 2027 * Only handle bus of 0. Make sure target is within range. 2028 */ 2029 if (bt2dh) { 2030 if (bus != 0) 2031 return (EINVAL); 2032 2033 if (target > sc->max_devices) { 2034 mps_dprint(sc, MPS_FAULT, "Target ID is out of range " 2035 "for Bus/Target to DevHandle mapping."); 2036 return (EINVAL); 2037 } 2038 dev_handle = sc->mapping_table[target].dev_handle; 2039 if (dev_handle) 2040 data->DevHandle = dev_handle; 2041 } else { 2042 bus = 0; 2043 target = mps_mapping_get_sas_id_from_handle(sc, dev_handle); 2044 data->Bus = bus; 2045 data->TargetID = target; 2046 } 2047 2048 return (0); 2049 } 2050 2051 static int 2052 mps_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag, 2053 struct thread *td) 2054 { 2055 struct mps_softc *sc; 2056 struct mps_cfg_page_req *page_req; 2057 struct mps_ext_cfg_page_req *ext_page_req; 2058 void *mps_page; 2059 int error, msleep_ret; 2060 2061 mps_page = NULL; 2062 sc = dev->si_drv1; 2063 page_req = (void *)arg; 2064 ext_page_req = (void *)arg; 2065 2066 switch (cmd) { 2067 case MPSIO_READ_CFG_HEADER: 2068 mps_lock(sc); 2069 error = mps_user_read_cfg_header(sc, page_req); 2070 mps_unlock(sc); 2071 break; 2072 case MPSIO_READ_CFG_PAGE: 2073 mps_page = malloc(page_req->len, M_MPSUSER, M_WAITOK | M_ZERO); 2074 if(!mps_page) { 2075 mps_printf(sc, "Cannot allocate memory %s %d\n", 2076 __func__, __LINE__); 2077 return (ENOMEM); 2078 } 2079 error = copyin(page_req->buf, mps_page, 2080 sizeof(MPI2_CONFIG_PAGE_HEADER)); 2081 if (error) 2082 break; 2083 mps_lock(sc); 2084 error = mps_user_read_cfg_page(sc, page_req, mps_page); 2085 mps_unlock(sc); 2086 if (error) 2087 break; 2088 error = copyout(mps_page, page_req->buf, page_req->len); 2089 break; 2090 case MPSIO_READ_EXT_CFG_HEADER: 2091 mps_lock(sc); 2092 error = mps_user_read_extcfg_header(sc, ext_page_req); 2093 mps_unlock(sc); 2094 break; 2095 case MPSIO_READ_EXT_CFG_PAGE: 2096 mps_page = malloc(ext_page_req->len, M_MPSUSER, M_WAITOK|M_ZERO); 2097 if(!mps_page) { 2098 mps_printf(sc, "Cannot allocate memory %s %d\n", 2099 __func__, __LINE__); 2100 return (ENOMEM); 2101 } 2102 error = copyin(ext_page_req->buf, mps_page, 2103 sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); 2104 if (error) 2105 break; 2106 mps_lock(sc); 2107 error = mps_user_read_extcfg_page(sc, ext_page_req, mps_page); 2108 mps_unlock(sc); 2109 if (error) 2110 break; 2111 error = copyout(mps_page, ext_page_req->buf, ext_page_req->len); 2112 break; 2113 case MPSIO_WRITE_CFG_PAGE: 2114 mps_page = malloc(page_req->len, M_MPSUSER, M_WAITOK|M_ZERO); 2115 if(!mps_page) { 2116 mps_printf(sc, "Cannot allocate memory %s %d\n", 2117 __func__, __LINE__); 2118 return (ENOMEM); 2119 } 2120 error = copyin(page_req->buf, mps_page, page_req->len); 2121 if (error) 2122 break; 2123 mps_lock(sc); 2124 error = mps_user_write_cfg_page(sc, page_req, mps_page); 2125 mps_unlock(sc); 2126 break; 2127 case MPSIO_MPS_COMMAND: 2128 error = mps_user_command(sc, (struct mps_usr_command *)arg); 2129 break; 2130 case MPTIOCTL_PASS_THRU: 2131 /* 2132 * The user has requested to pass through a command to be 2133 * executed by the MPT firmware. Call our routine which does 2134 * this. Only allow one passthru IOCTL at one time. 2135 */ 2136 error = mps_user_pass_thru(sc, (mps_pass_thru_t *)arg); 2137 break; 2138 case MPTIOCTL_GET_ADAPTER_DATA: 2139 /* 2140 * The user has requested to read adapter data. Call our 2141 * routine which does this. 2142 */ 2143 error = 0; 2144 mps_user_get_adapter_data(sc, (mps_adapter_data_t *)arg); 2145 break; 2146 case MPTIOCTL_GET_PCI_INFO: 2147 /* 2148 * The user has requested to read pci info. Call 2149 * our routine which does this. 2150 */ 2151 mps_lock(sc); 2152 error = 0; 2153 mps_user_read_pci_info(sc, (mps_pci_info_t *)arg); 2154 mps_unlock(sc); 2155 break; 2156 case MPTIOCTL_RESET_ADAPTER: 2157 mps_lock(sc); 2158 sc->port_enable_complete = 0; 2159 uint32_t reinit_start = time_uptime; 2160 error = mps_reinit(sc); 2161 /* Sleep for 300 second. */ 2162 msleep_ret = msleep(&sc->port_enable_complete, &sc->mps_mtx, PRIBIO, 2163 "mps_porten", 300 * hz); 2164 mps_unlock(sc); 2165 if (msleep_ret) 2166 printf("Port Enable did not complete after Diag " 2167 "Reset msleep error %d.\n", msleep_ret); 2168 else 2169 mps_dprint(sc, MPS_USER, 2170 "Hard Reset with Port Enable completed in %d seconds.\n", 2171 (uint32_t) (time_uptime - reinit_start)); 2172 break; 2173 case MPTIOCTL_DIAG_ACTION: 2174 /* 2175 * The user has done a diag buffer action. Call our routine 2176 * which does this. Only allow one diag action at one time. 2177 */ 2178 mps_lock(sc); 2179 error = mps_user_diag_action(sc, (mps_diag_action_t *)arg); 2180 mps_unlock(sc); 2181 break; 2182 case MPTIOCTL_EVENT_QUERY: 2183 /* 2184 * The user has done an event query. Call our routine which does 2185 * this. 2186 */ 2187 error = 0; 2188 mps_user_event_query(sc, (mps_event_query_t *)arg); 2189 break; 2190 case MPTIOCTL_EVENT_ENABLE: 2191 /* 2192 * The user has done an event enable. Call our routine which 2193 * does this. 2194 */ 2195 error = 0; 2196 mps_user_event_enable(sc, (mps_event_enable_t *)arg); 2197 break; 2198 case MPTIOCTL_EVENT_REPORT: 2199 /* 2200 * The user has done an event report. Call our routine which 2201 * does this. 2202 */ 2203 error = mps_user_event_report(sc, (mps_event_report_t *)arg); 2204 break; 2205 case MPTIOCTL_REG_ACCESS: 2206 /* 2207 * The user has requested register access. Call our routine 2208 * which does this. 2209 */ 2210 mps_lock(sc); 2211 error = mps_user_reg_access(sc, (mps_reg_access_t *)arg); 2212 mps_unlock(sc); 2213 break; 2214 case MPTIOCTL_BTDH_MAPPING: 2215 /* 2216 * The user has requested to translate a bus/target to a 2217 * DevHandle or a DevHandle to a bus/target. Call our routine 2218 * which does this. 2219 */ 2220 error = mps_user_btdh(sc, (mps_btdh_mapping_t *)arg); 2221 break; 2222 default: 2223 error = ENOIOCTL; 2224 break; 2225 } 2226 2227 if (mps_page != NULL) 2228 free(mps_page, M_MPSUSER); 2229 2230 return (error); 2231 } 2232 2233 #ifdef COMPAT_FREEBSD32 2234 2235 struct mps_cfg_page_req32 { 2236 MPI2_CONFIG_PAGE_HEADER header; 2237 uint32_t page_address; 2238 uint32_t buf; 2239 int len; 2240 uint16_t ioc_status; 2241 }; 2242 2243 struct mps_ext_cfg_page_req32 { 2244 MPI2_CONFIG_EXTENDED_PAGE_HEADER header; 2245 uint32_t page_address; 2246 uint32_t buf; 2247 int len; 2248 uint16_t ioc_status; 2249 }; 2250 2251 struct mps_raid_action32 { 2252 uint8_t action; 2253 uint8_t volume_bus; 2254 uint8_t volume_id; 2255 uint8_t phys_disk_num; 2256 uint32_t action_data_word; 2257 uint32_t buf; 2258 int len; 2259 uint32_t volume_status; 2260 uint32_t action_data[4]; 2261 uint16_t action_status; 2262 uint16_t ioc_status; 2263 uint8_t write; 2264 }; 2265 2266 struct mps_usr_command32 { 2267 uint32_t req; 2268 uint32_t req_len; 2269 uint32_t rpl; 2270 uint32_t rpl_len; 2271 uint32_t buf; 2272 int len; 2273 uint32_t flags; 2274 }; 2275 2276 #define MPSIO_READ_CFG_HEADER32 _IOWR('M', 200, struct mps_cfg_page_req32) 2277 #define MPSIO_READ_CFG_PAGE32 _IOWR('M', 201, struct mps_cfg_page_req32) 2278 #define MPSIO_READ_EXT_CFG_HEADER32 _IOWR('M', 202, struct mps_ext_cfg_page_req32) 2279 #define MPSIO_READ_EXT_CFG_PAGE32 _IOWR('M', 203, struct mps_ext_cfg_page_req32) 2280 #define MPSIO_WRITE_CFG_PAGE32 _IOWR('M', 204, struct mps_cfg_page_req32) 2281 #define MPSIO_RAID_ACTION32 _IOWR('M', 205, struct mps_raid_action32) 2282 #define MPSIO_MPS_COMMAND32 _IOWR('M', 210, struct mps_usr_command32) 2283 2284 static int 2285 mps_ioctl32(struct cdev *dev, u_long cmd32, void *_arg, int flag, 2286 struct thread *td) 2287 { 2288 struct mps_cfg_page_req32 *page32 = _arg; 2289 struct mps_ext_cfg_page_req32 *ext32 = _arg; 2290 struct mps_raid_action32 *raid32 = _arg; 2291 struct mps_usr_command32 *user32 = _arg; 2292 union { 2293 struct mps_cfg_page_req page; 2294 struct mps_ext_cfg_page_req ext; 2295 struct mps_raid_action raid; 2296 struct mps_usr_command user; 2297 } arg; 2298 u_long cmd; 2299 int error; 2300 2301 switch (cmd32) { 2302 case MPSIO_READ_CFG_HEADER32: 2303 case MPSIO_READ_CFG_PAGE32: 2304 case MPSIO_WRITE_CFG_PAGE32: 2305 if (cmd32 == MPSIO_READ_CFG_HEADER32) 2306 cmd = MPSIO_READ_CFG_HEADER; 2307 else if (cmd32 == MPSIO_READ_CFG_PAGE32) 2308 cmd = MPSIO_READ_CFG_PAGE; 2309 else 2310 cmd = MPSIO_WRITE_CFG_PAGE; 2311 CP(*page32, arg.page, header); 2312 CP(*page32, arg.page, page_address); 2313 PTRIN_CP(*page32, arg.page, buf); 2314 CP(*page32, arg.page, len); 2315 CP(*page32, arg.page, ioc_status); 2316 break; 2317 2318 case MPSIO_READ_EXT_CFG_HEADER32: 2319 case MPSIO_READ_EXT_CFG_PAGE32: 2320 if (cmd32 == MPSIO_READ_EXT_CFG_HEADER32) 2321 cmd = MPSIO_READ_EXT_CFG_HEADER; 2322 else 2323 cmd = MPSIO_READ_EXT_CFG_PAGE; 2324 CP(*ext32, arg.ext, header); 2325 CP(*ext32, arg.ext, page_address); 2326 PTRIN_CP(*ext32, arg.ext, buf); 2327 CP(*ext32, arg.ext, len); 2328 CP(*ext32, arg.ext, ioc_status); 2329 break; 2330 2331 case MPSIO_RAID_ACTION32: 2332 cmd = MPSIO_RAID_ACTION; 2333 CP(*raid32, arg.raid, action); 2334 CP(*raid32, arg.raid, volume_bus); 2335 CP(*raid32, arg.raid, volume_id); 2336 CP(*raid32, arg.raid, phys_disk_num); 2337 CP(*raid32, arg.raid, action_data_word); 2338 PTRIN_CP(*raid32, arg.raid, buf); 2339 CP(*raid32, arg.raid, len); 2340 CP(*raid32, arg.raid, volume_status); 2341 bcopy(raid32->action_data, arg.raid.action_data, 2342 sizeof arg.raid.action_data); 2343 CP(*raid32, arg.raid, ioc_status); 2344 CP(*raid32, arg.raid, write); 2345 break; 2346 2347 case MPSIO_MPS_COMMAND32: 2348 cmd = MPSIO_MPS_COMMAND; 2349 PTRIN_CP(*user32, arg.user, req); 2350 CP(*user32, arg.user, req_len); 2351 PTRIN_CP(*user32, arg.user, rpl); 2352 CP(*user32, arg.user, rpl_len); 2353 PTRIN_CP(*user32, arg.user, buf); 2354 CP(*user32, arg.user, len); 2355 CP(*user32, arg.user, flags); 2356 break; 2357 default: 2358 return (ENOIOCTL); 2359 } 2360 2361 error = mps_ioctl(dev, cmd, &arg, flag, td); 2362 if (error == 0 && (cmd32 & IOC_OUT) != 0) { 2363 switch (cmd32) { 2364 case MPSIO_READ_CFG_HEADER32: 2365 case MPSIO_READ_CFG_PAGE32: 2366 case MPSIO_WRITE_CFG_PAGE32: 2367 CP(arg.page, *page32, header); 2368 CP(arg.page, *page32, page_address); 2369 PTROUT_CP(arg.page, *page32, buf); 2370 CP(arg.page, *page32, len); 2371 CP(arg.page, *page32, ioc_status); 2372 break; 2373 2374 case MPSIO_READ_EXT_CFG_HEADER32: 2375 case MPSIO_READ_EXT_CFG_PAGE32: 2376 CP(arg.ext, *ext32, header); 2377 CP(arg.ext, *ext32, page_address); 2378 PTROUT_CP(arg.ext, *ext32, buf); 2379 CP(arg.ext, *ext32, len); 2380 CP(arg.ext, *ext32, ioc_status); 2381 break; 2382 2383 case MPSIO_RAID_ACTION32: 2384 CP(arg.raid, *raid32, action); 2385 CP(arg.raid, *raid32, volume_bus); 2386 CP(arg.raid, *raid32, volume_id); 2387 CP(arg.raid, *raid32, phys_disk_num); 2388 CP(arg.raid, *raid32, action_data_word); 2389 PTROUT_CP(arg.raid, *raid32, buf); 2390 CP(arg.raid, *raid32, len); 2391 CP(arg.raid, *raid32, volume_status); 2392 bcopy(arg.raid.action_data, raid32->action_data, 2393 sizeof arg.raid.action_data); 2394 CP(arg.raid, *raid32, ioc_status); 2395 CP(arg.raid, *raid32, write); 2396 break; 2397 2398 case MPSIO_MPS_COMMAND32: 2399 PTROUT_CP(arg.user, *user32, req); 2400 CP(arg.user, *user32, req_len); 2401 PTROUT_CP(arg.user, *user32, rpl); 2402 CP(arg.user, *user32, rpl_len); 2403 PTROUT_CP(arg.user, *user32, buf); 2404 CP(arg.user, *user32, len); 2405 CP(arg.user, *user32, flags); 2406 break; 2407 } 2408 } 2409 2410 return (error); 2411 } 2412 #endif /* COMPAT_FREEBSD32 */ 2413 2414 static int 2415 mps_ioctl_devsw(struct cdev *dev, u_long com, caddr_t arg, int flag, 2416 struct thread *td) 2417 { 2418 #ifdef COMPAT_FREEBSD32 2419 if (SV_CURPROC_FLAG(SV_ILP32)) 2420 return (mps_ioctl32(dev, com, arg, flag, td)); 2421 #endif 2422 return (mps_ioctl(dev, com, arg, flag, td)); 2423 } 2424