/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/mpt/mpt.h>

struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

void
mpt_user_ready(struct mpt_softc *mpt)
{
}

int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}

void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
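
/*
 * Helpers that allocate and release the DMA-able bounce buffer used to
 * carry config pages and RAID action data between the IOC and the
 * caller's buffer.  Requests larger than 16MB are refused.
 */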
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}

static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
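
/*
 * Write the caller-supplied copy of a config page back to the IOC as
 * the new current values.  The page header (copied in along with the
 * page data) must mark the page changeable or persistent; otherwise
 * the request is rejected with EINVAL.
 */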
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	u_int hdr_attr;
	int error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
		    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if 0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if 0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply. The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
    struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

#ifdef __amd64__
#define	PTRIN(p)	((void *)(uintptr_t)(p))
#define	PTROUT(v)	((u_int32_t)(uintptr_t)(v))
#endif
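
/*
 * Ioctl handler for the mpt character device.  On amd64, 32-bit
 * variants of each request are converted to the native layout on entry
 * and converted back before returning.
 */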
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}