1 /* 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved. 5 * Support: <fbsd-storage-driver.pdl@broadcom.com> 6 * 7 * Authors: Sumit Saxena <sumit.saxena@broadcom.com> 8 * Chandrakanth Patil <chandrakanth.patil@broadcom.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions are 12 * met: 13 * 14 * 1. Redistributions of source code must retain the above copyright notice, 15 * this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright notice, 17 * this list of conditions and the following disclaimer in the documentation and/or other 18 * materials provided with the distribution. 19 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors 20 * may be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
 *
 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include "mpi3mr_cam.h"
#include "mpi3mr_app.h"
#include "mpi3mr.h"

static d_open_t mpi3mr_open;
static d_close_t mpi3mr_close;
static d_ioctl_t mpi3mr_ioctl;
static d_poll_t mpi3mr_poll;

/* Character device switch table backing the /dev/mpi3mr%d management nodes. */
static struct cdevsw mpi3mr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpi3mr_open,
	.d_close =	mpi3mr_close,
	.d_ioctl =	mpi3mr_ioctl,
	.d_poll =	mpi3mr_poll,
	.d_name =	"mpi3mr",
};

/*
 * Global registry mapping adapter IDs (device unit numbers) to softc
 * pointers.  Populated in mpi3mr_app_attach() and looked up by the IOCTL
 * path via mpi3mr_app_get_adp_instance().
 */
static struct mpi3mr_mgmt_info mpi3mr_mgmt_info;

/*
 * mpi3mr_open - char device open handler
 *
 * No per-open state is kept; always succeeds.
 */
static int
mpi3mr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

/*
 * mpi3mr_close - char device close handler
 *
 * Nothing to tear down; always succeeds.
 */
static int
mpi3mr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

/*
 * mpi3mr_app_attach - Char device registration
 * @sc: Adapter reference
 *
 * This function does char device registration.
 *
 * Return: 0 on success and proper error codes on failure
 */
int
mpi3mr_app_attach(struct mpi3mr_softc *sc)
{

	/* Create a /dev entry for Avenger controller */
	sc->mpi3mr_cdev = make_dev(&mpi3mr_cdevsw, device_get_unit(sc->mpi3mr_dev),
				   UID_ROOT, GID_OPERATOR, 0640, "mpi3mr%d",
				   device_get_unit(sc->mpi3mr_dev));

	if (sc->mpi3mr_cdev == NULL)
		return (ENOMEM);

	/* Stash the softc so the cdev handlers can recover it from si_drv1. */
	sc->mpi3mr_cdev->si_drv1 = sc;

	/* Assign controller instance to mgmt_info structure */
	if (device_get_unit(sc->mpi3mr_dev) == 0)
		memset(&mpi3mr_mgmt_info, 0, sizeof(mpi3mr_mgmt_info));
	mpi3mr_mgmt_info.count++;
	/*
	 * NOTE(review): max_index is used as the next free slot without a
	 * bounds check against the sc_ptr array size - presumably sized for
	 * the maximum supported adapter count; confirm against the
	 * struct mpi3mr_mgmt_info definition.
	 */
	mpi3mr_mgmt_info.sc_ptr[mpi3mr_mgmt_info.max_index] = sc;
	mpi3mr_mgmt_info.max_index++;

	return (0);
}

/*
 * mpi3mr_app_detach - Char device de-registration
 * @sc: Adapter reference
 *
 * Destroys the /dev node and removes this adapter's entry from the global
 * management registry.  Note the registry slot is NULLed but max_index is
 * intentionally left as-is, so adapter IDs of remaining controllers stay
 * stable.
 */
void
mpi3mr_app_detach(struct mpi3mr_softc *sc)
{
	U8 i = 0;

	if (sc->mpi3mr_cdev == NULL)
		return;

	destroy_dev(sc->mpi3mr_cdev);
	for (i = 0; i < mpi3mr_mgmt_info.max_index; i++) {
		if (mpi3mr_mgmt_info.sc_ptr[i] == sc) {
			mpi3mr_mgmt_info.count--;
			mpi3mr_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}
	return;
}

/*
 * mpi3mr_poll - poll(2) handler for the management char device
 *
 * Reports the device readable (POLLIN | POLLRDNORM) once the driver has
 * flagged a pending async event (mpi3mr_aen_triggered); otherwise records
 * the polling thread so it can be woken later via the adapter's selinfo.
 */
static int
mpi3mr_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	int revents = 0;
	struct mpi3mr_softc *sc = NULL;
	sc = dev->si_drv1;

	if ((poll_events & (POLLIN | POLLRDNORM)) &&
	    (sc->mpi3mr_aen_triggered))
		revents |= poll_events & (POLLIN | POLLRDNORM);

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mpi3mr_poll_waiting = 1;
			selrecord(td, &sc->mpi3mr_select);
		}
	}
	return revents;
}

/**
 * mpi3mr_app_get_adp_instance - Get Adapter instance
 * @mrioc_id: Adapter ID
 *
 * This function searches the Adapter reference with mrioc_id;
 * upon found, returns the adapter reference otherwise returns
 * NULL
 *
 * Return: Adapter reference on success and NULL on failure
 */
static struct mpi3mr_softc *
mpi3mr_app_get_adp_instance(U8 mrioc_id)
{
	struct mpi3mr_softc *sc = NULL;

	if (mrioc_id >= mpi3mr_mgmt_info.max_index)
		return NULL;

	/* Slot may be NULL if that adapter has already detached. */
	sc = mpi3mr_mgmt_info.sc_ptr[mrioc_id];
	return sc;
}

/*
 * mpi3mr_app_construct_nvme_sgl - build NVMe-native SGL for an encapsulated
 * NVMe passthrough command
 * @sc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @dma_buffers: pre-mapped user data buffer descriptors
 * @bufcnt: number of buffer descriptors
 *
 * Writes an NVMe SGL at MPI3MR_NVME_CMD_SGL_OFFSET inside the encapsulated
 * command.  A single DMA descriptor is placed inline; multiple descriptors
 * are emitted into the pre-allocated ioctl chain buffer, with the inline
 * entry pointing at that segment.  Every SGL address has the controller's
 * SGE-modifier bits applied (facts.sge_mod_mask/value).
 *
 * Return: 0 on success, -1 on failure
 */
static int
mpi3mr_app_construct_nvme_sgl(struct mpi3mr_softc *sc,
			      Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request,
			      struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt)
{
	struct mpi3mr_nvme_pt_sge *nvme_sgl;
	U64 sgl_dma;
	U8 count;
	U16 available_sges = 0, i;
	U32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
	size_t length = 0;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
	/* Controller-mandated address modifier bits (upper 32 bits). */
	U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) <<
			    sc->facts.sge_mod_shift) << 32);
	U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) <<
			   sc->facts.sge_mod_shift) << 32;

	U32 size;

	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
	    ((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_SGL_OFFSET);

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any SGL.
	 */
	/* Only the first DATA_IN/DATA_OUT buffer is used for the transfer. */
	for (count = 0; count < bufcnt; count++, dma_buff++) {
		if ((dma_buff->data_dir == MPI3MR_APP_DDI) ||
		    (dma_buff->data_dir == MPI3MR_APP_DDO)) {
			length = dma_buff->kern_buf_len;
			break;
		}
	}
	if (!length || !dma_buff->num_dma_desc)
		return 0;

	/* Single descriptor fits directly in the command's inline SGL slot. */
	if (dma_buff->num_dma_desc == 1) {
		available_sges = 1;
		goto build_sges;
	}
	sgl_dma = (U64)sc->ioctl_chain_sge.dma_addr;

	if (sgl_dma & sgemod_mask) {
		printf(IOCNAME "NVMe SGL address collides with SGEModifier\n",sc->name);
		return -1;
	}

	sgl_dma &= ~sgemod_mask;
	sgl_dma |= sgemod_val;

	memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size);
	available_sges = sc->ioctl_chain_sge.size / sge_element_size;
	if (available_sges < dma_buff->num_dma_desc)
		return -1;
	/* Inline entry becomes a last-segment pointer to the chain buffer. */
	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
	nvme_sgl->base_addr = sgl_dma;
	size = dma_buff->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
	nvme_sgl->length = htole32(size);
	nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;

	/* Subsequent data entries are written into the chain buffer itself. */
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *) sc->ioctl_chain_sge.addr;

build_sges:
	for (i = 0; i < dma_buff->num_dma_desc; i++) {
		sgl_dma = htole64(dma_buff->dma_desc[i].dma_addr);
		if (sgl_dma & sgemod_mask) {
			printf("%s: SGL address collides with SGE modifier\n",
			       __func__);
			return -1;
		}

		sgl_dma &= ~sgemod_mask;
		sgl_dma |= sgemod_val;

		nvme_sgl->base_addr = sgl_dma;
		nvme_sgl->length = htole32(dma_buff->dma_desc[i].size);
		nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
		nvme_sgl++;
		available_sges--;
	}

	return 0;
}

/*
 * mpi3mr_app_build_nvme_prp - build PRP entries for an NVMe encapsulated
 * passthrough command
 * @sc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @dma_buffers: pre-mapped user data buffer descriptors
 * @bufcnt: number of buffer descriptors
 *
 * Fills PRP1/PRP2 inside the encapsulated command and, when the transfer
 * spans more than two device pages, allocates a DMA-able PRP list page
 * (nvme_encap_prp_list) and points PRP2 at it.  All PRP addresses get the
 * controller's SGE-modifier bits applied.
 *
 * Return: 0 on success, -1 (or ENOMEM for allocation failures) on failure
 */
static int
mpi3mr_app_build_nvme_prp(struct mpi3mr_softc *sc,
			  Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request,
			  struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt)
{
	int prp_size = MPI3MR_NVME_PRP_SIZE;
	U64 *prp_entry, *prp1_entry, *prp2_entry;
	U64 *prp_page;
	bus_addr_t
prp_entry_dma, prp_page_dma, dma_addr; 269 U32 offset, entry_len, dev_pgsz; 270 U32 page_mask_result, page_mask; 271 size_t length = 0, desc_len; 272 U8 count; 273 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 274 U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) << 275 sc->facts.sge_mod_shift) << 32); 276 U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) << 277 sc->facts.sge_mod_shift) << 32; 278 U16 dev_handle = nvme_encap_request->DevHandle; 279 struct mpi3mr_target *tgtdev; 280 U16 desc_count = 0; 281 282 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle); 283 if (!tgtdev) { 284 printf(IOCNAME "EncapNVMe Error: Invalid DevHandle 0x%02x\n", sc->name, 285 dev_handle); 286 return -1; 287 } 288 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 289 printf(IOCNAME "%s: NVME device page size is zero for handle 0x%04x\n", 290 sc->name, __func__, dev_handle); 291 return -1; 292 } 293 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 294 295 page_mask = dev_pgsz - 1; 296 297 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE){ 298 printf("%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n", 299 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle); 300 return -1; 301 } 302 303 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz){ 304 printf("%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n", 305 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle); 306 return -1; 307 } 308 309 /* 310 * Not all commands require a data transfer. If no data, just return 311 * without constructing any PRP. 
312 */ 313 for (count = 0; count < bufcnt; count++, dma_buff++) { 314 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 315 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 316 length = dma_buff->kern_buf_len; 317 break; 318 } 319 } 320 if (!length || !dma_buff->num_dma_desc) 321 return 0; 322 323 for (count = 0; count < dma_buff->num_dma_desc; count++) { 324 dma_addr = dma_buff->dma_desc[count].dma_addr; 325 if (dma_addr & page_mask) { 326 printf("%s:dma_addr 0x%lu is not aligned with page size 0x%x\n", 327 __func__, dma_addr, dev_pgsz); 328 return -1; 329 } 330 } 331 332 dma_addr = dma_buff->dma_desc[0].dma_addr; 333 desc_len = dma_buff->dma_desc[0].size; 334 335 sc->nvme_encap_prp_sz = 0; 336 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */ 337 4, 0, /* algnmnt, boundary */ 338 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 339 BUS_SPACE_MAXADDR, /* highaddr */ 340 NULL, NULL, /* filter, filterarg */ 341 dev_pgsz, /* maxsize */ 342 1, /* nsegments */ 343 dev_pgsz, /* maxsegsize */ 344 0, /* flags */ 345 NULL, NULL, /* lockfunc, lockarg */ 346 &sc->nvme_encap_prp_list_dmatag)) { 347 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create ioctl NVME kernel buffer dma tag\n"); 348 return (ENOMEM); 349 } 350 351 if (bus_dmamem_alloc(sc->nvme_encap_prp_list_dmatag, (void **)&sc->nvme_encap_prp_list, 352 BUS_DMA_NOWAIT, &sc->nvme_encap_prp_list_dma_dmamap)) { 353 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate ioctl NVME dma memory\n"); 354 return (ENOMEM); 355 } 356 357 bzero(sc->nvme_encap_prp_list, dev_pgsz); 358 bus_dmamap_load(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap, 359 sc->nvme_encap_prp_list, dev_pgsz, mpi3mr_memaddr_cb, &sc->nvme_encap_prp_list_dma, 360 0); 361 362 if (!sc->nvme_encap_prp_list) { 363 printf(IOCNAME "%s:%d Cannot load ioctl NVME dma memory for size: %d\n", sc->name, 364 __func__, __LINE__, dev_pgsz); 365 goto err_out; 366 } 367 sc->nvme_encap_prp_sz = dev_pgsz; 368 369 /* 370 * Set pointers to PRP1 and PRP2, which are in the NVMe 
command. 371 * PRP1 is located at a 24 byte offset from the start of the NVMe 372 * command. Then set the current PRP entry pointer to PRP1. 373 */ 374 prp1_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP1_OFFSET); 375 prp2_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP2_OFFSET); 376 prp_entry = prp1_entry; 377 /* 378 * For the PRP entries, use the specially allocated buffer of 379 * contiguous memory. 380 */ 381 prp_page = sc->nvme_encap_prp_list; 382 prp_page_dma = sc->nvme_encap_prp_list_dma; 383 384 /* 385 * Check if we are within 1 entry of a page boundary we don't 386 * want our first entry to be a PRP List entry. 387 */ 388 page_mask_result = (uintptr_t)((U8 *)prp_page + prp_size) & page_mask; 389 if (!page_mask_result) { 390 printf(IOCNAME "PRP Page is not page aligned\n", sc->name); 391 goto err_out; 392 } 393 394 /* 395 * Set PRP physical pointer, which initially points to the current PRP 396 * DMA memory page. 397 */ 398 prp_entry_dma = prp_page_dma; 399 400 401 /* Loop while the length is not zero. */ 402 while (length) { 403 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 404 if (!page_mask_result && (length > dev_pgsz)) { 405 printf(IOCNAME "Single PRP page is not sufficient\n", sc->name); 406 goto err_out; 407 } 408 409 /* Need to handle if entry will be part of a page. */ 410 offset = dma_addr & page_mask; 411 entry_len = dev_pgsz - offset; 412 413 if (prp_entry == prp1_entry) { 414 /* 415 * Must fill in the first PRP pointer (PRP1) before 416 * moving on. 417 */ 418 *prp1_entry = dma_addr; 419 if (*prp1_entry & sgemod_mask) { 420 printf(IOCNAME "PRP1 address collides with SGEModifier\n", sc->name); 421 goto err_out; 422 } 423 *prp1_entry &= ~sgemod_mask; 424 *prp1_entry |= sgemod_val; 425 426 /* 427 * Now point to the second PRP entry within the 428 * command (PRP2). 
429 */ 430 prp_entry = prp2_entry; 431 } else if (prp_entry == prp2_entry) { 432 /* 433 * Should the PRP2 entry be a PRP List pointer or just 434 * a regular PRP pointer? If there is more than one 435 * more page of data, must use a PRP List pointer. 436 */ 437 if (length > dev_pgsz) { 438 /* 439 * PRP2 will contain a PRP List pointer because 440 * more PRP's are needed with this command. The 441 * list will start at the beginning of the 442 * contiguous buffer. 443 */ 444 *prp2_entry = prp_entry_dma; 445 if (*prp2_entry & sgemod_mask) { 446 printf(IOCNAME "PRP list address collides with SGEModifier\n", sc->name); 447 goto err_out; 448 } 449 *prp2_entry &= ~sgemod_mask; 450 *prp2_entry |= sgemod_val; 451 452 /* 453 * The next PRP Entry will be the start of the 454 * first PRP List. 455 */ 456 prp_entry = prp_page; 457 continue; 458 } else { 459 /* 460 * After this, the PRP Entries are complete. 461 * This command uses 2 PRP's and no PRP list. 462 */ 463 *prp2_entry = dma_addr; 464 if (*prp2_entry & sgemod_mask) { 465 printf(IOCNAME "PRP2 address collides with SGEModifier\n", sc->name); 466 goto err_out; 467 } 468 *prp2_entry &= ~sgemod_mask; 469 *prp2_entry |= sgemod_val; 470 } 471 } else { 472 /* 473 * Put entry in list and bump the addresses. 474 * 475 * After PRP1 and PRP2 are filled in, this will fill in 476 * all remaining PRP entries in a PRP List, one per 477 * each time through the loop. 478 */ 479 *prp_entry = dma_addr; 480 if (*prp_entry & sgemod_mask) { 481 printf(IOCNAME "PRP address collides with SGEModifier\n", sc->name); 482 goto err_out; 483 } 484 *prp_entry &= ~sgemod_mask; 485 *prp_entry |= sgemod_val; 486 prp_entry++; 487 prp_entry_dma += prp_size; 488 } 489 490 /* Decrement length accounting for last partial page. 
*/ 491 if (entry_len >= length) 492 length = 0; 493 else { 494 if (entry_len <= desc_len) { 495 dma_addr += entry_len; 496 desc_len -= entry_len; 497 } 498 if (!desc_len) { 499 if ((++desc_count) >= 500 dma_buff->num_dma_desc) { 501 printf("%s: Invalid len %ld while building PRP\n", 502 __func__, length); 503 goto err_out; 504 } 505 dma_addr = 506 dma_buff->dma_desc[desc_count].dma_addr; 507 desc_len = 508 dma_buff->dma_desc[desc_count].size; 509 } 510 length -= entry_len; 511 } 512 } 513 return 0; 514 err_out: 515 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 516 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 517 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 518 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 519 sc->nvme_encap_prp_list = NULL; 520 } 521 return -1; 522 } 523 524 /** 525 + * mpi3mr_map_data_buffer_dma - build dma descriptors for data 526 + * buffers 527 + * @sc: Adapter instance reference 528 + * @dma_buff: buffer map descriptor 529 + * @desc_count: Number of already consumed dma descriptors 530 + * 531 + * This function computes how many pre-allocated DMA descriptors 532 + * are required for the given data buffer and if those number of 533 + * descriptors are free, then setup the mapping of the scattered 534 + * DMA address to the given data buffer, if the data direction 535 + * of the buffer is DATA_OUT then the actual data is copied to 536 + * the DMA buffers 537 + * 538 + * Return: 0 on success, -1 on failure 539 + */ 540 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_softc *sc, 541 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 542 U8 desc_count) 543 { 544 U16 i, needed_desc = (dma_buffers->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE); 545 U32 buf_len = dma_buffers->kern_buf_len, copied_len = 0; 546 547 if (dma_buffers->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE) 548 needed_desc++; 549 550 if ((needed_desc + desc_count) > 
	    MPI3MR_NUM_IOCTL_SGE) {
		printf("%s: DMA descriptor mapping error %d:%d:%d\n",
		       __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
		return -1;
	}

	dma_buffers->dma_desc = malloc(sizeof(*dma_buffers->dma_desc) * needed_desc,
				       M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!dma_buffers->dma_desc)
		return -1;

	/* Hand out consecutive pre-allocated ioctl SGE slots to this buffer. */
	for (i = 0; i < needed_desc; i++, desc_count++) {

		dma_buffers->dma_desc[i].addr = sc->ioctl_sge[desc_count].addr;
		dma_buffers->dma_desc[i].dma_addr = sc->ioctl_sge[desc_count].dma_addr;

		/* The last slot may be only partially used. */
		if (buf_len < sc->ioctl_sge[desc_count].size)
			dma_buffers->dma_desc[i].size = buf_len;
		else
			dma_buffers->dma_desc[i].size = sc->ioctl_sge[desc_count].size;

		buf_len -= dma_buffers->dma_desc[i].size;
		memset(dma_buffers->dma_desc[i].addr, 0, sc->ioctl_sge[desc_count].size);

		/* DATA_OUT: stage the user data into the DMA buffers now. */
		if (dma_buffers->data_dir == MPI3MR_APP_DDO) {
			/*
			 * NOTE(review): the copyin() return value is ignored;
			 * a faulting user buffer silently leaves zeroed data
			 * here - TODO confirm whether this should fail with -1.
			 */
			copyin(((U8 *)dma_buffers->user_buf + copied_len),
			       dma_buffers->dma_desc[i].addr,
			       dma_buffers->dma_desc[i].size);
			copied_len += dma_buffers->dma_desc[i].size;
		}
	}

	dma_buffers->num_dma_desc = needed_desc;

	return 0;
}

/*
 * mpi3mr_app_get_nvme_data_fmt - extract the data format of an encapsulated
 * NVMe command: bits 15:14 of the first command dword (the NVMe PSDT field),
 * distinguishing PRP from SGL transfers.
 */
static unsigned int
mpi3mr_app_get_nvme_data_fmt(Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request)
{
	U8 format = 0;

	format = ((nvme_encap_request->Command[0] & 0xc000) >> 14);
	return format;
}

/*
 * mpi3mr_total_num_ioctl_sges - count the SGEs needed for all user data
 * buffers that are not already backed by a dedicated kernel buffer.
 * A buffer with no DMA descriptors still consumes one (zero-length) SGE.
 */
static inline U16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
					      U8 bufcnt)
{
	U16 i, sge_count = 0;
	for (i = 0; i < bufcnt; i++, dma_buffers++) {
		if ((dma_buffers->data_dir == MPI3MR_APP_DDN) ||
		    dma_buffers->kern_buf)
			continue;
		sge_count += dma_buffers->num_dma_desc;
		if (!dma_buffers->num_dma_desc)
			sge_count++;
	}
	return sge_count;
}

/*
 * mpi3mr_app_construct_sgl - build the MPI SGL for a passthrough request
 * @sc: Adapter instance reference
 * @mpi_request: MPI request frame
 * @sgl_offset: byte offset of the SGL within the request frame
 * @dma_buffers: pre-mapped user data buffer descriptors
 * @bufcnt: number of buffer descriptors
 * @is_rmc: request carries a RAID management command buffer (buffer 0)
 * @is_rmr: request carries a RAID management response buffer (buffer 1)
 * @num_datasges: number of DATA_IN/DATA_OUT buffers to describe
 *
 * For RAID management commands the command/response buffers get their own
 * simple SGEs and any data SGEs are placed in the command kernel buffer
 * after the user data.  Otherwise data SGEs are written inline in the
 * request frame, spilling into the pre-allocated ioctl chain buffer via a
 * LAST_CHAIN element when the frame runs out of room.
 *
 * Return: 0 on success, -1 on failure
 */
static int
mpi3mr_app_construct_sgl(struct mpi3mr_softc *sc, U8 *mpi_request, U32 sgl_offset,
			 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
			 U8 bufcnt, U8 is_rmc, U8
is_rmr, U8 num_datasges)
{
	U8 *sgl = (mpi_request + sgl_offset), count = 0;
	Mpi3RequestHeader_t *mpi_header = (Mpi3RequestHeader_t *)mpi_request;
	Mpi3MgmtPassthroughRequest_t *rmgmt_req =
	    (Mpi3MgmtPassthroughRequest_t *)mpi_request;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
	U8 flag, sgl_flags, sgl_flags_eob, sgl_flags_last, last_chain_sgl_flags;
	U16 available_sges, i, sges_needed;
	U32 sge_element_size = sizeof(struct _MPI3_SGE_COMMON);
	bool chain_used = false;

	/* Simple SGE, end-of-buffer, end-of-list and chain flag variants. */
	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
		MPI3_SGE_FLAGS_DLAS_SYSTEM;
	sgl_flags_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flags_eob | MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sges_needed = mpi3mr_total_num_ioctl_sges(dma_buffers, bufcnt);

	if (is_rmc) {
		/* Buffer 0 is the RAID management command kernel buffer. */
		mpi3mr_add_sg_single(&rmgmt_req->CommandSGL,
				     sgl_flags_last, dma_buff->kern_buf_len,
				     dma_buff->kern_buf_dma);
		/* Data SGEs live in the command buffer past the user data. */
		sgl = (U8 *) dma_buff->kern_buf + dma_buff->user_buf_len;
		available_sges = (dma_buff->kern_buf_len -
		    dma_buff->user_buf_len) / sge_element_size;
		if (sges_needed > available_sges)
			return -1;
		chain_used = true;
		dma_buff++;
		count++;
		if (is_rmr) {
			/* Buffer 1 is the RAID management response buffer. */
			mpi3mr_add_sg_single(&rmgmt_req->ResponseSGL,
					     sgl_flags_last, dma_buff->kern_buf_len,
					     dma_buff->kern_buf_dma);
			dma_buff++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->ResponseSGL);
		if (num_datasges) {
			i = 0;
			goto build_sges;
		}
	} else {
		if (sgl_offset >= MPI3MR_AREQ_FRAME_SZ)
			return -1;
		available_sges = (MPI3MR_AREQ_FRAME_SZ - sgl_offset) /
		    sge_element_size;
		if (!available_sges)
			return -1;
	}

	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return 0;
	}

	/* SMP passthrough supports at most two simple SGEs and no chaining. */
	if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) {
		if ((sges_needed > 2) || (sges_needed > available_sges))
			return -1;
		for (; count < bufcnt; count++, dma_buff++) {
			if ((dma_buff->data_dir == MPI3MR_APP_DDN) ||
			    !dma_buff->num_dma_desc)
				continue;
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
					     dma_buff->dma_desc[0].size,
					     dma_buff->dma_desc[0].dma_addr);
			sgl += sge_element_size;
		}
		return 0;
	}
	i = 0;

build_sges:
	for (; count < bufcnt; count++, dma_buff++) {
		if (dma_buff->data_dir == MPI3MR_APP_DDN)
			continue;
		if (!dma_buff->num_dma_desc) {
			/* Zero-length buffer still consumes one SGE. */
			if (chain_used && !available_sges)
				return -1;
			/* Reserve the last frame slot for a chain element. */
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags_eob;
			if (num_datasges == 1)
				flag = sgl_flags_last;
			mpi3mr_add_sg_single(sgl, flag, 0, 0);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
			num_datasges--;
			continue;
		}
		for (; i < dma_buff->num_dma_desc; i++) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags;
			/* Final descriptor of a buffer ends the buffer/list. */
			if (i == (dma_buff->num_dma_desc - 1)) {
				if (num_datasges == 1)
					flag = sgl_flags_last;
				else
					flag = sgl_flags_eob;
			}

			mpi3mr_add_sg_single(sgl, flag,
					     dma_buff->dma_desc[i].size,
					     dma_buff->dma_desc[i].dma_addr);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
		}
		num_datasges--;
		i = 0;
	}
	return 0;

setup_chain:
	/* Spill the remaining SGEs into the pre-allocated chain buffer. */
	available_sges = sc->ioctl_chain_sge.size / sge_element_size;
	if (sges_needed > available_sges)
		return -1;
	mpi3mr_add_sg_single(sgl, last_chain_sgl_flags,
			     (sges_needed * sge_element_size), sc->ioctl_chain_sge.dma_addr);
	memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size);
	sgl = (U8 *)sc->ioctl_chain_sge.addr;
	chain_used = true;
	goto build_sges;
}


/**
 * mpi3mr_app_mptcmds - MPI Pass through IOCTL
handler 751 * @dev: char device 752 * @cmd: IOCTL command 753 * @arg: User data payload buffer for the IOCTL 754 * @flag: flags 755 * @thread: threads 756 * 757 * This function is the top level handler for MPI Pass through 758 * IOCTL, this does basic validation of the input data buffers, 759 * identifies the given buffer types and MPI command, allocates 760 * DMAable memory for user given buffers, construstcs SGL 761 * properly and passes the command to the firmware. 762 * 763 * Once the MPI command is completed the driver copies the data 764 * if any and reply, sense information to user provided buffers. 765 * If the command is timed out then issues controller reset 766 * prior to returning. 767 * 768 * Return: 0 on success and proper error codes on failure 769 */ 770 static long 771 mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg, 772 int flag, struct thread *td) 773 { 774 long rval = EINVAL; 775 U8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; 776 U8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF; 777 U16 desc_count = 0; 778 U8 nvme_fmt = 0; 779 U32 tmplen = 0, erbsz = MPI3MR_SENSEBUF_SZ, din_sz = 0, dout_sz = 0; 780 U8 *kern_erb = NULL; 781 U8 *mpi_request = NULL; 782 Mpi3RequestHeader_t *mpi_header = NULL; 783 Mpi3PELReqActionGetCount_t *pel = NULL; 784 Mpi3StatusReplyDescriptor_t *status_desc = NULL; 785 struct mpi3mr_softc *sc = NULL; 786 struct mpi3mr_ioctl_buf_entry_list *buffer_list = NULL; 787 struct mpi3mr_buf_entry *buf_entries = NULL; 788 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers = NULL, *dma_buff = NULL; 789 struct mpi3mr_ioctl_mpirepbuf *mpirepbuf = NULL; 790 struct mpi3mr_ioctl_mptcmd *karg = (struct mpi3mr_ioctl_mptcmd *)uarg; 791 792 793 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 794 if (!sc) 795 return ENODEV; 796 797 if (!sc->ioctl_sges_allocated) { 798 printf("%s: DMA memory was not allocated\n", __func__); 799 return ENOMEM; 800 } 801 802 if (karg->timeout < 
MPI3MR_IOCTL_DEFAULT_TIMEOUT) 803 karg->timeout = MPI3MR_IOCTL_DEFAULT_TIMEOUT; 804 805 if (!karg->mpi_msg_size || !karg->buf_entry_list_size) { 806 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 807 __func__, __LINE__); 808 return rval; 809 } 810 if ((karg->mpi_msg_size * 4) > MPI3MR_AREQ_FRAME_SZ) { 811 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 812 __func__, __LINE__); 813 return rval; 814 } 815 816 mpi_request = malloc(MPI3MR_AREQ_FRAME_SZ, M_MPI3MR, M_NOWAIT | M_ZERO); 817 if (!mpi_request) { 818 printf(IOCNAME "%s: memory allocation failed for mpi_request\n", sc->name, 819 __func__); 820 return ENOMEM; 821 } 822 823 mpi_header = (Mpi3RequestHeader_t *)mpi_request; 824 pel = (Mpi3PELReqActionGetCount_t *)mpi_request; 825 if (copyin(karg->mpi_msg_buf, mpi_request, (karg->mpi_msg_size * 4))) { 826 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 827 __FILE__, __LINE__, __func__); 828 rval = EFAULT; 829 goto out; 830 } 831 832 buffer_list = malloc(karg->buf_entry_list_size, M_MPI3MR, M_NOWAIT | M_ZERO); 833 if (!buffer_list) { 834 printf(IOCNAME "%s: memory allocation failed for buffer_list\n", sc->name, 835 __func__); 836 rval = ENOMEM; 837 goto out; 838 } 839 if (copyin(karg->buf_entry_list, buffer_list, karg->buf_entry_list_size)) { 840 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 841 __FILE__, __LINE__, __func__); 842 rval = EFAULT; 843 goto out; 844 } 845 if (!buffer_list->num_of_buf_entries) { 846 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 847 __func__, __LINE__); 848 rval = EINVAL; 849 goto out; 850 } 851 bufcnt = buffer_list->num_of_buf_entries; 852 dma_buffers = malloc((sizeof(*dma_buffers) * bufcnt), M_MPI3MR, M_NOWAIT | M_ZERO); 853 if (!dma_buffers) { 854 printf(IOCNAME "%s: memory allocation failed for dma_buffers\n", sc->name, 855 __func__); 856 rval = ENOMEM; 857 goto out; 858 } 859 buf_entries = buffer_list->buf_entry; 860 dma_buff = dma_buffers; 861 for (count = 0; 
count < bufcnt; count++, buf_entries++, dma_buff++) { 862 memset(dma_buff, 0, sizeof(*dma_buff)); 863 dma_buff->user_buf = buf_entries->buffer; 864 dma_buff->user_buf_len = buf_entries->buf_len; 865 866 switch (buf_entries->buf_type) { 867 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_CMD: 868 is_rmcb = 1; 869 if ((count != 0) || !buf_entries->buf_len) 870 invalid_be = 1; 871 dma_buff->data_dir = MPI3MR_APP_DDO; 872 break; 873 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_RESP: 874 is_rmrb = 1; 875 if (count != 1 || !is_rmcb || !buf_entries->buf_len) 876 invalid_be = 1; 877 dma_buff->data_dir = MPI3MR_APP_DDI; 878 break; 879 case MPI3MR_IOCTL_BUFTYPE_DATA_IN: 880 din_sz = dma_buff->user_buf_len; 881 din_cnt++; 882 if ((din_cnt > 1) && !is_rmcb) 883 invalid_be = 1; 884 dma_buff->data_dir = MPI3MR_APP_DDI; 885 break; 886 case MPI3MR_IOCTL_BUFTYPE_DATA_OUT: 887 dout_sz = dma_buff->user_buf_len; 888 dout_cnt++; 889 if ((dout_cnt > 1) && !is_rmcb) 890 invalid_be = 1; 891 dma_buff->data_dir = MPI3MR_APP_DDO; 892 break; 893 case MPI3MR_IOCTL_BUFTYPE_MPI_REPLY: 894 mpirep_offset = count; 895 dma_buff->data_dir = MPI3MR_APP_DDN; 896 if (!buf_entries->buf_len) 897 invalid_be = 1; 898 break; 899 case MPI3MR_IOCTL_BUFTYPE_ERR_RESPONSE: 900 erb_offset = count; 901 dma_buff->data_dir = MPI3MR_APP_DDN; 902 if (!buf_entries->buf_len) 903 invalid_be = 1; 904 break; 905 default: 906 invalid_be = 1; 907 break; 908 } 909 if (invalid_be) 910 break; 911 } 912 if (invalid_be) { 913 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 914 __func__, __LINE__); 915 rval = EINVAL; 916 goto out; 917 } 918 919 if (is_rmcb && ((din_sz + dout_sz) > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 920 printf("%s:%d: invalid data transfer size passed for function 0x%x" 921 "din_sz = %d, dout_size = %d\n", __func__, __LINE__, 922 mpi_header->Function, din_sz, dout_sz); 923 rval = EINVAL; 924 goto out; 925 } 926 927 if ((din_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE) || 928 (dout_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 929 
printf("%s:%d: invalid data transfer size passed for function 0x%x" 930 "din_size=%d dout_size=%d\n", __func__, __LINE__, 931 mpi_header->Function, din_sz, dout_sz); 932 rval = EINVAL; 933 goto out; 934 } 935 936 if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) { 937 if ((din_sz > MPI3MR_IOCTL_SGE_SIZE) || 938 (dout_sz > MPI3MR_IOCTL_SGE_SIZE)) { 939 printf("%s:%d: invalid message size passed:%d:%d:%d:%d\n", 940 __func__, __LINE__, din_cnt, dout_cnt, din_sz, dout_sz); 941 rval = EINVAL; 942 goto out; 943 } 944 } 945 946 dma_buff = dma_buffers; 947 for (count = 0; count < bufcnt; count++, dma_buff++) { 948 949 dma_buff->kern_buf_len = dma_buff->user_buf_len; 950 951 if (is_rmcb && !count) { 952 dma_buff->kern_buf = sc->ioctl_chain_sge.addr; 953 dma_buff->kern_buf_len = sc->ioctl_chain_sge.size; 954 dma_buff->kern_buf_dma = sc->ioctl_chain_sge.dma_addr; 955 dma_buff->dma_desc = NULL; 956 dma_buff->num_dma_desc = 0; 957 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 958 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 959 if (copyin(dma_buff->user_buf, dma_buff->kern_buf, tmplen)) { 960 mpi3mr_dprint(sc, MPI3MR_ERROR, "failure at %s() line: %d", 961 __func__, __LINE__); 962 rval = EFAULT; 963 goto out; 964 } 965 } else if (is_rmrb && (count == 1)) { 966 dma_buff->kern_buf = sc->ioctl_resp_sge.addr; 967 dma_buff->kern_buf_len = sc->ioctl_resp_sge.size; 968 dma_buff->kern_buf_dma = sc->ioctl_resp_sge.dma_addr; 969 dma_buff->dma_desc = NULL; 970 dma_buff->num_dma_desc = 0; 971 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 972 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 973 dma_buff->kern_buf_len = tmplen; 974 } else { 975 if (!dma_buff->kern_buf_len) 976 continue; 977 if (mpi3mr_map_data_buffer_dma(sc, dma_buff, desc_count)) { 978 rval = ENOMEM; 979 mpi3mr_dprint(sc, MPI3MR_ERROR, "mapping data buffers failed" 980 "at %s() line: %d\n", __func__, __LINE__); 981 goto out; 982 } 983 desc_count += 
dma_buff->num_dma_desc; 984 } 985 } 986 987 if (erb_offset != 0xFF) { 988 kern_erb = malloc(erbsz, M_MPI3MR, M_NOWAIT | M_ZERO); 989 if (!kern_erb) { 990 printf(IOCNAME "%s:%d Cannot allocate memory for sense buffer\n", sc->name, 991 __func__, __LINE__); 992 rval = ENOMEM; 993 goto out; 994 } 995 } 996 997 if (sc->ioctl_cmds.state & MPI3MR_CMD_PENDING) { 998 printf(IOCNAME "Issue IOCTL: Ioctl command is in use/previous command is pending\n", 999 sc->name); 1000 rval = EAGAIN; 1001 goto out; 1002 } 1003 1004 if (sc->unrecoverable) { 1005 printf(IOCNAME "Issue IOCTL: controller is in unrecoverable state\n", sc->name); 1006 rval = EFAULT; 1007 goto out; 1008 } 1009 1010 if (sc->reset_in_progress) { 1011 printf(IOCNAME "Issue IOCTL: reset in progress\n", sc->name); 1012 rval = EAGAIN; 1013 goto out; 1014 } 1015 if (sc->block_ioctls) { 1016 printf(IOCNAME "Issue IOCTL: IOCTLs are blocked\n", sc->name); 1017 rval = EAGAIN; 1018 goto out; 1019 } 1020 1021 if (mpi_header->Function != MPI3_FUNCTION_NVME_ENCAPSULATED) { 1022 if (mpi3mr_app_construct_sgl(sc, mpi_request, (karg->mpi_msg_size * 4), dma_buffers, 1023 bufcnt, is_rmcb, is_rmrb, (dout_cnt + din_cnt))) { 1024 printf(IOCNAME "Issue IOCTL: sgl build failed\n", sc->name); 1025 rval = EAGAIN; 1026 goto out; 1027 } 1028 1029 } else { 1030 nvme_fmt = mpi3mr_app_get_nvme_data_fmt( 1031 (Mpi3NVMeEncapsulatedRequest_t *)mpi_request); 1032 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { 1033 if (mpi3mr_app_build_nvme_prp(sc, 1034 (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1035 dma_buffers, bufcnt)) { 1036 rval = ENOMEM; 1037 goto out; 1038 } 1039 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || 1040 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { 1041 if (mpi3mr_app_construct_nvme_sgl(sc, (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1042 dma_buffers, bufcnt)) { 1043 rval = EINVAL; 1044 goto out; 1045 } 1046 } else { 1047 printf(IOCNAME "%s: Invalid NVMe Command Format\n", sc->name, 1048 __func__); 1049 rval = EINVAL; 
1050 goto out; 1051 } 1052 } 1053 1054 sc->ioctl_cmds.state = MPI3MR_CMD_PENDING; 1055 sc->ioctl_cmds.is_waiting = 1; 1056 sc->ioctl_cmds.callback = NULL; 1057 sc->ioctl_cmds.is_senseprst = 0; 1058 sc->ioctl_cmds.sensebuf = kern_erb; 1059 memset((sc->ioctl_cmds.reply), 0, sc->reply_sz); 1060 mpi_header->HostTag = MPI3MR_HOSTTAG_IOCTLCMDS; 1061 init_completion(&sc->ioctl_cmds.completion); 1062 rval = mpi3mr_submit_admin_cmd(sc, mpi_request, MPI3MR_AREQ_FRAME_SZ); 1063 if (rval) { 1064 printf(IOCNAME "Issue IOCTL: Admin Post failed\n", sc->name); 1065 goto out_failed; 1066 } 1067 wait_for_completion_timeout(&sc->ioctl_cmds.completion, karg->timeout); 1068 1069 if (!(sc->ioctl_cmds.state & MPI3MR_CMD_COMPLETE)) { 1070 sc->ioctl_cmds.is_waiting = 0; 1071 printf(IOCNAME "Issue IOCTL: command timed out\n", sc->name); 1072 rval = EAGAIN; 1073 if (sc->ioctl_cmds.state & MPI3MR_CMD_RESET) 1074 goto out_failed; 1075 1076 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1077 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL_TIMEOUT; 1078 goto out_failed; 1079 } 1080 1081 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 1082 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 1083 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 1084 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 1085 sc->nvme_encap_prp_list = NULL; 1086 } 1087 1088 if (((sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1089 != MPI3_IOCSTATUS_SUCCESS) && 1090 (sc->mpi3mr_debug & MPI3MR_DEBUG_IOCTL)) { 1091 printf(IOCNAME "Issue IOCTL: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", sc->name, 1092 (sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1093 sc->ioctl_cmds.ioc_loginfo); 1094 } 1095 1096 if ((mpirep_offset != 0xFF) && 1097 dma_buffers[mpirep_offset].user_buf_len) { 1098 dma_buff = &dma_buffers[mpirep_offset]; 1099 dma_buff->kern_buf_len = (sizeof(*mpirepbuf) - 1 + 1100 sc->reply_sz); 1101 
mpirepbuf = malloc(dma_buff->kern_buf_len, M_MPI3MR, M_NOWAIT | M_ZERO); 1102 1103 if (!mpirepbuf) { 1104 printf(IOCNAME "%s: failed obtaining a memory for mpi reply\n", sc->name, 1105 __func__); 1106 rval = ENOMEM; 1107 goto out_failed; 1108 } 1109 if (sc->ioctl_cmds.state & MPI3MR_CMD_REPLYVALID) { 1110 mpirepbuf->mpirep_type = 1111 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_ADDRESS; 1112 memcpy(mpirepbuf->repbuf, sc->ioctl_cmds.reply, sc->reply_sz); 1113 } else { 1114 mpirepbuf->mpirep_type = 1115 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_STATUS; 1116 status_desc = (Mpi3StatusReplyDescriptor_t *) 1117 mpirepbuf->repbuf; 1118 status_desc->IOCStatus = sc->ioctl_cmds.ioc_status; 1119 status_desc->IOCLogInfo = sc->ioctl_cmds.ioc_loginfo; 1120 } 1121 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 1122 if (copyout(mpirepbuf, dma_buff->user_buf, tmplen)) { 1123 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1124 __FILE__, __LINE__, __func__); 1125 rval = EFAULT; 1126 goto out_failed; 1127 } 1128 } 1129 1130 if (erb_offset != 0xFF && sc->ioctl_cmds.sensebuf && 1131 sc->ioctl_cmds.is_senseprst) { 1132 dma_buff = &dma_buffers[erb_offset]; 1133 tmplen = min(erbsz, dma_buff->user_buf_len); 1134 if (copyout(kern_erb, dma_buff->user_buf, tmplen)) { 1135 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1136 __FILE__, __LINE__, __func__); 1137 rval = EFAULT; 1138 goto out_failed; 1139 } 1140 } 1141 1142 dma_buff = dma_buffers; 1143 for (count = 0; count < bufcnt; count++, dma_buff++) { 1144 if ((count == 1) && is_rmrb) { 1145 if (copyout(dma_buff->kern_buf, dma_buff->user_buf,dma_buff->kern_buf_len)) { 1146 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1147 __FILE__, __LINE__, __func__); 1148 rval = EFAULT; 1149 goto out_failed; 1150 } 1151 } else if (dma_buff->data_dir == MPI3MR_APP_DDI) { 1152 tmplen = 0; 1153 for (desc_count = 0; desc_count < dma_buff->num_dma_desc; desc_count++) { 1154 if (copyout(dma_buff->dma_desc[desc_count].addr, 1155 (U8 
*)dma_buff->user_buf+tmplen, 1156 dma_buff->dma_desc[desc_count].size)) { 1157 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1158 __FILE__, __LINE__, __func__); 1159 rval = EFAULT; 1160 goto out_failed; 1161 } 1162 tmplen += dma_buff->dma_desc[desc_count].size; 1163 } 1164 } 1165 } 1166 1167 if ((pel->Function == MPI3_FUNCTION_PERSISTENT_EVENT_LOG) && 1168 (pel->Action == MPI3_PEL_ACTION_GET_COUNT)) 1169 sc->mpi3mr_aen_triggered = 0; 1170 1171 out_failed: 1172 sc->ioctl_cmds.is_senseprst = 0; 1173 sc->ioctl_cmds.sensebuf = NULL; 1174 sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED; 1175 out: 1176 if (kern_erb) 1177 free(kern_erb, M_MPI3MR); 1178 if (buffer_list) 1179 free(buffer_list, M_MPI3MR); 1180 if (mpi_request) 1181 free(mpi_request, M_MPI3MR); 1182 if (dma_buffers) { 1183 dma_buff = dma_buffers; 1184 for (count = 0; count < bufcnt; count++, dma_buff++) { 1185 free(dma_buff->dma_desc, M_MPI3MR); 1186 } 1187 free(dma_buffers, M_MPI3MR); 1188 } 1189 if (mpirepbuf) 1190 free(mpirepbuf, M_MPI3MR); 1191 return rval; 1192 } 1193 1194 /** 1195 * mpi3mr_soft_reset_from_app - Trigger controller reset 1196 * @sc: Adapter instance reference 1197 * 1198 * This function triggers the controller reset from the 1199 * watchdog context and wait for it to complete. It will 1200 * come out of wait upon completion or timeout exaustion. 
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_soft_reset_from_app(struct mpi3mr_softc *sc)
{

	U32 timeout;

	/* if reset is not in progress, trigger soft reset from watchdog context */
	if (!sc->reset_in_progress) {
		sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET;
		sc->reset.reason = MPI3MR_RESET_FROM_IOCTL;

		/* Wait for soft reset to start */
		timeout = 50;
		while (timeout--) {
			if (sc->reset_in_progress == 1)
				break;
			DELAY(100 * 1000);
		}
		/*
		 * NOTE(review): if reset_in_progress becomes 1 only on the
		 * final iteration, timeout has already decremented to 0 and
		 * this falsely reports EFAULT -- confirm whether that edge
		 * case matters here.
		 */
		if (!timeout)
			return EFAULT;
	}

	/* Wait for soft reset to complete */
	int i = 0;
	timeout = sc->ready_timeout;
	while (timeout--) {
		if (sc->reset_in_progress == 0)
			break;
		i++;
		/* Emit a progress message once every five seconds of waiting */
		if (!(i % 5)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "[%2ds]waiting for controller reset to be finished from %s\n", i, __func__);
		}
		DELAY(1000 * 1000);
	}

	/*
	 * In case of soft reset failure or not completed within stipulated time,
	 * fail back to application.
	 */
	if ((!timeout || sc->reset.status))
		return EFAULT;

	return 0;
}


/**
 * mpi3mr_adp_reset - Issue controller reset
 * @sc: Adapter instance reference
 * @data_out_buf: User buffer with reset type
 * @data_out_sz: length of the user buffer.
 *
 * This function identifies the user provided reset type and
 * issues approporiate reset to the controller and wait for that
 * to complete and reinitialize the controller and then returns.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_adp_reset(struct mpi3mr_softc *sc,
    void *data_out_buf, U32 data_out_sz)
{
	long rval = EINVAL;
	struct mpi3mr_ioctl_adpreset adpreset;

	memset(&adpreset, 0, sizeof(adpreset));

	/* User payload must be exactly one adpreset structure */
	if (data_out_sz != sizeof(adpreset)) {
		printf(IOCNAME "Invalid user adpreset buffer size %s() line: %d\n", sc->name,
		    __func__, __LINE__);
		goto out;
	}

	if (copyin(data_out_buf, &adpreset, sizeof(adpreset))) {
		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
		    __func__, __LINE__);
		rval = EFAULT;
		goto out;
	}

	/* Reset type only selects whether a snapdump is taken first */
	switch (adpreset.reset_type) {
	case MPI3MR_IOCTL_ADPRESET_SOFT:
		sc->reset.ioctl_reset_snapdump = false;
		break;
	case MPI3MR_IOCTL_ADPRESET_DIAG_FAULT:
		sc->reset.ioctl_reset_snapdump = true;
		break;
	default:
		printf(IOCNAME "Unknown reset_type(0x%x) issued\n", sc->name,
		    adpreset.reset_type);
		goto out;
	}
	rval = mpi3mr_soft_reset_from_app(sc);
	if (rval)
		printf(IOCNAME "reset handler returned error (0x%lx) for reset type 0x%x\n",
		    sc->name, rval, adpreset.reset_type);

out:
	return rval;
}

/**
 * mpi3mr_app_send_aen - Notify applications of an async event
 * @sc: Adapter instance reference
 *
 * Marks that an AEN has fired and wakes any application that is
 * sleeping in poll() on this device.
 */
void
mpi3mr_app_send_aen(struct mpi3mr_softc *sc)
{
	sc->mpi3mr_aen_triggered = 1;
	if (sc->mpi3mr_poll_waiting) {
		selwakeup(&sc->mpi3mr_select);
		sc->mpi3mr_poll_waiting = 0;
	}
	return;
}

/**
 * mpi3mr_pel_wait_complete - Completion callback for the PEL wait command
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal driver command tracker for the PEL wait
 *
 * On success notifies applications via AEN and re-arms the PEL
 * machinery by fetching the next sequence numbers (unless an abort
 * was requested). On failure retries up to MPI3MR_PELCMDS_RETRYCOUNT
 * times before giving up and clearing pel_wait_pend.
 */
void
mpi3mr_pel_wait_complete(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry = 0;
	Mpi3PELReply_t *pel_reply = NULL;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* Command cleaned up by a controller reset: just release the tracker */
	if (drvr_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drvrcmd;

	/* NOTE(review): messages below say "PELGetSeqNum" but this is the
	 * PELWait completion path -- consider correcting the log text. */
	if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__);
		goto out_failed;
	}
	pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply;

	/* An ABORTED status is expected when mpi3mr_pel_abort() fired */
	if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || ((le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)
	    && (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_ABORTED))){
		printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n",
		    sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status),
		    drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus));
		retry = 1;
	}

	if (retry) {
		if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			drvr_cmd->retry_count++;
			printf(IOCNAME "%s : PELWaitretry=%d\n", sc->name,
			    __func__, drvr_cmd->retry_count);
			mpi3mr_issue_pel_wait(sc, drvr_cmd);
			return;
		}

		printf(IOCNAME "%s :PELWait failed after all retries\n", sc->name,
		    __func__);
		goto out_failed;
	}

	/* A PEL event arrived: wake up listeners */
	mpi3mr_app_send_aen(sc);

	/* Re-arm: fetch the new sequence numbers and issue the next wait */
	if (!sc->pel_abort_requested) {
		sc->pel_cmds.retry_count = 0;
		mpi3mr_send_pel_getseq(sc, &sc->pel_cmds);
	}

	return;
out_failed:
	sc->pel_wait_pend = 0;
cleanup_drvrcmd:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
}

/**
 * mpi3mr_issue_pel_wait - Issue a PEL wait command to the firmware
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal driver command tracker to use for the request
 *
 * Builds and posts an asynchronous (callback-driven) PEL wait request
 * starting at the cached newest sequence number with the currently
 * configured class/locale filters. Retries the admin post up to
 * MPI3MR_PELCMDS_RETRYCOUNT times; on final failure releases the
 * tracker and clears pel_wait_pend.
 */
void
mpi3mr_issue_pel_wait(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry_count = 0;
	Mpi3PELReqActionWait_t pel_wait;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	sc->pel_abort_requested = 0;

	memset(&pel_wait, 0, sizeof(pel_wait));
	drvr_cmd->state = MPI3MR_CMD_PENDING;
	drvr_cmd->is_waiting = 0;
	drvr_cmd->callback = mpi3mr_pel_wait_complete;
	drvr_cmd->ioc_status = 0;
	drvr_cmd->ioc_loginfo = 0;
	pel_wait.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
	pel_wait.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.Action = MPI3_PEL_ACTION_WAIT;
	pel_wait.StartingSequenceNumber = htole32(sc->newest_seqnum);
	pel_wait.Locale = htole16(sc->pel_locale);
	pel_wait.Class = htole16(sc->pel_class);
	pel_wait.WaitTime = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	printf(IOCNAME "Issuing PELWait: seqnum %u class %u locale 0x%08x\n",
	    sc->name, sc->newest_seqnum, sc->pel_class, sc->pel_locale);
retry_pel_wait:
	if (mpi3mr_submit_admin_cmd(sc, &pel_wait, sizeof(pel_wait))) {
		printf(IOCNAME "%s: Issue PELWait IOCTL: Admin Post failed\n", sc->name, __func__);
		if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			retry_count++;
			goto retry_pel_wait;
		}
		goto out_failed;
	}
	return;
out_failed:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
	sc->pel_wait_pend = 0;
	return;
}

/**
 * mpi3mr_send_pel_getseq - Issue a PEL get-sequence-numbers command
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal driver command tracker (released on failure)
 *
 * Posts an asynchronous PEL GET_SEQNUM request whose result lands in
 * the preallocated pel_seq_number DMA buffer; completion is handled by
 * mpi3mr_pel_getseq_complete(). Retries the admin post up to
 * MPI3MR_PELCMDS_RETRYCOUNT times; on final failure releases the
 * tracker and clears pel_wait_pend.
 */
void
mpi3mr_send_pel_getseq(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry_count = 0;
	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	sc->pel_cmds.state = MPI3MR_CMD_PENDING;
	sc->pel_cmds.is_waiting = 0;
	sc->pel_cmds.ioc_status = 0;
	sc->pel_cmds.ioc_loginfo = 0;
	sc->pel_cmds.callback = mpi3mr_pel_getseq_complete;
	pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
	pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM;
	/* Response is DMAed straight into the persistent seq-number buffer */
	mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags,
	    sc->pel_seq_number_sz, sc->pel_seq_number_dma);

retry_pel_getseq:
	if (mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req))) {
		printf(IOCNAME "%s: Issuing PEL GetSeq IOCTL: Admin Post failed\n", sc->name, __func__);
		if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			retry_count++;
			goto retry_pel_getseq;
		}
		goto out_failed;
	}
	return;
out_failed:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
	sc->pel_wait_pend = 0;
}

/**
 * mpi3mr_pel_getseq_complete - Completion callback for PEL GET_SEQNUM
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal driver command tracker for the request
 *
 * On success caches (newest sequence number + 1) as the starting point
 * and chains into mpi3mr_issue_pel_wait(). On failure retries up to
 * MPI3MR_PELCMDS_RETRYCOUNT times before giving up and clearing
 * pel_wait_pend.
 */
void
mpi3mr_pel_getseq_complete(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry = 0;
	Mpi3PELReply_t *pel_reply = NULL;
	Mpi3PELSeq_t *pel_seq_num = (Mpi3PELSeq_t *)sc->pel_seq_number;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* Command cleaned up by a controller reset: just release the tracker */
	if (drvr_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drvrcmd;

	if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__);
		goto out_failed;
	}
	pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply;

	if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)){
		printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n",
		    sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status),
		    drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus));
		retry = 1;
	}

	if (retry) {
		if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			drvr_cmd->retry_count++;
			printf(IOCNAME "%s : PELGetSeqNUM retry=%d\n", sc->name,
			    __func__, drvr_cmd->retry_count);
			mpi3mr_send_pel_getseq(sc, drvr_cmd);
			return;
		}
		printf(IOCNAME "%s :PELGetSeqNUM failed after all retries\n",
		    sc->name, __func__);
		goto out_failed;
	}

	/* Next wait starts after the newest event already logged */
	sc->newest_seqnum = le32toh(pel_seq_num->Newest) + 1;
	drvr_cmd->retry_count = 0;
	mpi3mr_issue_pel_wait(sc, drvr_cmd);
	return;
out_failed:
	sc->pel_wait_pend = 0;
cleanup_drvrcmd:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
}

static int 1505 mpi3mr_pel_getseq(struct mpi3mr_softc *sc) 1506 { 1507 int rval = 0; 1508 U8 sgl_flags = 0; 1509 Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req; 1510 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1511 1512 if (sc->reset_in_progress || sc->block_ioctls) { 1513 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1514 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1515 return -1; 1516 } 1517 1518 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 1519 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1520 sc->pel_cmds.state = MPI3MR_CMD_PENDING; 1521 sc->pel_cmds.is_waiting = 0; 1522 sc->pel_cmds.retry_count = 0; 1523 sc->pel_cmds.ioc_status = 0; 1524 sc->pel_cmds.ioc_loginfo = 0; 1525 sc->pel_cmds.callback = mpi3mr_pel_getseq_complete; 1526 pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1527 pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1528 pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM; 1529 mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags, 1530 sc->pel_seq_number_sz, sc->pel_seq_number_dma); 1531 1532 if ((rval = mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req)))) 1533 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1534 1535 return rval; 1536 } 1537 1538 int 1539 mpi3mr_pel_abort(struct mpi3mr_softc *sc) 1540 { 1541 int retval = 0; 1542 U16 pel_log_status; 1543 Mpi3PELReqActionAbort_t pel_abort_req; 1544 Mpi3PELReply_t *pel_reply = NULL; 1545 1546 if (sc->reset_in_progress || sc->block_ioctls) { 1547 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1548 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1549 return -1; 1550 } 1551 1552 memset(&pel_abort_req, 0, sizeof(pel_abort_req)); 1553 1554 mtx_lock(&sc->pel_abort_cmd.completion.lock); 1555 if (sc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { 1556 printf(IOCNAME "%s: PEL Abort command is in 
use\n", sc->name, __func__); 1557 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1558 return -1; 1559 } 1560 1561 sc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; 1562 sc->pel_abort_cmd.is_waiting = 1; 1563 sc->pel_abort_cmd.callback = NULL; 1564 pel_abort_req.HostTag = htole16(MPI3MR_HOSTTAG_PELABORT); 1565 pel_abort_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1566 pel_abort_req.Action = MPI3_PEL_ACTION_ABORT; 1567 pel_abort_req.AbortHostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1568 1569 sc->pel_abort_requested = 1; 1570 1571 init_completion(&sc->pel_abort_cmd.completion); 1572 retval = mpi3mr_submit_admin_cmd(sc, &pel_abort_req, sizeof(pel_abort_req)); 1573 if (retval) { 1574 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1575 sc->pel_abort_requested = 0; 1576 retval = -1; 1577 goto out_unlock; 1578 } 1579 wait_for_completion_timeout(&sc->pel_abort_cmd.completion, MPI3MR_INTADMCMD_TIMEOUT); 1580 1581 if (!(sc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { 1582 printf(IOCNAME "%s: PEL Abort command timedout\n",sc->name, __func__); 1583 sc->pel_abort_cmd.is_waiting = 0; 1584 retval = -1; 1585 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1586 sc->reset.reason = MPI3MR_RESET_FROM_PELABORT_TIMEOUT; 1587 goto out_unlock; 1588 } 1589 if (((GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1590 || (!(sc->pel_abort_cmd.state & MPI3MR_CMD_REPLYVALID))) { 1591 printf(IOCNAME "%s: PEL Abort command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 1592 sc->name, __func__, GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status), 1593 sc->pel_abort_cmd.ioc_loginfo); 1594 retval = -1; 1595 goto out_unlock; 1596 } 1597 1598 pel_reply = (Mpi3PELReply_t *)sc->pel_abort_cmd.reply; 1599 pel_log_status = le16toh(pel_reply->PELogStatus); 1600 if (pel_log_status != MPI3_PEL_STATUS_SUCCESS) { 1601 printf(IOCNAME "%s: PEL abort command failed, pel_status(0x%04x)\n", 1602 sc->name, __func__, pel_log_status); 1603 retval = -1; 1604 } 1605 1606 
out_unlock: 1607 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1608 sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; 1609 return retval; 1610 } 1611 1612 /** 1613 * mpi3mr_pel_enable - Handler for PEL enable 1614 * @sc: Adapter instance reference 1615 * @data_out_buf: User buffer containing PEL enable data 1616 * @data_out_sz: length of the user buffer. 1617 * 1618 * This function is the handler for PEL enable driver IOCTL. 1619 * Validates the application given class and locale and if 1620 * requires aborts the existing PEL wait request and/or issues 1621 * new PEL wait request to the firmware and returns. 1622 * 1623 * Return: 0 on success and proper error codes on failure. 1624 */ 1625 static long 1626 mpi3mr_pel_enable(struct mpi3mr_softc *sc, 1627 void *data_out_buf, U32 data_out_sz) 1628 { 1629 long rval = EINVAL; 1630 U8 tmp_class; 1631 U16 tmp_locale; 1632 struct mpi3mr_ioctl_pel_enable pel_enable; 1633 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1634 1635 1636 if ((data_out_sz != sizeof(pel_enable) || 1637 (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT))) { 1638 printf(IOCNAME "%s: Invalid user pel_enable buffer size %u\n", 1639 sc->name, __func__, data_out_sz); 1640 goto out; 1641 } 1642 memset(&pel_enable, 0, sizeof(pel_enable)); 1643 if (copyin(data_out_buf, &pel_enable, sizeof(pel_enable))) { 1644 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1645 __func__, __LINE__); 1646 rval = EFAULT; 1647 goto out; 1648 } 1649 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { 1650 printf(IOCNAME "%s: out of range class %d\n", 1651 sc->name, __func__, pel_enable.pel_class); 1652 goto out; 1653 } 1654 1655 if (sc->pel_wait_pend) { 1656 if ((sc->pel_class <= pel_enable.pel_class) && 1657 !((sc->pel_locale & pel_enable.pel_locale) ^ 1658 pel_enable.pel_locale)) { 1659 rval = 0; 1660 goto out; 1661 } else { 1662 pel_enable.pel_locale |= sc->pel_locale; 1663 if (sc->pel_class < pel_enable.pel_class) 1664 pel_enable.pel_class = 
sc->pel_class; 1665 1666 if (mpi3mr_pel_abort(sc)) { 1667 printf(IOCNAME "%s: pel_abort failed, status(%ld)\n", 1668 sc->name, __func__, rval); 1669 goto out; 1670 } 1671 } 1672 } 1673 1674 tmp_class = sc->pel_class; 1675 tmp_locale = sc->pel_locale; 1676 sc->pel_class = pel_enable.pel_class; 1677 sc->pel_locale = pel_enable.pel_locale; 1678 sc->pel_wait_pend = 1; 1679 1680 if ((rval = mpi3mr_pel_getseq(sc))) { 1681 sc->pel_class = tmp_class; 1682 sc->pel_locale = tmp_locale; 1683 sc->pel_wait_pend = 0; 1684 printf(IOCNAME "%s: pel get sequence number failed, status(%ld)\n", 1685 sc->name, __func__, rval); 1686 } 1687 1688 out: 1689 return rval; 1690 } 1691 1692 void 1693 mpi3mr_app_save_logdata(struct mpi3mr_softc *sc, char *event_data, 1694 U16 event_data_size) 1695 { 1696 struct mpi3mr_log_data_entry *entry; 1697 U32 index = sc->log_data_buffer_index, sz; 1698 1699 if (!(sc->log_data_buffer)) 1700 return; 1701 1702 entry = (struct mpi3mr_log_data_entry *) 1703 (sc->log_data_buffer + (index * sc->log_data_entry_size)); 1704 entry->valid_entry = 1; 1705 sz = min(sc->log_data_entry_size, event_data_size); 1706 memcpy(entry->data, event_data, sz); 1707 sc->log_data_buffer_index = 1708 ((++index) % MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES); 1709 mpi3mr_app_send_aen(sc); 1710 } 1711 1712 /** 1713 * mpi3mr_get_logdata - Handler for get log data 1714 * @sc: Adapter instance reference 1715 * @data_in_buf: User buffer to copy the logdata entries 1716 * @data_in_sz: length of the user buffer. 1717 * 1718 * This function copies the log data entries to the user buffer 1719 * when log caching is enabled in the driver. 
1720 * 1721 * Return: 0 on success and proper error codes on failure 1722 */ 1723 static long 1724 mpi3mr_get_logdata(struct mpi3mr_softc *sc, 1725 void *data_in_buf, U32 data_in_sz) 1726 { 1727 long rval = EINVAL; 1728 U16 num_entries = 0; 1729 U16 entry_sz = sc->log_data_entry_size; 1730 1731 if ((!sc->log_data_buffer) || (data_in_sz < entry_sz)) 1732 return rval; 1733 1734 num_entries = data_in_sz / entry_sz; 1735 if (num_entries > MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES) 1736 num_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1737 1738 if ((rval = copyout(sc->log_data_buffer, data_in_buf, (num_entries * entry_sz)))) { 1739 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1740 rval = EFAULT; 1741 } 1742 1743 return rval; 1744 } 1745 1746 /** 1747 * mpi3mr_logdata_enable - Handler for log data enable 1748 * @sc: Adapter instance reference 1749 * @data_in_buf: User buffer to copy the max logdata entry count 1750 * @data_in_sz: length of the user buffer. 1751 * 1752 * This function enables log data caching in the driver if not 1753 * already enabled and return the maximum number of log data 1754 * entries that can be cached in the driver. 
1755 * 1756 * Return: 0 on success and proper error codes on failure 1757 */ 1758 static long 1759 mpi3mr_logdata_enable(struct mpi3mr_softc *sc, 1760 void *data_in_buf, U32 data_in_sz) 1761 { 1762 long rval = EINVAL; 1763 struct mpi3mr_ioctl_logdata_enable logdata_enable; 1764 1765 if (data_in_sz < sizeof(logdata_enable)) 1766 return rval; 1767 1768 if (sc->log_data_buffer) 1769 goto copy_data; 1770 1771 sc->log_data_entry_size = (sc->reply_sz - (sizeof(Mpi3EventNotificationReply_t) - 4)) 1772 + MPI3MR_IOCTL_LOGDATA_ENTRY_HEADER_SZ; 1773 1774 sc->log_data_buffer = malloc((MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES * sc->log_data_entry_size), 1775 M_MPI3MR, M_NOWAIT | M_ZERO); 1776 if (!sc->log_data_buffer) { 1777 printf(IOCNAME "%s log data buffer memory allocation failed\n", sc->name, __func__); 1778 return ENOMEM; 1779 } 1780 1781 sc->log_data_buffer_index = 0; 1782 1783 copy_data: 1784 memset(&logdata_enable, 0, sizeof(logdata_enable)); 1785 logdata_enable.max_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1786 1787 if ((rval = copyout(&logdata_enable, data_in_buf, sizeof(logdata_enable)))) { 1788 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1789 rval = EFAULT; 1790 } 1791 1792 return rval; 1793 } 1794 1795 /** 1796 * mpi3mr_get_change_count - Get topology change count 1797 * @sc: Adapter instance reference 1798 * @data_in_buf: User buffer to copy the change count 1799 * @data_in_sz: length of the user buffer. 1800 * 1801 * This function copies the toplogy change count provided by the 1802 * driver in events and cached in the driver to the user 1803 * provided buffer for the specific controller. 
1804 * 1805 * Return: 0 on success and proper error codes on failure 1806 */ 1807 static long 1808 mpi3mr_get_change_count(struct mpi3mr_softc *sc, 1809 void *data_in_buf, U32 data_in_sz) 1810 { 1811 long rval = EINVAL; 1812 struct mpi3mr_ioctl_chgcnt chg_count; 1813 memset(&chg_count, 0, sizeof(chg_count)); 1814 1815 chg_count.change_count = sc->change_count; 1816 if (data_in_sz >= sizeof(chg_count)) { 1817 if ((rval = copyout(&chg_count, data_in_buf, sizeof(chg_count)))) { 1818 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1819 __LINE__, __func__); 1820 rval = EFAULT; 1821 } 1822 } 1823 return rval; 1824 } 1825 1826 /** 1827 * mpi3mr_get_alltgtinfo - Get all targets information 1828 * @sc: Adapter instance reference 1829 * @data_in_buf: User buffer to copy the target information 1830 * @data_in_sz: length of the user buffer. 1831 * 1832 * This function copies the driver managed target devices device 1833 * handle, persistent ID, bus ID and taret ID to the user 1834 * provided buffer for the specific controller. This function 1835 * also provides the number of devices managed by the driver for 1836 * the specific controller. 
1837 * 1838 * Return: 0 on success and proper error codes on failure 1839 */ 1840 static long 1841 mpi3mr_get_alltgtinfo(struct mpi3mr_softc *sc, 1842 void *data_in_buf, U32 data_in_sz) 1843 { 1844 long rval = EINVAL; 1845 U8 get_count = 0; 1846 U16 i = 0, num_devices = 0; 1847 U32 min_entrylen = 0, kern_entrylen = 0, user_entrylen = 0; 1848 struct mpi3mr_target *tgtdev = NULL; 1849 struct mpi3mr_device_map_info *devmap_info = NULL; 1850 struct mpi3mr_cam_softc *cam_sc = sc->cam_sc; 1851 struct mpi3mr_ioctl_all_tgtinfo *all_tgtinfo = (struct mpi3mr_ioctl_all_tgtinfo *)data_in_buf; 1852 1853 if (data_in_sz < sizeof(uint32_t)) { 1854 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1855 __LINE__, __func__); 1856 goto out; 1857 } 1858 if (data_in_sz == sizeof(uint32_t)) 1859 get_count = 1; 1860 1861 if (TAILQ_EMPTY(&cam_sc->tgt_list)) { 1862 get_count = 1; 1863 goto copy_usrbuf; 1864 } 1865 1866 mtx_lock_spin(&cam_sc->sc->target_lock); 1867 TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) { 1868 num_devices++; 1869 } 1870 mtx_unlock_spin(&cam_sc->sc->target_lock); 1871 1872 if (get_count) 1873 goto copy_usrbuf; 1874 1875 kern_entrylen = num_devices * sizeof(*devmap_info); 1876 1877 devmap_info = malloc(kern_entrylen, M_MPI3MR, M_NOWAIT | M_ZERO); 1878 if (!devmap_info) { 1879 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1880 __LINE__, __func__); 1881 rval = ENOMEM; 1882 goto out; 1883 } 1884 memset((U8*)devmap_info, 0xFF, kern_entrylen); 1885 1886 mtx_lock_spin(&cam_sc->sc->target_lock); 1887 TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) { 1888 if (i < num_devices) { 1889 devmap_info[i].handle = tgtdev->dev_handle; 1890 devmap_info[i].per_id = tgtdev->per_id; 1891 /*n 1892 * For hidden/ugood device the target_id and bus_id should be 0xFFFFFFFF and 0xFF 1893 */ 1894 if (!tgtdev->exposed_to_os) { 1895 devmap_info[i].target_id = 0xFFFFFFFF; 1896 devmap_info[i].bus_id = 0xFF; 1897 } else { 1898 devmap_info[i].target_id = tgtdev->tid; 
1899 devmap_info[i].bus_id = 0; 1900 } 1901 i++; 1902 } 1903 } 1904 num_devices = i; 1905 mtx_unlock_spin(&cam_sc->sc->target_lock); 1906 1907 copy_usrbuf: 1908 if (copyout(&num_devices, &all_tgtinfo->num_devices, sizeof(num_devices))) { 1909 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1910 __LINE__, __func__); 1911 rval = EFAULT; 1912 goto out; 1913 } 1914 user_entrylen = (data_in_sz - sizeof(uint32_t))/sizeof(*devmap_info); 1915 user_entrylen *= sizeof(*devmap_info); 1916 min_entrylen = min(user_entrylen, kern_entrylen); 1917 if (min_entrylen && (copyout(devmap_info, &all_tgtinfo->dmi, min_entrylen))) { 1918 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1919 __FILE__, __LINE__, __func__); 1920 rval = EFAULT; 1921 goto out; 1922 } 1923 rval = 0; 1924 out: 1925 if (devmap_info) 1926 free(devmap_info, M_MPI3MR); 1927 1928 return rval; 1929 } 1930 1931 /** 1932 * mpi3mr_get_tgtinfo - Get specific target information 1933 * @sc: Adapter instance reference 1934 * @karg: driver ponter to users payload buffer 1935 * 1936 * This function copies the driver managed specific target device 1937 * info like handle, persistent ID, bus ID and taret ID to the user 1938 * provided buffer for the specific controller. 
1939 * 1940 * Return: 0 on success and proper error codes on failure 1941 */ 1942 static long 1943 mpi3mr_get_tgtinfo(struct mpi3mr_softc *sc, 1944 struct mpi3mr_ioctl_drvcmd *karg) 1945 { 1946 long rval = EINVAL; 1947 struct mpi3mr_target *tgtdev = NULL; 1948 struct mpi3mr_ioctl_tgtinfo tgtinfo; 1949 1950 memset(&tgtinfo, 0, sizeof(tgtinfo)); 1951 1952 if ((karg->data_out_size != sizeof(struct mpi3mr_ioctl_tgtinfo)) || 1953 (karg->data_in_size != sizeof(struct mpi3mr_ioctl_tgtinfo))) { 1954 printf(IOCNAME "Invalid user tgtinfo buffer size %s() line: %d\n", sc->name, 1955 __func__, __LINE__); 1956 goto out; 1957 } 1958 1959 if (copyin(karg->data_out_buf, &tgtinfo, sizeof(tgtinfo))) { 1960 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1961 __func__, __LINE__); 1962 rval = EFAULT; 1963 goto out; 1964 } 1965 1966 if ((tgtinfo.bus_id != 0xFF) && (tgtinfo.target_id != 0xFFFFFFFF)) { 1967 if ((tgtinfo.persistent_id != 0xFFFF) || 1968 (tgtinfo.dev_handle != 0xFFFF)) 1969 goto out; 1970 tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.target_id); 1971 } else if (tgtinfo.persistent_id != 0xFFFF) { 1972 if ((tgtinfo.bus_id != 0xFF) || 1973 (tgtinfo.dev_handle !=0xFFFF) || 1974 (tgtinfo.target_id != 0xFFFFFFFF)) 1975 goto out; 1976 tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.persistent_id); 1977 } else if (tgtinfo.dev_handle !=0xFFFF) { 1978 if ((tgtinfo.bus_id != 0xFF) || 1979 (tgtinfo.target_id != 0xFFFFFFFF) || 1980 (tgtinfo.persistent_id != 0xFFFF)) 1981 goto out; 1982 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tgtinfo.dev_handle); 1983 } 1984 if (!tgtdev) 1985 goto out; 1986 1987 tgtinfo.target_id = tgtdev->per_id; 1988 tgtinfo.bus_id = 0; 1989 tgtinfo.dev_handle = tgtdev->dev_handle; 1990 tgtinfo.persistent_id = tgtdev->per_id; 1991 tgtinfo.seq_num = 0; 1992 1993 if (copyout(&tgtinfo, karg->data_in_buf, sizeof(tgtinfo))) { 1994 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1995 __func__, __LINE__); 1996 rval = EFAULT; 
1997 } 1998 1999 out: 2000 return rval; 2001 } 2002 2003 /** 2004 * mpi3mr_get_pciinfo - Get PCI info IOCTL handler 2005 * @sc: Adapter instance reference 2006 * @data_in_buf: User buffer to hold adapter information 2007 * @data_in_sz: length of the user buffer. 2008 * 2009 * This function provides the PCI spec information for the 2010 * given controller 2011 * 2012 * Return: 0 on success and proper error codes on failure 2013 */ 2014 static long 2015 mpi3mr_get_pciinfo(struct mpi3mr_softc *sc, 2016 void *data_in_buf, U32 data_in_sz) 2017 { 2018 long rval = EINVAL; 2019 U8 i; 2020 struct mpi3mr_ioctl_pciinfo pciinfo; 2021 memset(&pciinfo, 0, sizeof(pciinfo)); 2022 2023 for (i = 0; i < 64; i++) 2024 pciinfo.config_space[i] = pci_read_config(sc->mpi3mr_dev, (i * 4), 4); 2025 2026 if (data_in_sz >= sizeof(pciinfo)) { 2027 if ((rval = copyout(&pciinfo, data_in_buf, sizeof(pciinfo)))) { 2028 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2029 __FILE__, __LINE__, __func__); 2030 rval = EFAULT; 2031 } 2032 } 2033 return rval; 2034 } 2035 2036 /** 2037 * mpi3mr_get_adpinfo - Get adapter info IOCTL handler 2038 * @sc: Adapter instance reference 2039 * @data_in_buf: User buffer to hold adapter information 2040 * @data_in_sz: length of the user buffer. 
2041 * 2042 * This function provides adapter information for the given 2043 * controller 2044 * 2045 * Return: 0 on success and proper error codes on failure 2046 */ 2047 static long 2048 mpi3mr_get_adpinfo(struct mpi3mr_softc *sc, 2049 void *data_in_buf, U32 data_in_sz) 2050 { 2051 long rval = EINVAL; 2052 struct mpi3mr_ioctl_adpinfo adpinfo; 2053 enum mpi3mr_iocstate ioc_state; 2054 memset(&adpinfo, 0, sizeof(adpinfo)); 2055 2056 adpinfo.adp_type = MPI3MR_IOCTL_ADPTYPE_AVGFAMILY; 2057 adpinfo.pci_dev_id = pci_get_device(sc->mpi3mr_dev); 2058 adpinfo.pci_dev_hw_rev = pci_read_config(sc->mpi3mr_dev, PCIR_REVID, 1); 2059 adpinfo.pci_subsys_dev_id = pci_get_subdevice(sc->mpi3mr_dev); 2060 adpinfo.pci_subsys_ven_id = pci_get_subvendor(sc->mpi3mr_dev); 2061 adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev);; 2062 adpinfo.pci_dev = pci_get_slot(sc->mpi3mr_dev); 2063 adpinfo.pci_func = pci_get_function(sc->mpi3mr_dev); 2064 adpinfo.pci_seg_id = pci_get_domain(sc->mpi3mr_dev); 2065 adpinfo.ioctl_ver = MPI3MR_IOCTL_VERSION; 2066 memcpy((U8 *)&adpinfo.driver_info, (U8 *)&sc->driver_info, sizeof(adpinfo.driver_info)); 2067 2068 ioc_state = mpi3mr_get_iocstate(sc); 2069 2070 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 2071 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_UNRECOVERABLE; 2072 else if (sc->reset_in_progress || sc->block_ioctls) 2073 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_IN_RESET; 2074 else if (ioc_state == MRIOC_STATE_FAULT) 2075 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_FAULT; 2076 else 2077 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_OPERATIONAL; 2078 2079 if (data_in_sz >= sizeof(adpinfo)) { 2080 if ((rval = copyout(&adpinfo, data_in_buf, sizeof(adpinfo)))) { 2081 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2082 __FILE__, __LINE__, __func__); 2083 rval = EFAULT; 2084 } 2085 } 2086 return rval; 2087 } 2088 /** 2089 * mpi3mr_app_drvrcmds - Driver IOCTL handler 2090 * @dev: char device 2091 * @cmd: IOCTL command 2092 * @arg: User data payload buffer for the 
IOCTL
 * @flag: flags
 * @thread: threads
 *
 * This function is the top level handler for driver commands,
 * this does basic validation of the buffer and identifies the
 * opcode and switches to correct sub handler.
 *
 * Return: 0 on success and proper error codes on failure
 */

static int
mpi3mr_app_drvrcmds(struct cdev *dev, u_long cmd,
    void *uarg, int flag, struct thread *td)
{
	long rval = EINVAL;
	struct mpi3mr_softc *sc = NULL;
	/* Every driver-command payload starts with a mpi3mr_ioctl_drvcmd header. */
	struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)uarg;

	sc = mpi3mr_app_get_adp_instance(karg->mrioc_id);
	if (!sc)
		return ENODEV;

	/* Serialize driver commands via the ioctl completion lock. */
	mtx_lock(&sc->ioctl_cmds.completion.lock);
	switch (karg->opcode) {
	case MPI3MR_DRVRIOCTL_OPCODE_ADPINFO:
		rval = mpi3mr_get_adpinfo(sc, karg->data_in_buf, karg->data_in_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_GETPCIINFO:
		rval = mpi3mr_get_pciinfo(sc, karg->data_in_buf, karg->data_in_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_TGTDEVINFO:
		rval = mpi3mr_get_tgtinfo(sc, karg);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_ALLTGTDEVINFO:
		rval = mpi3mr_get_alltgtinfo(sc, karg->data_in_buf, karg->data_in_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_GETCHGCNT:
		rval = mpi3mr_get_change_count(sc, karg->data_in_buf, karg->data_in_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_LOGDATAENABLE:
		rval = mpi3mr_logdata_enable(sc, karg->data_in_buf, karg->data_in_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_GETLOGDATA:
		rval = mpi3mr_get_logdata(sc, karg->data_in_buf, karg->data_in_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_PELENABLE:
		/* PEL enable and adapter reset consume the out (user->driver) buffer. */
		rval = mpi3mr_pel_enable(sc, karg->data_out_buf, karg->data_out_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_ADPRESET:
		rval = mpi3mr_adp_reset(sc, karg->data_out_buf, karg->data_out_size);
		break;
	case MPI3MR_DRVRIOCTL_OPCODE_UNKNOWN:	/* FALLTHROUGH */
	default:
		printf("Unsupported drvr ioctl opcode 0x%x\n", karg->opcode);
		break;
	}
	mtx_unlock(&sc->ioctl_cmds.completion.lock);
	return rval;
}
/**
 * mpi3mr_ioctl - IOCTL Handler
 * @dev: char device
 * @cmd: IOCTL command
 * @arg: User data payload buffer for the IOCTL
 * @flag: flags
 * @thread: threads
 *
 * This is the IOCTL entry point which checks the command type and
 * executes proper sub handler specific for the command.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int
mpi3mr_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	int rval = EINVAL;

	struct mpi3mr_softc *sc = NULL;
	/*
	 * NOTE(review): arg is interpreted as a mpi3mr_ioctl_drvcmd header
	 * before cmd is checked; this assumes every supported ioctl payload
	 * leads with mrioc_id — confirm against the MPI3MRMPTCMD layout.
	 */
	struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)arg;

	sc = mpi3mr_app_get_adp_instance(karg->mrioc_id);

	if (!sc)
		return ENODEV;

	/*
	 * pend_ioctls counts in-flight ioctls; it is decremented on every
	 * exit path below.
	 */
	mpi3mr_atomic_inc(&sc->pend_ioctls);


	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "Return back IOCTL, shutdown is in progress\n");
		mpi3mr_atomic_dec(&sc->pend_ioctls);
		return ENODEV;
	}

	switch (cmd) {
	case MPI3MRDRVCMD:
		/* Driver-private commands; sub-handler takes its own lock. */
		rval = mpi3mr_app_drvrcmds(dev, cmd, arg, flag, td);
		break;
	case MPI3MRMPTCMD:
		/* MPT passthrough, serialized with driver commands above. */
		mtx_lock(&sc->ioctl_cmds.completion.lock);
		rval = mpi3mr_app_mptcmds(dev, cmd, arg, flag, td);
		mtx_unlock(&sc->ioctl_cmds.completion.lock);
		break;
	default:
		printf("%s:Unsupported ioctl cmd (0x%08lx)\n", MPI3MR_DRIVER_NAME, cmd);
		break;
	}

	mpi3mr_atomic_dec(&sc->pend_ioctls);

	return rval;
}