1 /* 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2020-2024, Broadcom Inc. All rights reserved. 5 * Support: <fbsd-storage-driver.pdl@broadcom.com> 6 * 7 * Authors: Sumit Saxena <sumit.saxena@broadcom.com> 8 * Chandrakanth Patil <chandrakanth.patil@broadcom.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions are 12 * met: 13 * 14 * 1. Redistributions of source code must retain the above copyright notice, 15 * this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright notice, 17 * this list of conditions and the following disclaimer in the documentation and/or other 18 * materials provided with the distribution. 19 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors 20 * may be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies,either expressed or implied, of the FreeBSD Project.
 *
 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
 *
 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include "mpi3mr_cam.h"
#include "mpi3mr_app.h"
#include "mpi3mr.h"

static d_open_t mpi3mr_open;
static d_close_t mpi3mr_close;
static d_ioctl_t mpi3mr_ioctl;
static d_poll_t mpi3mr_poll;

/*
 * Character-device switch for the per-controller /dev/mpi3mr%d node
 * created in mpi3mr_app_attach(); the management application talks to
 * the driver through these entry points.
 */
static struct cdevsw mpi3mr_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = mpi3mr_open,
	.d_close = mpi3mr_close,
	.d_ioctl = mpi3mr_ioctl,
	.d_poll = mpi3mr_poll,
	.d_name = "mpi3mr",
};

/*
 * Global registry of adapter instances, indexed by adapter ID, used by
 * the IOCTL path to translate a user-supplied controller ID into a
 * softc pointer.
 */
static struct mpi3mr_mgmt_info mpi3mr_mgmt_info;

/*
 * Device open handler: no per-open state is tracked, so always succeed.
 */
static int
mpi3mr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

/*
 * Device close handler: nothing to release.
 */
static int
mpi3mr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

/*
 * mpi3mr_app_attach - Char device registration
 * @sc: Adapter reference
 *
 * This function does char device registration.
 *
 * Return: 0 on success and proper error codes on failure
 */
int
mpi3mr_app_attach(struct mpi3mr_softc *sc)
{

	/* Create a /dev entry for Avenger controller */
	sc->mpi3mr_cdev = make_dev(&mpi3mr_cdevsw, device_get_unit(sc->mpi3mr_dev),
				   UID_ROOT, GID_OPERATOR, 0640, "mpi3mr%d",
				   device_get_unit(sc->mpi3mr_dev));

	if (sc->mpi3mr_cdev == NULL)
		return (ENOMEM);

	sc->mpi3mr_cdev->si_drv1 = sc;

	/* Assign controller instance to mgmt_info structure */
	/* Unit 0 attaches first, so use it to reset the global registry. */
	if (device_get_unit(sc->mpi3mr_dev) == 0)
		memset(&mpi3mr_mgmt_info, 0, sizeof(mpi3mr_mgmt_info));
	mpi3mr_mgmt_info.count++;
	/*
	 * NOTE(review): max_index is used as the next free slot and is not
	 * bounds-checked against the sc_ptr[] capacity here - presumably the
	 * array is sized for the maximum supported adapter count; verify in
	 * the header that declares struct mpi3mr_mgmt_info.
	 */
	mpi3mr_mgmt_info.sc_ptr[mpi3mr_mgmt_info.max_index] = sc;
	mpi3mr_mgmt_info.max_index++;

	return (0);
}

/*
 * mpi3mr_app_detach - Char device de-registration
 * @sc: Adapter reference
 *
 * Destroys the /dev node and clears this adapter's slot in the global
 * mgmt_info registry.  max_index is intentionally left unchanged so the
 * IDs of the remaining adapters stay stable.
 */
void
mpi3mr_app_detach(struct mpi3mr_softc *sc)
{
	U8 i = 0;

	if (sc->mpi3mr_cdev == NULL)
		return;

	destroy_dev(sc->mpi3mr_cdev);
	for (i = 0; i < mpi3mr_mgmt_info.max_index; i++) {
		if (mpi3mr_mgmt_info.sc_ptr[i] == sc) {
			mpi3mr_mgmt_info.count--;
			mpi3mr_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}
	return;
}

/*
 * Poll handler: reports the device readable when an AEN has been
 * triggered; otherwise records the polling thread via selrecord() so it
 * can be woken later.
 */
static int
mpi3mr_poll(struct cdev *dev, int poll_events, struct thread *td)
{
	int revents = 0;
	struct mpi3mr_softc *sc = NULL;
	sc = dev->si_drv1;

	if ((poll_events & (POLLIN | POLLRDNORM)) &&
	    (sc->mpi3mr_aen_triggered))
		revents |= poll_events & (POLLIN | POLLRDNORM);

	if (revents == 0) {
		if (poll_events & (POLLIN | POLLRDNORM)) {
			sc->mpi3mr_poll_waiting = 1;
			selrecord(td, &sc->mpi3mr_select);
		}
	}
	return revents;
}

/**
 * mpi3mr_app_get_adp_instance - Get Adapter instance
 * @mrioc_id: Adapter ID
 *
 * This function searches the adapter reference with mrioc_id;
 * if found, returns the adapter reference, otherwise returns
 * NULL
 *
 * Return: Adapter reference on success and NULL on failure
 */
static struct mpi3mr_softc *
mpi3mr_app_get_adp_instance(U8 mrioc_id)
{
	struct mpi3mr_softc *sc = NULL;

	if (mrioc_id >= mpi3mr_mgmt_info.max_index)
		return NULL;

	sc = mpi3mr_mgmt_info.sc_ptr[mrioc_id];
	return sc;
}

/*
 * mpi3mr_app_construct_nvme_sgl - Build NVMe-native SGL for an
 * encapsulated NVMe command, either inline (single descriptor) or in
 * the pre-allocated ioctl chain buffer (multiple descriptors).
 */
static int
mpi3mr_app_construct_nvme_sgl(struct mpi3mr_softc *sc,
    Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request,
    struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt)
{
	struct mpi3mr_nvme_pt_sge *nvme_sgl;
	U64 sgl_dma;
	U8 count;
	U16 available_sges = 0, i;
	U32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
	size_t length = 0;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
	/* SGE modifier bits live in the upper 32 bits of the address. */
	U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) <<
			    sc->facts.sge_mod_shift) << 32);
	U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) <<
			   sc->facts.sge_mod_shift) << 32;

	U32 size;

	/* SGL field sits at a fixed offset inside the NVMe command. */
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
	    ((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_SGL_OFFSET);

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any SGL.
201 */ 202 for (count = 0; count < bufcnt; count++, dma_buff++) { 203 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 204 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 205 length = dma_buff->kern_buf_len; 206 break; 207 } 208 } 209 if (!length || !dma_buff->num_dma_desc) 210 return 0; 211 212 if (dma_buff->num_dma_desc == 1) { 213 available_sges = 1; 214 goto build_sges; 215 } 216 sgl_dma = (U64)sc->ioctl_chain_sge.dma_addr; 217 218 if (sgl_dma & sgemod_mask) { 219 printf(IOCNAME "NVMe SGL address collides with SGEModifier\n",sc->name); 220 return -1; 221 } 222 223 sgl_dma &= ~sgemod_mask; 224 sgl_dma |= sgemod_val; 225 226 memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size); 227 available_sges = sc->ioctl_chain_sge.size / sge_element_size; 228 if (available_sges < dma_buff->num_dma_desc) 229 return -1; 230 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); 231 nvme_sgl->base_addr = sgl_dma; 232 size = dma_buff->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge); 233 nvme_sgl->length = htole32(size); 234 nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT; 235 236 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) sc->ioctl_chain_sge.addr; 237 238 build_sges: 239 for (i = 0; i < dma_buff->num_dma_desc; i++) { 240 sgl_dma = htole64(dma_buff->dma_desc[i].dma_addr); 241 if (sgl_dma & sgemod_mask) { 242 printf("%s: SGL address collides with SGE modifier\n", 243 __func__); 244 return -1; 245 } 246 247 sgl_dma &= ~sgemod_mask; 248 sgl_dma |= sgemod_val; 249 250 nvme_sgl->base_addr = sgl_dma; 251 nvme_sgl->length = htole32(dma_buff->dma_desc[i].size); 252 nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT; 253 nvme_sgl++; 254 available_sges--; 255 } 256 257 return 0; 258 } 259 260 static int 261 mpi3mr_app_build_nvme_prp(struct mpi3mr_softc *sc, 262 Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request, 263 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt) 264 { 265 int prp_size = MPI3MR_NVME_PRP_SIZE; 266 U64 *prp_entry, *prp1_entry, *prp2_entry; 267 U64 *prp_page; 268 bus_addr_t 
prp_entry_dma, prp_page_dma, dma_addr; 269 U32 offset, entry_len, dev_pgsz; 270 U32 page_mask_result, page_mask; 271 size_t length = 0, desc_len; 272 U8 count; 273 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 274 U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) << 275 sc->facts.sge_mod_shift) << 32); 276 U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) << 277 sc->facts.sge_mod_shift) << 32; 278 U16 dev_handle = nvme_encap_request->DevHandle; 279 struct mpi3mr_target *tgtdev; 280 U16 desc_count = 0; 281 282 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle); 283 if (!tgtdev) { 284 printf(IOCNAME "EncapNVMe Error: Invalid DevHandle 0x%02x\n", sc->name, 285 dev_handle); 286 return -1; 287 } 288 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 289 printf(IOCNAME "%s: NVME device page size is zero for handle 0x%04x\n", 290 sc->name, __func__, dev_handle); 291 return -1; 292 } 293 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 294 295 page_mask = dev_pgsz - 1; 296 297 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE){ 298 printf("%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n", 299 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle); 300 return -1; 301 } 302 303 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz){ 304 printf("%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n", 305 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle); 306 return -1; 307 } 308 309 /* 310 * Not all commands require a data transfer. If no data, just return 311 * without constructing any PRP. 
312 */ 313 for (count = 0; count < bufcnt; count++, dma_buff++) { 314 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 315 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 316 length = dma_buff->kern_buf_len; 317 break; 318 } 319 } 320 if (!length || !dma_buff->num_dma_desc) 321 return 0; 322 323 for (count = 0; count < dma_buff->num_dma_desc; count++) { 324 dma_addr = dma_buff->dma_desc[count].dma_addr; 325 if (dma_addr & page_mask) { 326 printf("%s:dma_addr 0x%lu is not aligned with page size 0x%x\n", 327 __func__, dma_addr, dev_pgsz); 328 return -1; 329 } 330 } 331 332 dma_addr = dma_buff->dma_desc[0].dma_addr; 333 desc_len = dma_buff->dma_desc[0].size; 334 335 sc->nvme_encap_prp_sz = 0; 336 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */ 337 4, 0, /* algnmnt, boundary */ 338 sc->dma_loaddr, /* lowaddr */ 339 BUS_SPACE_MAXADDR, /* highaddr */ 340 NULL, NULL, /* filter, filterarg */ 341 dev_pgsz, /* maxsize */ 342 1, /* nsegments */ 343 dev_pgsz, /* maxsegsize */ 344 0, /* flags */ 345 NULL, NULL, /* lockfunc, lockarg */ 346 &sc->nvme_encap_prp_list_dmatag)) { 347 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create ioctl NVME kernel buffer dma tag\n"); 348 return (ENOMEM); 349 } 350 351 if (bus_dmamem_alloc(sc->nvme_encap_prp_list_dmatag, (void **)&sc->nvme_encap_prp_list, 352 BUS_DMA_NOWAIT, &sc->nvme_encap_prp_list_dma_dmamap)) { 353 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate ioctl NVME dma memory\n"); 354 return (ENOMEM); 355 } 356 357 bzero(sc->nvme_encap_prp_list, dev_pgsz); 358 bus_dmamap_load(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap, 359 sc->nvme_encap_prp_list, dev_pgsz, mpi3mr_memaddr_cb, &sc->nvme_encap_prp_list_dma, 360 BUS_DMA_NOWAIT); 361 362 if (!sc->nvme_encap_prp_list) { 363 printf(IOCNAME "%s:%d Cannot load ioctl NVME dma memory for size: %d\n", sc->name, 364 __func__, __LINE__, dev_pgsz); 365 goto err_out; 366 } 367 sc->nvme_encap_prp_sz = dev_pgsz; 368 369 /* 370 * Set pointers to PRP1 and PRP2, which are in the 
NVMe command. 371 * PRP1 is located at a 24 byte offset from the start of the NVMe 372 * command. Then set the current PRP entry pointer to PRP1. 373 */ 374 prp1_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP1_OFFSET); 375 prp2_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP2_OFFSET); 376 prp_entry = prp1_entry; 377 /* 378 * For the PRP entries, use the specially allocated buffer of 379 * contiguous memory. 380 */ 381 prp_page = sc->nvme_encap_prp_list; 382 prp_page_dma = sc->nvme_encap_prp_list_dma; 383 384 /* 385 * Check if we are within 1 entry of a page boundary we don't 386 * want our first entry to be a PRP List entry. 387 */ 388 page_mask_result = (uintptr_t)((U8 *)prp_page + prp_size) & page_mask; 389 if (!page_mask_result) { 390 printf(IOCNAME "PRP Page is not page aligned\n", sc->name); 391 goto err_out; 392 } 393 394 /* 395 * Set PRP physical pointer, which initially points to the current PRP 396 * DMA memory page. 397 */ 398 prp_entry_dma = prp_page_dma; 399 400 401 /* Loop while the length is not zero. */ 402 while (length) { 403 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 404 if (!page_mask_result && (length > dev_pgsz)) { 405 printf(IOCNAME "Single PRP page is not sufficient\n", sc->name); 406 goto err_out; 407 } 408 409 /* Need to handle if entry will be part of a page. */ 410 offset = dma_addr & page_mask; 411 entry_len = dev_pgsz - offset; 412 413 if (prp_entry == prp1_entry) { 414 /* 415 * Must fill in the first PRP pointer (PRP1) before 416 * moving on. 417 */ 418 *prp1_entry = dma_addr; 419 if (*prp1_entry & sgemod_mask) { 420 printf(IOCNAME "PRP1 address collides with SGEModifier\n", sc->name); 421 goto err_out; 422 } 423 *prp1_entry &= ~sgemod_mask; 424 *prp1_entry |= sgemod_val; 425 426 /* 427 * Now point to the second PRP entry within the 428 * command (PRP2). 
429 */ 430 prp_entry = prp2_entry; 431 } else if (prp_entry == prp2_entry) { 432 /* 433 * Should the PRP2 entry be a PRP List pointer or just 434 * a regular PRP pointer? If there is more than one 435 * more page of data, must use a PRP List pointer. 436 */ 437 if (length > dev_pgsz) { 438 /* 439 * PRP2 will contain a PRP List pointer because 440 * more PRP's are needed with this command. The 441 * list will start at the beginning of the 442 * contiguous buffer. 443 */ 444 *prp2_entry = prp_entry_dma; 445 if (*prp2_entry & sgemod_mask) { 446 printf(IOCNAME "PRP list address collides with SGEModifier\n", sc->name); 447 goto err_out; 448 } 449 *prp2_entry &= ~sgemod_mask; 450 *prp2_entry |= sgemod_val; 451 452 /* 453 * The next PRP Entry will be the start of the 454 * first PRP List. 455 */ 456 prp_entry = prp_page; 457 continue; 458 } else { 459 /* 460 * After this, the PRP Entries are complete. 461 * This command uses 2 PRP's and no PRP list. 462 */ 463 *prp2_entry = dma_addr; 464 if (*prp2_entry & sgemod_mask) { 465 printf(IOCNAME "PRP2 address collides with SGEModifier\n", sc->name); 466 goto err_out; 467 } 468 *prp2_entry &= ~sgemod_mask; 469 *prp2_entry |= sgemod_val; 470 } 471 } else { 472 /* 473 * Put entry in list and bump the addresses. 474 * 475 * After PRP1 and PRP2 are filled in, this will fill in 476 * all remaining PRP entries in a PRP List, one per 477 * each time through the loop. 478 */ 479 *prp_entry = dma_addr; 480 if (*prp_entry & sgemod_mask) { 481 printf(IOCNAME "PRP address collides with SGEModifier\n", sc->name); 482 goto err_out; 483 } 484 *prp_entry &= ~sgemod_mask; 485 *prp_entry |= sgemod_val; 486 prp_entry++; 487 prp_entry_dma += prp_size; 488 } 489 490 /* Decrement length accounting for last partial page. 
*/ 491 if (entry_len >= length) 492 length = 0; 493 else { 494 if (entry_len <= desc_len) { 495 dma_addr += entry_len; 496 desc_len -= entry_len; 497 } 498 if (!desc_len) { 499 if ((++desc_count) >= 500 dma_buff->num_dma_desc) { 501 printf("%s: Invalid len %ld while building PRP\n", 502 __func__, length); 503 goto err_out; 504 } 505 dma_addr = 506 dma_buff->dma_desc[desc_count].dma_addr; 507 desc_len = 508 dma_buff->dma_desc[desc_count].size; 509 } 510 length -= entry_len; 511 } 512 } 513 return 0; 514 err_out: 515 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 516 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 517 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 518 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 519 sc->nvme_encap_prp_list = NULL; 520 } 521 return -1; 522 } 523 524 /** 525 + * mpi3mr_map_data_buffer_dma - build dma descriptors for data 526 + * buffers 527 + * @sc: Adapter instance reference 528 + * @dma_buff: buffer map descriptor 529 + * @desc_count: Number of already consumed dma descriptors 530 + * 531 + * This function computes how many pre-allocated DMA descriptors 532 + * are required for the given data buffer and if those number of 533 + * descriptors are free, then setup the mapping of the scattered 534 + * DMA address to the given data buffer, if the data direction 535 + * of the buffer is DATA_OUT then the actual data is copied to 536 + * the DMA buffers 537 + * 538 + * Return: 0 on success, -1 on failure 539 + */ 540 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_softc *sc, 541 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 542 U8 desc_count) 543 { 544 U16 i, needed_desc = (dma_buffers->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE); 545 U32 buf_len = dma_buffers->kern_buf_len, copied_len = 0; 546 int error; 547 548 if (dma_buffers->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE) 549 needed_desc++; 550 551 if ((needed_desc + 
	    desc_count) > MPI3MR_NUM_IOCTL_SGE) {
		printf("%s: DMA descriptor mapping error %d:%d:%d\n",
		       __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
		return -1;
	}

	dma_buffers->dma_desc = malloc(sizeof(*dma_buffers->dma_desc) * needed_desc,
				       M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!dma_buffers->dma_desc)
		return -1;

	error = 0;
	for (i = 0; i < needed_desc; i++, desc_count++) {

		/* Map this descriptor onto the next pre-allocated ioctl SGE. */
		dma_buffers->dma_desc[i].addr = sc->ioctl_sge[desc_count].addr;
		dma_buffers->dma_desc[i].dma_addr = sc->ioctl_sge[desc_count].dma_addr;

		/* Clamp the final descriptor to the remaining buffer length. */
		if (buf_len < sc->ioctl_sge[desc_count].size)
			dma_buffers->dma_desc[i].size = buf_len;
		else
			dma_buffers->dma_desc[i].size = sc->ioctl_sge[desc_count].size;

		buf_len -= dma_buffers->dma_desc[i].size;
		memset(dma_buffers->dma_desc[i].addr, 0, sc->ioctl_sge[desc_count].size);

		/* DATA_OUT buffers: copy the user payload in right away. */
		if (dma_buffers->data_dir == MPI3MR_APP_DDO) {
			error = copyin(((U8 *)dma_buffers->user_buf + copied_len),
				       dma_buffers->dma_desc[i].addr,
				       dma_buffers->dma_desc[i].size);
			if (error != 0)
				break;
			copied_len += dma_buffers->dma_desc[i].size;
		}
	}
	if (error != 0) {
		printf("%s: DMA copyin error %d\n", __func__, error);
		free(dma_buffers->dma_desc, M_MPI3MR);
		return -1;
	}

	dma_buffers->num_dma_desc = needed_desc;

	return 0;
}

/*
 * Extract the NVMe data transfer format (PRP vs SGL1/SGL2) encoded in
 * bits 15:14 of DWORD0 of the encapsulated NVMe command.
 */
static unsigned int
mpi3mr_app_get_nvme_data_fmt(Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request)
{
	U8 format = 0;

	format = ((nvme_encap_request->Command[0] & 0xc000) >> 14);
	return format;
}

/*
 * Count the SGEs needed for all user data buffers.  Buffers that use a
 * kernel bounce buffer or carry no data are skipped; a data buffer with
 * zero DMA descriptors still consumes one (zero-length) SGE.
 */
static inline U16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
					      U8 bufcnt)
{
	U16 i, sge_count = 0;
	for (i=0; i < bufcnt; i++, dma_buffers++) {
		if ((dma_buffers->data_dir == MPI3MR_APP_DDN) ||
		    dma_buffers->kern_buf)
			continue;
		sge_count += dma_buffers->num_dma_desc;
		if (!dma_buffers->num_dma_desc)
			sge_count++;
	}
	return sge_count;
}

/*
 * mpi3mr_app_construct_sgl - Build the MPI SGL for a passthrough request
 * @sc: Adapter instance reference
 * @mpi_request: MPI request frame being built
 * @sgl_offset: Offset of the SGL within the request frame
 * @dma_buffers: DMA buffer descriptors for the user buffers
 * @bufcnt: Number of buffer descriptors
 * @is_rmc: Request has a RAID-management command buffer (buffer 0)
 * @is_rmr: Request has a RAID-management response buffer (buffer 1)
 * @num_datasges: Number of DATA_IN/DATA_OUT buffers needing SGEs
 *
 * Places simple SGEs in the request frame (or, for RAID-management
 * commands, in the tail of the command bounce buffer) and falls back to
 * a chain SGE pointing at the pre-allocated ioctl chain buffer when the
 * in-frame space runs out.
 *
 * Return: 0 on success, -1 on failure
 */
static int
mpi3mr_app_construct_sgl(struct mpi3mr_softc *sc, U8 *mpi_request, U32 sgl_offset,
			 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers,
			 U8 bufcnt, U8 is_rmc, U8 is_rmr, U8 num_datasges)
{
	U8 *sgl = (mpi_request + sgl_offset), count = 0;
	Mpi3RequestHeader_t *mpi_header = (Mpi3RequestHeader_t *)mpi_request;
	Mpi3MgmtPassthroughRequest_t *rmgmt_req =
	    (Mpi3MgmtPassthroughRequest_t *)mpi_request;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers;
	U8 flag, sgl_flags, sgl_flags_eob, sgl_flags_last, last_chain_sgl_flags;
	U16 available_sges, i, sges_needed;
	U32 sge_element_size = sizeof(struct _MPI3_SGE_COMMON);
	bool chain_used = false;

	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM ;
	sgl_flags_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flags_eob | MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sges_needed = mpi3mr_total_num_ioctl_sges(dma_buffers, bufcnt);

	if (is_rmc) {
		/*
		 * RAID-management command: buffer 0 goes into CommandSGL and
		 * the data SGEs are placed in the unused tail of its bounce
		 * buffer, which acts as an already-chained segment.
		 */
		mpi3mr_add_sg_single(&rmgmt_req->CommandSGL,
		    sgl_flags_last, dma_buff->kern_buf_len,
		    dma_buff->kern_buf_dma);
		sgl = (U8 *) dma_buff->kern_buf + dma_buff->user_buf_len;
		available_sges = (dma_buff->kern_buf_len -
		    dma_buff->user_buf_len) / sge_element_size;
		if (sges_needed > available_sges)
			return -1;
		chain_used = true;
		dma_buff++;
		count++;
		if (is_rmr) {
			mpi3mr_add_sg_single(&rmgmt_req->ResponseSGL,
			    sgl_flags_last, dma_buff->kern_buf_len,
			    dma_buff->kern_buf_dma);
			dma_buff++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->ResponseSGL);
		if (num_datasges) {
			i = 0;
			goto build_sges;
		}
	} else {
		if (sgl_offset >= MPI3MR_AREQ_FRAME_SZ)
			return -1;
		available_sges = (MPI3MR_AREQ_FRAME_SZ - sgl_offset) /
		    sge_element_size;
		if (!available_sges)
			return -1;
	}

	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return 0;
	}

	if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) {
		/* SMP passthrough carries at most one request + one response SGE. */
		if ((sges_needed > 2) || (sges_needed > available_sges))
			return -1;
		for (; count < bufcnt; count++, dma_buff++) {
			if ((dma_buff->data_dir == MPI3MR_APP_DDN) ||
			    !dma_buff->num_dma_desc)
				continue;
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
			    dma_buff->dma_desc[0].size,
			    dma_buff->dma_desc[0].dma_addr);
			sgl += sge_element_size;
		}
		return 0;
	}
	i = 0;

build_sges:
	for (; count < bufcnt; count++, dma_buff++) {
		if (dma_buff->data_dir == MPI3MR_APP_DDN)
			continue;
		if (!dma_buff->num_dma_desc) {
			/* Data buffer with no descriptors: zero-length SGE. */
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags_eob;
			if (num_datasges == 1)
				flag = sgl_flags_last;
			mpi3mr_add_sg_single(sgl, flag, 0, 0);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
			num_datasges--;
			continue;
		}
		for (; i < dma_buff->num_dma_desc; i++) {
			if (chain_used && !available_sges)
				return -1;
			/* Keep the last in-frame slot free for a chain SGE. */
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags;
			if (i == (dma_buff->num_dma_desc - 1)) {
				if (num_datasges == 1)
					flag = sgl_flags_last;
				else
					flag = sgl_flags_eob;
			}

			mpi3mr_add_sg_single(sgl, flag,
			    dma_buff->dma_desc[i].size,
			    dma_buff->dma_desc[i].dma_addr);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
		}
		num_datasges--;
		i = 0;
	}
	return 0;

setup_chain:
	/* Continue the SGL in the pre-allocated ioctl chain buffer. */
	available_sges = sc->ioctl_chain_sge.size / sge_element_size;
	if (sges_needed > available_sges)
		return -1;
	mpi3mr_add_sg_single(sgl, last_chain_sgl_flags,
	    (sges_needed * sge_element_size), sc->ioctl_chain_sge.dma_addr);
	memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size);
	sgl = (U8 *)sc->ioctl_chain_sge.addr;
	chain_used = true;
	goto build_sges;
}


/**
 * mpi3mr_app_mptcmds - MPI Pass through IOCTL handler
 * @dev: char device
 * @cmd: IOCTL command
 * @uarg: User data payload buffer for the IOCTL
 * @flag: flags
 * @td: calling thread
 *
 * This function is the top level handler for MPI Pass through
 * IOCTL, this does basic validation of the input data buffers,
 * identifies the given buffer types and MPI command, allocates
 * DMAable memory for user given buffers, constructs SGL
 * properly and passes the command to the firmware.
 *
 * Once the MPI command is completed the driver copies the data
 * if any and reply, sense information to user provided buffers.
 * If the command is timed out then issues controller reset
 * prior to returning.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg,
		   int flag, struct thread *td)
{
	long rval = EINVAL;
	U8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
	U8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
	U16 desc_count = 0;
	U8 nvme_fmt = 0;
	U32 tmplen = 0, erbsz = MPI3MR_SENSEBUF_SZ, din_sz = 0, dout_sz = 0;
	U8 *kern_erb = NULL;
	U8 *mpi_request = NULL;
	Mpi3RequestHeader_t *mpi_header = NULL;
	Mpi3PELReqActionGetCount_t *pel = NULL;
	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
	struct mpi3mr_softc *sc = NULL;
	struct mpi3mr_ioctl_buf_entry_list *buffer_list = NULL;
	struct mpi3mr_buf_entry *buf_entries = NULL;
	struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers = NULL, *dma_buff = NULL;
	struct mpi3mr_ioctl_mpirepbuf *mpirepbuf = NULL;
	struct mpi3mr_ioctl_mptcmd *karg = (struct mpi3mr_ioctl_mptcmd *)uarg;


	sc =
mpi3mr_app_get_adp_instance(karg->mrioc_id); 803 if (!sc) 804 return ENODEV; 805 806 if (!sc->ioctl_sges_allocated) { 807 printf("%s: DMA memory was not allocated\n", __func__); 808 return ENOMEM; 809 } 810 811 if (karg->timeout < MPI3MR_IOCTL_DEFAULT_TIMEOUT) 812 karg->timeout = MPI3MR_IOCTL_DEFAULT_TIMEOUT; 813 814 if (!karg->mpi_msg_size || !karg->buf_entry_list_size) { 815 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 816 __func__, __LINE__); 817 return rval; 818 } 819 if ((karg->mpi_msg_size * 4) > MPI3MR_AREQ_FRAME_SZ) { 820 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 821 __func__, __LINE__); 822 return rval; 823 } 824 825 mpi_request = malloc(MPI3MR_AREQ_FRAME_SZ, M_MPI3MR, M_NOWAIT | M_ZERO); 826 if (!mpi_request) { 827 printf(IOCNAME "%s: memory allocation failed for mpi_request\n", sc->name, 828 __func__); 829 return ENOMEM; 830 } 831 832 mpi_header = (Mpi3RequestHeader_t *)mpi_request; 833 pel = (Mpi3PELReqActionGetCount_t *)mpi_request; 834 if (copyin(karg->mpi_msg_buf, mpi_request, (karg->mpi_msg_size * 4))) { 835 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 836 __FILE__, __LINE__, __func__); 837 rval = EFAULT; 838 goto out; 839 } 840 841 buffer_list = malloc(karg->buf_entry_list_size, M_MPI3MR, M_NOWAIT | M_ZERO); 842 if (!buffer_list) { 843 printf(IOCNAME "%s: memory allocation failed for buffer_list\n", sc->name, 844 __func__); 845 rval = ENOMEM; 846 goto out; 847 } 848 if (copyin(karg->buf_entry_list, buffer_list, karg->buf_entry_list_size)) { 849 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 850 __FILE__, __LINE__, __func__); 851 rval = EFAULT; 852 goto out; 853 } 854 if (!buffer_list->num_of_buf_entries) { 855 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 856 __func__, __LINE__); 857 rval = EINVAL; 858 goto out; 859 } 860 bufcnt = buffer_list->num_of_buf_entries; 861 dma_buffers = malloc((sizeof(*dma_buffers) * bufcnt), M_MPI3MR, M_NOWAIT | M_ZERO); 862 if 
(!dma_buffers) { 863 printf(IOCNAME "%s: memory allocation failed for dma_buffers\n", sc->name, 864 __func__); 865 rval = ENOMEM; 866 goto out; 867 } 868 buf_entries = buffer_list->buf_entry; 869 dma_buff = dma_buffers; 870 for (count = 0; count < bufcnt; count++, buf_entries++, dma_buff++) { 871 memset(dma_buff, 0, sizeof(*dma_buff)); 872 dma_buff->user_buf = buf_entries->buffer; 873 dma_buff->user_buf_len = buf_entries->buf_len; 874 875 switch (buf_entries->buf_type) { 876 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_CMD: 877 is_rmcb = 1; 878 if ((count != 0) || !buf_entries->buf_len) 879 invalid_be = 1; 880 dma_buff->data_dir = MPI3MR_APP_DDO; 881 break; 882 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_RESP: 883 is_rmrb = 1; 884 if (count != 1 || !is_rmcb || !buf_entries->buf_len) 885 invalid_be = 1; 886 dma_buff->data_dir = MPI3MR_APP_DDI; 887 break; 888 case MPI3MR_IOCTL_BUFTYPE_DATA_IN: 889 din_sz = dma_buff->user_buf_len; 890 din_cnt++; 891 if ((din_cnt > 1) && !is_rmcb) 892 invalid_be = 1; 893 dma_buff->data_dir = MPI3MR_APP_DDI; 894 break; 895 case MPI3MR_IOCTL_BUFTYPE_DATA_OUT: 896 dout_sz = dma_buff->user_buf_len; 897 dout_cnt++; 898 if ((dout_cnt > 1) && !is_rmcb) 899 invalid_be = 1; 900 dma_buff->data_dir = MPI3MR_APP_DDO; 901 break; 902 case MPI3MR_IOCTL_BUFTYPE_MPI_REPLY: 903 mpirep_offset = count; 904 dma_buff->data_dir = MPI3MR_APP_DDN; 905 if (!buf_entries->buf_len) 906 invalid_be = 1; 907 break; 908 case MPI3MR_IOCTL_BUFTYPE_ERR_RESPONSE: 909 erb_offset = count; 910 dma_buff->data_dir = MPI3MR_APP_DDN; 911 if (!buf_entries->buf_len) 912 invalid_be = 1; 913 break; 914 default: 915 invalid_be = 1; 916 break; 917 } 918 if (invalid_be) 919 break; 920 } 921 if (invalid_be) { 922 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 923 __func__, __LINE__); 924 rval = EINVAL; 925 goto out; 926 } 927 928 if (is_rmcb && ((din_sz + dout_sz) > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 929 printf("%s:%d: invalid data transfer size passed for function 0x%x" 930 "din_sz = 
%d, dout_size = %d\n", __func__, __LINE__, 931 mpi_header->Function, din_sz, dout_sz); 932 rval = EINVAL; 933 goto out; 934 } 935 936 if ((din_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE) || 937 (dout_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 938 printf("%s:%d: invalid data transfer size passed for function 0x%x" 939 "din_size=%d dout_size=%d\n", __func__, __LINE__, 940 mpi_header->Function, din_sz, dout_sz); 941 rval = EINVAL; 942 goto out; 943 } 944 945 if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) { 946 if ((din_sz > MPI3MR_IOCTL_SGE_SIZE) || 947 (dout_sz > MPI3MR_IOCTL_SGE_SIZE)) { 948 printf("%s:%d: invalid message size passed:%d:%d:%d:%d\n", 949 __func__, __LINE__, din_cnt, dout_cnt, din_sz, dout_sz); 950 rval = EINVAL; 951 goto out; 952 } 953 } 954 955 dma_buff = dma_buffers; 956 for (count = 0; count < bufcnt; count++, dma_buff++) { 957 958 dma_buff->kern_buf_len = dma_buff->user_buf_len; 959 960 if (is_rmcb && !count) { 961 dma_buff->kern_buf = sc->ioctl_chain_sge.addr; 962 dma_buff->kern_buf_len = sc->ioctl_chain_sge.size; 963 dma_buff->kern_buf_dma = sc->ioctl_chain_sge.dma_addr; 964 dma_buff->dma_desc = NULL; 965 dma_buff->num_dma_desc = 0; 966 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 967 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 968 if (copyin(dma_buff->user_buf, dma_buff->kern_buf, tmplen)) { 969 mpi3mr_dprint(sc, MPI3MR_ERROR, "failure at %s() line: %d", 970 __func__, __LINE__); 971 rval = EFAULT; 972 goto out; 973 } 974 } else if (is_rmrb && (count == 1)) { 975 dma_buff->kern_buf = sc->ioctl_resp_sge.addr; 976 dma_buff->kern_buf_len = sc->ioctl_resp_sge.size; 977 dma_buff->kern_buf_dma = sc->ioctl_resp_sge.dma_addr; 978 dma_buff->dma_desc = NULL; 979 dma_buff->num_dma_desc = 0; 980 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 981 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 982 dma_buff->kern_buf_len = tmplen; 983 } else { 984 if (!dma_buff->kern_buf_len) 985 continue; 986 if 
(mpi3mr_map_data_buffer_dma(sc, dma_buff, desc_count)) { 987 rval = ENOMEM; 988 mpi3mr_dprint(sc, MPI3MR_ERROR, "mapping data buffers failed" 989 "at %s() line: %d\n", __func__, __LINE__); 990 goto out; 991 } 992 desc_count += dma_buff->num_dma_desc; 993 } 994 } 995 996 if (erb_offset != 0xFF) { 997 kern_erb = malloc(erbsz, M_MPI3MR, M_NOWAIT | M_ZERO); 998 if (!kern_erb) { 999 printf(IOCNAME "%s:%d Cannot allocate memory for sense buffer\n", sc->name, 1000 __func__, __LINE__); 1001 rval = ENOMEM; 1002 goto out; 1003 } 1004 } 1005 1006 if (sc->ioctl_cmds.state & MPI3MR_CMD_PENDING) { 1007 printf(IOCNAME "Issue IOCTL: Ioctl command is in use/previous command is pending\n", 1008 sc->name); 1009 rval = EAGAIN; 1010 goto out; 1011 } 1012 1013 if (sc->unrecoverable) { 1014 printf(IOCNAME "Issue IOCTL: controller is in unrecoverable state\n", sc->name); 1015 rval = EFAULT; 1016 goto out; 1017 } 1018 1019 if (sc->reset_in_progress) { 1020 printf(IOCNAME "Issue IOCTL: reset in progress\n", sc->name); 1021 rval = EAGAIN; 1022 goto out; 1023 } 1024 if (sc->block_ioctls) { 1025 printf(IOCNAME "Issue IOCTL: IOCTLs are blocked\n", sc->name); 1026 rval = EAGAIN; 1027 goto out; 1028 } 1029 1030 if (mpi_header->Function != MPI3_FUNCTION_NVME_ENCAPSULATED) { 1031 if (mpi3mr_app_construct_sgl(sc, mpi_request, (karg->mpi_msg_size * 4), dma_buffers, 1032 bufcnt, is_rmcb, is_rmrb, (dout_cnt + din_cnt))) { 1033 printf(IOCNAME "Issue IOCTL: sgl build failed\n", sc->name); 1034 rval = EAGAIN; 1035 goto out; 1036 } 1037 1038 } else { 1039 nvme_fmt = mpi3mr_app_get_nvme_data_fmt( 1040 (Mpi3NVMeEncapsulatedRequest_t *)mpi_request); 1041 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { 1042 if (mpi3mr_app_build_nvme_prp(sc, 1043 (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1044 dma_buffers, bufcnt)) { 1045 rval = ENOMEM; 1046 goto out; 1047 } 1048 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || 1049 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { 1050 if (mpi3mr_app_construct_nvme_sgl(sc, 
(Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1051 dma_buffers, bufcnt)) { 1052 rval = EINVAL; 1053 goto out; 1054 } 1055 } else { 1056 printf(IOCNAME "%s: Invalid NVMe Command Format\n", sc->name, 1057 __func__); 1058 rval = EINVAL; 1059 goto out; 1060 } 1061 } 1062 1063 sc->ioctl_cmds.state = MPI3MR_CMD_PENDING; 1064 sc->ioctl_cmds.is_waiting = 1; 1065 sc->ioctl_cmds.callback = NULL; 1066 sc->ioctl_cmds.is_senseprst = 0; 1067 sc->ioctl_cmds.sensebuf = kern_erb; 1068 memset((sc->ioctl_cmds.reply), 0, sc->reply_sz); 1069 mpi_header->HostTag = MPI3MR_HOSTTAG_IOCTLCMDS; 1070 init_completion(&sc->ioctl_cmds.completion); 1071 rval = mpi3mr_submit_admin_cmd(sc, mpi_request, MPI3MR_AREQ_FRAME_SZ); 1072 if (rval) { 1073 printf(IOCNAME "Issue IOCTL: Admin Post failed\n", sc->name); 1074 goto out_failed; 1075 } 1076 wait_for_completion_timeout(&sc->ioctl_cmds.completion, karg->timeout); 1077 1078 if (!(sc->ioctl_cmds.state & MPI3MR_CMD_COMPLETE)) { 1079 sc->ioctl_cmds.is_waiting = 0; 1080 printf(IOCNAME "Issue IOCTL: command timed out\n", sc->name); 1081 rval = EAGAIN; 1082 if (sc->ioctl_cmds.state & MPI3MR_CMD_RESET) 1083 goto out_failed; 1084 1085 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1086 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL_TIMEOUT; 1087 goto out_failed; 1088 } 1089 1090 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 1091 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 1092 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 1093 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 1094 sc->nvme_encap_prp_list = NULL; 1095 } 1096 1097 if (((sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1098 != MPI3_IOCSTATUS_SUCCESS) && 1099 (sc->mpi3mr_debug & MPI3MR_DEBUG_IOCTL)) { 1100 printf(IOCNAME "Issue IOCTL: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", sc->name, 1101 (sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1102 
sc->ioctl_cmds.ioc_loginfo); 1103 } 1104 1105 if ((mpirep_offset != 0xFF) && 1106 dma_buffers[mpirep_offset].user_buf_len) { 1107 dma_buff = &dma_buffers[mpirep_offset]; 1108 dma_buff->kern_buf_len = (sizeof(*mpirepbuf) - 1 + 1109 sc->reply_sz); 1110 mpirepbuf = malloc(dma_buff->kern_buf_len, M_MPI3MR, M_NOWAIT | M_ZERO); 1111 1112 if (!mpirepbuf) { 1113 printf(IOCNAME "%s: failed obtaining a memory for mpi reply\n", sc->name, 1114 __func__); 1115 rval = ENOMEM; 1116 goto out_failed; 1117 } 1118 if (sc->ioctl_cmds.state & MPI3MR_CMD_REPLYVALID) { 1119 mpirepbuf->mpirep_type = 1120 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_ADDRESS; 1121 memcpy(mpirepbuf->repbuf, sc->ioctl_cmds.reply, sc->reply_sz); 1122 } else { 1123 mpirepbuf->mpirep_type = 1124 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_STATUS; 1125 status_desc = (Mpi3StatusReplyDescriptor_t *) 1126 mpirepbuf->repbuf; 1127 status_desc->IOCStatus = sc->ioctl_cmds.ioc_status; 1128 status_desc->IOCLogInfo = sc->ioctl_cmds.ioc_loginfo; 1129 } 1130 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 1131 if (copyout(mpirepbuf, dma_buff->user_buf, tmplen)) { 1132 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1133 __FILE__, __LINE__, __func__); 1134 rval = EFAULT; 1135 goto out_failed; 1136 } 1137 } 1138 1139 if (erb_offset != 0xFF && sc->ioctl_cmds.sensebuf && 1140 sc->ioctl_cmds.is_senseprst) { 1141 dma_buff = &dma_buffers[erb_offset]; 1142 tmplen = min(erbsz, dma_buff->user_buf_len); 1143 if (copyout(kern_erb, dma_buff->user_buf, tmplen)) { 1144 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1145 __FILE__, __LINE__, __func__); 1146 rval = EFAULT; 1147 goto out_failed; 1148 } 1149 } 1150 1151 dma_buff = dma_buffers; 1152 for (count = 0; count < bufcnt; count++, dma_buff++) { 1153 if ((count == 1) && is_rmrb) { 1154 if (copyout(dma_buff->kern_buf, dma_buff->user_buf,dma_buff->kern_buf_len)) { 1155 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1156 __FILE__, __LINE__, __func__); 1157 rval = EFAULT; 1158 goto 
out_failed; 1159 } 1160 } else if (dma_buff->data_dir == MPI3MR_APP_DDI) { 1161 tmplen = 0; 1162 for (desc_count = 0; desc_count < dma_buff->num_dma_desc; desc_count++) { 1163 if (copyout(dma_buff->dma_desc[desc_count].addr, 1164 (U8 *)dma_buff->user_buf+tmplen, 1165 dma_buff->dma_desc[desc_count].size)) { 1166 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1167 __FILE__, __LINE__, __func__); 1168 rval = EFAULT; 1169 goto out_failed; 1170 } 1171 tmplen += dma_buff->dma_desc[desc_count].size; 1172 } 1173 } 1174 } 1175 1176 if ((pel->Function == MPI3_FUNCTION_PERSISTENT_EVENT_LOG) && 1177 (pel->Action == MPI3_PEL_ACTION_GET_COUNT)) 1178 sc->mpi3mr_aen_triggered = 0; 1179 1180 out_failed: 1181 sc->ioctl_cmds.is_senseprst = 0; 1182 sc->ioctl_cmds.sensebuf = NULL; 1183 sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED; 1184 out: 1185 if (kern_erb) 1186 free(kern_erb, M_MPI3MR); 1187 if (buffer_list) 1188 free(buffer_list, M_MPI3MR); 1189 if (mpi_request) 1190 free(mpi_request, M_MPI3MR); 1191 if (dma_buffers) { 1192 dma_buff = dma_buffers; 1193 for (count = 0; count < bufcnt; count++, dma_buff++) { 1194 free(dma_buff->dma_desc, M_MPI3MR); 1195 } 1196 free(dma_buffers, M_MPI3MR); 1197 } 1198 if (mpirepbuf) 1199 free(mpirepbuf, M_MPI3MR); 1200 return rval; 1201 } 1202 1203 /** 1204 * mpi3mr_soft_reset_from_app - Trigger controller reset 1205 * @sc: Adapter instance reference 1206 * 1207 * This function triggers the controller reset from the 1208 * watchdog context and wait for it to complete. It will 1209 * come out of wait upon completion or timeout exaustion. 
1210 * 1211 * Return: 0 on success and proper error codes on failure 1212 */ 1213 static long 1214 mpi3mr_soft_reset_from_app(struct mpi3mr_softc *sc) 1215 { 1216 1217 U32 timeout; 1218 1219 /* if reset is not in progress, trigger soft reset from watchdog context */ 1220 if (!sc->reset_in_progress) { 1221 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1222 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL; 1223 1224 /* Wait for soft reset to start */ 1225 timeout = 50; 1226 while (timeout--) { 1227 if (sc->reset_in_progress == 1) 1228 break; 1229 DELAY(100 * 1000); 1230 } 1231 if (!timeout) 1232 return EFAULT; 1233 } 1234 1235 /* Wait for soft reset to complete */ 1236 int i = 0; 1237 timeout = sc->ready_timeout; 1238 while (timeout--) { 1239 if (sc->reset_in_progress == 0) 1240 break; 1241 i++; 1242 if (!(i % 5)) { 1243 mpi3mr_dprint(sc, MPI3MR_INFO, 1244 "[%2ds]waiting for controller reset to be finished from %s\n", i, __func__); 1245 } 1246 DELAY(1000 * 1000); 1247 } 1248 1249 /* 1250 * In case of soft reset failure or not completed within stipulated time, 1251 * fail back to application. 1252 */ 1253 if ((!timeout || sc->reset.status)) 1254 return EFAULT; 1255 1256 return 0; 1257 } 1258 1259 1260 /** 1261 * mpi3mr_adp_reset - Issue controller reset 1262 * @sc: Adapter instance reference 1263 * @data_out_buf: User buffer with reset type 1264 * @data_out_sz: length of the user buffer. 1265 * 1266 * This function identifies the user provided reset type and 1267 * issues approporiate reset to the controller and wait for that 1268 * to complete and reinitialize the controller and then returns. 
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_adp_reset(struct mpi3mr_softc *sc,
    void *data_out_buf, U32 data_out_sz)
{
	long rval = EINVAL;
	struct mpi3mr_ioctl_adpreset adpreset;

	memset(&adpreset, 0, sizeof(adpreset));

	/* The user payload must be exactly one adpreset structure. */
	if (data_out_sz != sizeof(adpreset)) {
		printf(IOCNAME "Invalid user adpreset buffer size %s() line: %d\n", sc->name,
		    __func__, __LINE__);
		goto out;
	}

	if (copyin(data_out_buf, &adpreset, sizeof(adpreset))) {
		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
		    __func__, __LINE__);
		rval = EFAULT;
		goto out;
	}

	/*
	 * Only the reset type is taken from userland; it selects whether a
	 * snapdump is requested before the (always soft) reset is triggered.
	 */
	switch (adpreset.reset_type) {
	case MPI3MR_IOCTL_ADPRESET_SOFT:
		sc->reset.ioctl_reset_snapdump = false;
		break;
	case MPI3MR_IOCTL_ADPRESET_DIAG_FAULT:
		sc->reset.ioctl_reset_snapdump = true;
		break;
	default:
		printf(IOCNAME "Unknown reset_type(0x%x) issued\n", sc->name,
		    adpreset.reset_type);
		goto out;
	}
	rval = mpi3mr_soft_reset_from_app(sc);
	if (rval)
		printf(IOCNAME "reset handler returned error (0x%lx) for reset type 0x%x\n",
		    sc->name, rval, adpreset.reset_type);

out:
	return rval;
}

/**
 * mpi3mr_app_send_aen - Notify the application layer
 * @sc: Adapter instance reference
 *
 * Marks an AEN as triggered and wakes up any poll(2) waiter on the
 * driver's char device so the application re-reads the event state.
 */
void
mpi3mr_app_send_aen(struct mpi3mr_softc *sc)
{
	sc->mpi3mr_aen_triggered = 1;
	if (sc->mpi3mr_poll_waiting) {
		selwakeup(&sc->mpi3mr_select);
		sc->mpi3mr_poll_waiting = 0;
	}
	return;
}

/**
 * mpi3mr_pel_wait_complete - PELWait command completion callback
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker
 *
 * Completion handler for the persistent-event-log WAIT request. On a
 * good or aborted completion it notifies the application and, unless a
 * PEL abort was requested, re-arms the cycle by issuing a GET_SEQNUM.
 * Failed completions are retried up to MPI3MR_PELCMDS_RETRYCOUNT times.
 */
void
mpi3mr_pel_wait_complete(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry = 0;
	Mpi3PELReply_t *pel_reply = NULL;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* Command was cleaned up by a controller reset; just release it. */
	if (drvr_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drvrcmd;

	if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__);
		goto out_failed;
	}
	pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply;

	/* ABORTED is an expected outcome when a PEL abort was issued. */
	if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || ((le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)
	    && (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_ABORTED))){
		printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n",
		    sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status),
		    drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus));
		retry = 1;
	}

	if (retry) {
		if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			drvr_cmd->retry_count++;
			printf(IOCNAME "%s : PELWaitretry=%d\n", sc->name,
			    __func__, drvr_cmd->retry_count);
			/* Re-issue; the tracker stays pending, so no cleanup here. */
			mpi3mr_issue_pel_wait(sc, drvr_cmd);
			return;
		}

		printf(IOCNAME "%s :PELWait failed after all retries\n", sc->name,
		    __func__);
		goto out_failed;
	}

	/* Tell the application new PEL entries may be available. */
	mpi3mr_app_send_aen(sc);

	if (!sc->pel_abort_requested) {
		sc->pel_cmds.retry_count = 0;
		/* Re-arm the wait cycle with a fresh sequence number query. */
		mpi3mr_send_pel_getseq(sc, &sc->pel_cmds);
	}

	return;
out_failed:
	sc->pel_wait_pend = 0;
cleanup_drvrcmd:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
}

/**
 * mpi3mr_issue_pel_wait - Issue a PEL WAIT request to firmware
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker
 *
 * Fires an asynchronous PEL WAIT admin request (infinite wait time)
 * starting from the driver's cached newest sequence number, with
 * completion delivered to mpi3mr_pel_wait_complete(). Submission
 * failures are retried immediately up to MPI3MR_PELCMDS_RETRYCOUNT.
 */
void
mpi3mr_issue_pel_wait(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry_count = 0;
	Mpi3PELReqActionWait_t pel_wait;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* A (re)issued wait supersedes any previous abort request. */
	sc->pel_abort_requested = 0;

	memset(&pel_wait, 0, sizeof(pel_wait));
	drvr_cmd->state = MPI3MR_CMD_PENDING;
	drvr_cmd->is_waiting = 0;
	drvr_cmd->callback = mpi3mr_pel_wait_complete;
	drvr_cmd->ioc_status = 0;
	drvr_cmd->ioc_loginfo = 0;
	pel_wait.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
	pel_wait.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.Action = MPI3_PEL_ACTION_WAIT;
	pel_wait.StartingSequenceNumber = htole32(sc->newest_seqnum);
	pel_wait.Locale = htole16(sc->pel_locale);
	pel_wait.Class = htole16(sc->pel_class);
	pel_wait.WaitTime = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	printf(IOCNAME "Issuing PELWait: seqnum %u class %u locale 0x%08x\n",
	    sc->name, sc->newest_seqnum, sc->pel_class, sc->pel_locale);
retry_pel_wait:
	if (mpi3mr_submit_admin_cmd(sc, &pel_wait, sizeof(pel_wait))) {
		printf(IOCNAME "%s: Issue PELWait IOCTL: Admin Post failed\n", sc->name, __func__);
		if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			retry_count++;
			goto retry_pel_wait;
		}
		goto out_failed;
	}
	return;
out_failed:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
	sc->pel_wait_pend = 0;
	return;
}

/**
 * mpi3mr_send_pel_getseq - Issue a PEL GET_SEQNUM request
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker (used only on the failure path;
 *            the request itself is tracked through sc->pel_cmds)
 *
 * Asynchronously queries the firmware for the current PEL sequence
 * numbers, landing the result in the pre-allocated pel_seq_number DMA
 * buffer; completion is mpi3mr_pel_getseq_complete().
 */
void
mpi3mr_send_pel_getseq(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry_count = 0;
	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	sc->pel_cmds.state = MPI3MR_CMD_PENDING;
	sc->pel_cmds.is_waiting = 0;
	sc->pel_cmds.ioc_status = 0;
	sc->pel_cmds.ioc_loginfo = 0;
	sc->pel_cmds.callback = mpi3mr_pel_getseq_complete;
	pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT);
	pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM;
	/* Response lands in the pre-allocated sequence-number DMA buffer. */
	mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags,
	    sc->pel_seq_number_sz, sc->pel_seq_number_dma);

retry_pel_getseq:
	if (mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req))) {
		printf(IOCNAME "%s: Issuing PEL GetSeq IOCTL: Admin Post failed\n", sc->name, __func__);
		if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			retry_count++;
			goto retry_pel_getseq;
		}
		goto out_failed;
	}
	return;
out_failed:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
	sc->pel_wait_pend = 0;
}

/**
 * mpi3mr_pel_getseq_complete - PEL GET_SEQNUM completion callback
 * @sc: Adapter instance reference
 * @drvr_cmd: Internal command tracker
 *
 * On success, caches (newest firmware sequence number + 1) as the
 * starting point and chains into mpi3mr_issue_pel_wait(); failures are
 * retried up to MPI3MR_PELCMDS_RETRYCOUNT before the wait cycle is
 * torn down.
 */
void
mpi3mr_pel_getseq_complete(struct mpi3mr_softc *sc,
    struct mpi3mr_drvr_cmd *drvr_cmd)
{
	U8 retry = 0;
	Mpi3PELReply_t *pel_reply = NULL;
	Mpi3PELSeq_t *pel_seq_num = (Mpi3PELSeq_t *)sc->pel_seq_number;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);

	/* Command was cleaned up by a controller reset; just release it. */
	if (drvr_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drvrcmd;

	if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__);
		goto out_failed;
	}
	pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply;

	if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS)
	    || (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)){
		printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n",
		    sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status),
		    drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus));
		retry = 1;
	}

	if (retry) {
		if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) {
			drvr_cmd->retry_count++;
			printf(IOCNAME "%s : PELGetSeqNUM retry=%d\n", sc->name,
			    __func__, drvr_cmd->retry_count);
			mpi3mr_send_pel_getseq(sc, drvr_cmd);
			return;
		}
		printf(IOCNAME "%s :PELGetSeqNUM failed after all retries\n",
		    sc->name, __func__);
		goto out_failed;
	}

	/* Next WAIT starts one past the newest entry firmware reported. */
	sc->newest_seqnum = le32toh(pel_seq_num->Newest) + 1;
	drvr_cmd->retry_count = 0;
	mpi3mr_issue_pel_wait(sc, drvr_cmd);
	return;
out_failed:
	sc->pel_wait_pend = 0;
cleanup_drvrcmd:
	drvr_cmd->state = MPI3MR_CMD_NOTUSED;
	drvr_cmd->callback = NULL;
	drvr_cmd->retry_count = 0;
}

static int 1514 mpi3mr_pel_getseq(struct mpi3mr_softc *sc) 1515 { 1516 int rval = 0; 1517 U8 sgl_flags = 0; 1518 Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req; 1519 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1520 1521 if (sc->reset_in_progress || sc->block_ioctls) { 1522 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1523 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1524 return -1; 1525 } 1526 1527 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 1528 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1529 sc->pel_cmds.state = MPI3MR_CMD_PENDING; 1530 sc->pel_cmds.is_waiting = 0; 1531 sc->pel_cmds.retry_count = 0; 1532 sc->pel_cmds.ioc_status = 0; 1533 sc->pel_cmds.ioc_loginfo = 0; 1534 sc->pel_cmds.callback = mpi3mr_pel_getseq_complete; 1535 pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1536 pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1537 pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM; 1538 mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags, 1539 sc->pel_seq_number_sz, sc->pel_seq_number_dma); 1540 1541 if ((rval = mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req)))) 1542 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1543 1544 return rval; 1545 } 1546 1547 int 1548 mpi3mr_pel_abort(struct mpi3mr_softc *sc) 1549 { 1550 int retval = 0; 1551 U16 pel_log_status; 1552 Mpi3PELReqActionAbort_t pel_abort_req; 1553 Mpi3PELReply_t *pel_reply = NULL; 1554 1555 if (sc->reset_in_progress || sc->block_ioctls) { 1556 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1557 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1558 return -1; 1559 } 1560 1561 memset(&pel_abort_req, 0, sizeof(pel_abort_req)); 1562 1563 mtx_lock(&sc->pel_abort_cmd.completion.lock); 1564 if (sc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { 1565 printf(IOCNAME "%s: PEL Abort command is in 
use\n", sc->name, __func__); 1566 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1567 return -1; 1568 } 1569 1570 sc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; 1571 sc->pel_abort_cmd.is_waiting = 1; 1572 sc->pel_abort_cmd.callback = NULL; 1573 pel_abort_req.HostTag = htole16(MPI3MR_HOSTTAG_PELABORT); 1574 pel_abort_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1575 pel_abort_req.Action = MPI3_PEL_ACTION_ABORT; 1576 pel_abort_req.AbortHostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1577 1578 sc->pel_abort_requested = 1; 1579 1580 init_completion(&sc->pel_abort_cmd.completion); 1581 retval = mpi3mr_submit_admin_cmd(sc, &pel_abort_req, sizeof(pel_abort_req)); 1582 if (retval) { 1583 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1584 sc->pel_abort_requested = 0; 1585 retval = -1; 1586 goto out_unlock; 1587 } 1588 wait_for_completion_timeout(&sc->pel_abort_cmd.completion, MPI3MR_INTADMCMD_TIMEOUT); 1589 1590 if (!(sc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { 1591 printf(IOCNAME "%s: PEL Abort command timedout\n",sc->name, __func__); 1592 sc->pel_abort_cmd.is_waiting = 0; 1593 retval = -1; 1594 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1595 sc->reset.reason = MPI3MR_RESET_FROM_PELABORT_TIMEOUT; 1596 goto out_unlock; 1597 } 1598 if (((GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1599 || (!(sc->pel_abort_cmd.state & MPI3MR_CMD_REPLYVALID))) { 1600 printf(IOCNAME "%s: PEL Abort command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 1601 sc->name, __func__, GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status), 1602 sc->pel_abort_cmd.ioc_loginfo); 1603 retval = -1; 1604 goto out_unlock; 1605 } 1606 1607 pel_reply = (Mpi3PELReply_t *)sc->pel_abort_cmd.reply; 1608 pel_log_status = le16toh(pel_reply->PELogStatus); 1609 if (pel_log_status != MPI3_PEL_STATUS_SUCCESS) { 1610 printf(IOCNAME "%s: PEL abort command failed, pel_status(0x%04x)\n", 1611 sc->name, __func__, pel_log_status); 1612 retval = -1; 1613 } 1614 1615 
out_unlock: 1616 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1617 sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; 1618 return retval; 1619 } 1620 1621 /** 1622 * mpi3mr_pel_enable - Handler for PEL enable 1623 * @sc: Adapter instance reference 1624 * @data_out_buf: User buffer containing PEL enable data 1625 * @data_out_sz: length of the user buffer. 1626 * 1627 * This function is the handler for PEL enable driver IOCTL. 1628 * Validates the application given class and locale and if 1629 * requires aborts the existing PEL wait request and/or issues 1630 * new PEL wait request to the firmware and returns. 1631 * 1632 * Return: 0 on success and proper error codes on failure. 1633 */ 1634 static long 1635 mpi3mr_pel_enable(struct mpi3mr_softc *sc, 1636 void *data_out_buf, U32 data_out_sz) 1637 { 1638 long rval = EINVAL; 1639 U8 tmp_class; 1640 U16 tmp_locale; 1641 struct mpi3mr_ioctl_pel_enable pel_enable; 1642 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1643 1644 if (sc->unrecoverable) { 1645 device_printf(sc->mpi3mr_dev, "Issue IOCTL: controller is in unrecoverable state\n"); 1646 return EFAULT; 1647 } 1648 if (sc->reset_in_progress) { 1649 device_printf(sc->mpi3mr_dev, "Issue IOCTL: reset in progress\n"); 1650 return EAGAIN; 1651 } 1652 if (sc->block_ioctls) { 1653 device_printf(sc->mpi3mr_dev, "Issue IOCTL: IOCTLs are blocked\n"); 1654 return EAGAIN; 1655 } 1656 1657 if ((data_out_sz != sizeof(pel_enable) || 1658 (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT))) { 1659 printf(IOCNAME "%s: Invalid user pel_enable buffer size %u\n", 1660 sc->name, __func__, data_out_sz); 1661 goto out; 1662 } 1663 memset(&pel_enable, 0, sizeof(pel_enable)); 1664 if (copyin(data_out_buf, &pel_enable, sizeof(pel_enable))) { 1665 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1666 __func__, __LINE__); 1667 rval = EFAULT; 1668 goto out; 1669 } 1670 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { 1671 printf(IOCNAME "%s: out of range class %d\n", 
1672 sc->name, __func__, pel_enable.pel_class); 1673 goto out; 1674 } 1675 1676 if (sc->pel_wait_pend) { 1677 if ((sc->pel_class <= pel_enable.pel_class) && 1678 !((sc->pel_locale & pel_enable.pel_locale) ^ 1679 pel_enable.pel_locale)) { 1680 rval = 0; 1681 goto out; 1682 } else { 1683 pel_enable.pel_locale |= sc->pel_locale; 1684 if (sc->pel_class < pel_enable.pel_class) 1685 pel_enable.pel_class = sc->pel_class; 1686 1687 if (mpi3mr_pel_abort(sc)) { 1688 printf(IOCNAME "%s: pel_abort failed, status(%ld)\n", 1689 sc->name, __func__, rval); 1690 goto out; 1691 } 1692 } 1693 } 1694 1695 tmp_class = sc->pel_class; 1696 tmp_locale = sc->pel_locale; 1697 sc->pel_class = pel_enable.pel_class; 1698 sc->pel_locale = pel_enable.pel_locale; 1699 sc->pel_wait_pend = 1; 1700 1701 if ((rval = mpi3mr_pel_getseq(sc))) { 1702 sc->pel_class = tmp_class; 1703 sc->pel_locale = tmp_locale; 1704 sc->pel_wait_pend = 0; 1705 printf(IOCNAME "%s: pel get sequence number failed, status(%ld)\n", 1706 sc->name, __func__, rval); 1707 } 1708 1709 out: 1710 return rval; 1711 } 1712 1713 void 1714 mpi3mr_app_save_logdata(struct mpi3mr_softc *sc, char *event_data, 1715 U16 event_data_size) 1716 { 1717 struct mpi3mr_log_data_entry *entry; 1718 U32 index = sc->log_data_buffer_index, sz; 1719 1720 if (!(sc->log_data_buffer)) 1721 return; 1722 1723 entry = (struct mpi3mr_log_data_entry *) 1724 (sc->log_data_buffer + (index * sc->log_data_entry_size)); 1725 entry->valid_entry = 1; 1726 sz = min(sc->log_data_entry_size, event_data_size); 1727 memcpy(entry->data, event_data, sz); 1728 sc->log_data_buffer_index = 1729 ((++index) % MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES); 1730 mpi3mr_app_send_aen(sc); 1731 } 1732 1733 /** 1734 * mpi3mr_get_logdata - Handler for get log data 1735 * @sc: Adapter instance reference 1736 * @data_in_buf: User buffer to copy the logdata entries 1737 * @data_in_sz: length of the user buffer. 
1738 * 1739 * This function copies the log data entries to the user buffer 1740 * when log caching is enabled in the driver. 1741 * 1742 * Return: 0 on success and proper error codes on failure 1743 */ 1744 static long 1745 mpi3mr_get_logdata(struct mpi3mr_softc *sc, 1746 void *data_in_buf, U32 data_in_sz) 1747 { 1748 long rval = EINVAL; 1749 U16 num_entries = 0; 1750 U16 entry_sz = sc->log_data_entry_size; 1751 1752 if ((!sc->log_data_buffer) || (data_in_sz < entry_sz)) 1753 return rval; 1754 1755 num_entries = data_in_sz / entry_sz; 1756 if (num_entries > MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES) 1757 num_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1758 1759 if ((rval = copyout(sc->log_data_buffer, data_in_buf, (num_entries * entry_sz)))) { 1760 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1761 rval = EFAULT; 1762 } 1763 1764 return rval; 1765 } 1766 1767 /** 1768 * mpi3mr_logdata_enable - Handler for log data enable 1769 * @sc: Adapter instance reference 1770 * @data_in_buf: User buffer to copy the max logdata entry count 1771 * @data_in_sz: length of the user buffer. 1772 * 1773 * This function enables log data caching in the driver if not 1774 * already enabled and return the maximum number of log data 1775 * entries that can be cached in the driver. 
1776 * 1777 * Return: 0 on success and proper error codes on failure 1778 */ 1779 static long 1780 mpi3mr_logdata_enable(struct mpi3mr_softc *sc, 1781 void *data_in_buf, U32 data_in_sz) 1782 { 1783 long rval = EINVAL; 1784 struct mpi3mr_ioctl_logdata_enable logdata_enable; 1785 1786 if (data_in_sz < sizeof(logdata_enable)) 1787 return rval; 1788 1789 if (sc->log_data_buffer) 1790 goto copy_data; 1791 1792 sc->log_data_entry_size = (sc->reply_sz - (sizeof(Mpi3EventNotificationReply_t) - 4)) 1793 + MPI3MR_IOCTL_LOGDATA_ENTRY_HEADER_SZ; 1794 1795 sc->log_data_buffer = malloc((MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES * sc->log_data_entry_size), 1796 M_MPI3MR, M_NOWAIT | M_ZERO); 1797 if (!sc->log_data_buffer) { 1798 printf(IOCNAME "%s log data buffer memory allocation failed\n", sc->name, __func__); 1799 return ENOMEM; 1800 } 1801 1802 sc->log_data_buffer_index = 0; 1803 1804 copy_data: 1805 memset(&logdata_enable, 0, sizeof(logdata_enable)); 1806 logdata_enable.max_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1807 1808 if ((rval = copyout(&logdata_enable, data_in_buf, sizeof(logdata_enable)))) { 1809 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1810 rval = EFAULT; 1811 } 1812 1813 return rval; 1814 } 1815 1816 /** 1817 * mpi3mr_get_change_count - Get topology change count 1818 * @sc: Adapter instance reference 1819 * @data_in_buf: User buffer to copy the change count 1820 * @data_in_sz: length of the user buffer. 1821 * 1822 * This function copies the toplogy change count provided by the 1823 * driver in events and cached in the driver to the user 1824 * provided buffer for the specific controller. 
1825 * 1826 * Return: 0 on success and proper error codes on failure 1827 */ 1828 static long 1829 mpi3mr_get_change_count(struct mpi3mr_softc *sc, 1830 void *data_in_buf, U32 data_in_sz) 1831 { 1832 long rval = EINVAL; 1833 struct mpi3mr_ioctl_chgcnt chg_count; 1834 memset(&chg_count, 0, sizeof(chg_count)); 1835 1836 chg_count.change_count = sc->change_count; 1837 if (data_in_sz >= sizeof(chg_count)) { 1838 if ((rval = copyout(&chg_count, data_in_buf, sizeof(chg_count)))) { 1839 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1840 __LINE__, __func__); 1841 rval = EFAULT; 1842 } 1843 } 1844 return rval; 1845 } 1846 1847 /** 1848 * mpi3mr_get_alltgtinfo - Get all targets information 1849 * @sc: Adapter instance reference 1850 * @data_in_buf: User buffer to copy the target information 1851 * @data_in_sz: length of the user buffer. 1852 * 1853 * This function copies the driver managed target devices device 1854 * handle, persistent ID, bus ID and taret ID to the user 1855 * provided buffer for the specific controller. This function 1856 * also provides the number of devices managed by the driver for 1857 * the specific controller. 
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_get_alltgtinfo(struct mpi3mr_softc *sc,
    void *data_in_buf, U32 data_in_sz)
{
	long rval = EINVAL;
	U8 get_count = 0;
	U16 i = 0, num_devices = 0;
	U32 min_entrylen = 0, kern_entrylen = 0, user_entrylen = 0;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
	struct mpi3mr_ioctl_all_tgtinfo *all_tgtinfo = (struct mpi3mr_ioctl_all_tgtinfo *)data_in_buf;

	/* The buffer must at least hold the num_devices count field. */
	if (data_in_sz < sizeof(uint32_t)) {
		printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
		    __LINE__, __func__);
		goto out;
	}
	/* A count-sized buffer means the caller only wants the device count. */
	if (data_in_sz == sizeof(uint32_t))
		get_count = 1;

	if (TAILQ_EMPTY(&cam_sc->tgt_list)) {
		get_count = 1;
		goto copy_usrbuf;
	}

	/* First pass under the spin lock: count the managed targets. */
	mtx_lock_spin(&cam_sc->sc->target_lock);
	TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) {
		num_devices++;
	}
	mtx_unlock_spin(&cam_sc->sc->target_lock);

	if (get_count)
		goto copy_usrbuf;

	kern_entrylen = num_devices * sizeof(*devmap_info);

	/* M_NOWAIT: the spin lock below forbids sleeping allocations anyway. */
	devmap_info = malloc(kern_entrylen, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!devmap_info) {
		printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
		    __LINE__, __func__);
		rval = ENOMEM;
		goto out;
	}
	/* Pre-fill with 0xFF so unused slots read as "invalid". */
	memset((U8*)devmap_info, 0xFF, kern_entrylen);

	/*
	 * Second pass: fill the snapshot. The list may have grown since the
	 * count pass, so the i < num_devices guard bounds the writes; it may
	 * also have shrunk, so num_devices is re-derived from i afterwards.
	 */
	mtx_lock_spin(&cam_sc->sc->target_lock);
	TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) {
		if (i < num_devices) {
			devmap_info[i].handle = tgtdev->dev_handle;
			devmap_info[i].per_id = tgtdev->per_id;
			/*
			 * For hidden/ugood device the target_id and bus_id
			 * should be 0xFFFFFFFF and 0xFF
			 */
			if (!tgtdev->exposed_to_os) {
				devmap_info[i].target_id = 0xFFFFFFFF;
				devmap_info[i].bus_id = 0xFF;
			} else {
				devmap_info[i].target_id = tgtdev->tid;
				devmap_info[i].bus_id = 0;
			}
			i++;
		}
	}
	num_devices = i;
	mtx_unlock_spin(&cam_sc->sc->target_lock);

copy_usrbuf:
	if (copyout(&num_devices, &all_tgtinfo->num_devices, sizeof(num_devices))) {
		printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__,
		    __LINE__, __func__);
		rval = EFAULT;
		goto out;
	}
	/* Copy only as many whole entries as both sides can hold. */
	user_entrylen = (data_in_sz - sizeof(uint32_t))/sizeof(*devmap_info);
	user_entrylen *= sizeof(*devmap_info);
	min_entrylen = min(user_entrylen, kern_entrylen);
	if (min_entrylen && (copyout(devmap_info, &all_tgtinfo->dmi, min_entrylen))) {
		printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
		    __FILE__, __LINE__, __func__);
		rval = EFAULT;
		goto out;
	}
	rval = 0;
out:
	if (devmap_info)
		free(devmap_info, M_MPI3MR);

	return rval;
}

/**
 * mpi3mr_get_tgtinfo - Get specific target information
 * @sc: Adapter instance reference
 * @karg: driver pointer to users payload buffer
 *
 * This function copies the driver managed specific target device
 * info like handle, persistent ID, bus ID and target ID to the user
 * provided buffer for the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_get_tgtinfo(struct mpi3mr_softc *sc,
    struct mpi3mr_ioctl_drvcmd *karg)
{
	long rval = EINVAL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_ioctl_tgtinfo tgtinfo;

	memset(&tgtinfo, 0, sizeof(tgtinfo));

	/* The same tgtinfo structure is used for both input and output. */
	if ((karg->data_out_size != sizeof(struct mpi3mr_ioctl_tgtinfo)) ||
	    (karg->data_in_size != sizeof(struct mpi3mr_ioctl_tgtinfo))) {
		printf(IOCNAME "Invalid user tgtinfo buffer size %s() line: %d\n", sc->name,
		    __func__, __LINE__);
		goto out;
	}

	if (copyin(karg->data_out_buf, &tgtinfo, sizeof(tgtinfo))) {
		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
		    __func__, __LINE__);
		rval = EFAULT;
		goto out;
	}

	/*
	 * Exactly one lookup key may be supplied; every other field must be
	 * left at its all-ones "wildcard" value or the request is rejected.
	 * Valid keys: bus_id+target_id, persistent_id, or dev_handle.
	 */
	if ((tgtinfo.bus_id != 0xFF) && (tgtinfo.target_id != 0xFFFFFFFF)) {
		if ((tgtinfo.persistent_id != 0xFFFF) ||
		    (tgtinfo.dev_handle != 0xFFFF))
			goto out;
		tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.target_id);
	} else if (tgtinfo.persistent_id != 0xFFFF) {
		if ((tgtinfo.bus_id != 0xFF) ||
		    (tgtinfo.dev_handle !=0xFFFF) ||
		    (tgtinfo.target_id != 0xFFFFFFFF))
			goto out;
		tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.persistent_id);
	} else if (tgtinfo.dev_handle !=0xFFFF) {
		if ((tgtinfo.bus_id != 0xFF) ||
		    (tgtinfo.target_id != 0xFFFFFFFF) ||
		    (tgtinfo.persistent_id != 0xFFFF))
			goto out;
		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tgtinfo.dev_handle);
	}
	if (!tgtdev)
		goto out;

	/* Fill every field of the reply from the matched target. */
	tgtinfo.target_id = tgtdev->per_id;
	tgtinfo.bus_id = 0;
	tgtinfo.dev_handle = tgtdev->dev_handle;
	tgtinfo.persistent_id = tgtdev->per_id;
	tgtinfo.seq_num = 0;

	if (copyout(&tgtinfo, karg->data_in_buf, sizeof(tgtinfo))) {
		printf(IOCNAME "failure at %s() line:%d\n", sc->name,
		    __func__, __LINE__);
		rval = EFAULT;
	}

out:
	return rval;
}

/**
 * mpi3mr_get_pciinfo - Get PCI info IOCTL handler
 * @sc: Adapter instance reference
 * @data_in_buf: User buffer to hold adapter information
 * @data_in_sz: length of the user buffer.
 *
 * This function provides the PCI spec information for the
 * given controller
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_get_pciinfo(struct mpi3mr_softc *sc,
    void *data_in_buf, U32 data_in_sz)
{
	long rval = EINVAL;
	U8 i;
	struct mpi3mr_ioctl_pciinfo pciinfo;
	memset(&pciinfo, 0, sizeof(pciinfo));

	/* Snapshot the first 256 bytes of config space as 64 dwords. */
	for (i = 0; i < 64; i++)
		pciinfo.config_space[i] = pci_read_config(sc->mpi3mr_dev, (i * 4), 4);

	/* Undersized user buffers fall through and return EINVAL. */
	if (data_in_sz >= sizeof(pciinfo)) {
		if ((rval = copyout(&pciinfo, data_in_buf, sizeof(pciinfo)))) {
			printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name,
			    __FILE__, __LINE__, __func__);
			rval = EFAULT;
		}
	}
	return rval;
}

/**
 * mpi3mr_get_adpinfo - Get adapter info IOCTL handler
 * @sc: Adapter instance reference
 * @data_in_buf: User buffer to hold adapter information
 * @data_in_sz: length of the user buffer.
2062 * 2063 * This function provides adapter information for the given 2064 * controller 2065 * 2066 * Return: 0 on success and proper error codes on failure 2067 */ 2068 static long 2069 mpi3mr_get_adpinfo(struct mpi3mr_softc *sc, 2070 void *data_in_buf, U32 data_in_sz) 2071 { 2072 long rval = EINVAL; 2073 struct mpi3mr_ioctl_adpinfo adpinfo; 2074 enum mpi3mr_iocstate ioc_state; 2075 memset(&adpinfo, 0, sizeof(adpinfo)); 2076 2077 adpinfo.adp_type = MPI3MR_IOCTL_ADPTYPE_AVGFAMILY; 2078 adpinfo.pci_dev_id = pci_get_device(sc->mpi3mr_dev); 2079 adpinfo.pci_dev_hw_rev = pci_read_config(sc->mpi3mr_dev, PCIR_REVID, 1); 2080 adpinfo.pci_subsys_dev_id = pci_get_subdevice(sc->mpi3mr_dev); 2081 adpinfo.pci_subsys_ven_id = pci_get_subvendor(sc->mpi3mr_dev); 2082 adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev);; 2083 adpinfo.pci_dev = pci_get_slot(sc->mpi3mr_dev); 2084 adpinfo.pci_func = pci_get_function(sc->mpi3mr_dev); 2085 adpinfo.pci_seg_id = pci_get_domain(sc->mpi3mr_dev); 2086 adpinfo.ioctl_ver = MPI3MR_IOCTL_VERSION; 2087 memcpy((U8 *)&adpinfo.driver_info, (U8 *)&sc->driver_info, sizeof(adpinfo.driver_info)); 2088 2089 ioc_state = mpi3mr_get_iocstate(sc); 2090 2091 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 2092 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_UNRECOVERABLE; 2093 else if (sc->reset_in_progress || sc->block_ioctls) 2094 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_IN_RESET; 2095 else if (ioc_state == MRIOC_STATE_FAULT) 2096 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_FAULT; 2097 else 2098 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_OPERATIONAL; 2099 2100 if (data_in_sz >= sizeof(adpinfo)) { 2101 if ((rval = copyout(&adpinfo, data_in_buf, sizeof(adpinfo)))) { 2102 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2103 __FILE__, __LINE__, __func__); 2104 rval = EFAULT; 2105 } 2106 } 2107 return rval; 2108 } 2109 /** 2110 * mpi3mr_app_drvrcmds - Driver IOCTL handler 2111 * @dev: char device 2112 * @cmd: IOCTL command 2113 * @arg: User data payload buffer for the 
IOCTL 2114 * @flag: flags 2115 * @thread: threads 2116 * 2117 * This function is the top level handler for driver commands, 2118 * this does basic validation of the buffer and identifies the 2119 * opcode and switches to correct sub handler. 2120 * 2121 * Return: 0 on success and proper error codes on failure 2122 */ 2123 2124 static int 2125 mpi3mr_app_drvrcmds(struct cdev *dev, u_long cmd, 2126 void *uarg, int flag, struct thread *td) 2127 { 2128 long rval = EINVAL; 2129 struct mpi3mr_softc *sc = NULL; 2130 struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)uarg; 2131 2132 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 2133 if (!sc) 2134 return ENODEV; 2135 2136 mtx_lock(&sc->ioctl_cmds.completion.lock); 2137 switch (karg->opcode) { 2138 case MPI3MR_DRVRIOCTL_OPCODE_ADPINFO: 2139 rval = mpi3mr_get_adpinfo(sc, karg->data_in_buf, karg->data_in_size); 2140 break; 2141 case MPI3MR_DRVRIOCTL_OPCODE_GETPCIINFO: 2142 rval = mpi3mr_get_pciinfo(sc, karg->data_in_buf, karg->data_in_size); 2143 break; 2144 case MPI3MR_DRVRIOCTL_OPCODE_TGTDEVINFO: 2145 rval = mpi3mr_get_tgtinfo(sc, karg); 2146 break; 2147 case MPI3MR_DRVRIOCTL_OPCODE_ALLTGTDEVINFO: 2148 rval = mpi3mr_get_alltgtinfo(sc, karg->data_in_buf, karg->data_in_size); 2149 break; 2150 case MPI3MR_DRVRIOCTL_OPCODE_GETCHGCNT: 2151 rval = mpi3mr_get_change_count(sc, karg->data_in_buf, karg->data_in_size); 2152 break; 2153 case MPI3MR_DRVRIOCTL_OPCODE_LOGDATAENABLE: 2154 rval = mpi3mr_logdata_enable(sc, karg->data_in_buf, karg->data_in_size); 2155 break; 2156 case MPI3MR_DRVRIOCTL_OPCODE_GETLOGDATA: 2157 rval = mpi3mr_get_logdata(sc, karg->data_in_buf, karg->data_in_size); 2158 break; 2159 case MPI3MR_DRVRIOCTL_OPCODE_PELENABLE: 2160 rval = mpi3mr_pel_enable(sc, karg->data_out_buf, karg->data_out_size); 2161 break; 2162 case MPI3MR_DRVRIOCTL_OPCODE_ADPRESET: 2163 rval = mpi3mr_adp_reset(sc, karg->data_out_buf, karg->data_out_size); 2164 break; 2165 case MPI3MR_DRVRIOCTL_OPCODE_UNKNOWN: 2166 default: 2167 
printf("Unsupported drvr ioctl opcode 0x%x\n", karg->opcode); 2168 break; 2169 } 2170 mtx_unlock(&sc->ioctl_cmds.completion.lock); 2171 return rval; 2172 } 2173 /** 2174 * mpi3mr_ioctl - IOCTL Handler 2175 * @dev: char device 2176 * @cmd: IOCTL command 2177 * @arg: User data payload buffer for the IOCTL 2178 * @flag: flags 2179 * @thread: threads 2180 * 2181 * This is the IOCTL entry point which checks the command type and 2182 * executes proper sub handler specific for the command. 2183 * 2184 * Return: 0 on success and proper error codes on failure 2185 */ 2186 static int 2187 mpi3mr_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2188 { 2189 int rval = EINVAL; 2190 2191 struct mpi3mr_softc *sc = NULL; 2192 struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)arg; 2193 2194 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 2195 2196 if (!sc) 2197 return ENODEV; 2198 2199 mpi3mr_atomic_inc(&sc->pend_ioctls); 2200 2201 2202 if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) { 2203 mpi3mr_dprint(sc, MPI3MR_INFO, 2204 "Return back IOCTL, shutdown is in progress\n"); 2205 mpi3mr_atomic_dec(&sc->pend_ioctls); 2206 return ENODEV; 2207 } 2208 2209 switch (cmd) { 2210 case MPI3MRDRVCMD: 2211 rval = mpi3mr_app_drvrcmds(dev, cmd, arg, flag, td); 2212 break; 2213 case MPI3MRMPTCMD: 2214 mtx_lock(&sc->ioctl_cmds.completion.lock); 2215 rval = mpi3mr_app_mptcmds(dev, cmd, arg, flag, td); 2216 mtx_unlock(&sc->ioctl_cmds.completion.lock); 2217 break; 2218 default: 2219 printf("%s:Unsupported ioctl cmd (0x%08lx)\n", MPI3MR_DRIVER_NAME, cmd); 2220 break; 2221 } 2222 2223 mpi3mr_atomic_dec(&sc->pend_ioctls); 2224 2225 return rval; 2226 } 2227