1 /* 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved. 5 * Support: <fbsd-storage-driver.pdl@broadcom.com> 6 * 7 * Authors: Sumit Saxena <sumit.saxena@broadcom.com> 8 * Chandrakanth Patil <chandrakanth.patil@broadcom.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions are 12 * met: 13 * 14 * 1. Redistributions of source code must retain the above copyright notice, 15 * this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright notice, 17 * this list of conditions and the following disclaimer in the documentation and/or other 18 * materials provided with the distribution. 19 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors 20 * may be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 34 * 35 * The views and conclusions contained in the software and documentation are 36 * those of the authors and should not be interpreted as representing 37 * official policies,either expressed or implied, of the FreeBSD Project. 38 * 39 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131 40 * 41 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD 42 */ 43 44 #include <sys/cdefs.h> 45 __FBSDID("$FreeBSD$"); 46 47 #include <sys/param.h> 48 #include <sys/proc.h> 49 #include <cam/cam.h> 50 #include <cam/cam_ccb.h> 51 #include "mpi3mr_cam.h" 52 #include "mpi3mr_app.h" 53 #include "mpi3mr.h" 54 55 static d_open_t mpi3mr_open; 56 static d_close_t mpi3mr_close; 57 static d_ioctl_t mpi3mr_ioctl; 58 static d_poll_t mpi3mr_poll; 59 60 static struct cdevsw mpi3mr_cdevsw = { 61 .d_version = D_VERSION, 62 .d_flags = 0, 63 .d_open = mpi3mr_open, 64 .d_close = mpi3mr_close, 65 .d_ioctl = mpi3mr_ioctl, 66 .d_poll = mpi3mr_poll, 67 .d_name = "mpi3mr", 68 }; 69 70 static struct mpi3mr_mgmt_info mpi3mr_mgmt_info; 71 72 static int 73 mpi3mr_open(struct cdev *dev, int flags, int fmt, struct thread *td) 74 { 75 76 return (0); 77 } 78 79 static int 80 mpi3mr_close(struct cdev *dev, int flags, int fmt, struct thread *td) 81 { 82 83 return (0); 84 } 85 86 /* 87 * mpi3mr_app_attach - Char device registration 88 * @sc: Adapter reference 89 * 90 * This function does char device registration. 91 * 92 * Return: 0 on success and proper error codes on failure 93 */ 94 int 95 mpi3mr_app_attach(struct mpi3mr_softc *sc) 96 { 97 98 /* Create a /dev entry for Avenger controller */ 99 sc->mpi3mr_cdev = make_dev(&mpi3mr_cdevsw, device_get_unit(sc->mpi3mr_dev), 100 UID_ROOT, GID_OPERATOR, 0640, "mpi3mr%d", 101 device_get_unit(sc->mpi3mr_dev)); 102 103 if (sc->mpi3mr_cdev == NULL) 104 return (ENOMEM); 105 106 sc->mpi3mr_cdev->si_drv1 = sc; 107 108 /* Assign controller instance to mgmt_info structure */ 109 if (device_get_unit(sc->mpi3mr_dev) == 0) 110 memset(&mpi3mr_mgmt_info, 0, sizeof(mpi3mr_mgmt_info)); 111 mpi3mr_mgmt_info.count++; 112 mpi3mr_mgmt_info.sc_ptr[mpi3mr_mgmt_info.max_index] = sc; 113 mpi3mr_mgmt_info.max_index++; 114 115 return (0); 116 } 117 118 void 119 mpi3mr_app_detach(struct mpi3mr_softc *sc) 120 { 121 U8 i = 0; 122 123 if (sc->mpi3mr_cdev == NULL) 124 return; 125 126 destroy_dev(sc->mpi3mr_cdev); 127 for (i = 0; i < mpi3mr_mgmt_info.max_index; i++) { 128 if (mpi3mr_mgmt_info.sc_ptr[i] == sc) { 129 mpi3mr_mgmt_info.count--; 130 mpi3mr_mgmt_info.sc_ptr[i] = NULL; 131 break; 132 } 133 } 134 return; 135 } 136 137 static int 138 mpi3mr_poll(struct cdev *dev, int poll_events, struct thread *td) 139 { 140 int revents = 0; 141 struct mpi3mr_softc *sc = NULL; 142 sc = dev->si_drv1; 143 144 if ((poll_events & (POLLIN | POLLRDNORM)) && 145 (sc->mpi3mr_aen_triggered)) 146 revents |= poll_events & (POLLIN | POLLRDNORM); 147 148 if (revents == 0) { 149 if (poll_events & (POLLIN | POLLRDNORM)) { 150 sc->mpi3mr_poll_waiting = 1; 151 selrecord(td, &sc->mpi3mr_select); 152 } 153 } 154 return revents; 155 } 156 157 /** 158 * mpi3mr_app_get_adp_instancs - Get Adapter instance 159 * @mrioc_id: Adapter ID 160 * 161 * This fucnction searches the Adapter reference with mrioc_id 162 * upon found, returns the adapter reference otherwise returns 163 * the NULL 164 * 165 * Return: Adapter reference on success and NULL on failure 166 */ 167 static struct mpi3mr_softc * 168 mpi3mr_app_get_adp_instance(U8 mrioc_id) 169 { 170 struct mpi3mr_softc *sc = NULL; 171 172 if (mrioc_id >= mpi3mr_mgmt_info.max_index) 173 return NULL; 174 175 sc = mpi3mr_mgmt_info.sc_ptr[mrioc_id]; 176 return sc; 177 } 178 179 static int 180 mpi3mr_app_construct_nvme_sgl(struct mpi3mr_softc *sc, 181 Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request, 182 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt) 183 { 184 struct mpi3mr_nvme_pt_sge *nvme_sgl; 185 U64 sgl_dma; 186 U8 count; 187 U16 available_sges = 0, i; 188 U32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge); 189 size_t length = 0; 190 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 191 U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) << 192 sc->facts.sge_mod_shift) << 32); 193 U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) << 194 sc->facts.sge_mod_shift) << 32; 195 196 U32 size; 197 198 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) 199 ((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_SGL_OFFSET); 200 201 /* 202 * Not all commands require a data transfer. If no data, just return 203 * without constructing any SGL. 204 */ 205 for (count = 0; count < bufcnt; count++, dma_buff++) { 206 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 207 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 208 length = dma_buff->kern_buf_len; 209 break; 210 } 211 } 212 if (!length || !dma_buff->num_dma_desc) 213 return 0; 214 215 if (dma_buff->num_dma_desc == 1) { 216 available_sges = 1; 217 goto build_sges; 218 } 219 sgl_dma = (U64)sc->ioctl_chain_sge.dma_addr; 220 221 if (sgl_dma & sgemod_mask) { 222 printf(IOCNAME "NVMe SGL address collides with SGEModifier\n",sc->name); 223 return -1; 224 } 225 226 sgl_dma &= ~sgemod_mask; 227 sgl_dma |= sgemod_val; 228 229 memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size); 230 available_sges = sc->ioctl_chain_sge.size / sge_element_size; 231 if (available_sges < dma_buff->num_dma_desc) 232 return -1; 233 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); 234 nvme_sgl->base_addr = sgl_dma; 235 size = dma_buff->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge); 236 nvme_sgl->length = htole32(size); 237 nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT; 238 239 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) sc->ioctl_chain_sge.addr; 240 241 build_sges: 242 for (i = 0; i < dma_buff->num_dma_desc; i++) { 243 sgl_dma = htole64(dma_buff->dma_desc[i].dma_addr); 244 if (sgl_dma & sgemod_mask) { 245 printf("%s: SGL address collides with SGE modifier\n", 246 __func__); 247 return -1; 248 } 249 250 sgl_dma &= ~sgemod_mask; 251 sgl_dma |= sgemod_val; 252 253 nvme_sgl->base_addr = sgl_dma; 254 nvme_sgl->length = htole32(dma_buff->dma_desc[i].size); 255 nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT; 256 nvme_sgl++; 257 available_sges--; 258 } 259 260 return 0; 261 } 262 263 static int 264 mpi3mr_app_build_nvme_prp(struct mpi3mr_softc *sc, 265 Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request, 266 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt) 267 { 268 int prp_size = MPI3MR_NVME_PRP_SIZE; 269 U64 *prp_entry, *prp1_entry, *prp2_entry; 270 U64 *prp_page; 271 bus_addr_t prp_entry_dma, prp_page_dma, dma_addr; 272 U32 offset, entry_len, dev_pgsz; 273 U32 page_mask_result, page_mask; 274 size_t length = 0, desc_len; 275 U8 count; 276 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 277 U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) << 278 sc->facts.sge_mod_shift) << 32); 279 U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) << 280 sc->facts.sge_mod_shift) << 32; 281 U16 dev_handle = nvme_encap_request->DevHandle; 282 struct mpi3mr_target *tgtdev; 283 U16 desc_count = 0; 284 285 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle); 286 if (!tgtdev) { 287 printf(IOCNAME "EncapNVMe Error: Invalid DevHandle 0x%02x\n", sc->name, 288 dev_handle); 289 return -1; 290 } 291 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 292 printf(IOCNAME "%s: NVME device page size is zero for handle 0x%04x\n", 293 sc->name, __func__, dev_handle); 294 return -1; 295 } 296 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 297 298 page_mask = dev_pgsz - 1; 299 300 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE){ 301 printf("%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n", 302 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle); 303 return -1; 304 } 305 306 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz){ 307 printf("%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n", 308 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle); 309 return -1; 310 } 311 312 /* 313 * Not all commands require a data transfer. If no data, just return 314 * without constructing any PRP. 315 */ 316 for (count = 0; count < bufcnt; count++, dma_buff++) { 317 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 318 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 319 length = dma_buff->kern_buf_len; 320 break; 321 } 322 } 323 if (!length || !dma_buff->num_dma_desc) 324 return 0; 325 326 for (count = 0; count < dma_buff->num_dma_desc; count++) { 327 dma_addr = dma_buff->dma_desc[count].dma_addr; 328 if (dma_addr & page_mask) { 329 printf("%s:dma_addr 0x%lu is not aligned with page size 0x%x\n", 330 __func__, dma_addr, dev_pgsz); 331 return -1; 332 } 333 } 334 335 dma_addr = dma_buff->dma_desc[0].dma_addr; 336 desc_len = dma_buff->dma_desc[0].size; 337 338 sc->nvme_encap_prp_sz = 0; 339 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */ 340 4, 0, /* algnmnt, boundary */ 341 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 342 BUS_SPACE_MAXADDR, /* highaddr */ 343 NULL, NULL, /* filter, filterarg */ 344 dev_pgsz, /* maxsize */ 345 1, /* nsegments */ 346 dev_pgsz, /* maxsegsize */ 347 0, /* flags */ 348 NULL, NULL, /* lockfunc, lockarg */ 349 &sc->nvme_encap_prp_list_dmatag)) { 350 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create ioctl NVME kernel buffer dma tag\n"); 351 return (ENOMEM); 352 } 353 354 if (bus_dmamem_alloc(sc->nvme_encap_prp_list_dmatag, (void **)&sc->nvme_encap_prp_list, 355 BUS_DMA_NOWAIT, &sc->nvme_encap_prp_list_dma_dmamap)) { 356 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate ioctl NVME dma memory\n"); 357 return (ENOMEM); 358 } 359 360 bzero(sc->nvme_encap_prp_list, dev_pgsz); 361 bus_dmamap_load(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap, 362 sc->nvme_encap_prp_list, dev_pgsz, mpi3mr_memaddr_cb, &sc->nvme_encap_prp_list_dma, 363 0); 364 365 if (!sc->nvme_encap_prp_list) { 366 printf(IOCNAME "%s:%d Cannot load ioctl NVME dma memory for size: %d\n", sc->name, 367 __func__, __LINE__, dev_pgsz); 368 goto err_out; 369 } 370 sc->nvme_encap_prp_sz = dev_pgsz; 371 372 /* 373 * Set pointers to PRP1 and PRP2, which are in the NVMe command. 374 * PRP1 is located at a 24 byte offset from the start of the NVMe 375 * command. Then set the current PRP entry pointer to PRP1. 376 */ 377 prp1_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP1_OFFSET); 378 prp2_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP2_OFFSET); 379 prp_entry = prp1_entry; 380 /* 381 * For the PRP entries, use the specially allocated buffer of 382 * contiguous memory. 383 */ 384 prp_page = sc->nvme_encap_prp_list; 385 prp_page_dma = sc->nvme_encap_prp_list_dma; 386 387 /* 388 * Check if we are within 1 entry of a page boundary we don't 389 * want our first entry to be a PRP List entry. 390 */ 391 page_mask_result = (uintptr_t)((U8 *)prp_page + prp_size) & page_mask; 392 if (!page_mask_result) { 393 printf(IOCNAME "PRP Page is not page aligned\n", sc->name); 394 goto err_out; 395 } 396 397 /* 398 * Set PRP physical pointer, which initially points to the current PRP 399 * DMA memory page. 400 */ 401 prp_entry_dma = prp_page_dma; 402 403 404 /* Loop while the length is not zero. */ 405 while (length) { 406 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 407 if (!page_mask_result && (length > dev_pgsz)) { 408 printf(IOCNAME "Single PRP page is not sufficient\n", sc->name); 409 goto err_out; 410 } 411 412 /* Need to handle if entry will be part of a page. */ 413 offset = dma_addr & page_mask; 414 entry_len = dev_pgsz - offset; 415 416 if (prp_entry == prp1_entry) { 417 /* 418 * Must fill in the first PRP pointer (PRP1) before 419 * moving on. 420 */ 421 *prp1_entry = dma_addr; 422 if (*prp1_entry & sgemod_mask) { 423 printf(IOCNAME "PRP1 address collides with SGEModifier\n", sc->name); 424 goto err_out; 425 } 426 *prp1_entry &= ~sgemod_mask; 427 *prp1_entry |= sgemod_val; 428 429 /* 430 * Now point to the second PRP entry within the 431 * command (PRP2). 432 */ 433 prp_entry = prp2_entry; 434 } else if (prp_entry == prp2_entry) { 435 /* 436 * Should the PRP2 entry be a PRP List pointer or just 437 * a regular PRP pointer? If there is more than one 438 * more page of data, must use a PRP List pointer. 439 */ 440 if (length > dev_pgsz) { 441 /* 442 * PRP2 will contain a PRP List pointer because 443 * more PRP's are needed with this command. The 444 * list will start at the beginning of the 445 * contiguous buffer. 446 */ 447 *prp2_entry = prp_entry_dma; 448 if (*prp2_entry & sgemod_mask) { 449 printf(IOCNAME "PRP list address collides with SGEModifier\n", sc->name); 450 goto err_out; 451 } 452 *prp2_entry &= ~sgemod_mask; 453 *prp2_entry |= sgemod_val; 454 455 /* 456 * The next PRP Entry will be the start of the 457 * first PRP List. 458 */ 459 prp_entry = prp_page; 460 continue; 461 } else { 462 /* 463 * After this, the PRP Entries are complete. 464 * This command uses 2 PRP's and no PRP list. 465 */ 466 *prp2_entry = dma_addr; 467 if (*prp2_entry & sgemod_mask) { 468 printf(IOCNAME "PRP2 address collides with SGEModifier\n", sc->name); 469 goto err_out; 470 } 471 *prp2_entry &= ~sgemod_mask; 472 *prp2_entry |= sgemod_val; 473 } 474 } else { 475 /* 476 * Put entry in list and bump the addresses. 477 * 478 * After PRP1 and PRP2 are filled in, this will fill in 479 * all remaining PRP entries in a PRP List, one per 480 * each time through the loop. 481 */ 482 *prp_entry = dma_addr; 483 if (*prp_entry & sgemod_mask) { 484 printf(IOCNAME "PRP address collides with SGEModifier\n", sc->name); 485 goto err_out; 486 } 487 *prp_entry &= ~sgemod_mask; 488 *prp_entry |= sgemod_val; 489 prp_entry++; 490 prp_entry_dma += prp_size; 491 } 492 493 /* Decrement length accounting for last partial page. */ 494 if (entry_len >= length) 495 length = 0; 496 else { 497 if (entry_len <= desc_len) { 498 dma_addr += entry_len; 499 desc_len -= entry_len; 500 } 501 if (!desc_len) { 502 if ((++desc_count) >= 503 dma_buff->num_dma_desc) { 504 printf("%s: Invalid len %ld while building PRP\n", 505 __func__, length); 506 goto err_out; 507 } 508 dma_addr = 509 dma_buff->dma_desc[desc_count].dma_addr; 510 desc_len = 511 dma_buff->dma_desc[desc_count].size; 512 } 513 length -= entry_len; 514 } 515 } 516 return 0; 517 err_out: 518 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 519 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 520 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 521 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 522 sc->nvme_encap_prp_list = NULL; 523 } 524 return -1; 525 } 526 527 /** 528 + * mpi3mr_map_data_buffer_dma - build dma descriptors for data 529 + * buffers 530 + * @sc: Adapter instance reference 531 + * @dma_buff: buffer map descriptor 532 + * @desc_count: Number of already consumed dma descriptors 533 + * 534 + * This function computes how many pre-allocated DMA descriptors 535 + * are required for the given data buffer and if those number of 536 + * descriptors are free, then setup the mapping of the scattered 537 + * DMA address to the given data buffer, if the data direction 538 + * of the buffer is DATA_OUT then the actual data is copied to 539 + * the DMA buffers 540 + * 541 + * Return: 0 on success, -1 on failure 542 + */ 543 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_softc *sc, 544 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 545 U8 desc_count) 546 { 547 U16 i, needed_desc = (dma_buffers->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE); 548 U32 buf_len = dma_buffers->kern_buf_len, copied_len = 0; 549 550 if (dma_buffers->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE) 551 needed_desc++; 552 553 if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) { 554 printf("%s: DMA descriptor mapping error %d:%d:%d\n", 555 __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE); 556 return -1; 557 } 558 559 dma_buffers->dma_desc = malloc(sizeof(*dma_buffers->dma_desc) * needed_desc, 560 M_MPI3MR, M_NOWAIT | M_ZERO); 561 if (!dma_buffers->dma_desc) 562 return -1; 563 564 for (i = 0; i < needed_desc; i++, desc_count++) { 565 566 dma_buffers->dma_desc[i].addr = sc->ioctl_sge[desc_count].addr; 567 dma_buffers->dma_desc[i].dma_addr = sc->ioctl_sge[desc_count].dma_addr; 568 569 if (buf_len < sc->ioctl_sge[desc_count].size) 570 dma_buffers->dma_desc[i].size = buf_len; 571 else 572 dma_buffers->dma_desc[i].size = sc->ioctl_sge[desc_count].size; 573 574 buf_len -= dma_buffers->dma_desc[i].size; 575 memset(dma_buffers->dma_desc[i].addr, 0, sc->ioctl_sge[desc_count].size); 576 577 if (dma_buffers->data_dir == MPI3MR_APP_DDO) { 578 copyin(((U8 *)dma_buffers->user_buf + copied_len), 579 dma_buffers->dma_desc[i].addr, 580 dma_buffers->dma_desc[i].size); 581 copied_len += dma_buffers->dma_desc[i].size; 582 } 583 } 584 585 dma_buffers->num_dma_desc = needed_desc; 586 587 return 0; 588 } 589 590 static unsigned int 591 mpi3mr_app_get_nvme_data_fmt(Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request) 592 { 593 U8 format = 0; 594 595 format = ((nvme_encap_request->Command[0] & 0xc000) >> 14); 596 return format; 597 } 598 599 static inline U16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 600 U8 bufcnt) 601 { 602 U16 i, sge_count = 0; 603 for (i=0; i < bufcnt; i++, dma_buffers++) { 604 if ((dma_buffers->data_dir == MPI3MR_APP_DDN) || 605 dma_buffers->kern_buf) 606 continue; 607 sge_count += dma_buffers->num_dma_desc; 608 if (!dma_buffers->num_dma_desc) 609 sge_count++; 610 } 611 return sge_count; 612 } 613 614 static int 615 mpi3mr_app_construct_sgl(struct mpi3mr_softc *sc, U8 *mpi_request, U32 sgl_offset, 616 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 617 U8 bufcnt, U8 is_rmc, U8 is_rmr, U8 num_datasges) 618 { 619 U8 *sgl = (mpi_request + sgl_offset), count = 0; 620 Mpi3RequestHeader_t *mpi_header = (Mpi3RequestHeader_t *)mpi_request; 621 Mpi3MgmtPassthroughRequest_t *rmgmt_req = 622 (Mpi3MgmtPassthroughRequest_t *)mpi_request; 623 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 624 U8 flag, sgl_flags, sgl_flags_eob, sgl_flags_last, last_chain_sgl_flags; 625 U16 available_sges, i, sges_needed; 626 U32 sge_element_size = sizeof(struct _MPI3_SGE_COMMON); 627 bool chain_used = false; 628 629 sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 630 MPI3_SGE_FLAGS_DLAS_SYSTEM ; 631 sgl_flags_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER; 632 sgl_flags_last = sgl_flags_eob | MPI3_SGE_FLAGS_END_OF_LIST; 633 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 634 MPI3_SGE_FLAGS_DLAS_SYSTEM; 635 636 sges_needed = mpi3mr_total_num_ioctl_sges(dma_buffers, bufcnt); 637 638 if (is_rmc) { 639 mpi3mr_add_sg_single(&rmgmt_req->CommandSGL, 640 sgl_flags_last, dma_buff->kern_buf_len, 641 dma_buff->kern_buf_dma); 642 sgl = (U8 *) dma_buff->kern_buf + dma_buff->user_buf_len; 643 available_sges = (dma_buff->kern_buf_len - 644 dma_buff->user_buf_len) / sge_element_size; 645 if (sges_needed > available_sges) 646 return -1; 647 chain_used = true; 648 dma_buff++; 649 count++; 650 if (is_rmr) { 651 mpi3mr_add_sg_single(&rmgmt_req->ResponseSGL, 652 sgl_flags_last, dma_buff->kern_buf_len, 653 dma_buff->kern_buf_dma); 654 dma_buff++; 655 count++; 656 } else 657 mpi3mr_build_zero_len_sge( 658 &rmgmt_req->ResponseSGL); 659 if (num_datasges) { 660 i = 0; 661 goto build_sges; 662 } 663 } else { 664 if (sgl_offset >= MPI3MR_AREQ_FRAME_SZ) 665 return -1; 666 available_sges = (MPI3MR_AREQ_FRAME_SZ - sgl_offset) / 667 sge_element_size; 668 if (!available_sges) 669 return -1; 670 } 671 672 if (!num_datasges) { 673 mpi3mr_build_zero_len_sge(sgl); 674 return 0; 675 } 676 677 if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) { 678 if ((sges_needed > 2) || (sges_needed > available_sges)) 679 return -1; 680 for (; count < bufcnt; count++, dma_buff++) { 681 if ((dma_buff->data_dir == MPI3MR_APP_DDN) || 682 !dma_buff->num_dma_desc) 683 continue; 684 mpi3mr_add_sg_single(sgl, sgl_flags_last, 685 dma_buff->dma_desc[0].size, 686 dma_buff->dma_desc[0].dma_addr); 687 sgl += sge_element_size; 688 } 689 return 0; 690 } 691 i = 0; 692 693 build_sges: 694 for (; count < bufcnt; count++, dma_buff++) { 695 if (dma_buff->data_dir == MPI3MR_APP_DDN) 696 continue; 697 if (!dma_buff->num_dma_desc) { 698 if (chain_used && !available_sges) 699 return -1; 700 if (!chain_used && (available_sges == 1) && 701 (sges_needed > 1)) 702 goto setup_chain; 703 flag = sgl_flags_eob; 704 if (num_datasges == 1) 705 flag = sgl_flags_last; 706 mpi3mr_add_sg_single(sgl, flag, 0, 0); 707 sgl += sge_element_size; 708 available_sges--; 709 sges_needed--; 710 num_datasges--; 711 continue; 712 } 713 for (; i < dma_buff->num_dma_desc; i++) { 714 if (chain_used && !available_sges) 715 return -1; 716 if (!chain_used && (available_sges == 1) && 717 (sges_needed > 1)) 718 goto setup_chain; 719 flag = sgl_flags; 720 if (i == (dma_buff->num_dma_desc - 1)) { 721 if (num_datasges == 1) 722 flag = sgl_flags_last; 723 else 724 flag = sgl_flags_eob; 725 } 726 727 mpi3mr_add_sg_single(sgl, flag, 728 dma_buff->dma_desc[i].size, 729 dma_buff->dma_desc[i].dma_addr); 730 sgl += sge_element_size; 731 available_sges--; 732 sges_needed--; 733 } 734 num_datasges--; 735 i = 0; 736 } 737 return 0; 738 739 setup_chain: 740 available_sges = sc->ioctl_chain_sge.size / sge_element_size; 741 if (sges_needed > available_sges) 742 return -1; 743 mpi3mr_add_sg_single(sgl, last_chain_sgl_flags, 744 (sges_needed * sge_element_size), sc->ioctl_chain_sge.dma_addr); 745 memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size); 746 sgl = (U8 *)sc->ioctl_chain_sge.addr; 747 chain_used = true; 748 goto build_sges; 749 } 750 751 752 /** 753 * mpi3mr_app_mptcmds - MPI Pass through IOCTL handler 754 * @dev: char device 755 * @cmd: IOCTL command 756 * @arg: User data payload buffer for the IOCTL 757 * @flag: flags 758 * @thread: threads 759 * 760 * This function is the top level handler for MPI Pass through 761 * IOCTL, this does basic validation of the input data buffers, 762 * identifies the given buffer types and MPI command, allocates 763 * DMAable memory for user given buffers, construstcs SGL 764 * properly and passes the command to the firmware. 765 * 766 * Once the MPI command is completed the driver copies the data 767 * if any and reply, sense information to user provided buffers. 768 * If the command is timed out then issues controller reset 769 * prior to returning. 770 * 771 * Return: 0 on success and proper error codes on failure 772 */ 773 static long 774 mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg, 775 int flag, struct thread *td) 776 { 777 long rval = EINVAL; 778 U8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; 779 U8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF; 780 U16 desc_count = 0; 781 U8 nvme_fmt = 0; 782 U32 tmplen = 0, erbsz = MPI3MR_SENSEBUF_SZ, din_sz = 0, dout_sz = 0; 783 U8 *kern_erb = NULL; 784 U8 *mpi_request = NULL; 785 Mpi3RequestHeader_t *mpi_header = NULL; 786 Mpi3PELReqActionGetCount_t *pel = NULL; 787 Mpi3StatusReplyDescriptor_t *status_desc = NULL; 788 struct mpi3mr_softc *sc = NULL; 789 struct mpi3mr_ioctl_buf_entry_list *buffer_list = NULL; 790 struct mpi3mr_buf_entry *buf_entries = NULL; 791 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers = NULL, *dma_buff = NULL; 792 struct mpi3mr_ioctl_mpirepbuf *mpirepbuf = NULL; 793 struct mpi3mr_ioctl_mptcmd *karg = (struct mpi3mr_ioctl_mptcmd *)uarg; 794 795 796 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 797 if (!sc) 798 return ENODEV; 799 800 if (!sc->ioctl_sges_allocated) { 801 printf("%s: DMA memory was not allocated\n", __func__); 802 return ENOMEM; 803 } 804 805 if (karg->timeout < MPI3MR_IOCTL_DEFAULT_TIMEOUT) 806 karg->timeout = MPI3MR_IOCTL_DEFAULT_TIMEOUT; 807 808 if (!karg->mpi_msg_size || !karg->buf_entry_list_size) { 809 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 810 __func__, __LINE__); 811 return rval; 812 } 813 if ((karg->mpi_msg_size * 4) > MPI3MR_AREQ_FRAME_SZ) { 814 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 815 __func__, __LINE__); 816 return rval; 817 } 818 819 mpi_request = malloc(MPI3MR_AREQ_FRAME_SZ, M_MPI3MR, M_NOWAIT | M_ZERO); 820 if (!mpi_request) { 821 printf(IOCNAME "%s: memory allocation failed for mpi_request\n", sc->name, 822 __func__); 823 return ENOMEM; 824 } 825 826 mpi_header = (Mpi3RequestHeader_t *)mpi_request; 827 pel = (Mpi3PELReqActionGetCount_t *)mpi_request; 828 if (copyin(karg->mpi_msg_buf, mpi_request, (karg->mpi_msg_size * 4))) { 829 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 830 __FILE__, __LINE__, __func__); 831 rval = EFAULT; 832 goto out; 833 } 834 835 buffer_list = malloc(karg->buf_entry_list_size, M_MPI3MR, M_NOWAIT | M_ZERO); 836 if (!buffer_list) { 837 printf(IOCNAME "%s: memory allocation failed for buffer_list\n", sc->name, 838 __func__); 839 rval = ENOMEM; 840 goto out; 841 } 842 if (copyin(karg->buf_entry_list, buffer_list, karg->buf_entry_list_size)) { 843 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 844 __FILE__, __LINE__, __func__); 845 rval = EFAULT; 846 goto out; 847 } 848 if (!buffer_list->num_of_buf_entries) { 849 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 850 __func__, __LINE__); 851 rval = EINVAL; 852 goto out; 853 } 854 bufcnt = buffer_list->num_of_buf_entries; 855 dma_buffers = malloc((sizeof(*dma_buffers) * bufcnt), M_MPI3MR, M_NOWAIT | M_ZERO); 856 if (!dma_buffers) { 857 printf(IOCNAME "%s: memory allocation failed for dma_buffers\n", sc->name, 858 __func__); 859 rval = ENOMEM; 860 goto out; 861 } 862 buf_entries = buffer_list->buf_entry; 863 dma_buff = dma_buffers; 864 for (count = 0; count < bufcnt; count++, buf_entries++, dma_buff++) { 865 memset(dma_buff, 0, sizeof(*dma_buff)); 866 dma_buff->user_buf = buf_entries->buffer; 867 dma_buff->user_buf_len = buf_entries->buf_len; 868 869 switch (buf_entries->buf_type) { 870 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_CMD: 871 is_rmcb = 1; 872 if ((count != 0) || !buf_entries->buf_len) 873 invalid_be = 1; 874 dma_buff->data_dir = MPI3MR_APP_DDO; 875 break; 876 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_RESP: 877 is_rmrb = 1; 878 if (count != 1 || !is_rmcb || !buf_entries->buf_len) 879 invalid_be = 1; 880 dma_buff->data_dir = MPI3MR_APP_DDI; 881 break; 882 case MPI3MR_IOCTL_BUFTYPE_DATA_IN: 883 din_sz = dma_buff->user_buf_len; 884 din_cnt++; 885 if ((din_cnt > 1) && !is_rmcb) 886 invalid_be = 1; 887 dma_buff->data_dir = MPI3MR_APP_DDI; 888 break; 889 case MPI3MR_IOCTL_BUFTYPE_DATA_OUT: 890 dout_sz = dma_buff->user_buf_len; 891 dout_cnt++; 892 if ((dout_cnt > 1) && !is_rmcb) 893 invalid_be = 1; 894 dma_buff->data_dir = MPI3MR_APP_DDO; 895 break; 896 case MPI3MR_IOCTL_BUFTYPE_MPI_REPLY: 897 mpirep_offset = count; 898 dma_buff->data_dir = MPI3MR_APP_DDN; 899 if (!buf_entries->buf_len) 900 invalid_be = 1; 901 break; 902 case MPI3MR_IOCTL_BUFTYPE_ERR_RESPONSE: 903 erb_offset = count; 904 dma_buff->data_dir = MPI3MR_APP_DDN; 905 if (!buf_entries->buf_len) 906 invalid_be = 1; 907 break; 908 default: 909 invalid_be = 1; 910 break; 911 } 912 if (invalid_be) 913 break; 914 } 915 if (invalid_be) { 916 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 917 __func__, __LINE__); 918 rval = EINVAL; 919 goto out; 920 } 921 922 if (is_rmcb && ((din_sz + dout_sz) > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 923 printf("%s:%d: invalid data transfer size passed for function 0x%x" 924 "din_sz = %d, dout_size = %d\n", __func__, __LINE__, 925 mpi_header->Function, din_sz, dout_sz); 926 rval = EINVAL; 927 goto out; 928 } 929 930 if ((din_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE) || 931 (dout_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 932 printf("%s:%d: invalid data transfer size passed for function 0x%x" 933 "din_size=%d dout_size=%d\n", __func__, __LINE__, 934 mpi_header->Function, din_sz, dout_sz); 935 rval = EINVAL; 936 goto out; 937 } 938 939 if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) { 940 if ((din_sz > MPI3MR_IOCTL_SGE_SIZE) || 941 (dout_sz > MPI3MR_IOCTL_SGE_SIZE)) { 942 printf("%s:%d: invalid message size passed:%d:%d:%d:%d\n", 943 __func__, __LINE__, din_cnt, dout_cnt, din_sz, dout_sz); 944 rval = EINVAL; 945 goto out; 946 } 947 } 948 949 dma_buff = dma_buffers; 950 for (count = 0; count < bufcnt; count++, dma_buff++) { 951 952 dma_buff->kern_buf_len = dma_buff->user_buf_len; 953 954 if (is_rmcb && !count) { 955 dma_buff->kern_buf = sc->ioctl_chain_sge.addr; 956 dma_buff->kern_buf_len = sc->ioctl_chain_sge.size; 957 dma_buff->kern_buf_dma = sc->ioctl_chain_sge.dma_addr; 958 dma_buff->dma_desc = NULL; 959 dma_buff->num_dma_desc = 0; 960 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 961 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 962 if (copyin(dma_buff->user_buf, dma_buff->kern_buf, tmplen)) { 963 mpi3mr_dprint(sc, MPI3MR_ERROR, "failure at %s() line: %d", 964 __func__, __LINE__); 965 rval = EFAULT; 966 goto out; 967 } 968 } else if (is_rmrb && (count == 1)) { 969 dma_buff->kern_buf = sc->ioctl_resp_sge.addr; 970 dma_buff->kern_buf_len = sc->ioctl_resp_sge.size; 971 dma_buff->kern_buf_dma = sc->ioctl_resp_sge.dma_addr; 972 dma_buff->dma_desc = NULL; 973 dma_buff->num_dma_desc = 0; 974 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 975 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 976 dma_buff->kern_buf_len = tmplen; 977 } else { 978 if (!dma_buff->kern_buf_len) 979 continue; 980 if (mpi3mr_map_data_buffer_dma(sc, dma_buff, desc_count)) { 981 rval = ENOMEM; 982 mpi3mr_dprint(sc, MPI3MR_ERROR, "mapping data buffers failed" 983 "at %s() line: %d\n", __func__, __LINE__); 984 goto out; 985 } 986 desc_count += dma_buff->num_dma_desc; 987 } 988 } 989 990 if (erb_offset != 0xFF) { 991 kern_erb = malloc(erbsz, M_MPI3MR, M_NOWAIT | M_ZERO); 992 if (!kern_erb) { 993 printf(IOCNAME "%s:%d Cannot allocate memory for sense buffer\n", sc->name, 994 __func__, __LINE__); 995 rval = ENOMEM; 996 goto out; 997 } 998 } 999 1000 if (sc->ioctl_cmds.state & MPI3MR_CMD_PENDING) { 1001 printf(IOCNAME "Issue IOCTL: Ioctl command is in use/previous command is pending\n", 1002 sc->name); 1003 rval = EAGAIN; 1004 goto out; 1005 } 1006 1007 if (sc->unrecoverable) { 1008 printf(IOCNAME "Issue IOCTL: controller is in unrecoverable state\n", sc->name); 1009 rval = EFAULT; 1010 goto out; 1011 } 1012 1013 if (sc->reset_in_progress) { 1014 printf(IOCNAME "Issue IOCTL: reset in progress\n", sc->name); 1015 rval = EAGAIN; 1016 goto out; 1017 } 1018 if (sc->block_ioctls) { 1019 printf(IOCNAME "Issue IOCTL: IOCTLs are blocked\n", sc->name); 1020 rval = EAGAIN; 1021 goto out; 1022 } 1023 1024 if (mpi_header->Function != MPI3_FUNCTION_NVME_ENCAPSULATED) { 1025 if (mpi3mr_app_construct_sgl(sc, mpi_request, (karg->mpi_msg_size * 4), dma_buffers, 1026 bufcnt, is_rmcb, is_rmrb, (dout_cnt + din_cnt))) { 1027 printf(IOCNAME "Issue IOCTL: sgl build failed\n", sc->name); 1028 rval = EAGAIN; 1029 goto out; 1030 } 1031 1032 } else { 1033 nvme_fmt = mpi3mr_app_get_nvme_data_fmt( 1034 (Mpi3NVMeEncapsulatedRequest_t *)mpi_request); 1035 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { 1036 if (mpi3mr_app_build_nvme_prp(sc, 1037 (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1038 dma_buffers, bufcnt)) { 1039 rval = ENOMEM; 1040 goto out; 1041 } 1042 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || 1043 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { 1044 if (mpi3mr_app_construct_nvme_sgl(sc, (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1045 dma_buffers, bufcnt)) { 1046 rval = EINVAL; 1047 goto out; 1048 } 1049 } else { 1050 printf(IOCNAME "%s: Invalid NVMe Command Format\n", sc->name, 1051 __func__); 1052 rval = EINVAL; 1053 goto out; 1054 } 1055 } 1056 1057 sc->ioctl_cmds.state = MPI3MR_CMD_PENDING; 1058 sc->ioctl_cmds.is_waiting = 1; 1059 sc->ioctl_cmds.callback = NULL; 1060 sc->ioctl_cmds.is_senseprst = 0; 1061 sc->ioctl_cmds.sensebuf = kern_erb; 1062 memset((sc->ioctl_cmds.reply), 0, sc->reply_sz); 1063 mpi_header->HostTag = MPI3MR_HOSTTAG_IOCTLCMDS; 1064 init_completion(&sc->ioctl_cmds.completion); 1065 rval = mpi3mr_submit_admin_cmd(sc, mpi_request, MPI3MR_AREQ_FRAME_SZ); 1066 if (rval) { 1067 printf(IOCNAME "Issue IOCTL: Admin Post failed\n", sc->name); 1068 goto out_failed; 1069 } 1070 wait_for_completion_timeout(&sc->ioctl_cmds.completion, karg->timeout); 1071 1072 if (!(sc->ioctl_cmds.state & MPI3MR_CMD_COMPLETE)) { 1073 sc->ioctl_cmds.is_waiting = 0; 1074 printf(IOCNAME "Issue IOCTL: command timed out\n", sc->name); 1075 rval = EAGAIN; 1076 if (sc->ioctl_cmds.state & MPI3MR_CMD_RESET) 1077 goto out_failed; 1078 1079 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1080 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL_TIMEOUT; 1081 goto out_failed; 1082 } 1083 1084 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 1085 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 1086 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 1087 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 1088 sc->nvme_encap_prp_list = NULL; 1089 } 1090 1091 if (((sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1092 != MPI3_IOCSTATUS_SUCCESS) && 1093 (sc->mpi3mr_debug & MPI3MR_DEBUG_IOCTL)) { 1094 printf(IOCNAME "Issue IOCTL: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", sc->name, 1095 (sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1096 sc->ioctl_cmds.ioc_loginfo); 1097 } 1098 1099 if ((mpirep_offset != 0xFF) && 1100 dma_buffers[mpirep_offset].user_buf_len) { 1101 dma_buff = &dma_buffers[mpirep_offset]; 1102 dma_buff->kern_buf_len = (sizeof(*mpirepbuf) - 1 + 1103 sc->reply_sz); 1104 mpirepbuf = malloc(dma_buff->kern_buf_len, M_MPI3MR, M_NOWAIT | M_ZERO); 1105 1106 if (!mpirepbuf) { 1107 printf(IOCNAME "%s: failed obtaining a memory for mpi reply\n", sc->name, 1108 __func__); 1109 rval = ENOMEM; 1110 goto out_failed; 1111 } 1112 if (sc->ioctl_cmds.state & MPI3MR_CMD_REPLYVALID) { 1113 mpirepbuf->mpirep_type = 1114 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_ADDRESS; 1115 memcpy(mpirepbuf->repbuf, sc->ioctl_cmds.reply, sc->reply_sz); 1116 } else { 1117 mpirepbuf->mpirep_type = 1118 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_STATUS; 1119 status_desc = (Mpi3StatusReplyDescriptor_t *) 1120 mpirepbuf->repbuf; 1121 status_desc->IOCStatus = sc->ioctl_cmds.ioc_status; 1122 status_desc->IOCLogInfo = sc->ioctl_cmds.ioc_loginfo; 1123 } 1124 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 1125 if (copyout(mpirepbuf, dma_buff->user_buf, tmplen)) { 1126 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1127 __FILE__, __LINE__, __func__); 1128 rval = EFAULT; 1129 goto out_failed; 1130 } 1131 } 1132 1133 if (erb_offset != 0xFF && sc->ioctl_cmds.sensebuf && 1134 sc->ioctl_cmds.is_senseprst) { 1135 dma_buff = &dma_buffers[erb_offset]; 1136 tmplen = min(erbsz, dma_buff->user_buf_len); 1137 if (copyout(kern_erb, dma_buff->user_buf, tmplen)) { 1138 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1139 __FILE__, __LINE__, __func__); 1140 rval = EFAULT; 1141 goto out_failed; 1142 } 1143 } 1144 1145 dma_buff = dma_buffers; 1146 for (count = 0; count < bufcnt; count++, dma_buff++) { 1147 if ((count == 1) && is_rmrb) { 1148 if (copyout(dma_buff->kern_buf, dma_buff->user_buf,dma_buff->kern_buf_len)) { 1149 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1150 __FILE__, __LINE__, __func__); 1151 rval = EFAULT; 1152 goto out_failed; 1153 } 1154 } else if (dma_buff->data_dir == MPI3MR_APP_DDI) { 1155 tmplen = 0; 1156 for (desc_count = 0; desc_count < dma_buff->num_dma_desc; desc_count++) { 1157 if (copyout(dma_buff->dma_desc[desc_count].addr, 1158 (U8 *)dma_buff->user_buf+tmplen, 1159 dma_buff->dma_desc[desc_count].size)) { 1160 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1161 __FILE__, __LINE__, __func__); 1162 rval = EFAULT; 1163 goto out_failed; 1164 } 1165 tmplen += dma_buff->dma_desc[desc_count].size; 1166 } 1167 } 1168 } 1169 1170 if ((pel->Function == MPI3_FUNCTION_PERSISTENT_EVENT_LOG) && 1171 (pel->Action == MPI3_PEL_ACTION_GET_COUNT)) 1172 sc->mpi3mr_aen_triggered = 0; 1173 1174 out_failed: 1175 sc->ioctl_cmds.is_senseprst = 0; 1176 sc->ioctl_cmds.sensebuf = NULL; 1177 sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED; 1178 out: 1179 if (kern_erb) 1180 free(kern_erb, M_MPI3MR); 1181 if (buffer_list) 1182 free(buffer_list, M_MPI3MR); 1183 if (mpi_request) 1184 free(mpi_request, M_MPI3MR); 1185 if (dma_buffers) { 1186 dma_buff = dma_buffers; 1187 for (count = 0; count < bufcnt; count++, dma_buff++) { 1188 free(dma_buff->dma_desc, M_MPI3MR); 1189 } 1190 free(dma_buffers, M_MPI3MR); 1191 } 1192 if (mpirepbuf) 1193 free(mpirepbuf, M_MPI3MR); 1194 return rval; 1195 } 1196 1197 /** 1198 * mpi3mr_soft_reset_from_app - Trigger controller reset 1199 * @sc: Adapter instance reference 1200 * 1201 * This function triggers the controller reset from the 1202 * watchdog context and wait for it to complete. It will 1203 * come out of wait upon completion or timeout exaustion. 1204 * 1205 * Return: 0 on success and proper error codes on failure 1206 */ 1207 static long 1208 mpi3mr_soft_reset_from_app(struct mpi3mr_softc *sc) 1209 { 1210 1211 U32 timeout; 1212 1213 /* if reset is not in progress, trigger soft reset from watchdog context */ 1214 if (!sc->reset_in_progress) { 1215 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1216 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL; 1217 1218 /* Wait for soft reset to start */ 1219 timeout = 50; 1220 while (timeout--) { 1221 if (sc->reset_in_progress == 1) 1222 break; 1223 DELAY(100 * 1000); 1224 } 1225 if (!timeout) 1226 return EFAULT; 1227 } 1228 1229 /* Wait for soft reset to complete */ 1230 int i = 0; 1231 timeout = sc->ready_timeout; 1232 while (timeout--) { 1233 if (sc->reset_in_progress == 0) 1234 break; 1235 i++; 1236 if (!(i % 5)) { 1237 mpi3mr_dprint(sc, MPI3MR_INFO, 1238 "[%2ds]waiting for controller reset to be finished from %s\n", i, __func__); 1239 } 1240 DELAY(1000 * 1000); 1241 } 1242 1243 /* 1244 * In case of soft reset failure or not completed within stipulated time, 1245 * fail back to application. 1246 */ 1247 if ((!timeout || sc->reset.status)) 1248 return EFAULT; 1249 1250 return 0; 1251 } 1252 1253 1254 /** 1255 * mpi3mr_adp_reset - Issue controller reset 1256 * @sc: Adapter instance reference 1257 * @data_out_buf: User buffer with reset type 1258 * @data_out_sz: length of the user buffer. 1259 * 1260 * This function identifies the user provided reset type and 1261 * issues approporiate reset to the controller and wait for that 1262 * to complete and reinitialize the controller and then returns. 1263 * 1264 * Return: 0 on success and proper error codes on failure 1265 */ 1266 static long 1267 mpi3mr_adp_reset(struct mpi3mr_softc *sc, 1268 void *data_out_buf, U32 data_out_sz) 1269 { 1270 long rval = EINVAL; 1271 struct mpi3mr_ioctl_adpreset adpreset; 1272 1273 memset(&adpreset, 0, sizeof(adpreset)); 1274 1275 if (data_out_sz != sizeof(adpreset)) { 1276 printf(IOCNAME "Invalid user adpreset buffer size %s() line: %d\n", sc->name, 1277 __func__, __LINE__); 1278 goto out; 1279 } 1280 1281 if (copyin(data_out_buf, &adpreset, sizeof(adpreset))) { 1282 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1283 __func__, __LINE__); 1284 rval = EFAULT; 1285 goto out; 1286 } 1287 1288 switch (adpreset.reset_type) { 1289 case MPI3MR_IOCTL_ADPRESET_SOFT: 1290 sc->reset.ioctl_reset_snapdump = false; 1291 break; 1292 case MPI3MR_IOCTL_ADPRESET_DIAG_FAULT: 1293 sc->reset.ioctl_reset_snapdump = true; 1294 break; 1295 default: 1296 printf(IOCNAME "Unknown reset_type(0x%x) issued\n", sc->name, 1297 adpreset.reset_type); 1298 goto out; 1299 } 1300 rval = mpi3mr_soft_reset_from_app(sc); 1301 if (rval) 1302 printf(IOCNAME "reset handler returned error (0x%lx) for reset type 0x%x\n", 1303 sc->name, rval, adpreset.reset_type); 1304 1305 out: 1306 return rval; 1307 } 1308 1309 void 1310 mpi3mr_app_send_aen(struct mpi3mr_softc *sc) 1311 { 1312 sc->mpi3mr_aen_triggered = 1; 1313 if (sc->mpi3mr_poll_waiting) { 1314 selwakeup(&sc->mpi3mr_select); 1315 sc->mpi3mr_poll_waiting = 0; 1316 } 1317 return; 1318 } 1319 1320 void 1321 mpi3mr_pel_wait_complete(struct mpi3mr_softc *sc, 1322 struct mpi3mr_drvr_cmd *drvr_cmd) 1323 { 1324 U8 retry = 0; 1325 Mpi3PELReply_t *pel_reply = NULL; 1326 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1327 1328 if (drvr_cmd->state & MPI3MR_CMD_RESET) 1329 goto cleanup_drvrcmd; 1330 1331 if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) { 1332 printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__); 1333 goto out_failed; 1334 } 1335 pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply; 1336 1337 if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1338 || ((le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS) 1339 && (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_ABORTED))){ 1340 printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n", 1341 sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status), 1342 drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus)); 1343 retry = 1; 1344 } 1345 1346 if (retry) { 1347 if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1348 drvr_cmd->retry_count++; 1349 printf(IOCNAME "%s : PELWaitretry=%d\n", sc->name, 1350 __func__, drvr_cmd->retry_count); 1351 mpi3mr_issue_pel_wait(sc, drvr_cmd); 1352 return; 1353 } 1354 1355 printf(IOCNAME "%s :PELWait failed after all retries\n", sc->name, 1356 __func__); 1357 goto out_failed; 1358 } 1359 1360 mpi3mr_app_send_aen(sc); 1361 1362 if (!sc->pel_abort_requested) { 1363 sc->pel_cmds.retry_count = 0; 1364 mpi3mr_send_pel_getseq(sc, &sc->pel_cmds); 1365 } 1366 1367 return; 1368 out_failed: 1369 sc->pel_wait_pend = 0; 1370 cleanup_drvrcmd: 1371 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1372 drvr_cmd->callback = NULL; 1373 drvr_cmd->retry_count = 0; 1374 } 1375 1376 void 1377 mpi3mr_issue_pel_wait(struct mpi3mr_softc *sc, 1378 struct mpi3mr_drvr_cmd *drvr_cmd) 1379 { 1380 U8 retry_count = 0; 1381 Mpi3PELReqActionWait_t pel_wait; 1382 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1383 1384 sc->pel_abort_requested = 0; 1385 1386 memset(&pel_wait, 0, sizeof(pel_wait)); 1387 drvr_cmd->state = MPI3MR_CMD_PENDING; 1388 drvr_cmd->is_waiting = 0; 1389 drvr_cmd->callback = mpi3mr_pel_wait_complete; 1390 drvr_cmd->ioc_status = 0; 1391 drvr_cmd->ioc_loginfo = 0; 1392 pel_wait.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1393 pel_wait.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1394 pel_wait.Action = MPI3_PEL_ACTION_WAIT; 1395 pel_wait.StartingSequenceNumber = htole32(sc->newest_seqnum); 1396 pel_wait.Locale = htole16(sc->pel_locale); 1397 pel_wait.Class = htole16(sc->pel_class); 1398 pel_wait.WaitTime = MPI3_PEL_WAITTIME_INFINITE_WAIT; 1399 printf(IOCNAME "Issuing PELWait: seqnum %u class %u locale 0x%08x\n", 1400 sc->name, sc->newest_seqnum, sc->pel_class, sc->pel_locale); 1401 retry_pel_wait: 1402 if (mpi3mr_submit_admin_cmd(sc, &pel_wait, sizeof(pel_wait))) { 1403 printf(IOCNAME "%s: Issue PELWait IOCTL: Admin Post failed\n", sc->name, __func__); 1404 if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1405 retry_count++; 1406 goto retry_pel_wait; 1407 } 1408 goto out_failed; 1409 } 1410 return; 1411 out_failed: 1412 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1413 drvr_cmd->callback = NULL; 1414 drvr_cmd->retry_count = 0; 1415 sc->pel_wait_pend = 0; 1416 return; 1417 } 1418 1419 void 1420 mpi3mr_send_pel_getseq(struct mpi3mr_softc *sc, 1421 struct mpi3mr_drvr_cmd *drvr_cmd) 1422 { 1423 U8 retry_count = 0; 1424 U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1425 Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req; 1426 1427 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 1428 sc->pel_cmds.state = MPI3MR_CMD_PENDING; 1429 sc->pel_cmds.is_waiting = 0; 1430 sc->pel_cmds.ioc_status = 0; 1431 sc->pel_cmds.ioc_loginfo = 0; 1432 sc->pel_cmds.callback = mpi3mr_pel_getseq_complete; 1433 pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1434 pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1435 pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM; 1436 mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags, 1437 sc->pel_seq_number_sz, sc->pel_seq_number_dma); 1438 1439 retry_pel_getseq: 1440 if (mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req))) { 1441 printf(IOCNAME "%s: Issuing PEL GetSeq IOCTL: Admin Post failed\n", sc->name, __func__); 1442 if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1443 retry_count++; 1444 goto retry_pel_getseq; 1445 } 1446 goto out_failed; 1447 } 1448 return; 1449 out_failed: 1450 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1451 drvr_cmd->callback = NULL; 1452 drvr_cmd->retry_count = 0; 1453 sc->pel_wait_pend = 0; 1454 } 1455 1456 void 1457 mpi3mr_pel_getseq_complete(struct mpi3mr_softc *sc, 1458 struct mpi3mr_drvr_cmd *drvr_cmd) 1459 { 1460 U8 retry = 0; 1461 Mpi3PELReply_t *pel_reply = NULL; 1462 Mpi3PELSeq_t *pel_seq_num = (Mpi3PELSeq_t *)sc->pel_seq_number; 1463 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1464 1465 if (drvr_cmd->state & MPI3MR_CMD_RESET) 1466 goto cleanup_drvrcmd; 1467 1468 if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) { 1469 printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__); 1470 goto out_failed; 1471 } 1472 pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply; 1473 1474 if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1475 || (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)){ 1476 printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n", 1477 sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status), 1478 drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus)); 1479 retry = 1; 1480 } 1481 1482 if (retry) { 1483 if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1484 drvr_cmd->retry_count++; 1485 printf(IOCNAME "%s : PELGetSeqNUM retry=%d\n", sc->name, 1486 __func__, drvr_cmd->retry_count); 1487 mpi3mr_send_pel_getseq(sc, drvr_cmd); 1488 return; 1489 } 1490 printf(IOCNAME "%s :PELGetSeqNUM failed after all retries\n", 1491 sc->name, __func__); 1492 goto out_failed; 1493 } 1494 1495 sc->newest_seqnum = le32toh(pel_seq_num->Newest) + 1; 1496 drvr_cmd->retry_count = 0; 1497 mpi3mr_issue_pel_wait(sc, drvr_cmd); 1498 return; 1499 out_failed: 1500 sc->pel_wait_pend = 0; 1501 cleanup_drvrcmd: 1502 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1503 drvr_cmd->callback = NULL; 1504 drvr_cmd->retry_count = 0; 1505 } 1506 1507 static int 1508 mpi3mr_pel_getseq(struct mpi3mr_softc *sc) 1509 { 1510 int rval = 0; 1511 U8 sgl_flags = 0; 1512 Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req; 1513 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1514 1515 if (sc->reset_in_progress || sc->block_ioctls) { 1516 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1517 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1518 return -1; 1519 } 1520 1521 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 1522 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1523 sc->pel_cmds.state = MPI3MR_CMD_PENDING; 1524 sc->pel_cmds.is_waiting = 0; 1525 sc->pel_cmds.retry_count = 0; 1526 sc->pel_cmds.ioc_status = 0; 1527 sc->pel_cmds.ioc_loginfo = 0; 1528 sc->pel_cmds.callback = mpi3mr_pel_getseq_complete; 1529 pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1530 pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1531 pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM; 1532 mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags, 1533 sc->pel_seq_number_sz, sc->pel_seq_number_dma); 1534 1535 if ((rval = mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req)))) 1536 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1537 1538 return rval; 1539 } 1540 1541 int 1542 mpi3mr_pel_abort(struct mpi3mr_softc *sc) 1543 { 1544 int retval = 0; 1545 U16 pel_log_status; 1546 Mpi3PELReqActionAbort_t pel_abort_req; 1547 Mpi3PELReply_t *pel_reply = NULL; 1548 1549 if (sc->reset_in_progress || sc->block_ioctls) { 1550 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1551 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1552 return -1; 1553 } 1554 1555 memset(&pel_abort_req, 0, sizeof(pel_abort_req)); 1556 1557 mtx_lock(&sc->pel_abort_cmd.completion.lock); 1558 if (sc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { 1559 printf(IOCNAME "%s: PEL Abort command is in use\n", sc->name, __func__); 1560 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1561 return -1; 1562 } 1563 1564 sc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; 1565 sc->pel_abort_cmd.is_waiting = 1; 1566 sc->pel_abort_cmd.callback = NULL; 1567 pel_abort_req.HostTag = htole16(MPI3MR_HOSTTAG_PELABORT); 1568 pel_abort_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1569 pel_abort_req.Action = MPI3_PEL_ACTION_ABORT; 1570 pel_abort_req.AbortHostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1571 1572 sc->pel_abort_requested = 1; 1573 1574 init_completion(&sc->pel_abort_cmd.completion); 1575 retval = mpi3mr_submit_admin_cmd(sc, &pel_abort_req, sizeof(pel_abort_req)); 1576 if (retval) { 1577 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1578 sc->pel_abort_requested = 0; 1579 retval = -1; 1580 goto out_unlock; 1581 } 1582 wait_for_completion_timeout(&sc->pel_abort_cmd.completion, MPI3MR_INTADMCMD_TIMEOUT); 1583 1584 if (!(sc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { 1585 printf(IOCNAME "%s: PEL Abort command timedout\n",sc->name, __func__); 1586 sc->pel_abort_cmd.is_waiting = 0; 1587 retval = -1; 1588 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1589 sc->reset.reason = MPI3MR_RESET_FROM_PELABORT_TIMEOUT; 1590 goto out_unlock; 1591 } 1592 if (((GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1593 || (!(sc->pel_abort_cmd.state & MPI3MR_CMD_REPLYVALID))) { 1594 printf(IOCNAME "%s: PEL Abort command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 1595 sc->name, __func__, GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status), 1596 sc->pel_abort_cmd.ioc_loginfo); 1597 retval = -1; 1598 goto out_unlock; 1599 } 1600 1601 pel_reply = (Mpi3PELReply_t *)sc->pel_abort_cmd.reply; 1602 pel_log_status = le16toh(pel_reply->PELogStatus); 1603 if (pel_log_status != MPI3_PEL_STATUS_SUCCESS) { 1604 printf(IOCNAME "%s: PEL abort command failed, pel_status(0x%04x)\n", 1605 sc->name, __func__, pel_log_status); 1606 retval = -1; 1607 } 1608 1609 out_unlock: 1610 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1611 sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; 1612 return retval; 1613 } 1614 1615 /** 1616 * mpi3mr_pel_enable - Handler for PEL enable 1617 * @sc: Adapter instance reference 1618 * @data_out_buf: User buffer containing PEL enable data 1619 * @data_out_sz: length of the user buffer. 1620 * 1621 * This function is the handler for PEL enable driver IOCTL. 1622 * Validates the application given class and locale and if 1623 * requires aborts the existing PEL wait request and/or issues 1624 * new PEL wait request to the firmware and returns. 1625 * 1626 * Return: 0 on success and proper error codes on failure. 1627 */ 1628 static long 1629 mpi3mr_pel_enable(struct mpi3mr_softc *sc, 1630 void *data_out_buf, U32 data_out_sz) 1631 { 1632 long rval = EINVAL; 1633 U8 tmp_class; 1634 U16 tmp_locale; 1635 struct mpi3mr_ioctl_pel_enable pel_enable; 1636 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1637 1638 1639 if ((data_out_sz != sizeof(pel_enable) || 1640 (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT))) { 1641 printf(IOCNAME "%s: Invalid user pel_enable buffer size %u\n", 1642 sc->name, __func__, data_out_sz); 1643 goto out; 1644 } 1645 memset(&pel_enable, 0, sizeof(pel_enable)); 1646 if (copyin(data_out_buf, &pel_enable, sizeof(pel_enable))) { 1647 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1648 __func__, __LINE__); 1649 rval = EFAULT; 1650 goto out; 1651 } 1652 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { 1653 printf(IOCNAME "%s: out of range class %d\n", 1654 sc->name, __func__, pel_enable.pel_class); 1655 goto out; 1656 } 1657 1658 if (sc->pel_wait_pend) { 1659 if ((sc->pel_class <= pel_enable.pel_class) && 1660 !((sc->pel_locale & pel_enable.pel_locale) ^ 1661 pel_enable.pel_locale)) { 1662 rval = 0; 1663 goto out; 1664 } else { 1665 pel_enable.pel_locale |= sc->pel_locale; 1666 if (sc->pel_class < pel_enable.pel_class) 1667 pel_enable.pel_class = sc->pel_class; 1668 1669 if (mpi3mr_pel_abort(sc)) { 1670 printf(IOCNAME "%s: pel_abort failed, status(%ld)\n", 1671 sc->name, __func__, rval); 1672 goto out; 1673 } 1674 } 1675 } 1676 1677 tmp_class = sc->pel_class; 1678 tmp_locale = sc->pel_locale; 1679 sc->pel_class = pel_enable.pel_class; 1680 sc->pel_locale = pel_enable.pel_locale; 1681 sc->pel_wait_pend = 1; 1682 1683 if ((rval = mpi3mr_pel_getseq(sc))) { 1684 sc->pel_class = tmp_class; 1685 sc->pel_locale = tmp_locale; 1686 sc->pel_wait_pend = 0; 1687 printf(IOCNAME "%s: pel get sequence number failed, status(%ld)\n", 1688 sc->name, __func__, rval); 1689 } 1690 1691 out: 1692 return rval; 1693 } 1694 1695 void 1696 mpi3mr_app_save_logdata(struct mpi3mr_softc *sc, char *event_data, 1697 U16 event_data_size) 1698 { 1699 struct mpi3mr_log_data_entry *entry; 1700 U32 index = sc->log_data_buffer_index, sz; 1701 1702 if (!(sc->log_data_buffer)) 1703 return; 1704 1705 entry = (struct mpi3mr_log_data_entry *) 1706 (sc->log_data_buffer + (index * sc->log_data_entry_size)); 1707 entry->valid_entry = 1; 1708 sz = min(sc->log_data_entry_size, event_data_size); 1709 memcpy(entry->data, event_data, sz); 1710 sc->log_data_buffer_index = 1711 ((++index) % MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES); 1712 mpi3mr_app_send_aen(sc); 1713 } 1714 1715 /** 1716 * mpi3mr_get_logdata - Handler for get log data 1717 * @sc: Adapter instance reference 1718 * @data_in_buf: User buffer to copy the logdata entries 1719 * @data_in_sz: length of the user buffer. 1720 * 1721 * This function copies the log data entries to the user buffer 1722 * when log caching is enabled in the driver. 1723 * 1724 * Return: 0 on success and proper error codes on failure 1725 */ 1726 static long 1727 mpi3mr_get_logdata(struct mpi3mr_softc *sc, 1728 void *data_in_buf, U32 data_in_sz) 1729 { 1730 long rval = EINVAL; 1731 U16 num_entries = 0; 1732 U16 entry_sz = sc->log_data_entry_size; 1733 1734 if ((!sc->log_data_buffer) || (data_in_sz < entry_sz)) 1735 return rval; 1736 1737 num_entries = data_in_sz / entry_sz; 1738 if (num_entries > MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES) 1739 num_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1740 1741 if ((rval = copyout(sc->log_data_buffer, data_in_buf, (num_entries * entry_sz)))) { 1742 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1743 rval = EFAULT; 1744 } 1745 1746 return rval; 1747 } 1748 1749 /** 1750 * mpi3mr_logdata_enable - Handler for log data enable 1751 * @sc: Adapter instance reference 1752 * @data_in_buf: User buffer to copy the max logdata entry count 1753 * @data_in_sz: length of the user buffer. 1754 * 1755 * This function enables log data caching in the driver if not 1756 * already enabled and return the maximum number of log data 1757 * entries that can be cached in the driver. 1758 * 1759 * Return: 0 on success and proper error codes on failure 1760 */ 1761 static long 1762 mpi3mr_logdata_enable(struct mpi3mr_softc *sc, 1763 void *data_in_buf, U32 data_in_sz) 1764 { 1765 long rval = EINVAL; 1766 struct mpi3mr_ioctl_logdata_enable logdata_enable; 1767 1768 if (data_in_sz < sizeof(logdata_enable)) 1769 return rval; 1770 1771 if (sc->log_data_buffer) 1772 goto copy_data; 1773 1774 sc->log_data_entry_size = (sc->reply_sz - (sizeof(Mpi3EventNotificationReply_t) - 4)) 1775 + MPI3MR_IOCTL_LOGDATA_ENTRY_HEADER_SZ; 1776 1777 sc->log_data_buffer = malloc((MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES * sc->log_data_entry_size), 1778 M_MPI3MR, M_NOWAIT | M_ZERO); 1779 if (!sc->log_data_buffer) { 1780 printf(IOCNAME "%s log data buffer memory allocation failed\n", sc->name, __func__); 1781 return ENOMEM; 1782 } 1783 1784 sc->log_data_buffer_index = 0; 1785 1786 copy_data: 1787 memset(&logdata_enable, 0, sizeof(logdata_enable)); 1788 logdata_enable.max_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1789 1790 if ((rval = copyout(&logdata_enable, data_in_buf, sizeof(logdata_enable)))) { 1791 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1792 rval = EFAULT; 1793 } 1794 1795 return rval; 1796 } 1797 1798 /** 1799 * mpi3mr_get_change_count - Get topology change count 1800 * @sc: Adapter instance reference 1801 * @data_in_buf: User buffer to copy the change count 1802 * @data_in_sz: length of the user buffer. 1803 * 1804 * This function copies the toplogy change count provided by the 1805 * driver in events and cached in the driver to the user 1806 * provided buffer for the specific controller. 1807 * 1808 * Return: 0 on success and proper error codes on failure 1809 */ 1810 static long 1811 mpi3mr_get_change_count(struct mpi3mr_softc *sc, 1812 void *data_in_buf, U32 data_in_sz) 1813 { 1814 long rval = EINVAL; 1815 struct mpi3mr_ioctl_chgcnt chg_count; 1816 memset(&chg_count, 0, sizeof(chg_count)); 1817 1818 chg_count.change_count = sc->change_count; 1819 if (data_in_sz >= sizeof(chg_count)) { 1820 if ((rval = copyout(&chg_count, data_in_buf, sizeof(chg_count)))) { 1821 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1822 __LINE__, __func__); 1823 rval = EFAULT; 1824 } 1825 } 1826 return rval; 1827 } 1828 1829 /** 1830 * mpi3mr_get_alltgtinfo - Get all targets information 1831 * @sc: Adapter instance reference 1832 * @data_in_buf: User buffer to copy the target information 1833 * @data_in_sz: length of the user buffer. 1834 * 1835 * This function copies the driver managed target devices device 1836 * handle, persistent ID, bus ID and taret ID to the user 1837 * provided buffer for the specific controller. This function 1838 * also provides the number of devices managed by the driver for 1839 * the specific controller. 1840 * 1841 * Return: 0 on success and proper error codes on failure 1842 */ 1843 static long 1844 mpi3mr_get_alltgtinfo(struct mpi3mr_softc *sc, 1845 void *data_in_buf, U32 data_in_sz) 1846 { 1847 long rval = EINVAL; 1848 U8 get_count = 0; 1849 U16 i = 0, num_devices = 0; 1850 U32 min_entrylen = 0, kern_entrylen = 0, user_entrylen = 0; 1851 struct mpi3mr_target *tgtdev = NULL; 1852 struct mpi3mr_device_map_info *devmap_info = NULL; 1853 struct mpi3mr_cam_softc *cam_sc = sc->cam_sc; 1854 struct mpi3mr_ioctl_all_tgtinfo *all_tgtinfo = (struct mpi3mr_ioctl_all_tgtinfo *)data_in_buf; 1855 1856 if (data_in_sz < sizeof(uint32_t)) { 1857 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1858 __LINE__, __func__); 1859 goto out; 1860 } 1861 if (data_in_sz == sizeof(uint32_t)) 1862 get_count = 1; 1863 1864 if (TAILQ_EMPTY(&cam_sc->tgt_list)) { 1865 get_count = 1; 1866 goto copy_usrbuf; 1867 } 1868 1869 mtx_lock_spin(&cam_sc->sc->target_lock); 1870 TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) { 1871 num_devices++; 1872 } 1873 mtx_unlock_spin(&cam_sc->sc->target_lock); 1874 1875 if (get_count) 1876 goto copy_usrbuf; 1877 1878 kern_entrylen = num_devices * sizeof(*devmap_info); 1879 1880 devmap_info = malloc(kern_entrylen, M_MPI3MR, M_NOWAIT | M_ZERO); 1881 if (!devmap_info) { 1882 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1883 __LINE__, __func__); 1884 rval = ENOMEM; 1885 goto out; 1886 } 1887 memset((U8*)devmap_info, 0xFF, kern_entrylen); 1888 1889 mtx_lock_spin(&cam_sc->sc->target_lock); 1890 TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) { 1891 if (i < num_devices) { 1892 devmap_info[i].handle = tgtdev->dev_handle; 1893 devmap_info[i].per_id = tgtdev->per_id; 1894 /*n 1895 * For hidden/ugood device the target_id and bus_id should be 0xFFFFFFFF and 0xFF 1896 */ 1897 if (!tgtdev->exposed_to_os) { 1898 devmap_info[i].target_id = 0xFFFFFFFF; 1899 devmap_info[i].bus_id = 0xFF; 1900 } else { 1901 devmap_info[i].target_id = tgtdev->tid; 1902 devmap_info[i].bus_id = 0; 1903 } 1904 i++; 1905 } 1906 } 1907 num_devices = i; 1908 mtx_unlock_spin(&cam_sc->sc->target_lock); 1909 1910 copy_usrbuf: 1911 if (copyout(&num_devices, &all_tgtinfo->num_devices, sizeof(num_devices))) { 1912 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1913 __LINE__, __func__); 1914 rval = EFAULT; 1915 goto out; 1916 } 1917 user_entrylen = (data_in_sz - sizeof(uint32_t))/sizeof(*devmap_info); 1918 user_entrylen *= sizeof(*devmap_info); 1919 min_entrylen = min(user_entrylen, kern_entrylen); 1920 if (min_entrylen && (copyout(devmap_info, &all_tgtinfo->dmi, min_entrylen))) { 1921 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1922 __FILE__, __LINE__, __func__); 1923 rval = EFAULT; 1924 goto out; 1925 } 1926 rval = 0; 1927 out: 1928 if (devmap_info) 1929 free(devmap_info, M_MPI3MR); 1930 1931 return rval; 1932 } 1933 1934 /** 1935 * mpi3mr_get_tgtinfo - Get specific target information 1936 * @sc: Adapter instance reference 1937 * @karg: driver ponter to users payload buffer 1938 * 1939 * This function copies the driver managed specific target device 1940 * info like handle, persistent ID, bus ID and taret ID to the user 1941 * provided buffer for the specific controller. 1942 * 1943 * Return: 0 on success and proper error codes on failure 1944 */ 1945 static long 1946 mpi3mr_get_tgtinfo(struct mpi3mr_softc *sc, 1947 struct mpi3mr_ioctl_drvcmd *karg) 1948 { 1949 long rval = EINVAL; 1950 struct mpi3mr_target *tgtdev = NULL; 1951 struct mpi3mr_ioctl_tgtinfo tgtinfo; 1952 1953 memset(&tgtinfo, 0, sizeof(tgtinfo)); 1954 1955 if ((karg->data_out_size != sizeof(struct mpi3mr_ioctl_tgtinfo)) || 1956 (karg->data_in_size != sizeof(struct mpi3mr_ioctl_tgtinfo))) { 1957 printf(IOCNAME "Invalid user tgtinfo buffer size %s() line: %d\n", sc->name, 1958 __func__, __LINE__); 1959 goto out; 1960 } 1961 1962 if (copyin(karg->data_out_buf, &tgtinfo, sizeof(tgtinfo))) { 1963 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1964 __func__, __LINE__); 1965 rval = EFAULT; 1966 goto out; 1967 } 1968 1969 if ((tgtinfo.bus_id != 0xFF) && (tgtinfo.target_id != 0xFFFFFFFF)) { 1970 if ((tgtinfo.persistent_id != 0xFFFF) || 1971 (tgtinfo.dev_handle != 0xFFFF)) 1972 goto out; 1973 tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.target_id); 1974 } else if (tgtinfo.persistent_id != 0xFFFF) { 1975 if ((tgtinfo.bus_id != 0xFF) || 1976 (tgtinfo.dev_handle !=0xFFFF) || 1977 (tgtinfo.target_id != 0xFFFFFFFF)) 1978 goto out; 1979 tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.persistent_id); 1980 } else if (tgtinfo.dev_handle !=0xFFFF) { 1981 if ((tgtinfo.bus_id != 0xFF) || 1982 (tgtinfo.target_id != 0xFFFFFFFF) || 1983 (tgtinfo.persistent_id != 0xFFFF)) 1984 goto out; 1985 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tgtinfo.dev_handle); 1986 } 1987 if (!tgtdev) 1988 goto out; 1989 1990 tgtinfo.target_id = tgtdev->per_id; 1991 tgtinfo.bus_id = 0; 1992 tgtinfo.dev_handle = tgtdev->dev_handle; 1993 tgtinfo.persistent_id = tgtdev->per_id; 1994 tgtinfo.seq_num = 0; 1995 1996 if (copyout(&tgtinfo, karg->data_in_buf, sizeof(tgtinfo))) { 1997 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1998 __func__, __LINE__); 1999 rval = EFAULT; 2000 } 2001 2002 out: 2003 return rval; 2004 } 2005 2006 /** 2007 * mpi3mr_get_pciinfo - Get PCI info IOCTL handler 2008 * @sc: Adapter instance reference 2009 * @data_in_buf: User buffer to hold adapter information 2010 * @data_in_sz: length of the user buffer. 2011 * 2012 * This function provides the PCI spec information for the 2013 * given controller 2014 * 2015 * Return: 0 on success and proper error codes on failure 2016 */ 2017 static long 2018 mpi3mr_get_pciinfo(struct mpi3mr_softc *sc, 2019 void *data_in_buf, U32 data_in_sz) 2020 { 2021 long rval = EINVAL; 2022 U8 i; 2023 struct mpi3mr_ioctl_pciinfo pciinfo; 2024 memset(&pciinfo, 0, sizeof(pciinfo)); 2025 2026 for (i = 0; i < 64; i++) 2027 pciinfo.config_space[i] = pci_read_config(sc->mpi3mr_dev, (i * 4), 4); 2028 2029 if (data_in_sz >= sizeof(pciinfo)) { 2030 if ((rval = copyout(&pciinfo, data_in_buf, sizeof(pciinfo)))) { 2031 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2032 __FILE__, __LINE__, __func__); 2033 rval = EFAULT; 2034 } 2035 } 2036 return rval; 2037 } 2038 2039 /** 2040 * mpi3mr_get_adpinfo - Get adapter info IOCTL handler 2041 * @sc: Adapter instance reference 2042 * @data_in_buf: User buffer to hold adapter information 2043 * @data_in_sz: length of the user buffer. 2044 * 2045 * This function provides adapter information for the given 2046 * controller 2047 * 2048 * Return: 0 on success and proper error codes on failure 2049 */ 2050 static long 2051 mpi3mr_get_adpinfo(struct mpi3mr_softc *sc, 2052 void *data_in_buf, U32 data_in_sz) 2053 { 2054 long rval = EINVAL; 2055 struct mpi3mr_ioctl_adpinfo adpinfo; 2056 enum mpi3mr_iocstate ioc_state; 2057 memset(&adpinfo, 0, sizeof(adpinfo)); 2058 2059 adpinfo.adp_type = MPI3MR_IOCTL_ADPTYPE_AVGFAMILY; 2060 adpinfo.pci_dev_id = pci_get_device(sc->mpi3mr_dev); 2061 adpinfo.pci_dev_hw_rev = pci_read_config(sc->mpi3mr_dev, PCIR_REVID, 1); 2062 adpinfo.pci_subsys_dev_id = pci_get_subdevice(sc->mpi3mr_dev); 2063 adpinfo.pci_subsys_ven_id = pci_get_subvendor(sc->mpi3mr_dev); 2064 adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev);; 2065 adpinfo.pci_dev = pci_get_slot(sc->mpi3mr_dev); 2066 adpinfo.pci_func = pci_get_function(sc->mpi3mr_dev); 2067 adpinfo.pci_seg_id = pci_get_domain(sc->mpi3mr_dev); 2068 adpinfo.ioctl_ver = MPI3MR_IOCTL_VERSION; 2069 memcpy((U8 *)&adpinfo.driver_info, (U8 *)&sc->driver_info, sizeof(adpinfo.driver_info)); 2070 2071 ioc_state = mpi3mr_get_iocstate(sc); 2072 2073 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 2074 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_UNRECOVERABLE; 2075 else if (sc->reset_in_progress || sc->block_ioctls) 2076 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_IN_RESET; 2077 else if (ioc_state == MRIOC_STATE_FAULT) 2078 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_FAULT; 2079 else 2080 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_OPERATIONAL; 2081 2082 if (data_in_sz >= sizeof(adpinfo)) { 2083 if ((rval = copyout(&adpinfo, data_in_buf, sizeof(adpinfo)))) { 2084 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2085 __FILE__, __LINE__, __func__); 2086 rval = EFAULT; 2087 } 2088 } 2089 return rval; 2090 } 2091 /** 2092 * mpi3mr_app_drvrcmds - Driver IOCTL handler 2093 * @dev: char device 2094 * @cmd: IOCTL command 2095 * @arg: User data payload buffer for the IOCTL 2096 * @flag: flags 2097 * @thread: threads 2098 * 2099 * This function is the top level handler for driver commands, 2100 * this does basic validation of the buffer and identifies the 2101 * opcode and switches to correct sub handler. 2102 * 2103 * Return: 0 on success and proper error codes on failure 2104 */ 2105 2106 static int 2107 mpi3mr_app_drvrcmds(struct cdev *dev, u_long cmd, 2108 void *uarg, int flag, struct thread *td) 2109 { 2110 long rval = EINVAL; 2111 struct mpi3mr_softc *sc = NULL; 2112 struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)uarg; 2113 2114 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 2115 if (!sc) 2116 return ENODEV; 2117 2118 mtx_lock(&sc->ioctl_cmds.completion.lock); 2119 switch (karg->opcode) { 2120 case MPI3MR_DRVRIOCTL_OPCODE_ADPINFO: 2121 rval = mpi3mr_get_adpinfo(sc, karg->data_in_buf, karg->data_in_size); 2122 break; 2123 case MPI3MR_DRVRIOCTL_OPCODE_GETPCIINFO: 2124 rval = mpi3mr_get_pciinfo(sc, karg->data_in_buf, karg->data_in_size); 2125 break; 2126 case MPI3MR_DRVRIOCTL_OPCODE_TGTDEVINFO: 2127 rval = mpi3mr_get_tgtinfo(sc, karg); 2128 break; 2129 case MPI3MR_DRVRIOCTL_OPCODE_ALLTGTDEVINFO: 2130 rval = mpi3mr_get_alltgtinfo(sc, karg->data_in_buf, karg->data_in_size); 2131 break; 2132 case MPI3MR_DRVRIOCTL_OPCODE_GETCHGCNT: 2133 rval = mpi3mr_get_change_count(sc, karg->data_in_buf, karg->data_in_size); 2134 break; 2135 case MPI3MR_DRVRIOCTL_OPCODE_LOGDATAENABLE: 2136 rval = mpi3mr_logdata_enable(sc, karg->data_in_buf, karg->data_in_size); 2137 break; 2138 case MPI3MR_DRVRIOCTL_OPCODE_GETLOGDATA: 2139 rval = mpi3mr_get_logdata(sc, karg->data_in_buf, karg->data_in_size); 2140 break; 2141 case MPI3MR_DRVRIOCTL_OPCODE_PELENABLE: 2142 rval = mpi3mr_pel_enable(sc, karg->data_out_buf, karg->data_out_size); 2143 break; 2144 case MPI3MR_DRVRIOCTL_OPCODE_ADPRESET: 2145 rval = mpi3mr_adp_reset(sc, karg->data_out_buf, karg->data_out_size); 2146 break; 2147 case MPI3MR_DRVRIOCTL_OPCODE_UNKNOWN: 2148 default: 2149 printf("Unsupported drvr ioctl opcode 0x%x\n", karg->opcode); 2150 break; 2151 } 2152 mtx_unlock(&sc->ioctl_cmds.completion.lock); 2153 return rval; 2154 } 2155 /** 2156 * mpi3mr_ioctl - IOCTL Handler 2157 * @dev: char device 2158 * @cmd: IOCTL command 2159 * @arg: User data payload buffer for the IOCTL 2160 * @flag: flags 2161 * @thread: threads 2162 * 2163 * This is the IOCTL entry point which checks the command type and 2164 * executes proper sub handler specific for the command. 2165 * 2166 * Return: 0 on success and proper error codes on failure 2167 */ 2168 static int 2169 mpi3mr_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2170 { 2171 int rval = EINVAL; 2172 2173 struct mpi3mr_softc *sc = NULL; 2174 struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)arg; 2175 2176 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 2177 2178 if (!sc) 2179 return ENODEV; 2180 2181 mpi3mr_atomic_inc(&sc->pend_ioctls); 2182 2183 2184 if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) { 2185 mpi3mr_dprint(sc, MPI3MR_INFO, 2186 "Return back IOCTL, shutdown is in progress\n"); 2187 mpi3mr_atomic_dec(&sc->pend_ioctls); 2188 return ENODEV; 2189 } 2190 2191 switch (cmd) { 2192 case MPI3MRDRVCMD: 2193 rval = mpi3mr_app_drvrcmds(dev, cmd, arg, flag, td); 2194 break; 2195 case MPI3MRMPTCMD: 2196 mtx_lock(&sc->ioctl_cmds.completion.lock); 2197 rval = mpi3mr_app_mptcmds(dev, cmd, arg, flag, td); 2198 mtx_unlock(&sc->ioctl_cmds.completion.lock); 2199 break; 2200 default: 2201 printf("%s:Unsupported ioctl cmd (0x%08lx)\n", MPI3MR_DRIVER_NAME, cmd); 2202 break; 2203 } 2204 2205 mpi3mr_atomic_dec(&sc->pend_ioctls); 2206 2207 return rval; 2208 } 2209