1 /* 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved. 5 * Support: <fbsd-storage-driver.pdl@broadcom.com> 6 * 7 * Authors: Sumit Saxena <sumit.saxena@broadcom.com> 8 * Chandrakanth Patil <chandrakanth.patil@broadcom.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions are 12 * met: 13 * 14 * 1. Redistributions of source code must retain the above copyright notice, 15 * this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright notice, 17 * this list of conditions and the following disclaimer in the documentation and/or other 18 * materials provided with the distribution. 19 * 3. Neither the name of the Broadcom Inc. nor the names of its contributors 20 * may be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 34 * 35 * The views and conclusions contained in the software and documentation are 36 * those of the authors and should not be interpreted as representing 37 * official policies,either expressed or implied, of the FreeBSD Project. 38 * 39 * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131 40 * 41 * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD 42 */ 43 44 #include <sys/param.h> 45 #include <sys/proc.h> 46 #include <cam/cam.h> 47 #include <cam/cam_ccb.h> 48 #include "mpi3mr_cam.h" 49 #include "mpi3mr_app.h" 50 #include "mpi3mr.h" 51 52 static d_open_t mpi3mr_open; 53 static d_close_t mpi3mr_close; 54 static d_ioctl_t mpi3mr_ioctl; 55 static d_poll_t mpi3mr_poll; 56 57 static struct cdevsw mpi3mr_cdevsw = { 58 .d_version = D_VERSION, 59 .d_flags = 0, 60 .d_open = mpi3mr_open, 61 .d_close = mpi3mr_close, 62 .d_ioctl = mpi3mr_ioctl, 63 .d_poll = mpi3mr_poll, 64 .d_name = "mpi3mr", 65 }; 66 67 static struct mpi3mr_mgmt_info mpi3mr_mgmt_info; 68 69 static int 70 mpi3mr_open(struct cdev *dev, int flags, int fmt, struct thread *td) 71 { 72 73 return (0); 74 } 75 76 static int 77 mpi3mr_close(struct cdev *dev, int flags, int fmt, struct thread *td) 78 { 79 80 return (0); 81 } 82 83 /* 84 * mpi3mr_app_attach - Char device registration 85 * @sc: Adapter reference 86 * 87 * This function does char device registration. 88 * 89 * Return: 0 on success and proper error codes on failure 90 */ 91 int 92 mpi3mr_app_attach(struct mpi3mr_softc *sc) 93 { 94 95 /* Create a /dev entry for Avenger controller */ 96 sc->mpi3mr_cdev = make_dev(&mpi3mr_cdevsw, device_get_unit(sc->mpi3mr_dev), 97 UID_ROOT, GID_OPERATOR, 0640, "mpi3mr%d", 98 device_get_unit(sc->mpi3mr_dev)); 99 100 if (sc->mpi3mr_cdev == NULL) 101 return (ENOMEM); 102 103 sc->mpi3mr_cdev->si_drv1 = sc; 104 105 /* Assign controller instance to mgmt_info structure */ 106 if (device_get_unit(sc->mpi3mr_dev) == 0) 107 memset(&mpi3mr_mgmt_info, 0, sizeof(mpi3mr_mgmt_info)); 108 mpi3mr_mgmt_info.count++; 109 mpi3mr_mgmt_info.sc_ptr[mpi3mr_mgmt_info.max_index] = sc; 110 mpi3mr_mgmt_info.max_index++; 111 112 return (0); 113 } 114 115 void 116 mpi3mr_app_detach(struct mpi3mr_softc *sc) 117 { 118 U8 i = 0; 119 120 if (sc->mpi3mr_cdev == NULL) 121 return; 122 123 destroy_dev(sc->mpi3mr_cdev); 124 for (i = 0; i < mpi3mr_mgmt_info.max_index; i++) { 125 if (mpi3mr_mgmt_info.sc_ptr[i] == sc) { 126 mpi3mr_mgmt_info.count--; 127 mpi3mr_mgmt_info.sc_ptr[i] = NULL; 128 break; 129 } 130 } 131 return; 132 } 133 134 static int 135 mpi3mr_poll(struct cdev *dev, int poll_events, struct thread *td) 136 { 137 int revents = 0; 138 struct mpi3mr_softc *sc = NULL; 139 sc = dev->si_drv1; 140 141 if ((poll_events & (POLLIN | POLLRDNORM)) && 142 (sc->mpi3mr_aen_triggered)) 143 revents |= poll_events & (POLLIN | POLLRDNORM); 144 145 if (revents == 0) { 146 if (poll_events & (POLLIN | POLLRDNORM)) { 147 sc->mpi3mr_poll_waiting = 1; 148 selrecord(td, &sc->mpi3mr_select); 149 } 150 } 151 return revents; 152 } 153 154 /** 155 * mpi3mr_app_get_adp_instancs - Get Adapter instance 156 * @mrioc_id: Adapter ID 157 * 158 * This fucnction searches the Adapter reference with mrioc_id 159 * upon found, returns the adapter reference otherwise returns 160 * the NULL 161 * 162 * Return: Adapter reference on success and NULL on failure 163 */ 164 static struct mpi3mr_softc * 165 mpi3mr_app_get_adp_instance(U8 mrioc_id) 166 { 167 struct mpi3mr_softc *sc = NULL; 168 169 if (mrioc_id >= mpi3mr_mgmt_info.max_index) 170 return NULL; 171 172 sc = mpi3mr_mgmt_info.sc_ptr[mrioc_id]; 173 return sc; 174 } 175 176 static int 177 mpi3mr_app_construct_nvme_sgl(struct mpi3mr_softc *sc, 178 Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request, 179 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt) 180 { 181 struct mpi3mr_nvme_pt_sge *nvme_sgl; 182 U64 sgl_dma; 183 U8 count; 184 U16 available_sges = 0, i; 185 U32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge); 186 size_t length = 0; 187 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 188 U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) << 189 sc->facts.sge_mod_shift) << 32); 190 U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) << 191 sc->facts.sge_mod_shift) << 32; 192 193 U32 size; 194 195 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) 196 ((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_SGL_OFFSET); 197 198 /* 199 * Not all commands require a data transfer. If no data, just return 200 * without constructing any SGL. 201 */ 202 for (count = 0; count < bufcnt; count++, dma_buff++) { 203 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 204 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 205 length = dma_buff->kern_buf_len; 206 break; 207 } 208 } 209 if (!length || !dma_buff->num_dma_desc) 210 return 0; 211 212 if (dma_buff->num_dma_desc == 1) { 213 available_sges = 1; 214 goto build_sges; 215 } 216 sgl_dma = (U64)sc->ioctl_chain_sge.dma_addr; 217 218 if (sgl_dma & sgemod_mask) { 219 printf(IOCNAME "NVMe SGL address collides with SGEModifier\n",sc->name); 220 return -1; 221 } 222 223 sgl_dma &= ~sgemod_mask; 224 sgl_dma |= sgemod_val; 225 226 memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size); 227 available_sges = sc->ioctl_chain_sge.size / sge_element_size; 228 if (available_sges < dma_buff->num_dma_desc) 229 return -1; 230 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); 231 nvme_sgl->base_addr = sgl_dma; 232 size = dma_buff->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge); 233 nvme_sgl->length = htole32(size); 234 nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT; 235 236 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) sc->ioctl_chain_sge.addr; 237 238 build_sges: 239 for (i = 0; i < dma_buff->num_dma_desc; i++) { 240 sgl_dma = htole64(dma_buff->dma_desc[i].dma_addr); 241 if (sgl_dma & sgemod_mask) { 242 printf("%s: SGL address collides with SGE modifier\n", 243 __func__); 244 return -1; 245 } 246 247 sgl_dma &= ~sgemod_mask; 248 sgl_dma |= sgemod_val; 249 250 nvme_sgl->base_addr = sgl_dma; 251 nvme_sgl->length = htole32(dma_buff->dma_desc[i].size); 252 nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT; 253 nvme_sgl++; 254 available_sges--; 255 } 256 257 return 0; 258 } 259 260 static int 261 mpi3mr_app_build_nvme_prp(struct mpi3mr_softc *sc, 262 Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request, 263 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, U8 bufcnt) 264 { 265 int prp_size = MPI3MR_NVME_PRP_SIZE; 266 U64 *prp_entry, *prp1_entry, *prp2_entry; 267 U64 *prp_page; 268 bus_addr_t prp_entry_dma, prp_page_dma, dma_addr; 269 U32 offset, entry_len, dev_pgsz; 270 U32 page_mask_result, page_mask; 271 size_t length = 0, desc_len; 272 U8 count; 273 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 274 U64 sgemod_mask = ((U64)((sc->facts.sge_mod_mask) << 275 sc->facts.sge_mod_shift) << 32); 276 U64 sgemod_val = ((U64)(sc->facts.sge_mod_value) << 277 sc->facts.sge_mod_shift) << 32; 278 U16 dev_handle = nvme_encap_request->DevHandle; 279 struct mpi3mr_target *tgtdev; 280 U16 desc_count = 0; 281 282 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle); 283 if (!tgtdev) { 284 printf(IOCNAME "EncapNVMe Error: Invalid DevHandle 0x%02x\n", sc->name, 285 dev_handle); 286 return -1; 287 } 288 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 289 printf(IOCNAME "%s: NVME device page size is zero for handle 0x%04x\n", 290 sc->name, __func__, dev_handle); 291 return -1; 292 } 293 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 294 295 page_mask = dev_pgsz - 1; 296 297 if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE){ 298 printf("%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n", 299 __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle); 300 return -1; 301 } 302 303 if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz){ 304 printf("%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n", 305 __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle); 306 return -1; 307 } 308 309 /* 310 * Not all commands require a data transfer. If no data, just return 311 * without constructing any PRP. 312 */ 313 for (count = 0; count < bufcnt; count++, dma_buff++) { 314 if ((dma_buff->data_dir == MPI3MR_APP_DDI) || 315 (dma_buff->data_dir == MPI3MR_APP_DDO)) { 316 length = dma_buff->kern_buf_len; 317 break; 318 } 319 } 320 if (!length || !dma_buff->num_dma_desc) 321 return 0; 322 323 for (count = 0; count < dma_buff->num_dma_desc; count++) { 324 dma_addr = dma_buff->dma_desc[count].dma_addr; 325 if (dma_addr & page_mask) { 326 printf("%s:dma_addr 0x%lu is not aligned with page size 0x%x\n", 327 __func__, dma_addr, dev_pgsz); 328 return -1; 329 } 330 } 331 332 dma_addr = dma_buff->dma_desc[0].dma_addr; 333 desc_len = dma_buff->dma_desc[0].size; 334 335 sc->nvme_encap_prp_sz = 0; 336 if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */ 337 4, 0, /* algnmnt, boundary */ 338 sc->dma_loaddr, /* lowaddr */ 339 BUS_SPACE_MAXADDR, /* highaddr */ 340 NULL, NULL, /* filter, filterarg */ 341 dev_pgsz, /* maxsize */ 342 1, /* nsegments */ 343 dev_pgsz, /* maxsegsize */ 344 0, /* flags */ 345 NULL, NULL, /* lockfunc, lockarg */ 346 &sc->nvme_encap_prp_list_dmatag)) { 347 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create ioctl NVME kernel buffer dma tag\n"); 348 return (ENOMEM); 349 } 350 351 if (bus_dmamem_alloc(sc->nvme_encap_prp_list_dmatag, (void **)&sc->nvme_encap_prp_list, 352 BUS_DMA_NOWAIT, &sc->nvme_encap_prp_list_dma_dmamap)) { 353 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate ioctl NVME dma memory\n"); 354 return (ENOMEM); 355 } 356 357 bzero(sc->nvme_encap_prp_list, dev_pgsz); 358 bus_dmamap_load(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap, 359 sc->nvme_encap_prp_list, dev_pgsz, mpi3mr_memaddr_cb, &sc->nvme_encap_prp_list_dma, 360 BUS_DMA_NOWAIT); 361 362 if (!sc->nvme_encap_prp_list) { 363 printf(IOCNAME "%s:%d Cannot load ioctl NVME dma memory for size: %d\n", sc->name, 364 __func__, __LINE__, dev_pgsz); 365 goto err_out; 366 } 367 sc->nvme_encap_prp_sz = dev_pgsz; 368 369 /* 370 * Set pointers to PRP1 and PRP2, which are in the NVMe command. 371 * PRP1 is located at a 24 byte offset from the start of the NVMe 372 * command. Then set the current PRP entry pointer to PRP1. 373 */ 374 prp1_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP1_OFFSET); 375 prp2_entry = (U64 *)((U8 *)(nvme_encap_request->Command) + MPI3MR_NVME_CMD_PRP2_OFFSET); 376 prp_entry = prp1_entry; 377 /* 378 * For the PRP entries, use the specially allocated buffer of 379 * contiguous memory. 380 */ 381 prp_page = sc->nvme_encap_prp_list; 382 prp_page_dma = sc->nvme_encap_prp_list_dma; 383 384 /* 385 * Check if we are within 1 entry of a page boundary we don't 386 * want our first entry to be a PRP List entry. 387 */ 388 page_mask_result = (uintptr_t)((U8 *)prp_page + prp_size) & page_mask; 389 if (!page_mask_result) { 390 printf(IOCNAME "PRP Page is not page aligned\n", sc->name); 391 goto err_out; 392 } 393 394 /* 395 * Set PRP physical pointer, which initially points to the current PRP 396 * DMA memory page. 397 */ 398 prp_entry_dma = prp_page_dma; 399 400 401 /* Loop while the length is not zero. */ 402 while (length) { 403 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 404 if (!page_mask_result && (length > dev_pgsz)) { 405 printf(IOCNAME "Single PRP page is not sufficient\n", sc->name); 406 goto err_out; 407 } 408 409 /* Need to handle if entry will be part of a page. */ 410 offset = dma_addr & page_mask; 411 entry_len = dev_pgsz - offset; 412 413 if (prp_entry == prp1_entry) { 414 /* 415 * Must fill in the first PRP pointer (PRP1) before 416 * moving on. 417 */ 418 *prp1_entry = dma_addr; 419 if (*prp1_entry & sgemod_mask) { 420 printf(IOCNAME "PRP1 address collides with SGEModifier\n", sc->name); 421 goto err_out; 422 } 423 *prp1_entry &= ~sgemod_mask; 424 *prp1_entry |= sgemod_val; 425 426 /* 427 * Now point to the second PRP entry within the 428 * command (PRP2). 429 */ 430 prp_entry = prp2_entry; 431 } else if (prp_entry == prp2_entry) { 432 /* 433 * Should the PRP2 entry be a PRP List pointer or just 434 * a regular PRP pointer? If there is more than one 435 * more page of data, must use a PRP List pointer. 436 */ 437 if (length > dev_pgsz) { 438 /* 439 * PRP2 will contain a PRP List pointer because 440 * more PRP's are needed with this command. The 441 * list will start at the beginning of the 442 * contiguous buffer. 443 */ 444 *prp2_entry = prp_entry_dma; 445 if (*prp2_entry & sgemod_mask) { 446 printf(IOCNAME "PRP list address collides with SGEModifier\n", sc->name); 447 goto err_out; 448 } 449 *prp2_entry &= ~sgemod_mask; 450 *prp2_entry |= sgemod_val; 451 452 /* 453 * The next PRP Entry will be the start of the 454 * first PRP List. 455 */ 456 prp_entry = prp_page; 457 continue; 458 } else { 459 /* 460 * After this, the PRP Entries are complete. 461 * This command uses 2 PRP's and no PRP list. 462 */ 463 *prp2_entry = dma_addr; 464 if (*prp2_entry & sgemod_mask) { 465 printf(IOCNAME "PRP2 address collides with SGEModifier\n", sc->name); 466 goto err_out; 467 } 468 *prp2_entry &= ~sgemod_mask; 469 *prp2_entry |= sgemod_val; 470 } 471 } else { 472 /* 473 * Put entry in list and bump the addresses. 474 * 475 * After PRP1 and PRP2 are filled in, this will fill in 476 * all remaining PRP entries in a PRP List, one per 477 * each time through the loop. 478 */ 479 *prp_entry = dma_addr; 480 if (*prp_entry & sgemod_mask) { 481 printf(IOCNAME "PRP address collides with SGEModifier\n", sc->name); 482 goto err_out; 483 } 484 *prp_entry &= ~sgemod_mask; 485 *prp_entry |= sgemod_val; 486 prp_entry++; 487 prp_entry_dma += prp_size; 488 } 489 490 /* Decrement length accounting for last partial page. */ 491 if (entry_len >= length) 492 length = 0; 493 else { 494 if (entry_len <= desc_len) { 495 dma_addr += entry_len; 496 desc_len -= entry_len; 497 } 498 if (!desc_len) { 499 if ((++desc_count) >= 500 dma_buff->num_dma_desc) { 501 printf("%s: Invalid len %ld while building PRP\n", 502 __func__, length); 503 goto err_out; 504 } 505 dma_addr = 506 dma_buff->dma_desc[desc_count].dma_addr; 507 desc_len = 508 dma_buff->dma_desc[desc_count].size; 509 } 510 length -= entry_len; 511 } 512 } 513 return 0; 514 err_out: 515 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 516 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 517 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 518 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 519 sc->nvme_encap_prp_list = NULL; 520 } 521 return -1; 522 } 523 524 /** 525 + * mpi3mr_map_data_buffer_dma - build dma descriptors for data 526 + * buffers 527 + * @sc: Adapter instance reference 528 + * @dma_buff: buffer map descriptor 529 + * @desc_count: Number of already consumed dma descriptors 530 + * 531 + * This function computes how many pre-allocated DMA descriptors 532 + * are required for the given data buffer and if those number of 533 + * descriptors are free, then setup the mapping of the scattered 534 + * DMA address to the given data buffer, if the data direction 535 + * of the buffer is DATA_OUT then the actual data is copied to 536 + * the DMA buffers 537 + * 538 + * Return: 0 on success, -1 on failure 539 + */ 540 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_softc *sc, 541 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 542 U8 desc_count) 543 { 544 U16 i, needed_desc = (dma_buffers->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE); 545 U32 buf_len = dma_buffers->kern_buf_len, copied_len = 0; 546 int error; 547 548 if (dma_buffers->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE) 549 needed_desc++; 550 551 if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) { 552 printf("%s: DMA descriptor mapping error %d:%d:%d\n", 553 __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE); 554 return -1; 555 } 556 557 dma_buffers->dma_desc = malloc(sizeof(*dma_buffers->dma_desc) * needed_desc, 558 M_MPI3MR, M_NOWAIT | M_ZERO); 559 if (!dma_buffers->dma_desc) 560 return -1; 561 562 error = 0; 563 for (i = 0; i < needed_desc; i++, desc_count++) { 564 565 dma_buffers->dma_desc[i].addr = sc->ioctl_sge[desc_count].addr; 566 dma_buffers->dma_desc[i].dma_addr = sc->ioctl_sge[desc_count].dma_addr; 567 568 if (buf_len < sc->ioctl_sge[desc_count].size) 569 dma_buffers->dma_desc[i].size = buf_len; 570 else 571 dma_buffers->dma_desc[i].size = sc->ioctl_sge[desc_count].size; 572 573 buf_len -= dma_buffers->dma_desc[i].size; 574 memset(dma_buffers->dma_desc[i].addr, 0, sc->ioctl_sge[desc_count].size); 575 576 if (dma_buffers->data_dir == MPI3MR_APP_DDO) { 577 error = copyin(((U8 *)dma_buffers->user_buf + copied_len), 578 dma_buffers->dma_desc[i].addr, 579 dma_buffers->dma_desc[i].size); 580 if (error != 0) 581 break; 582 copied_len += dma_buffers->dma_desc[i].size; 583 } 584 } 585 if (error != 0) { 586 printf("%s: DMA copyin error %d\n", __func__, error); 587 free(dma_buffers->dma_desc, M_MPI3MR); 588 return -1; 589 } 590 591 dma_buffers->num_dma_desc = needed_desc; 592 593 return 0; 594 } 595 596 static unsigned int 597 mpi3mr_app_get_nvme_data_fmt(Mpi3NVMeEncapsulatedRequest_t *nvme_encap_request) 598 { 599 U8 format = 0; 600 601 format = ((nvme_encap_request->Command[0] & 0xc000) >> 14); 602 return format; 603 } 604 605 static inline U16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 606 U8 bufcnt) 607 { 608 U16 i, sge_count = 0; 609 for (i=0; i < bufcnt; i++, dma_buffers++) { 610 if ((dma_buffers->data_dir == MPI3MR_APP_DDN) || 611 dma_buffers->kern_buf) 612 continue; 613 sge_count += dma_buffers->num_dma_desc; 614 if (!dma_buffers->num_dma_desc) 615 sge_count++; 616 } 617 return sge_count; 618 } 619 620 static int 621 mpi3mr_app_construct_sgl(struct mpi3mr_softc *sc, U8 *mpi_request, U32 sgl_offset, 622 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers, 623 U8 bufcnt, U8 is_rmc, U8 is_rmr, U8 num_datasges) 624 { 625 U8 *sgl = (mpi_request + sgl_offset), count = 0; 626 Mpi3RequestHeader_t *mpi_header = (Mpi3RequestHeader_t *)mpi_request; 627 Mpi3MgmtPassthroughRequest_t *rmgmt_req = 628 (Mpi3MgmtPassthroughRequest_t *)mpi_request; 629 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buff = dma_buffers; 630 U8 flag, sgl_flags, sgl_flags_eob, sgl_flags_last, last_chain_sgl_flags; 631 U16 available_sges, i, sges_needed; 632 U32 sge_element_size = sizeof(struct _MPI3_SGE_COMMON); 633 bool chain_used = false; 634 635 sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 636 MPI3_SGE_FLAGS_DLAS_SYSTEM ; 637 sgl_flags_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER; 638 sgl_flags_last = sgl_flags_eob | MPI3_SGE_FLAGS_END_OF_LIST; 639 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 640 MPI3_SGE_FLAGS_DLAS_SYSTEM; 641 642 sges_needed = mpi3mr_total_num_ioctl_sges(dma_buffers, bufcnt); 643 644 if (is_rmc) { 645 mpi3mr_add_sg_single(&rmgmt_req->CommandSGL, 646 sgl_flags_last, dma_buff->kern_buf_len, 647 dma_buff->kern_buf_dma); 648 sgl = (U8 *) dma_buff->kern_buf + dma_buff->user_buf_len; 649 available_sges = (dma_buff->kern_buf_len - 650 dma_buff->user_buf_len) / sge_element_size; 651 if (sges_needed > available_sges) 652 return -1; 653 chain_used = true; 654 dma_buff++; 655 count++; 656 if (is_rmr) { 657 mpi3mr_add_sg_single(&rmgmt_req->ResponseSGL, 658 sgl_flags_last, dma_buff->kern_buf_len, 659 dma_buff->kern_buf_dma); 660 dma_buff++; 661 count++; 662 } else 663 mpi3mr_build_zero_len_sge( 664 &rmgmt_req->ResponseSGL); 665 if (num_datasges) { 666 i = 0; 667 goto build_sges; 668 } 669 } else { 670 if (sgl_offset >= MPI3MR_AREQ_FRAME_SZ) 671 return -1; 672 available_sges = (MPI3MR_AREQ_FRAME_SZ - sgl_offset) / 673 sge_element_size; 674 if (!available_sges) 675 return -1; 676 } 677 678 if (!num_datasges) { 679 mpi3mr_build_zero_len_sge(sgl); 680 return 0; 681 } 682 683 if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) { 684 if ((sges_needed > 2) || (sges_needed > available_sges)) 685 return -1; 686 for (; count < bufcnt; count++, dma_buff++) { 687 if ((dma_buff->data_dir == MPI3MR_APP_DDN) || 688 !dma_buff->num_dma_desc) 689 continue; 690 mpi3mr_add_sg_single(sgl, sgl_flags_last, 691 dma_buff->dma_desc[0].size, 692 dma_buff->dma_desc[0].dma_addr); 693 sgl += sge_element_size; 694 } 695 return 0; 696 } 697 i = 0; 698 699 build_sges: 700 for (; count < bufcnt; count++, dma_buff++) { 701 if (dma_buff->data_dir == MPI3MR_APP_DDN) 702 continue; 703 if (!dma_buff->num_dma_desc) { 704 if (chain_used && !available_sges) 705 return -1; 706 if (!chain_used && (available_sges == 1) && 707 (sges_needed > 1)) 708 goto setup_chain; 709 flag = sgl_flags_eob; 710 if (num_datasges == 1) 711 flag = sgl_flags_last; 712 mpi3mr_add_sg_single(sgl, flag, 0, 0); 713 sgl += sge_element_size; 714 available_sges--; 715 sges_needed--; 716 num_datasges--; 717 continue; 718 } 719 for (; i < dma_buff->num_dma_desc; i++) { 720 if (chain_used && !available_sges) 721 return -1; 722 if (!chain_used && (available_sges == 1) && 723 (sges_needed > 1)) 724 goto setup_chain; 725 flag = sgl_flags; 726 if (i == (dma_buff->num_dma_desc - 1)) { 727 if (num_datasges == 1) 728 flag = sgl_flags_last; 729 else 730 flag = sgl_flags_eob; 731 } 732 733 mpi3mr_add_sg_single(sgl, flag, 734 dma_buff->dma_desc[i].size, 735 dma_buff->dma_desc[i].dma_addr); 736 sgl += sge_element_size; 737 available_sges--; 738 sges_needed--; 739 } 740 num_datasges--; 741 i = 0; 742 } 743 return 0; 744 745 setup_chain: 746 available_sges = sc->ioctl_chain_sge.size / sge_element_size; 747 if (sges_needed > available_sges) 748 return -1; 749 mpi3mr_add_sg_single(sgl, last_chain_sgl_flags, 750 (sges_needed * sge_element_size), sc->ioctl_chain_sge.dma_addr); 751 memset(sc->ioctl_chain_sge.addr, 0, sc->ioctl_chain_sge.size); 752 sgl = (U8 *)sc->ioctl_chain_sge.addr; 753 chain_used = true; 754 goto build_sges; 755 } 756 757 758 /** 759 * mpi3mr_app_mptcmds - MPI Pass through IOCTL handler 760 * @dev: char device 761 * @cmd: IOCTL command 762 * @arg: User data payload buffer for the IOCTL 763 * @flag: flags 764 * @thread: threads 765 * 766 * This function is the top level handler for MPI Pass through 767 * IOCTL, this does basic validation of the input data buffers, 768 * identifies the given buffer types and MPI command, allocates 769 * DMAable memory for user given buffers, construstcs SGL 770 * properly and passes the command to the firmware. 771 * 772 * Once the MPI command is completed the driver copies the data 773 * if any and reply, sense information to user provided buffers. 774 * If the command is timed out then issues controller reset 775 * prior to returning. 776 * 777 * Return: 0 on success and proper error codes on failure 778 */ 779 static long 780 mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg, 781 int flag, struct thread *td) 782 { 783 long rval = EINVAL; 784 U8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; 785 U8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF; 786 U16 desc_count = 0; 787 U8 nvme_fmt = 0; 788 U32 tmplen = 0, erbsz = MPI3MR_SENSEBUF_SZ, din_sz = 0, dout_sz = 0; 789 U8 *kern_erb = NULL; 790 U8 *mpi_request = NULL; 791 Mpi3RequestHeader_t *mpi_header = NULL; 792 Mpi3PELReqActionGetCount_t *pel = NULL; 793 Mpi3StatusReplyDescriptor_t *status_desc = NULL; 794 struct mpi3mr_softc *sc = NULL; 795 struct mpi3mr_ioctl_buf_entry_list *buffer_list = NULL; 796 struct mpi3mr_buf_entry *buf_entries = NULL; 797 struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers = NULL, *dma_buff = NULL; 798 struct mpi3mr_ioctl_mpirepbuf *mpirepbuf = NULL; 799 struct mpi3mr_ioctl_mptcmd *karg = (struct mpi3mr_ioctl_mptcmd *)uarg; 800 struct mpi3mr_target *tgtdev = NULL; 801 Mpi3SCSITaskMgmtRequest_t *tm_req = NULL; 802 803 804 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 805 if (!sc) 806 return ENODEV; 807 808 if (!sc->ioctl_sges_allocated) { 809 printf("%s: DMA memory was not allocated\n", __func__); 810 return ENOMEM; 811 } 812 813 if (karg->timeout < MPI3MR_IOCTL_DEFAULT_TIMEOUT) 814 karg->timeout = MPI3MR_IOCTL_DEFAULT_TIMEOUT; 815 816 if (!karg->mpi_msg_size || !karg->buf_entry_list_size) { 817 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 818 __func__, __LINE__); 819 return rval; 820 } 821 if ((karg->mpi_msg_size * 4) > MPI3MR_AREQ_FRAME_SZ) { 822 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 823 __func__, __LINE__); 824 return rval; 825 } 826 827 mpi_request = malloc(MPI3MR_AREQ_FRAME_SZ, M_MPI3MR, M_NOWAIT | M_ZERO); 828 if (!mpi_request) { 829 printf(IOCNAME "%s: memory allocation failed for mpi_request\n", sc->name, 830 __func__); 831 return ENOMEM; 832 } 833 834 mpi_header = (Mpi3RequestHeader_t *)mpi_request; 835 pel = (Mpi3PELReqActionGetCount_t *)mpi_request; 836 if (copyin(karg->mpi_msg_buf, mpi_request, (karg->mpi_msg_size * 4))) { 837 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 838 __FILE__, __LINE__, __func__); 839 rval = EFAULT; 840 goto out; 841 } 842 843 buffer_list = malloc(karg->buf_entry_list_size, M_MPI3MR, M_NOWAIT | M_ZERO); 844 if (!buffer_list) { 845 printf(IOCNAME "%s: memory allocation failed for buffer_list\n", sc->name, 846 __func__); 847 rval = ENOMEM; 848 goto out; 849 } 850 if (copyin(karg->buf_entry_list, buffer_list, karg->buf_entry_list_size)) { 851 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 852 __FILE__, __LINE__, __func__); 853 rval = EFAULT; 854 goto out; 855 } 856 if (!buffer_list->num_of_buf_entries) { 857 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 858 __func__, __LINE__); 859 rval = EINVAL; 860 goto out; 861 } 862 bufcnt = buffer_list->num_of_buf_entries; 863 dma_buffers = malloc((sizeof(*dma_buffers) * bufcnt), M_MPI3MR, M_NOWAIT | M_ZERO); 864 if (!dma_buffers) { 865 printf(IOCNAME "%s: memory allocation failed for dma_buffers\n", sc->name, 866 __func__); 867 rval = ENOMEM; 868 goto out; 869 } 870 buf_entries = buffer_list->buf_entry; 871 dma_buff = dma_buffers; 872 for (count = 0; count < bufcnt; count++, buf_entries++, dma_buff++) { 873 memset(dma_buff, 0, sizeof(*dma_buff)); 874 dma_buff->user_buf = buf_entries->buffer; 875 dma_buff->user_buf_len = buf_entries->buf_len; 876 877 switch (buf_entries->buf_type) { 878 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_CMD: 879 is_rmcb = 1; 880 if ((count != 0) || !buf_entries->buf_len) 881 invalid_be = 1; 882 dma_buff->data_dir = MPI3MR_APP_DDO; 883 break; 884 case MPI3MR_IOCTL_BUFTYPE_RAIDMGMT_RESP: 885 is_rmrb = 1; 886 if (count != 1 || !is_rmcb || !buf_entries->buf_len) 887 invalid_be = 1; 888 dma_buff->data_dir = MPI3MR_APP_DDI; 889 break; 890 case MPI3MR_IOCTL_BUFTYPE_DATA_IN: 891 din_sz = dma_buff->user_buf_len; 892 din_cnt++; 893 if ((din_cnt > 1) && !is_rmcb) 894 invalid_be = 1; 895 dma_buff->data_dir = MPI3MR_APP_DDI; 896 break; 897 case MPI3MR_IOCTL_BUFTYPE_DATA_OUT: 898 dout_sz = dma_buff->user_buf_len; 899 dout_cnt++; 900 if ((dout_cnt > 1) && !is_rmcb) 901 invalid_be = 1; 902 dma_buff->data_dir = MPI3MR_APP_DDO; 903 break; 904 case MPI3MR_IOCTL_BUFTYPE_MPI_REPLY: 905 mpirep_offset = count; 906 dma_buff->data_dir = MPI3MR_APP_DDN; 907 if (!buf_entries->buf_len) 908 invalid_be = 1; 909 break; 910 case MPI3MR_IOCTL_BUFTYPE_ERR_RESPONSE: 911 erb_offset = count; 912 dma_buff->data_dir = MPI3MR_APP_DDN; 913 if (!buf_entries->buf_len) 914 invalid_be = 1; 915 break; 916 default: 917 invalid_be = 1; 918 break; 919 } 920 if (invalid_be) 921 break; 922 } 923 if (invalid_be) { 924 printf(IOCNAME "%s:%d Invalid IOCTL parameters passed\n", sc->name, 925 __func__, __LINE__); 926 rval = EINVAL; 927 goto out; 928 } 929 930 if (is_rmcb && ((din_sz + dout_sz) > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 931 printf("%s:%d: invalid data transfer size passed for function 0x%x" 932 "din_sz = %d, dout_size = %d\n", __func__, __LINE__, 933 mpi_header->Function, din_sz, dout_sz); 934 rval = EINVAL; 935 goto out; 936 } 937 938 if ((din_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE) || 939 (dout_sz > MPI3MR_MAX_IOCTL_TRANSFER_SIZE)) { 940 printf("%s:%d: invalid data transfer size passed for function 0x%x" 941 "din_size=%d dout_size=%d\n", __func__, __LINE__, 942 mpi_header->Function, din_sz, dout_sz); 943 rval = EINVAL; 944 goto out; 945 } 946 947 if (mpi_header->Function == MPI3_FUNCTION_SMP_PASSTHROUGH) { 948 if ((din_sz > MPI3MR_IOCTL_SGE_SIZE) || 949 (dout_sz > MPI3MR_IOCTL_SGE_SIZE)) { 950 printf("%s:%d: invalid message size passed:%d:%d:%d:%d\n", 951 __func__, __LINE__, din_cnt, dout_cnt, din_sz, dout_sz); 952 rval = EINVAL; 953 goto out; 954 } 955 } 956 957 dma_buff = dma_buffers; 958 for (count = 0; count < bufcnt; count++, dma_buff++) { 959 960 dma_buff->kern_buf_len = dma_buff->user_buf_len; 961 962 if (is_rmcb && !count) { 963 dma_buff->kern_buf = sc->ioctl_chain_sge.addr; 964 dma_buff->kern_buf_len = sc->ioctl_chain_sge.size; 965 dma_buff->kern_buf_dma = sc->ioctl_chain_sge.dma_addr; 966 dma_buff->dma_desc = NULL; 967 dma_buff->num_dma_desc = 0; 968 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 969 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 970 if (copyin(dma_buff->user_buf, dma_buff->kern_buf, tmplen)) { 971 mpi3mr_dprint(sc, MPI3MR_ERROR, "failure at %s() line: %d", 972 __func__, __LINE__); 973 rval = EFAULT; 974 goto out; 975 } 976 } else if (is_rmrb && (count == 1)) { 977 dma_buff->kern_buf = sc->ioctl_resp_sge.addr; 978 dma_buff->kern_buf_len = sc->ioctl_resp_sge.size; 979 dma_buff->kern_buf_dma = sc->ioctl_resp_sge.dma_addr; 980 dma_buff->dma_desc = NULL; 981 dma_buff->num_dma_desc = 0; 982 memset(dma_buff->kern_buf, 0, dma_buff->kern_buf_len); 983 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 984 dma_buff->kern_buf_len = tmplen; 985 } else { 986 if (!dma_buff->kern_buf_len) 987 continue; 988 if (mpi3mr_map_data_buffer_dma(sc, dma_buff, desc_count)) { 989 rval = ENOMEM; 990 mpi3mr_dprint(sc, MPI3MR_ERROR, "mapping data buffers failed" 991 "at %s() line: %d\n", __func__, __LINE__); 992 goto out; 993 } 994 desc_count += dma_buff->num_dma_desc; 995 } 996 } 997 998 if (erb_offset != 0xFF) { 999 kern_erb = malloc(erbsz, M_MPI3MR, M_NOWAIT | M_ZERO); 1000 if (!kern_erb) { 1001 printf(IOCNAME "%s:%d Cannot allocate memory for sense buffer\n", sc->name, 1002 __func__, __LINE__); 1003 rval = ENOMEM; 1004 goto out; 1005 } 1006 } 1007 1008 if (sc->ioctl_cmds.state & MPI3MR_CMD_PENDING) { 1009 printf(IOCNAME "Issue IOCTL: Ioctl command is in use/previous command is pending\n", 1010 sc->name); 1011 rval = EAGAIN; 1012 goto out; 1013 } 1014 1015 if (sc->unrecoverable) { 1016 printf(IOCNAME "Issue IOCTL: controller is in unrecoverable state\n", sc->name); 1017 rval = EFAULT; 1018 goto out; 1019 } 1020 1021 if (sc->reset_in_progress) { 1022 printf(IOCNAME "Issue IOCTL: reset in progress\n", sc->name); 1023 rval = EAGAIN; 1024 goto out; 1025 } 1026 if (sc->block_ioctls) { 1027 printf(IOCNAME "Issue IOCTL: IOCTLs are blocked\n", sc->name); 1028 rval = EAGAIN; 1029 goto out; 1030 } 1031 1032 if (mpi_header->Function != MPI3_FUNCTION_NVME_ENCAPSULATED) { 1033 if (mpi3mr_app_construct_sgl(sc, mpi_request, (karg->mpi_msg_size * 4), dma_buffers, 1034 bufcnt, is_rmcb, is_rmrb, (dout_cnt + din_cnt))) { 1035 printf(IOCNAME "Issue IOCTL: sgl build failed\n", sc->name); 1036 rval = EAGAIN; 1037 goto out; 1038 } 1039 1040 } else { 1041 nvme_fmt = mpi3mr_app_get_nvme_data_fmt( 1042 (Mpi3NVMeEncapsulatedRequest_t *)mpi_request); 1043 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { 1044 if (mpi3mr_app_build_nvme_prp(sc, 1045 (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1046 dma_buffers, bufcnt)) { 1047 rval = ENOMEM; 1048 goto out; 1049 } 1050 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || 1051 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { 1052 if (mpi3mr_app_construct_nvme_sgl(sc, (Mpi3NVMeEncapsulatedRequest_t *) mpi_request, 1053 dma_buffers, bufcnt)) { 1054 rval = EINVAL; 1055 goto out; 1056 } 1057 } else { 1058 printf(IOCNAME "%s: Invalid NVMe Command Format\n", sc->name, 1059 __func__); 1060 rval = EINVAL; 1061 goto out; 1062 } 1063 } 1064 1065 if (mpi_header->Function == MPI3_FUNCTION_SCSI_TASK_MGMT) { 1066 tm_req = (Mpi3SCSITaskMgmtRequest_t *)mpi_request; 1067 if (tm_req->TaskType != MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 1068 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tm_req->DevHandle); 1069 if (!tgtdev) { 1070 rval = ENODEV; 1071 goto out; 1072 } 1073 mpi3mr_atomic_inc(&tgtdev->block_io); 1074 } 1075 } 1076 1077 sc->ioctl_cmds.state = MPI3MR_CMD_PENDING; 1078 sc->ioctl_cmds.is_waiting = 1; 1079 sc->ioctl_cmds.callback = NULL; 1080 sc->ioctl_cmds.is_senseprst = 0; 1081 sc->ioctl_cmds.sensebuf = kern_erb; 1082 memset((sc->ioctl_cmds.reply), 0, sc->reply_sz); 1083 mpi_header->HostTag = MPI3MR_HOSTTAG_IOCTLCMDS; 1084 init_completion(&sc->ioctl_cmds.completion); 1085 rval = mpi3mr_submit_admin_cmd(sc, mpi_request, MPI3MR_AREQ_FRAME_SZ); 1086 if (rval) { 1087 printf(IOCNAME "Issue IOCTL: Admin Post failed\n", sc->name); 1088 goto out_failed; 1089 } 1090 wait_for_completion_timeout(&sc->ioctl_cmds.completion, karg->timeout); 1091 1092 if (!(sc->ioctl_cmds.state & MPI3MR_CMD_COMPLETE)) { 1093 sc->ioctl_cmds.is_waiting = 0; 1094 printf(IOCNAME "Issue IOCTL: command timed out\n", sc->name); 1095 rval = EAGAIN; 1096 if (sc->ioctl_cmds.state & MPI3MR_CMD_RESET) 1097 goto out_failed; 1098 1099 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1100 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL_TIMEOUT; 1101 goto out_failed; 1102 } 1103 1104 if (sc->nvme_encap_prp_list && sc->nvme_encap_prp_list_dma) { 1105 bus_dmamap_unload(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list_dma_dmamap); 1106 bus_dmamem_free(sc->nvme_encap_prp_list_dmatag, sc->nvme_encap_prp_list, sc->nvme_encap_prp_list_dma_dmamap); 1107 bus_dma_tag_destroy(sc->nvme_encap_prp_list_dmatag); 1108 sc->nvme_encap_prp_list = NULL; 1109 } 1110 1111 if (((sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1112 != MPI3_IOCSTATUS_SUCCESS) && 1113 (sc->mpi3mr_debug & MPI3MR_DEBUG_IOCTL)) { 1114 printf(IOCNAME "Issue IOCTL: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", sc->name, 1115 (sc->ioctl_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1116 sc->ioctl_cmds.ioc_loginfo); 1117 } 1118 1119 if ((mpirep_offset != 0xFF) && 1120 dma_buffers[mpirep_offset].user_buf_len) { 1121 dma_buff = &dma_buffers[mpirep_offset]; 1122 dma_buff->kern_buf_len = (sizeof(*mpirepbuf) - 1 + 1123 sc->reply_sz); 1124 mpirepbuf = malloc(dma_buff->kern_buf_len, M_MPI3MR, M_NOWAIT | M_ZERO); 1125 1126 if (!mpirepbuf) { 1127 printf(IOCNAME "%s: failed obtaining a memory for mpi reply\n", sc->name, 1128 __func__); 1129 rval = ENOMEM; 1130 goto out_failed; 1131 } 1132 if (sc->ioctl_cmds.state & MPI3MR_CMD_REPLYVALID) { 1133 mpirepbuf->mpirep_type = 1134 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_ADDRESS; 1135 memcpy(mpirepbuf->repbuf, sc->ioctl_cmds.reply, sc->reply_sz); 1136 } else { 1137 mpirepbuf->mpirep_type = 1138 MPI3MR_IOCTL_MPI_REPLY_BUFTYPE_STATUS; 1139 status_desc = (Mpi3StatusReplyDescriptor_t *) 1140 mpirepbuf->repbuf; 1141 status_desc->IOCStatus = sc->ioctl_cmds.ioc_status; 1142 status_desc->IOCLogInfo = sc->ioctl_cmds.ioc_loginfo; 1143 } 1144 tmplen = min(dma_buff->kern_buf_len, dma_buff->user_buf_len); 1145 if (copyout(mpirepbuf, dma_buff->user_buf, tmplen)) { 1146 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1147 __FILE__, __LINE__, __func__); 1148 rval = EFAULT; 1149 goto out_failed; 1150 } 1151 } 1152 1153 if (erb_offset != 0xFF && sc->ioctl_cmds.sensebuf && 1154 sc->ioctl_cmds.is_senseprst) { 1155 dma_buff = &dma_buffers[erb_offset]; 1156 tmplen = min(erbsz, dma_buff->user_buf_len); 1157 if (copyout(kern_erb, dma_buff->user_buf, tmplen)) { 1158 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1159 __FILE__, __LINE__, __func__); 1160 rval = EFAULT; 1161 goto out_failed; 1162 } 1163 } 1164 1165 dma_buff = dma_buffers; 1166 for (count = 0; count < bufcnt; count++, dma_buff++) { 1167 if ((count == 1) && is_rmrb) { 1168 if (copyout(dma_buff->kern_buf, dma_buff->user_buf,dma_buff->kern_buf_len)) { 1169 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1170 __FILE__, __LINE__, __func__); 1171 rval = EFAULT; 1172 goto out_failed; 1173 } 1174 } else if (dma_buff->data_dir == MPI3MR_APP_DDI) { 1175 tmplen = 0; 1176 for (desc_count = 0; desc_count < dma_buff->num_dma_desc; desc_count++) { 1177 if (copyout(dma_buff->dma_desc[desc_count].addr, 1178 (U8 *)dma_buff->user_buf+tmplen, 1179 dma_buff->dma_desc[desc_count].size)) { 1180 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1181 __FILE__, __LINE__, __func__); 1182 rval = EFAULT; 1183 goto out_failed; 1184 } 1185 tmplen += dma_buff->dma_desc[desc_count].size; 1186 } 1187 } 1188 } 1189 1190 if ((pel->Function == MPI3_FUNCTION_PERSISTENT_EVENT_LOG) && 1191 (pel->Action == MPI3_PEL_ACTION_GET_COUNT)) 1192 sc->mpi3mr_aen_triggered = 0; 1193 1194 out_failed: 1195 if (tgtdev) 1196 mpi3mr_atomic_dec(&tgtdev->block_io); 1197 1198 sc->ioctl_cmds.is_senseprst = 0; 1199 sc->ioctl_cmds.sensebuf = NULL; 1200 sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED; 1201 out: 1202 if (kern_erb) 1203 free(kern_erb, M_MPI3MR); 1204 if (buffer_list) 1205 free(buffer_list, M_MPI3MR); 1206 if (mpi_request) 1207 free(mpi_request, M_MPI3MR); 1208 if (dma_buffers) { 1209 dma_buff = dma_buffers; 1210 for (count = 0; count < bufcnt; count++, dma_buff++) { 1211 free(dma_buff->dma_desc, M_MPI3MR); 1212 } 1213 free(dma_buffers, M_MPI3MR); 1214 } 1215 if (mpirepbuf) 1216 free(mpirepbuf, M_MPI3MR); 1217 return rval; 1218 } 1219 1220 /** 1221 * mpi3mr_soft_reset_from_app - Trigger controller reset 1222 * @sc: Adapter instance reference 1223 * 1224 * This function triggers the controller reset from the 1225 * watchdog context and wait for it to complete. It will 1226 * come out of wait upon completion or timeout exaustion. 1227 * 1228 * Return: 0 on success and proper error codes on failure 1229 */ 1230 static long 1231 mpi3mr_soft_reset_from_app(struct mpi3mr_softc *sc) 1232 { 1233 1234 U32 timeout; 1235 1236 /* if reset is not in progress, trigger soft reset from watchdog context */ 1237 if (!sc->reset_in_progress) { 1238 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1239 sc->reset.reason = MPI3MR_RESET_FROM_IOCTL; 1240 1241 /* Wait for soft reset to start */ 1242 timeout = 50; 1243 while (timeout--) { 1244 if (sc->reset_in_progress == 1) 1245 break; 1246 DELAY(100 * 1000); 1247 } 1248 if (!timeout) 1249 return EFAULT; 1250 } 1251 1252 /* Wait for soft reset to complete */ 1253 int i = 0; 1254 timeout = sc->ready_timeout; 1255 while (timeout--) { 1256 if (sc->reset_in_progress == 0) 1257 break; 1258 i++; 1259 if (!(i % 5)) { 1260 mpi3mr_dprint(sc, MPI3MR_INFO, 1261 "[%2ds]waiting for controller reset to be finished from %s\n", i, __func__); 1262 } 1263 DELAY(1000 * 1000); 1264 } 1265 1266 /* 1267 * In case of soft reset failure or not completed within stipulated time, 1268 * fail back to application. 1269 */ 1270 if ((!timeout || sc->reset.status)) 1271 return EFAULT; 1272 1273 return 0; 1274 } 1275 1276 1277 /** 1278 * mpi3mr_adp_reset - Issue controller reset 1279 * @sc: Adapter instance reference 1280 * @data_out_buf: User buffer with reset type 1281 * @data_out_sz: length of the user buffer. 1282 * 1283 * This function identifies the user provided reset type and 1284 * issues approporiate reset to the controller and wait for that 1285 * to complete and reinitialize the controller and then returns. 1286 * 1287 * Return: 0 on success and proper error codes on failure 1288 */ 1289 static long 1290 mpi3mr_adp_reset(struct mpi3mr_softc *sc, 1291 void *data_out_buf, U32 data_out_sz) 1292 { 1293 long rval = EINVAL; 1294 struct mpi3mr_ioctl_adpreset adpreset; 1295 1296 memset(&adpreset, 0, sizeof(adpreset)); 1297 1298 if (data_out_sz != sizeof(adpreset)) { 1299 printf(IOCNAME "Invalid user adpreset buffer size %s() line: %d\n", sc->name, 1300 __func__, __LINE__); 1301 goto out; 1302 } 1303 1304 if (copyin(data_out_buf, &adpreset, sizeof(adpreset))) { 1305 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1306 __func__, __LINE__); 1307 rval = EFAULT; 1308 goto out; 1309 } 1310 1311 switch (adpreset.reset_type) { 1312 case MPI3MR_IOCTL_ADPRESET_SOFT: 1313 sc->reset.ioctl_reset_snapdump = false; 1314 break; 1315 case MPI3MR_IOCTL_ADPRESET_DIAG_FAULT: 1316 sc->reset.ioctl_reset_snapdump = true; 1317 break; 1318 default: 1319 printf(IOCNAME "Unknown reset_type(0x%x) issued\n", sc->name, 1320 adpreset.reset_type); 1321 goto out; 1322 } 1323 rval = mpi3mr_soft_reset_from_app(sc); 1324 if (rval) 1325 printf(IOCNAME "reset handler returned error (0x%lx) for reset type 0x%x\n", 1326 sc->name, rval, adpreset.reset_type); 1327 1328 out: 1329 return rval; 1330 } 1331 1332 void 1333 mpi3mr_app_send_aen(struct mpi3mr_softc *sc) 1334 { 1335 sc->mpi3mr_aen_triggered = 1; 1336 if (sc->mpi3mr_poll_waiting) { 1337 selwakeup(&sc->mpi3mr_select); 1338 sc->mpi3mr_poll_waiting = 0; 1339 } 1340 return; 1341 } 1342 1343 void 1344 mpi3mr_pel_wait_complete(struct mpi3mr_softc *sc, 1345 struct mpi3mr_drvr_cmd *drvr_cmd) 1346 { 1347 U8 retry = 0; 1348 Mpi3PELReply_t *pel_reply = NULL; 1349 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1350 1351 if (drvr_cmd->state & MPI3MR_CMD_RESET) 1352 goto cleanup_drvrcmd; 1353 1354 if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) { 1355 printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__); 1356 goto out_failed; 1357 } 1358 pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply; 1359 1360 if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1361 || ((le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS) 1362 && (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_ABORTED))){ 1363 printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n", 1364 sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status), 1365 drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus)); 1366 retry = 1; 1367 } 1368 1369 if (retry) { 1370 if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1371 drvr_cmd->retry_count++; 1372 printf(IOCNAME "%s : PELWaitretry=%d\n", sc->name, 1373 __func__, drvr_cmd->retry_count); 1374 mpi3mr_issue_pel_wait(sc, drvr_cmd); 1375 return; 1376 } 1377 1378 printf(IOCNAME "%s :PELWait failed after all retries\n", sc->name, 1379 __func__); 1380 goto out_failed; 1381 } 1382 1383 mpi3mr_app_send_aen(sc); 1384 1385 if (!sc->pel_abort_requested) { 1386 sc->pel_cmds.retry_count = 0; 1387 mpi3mr_send_pel_getseq(sc, &sc->pel_cmds); 1388 } 1389 1390 return; 1391 out_failed: 1392 sc->pel_wait_pend = 0; 1393 cleanup_drvrcmd: 1394 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1395 drvr_cmd->callback = NULL; 1396 drvr_cmd->retry_count = 0; 1397 } 1398 1399 void 1400 mpi3mr_issue_pel_wait(struct mpi3mr_softc *sc, 1401 struct mpi3mr_drvr_cmd *drvr_cmd) 1402 { 1403 U8 retry_count = 0; 1404 Mpi3PELReqActionWait_t pel_wait; 1405 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1406 1407 sc->pel_abort_requested = 0; 1408 1409 memset(&pel_wait, 0, sizeof(pel_wait)); 1410 drvr_cmd->state = MPI3MR_CMD_PENDING; 1411 drvr_cmd->is_waiting = 0; 1412 drvr_cmd->callback = mpi3mr_pel_wait_complete; 1413 drvr_cmd->ioc_status = 0; 1414 drvr_cmd->ioc_loginfo = 0; 1415 pel_wait.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1416 pel_wait.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1417 pel_wait.Action = MPI3_PEL_ACTION_WAIT; 1418 pel_wait.StartingSequenceNumber = htole32(sc->newest_seqnum); 1419 pel_wait.Locale = htole16(sc->pel_locale); 1420 pel_wait.Class = htole16(sc->pel_class); 1421 pel_wait.WaitTime = MPI3_PEL_WAITTIME_INFINITE_WAIT; 1422 printf(IOCNAME "Issuing PELWait: seqnum %u class %u locale 0x%08x\n", 1423 sc->name, sc->newest_seqnum, sc->pel_class, sc->pel_locale); 1424 retry_pel_wait: 1425 if (mpi3mr_submit_admin_cmd(sc, &pel_wait, sizeof(pel_wait))) { 1426 printf(IOCNAME "%s: Issue PELWait IOCTL: Admin Post failed\n", sc->name, __func__); 1427 if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1428 retry_count++; 1429 goto retry_pel_wait; 1430 } 1431 goto out_failed; 1432 } 1433 return; 1434 out_failed: 1435 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1436 drvr_cmd->callback = NULL; 1437 drvr_cmd->retry_count = 0; 1438 sc->pel_wait_pend = 0; 1439 return; 1440 } 1441 1442 void 1443 mpi3mr_send_pel_getseq(struct mpi3mr_softc *sc, 1444 struct mpi3mr_drvr_cmd *drvr_cmd) 1445 { 1446 U8 retry_count = 0; 1447 U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1448 Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req; 1449 1450 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 1451 sc->pel_cmds.state = MPI3MR_CMD_PENDING; 1452 sc->pel_cmds.is_waiting = 0; 1453 sc->pel_cmds.ioc_status = 0; 1454 sc->pel_cmds.ioc_loginfo = 0; 1455 sc->pel_cmds.callback = mpi3mr_pel_getseq_complete; 1456 pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1457 pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1458 pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM; 1459 mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags, 1460 sc->pel_seq_number_sz, sc->pel_seq_number_dma); 1461 1462 retry_pel_getseq: 1463 if (mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req))) { 1464 printf(IOCNAME "%s: Issuing PEL GetSeq IOCTL: Admin Post failed\n", sc->name, __func__); 1465 if (retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1466 retry_count++; 1467 goto retry_pel_getseq; 1468 } 1469 goto out_failed; 1470 } 1471 return; 1472 out_failed: 1473 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1474 drvr_cmd->callback = NULL; 1475 drvr_cmd->retry_count = 0; 1476 sc->pel_wait_pend = 0; 1477 } 1478 1479 void 1480 mpi3mr_pel_getseq_complete(struct mpi3mr_softc *sc, 1481 struct mpi3mr_drvr_cmd *drvr_cmd) 1482 { 1483 U8 retry = 0; 1484 Mpi3PELReply_t *pel_reply = NULL; 1485 Mpi3PELSeq_t *pel_seq_num = (Mpi3PELSeq_t *)sc->pel_seq_number; 1486 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1487 1488 if (drvr_cmd->state & MPI3MR_CMD_RESET) 1489 goto cleanup_drvrcmd; 1490 1491 if (!(drvr_cmd->state & MPI3MR_CMD_REPLYVALID)) { 1492 printf(IOCNAME "%s: PELGetSeqNum Failed, No Reply\n", sc->name, __func__); 1493 goto out_failed; 1494 } 1495 pel_reply = (Mpi3PELReply_t *)drvr_cmd->reply; 1496 1497 if (((GET_IOC_STATUS(drvr_cmd->ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1498 || (le16toh(pel_reply->PELogStatus) != MPI3_PEL_STATUS_SUCCESS)){ 1499 printf(IOCNAME "%s: PELGetSeqNum Failed, IOCStatus(0x%04x) Loginfo(0x%08x) PEL_LogStatus(0x%04x)\n", 1500 sc->name, __func__, GET_IOC_STATUS(drvr_cmd->ioc_status), 1501 drvr_cmd->ioc_loginfo, le16toh(pel_reply->PELogStatus)); 1502 retry = 1; 1503 } 1504 1505 if (retry) { 1506 if (drvr_cmd->retry_count < MPI3MR_PELCMDS_RETRYCOUNT) { 1507 drvr_cmd->retry_count++; 1508 printf(IOCNAME "%s : PELGetSeqNUM retry=%d\n", sc->name, 1509 __func__, drvr_cmd->retry_count); 1510 mpi3mr_send_pel_getseq(sc, drvr_cmd); 1511 return; 1512 } 1513 printf(IOCNAME "%s :PELGetSeqNUM failed after all retries\n", 1514 sc->name, __func__); 1515 goto out_failed; 1516 } 1517 1518 sc->newest_seqnum = le32toh(pel_seq_num->Newest) + 1; 1519 drvr_cmd->retry_count = 0; 1520 mpi3mr_issue_pel_wait(sc, drvr_cmd); 1521 return; 1522 out_failed: 1523 sc->pel_wait_pend = 0; 1524 cleanup_drvrcmd: 1525 drvr_cmd->state = MPI3MR_CMD_NOTUSED; 1526 drvr_cmd->callback = NULL; 1527 drvr_cmd->retry_count = 0; 1528 } 1529 1530 static int 1531 mpi3mr_pel_getseq(struct mpi3mr_softc *sc) 1532 { 1533 int rval = 0; 1534 U8 sgl_flags = 0; 1535 Mpi3PELReqActionGetSequenceNumbers_t pel_getseq_req; 1536 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1537 1538 if (sc->reset_in_progress || sc->block_ioctls) { 1539 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1540 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1541 return -1; 1542 } 1543 1544 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 1545 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1546 sc->pel_cmds.state = MPI3MR_CMD_PENDING; 1547 sc->pel_cmds.is_waiting = 0; 1548 sc->pel_cmds.retry_count = 0; 1549 sc->pel_cmds.ioc_status = 0; 1550 sc->pel_cmds.ioc_loginfo = 0; 1551 sc->pel_cmds.callback = mpi3mr_pel_getseq_complete; 1552 pel_getseq_req.HostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1553 pel_getseq_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1554 pel_getseq_req.Action = MPI3_PEL_ACTION_GET_SEQNUM; 1555 mpi3mr_add_sg_single(&pel_getseq_req.SGL, sgl_flags, 1556 sc->pel_seq_number_sz, sc->pel_seq_number_dma); 1557 1558 if ((rval = mpi3mr_submit_admin_cmd(sc, &pel_getseq_req, sizeof(pel_getseq_req)))) 1559 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1560 1561 return rval; 1562 } 1563 1564 int 1565 mpi3mr_pel_abort(struct mpi3mr_softc *sc) 1566 { 1567 int retval = 0; 1568 U16 pel_log_status; 1569 Mpi3PELReqActionAbort_t pel_abort_req; 1570 Mpi3PELReply_t *pel_reply = NULL; 1571 1572 if (sc->reset_in_progress || sc->block_ioctls) { 1573 printf(IOCNAME "%s: IOCTL failed: reset in progress: %u ioctls blocked: %u\n", 1574 sc->name, __func__, sc->reset_in_progress, sc->block_ioctls); 1575 return -1; 1576 } 1577 1578 memset(&pel_abort_req, 0, sizeof(pel_abort_req)); 1579 1580 mtx_lock(&sc->pel_abort_cmd.completion.lock); 1581 if (sc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { 1582 printf(IOCNAME "%s: PEL Abort command is in use\n", sc->name, __func__); 1583 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1584 return -1; 1585 } 1586 1587 sc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; 1588 sc->pel_abort_cmd.is_waiting = 1; 1589 sc->pel_abort_cmd.callback = NULL; 1590 pel_abort_req.HostTag = htole16(MPI3MR_HOSTTAG_PELABORT); 1591 pel_abort_req.Function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 1592 pel_abort_req.Action = MPI3_PEL_ACTION_ABORT; 1593 pel_abort_req.AbortHostTag = htole16(MPI3MR_HOSTTAG_PELWAIT); 1594 1595 sc->pel_abort_requested = 1; 1596 1597 init_completion(&sc->pel_abort_cmd.completion); 1598 retval = mpi3mr_submit_admin_cmd(sc, &pel_abort_req, sizeof(pel_abort_req)); 1599 if (retval) { 1600 printf(IOCNAME "%s: Issue IOCTL: Admin Post failed\n", sc->name, __func__); 1601 sc->pel_abort_requested = 0; 1602 retval = -1; 1603 goto out_unlock; 1604 } 1605 wait_for_completion_timeout(&sc->pel_abort_cmd.completion, MPI3MR_INTADMCMD_TIMEOUT); 1606 1607 if (!(sc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { 1608 printf(IOCNAME "%s: PEL Abort command timedout\n",sc->name, __func__); 1609 sc->pel_abort_cmd.is_waiting = 0; 1610 retval = -1; 1611 sc->reset.type = MPI3MR_TRIGGER_SOFT_RESET; 1612 sc->reset.reason = MPI3MR_RESET_FROM_PELABORT_TIMEOUT; 1613 goto out_unlock; 1614 } 1615 if (((GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) 1616 || (!(sc->pel_abort_cmd.state & MPI3MR_CMD_REPLYVALID))) { 1617 printf(IOCNAME "%s: PEL Abort command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 1618 sc->name, __func__, GET_IOC_STATUS(sc->pel_abort_cmd.ioc_status), 1619 sc->pel_abort_cmd.ioc_loginfo); 1620 retval = -1; 1621 goto out_unlock; 1622 } 1623 1624 pel_reply = (Mpi3PELReply_t *)sc->pel_abort_cmd.reply; 1625 pel_log_status = le16toh(pel_reply->PELogStatus); 1626 if (pel_log_status != MPI3_PEL_STATUS_SUCCESS) { 1627 printf(IOCNAME "%s: PEL abort command failed, pel_status(0x%04x)\n", 1628 sc->name, __func__, pel_log_status); 1629 retval = -1; 1630 } 1631 1632 out_unlock: 1633 mtx_unlock(&sc->pel_abort_cmd.completion.lock); 1634 sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; 1635 return retval; 1636 } 1637 1638 /** 1639 * mpi3mr_pel_enable - Handler for PEL enable 1640 * @sc: Adapter instance reference 1641 * @data_out_buf: User buffer containing PEL enable data 1642 * @data_out_sz: length of the user buffer. 1643 * 1644 * This function is the handler for PEL enable driver IOCTL. 1645 * Validates the application given class and locale and if 1646 * requires aborts the existing PEL wait request and/or issues 1647 * new PEL wait request to the firmware and returns. 1648 * 1649 * Return: 0 on success and proper error codes on failure. 1650 */ 1651 static long 1652 mpi3mr_pel_enable(struct mpi3mr_softc *sc, 1653 void *data_out_buf, U32 data_out_sz) 1654 { 1655 long rval = EINVAL; 1656 U8 tmp_class; 1657 U16 tmp_locale; 1658 struct mpi3mr_ioctl_pel_enable pel_enable; 1659 mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__); 1660 1661 if (sc->unrecoverable) { 1662 device_printf(sc->mpi3mr_dev, "Issue IOCTL: controller is in unrecoverable state\n"); 1663 return EFAULT; 1664 } 1665 if (sc->reset_in_progress) { 1666 device_printf(sc->mpi3mr_dev, "Issue IOCTL: reset in progress\n"); 1667 return EAGAIN; 1668 } 1669 if (sc->block_ioctls) { 1670 device_printf(sc->mpi3mr_dev, "Issue IOCTL: IOCTLs are blocked\n"); 1671 return EAGAIN; 1672 } 1673 1674 if ((data_out_sz != sizeof(pel_enable) || 1675 (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT))) { 1676 printf(IOCNAME "%s: Invalid user pel_enable buffer size %u\n", 1677 sc->name, __func__, data_out_sz); 1678 goto out; 1679 } 1680 memset(&pel_enable, 0, sizeof(pel_enable)); 1681 if (copyin(data_out_buf, &pel_enable, sizeof(pel_enable))) { 1682 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1683 __func__, __LINE__); 1684 rval = EFAULT; 1685 goto out; 1686 } 1687 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { 1688 printf(IOCNAME "%s: out of range class %d\n", 1689 sc->name, __func__, pel_enable.pel_class); 1690 goto out; 1691 } 1692 1693 if (sc->pel_wait_pend) { 1694 if ((sc->pel_class <= pel_enable.pel_class) && 1695 !((sc->pel_locale & pel_enable.pel_locale) ^ 1696 pel_enable.pel_locale)) { 1697 rval = 0; 1698 goto out; 1699 } else { 1700 pel_enable.pel_locale |= sc->pel_locale; 1701 if (sc->pel_class < pel_enable.pel_class) 1702 pel_enable.pel_class = sc->pel_class; 1703 1704 if (mpi3mr_pel_abort(sc)) { 1705 printf(IOCNAME "%s: pel_abort failed, status(%ld)\n", 1706 sc->name, __func__, rval); 1707 goto out; 1708 } 1709 } 1710 } 1711 1712 tmp_class = sc->pel_class; 1713 tmp_locale = sc->pel_locale; 1714 sc->pel_class = pel_enable.pel_class; 1715 sc->pel_locale = pel_enable.pel_locale; 1716 sc->pel_wait_pend = 1; 1717 1718 if ((rval = mpi3mr_pel_getseq(sc))) { 1719 sc->pel_class = tmp_class; 1720 sc->pel_locale = tmp_locale; 1721 sc->pel_wait_pend = 0; 1722 printf(IOCNAME "%s: pel get sequence number failed, status(%ld)\n", 1723 sc->name, __func__, rval); 1724 } 1725 1726 out: 1727 return rval; 1728 } 1729 1730 void 1731 mpi3mr_app_save_logdata(struct mpi3mr_softc *sc, char *event_data, 1732 U16 event_data_size) 1733 { 1734 struct mpi3mr_log_data_entry *entry; 1735 U32 index = sc->log_data_buffer_index, sz; 1736 1737 if (!(sc->log_data_buffer)) 1738 return; 1739 1740 entry = (struct mpi3mr_log_data_entry *) 1741 (sc->log_data_buffer + (index * sc->log_data_entry_size)); 1742 entry->valid_entry = 1; 1743 sz = min(sc->log_data_entry_size, event_data_size); 1744 memcpy(entry->data, event_data, sz); 1745 sc->log_data_buffer_index = 1746 ((++index) % MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES); 1747 mpi3mr_app_send_aen(sc); 1748 } 1749 1750 /** 1751 * mpi3mr_get_logdata - Handler for get log data 1752 * @sc: Adapter instance reference 1753 * @data_in_buf: User buffer to copy the logdata entries 1754 * @data_in_sz: length of the user buffer. 1755 * 1756 * This function copies the log data entries to the user buffer 1757 * when log caching is enabled in the driver. 1758 * 1759 * Return: 0 on success and proper error codes on failure 1760 */ 1761 static long 1762 mpi3mr_get_logdata(struct mpi3mr_softc *sc, 1763 void *data_in_buf, U32 data_in_sz) 1764 { 1765 long rval = EINVAL; 1766 U16 num_entries = 0; 1767 U16 entry_sz = sc->log_data_entry_size; 1768 1769 if ((!sc->log_data_buffer) || (data_in_sz < entry_sz)) 1770 return rval; 1771 1772 num_entries = data_in_sz / entry_sz; 1773 if (num_entries > MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES) 1774 num_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1775 1776 if ((rval = copyout(sc->log_data_buffer, data_in_buf, (num_entries * entry_sz)))) { 1777 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1778 rval = EFAULT; 1779 } 1780 1781 return rval; 1782 } 1783 1784 /** 1785 * mpi3mr_logdata_enable - Handler for log data enable 1786 * @sc: Adapter instance reference 1787 * @data_in_buf: User buffer to copy the max logdata entry count 1788 * @data_in_sz: length of the user buffer. 1789 * 1790 * This function enables log data caching in the driver if not 1791 * already enabled and return the maximum number of log data 1792 * entries that can be cached in the driver. 1793 * 1794 * Return: 0 on success and proper error codes on failure 1795 */ 1796 static long 1797 mpi3mr_logdata_enable(struct mpi3mr_softc *sc, 1798 void *data_in_buf, U32 data_in_sz) 1799 { 1800 long rval = EINVAL; 1801 struct mpi3mr_ioctl_logdata_enable logdata_enable; 1802 1803 if (data_in_sz < sizeof(logdata_enable)) 1804 return rval; 1805 1806 if (sc->log_data_buffer) 1807 goto copy_data; 1808 1809 sc->log_data_entry_size = (sc->reply_sz - (sizeof(Mpi3EventNotificationReply_t) - 4)) 1810 + MPI3MR_IOCTL_LOGDATA_ENTRY_HEADER_SZ; 1811 1812 sc->log_data_buffer = malloc((MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES * sc->log_data_entry_size), 1813 M_MPI3MR, M_NOWAIT | M_ZERO); 1814 if (!sc->log_data_buffer) { 1815 printf(IOCNAME "%s log data buffer memory allocation failed\n", sc->name, __func__); 1816 return ENOMEM; 1817 } 1818 1819 sc->log_data_buffer_index = 0; 1820 1821 copy_data: 1822 memset(&logdata_enable, 0, sizeof(logdata_enable)); 1823 logdata_enable.max_entries = MPI3MR_IOCTL_LOGDATA_MAX_ENTRIES; 1824 1825 if ((rval = copyout(&logdata_enable, data_in_buf, sizeof(logdata_enable)))) { 1826 printf(IOCNAME "%s: copy to user failed\n", sc->name, __func__); 1827 rval = EFAULT; 1828 } 1829 1830 return rval; 1831 } 1832 1833 /** 1834 * mpi3mr_get_change_count - Get topology change count 1835 * @sc: Adapter instance reference 1836 * @data_in_buf: User buffer to copy the change count 1837 * @data_in_sz: length of the user buffer. 1838 * 1839 * This function copies the toplogy change count provided by the 1840 * driver in events and cached in the driver to the user 1841 * provided buffer for the specific controller. 1842 * 1843 * Return: 0 on success and proper error codes on failure 1844 */ 1845 static long 1846 mpi3mr_get_change_count(struct mpi3mr_softc *sc, 1847 void *data_in_buf, U32 data_in_sz) 1848 { 1849 long rval = EINVAL; 1850 struct mpi3mr_ioctl_chgcnt chg_count; 1851 memset(&chg_count, 0, sizeof(chg_count)); 1852 1853 chg_count.change_count = sc->change_count; 1854 if (data_in_sz >= sizeof(chg_count)) { 1855 if ((rval = copyout(&chg_count, data_in_buf, sizeof(chg_count)))) { 1856 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1857 __LINE__, __func__); 1858 rval = EFAULT; 1859 } 1860 } 1861 return rval; 1862 } 1863 1864 /** 1865 * mpi3mr_get_alltgtinfo - Get all targets information 1866 * @sc: Adapter instance reference 1867 * @data_in_buf: User buffer to copy the target information 1868 * @data_in_sz: length of the user buffer. 1869 * 1870 * This function copies the driver managed target devices device 1871 * handle, persistent ID, bus ID and taret ID to the user 1872 * provided buffer for the specific controller. This function 1873 * also provides the number of devices managed by the driver for 1874 * the specific controller. 1875 * 1876 * Return: 0 on success and proper error codes on failure 1877 */ 1878 static long 1879 mpi3mr_get_alltgtinfo(struct mpi3mr_softc *sc, 1880 void *data_in_buf, U32 data_in_sz) 1881 { 1882 long rval = EINVAL; 1883 U8 get_count = 0; 1884 U16 i = 0, num_devices = 0; 1885 U32 min_entrylen = 0, kern_entrylen = 0, user_entrylen = 0; 1886 struct mpi3mr_target *tgtdev = NULL; 1887 struct mpi3mr_device_map_info *devmap_info = NULL; 1888 struct mpi3mr_cam_softc *cam_sc = sc->cam_sc; 1889 struct mpi3mr_ioctl_all_tgtinfo *all_tgtinfo = (struct mpi3mr_ioctl_all_tgtinfo *)data_in_buf; 1890 1891 if (data_in_sz < sizeof(uint32_t)) { 1892 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1893 __LINE__, __func__); 1894 goto out; 1895 } 1896 if (data_in_sz == sizeof(uint32_t)) 1897 get_count = 1; 1898 1899 if (TAILQ_EMPTY(&cam_sc->tgt_list)) { 1900 get_count = 1; 1901 goto copy_usrbuf; 1902 } 1903 1904 mtx_lock_spin(&cam_sc->sc->target_lock); 1905 TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) { 1906 num_devices++; 1907 } 1908 mtx_unlock_spin(&cam_sc->sc->target_lock); 1909 1910 if (get_count) 1911 goto copy_usrbuf; 1912 1913 kern_entrylen = num_devices * sizeof(*devmap_info); 1914 1915 devmap_info = malloc(kern_entrylen, M_MPI3MR, M_NOWAIT | M_ZERO); 1916 if (!devmap_info) { 1917 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1918 __LINE__, __func__); 1919 rval = ENOMEM; 1920 goto out; 1921 } 1922 memset((U8*)devmap_info, 0xFF, kern_entrylen); 1923 1924 mtx_lock_spin(&cam_sc->sc->target_lock); 1925 TAILQ_FOREACH(tgtdev, &cam_sc->tgt_list, tgt_next) { 1926 if (i < num_devices) { 1927 devmap_info[i].handle = tgtdev->dev_handle; 1928 devmap_info[i].per_id = tgtdev->per_id; 1929 /*n 1930 * For hidden/ugood device the target_id and bus_id should be 0xFFFFFFFF and 0xFF 1931 */ 1932 if (!tgtdev->exposed_to_os) { 1933 devmap_info[i].target_id = 0xFFFFFFFF; 1934 devmap_info[i].bus_id = 0xFF; 1935 } else { 1936 devmap_info[i].target_id = tgtdev->tid; 1937 devmap_info[i].bus_id = 0; 1938 } 1939 i++; 1940 } 1941 } 1942 num_devices = i; 1943 mtx_unlock_spin(&cam_sc->sc->target_lock); 1944 1945 copy_usrbuf: 1946 if (copyout(&num_devices, &all_tgtinfo->num_devices, sizeof(num_devices))) { 1947 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, __FILE__, 1948 __LINE__, __func__); 1949 rval = EFAULT; 1950 goto out; 1951 } 1952 user_entrylen = (data_in_sz - sizeof(uint32_t))/sizeof(*devmap_info); 1953 user_entrylen *= sizeof(*devmap_info); 1954 min_entrylen = min(user_entrylen, kern_entrylen); 1955 if (min_entrylen && (copyout(devmap_info, &all_tgtinfo->dmi, min_entrylen))) { 1956 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 1957 __FILE__, __LINE__, __func__); 1958 rval = EFAULT; 1959 goto out; 1960 } 1961 rval = 0; 1962 out: 1963 if (devmap_info) 1964 free(devmap_info, M_MPI3MR); 1965 1966 return rval; 1967 } 1968 1969 /** 1970 * mpi3mr_get_tgtinfo - Get specific target information 1971 * @sc: Adapter instance reference 1972 * @karg: driver ponter to users payload buffer 1973 * 1974 * This function copies the driver managed specific target device 1975 * info like handle, persistent ID, bus ID and taret ID to the user 1976 * provided buffer for the specific controller. 1977 * 1978 * Return: 0 on success and proper error codes on failure 1979 */ 1980 static long 1981 mpi3mr_get_tgtinfo(struct mpi3mr_softc *sc, 1982 struct mpi3mr_ioctl_drvcmd *karg) 1983 { 1984 long rval = EINVAL; 1985 struct mpi3mr_target *tgtdev = NULL; 1986 struct mpi3mr_ioctl_tgtinfo tgtinfo; 1987 1988 memset(&tgtinfo, 0, sizeof(tgtinfo)); 1989 1990 if ((karg->data_out_size != sizeof(struct mpi3mr_ioctl_tgtinfo)) || 1991 (karg->data_in_size != sizeof(struct mpi3mr_ioctl_tgtinfo))) { 1992 printf(IOCNAME "Invalid user tgtinfo buffer size %s() line: %d\n", sc->name, 1993 __func__, __LINE__); 1994 goto out; 1995 } 1996 1997 if (copyin(karg->data_out_buf, &tgtinfo, sizeof(tgtinfo))) { 1998 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 1999 __func__, __LINE__); 2000 rval = EFAULT; 2001 goto out; 2002 } 2003 2004 if ((tgtinfo.bus_id != 0xFF) && (tgtinfo.target_id != 0xFFFFFFFF)) { 2005 if ((tgtinfo.persistent_id != 0xFFFF) || 2006 (tgtinfo.dev_handle != 0xFFFF)) 2007 goto out; 2008 tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.target_id); 2009 } else if (tgtinfo.persistent_id != 0xFFFF) { 2010 if ((tgtinfo.bus_id != 0xFF) || 2011 (tgtinfo.dev_handle !=0xFFFF) || 2012 (tgtinfo.target_id != 0xFFFFFFFF)) 2013 goto out; 2014 tgtdev = mpi3mr_find_target_by_per_id(sc->cam_sc, tgtinfo.persistent_id); 2015 } else if (tgtinfo.dev_handle !=0xFFFF) { 2016 if ((tgtinfo.bus_id != 0xFF) || 2017 (tgtinfo.target_id != 0xFFFFFFFF) || 2018 (tgtinfo.persistent_id != 0xFFFF)) 2019 goto out; 2020 tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tgtinfo.dev_handle); 2021 } 2022 if (!tgtdev) 2023 goto out; 2024 2025 tgtinfo.target_id = tgtdev->per_id; 2026 tgtinfo.bus_id = 0; 2027 tgtinfo.dev_handle = tgtdev->dev_handle; 2028 tgtinfo.persistent_id = tgtdev->per_id; 2029 tgtinfo.seq_num = 0; 2030 2031 if (copyout(&tgtinfo, karg->data_in_buf, sizeof(tgtinfo))) { 2032 printf(IOCNAME "failure at %s() line:%d\n", sc->name, 2033 __func__, __LINE__); 2034 rval = EFAULT; 2035 } 2036 2037 out: 2038 return rval; 2039 } 2040 2041 /** 2042 * mpi3mr_get_pciinfo - Get PCI info IOCTL handler 2043 * @sc: Adapter instance reference 2044 * @data_in_buf: User buffer to hold adapter information 2045 * @data_in_sz: length of the user buffer. 2046 * 2047 * This function provides the PCI spec information for the 2048 * given controller 2049 * 2050 * Return: 0 on success and proper error codes on failure 2051 */ 2052 static long 2053 mpi3mr_get_pciinfo(struct mpi3mr_softc *sc, 2054 void *data_in_buf, U32 data_in_sz) 2055 { 2056 long rval = EINVAL; 2057 U8 i; 2058 struct mpi3mr_ioctl_pciinfo pciinfo; 2059 memset(&pciinfo, 0, sizeof(pciinfo)); 2060 2061 for (i = 0; i < 64; i++) 2062 pciinfo.config_space[i] = pci_read_config(sc->mpi3mr_dev, (i * 4), 4); 2063 2064 if (data_in_sz >= sizeof(pciinfo)) { 2065 if ((rval = copyout(&pciinfo, data_in_buf, sizeof(pciinfo)))) { 2066 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2067 __FILE__, __LINE__, __func__); 2068 rval = EFAULT; 2069 } 2070 } 2071 return rval; 2072 } 2073 2074 /** 2075 * mpi3mr_get_adpinfo - Get adapter info IOCTL handler 2076 * @sc: Adapter instance reference 2077 * @data_in_buf: User buffer to hold adapter information 2078 * @data_in_sz: length of the user buffer. 2079 * 2080 * This function provides adapter information for the given 2081 * controller 2082 * 2083 * Return: 0 on success and proper error codes on failure 2084 */ 2085 static long 2086 mpi3mr_get_adpinfo(struct mpi3mr_softc *sc, 2087 void *data_in_buf, U32 data_in_sz) 2088 { 2089 long rval = EINVAL; 2090 struct mpi3mr_ioctl_adpinfo adpinfo; 2091 enum mpi3mr_iocstate ioc_state; 2092 memset(&adpinfo, 0, sizeof(adpinfo)); 2093 2094 adpinfo.adp_type = MPI3MR_IOCTL_ADPTYPE_AVGFAMILY; 2095 adpinfo.pci_dev_id = pci_get_device(sc->mpi3mr_dev); 2096 adpinfo.pci_dev_hw_rev = pci_read_config(sc->mpi3mr_dev, PCIR_REVID, 1); 2097 adpinfo.pci_subsys_dev_id = pci_get_subdevice(sc->mpi3mr_dev); 2098 adpinfo.pci_subsys_ven_id = pci_get_subvendor(sc->mpi3mr_dev); 2099 adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev); 2100 adpinfo.pci_dev = pci_get_slot(sc->mpi3mr_dev); 2101 adpinfo.pci_func = pci_get_function(sc->mpi3mr_dev); 2102 adpinfo.pci_seg_id = pci_get_domain(sc->mpi3mr_dev); 2103 adpinfo.ioctl_ver = MPI3MR_IOCTL_VERSION; 2104 memcpy((U8 *)&adpinfo.driver_info, (U8 *)&sc->driver_info, sizeof(adpinfo.driver_info)); 2105 2106 ioc_state = mpi3mr_get_iocstate(sc); 2107 2108 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 2109 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_UNRECOVERABLE; 2110 else if (sc->reset_in_progress || sc->block_ioctls) 2111 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_IN_RESET; 2112 else if (ioc_state == MRIOC_STATE_FAULT) 2113 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_FAULT; 2114 else 2115 adpinfo.adp_state = MPI3MR_IOCTL_ADP_STATE_OPERATIONAL; 2116 2117 if (data_in_sz >= sizeof(adpinfo)) { 2118 if ((rval = copyout(&adpinfo, data_in_buf, sizeof(adpinfo)))) { 2119 printf(IOCNAME "failure at %s:%d/%s()!\n", sc->name, 2120 __FILE__, __LINE__, __func__); 2121 rval = EFAULT; 2122 } 2123 } 2124 return rval; 2125 } 2126 /** 2127 * mpi3mr_app_drvrcmds - Driver IOCTL handler 2128 * @dev: char device 2129 * @cmd: IOCTL command 2130 * @arg: User data payload buffer for the IOCTL 2131 * @flag: flags 2132 * @thread: threads 2133 * 2134 * This function is the top level handler for driver commands, 2135 * this does basic validation of the buffer and identifies the 2136 * opcode and switches to correct sub handler. 2137 * 2138 * Return: 0 on success and proper error codes on failure 2139 */ 2140 2141 static int 2142 mpi3mr_app_drvrcmds(struct cdev *dev, u_long cmd, 2143 void *uarg, int flag, struct thread *td) 2144 { 2145 long rval = EINVAL; 2146 struct mpi3mr_softc *sc = NULL; 2147 struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)uarg; 2148 2149 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 2150 if (!sc) 2151 return ENODEV; 2152 2153 mtx_lock(&sc->ioctl_cmds.completion.lock); 2154 switch (karg->opcode) { 2155 case MPI3MR_DRVRIOCTL_OPCODE_ADPINFO: 2156 rval = mpi3mr_get_adpinfo(sc, karg->data_in_buf, karg->data_in_size); 2157 break; 2158 case MPI3MR_DRVRIOCTL_OPCODE_GETPCIINFO: 2159 rval = mpi3mr_get_pciinfo(sc, karg->data_in_buf, karg->data_in_size); 2160 break; 2161 case MPI3MR_DRVRIOCTL_OPCODE_TGTDEVINFO: 2162 rval = mpi3mr_get_tgtinfo(sc, karg); 2163 break; 2164 case MPI3MR_DRVRIOCTL_OPCODE_ALLTGTDEVINFO: 2165 rval = mpi3mr_get_alltgtinfo(sc, karg->data_in_buf, karg->data_in_size); 2166 break; 2167 case MPI3MR_DRVRIOCTL_OPCODE_GETCHGCNT: 2168 rval = mpi3mr_get_change_count(sc, karg->data_in_buf, karg->data_in_size); 2169 break; 2170 case MPI3MR_DRVRIOCTL_OPCODE_LOGDATAENABLE: 2171 rval = mpi3mr_logdata_enable(sc, karg->data_in_buf, karg->data_in_size); 2172 break; 2173 case MPI3MR_DRVRIOCTL_OPCODE_GETLOGDATA: 2174 rval = mpi3mr_get_logdata(sc, karg->data_in_buf, karg->data_in_size); 2175 break; 2176 case MPI3MR_DRVRIOCTL_OPCODE_PELENABLE: 2177 rval = mpi3mr_pel_enable(sc, karg->data_out_buf, karg->data_out_size); 2178 break; 2179 case MPI3MR_DRVRIOCTL_OPCODE_ADPRESET: 2180 rval = mpi3mr_adp_reset(sc, karg->data_out_buf, karg->data_out_size); 2181 break; 2182 case MPI3MR_DRVRIOCTL_OPCODE_UNKNOWN: 2183 default: 2184 printf("Unsupported drvr ioctl opcode 0x%x\n", karg->opcode); 2185 break; 2186 } 2187 mtx_unlock(&sc->ioctl_cmds.completion.lock); 2188 return rval; 2189 } 2190 /** 2191 * mpi3mr_ioctl - IOCTL Handler 2192 * @dev: char device 2193 * @cmd: IOCTL command 2194 * @arg: User data payload buffer for the IOCTL 2195 * @flag: flags 2196 * @thread: threads 2197 * 2198 * This is the IOCTL entry point which checks the command type and 2199 * executes proper sub handler specific for the command. 2200 * 2201 * Return: 0 on success and proper error codes on failure 2202 */ 2203 static int 2204 mpi3mr_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) 2205 { 2206 int rval = EINVAL; 2207 2208 struct mpi3mr_softc *sc = NULL; 2209 struct mpi3mr_ioctl_drvcmd *karg = (struct mpi3mr_ioctl_drvcmd *)arg; 2210 2211 sc = mpi3mr_app_get_adp_instance(karg->mrioc_id); 2212 2213 if (!sc) 2214 return ENODEV; 2215 2216 mpi3mr_atomic_inc(&sc->pend_ioctls); 2217 2218 2219 if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) { 2220 mpi3mr_dprint(sc, MPI3MR_INFO, 2221 "Return back IOCTL, shutdown is in progress\n"); 2222 mpi3mr_atomic_dec(&sc->pend_ioctls); 2223 return ENODEV; 2224 } 2225 2226 switch (cmd) { 2227 case MPI3MRDRVCMD: 2228 rval = mpi3mr_app_drvrcmds(dev, cmd, arg, flag, td); 2229 break; 2230 case MPI3MRMPTCMD: 2231 mtx_lock(&sc->ioctl_cmds.completion.lock); 2232 rval = mpi3mr_app_mptcmds(dev, cmd, arg, flag, td); 2233 mtx_unlock(&sc->ioctl_cmds.completion.lock); 2234 break; 2235 default: 2236 printf("%s:Unsupported ioctl cmd (0x%08lx)\n", MPI3MR_DRIVER_NAME, cmd); 2237 break; 2238 } 2239 2240 mpi3mr_atomic_dec(&sc->pend_ioctls); 2241 2242 return rval; 2243 } 2244