1 // SPDX-License-Identifier: GPL-2.0-only 2 3 /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */ 4 /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ 5 6 #include <asm/byteorder.h> 7 #include <linux/completion.h> 8 #include <linux/crc32.h> 9 #include <linux/delay.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/kref.h> 12 #include <linux/list.h> 13 #include <linux/mhi.h> 14 #include <linux/mm.h> 15 #include <linux/moduleparam.h> 16 #include <linux/mutex.h> 17 #include <linux/overflow.h> 18 #include <linux/pci.h> 19 #include <linux/scatterlist.h> 20 #include <linux/sched/signal.h> 21 #include <linux/types.h> 22 #include <linux/uaccess.h> 23 #include <linux/workqueue.h> 24 #include <linux/wait.h> 25 #include <drm/drm_device.h> 26 #include <drm/drm_file.h> 27 #include <uapi/drm/qaic_accel.h> 28 29 #include "qaic.h" 30 31 #define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */ 32 #define QAIC_DBC_Q_GAP SZ_256 33 #define QAIC_DBC_Q_BUF_ALIGN SZ_4K 34 #define QAIC_MANAGE_WIRE_MSG_LENGTH SZ_64K /* Max DMA message length */ 35 #define QAIC_WRAPPER_MAX_SIZE SZ_4K 36 #define QAIC_MHI_RETRY_WAIT_MS 100 37 #define QAIC_MHI_RETRY_MAX 20 38 39 static unsigned int control_resp_timeout_s = 60; /* 60 sec default */ 40 module_param(control_resp_timeout_s, uint, 0600); 41 MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM"); 42 43 struct manage_msg { 44 u32 len; 45 u32 count; 46 u8 data[]; 47 }; 48 49 /* 50 * wire encoding structures for the manage protocol. 51 * All fields are little endian on the wire 52 */ 53 struct wire_msg_hdr { 54 __le32 crc32; /* crc of everything following this field in the message */ 55 __le32 magic_number; 56 __le32 sequence_number; 57 __le32 len; /* length of this message */ 58 __le32 count; /* number of transactions in this message */ 59 __le32 handle; /* unique id to track the resources consumed */ 60 __le32 partition_id; /* partition id for the request (signed) */ 61 __le32 padding; /* must be 0 */ 62 } __packed; 63 64 struct wire_msg { 65 struct wire_msg_hdr hdr; 66 u8 data[]; 67 } __packed; 68 69 struct wire_trans_hdr { 70 __le32 type; 71 __le32 len; 72 } __packed; 73 74 /* Each message sent from driver to device are organized in a list of wrapper_msg */ 75 struct wrapper_msg { 76 struct list_head list; 77 struct kref ref_count; 78 u32 len; /* length of data to transfer */ 79 struct wrapper_list *head; 80 union { 81 struct wire_msg msg; 82 struct wire_trans_hdr trans; 83 }; 84 }; 85 86 struct wrapper_list { 87 struct list_head list; 88 spinlock_t lock; /* Protects the list state during additions and removals */ 89 }; 90 91 struct wire_trans_passthrough { 92 struct wire_trans_hdr hdr; 93 u8 data[]; 94 } __packed; 95 96 struct wire_addr_size_pair { 97 __le64 addr; 98 __le64 size; 99 } __packed; 100 101 struct wire_trans_dma_xfer { 102 struct wire_trans_hdr hdr; 103 __le32 tag; 104 __le32 count; 105 __le32 dma_chunk_id; 106 __le32 padding; 107 struct wire_addr_size_pair data[]; 108 } __packed; 109 110 /* Initiated by device to continue the DMA xfer of a large piece of data */ 111 struct wire_trans_dma_xfer_cont { 112 struct wire_trans_hdr hdr; 113 __le32 dma_chunk_id; 114 __le32 padding; 115 __le64 xferred_size; 116 } __packed; 117 118 struct wire_trans_activate_to_dev { 119 struct wire_trans_hdr hdr; 120 __le64 req_q_addr; 121 __le64 rsp_q_addr; 122 __le32 req_q_size; 123 __le32 rsp_q_size; 124 __le32 buf_len; 125 __le32 options; /* unused, but BIT(16) has meaning to the device */ 126 } __packed; 127 128 struct wire_trans_activate_from_dev { 129 struct wire_trans_hdr hdr; 130 __le32 status; 131 __le32 dbc_id; 132 __le64 options; /* unused */ 133 } __packed; 134 135 struct wire_trans_deactivate_from_dev { 136 struct wire_trans_hdr hdr; 137 __le32 status; 138 __le32 dbc_id; 139 } __packed; 140 141 struct wire_trans_terminate_to_dev { 142 struct wire_trans_hdr hdr; 143 __le32 handle; 144 __le32 padding; 145 } __packed; 146 147 struct wire_trans_terminate_from_dev { 148 struct wire_trans_hdr hdr; 149 __le32 status; 150 __le32 padding; 151 } __packed; 152 153 struct wire_trans_status_to_dev { 154 struct wire_trans_hdr hdr; 155 } __packed; 156 157 struct wire_trans_status_from_dev { 158 struct wire_trans_hdr hdr; 159 __le16 major; 160 __le16 minor; 161 __le32 status; 162 __le64 status_flags; 163 } __packed; 164 165 struct wire_trans_validate_part_to_dev { 166 struct wire_trans_hdr hdr; 167 __le32 part_id; 168 __le32 padding; 169 } __packed; 170 171 struct wire_trans_validate_part_from_dev { 172 struct wire_trans_hdr hdr; 173 __le32 status; 174 __le32 padding; 175 } __packed; 176 177 struct xfer_queue_elem { 178 /* 179 * Node in list of ongoing transfer request on control channel. 180 * Maintained by root device struct. 181 */ 182 struct list_head list; 183 /* Sequence number of this transfer request */ 184 u32 seq_num; 185 /* This is used to wait on until completion of transfer request */ 186 struct completion xfer_done; 187 /* Received data from device */ 188 void *buf; 189 }; 190 191 struct dma_xfer { 192 /* Node in list of DMA transfers which is used for cleanup */ 193 struct list_head list; 194 /* SG table of memory used for DMA */ 195 struct sg_table *sgt; 196 /* Array pages used for DMA */ 197 struct page **page_list; 198 /* Number of pages used for DMA */ 199 unsigned long nr_pages; 200 }; 201 202 struct ioctl_resources { 203 /* List of all DMA transfers which is used later for cleanup */ 204 struct list_head dma_xfers; 205 /* Base address of request queue which belongs to a DBC */ 206 void *buf; 207 /* 208 * Base bus address of request queue which belongs to a DBC. Response 209 * queue base bus address can be calculated by adding size of request 210 * queue to base bus address of request queue. 211 */ 212 dma_addr_t dma_addr; 213 /* Total size of request queue and response queue in byte */ 214 u32 total_size; 215 /* Total number of elements that can be queued in each of request and response queue */ 216 u32 nelem; 217 /* Base address of response queue which belongs to a DBC */ 218 void *rsp_q_base; 219 /* Status of the NNC message received */ 220 u32 status; 221 /* DBC id of the DBC received from device */ 222 u32 dbc_id; 223 /* 224 * DMA transfer request messages can be big in size and it may not be 225 * possible to send them in one shot. In such cases the messages are 226 * broken into chunks, this field stores ID of such chunks. 227 */ 228 u32 dma_chunk_id; 229 /* Total number of bytes transferred for a DMA xfer request */ 230 u64 xferred_dma_size; 231 /* Header of transaction message received from user. Used during DMA xfer request. */ 232 void *trans_hdr; 233 }; 234 235 struct resp_work { 236 struct work_struct work; 237 struct qaic_device *qdev; 238 void *buf; 239 }; 240 241 /* 242 * Since we're working with little endian messages, its useful to be able to 243 * increment without filling a whole line with conversions back and forth just 244 * to add one(1) to a message count. 245 */ 246 static __le32 incr_le32(__le32 val) 247 { 248 return cpu_to_le32(le32_to_cpu(val) + 1); 249 } 250 251 static u32 gen_crc(void *msg) 252 { 253 struct wrapper_list *wrappers = msg; 254 struct wrapper_msg *w; 255 u32 crc = ~0; 256 257 list_for_each_entry(w, &wrappers->list, list) 258 crc = crc32(crc, &w->msg, w->len); 259 260 return crc ^ ~0; 261 } 262 263 static u32 gen_crc_stub(void *msg) 264 { 265 return 0; 266 } 267 268 static bool valid_crc(void *msg) 269 { 270 struct wire_msg_hdr *hdr = msg; 271 bool ret; 272 u32 crc; 273 274 /* 275 * The output of this algorithm is always converted to the native 276 * endianness. 277 */ 278 crc = le32_to_cpu(hdr->crc32); 279 hdr->crc32 = 0; 280 ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc; 281 hdr->crc32 = cpu_to_le32(crc); 282 return ret; 283 } 284 285 static bool valid_crc_stub(void *msg) 286 { 287 return true; 288 } 289 290 static void free_wrapper(struct kref *ref) 291 { 292 struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count); 293 294 list_del(&wrapper->list); 295 kfree(wrapper); 296 } 297 298 static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources, 299 struct qaic_user *usr) 300 { 301 u32 dbc_id = resources->dbc_id; 302 303 if (resources->buf) { 304 wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use); 305 qdev->dbc[dbc_id].req_q_base = resources->buf; 306 qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base; 307 qdev->dbc[dbc_id].dma_addr = resources->dma_addr; 308 qdev->dbc[dbc_id].total_size = resources->total_size; 309 qdev->dbc[dbc_id].nelem = resources->nelem; 310 enable_dbc(qdev, dbc_id, usr); 311 qdev->dbc[dbc_id].in_use = true; 312 resources->buf = NULL; 313 set_dbc_state(qdev, dbc_id, DBC_STATE_ASSIGNED); 314 } 315 } 316 317 static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources) 318 { 319 if (resources->buf) 320 dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf, 321 resources->dma_addr); 322 resources->buf = NULL; 323 } 324 325 static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources) 326 { 327 struct dma_xfer *xfer; 328 struct dma_xfer *x; 329 int i; 330 331 list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) { 332 dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0); 333 sg_free_table(xfer->sgt); 334 kfree(xfer->sgt); 335 for (i = 0; i < xfer->nr_pages; ++i) 336 put_page(xfer->page_list[i]); 337 kfree(xfer->page_list); 338 list_del(&xfer->list); 339 kfree(xfer); 340 } 341 } 342 343 static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size) 344 { 345 struct wrapper_msg *w = kzalloc(size, GFP_KERNEL); 346 347 if (!w) 348 return NULL; 349 list_add_tail(&w->list, &wrappers->list); 350 kref_init(&w->ref_count); 351 w->head = wrappers; 352 return w; 353 } 354 355 static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, 356 u32 *user_len) 357 { 358 struct qaic_manage_trans_passthrough *in_trans = trans; 359 struct wire_trans_passthrough *out_trans; 360 struct wrapper_msg *trans_wrapper; 361 struct wrapper_msg *wrapper; 362 struct wire_msg *msg; 363 u32 msg_hdr_len; 364 365 wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); 366 msg = &wrapper->msg; 367 msg_hdr_len = le32_to_cpu(msg->hdr.len); 368 369 if (in_trans->hdr.len % 8 != 0) 370 return -EINVAL; 371 372 if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH) 373 return -ENOSPC; 374 375 trans_wrapper = add_wrapper(wrappers, 376 offsetof(struct wrapper_msg, trans) + in_trans->hdr.len); 377 if (!trans_wrapper) 378 return -ENOMEM; 379 trans_wrapper->len = in_trans->hdr.len; 380 out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans; 381 382 memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr)); 383 msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len); 384 msg->hdr.count = incr_le32(msg->hdr.count); 385 *user_len += in_trans->hdr.len; 386 out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV); 387 out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len); 388 389 return 0; 390 } 391 392 /* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */ 393 static int find_and_map_user_pages(struct qaic_device *qdev, 394 struct qaic_manage_trans_dma_xfer *in_trans, 395 struct ioctl_resources *resources, struct dma_xfer *xfer) 396 { 397 u64 xfer_start_addr, remaining, end, total; 398 unsigned long need_pages; 399 struct page **page_list; 400 unsigned long nr_pages; 401 struct sg_table *sgt; 402 int ret; 403 int i; 404 405 if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr)) 406 return -EINVAL; 407 408 if (in_trans->size < resources->xferred_dma_size) 409 return -EINVAL; 410 remaining = in_trans->size - resources->xferred_dma_size; 411 if (remaining == 0) 412 return -EINVAL; 413 414 if (check_add_overflow(xfer_start_addr, remaining, &end)) 415 return -EINVAL; 416 417 total = remaining + offset_in_page(xfer_start_addr); 418 if (total >= SIZE_MAX) 419 return -EINVAL; 420 421 need_pages = DIV_ROUND_UP(total, PAGE_SIZE); 422 423 nr_pages = need_pages; 424 425 while (1) { 426 page_list = kmalloc_objs(*page_list, nr_pages, 427 GFP_KERNEL | __GFP_NOWARN); 428 if (!page_list) { 429 nr_pages = nr_pages / 2; 430 if (!nr_pages) 431 return -ENOMEM; 432 } else { 433 break; 434 } 435 } 436 437 ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list); 438 if (ret < 0) 439 goto free_page_list; 440 if (ret != nr_pages) { 441 nr_pages = ret; 442 ret = -EFAULT; 443 goto put_pages; 444 } 445 446 sgt = kmalloc_obj(*sgt); 447 if (!sgt) { 448 ret = -ENOMEM; 449 goto put_pages; 450 } 451 452 ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages, 453 offset_in_page(xfer_start_addr), 454 remaining, GFP_KERNEL); 455 if (ret) { 456 ret = -ENOMEM; 457 goto free_sgt; 458 } 459 460 ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0); 461 if (ret) 462 goto free_table; 463 464 xfer->sgt = sgt; 465 xfer->page_list = page_list; 466 xfer->nr_pages = nr_pages; 467 468 return need_pages > nr_pages ? 1 : 0; 469 470 free_table: 471 sg_free_table(sgt); 472 free_sgt: 473 kfree(sgt); 474 put_pages: 475 for (i = 0; i < nr_pages; ++i) 476 put_page(page_list[i]); 477 free_page_list: 478 kfree(page_list); 479 return ret; 480 } 481 482 /* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */ 483 static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers, 484 struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size, 485 struct wire_trans_dma_xfer **out_trans) 486 { 487 struct wrapper_msg *trans_wrapper; 488 struct sg_table *sgt = xfer->sgt; 489 struct wire_addr_size_pair *asp; 490 struct scatterlist *sg; 491 struct wrapper_msg *w; 492 unsigned int dma_len; 493 u64 dma_chunk_len; 494 void *boundary; 495 int nents_dma; 496 int nents; 497 int i; 498 499 nents = sgt->nents; 500 nents_dma = nents; 501 *size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); 502 for_each_sgtable_dma_sg(sgt, sg, i) { 503 *size -= sizeof(*asp); 504 /* Save 1K for possible follow-up transactions. */ 505 if (*size < SZ_1K) { 506 nents_dma = i; 507 break; 508 } 509 } 510 511 trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE); 512 if (!trans_wrapper) 513 return -ENOMEM; 514 *out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans; 515 516 asp = (*out_trans)->data; 517 boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE; 518 *size = 0; 519 520 dma_len = 0; 521 w = trans_wrapper; 522 dma_chunk_len = 0; 523 for_each_sg(sgt->sgl, sg, nents_dma, i) { 524 asp->size = cpu_to_le64(dma_len); 525 dma_chunk_len += dma_len; 526 if (dma_len) { 527 asp++; 528 if ((void *)asp + sizeof(*asp) > boundary) { 529 w->len = (void *)asp - (void *)&w->msg; 530 *size += w->len; 531 w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE); 532 if (!w) 533 return -ENOMEM; 534 boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE; 535 asp = (struct wire_addr_size_pair *)&w->msg; 536 } 537 } 538 asp->addr = cpu_to_le64(sg_dma_address(sg)); 539 dma_len = sg_dma_len(sg); 540 } 541 /* finalize the last segment */ 542 asp->size = cpu_to_le64(dma_len); 543 w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg; 544 *size += w->len; 545 dma_chunk_len += dma_len; 546 resources->xferred_dma_size += dma_chunk_len; 547 548 return nents_dma < nents ? 1 : 0; 549 } 550 551 static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer) 552 { 553 int i; 554 555 dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0); 556 sg_free_table(xfer->sgt); 557 kfree(xfer->sgt); 558 for (i = 0; i < xfer->nr_pages; ++i) 559 put_page(xfer->page_list[i]); 560 kfree(xfer->page_list); 561 } 562 563 static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, 564 u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr) 565 { 566 struct qaic_manage_trans_dma_xfer *in_trans = trans; 567 struct wire_trans_dma_xfer *out_trans; 568 struct wrapper_msg *wrapper; 569 struct dma_xfer *xfer; 570 struct wire_msg *msg; 571 bool need_cont_dma; 572 u32 msg_hdr_len; 573 u32 size; 574 int ret; 575 576 wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); 577 msg = &wrapper->msg; 578 msg_hdr_len = le32_to_cpu(msg->hdr.len); 579 580 /* There should be enough space to hold at least one ASP entry. */ 581 if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) > 582 QAIC_MANAGE_WIRE_MSG_LENGTH) 583 return -ENOMEM; 584 585 xfer = kmalloc_obj(*xfer); 586 if (!xfer) 587 return -ENOMEM; 588 589 ret = find_and_map_user_pages(qdev, in_trans, resources, xfer); 590 if (ret < 0) 591 goto free_xfer; 592 593 need_cont_dma = (bool)ret; 594 595 ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans); 596 if (ret < 0) 597 goto cleanup_xfer; 598 599 need_cont_dma = need_cont_dma || (bool)ret; 600 601 msg->hdr.len = cpu_to_le32(msg_hdr_len + size); 602 msg->hdr.count = incr_le32(msg->hdr.count); 603 604 out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV); 605 out_trans->hdr.len = cpu_to_le32(size); 606 out_trans->tag = cpu_to_le32(in_trans->tag); 607 out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) / 608 sizeof(struct wire_addr_size_pair)); 609 610 *user_len += in_trans->hdr.len; 611 612 if (resources->dma_chunk_id) { 613 out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id); 614 } else if (need_cont_dma) { 615 while (resources->dma_chunk_id == 0) 616 resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id); 617 618 out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id); 619 } 620 resources->trans_hdr = trans; 621 622 list_add(&xfer->list, &resources->dma_xfers); 623 return 0; 624 625 cleanup_xfer: 626 cleanup_xfer(qdev, xfer); 627 free_xfer: 628 kfree(xfer); 629 return ret; 630 } 631 632 static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, 633 u32 *user_len, struct ioctl_resources *resources) 634 { 635 struct qaic_manage_trans_activate_to_dev *in_trans = trans; 636 struct wire_trans_activate_to_dev *out_trans; 637 struct wrapper_msg *trans_wrapper; 638 struct wrapper_msg *wrapper; 639 struct wire_msg *msg; 640 dma_addr_t dma_addr; 641 u32 msg_hdr_len; 642 void *buf; 643 u32 nelem; 644 u32 size; 645 int ret; 646 647 wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); 648 msg = &wrapper->msg; 649 msg_hdr_len = le32_to_cpu(msg->hdr.len); 650 651 if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH) 652 return -ENOSPC; 653 654 if (!in_trans->queue_size) 655 return -EINVAL; 656 657 if (in_trans->pad) 658 return -EINVAL; 659 660 nelem = in_trans->queue_size; 661 if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()), 662 nelem, 663 &size)) 664 return -EINVAL; 665 666 if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size) 667 return -EINVAL; 668 669 size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN); 670 671 buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL); 672 if (!buf) 673 return -ENOMEM; 674 675 trans_wrapper = add_wrapper(wrappers, 676 offsetof(struct wrapper_msg, trans) + sizeof(*out_trans)); 677 if (!trans_wrapper) { 678 ret = -ENOMEM; 679 goto free_dma; 680 } 681 trans_wrapper->len = sizeof(*out_trans); 682 out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans; 683 684 out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV); 685 out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans)); 686 out_trans->buf_len = cpu_to_le32(size); 687 out_trans->req_q_addr = cpu_to_le64(dma_addr); 688 out_trans->req_q_size = cpu_to_le32(nelem); 689 out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size()); 690 out_trans->rsp_q_size = cpu_to_le32(nelem); 691 out_trans->options = cpu_to_le32(in_trans->options); 692 693 *user_len += in_trans->hdr.len; 694 msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans)); 695 msg->hdr.count = incr_le32(msg->hdr.count); 696 697 resources->buf = buf; 698 resources->dma_addr = dma_addr; 699 resources->total_size = size; 700 resources->nelem = nelem; 701 resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size(); 702 return 0; 703 704 free_dma: 705 dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr); 706 return ret; 707 } 708 709 static int encode_deactivate(struct qaic_device *qdev, void *trans, 710 u32 *user_len, struct qaic_user *usr) 711 { 712 struct qaic_manage_trans_deactivate *in_trans = trans; 713 714 if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad) 715 return -EINVAL; 716 717 *user_len += in_trans->hdr.len; 718 719 return disable_dbc(qdev, in_trans->dbc_id, usr); 720 } 721 722 static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, 723 u32 *user_len) 724 { 725 struct qaic_manage_trans_status_to_dev *in_trans = trans; 726 struct wire_trans_status_to_dev *out_trans; 727 struct wrapper_msg *trans_wrapper; 728 struct wrapper_msg *wrapper; 729 struct wire_msg *msg; 730 u32 msg_hdr_len; 731 732 wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); 733 msg = &wrapper->msg; 734 msg_hdr_len = le32_to_cpu(msg->hdr.len); 735 736 if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH) 737 return -ENOSPC; 738 739 trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper)); 740 if (!trans_wrapper) 741 return -ENOMEM; 742 743 trans_wrapper->len = sizeof(*out_trans); 744 out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans; 745 746 out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV); 747 out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len); 748 msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len); 749 msg->hdr.count = incr_le32(msg->hdr.count); 750 *user_len += in_trans->hdr.len; 751 752 return 0; 753 } 754 755 static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg, 756 struct wrapper_list *wrappers, struct ioctl_resources *resources, 757 struct qaic_user *usr) 758 { 759 struct qaic_manage_trans_hdr *trans_hdr; 760 struct wrapper_msg *wrapper; 761 struct wire_msg *msg; 762 u32 user_len = 0; 763 int ret; 764 int i; 765 766 if (!user_msg->count || 767 user_msg->len < sizeof(*trans_hdr)) { 768 ret = -EINVAL; 769 goto out; 770 } 771 772 wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); 773 msg = &wrapper->msg; 774 775 msg->hdr.len = cpu_to_le32(sizeof(msg->hdr)); 776 777 if (resources->dma_chunk_id) { 778 ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr); 779 msg->hdr.count = cpu_to_le32(1); 780 goto out; 781 } 782 783 for (i = 0; i < user_msg->count; ++i) { 784 if (user_len > user_msg->len - sizeof(*trans_hdr)) { 785 ret = -EINVAL; 786 break; 787 } 788 trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len); 789 if (trans_hdr->len < sizeof(trans_hdr) || 790 size_add(user_len, trans_hdr->len) > user_msg->len) { 791 ret = -EINVAL; 792 break; 793 } 794 795 switch (trans_hdr->type) { 796 case QAIC_TRANS_PASSTHROUGH_FROM_USR: 797 ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len); 798 break; 799 case QAIC_TRANS_DMA_XFER_FROM_USR: 800 ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr); 801 break; 802 case QAIC_TRANS_ACTIVATE_FROM_USR: 803 ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources); 804 break; 805 case QAIC_TRANS_DEACTIVATE_FROM_USR: 806 ret = encode_deactivate(qdev, trans_hdr, &user_len, usr); 807 break; 808 case QAIC_TRANS_STATUS_FROM_USR: 809 ret = encode_status(qdev, trans_hdr, wrappers, &user_len); 810 break; 811 default: 812 ret = -EINVAL; 813 break; 814 } 815 816 if (ret) 817 goto out; 818 } 819 820 if (user_len != user_msg->len) 821 ret = -EINVAL; 822 out: 823 if (ret) { 824 free_dma_xfers(qdev, resources); 825 free_dbc_buf(qdev, resources); 826 return ret; 827 } 828 829 return 0; 830 } 831 832 static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, 833 u32 *msg_len) 834 { 835 struct qaic_manage_trans_passthrough *out_trans; 836 struct wire_trans_passthrough *in_trans = trans; 837 u32 len; 838 839 out_trans = (void *)user_msg->data + user_msg->len; 840 841 len = le32_to_cpu(in_trans->hdr.len); 842 if (len % 8 != 0) 843 return -EINVAL; 844 845 if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH) 846 return -ENOSPC; 847 848 memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr)); 849 user_msg->len += len; 850 *msg_len += len; 851 out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type); 852 out_trans->hdr.len = len; 853 854 return 0; 855 } 856 857 static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, 858 u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr) 859 { 860 struct qaic_manage_trans_activate_from_dev *out_trans; 861 struct wire_trans_activate_from_dev *in_trans = trans; 862 u32 len; 863 864 out_trans = (void *)user_msg->data + user_msg->len; 865 866 len = le32_to_cpu(in_trans->hdr.len); 867 if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH) 868 return -ENOSPC; 869 870 user_msg->len += len; 871 *msg_len += len; 872 out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type); 873 out_trans->hdr.len = len; 874 out_trans->status = le32_to_cpu(in_trans->status); 875 out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id); 876 out_trans->options = le64_to_cpu(in_trans->options); 877 878 if (!resources->buf) 879 /* how did we get an activate response without a request? */ 880 return -EINVAL; 881 882 if (out_trans->dbc_id >= qdev->num_dbc) 883 /* 884 * The device assigned an invalid resource, which should never 885 * happen. Return an error so the user can try to recover. 886 */ 887 return -ENODEV; 888 889 if (out_trans->status) 890 /* 891 * Allocating resources failed on device side. This is not an 892 * expected behaviour, user is expected to handle this situation. 893 */ 894 return -ECANCELED; 895 896 resources->status = out_trans->status; 897 resources->dbc_id = out_trans->dbc_id; 898 save_dbc_buf(qdev, resources, usr); 899 900 return 0; 901 } 902 903 static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len, 904 struct qaic_user *usr) 905 { 906 struct wire_trans_deactivate_from_dev *in_trans = trans; 907 u32 dbc_id = le32_to_cpu(in_trans->dbc_id); 908 u32 status = le32_to_cpu(in_trans->status); 909 910 if (dbc_id >= qdev->num_dbc) 911 /* 912 * The device assigned an invalid resource, which should never 913 * happen. Inject an error so the user can try to recover. 914 */ 915 return -ENODEV; 916 917 if (usr && status) { 918 /* 919 * Releasing resources failed on the device side, which puts 920 * us in a bind since they may still be in use, so enable the 921 * dbc. User is expected to retry deactivation. 922 */ 923 enable_dbc(qdev, dbc_id, usr); 924 return -ECANCELED; 925 } 926 927 release_dbc(qdev, dbc_id); 928 set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE); 929 *msg_len += sizeof(*in_trans); 930 931 return 0; 932 } 933 934 static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, 935 u32 *user_len, struct wire_msg *msg) 936 { 937 struct qaic_manage_trans_status_from_dev *out_trans; 938 struct wire_trans_status_from_dev *in_trans = trans; 939 u32 len; 940 941 out_trans = (void *)user_msg->data + user_msg->len; 942 943 len = le32_to_cpu(in_trans->hdr.len); 944 if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH) 945 return -ENOSPC; 946 947 out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV; 948 out_trans->hdr.len = len; 949 out_trans->major = le16_to_cpu(in_trans->major); 950 out_trans->minor = le16_to_cpu(in_trans->minor); 951 out_trans->status_flags = le64_to_cpu(in_trans->status_flags); 952 out_trans->status = le32_to_cpu(in_trans->status); 953 *user_len += le32_to_cpu(in_trans->hdr.len); 954 user_msg->len += len; 955 956 if (out_trans->status) 957 return -ECANCELED; 958 if (out_trans->status_flags & BIT(0) && !valid_crc(msg)) 959 return -EPIPE; 960 961 return 0; 962 } 963 964 static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg, 965 struct wire_msg *msg, struct ioctl_resources *resources, 966 struct qaic_user *usr) 967 { 968 u32 msg_hdr_len = le32_to_cpu(msg->hdr.len); 969 struct wire_trans_hdr *trans_hdr; 970 u32 msg_len = 0; 971 int ret; 972 int i; 973 974 if (msg_hdr_len < sizeof(*trans_hdr) || 975 msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH) 976 return -EINVAL; 977 978 user_msg->len = 0; 979 user_msg->count = le32_to_cpu(msg->hdr.count); 980 981 for (i = 0; i < user_msg->count; ++i) { 982 u32 hdr_len; 983 984 if (msg_len > msg_hdr_len - sizeof(*trans_hdr)) 985 return -EINVAL; 986 987 trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len); 988 hdr_len = le32_to_cpu(trans_hdr->len); 989 if (hdr_len < sizeof(*trans_hdr) || 990 size_add(msg_len, hdr_len) > msg_hdr_len) 991 return -EINVAL; 992 993 switch (le32_to_cpu(trans_hdr->type)) { 994 case QAIC_TRANS_PASSTHROUGH_FROM_DEV: 995 ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len); 996 break; 997 case QAIC_TRANS_ACTIVATE_FROM_DEV: 998 ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr); 999 break; 1000 case QAIC_TRANS_DEACTIVATE_FROM_DEV: 1001 ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr); 1002 break; 1003 case QAIC_TRANS_STATUS_FROM_DEV: 1004 ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg); 1005 break; 1006 default: 1007 return -EINVAL; 1008 } 1009 1010 if (ret) 1011 return ret; 1012 } 1013 1014 if (msg_len != (msg_hdr_len - sizeof(msg->hdr))) 1015 return -EINVAL; 1016 1017 return 0; 1018 } 1019 1020 static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num, 1021 bool ignore_signal) 1022 { 1023 struct xfer_queue_elem elem; 1024 struct wire_msg *out_buf; 1025 struct wrapper_msg *w; 1026 long ret = -EAGAIN; 1027 int xfer_count = 0; 1028 int retry_count; 1029 1030 /* Allow QAIC_BOOT state since we need to check control protocol version */ 1031 if (qdev->dev_state == QAIC_OFFLINE) { 1032 mutex_unlock(&qdev->cntl_mutex); 1033 return ERR_PTR(-ENODEV); 1034 } 1035 1036 /* Attempt to avoid a partial commit of a message */ 1037 list_for_each_entry(w, &wrappers->list, list) 1038 xfer_count++; 1039 1040 for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) { 1041 if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) { 1042 ret = 0; 1043 break; 1044 } 1045 msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS); 1046 if (signal_pending(current)) 1047 break; 1048 } 1049 1050 if (ret) { 1051 mutex_unlock(&qdev->cntl_mutex); 1052 return ERR_PTR(ret); 1053 } 1054 1055 elem.seq_num = seq_num; 1056 elem.buf = NULL; 1057 init_completion(&elem.xfer_done); 1058 if (likely(!qdev->cntl_lost_buf)) { 1059 /* 1060 * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH. 1061 * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH. 1062 */ 1063 out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL); 1064 if (!out_buf) { 1065 mutex_unlock(&qdev->cntl_mutex); 1066 return ERR_PTR(-ENOMEM); 1067 } 1068 1069 ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf, 1070 QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT); 1071 if (ret) { 1072 mutex_unlock(&qdev->cntl_mutex); 1073 return ERR_PTR(ret); 1074 } 1075 } else { 1076 /* 1077 * we lost a buffer because we queued a recv buf, but then 1078 * queuing the corresponding tx buf failed. To try to avoid 1079 * a memory leak, lets reclaim it and use it for this 1080 * transaction. 1081 */ 1082 qdev->cntl_lost_buf = false; 1083 } 1084 1085 list_for_each_entry(w, &wrappers->list, list) { 1086 kref_get(&w->ref_count); 1087 ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len, 1088 list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN); 1089 if (ret) { 1090 qdev->cntl_lost_buf = true; 1091 kref_put(&w->ref_count, free_wrapper); 1092 mutex_unlock(&qdev->cntl_mutex); 1093 return ERR_PTR(ret); 1094 } 1095 } 1096 1097 list_add_tail(&elem.list, &qdev->cntl_xfer_list); 1098 mutex_unlock(&qdev->cntl_mutex); 1099 1100 if (ignore_signal) 1101 ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ); 1102 else 1103 ret = wait_for_completion_interruptible_timeout(&elem.xfer_done, 1104 control_resp_timeout_s * HZ); 1105 /* 1106 * not using _interruptable because we have to cleanup or we'll 1107 * likely cause memory corruption 1108 */ 1109 mutex_lock(&qdev->cntl_mutex); 1110 if (!list_empty(&elem.list)) 1111 list_del(&elem.list); 1112 /* resp_worker() processed the response but the wait was interrupted */ 1113 else if (ret == -ERESTARTSYS) 1114 ret = 0; 1115 if (!ret && !elem.buf) 1116 ret = -ETIMEDOUT; 1117 else if (ret > 0 && !elem.buf) 1118 ret = -EIO; 1119 mutex_unlock(&qdev->cntl_mutex); 1120 1121 if (ret < 0) { 1122 kfree(elem.buf); 1123 return ERR_PTR(ret); 1124 } else if (!qdev->valid_crc(elem.buf)) { 1125 kfree(elem.buf); 1126 return ERR_PTR(-EPIPE); 1127 } 1128 1129 return elem.buf; 1130 } 1131 1132 /* Add a transaction to abort the outstanding DMA continuation */ 1133 static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id) 1134 { 1135 struct wire_trans_dma_xfer *out_trans; 1136 u32 size = sizeof(*out_trans); 1137 struct wrapper_msg *wrapper; 1138 struct wrapper_msg *w; 1139 struct wire_msg *msg; 1140 1141 wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); 1142 msg = &wrapper->msg; 1143 1144 /* Remove all but the first wrapper which has the msg header */ 1145 list_for_each_entry_safe(wrapper, w, &wrappers->list, list) 1146 if (!list_is_first(&wrapper->list, &wrappers->list)) 1147 kref_put(&wrapper->ref_count, free_wrapper); 1148 1149 wrapper = add_wrapper(wrappers, sizeof(*wrapper)); 1150 1151 if (!wrapper) 1152 return -ENOMEM; 1153 1154 out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans; 1155 out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV); 1156 out_trans->hdr.len = cpu_to_le32(size); 1157 out_trans->tag = cpu_to_le32(0); 1158 out_trans->count = cpu_to_le32(0); 1159 out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id); 1160 1161 msg->hdr.len = cpu_to_le32(size + sizeof(*msg)); 1162 msg->hdr.count = cpu_to_le32(1); 1163 wrapper->len = size; 1164 1165 return 0; 1166 } 1167 1168 static struct wrapper_list *alloc_wrapper_list(void) 1169 { 1170 struct wrapper_list *wrappers; 1171 1172 wrappers = kmalloc_obj(*wrappers); 1173 if (!wrappers) 1174 return NULL; 1175 INIT_LIST_HEAD(&wrappers->list); 1176 spin_lock_init(&wrappers->lock); 1177 1178 return wrappers; 1179 } 1180 1181 static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr, 1182 struct manage_msg *user_msg, struct ioctl_resources *resources, 1183 struct wire_msg **rsp) 1184 { 1185 struct wrapper_list *wrappers; 1186 struct wrapper_msg *wrapper; 1187 struct wrapper_msg *w; 1188 bool all_done = false; 1189 struct wire_msg *msg; 1190 int ret; 1191 1192 wrappers = alloc_wrapper_list(); 1193 if (!wrappers) 1194 return -ENOMEM; 1195 1196 wrapper = add_wrapper(wrappers, sizeof(*wrapper)); 1197 if (!wrapper) { 1198 kfree(wrappers); 1199 return -ENOMEM; 1200 } 1201 1202 msg = &wrapper->msg; 1203 wrapper->len = sizeof(*msg); 1204 1205 ret = encode_message(qdev, user_msg, wrappers, resources, usr); 1206 if (ret && resources->dma_chunk_id) 1207 ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id); 1208 if (ret) 1209 goto encode_failed; 1210 1211 ret = mutex_lock_interruptible(&qdev->cntl_mutex); 1212 if (ret) 1213 goto lock_failed; 1214 1215 msg->hdr.magic_number = MANAGE_MAGIC_NUMBER; 1216 msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++); 1217 1218 if (usr) { 1219 msg->hdr.handle = cpu_to_le32(usr->handle); 1220 msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id); 1221 } else { 1222 msg->hdr.handle = 0; 1223 msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION); 1224 } 1225 1226 msg->hdr.padding = cpu_to_le32(0); 1227 msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers)); 1228 1229 /* msg_xfer releases the mutex */ 1230 *rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false); 1231 if (IS_ERR(*rsp)) 1232 ret = PTR_ERR(*rsp); 1233 1234 lock_failed: 1235 free_dma_xfers(qdev, resources); 1236 encode_failed: 1237 spin_lock(&wrappers->lock); 1238 list_for_each_entry_safe(wrapper, w, &wrappers->list, list) 1239 kref_put(&wrapper->ref_count, free_wrapper); 1240 all_done = list_empty(&wrappers->list); 1241 spin_unlock(&wrappers->lock); 1242 if (all_done) 1243 kfree(wrappers); 1244 1245 return ret; 1246 } 1247 1248 static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg) 1249 { 1250 struct wire_trans_dma_xfer_cont *dma_cont = NULL; 1251 struct ioctl_resources resources; 1252 struct wire_msg *rsp = NULL; 1253 int ret; 1254 1255 memset(&resources, 0, sizeof(struct ioctl_resources)); 1256 1257 INIT_LIST_HEAD(&resources.dma_xfers); 1258 1259 if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH || 1260 user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr)) 1261 return -EINVAL; 1262 1263 dma_xfer_continue: 1264 ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp); 1265 if (ret) 1266 return ret; 1267 /* dma_cont should be the only transaction if present */ 1268 if (le32_to_cpu(rsp->hdr.count) == 1) { 1269 dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data; 1270 if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT) 1271 dma_cont = NULL; 1272 } 1273 if (dma_cont) { 1274 if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id && 1275 le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) { 1276 kfree(rsp); 1277 goto dma_xfer_continue; 1278 } 1279 1280 ret = -EINVAL; 1281 goto dma_cont_failed; 1282 } 1283 1284 ret = decode_message(qdev, user_msg, rsp, &resources, usr); 1285 1286 dma_cont_failed: 1287 free_dbc_buf(qdev, &resources); 1288 kfree(rsp); 1289 return ret; 1290 } 1291 1292 int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 1293 { 1294 struct qaic_manage_msg *user_msg = data; 1295 struct qaic_device *qdev; 1296 struct manage_msg *msg; 1297 struct qaic_user *usr; 1298 u8 __user *user_data; 1299 int qdev_rcu_id; 1300 int usr_rcu_id; 1301 int ret; 1302 1303 if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) 1304 return -EINVAL; 1305 1306 usr = file_priv->driver_priv; 1307 1308 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 1309 if (!usr->qddev) { 1310 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1311 return -ENODEV; 1312 } 1313 1314 qdev = usr->qddev->qdev; 1315 1316 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 1317 if (qdev->dev_state != QAIC_ONLINE) { 1318 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1319 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1320 return -ENODEV; 1321 } 1322 1323 msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL); 1324 if (!msg) { 1325 ret = -ENOMEM; 1326 goto out; 1327 } 1328 1329 msg->len = user_msg->len; 1330 msg->count = user_msg->count; 1331 1332 user_data = u64_to_user_ptr(user_msg->data); 1333 1334 if (copy_from_user(msg->data, user_data, user_msg->len)) { 1335 ret = -EFAULT; 1336 goto free_msg; 1337 } 1338 1339 ret = qaic_manage(qdev, usr, msg); 1340 1341 /* 1342 * If the qaic_manage() is successful then we copy the message onto 1343 * userspace memory but we have an exception for -ECANCELED. 1344 * For -ECANCELED, it means that device has NACKed the message with a 1345 * status error code which userspace would like to know. 1346 */ 1347 if (ret == -ECANCELED || !ret) { 1348 if (copy_to_user(user_data, msg->data, msg->len)) { 1349 ret = -EFAULT; 1350 } else { 1351 user_msg->len = msg->len; 1352 user_msg->count = msg->count; 1353 } 1354 } 1355 1356 free_msg: 1357 kfree(msg); 1358 out: 1359 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1360 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1361 return ret; 1362 } 1363 1364 int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor) 1365 { 1366 struct qaic_manage_trans_status_from_dev *status_result; 1367 struct qaic_manage_trans_status_to_dev *status_query; 1368 struct manage_msg *user_msg; 1369 int ret; 1370 1371 user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL); 1372 if (!user_msg) { 1373 ret = -ENOMEM; 1374 goto out; 1375 } 1376 user_msg->len = sizeof(*status_query); 1377 user_msg->count = 1; 1378 1379 status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data; 1380 status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR; 1381 status_query->hdr.len = sizeof(status_query->hdr); 1382 1383 ret = qaic_manage(qdev, usr, user_msg); 1384 if (ret) 1385 goto kfree_user_msg; 1386 status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data; 1387 *major = status_result->major; 1388 *minor = status_result->minor; 1389 1390 if (status_result->status_flags & BIT(0)) { /* device is using CRC */ 1391 /* By default qdev->gen_crc is programmed to generate CRC */ 1392 qdev->valid_crc = valid_crc; 1393 } else { 1394 /* By default qdev->valid_crc is programmed to bypass CRC */ 1395 qdev->gen_crc = gen_crc_stub; 1396 } 1397 1398 kfree_user_msg: 1399 kfree(user_msg); 1400 out: 1401 return ret; 1402 } 1403 1404 static void resp_worker(struct work_struct *work) 1405 { 1406 struct resp_work *resp = container_of(work, struct resp_work, work); 1407 struct qaic_device *qdev = resp->qdev; 1408 struct wire_msg *msg = resp->buf; 1409 struct xfer_queue_elem *elem; 1410 struct xfer_queue_elem *i; 1411 bool found = false; 1412 1413 mutex_lock(&qdev->cntl_mutex); 1414 list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) { 1415 if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) { 1416 found = true; 1417 list_del_init(&elem->list); 1418 elem->buf = msg; 1419 complete_all(&elem->xfer_done); 1420 break; 1421 } 1422 } 1423 mutex_unlock(&qdev->cntl_mutex); 1424 1425 if (!found) { 1426 /* 1427 * The user might have gone away at this point without waiting 1428 * for QAIC_TRANS_DEACTIVATE_FROM_DEV transaction coming from 1429 * the device. If this is not handled correctly, the host will 1430 * not know that the DBC[n] has been freed on the device. 1431 * Due to this failure in synchronization between the device and 1432 * the host, if another user requests to activate a network, and 1433 * the device assigns DBC[n] again, save_dbc_buf() will hang, 1434 * waiting for dbc[n]->in_use to be set to false, which will not 1435 * happen unless the qaic_dev_reset_clean_local_state() gets 1436 * called by resetting the device (or re-inserting the module). 1437 * 1438 * As a solution, we look for QAIC_TRANS_DEACTIVATE_FROM_DEV 1439 * transactions in the message before disposing of it, then 1440 * handle releasing the DBC resources. 1441 * 1442 * Since the user has gone away, if the device could not 1443 * deactivate the network (status != 0), there is no way to 1444 * enable and reassign the DBC to the user. We can put trust in 1445 * the device that it will release all the active DBCs in 1446 * response to the QAIC_TRANS_TERMINATE_TO_DEV transaction, 1447 * otherwise, the user can issue an soc_reset to the device. 1448 */ 1449 u32 msg_count = le32_to_cpu(msg->hdr.count); 1450 u32 msg_len = le32_to_cpu(msg->hdr.len); 1451 u32 len = 0; 1452 int j; 1453 1454 for (j = 0; j < msg_count && len < msg_len; ++j) { 1455 struct wire_trans_hdr *trans_hdr; 1456 1457 trans_hdr = (struct wire_trans_hdr *)(msg->data + len); 1458 if (le32_to_cpu(trans_hdr->type) == QAIC_TRANS_DEACTIVATE_FROM_DEV) { 1459 if (decode_deactivate(qdev, trans_hdr, &len, NULL)) 1460 len += le32_to_cpu(trans_hdr->len); 1461 } else { 1462 len += le32_to_cpu(trans_hdr->len); 1463 } 1464 } 1465 /* request must have timed out, drop packet */ 1466 kfree(msg); 1467 } 1468 1469 kfree(resp); 1470 } 1471 1472 static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper) 1473 { 1474 bool all_done = false; 1475 1476 spin_lock(&wrappers->lock); 1477 kref_put(&wrapper->ref_count, free_wrapper); 1478 all_done = list_empty(&wrappers->list); 1479 spin_unlock(&wrappers->lock); 1480 1481 if (all_done) 1482 kfree(wrappers); 1483 } 1484 1485 void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) 1486 { 1487 struct wire_msg *msg = mhi_result->buf_addr; 1488 struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg); 1489 1490 free_wrapper_from_list(wrapper->head, wrapper); 1491 } 1492 1493 void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) 1494 { 1495 struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev); 1496 struct wire_msg *msg = mhi_result->buf_addr; 1497 struct resp_work *resp; 1498 1499 if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) { 1500 kfree(msg); 1501 return; 1502 } 1503 1504 resp = kmalloc_obj(*resp, GFP_ATOMIC); 1505 if (!resp) { 1506 kfree(msg); 1507 return; 1508 } 1509 1510 INIT_WORK(&resp->work, resp_worker); 1511 resp->qdev = qdev; 1512 resp->buf = msg; 1513 queue_work(qdev->cntl_wq, &resp->work); 1514 } 1515 1516 int qaic_control_open(struct qaic_device *qdev) 1517 { 1518 if (!qdev->cntl_ch) 1519 return -ENODEV; 1520 1521 qdev->cntl_lost_buf = false; 1522 /* 1523 * By default qaic should assume that device has CRC enabled. 1524 * Qaic comes to know if device has CRC enabled or disabled during the 1525 * device status transaction, which is the first transaction performed 1526 * on control channel. 1527 * 1528 * So CRC validation of first device status transaction response is 1529 * ignored (by calling valid_crc_stub) and is done later during decoding 1530 * if device has CRC enabled. 1531 * Now that qaic knows whether device has CRC enabled or not it acts 1532 * accordingly. 1533 */ 1534 qdev->gen_crc = gen_crc; 1535 qdev->valid_crc = valid_crc_stub; 1536 1537 return mhi_prepare_for_transfer(qdev->cntl_ch); 1538 } 1539 1540 void qaic_control_close(struct qaic_device *qdev) 1541 { 1542 mhi_unprepare_from_transfer(qdev->cntl_ch); 1543 } 1544 1545 void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr) 1546 { 1547 struct wire_trans_terminate_to_dev *trans; 1548 struct wrapper_list *wrappers; 1549 struct wrapper_msg *wrapper; 1550 struct wire_msg *msg; 1551 struct wire_msg *rsp; 1552 1553 wrappers = alloc_wrapper_list(); 1554 if (!wrappers) 1555 return; 1556 1557 wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans)); 1558 if (!wrapper) 1559 return; 1560 1561 msg = &wrapper->msg; 1562 1563 trans = (struct wire_trans_terminate_to_dev *)msg->data; 1564 1565 trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV); 1566 trans->hdr.len = cpu_to_le32(sizeof(*trans)); 1567 trans->handle = cpu_to_le32(usr->handle); 1568 1569 mutex_lock(&qdev->cntl_mutex); 1570 wrapper->len = sizeof(msg->hdr) + sizeof(*trans); 1571 msg->hdr.magic_number = MANAGE_MAGIC_NUMBER; 1572 msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++); 1573 msg->hdr.len = cpu_to_le32(wrapper->len); 1574 msg->hdr.count = cpu_to_le32(1); 1575 msg->hdr.handle = cpu_to_le32(usr->handle); 1576 msg->hdr.padding = cpu_to_le32(0); 1577 msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers)); 1578 1579 /* 1580 * msg_xfer releases the mutex 1581 * We don't care about the return of msg_xfer since we will not do 1582 * anything different based on what happens. 1583 * We ignore pending signals since one will be set if the user is 1584 * killed, and we need give the device a chance to cleanup, otherwise 1585 * DMA may still be in progress when we return. 1586 */ 1587 rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true); 1588 if (!IS_ERR(rsp)) 1589 kfree(rsp); 1590 free_wrapper_from_list(wrappers, wrapper); 1591 } 1592 1593 void wake_all_cntl(struct qaic_device *qdev) 1594 { 1595 struct xfer_queue_elem *elem; 1596 struct xfer_queue_elem *i; 1597 1598 mutex_lock(&qdev->cntl_mutex); 1599 list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) { 1600 list_del_init(&elem->list); 1601 complete_all(&elem->xfer_done); 1602 } 1603 mutex_unlock(&qdev->cntl_mutex); 1604 } 1605