// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define SEM_VAL_MASK	GENMASK_ULL(11, 0)
#define SEM_INDEX_MASK	GENMASK_ULL(4, 0)
#define BULK_XFER	BIT(3)
#define GEN_COMPLETION	BIT(4)
#define INBOUND_XFER	1
#define OUTBOUND_XFER	2
#define REQHP_OFF	0x0 /* we read this */
#define REQTP_OFF	0x4 /* we write this */
#define RSPHP_OFF	0x8 /* we write this */
#define RSPTP_OFF	0xc /* we read this */

#define ENCODE_SEM(val, index, sync, cmd, flags)		\
({								\
	FIELD_PREP(GENMASK(11, 0), (val)) |			\
	FIELD_PREP(GENMASK(20, 16), (index)) |			\
	FIELD_PREP(BIT(22), (sync)) |				\
	FIELD_PREP(GENMASK(26, 24), (cmd)) |			\
	FIELD_PREP(GENMASK(30, 29), (flags)) |			\
	FIELD_PREP(BIT(31), (cmd) ? 1 : 0);			\
})
#define NUM_EVENTS	128
#define NUM_DELAYS	10
#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())

static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");

static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
module_param(datapath_poll_interval_us, uint, 0600);
MODULE_PARM_DESC(datapath_poll_interval_us,
		 "Amount of time to sleep between activity when datapath polling is enabled");
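/*
 * Worked example of the semaphore encoding above (illustrative only):
 * ENCODE_SEM(5, 2, 0, 2, 0) packs val = 5 into bits 11:0, index = 2 into
 * bits 20:16 and cmd = 2 into bits 26:24, and because cmd is non-zero it also
 * sets the enable bit 31, giving 0x82020005. A cmd of 0 leaves bit 31 clear,
 * so no semaphore command is enabled for that sem_cmd word.
 */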
struct dbc_req {
	/*
	 * A request ID is assigned to each memory handle going in the DMA
	 * queue. As a single memory handle can enqueue multiple elements in
	 * the DMA queue, all of them will have the same request ID.
	 */
	__le16	req_id;
	/* Future use */
	__u8	seq_id;
	/*
	 * Special encoded variable
	 * 7	0 - Do not force to generate MSI after DMA is completed
	 *	1 - Force to generate MSI after DMA is completed
	 * 6:5	Reserved
	 * 4	1 - Generate completion element in the response queue
	 *	0 - No Completion Code
	 * 3	0 - DMA request is a Link list transfer
	 *	1 - DMA request is a Bulk transfer
	 * 2	Reserved
	 * 1:0	00 - No DMA transfer involved
	 *	01 - DMA transfer is part of inbound transfer
	 *	10 - DMA transfer is part of outbound transfer
	 *	11 - NA
	 */
	__u8	cmd;
	__le32	resv;
	/* Source address for the transfer */
	__le64	src_addr;
	/* Destination address for the transfer */
	__le64	dest_addr;
	/* Length of transfer request */
	__le32	len;
	__le32	resv2;
	/* Doorbell address */
	__le64	db_addr;
	/*
	 * Special encoded variable
	 * 7	1 - Doorbell(db) write
	 *	0 - No doorbell write
	 * 6:2	Reserved
	 * 1:0	00 - 32 bit access, db address must be aligned to 32bit-boundary
	 *	01 - 16 bit access, db address must be aligned to 16bit-boundary
	 *	10 - 8 bit access, db address must be aligned to 8bit-boundary
	 *	11 - Reserved
	 */
	__u8	db_len;
	__u8	resv3;
	__le16	resv4;
	/* 32 bit data written to doorbell address */
	__le32	db_data;
	/*
	 * Special encoded variable
	 * All the fields of sem_cmdX are passed from user and all are ORed
	 * together to form sem_cmd.
	 * 11:0		Semaphore value
	 * 15:12	Reserved
	 * 20:16	Semaphore index
	 * 21		Reserved
	 * 22		Semaphore Sync
	 * 23		Reserved
	 * 26:24	Semaphore command
	 * 28:27	Reserved
	 * 29		Semaphore DMA out bound sync fence
	 * 30		Semaphore DMA in bound sync fence
	 * 31		Enable semaphore command
	 */
	__le32	sem_cmd0;
	__le32	sem_cmd1;
	__le32	sem_cmd2;
	__le32	sem_cmd3;
} __packed;

struct dbc_rsp {
	/* Request ID of the memory handle whose DMA transaction is completed */
	__le16	req_id;
	/* Status of the DMA transaction. 0 : Success otherwise failure */
	__le16	status;
} __packed;

static inline bool bo_queued(struct qaic_bo *bo)
{
	return !list_empty(&bo->xfer_list);
}

inline int get_dbc_req_elem_size(void)
{
	return sizeof(struct dbc_req);
}

inline int get_dbc_rsp_elem_size(void)
{
	return sizeof(struct dbc_rsp);
}

static void free_slice(struct kref *kref)
{
	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);

	slice->bo->total_slice_nents -= slice->nents;
	list_del(&slice->slice);
	drm_gem_object_put(&slice->bo->base);
	sg_free_table(slice->sgt);
	kfree(slice->sgt);
	kfree(slice->reqs);
	kfree(slice);
}

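/*
 * Clone the [offset, offset + size) window of an existing DMA-mapped sg table
 * into a fresh table for one slice. For example (illustrative): an sgt of two
 * 16K segments with offset = 20K and size = 8K produces a one-entry clone that
 * starts 4K into the second segment and is 8K long.
 */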
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
					struct sg_table *sgt_in, u64 size, u64 offset)
{
	struct scatterlist *sg, *sgn, *sgf, *sgl;
	unsigned int len, nents, offf, offl;
	struct sg_table *sgt;
	size_t total_len;
	int ret, j;

	/* find out number of relevant nents needed for this mem */
	total_len = 0;
	sgf = NULL;
	sgl = NULL;
	nents = 0;
	offf = 0;
	offl = 0;

	size = size ? size : PAGE_SIZE;
	for_each_sgtable_dma_sg(sgt_in, sg, j) {
		len = sg_dma_len(sg);

		if (!len)
			continue;
		if (offset >= total_len && offset < total_len + len) {
			sgf = sg;
			offf = offset - total_len;
		}
		if (sgf)
			nents++;
		if (offset + size >= total_len &&
		    offset + size <= total_len + len) {
			sgl = sg;
			offl = offset + size - total_len;
			break;
		}
		total_len += len;
	}

	if (!sgf || !sgl) {
		ret = -EINVAL;
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret)
		goto free_sgt;

	/* copy relevant sg node and fix page and length */
	sgn = sgf;
	for_each_sgtable_dma_sg(sgt, sg, j) {
		memcpy(sg, sgn, sizeof(*sg));
		if (sgn == sgf) {
			sg_dma_address(sg) += offf;
			sg_dma_len(sg) -= offf;
			sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
		} else {
			offf = 0;
		}
		if (sgn == sgl) {
			sg_dma_len(sg) = offl - offf;
			sg_set_page(sg, sg_page(sgn), offl - offf, offf);
			sg_mark_end(sg);
			break;
		}
		sgn = sg_next(sgn);
	}

	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
out:
	*sgt_out = NULL;
	return ret;
}

static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
		       struct qaic_attach_slice_entry *req)
{
	__le64 db_addr = cpu_to_le64(req->db_addr);
	__le32 db_data = cpu_to_le32(req->db_data);
	struct scatterlist *sg;
	__u8 cmd = BULK_XFER;
	int presync_sem;
	u64 dev_addr;
	__u8 db_len;
	int i;

	if (!slice->no_xfer)
		cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);

	if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
		return -EINVAL;

	presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
	if (presync_sem > 1)
		return -EINVAL;

	presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
		      req->sem2.presync << 2 | req->sem3.presync << 3;

	switch (req->db_len) {
	case 32:
		db_len = BIT(7);
		break;
	case 16:
		db_len = BIT(7) | 1;
		break;
	case 8:
		db_len = BIT(7) | 2;
		break;
	case 0:
		db_len = 0; /* doorbell is not active for this command */
		break;
	default:
		return -EINVAL; /* should never hit this */
	}

	/*
	 * When we end up splitting up a single request (i.e. a buf slice) into
	 * multiple DMA requests, we have to manage the sync data carefully.
	 * There can only be one presync sem. That needs to be on every xfer
	 * so that the DMA engine doesn't transfer data before the receiver is
	 * ready. We only do the doorbell and postsync sems after the xfer.
	 * To guarantee previous xfers for the request are complete, we use a
	 * fence.
	 */
	dev_addr = req->dev_addr;
	for_each_sgtable_dma_sg(slice->sgt, sg, i) {
		slice->reqs[i].cmd = cmd;
		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						      sg_dma_address(sg) : dev_addr);
		slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						       dev_addr : sg_dma_address(sg));
		/*
		 * sg_dma_len(sg) returns the size of a DMA segment. The
		 * maximum DMA segment size is set to UINT_MAX by qaic, so the
		 * return value of sg_dma_len(sg) can never exceed the u32
		 * range. So, by downsizing we are not corrupting the value.
		 */
		slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
		switch (presync_sem) {
		case BIT(0):
			slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
									 req->sem0.index,
									 req->sem0.presync,
									 req->sem0.cmd,
									 req->sem0.flags));
			break;
		case BIT(1):
			slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
									 req->sem1.index,
									 req->sem1.presync,
									 req->sem1.cmd,
									 req->sem1.flags));
			break;
		case BIT(2):
			slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
									 req->sem2.index,
									 req->sem2.presync,
									 req->sem2.cmd,
									 req->sem2.flags));
			break;
		case BIT(3):
			slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
									 req->sem3.index,
									 req->sem3.presync,
									 req->sem3.cmd,
									 req->sem3.flags));
			break;
		}
		dev_addr += sg_dma_len(sg);
	}
	/* add post transfer stuff to last segment */
	i--;
	slice->reqs[i].cmd |= GEN_COMPLETION;
	slice->reqs[i].db_addr = db_addr;
	slice->reqs[i].db_len = db_len;
	slice->reqs[i].db_data = db_data;
	/*
	 * Add a fence if we have more than one request going to the hardware
	 * representing the entirety of the user request, and the user request
	 * has no presync condition.
	 * Fences are expensive, so we try to avoid them. We rely on the
	 * hardware behavior to avoid needing one when there is a presync
	 * condition. When a presync exists, all requests for that same
	 * presync will be queued into a fifo. Thus, since we queue the
	 * post xfer activity only on the last request we queue, the hardware
	 * will ensure that the last queued request is processed last, thus
	 * making sure the post xfer activity happens at the right time without
	 * a fence.
	 */
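	/*
	 * For example (illustrative), a slice that becomes three DMA requests
	 * with no presync sem: the first two requests carry only the data
	 * transfer, while the last one received GEN_COMPLETION and the
	 * doorbell above and, below, also gets an IN/OUT sync fence so that
	 * the post-xfer activity only fires once the earlier requests finish.
	 */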
	if (i && !presync_sem)
		req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
				    QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
	slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
							 req->sem0.presync, req->sem0.cmd,
							 req->sem0.flags));
	slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
							 req->sem1.presync, req->sem1.cmd,
							 req->sem1.flags));
	slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
							 req->sem2.presync, req->sem2.cmd,
							 req->sem2.flags));
	slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
							 req->sem3.presync, req->sem3.cmd,
							 req->sem3.flags));

	return 0;
}

static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
			      struct qaic_attach_slice_entry *slice_ent)
{
	struct sg_table *sgt = NULL;
	struct bo_slice *slice;
	int ret;

	ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
	if (ret)
		goto out;

	slice = kmalloc(sizeof(*slice), GFP_KERNEL);
	if (!slice) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
	if (!slice->reqs) {
		ret = -ENOMEM;
		goto free_slice;
	}

	slice->no_xfer = !slice_ent->size;
	slice->sgt = sgt;
	slice->nents = sgt->nents;
	slice->dir = bo->dir;
	slice->bo = bo;
	slice->size = slice_ent->size;
	slice->offset = slice_ent->offset;

	ret = encode_reqs(qdev, slice, slice_ent);
	if (ret)
		goto free_req;

	bo->total_slice_nents += sgt->nents;
	kref_init(&slice->ref_count);
	drm_gem_object_get(&bo->base);
	list_add_tail(&slice->slice, &bo->slices);

	return 0;

free_req:
	kfree(slice->reqs);
free_slice:
	kfree(slice);
free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
out:
	return ret;
}

static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
{
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct page **pages;
	int *pages_order;
	int buf_extra;
	int max_order;
	int nr_pages;
	int ret = 0;
	int i, j, k;
	int order;

	if (size) {
		nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
		/*
		 * calculate how much extra we are going to allocate, to remove
		 * later
		 */
		buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
		max_order = min(MAX_PAGE_ORDER, get_order(size));
	} else {
		/* allocate a single page for bookkeeping */
		nr_pages = 1;
		buf_extra = 0;
		max_order = 0;
	}

	pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}
	pages_order = (void *)pages + sizeof(*pages) * nr_pages;

	/*
	 * Allocate requested memory using alloc_pages. It is possible to allocate
	 * the requested memory in multiple chunks by calling alloc_pages
	 * multiple times. Use SG table to handle multiple allocated pages.
	 */
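	/*
	 * For example (illustrative), a 6 page buffer first tries one order 3
	 * (8 page) allocation; if that succeeds, the 2 extra pages are counted
	 * in buf_extra and trimmed from the last sg entry below. If it fails,
	 * the order is stepped down and the buffer is built from smaller
	 * chunks, e.g. an order 2 chunk plus an order 1 chunk.
	 */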
	i = 0;
	while (nr_pages > 0) {
		order = min(get_order(nr_pages * PAGE_SIZE), max_order);
		while (1) {
			pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
					       __GFP_NOWARN | __GFP_ZERO |
					       (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
					       order);
			if (pages[i])
				break;
			if (!order--) {
				ret = -ENOMEM;
				goto free_partial_alloc;
			}
		}

		max_order = order;
		pages_order[i] = order;

		nr_pages -= 1 << order;
		if (nr_pages <= 0)
			/* account for over allocation */
			buf_extra += abs(nr_pages) * PAGE_SIZE;
		i++;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto free_partial_alloc;
	}

	if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	/* Populate the SG table with the allocated memory pages */
	sg = sgt->sgl;
	for (k = 0; k < i; k++, sg = sg_next(sg)) {
		/* Last entry requires special handling */
		if (k < i - 1) {
			sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
		} else {
			sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
			sg_mark_end(sg);
		}
	}

	kvfree(pages);
	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
free_partial_alloc:
	for (j = 0; j < i; j++)
		__free_pages(pages[j], pages_order[j]);
	kvfree(pages);
out:
	*sgt_out = NULL;
	return ret;
}

static bool invalid_sem(struct qaic_sem *sem)
{
	if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
	    !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
	    sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
	    sem->cmd > QAIC_SEM_WAIT_GT_0)
		return true;
	return false;
}

static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
			     u32 count, u64 total_size)
{
	u64 total;
	int i;

	for (i = 0; i < count; i++) {
		if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
		      slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
		    invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
		    invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
			return -EINVAL;

		if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
		    total > total_size)
			return -EINVAL;
	}

	return 0;
}

static void qaic_free_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;

	if (!sgt)
		return;

	for (sg = sgt->sgl; sg; sg = sg_next(sg))
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	sg_free_table(sgt);
	kfree(sgt);
}

static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
				const struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
}

static const struct vm_operations_struct drm_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct qaic_bo *bo = to_qaic_bo(obj);
	unsigned long offset = 0;
	struct scatterlist *sg;
	int ret = 0;

	if (drm_gem_is_imported(obj))
		return -EINVAL;

	for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
		if (sg_page(sg)) {
			ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
					      sg->length, vma->vm_page_prot);
			if (ret)
				goto out;
			offset += sg->length;
		}
	}

out:
	return ret;
}

static void qaic_free_object(struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	if (drm_gem_is_imported(obj)) {
		/* DMABUF/PRIME Path */
		drm_prime_gem_destroy(obj, NULL);
	} else {
		/* Private buffer allocation path */
		qaic_free_sgt(bo->sgt);
	}

	mutex_destroy(&bo->lock);
	drm_gem_object_release(obj);
	kfree(bo);
}

static const struct drm_gem_object_funcs qaic_gem_funcs = {
	.free = qaic_free_object,
	.print_info = qaic_gem_print_info,
	.mmap = qaic_gem_object_mmap,
	.vm_ops = &drm_vm_ops,
};

static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
{
	if (reinit) {
		bo->sliced = false;
		reinit_completion(&bo->xfer_done);
	} else {
		mutex_init(&bo->lock);
		init_completion(&bo->xfer_done);
	}
	complete_all(&bo->xfer_done);
	INIT_LIST_HEAD(&bo->slices);
	INIT_LIST_HEAD(&bo->xfer_list);
}

static struct qaic_bo *qaic_alloc_init_bo(void)
{
	struct qaic_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	qaic_init_bo(bo, false);

	return bo;
}

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_create_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	size_t size;
	int ret;

	if (args->pad)
		return -EINVAL;

	size = PAGE_ALIGN(args->size);
	if (size == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto unlock_dev_srcu;
	}
	obj = &bo->base;

	drm_gem_private_object_init(dev, obj, size);

	obj->funcs = &qaic_gem_funcs;
	ret = create_sgt(qdev, &bo->sgt, size);
	if (ret)
		goto free_bo;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto free_bo;

	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
	if (ret)
		goto free_bo;

	drm_gem_object_put(obj);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

free_bo:
	drm_gem_object_put(obj);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_mmap_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	int ret = 0;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_dev_srcu;
	}

	args->offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);

unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int ret;

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto out;
	}

	obj = &bo->base;
	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto attach_fail;
	}

	if (!attach->dmabuf->size) {
		ret = -EINVAL;
		goto size_align_fail;
	}

	drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
	/*
	 * skipping dma_buf_map_attachment() as we do not know the direction
	 * just yet. Once the direction is known in the subsequent IOCTL to
	 * attach slicing, we can do it then.
	 */

	obj->funcs = &qaic_gem_funcs;
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

size_align_fail:
	dma_buf_detach(dma_buf, attach);
attach_fail:
	dma_buf_put(dma_buf);
	kfree(bo);
out:
	return ERR_PTR(ret);
}

static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
{
	struct drm_gem_object *obj = &bo->base;
	struct sg_table *sgt;
	int ret;

	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		return ret;
	}

	bo->sgt = sgt;

	return 0;
}

static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
	if (ret)
		return -EFAULT;

	return 0;
}

static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
			   struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	if (drm_gem_is_imported(&bo->base))
		ret = qaic_prepare_import_bo(bo, hdr);
	else
		ret = qaic_prepare_export_bo(qdev, bo, hdr);
	bo->dir = hdr->dir;
	bo->dbc = &qdev->dbc[hdr->dbc_id];
	bo->nr_slice = hdr->count;

	return ret;
}

static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
	bo->sgt = NULL;
}

static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
}

static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	if (drm_gem_is_imported(&bo->base))
		qaic_unprepare_import_bo(bo);
	else
		qaic_unprepare_export_bo(qdev, bo);

	bo->dir = 0;
	bo->dbc = NULL;
	bo->nr_slice = 0;
}

static void qaic_free_slices_bo(struct qaic_bo *bo)
{
	struct bo_slice *slice, *temp;

	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
		kref_put(&slice->ref_count, free_slice);
	if (WARN_ON_ONCE(bo->total_slice_nents != 0))
		bo->total_slice_nents = 0;
	bo->nr_slice = 0;
}

static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr,
				  struct qaic_attach_slice_entry *slice_ent)
{
	int ret, i;

	for (i = 0; i < hdr->count; i++) {
		ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
		if (ret) {
			qaic_free_slices_bo(bo);
			return ret;
		}
	}

	if (bo->total_slice_nents > bo->dbc->nelem) {
		qaic_free_slices_bo(bo);
		return -ENOSPC;
	}

	return 0;
}

int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_attach_slice_entry *slice_ent;
	struct qaic_attach_slice *args = data;
	int rcu_id, usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long arg_size;
	struct qaic_user *usr;
	u8 __user *user_data;
	struct qaic_bo *bo;
	int ret;

	if (args->hdr.count == 0)
		return -EINVAL;

	arg_size = args->hdr.count * sizeof(*slice_ent);
	if (arg_size / args->hdr.count != sizeof(*slice_ent))
		return -EINVAL;

	if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
		return -EINVAL;

	if (args->data == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	user_data = u64_to_user_ptr(args->data);

	slice_ent = kzalloc(arg_size, GFP_KERNEL);
	if (!slice_ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(slice_ent, user_data, arg_size);
	if (ret) {
		ret = -EFAULT;
		goto free_slice_ent;
	}

	obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
	if (!obj) {
		ret = -ENOENT;
		goto free_slice_ent;
	}

	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
	if (ret)
		goto put_bo;

	bo = to_qaic_bo(obj);
	ret = mutex_lock_interruptible(&bo->lock);
	if (ret)
		goto put_bo;

	if (bo->sliced) {
		ret = -EINVAL;
		goto unlock_bo;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];
	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EINVAL;
		goto unlock_ch_srcu;
	}

	ret = qaic_prepare_bo(qdev, bo, &args->hdr);
	if (ret)
		goto unlock_ch_srcu;

	ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
	if (ret)
		goto unprepare_bo;

	if (args->hdr.dir == DMA_TO_DEVICE)
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);

	bo->sliced = true;
	list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	mutex_unlock(&bo->lock);
	kfree(slice_ent);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

unprepare_bo:
	qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_bo:
	mutex_unlock(&bo->lock);
put_bo:
	drm_gem_object_put(obj);
free_slice_ent:
	kfree(slice_ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

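/*
 * Free space in the request ring, keeping one slot unused so a full ring
 * never looks identical to an empty one (head == tail means empty). For
 * example (illustrative), with q_size = 8, head = 2 and tail = 6 there are
 * 2 - 6 - 1 + 8 = 3 slots free.
 */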
static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
{
	u32 avail = head - tail - 1;

	if (head <= tail)
		avail += q_size;

	return avail;
}

static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
				 u32 head, u32 *ptail)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
	struct dbc_req *reqs = slice->reqs;
	u32 tail = *ptail;
	u32 avail;

	avail = fifo_space_avail(head, tail, dbc->nelem);
	if (avail < slice->nents)
		return -EAGAIN;

	if (tail + slice->nents > dbc->nelem) {
		avail = dbc->nelem - tail;
		avail = min_t(u32, avail, slice->nents);
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
		reqs += avail;
		avail = slice->nents - avail;
		if (avail)
			memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
	} else {
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
	}

	*ptail = (tail + slice->nents) % dbc->nelem;

	return 0;
}

static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
					 u64 resize, struct dma_bridge_chan *dbc, u32 head,
					 u32 *ptail)
{
	struct dbc_req *reqs = slice->reqs;
	struct dbc_req *last_req;
	u32 tail = *ptail;
	u64 last_bytes;
	u32 first_n;
	u32 avail;

	avail = fifo_space_avail(head, tail, dbc->nelem);

	/*
	 * After this for loop is complete, first_n represents the index
	 * of the last DMA request of this slice that needs to be
	 * transferred after resizing, and last_bytes represents the DMA
	 * size of that request.
	 */
	last_bytes = resize;
	for (first_n = 0; first_n < slice->nents; first_n++)
		if (last_bytes > le32_to_cpu(reqs[first_n].len))
			last_bytes -= le32_to_cpu(reqs[first_n].len);
		else
			break;

	if (avail < (first_n + 1))
		return -EAGAIN;

	if (first_n) {
		if (tail + first_n > dbc->nelem) {
			avail = dbc->nelem - tail;
			avail = min_t(u32, avail, first_n);
			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
			last_req = reqs + avail;
			avail = first_n - avail;
			if (avail)
				memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
		} else {
			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
		}
	}

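	/*
	 * Illustrative example: a slice of three 4K requests resized to 6K
	 * gives first_n = 1 and last_bytes = 2K, so request 0 was copied
	 * whole above and the entry copied below gets its len shrunk to 2K;
	 * anything past first_n is dropped from the transfer.
	 */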
	/*
	 * Copy over the last entry. Here we need to adjust len to the left over
	 * size, and set src and dst to the entry it is copied to.
	 */
	last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
	memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));

	/*
	 * last_bytes holds the size of a DMA segment. The maximum DMA segment
	 * size is set to UINT_MAX by qaic, so last_bytes can never exceed the
	 * u32 range. So, by downsizing we are not corrupting the value.
	 */
	last_req->len = cpu_to_le32((u32)last_bytes);
	last_req->src_addr = reqs[first_n].src_addr;
	last_req->dest_addr = reqs[first_n].dest_addr;
	if (!last_bytes)
		/* Disable DMA transfer */
		last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;

	*ptail = (tail + first_n + 1) % dbc->nelem;

	return 0;
}

static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, struct dma_bridge_chan *dbc, u32 head,
				  u32 *tail)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct bo_slice *slice;
	unsigned long flags;
	struct qaic_bo *bo;
	int i, j;
	int ret;

	for (i = 0; i < count; i++) {
		/*
		 * ref count will be decremented when the transfer of this
		 * buffer is complete. It is inside dbc_irq_threaded_fn().
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto failed_to_send_bo;
		}

		bo = to_qaic_bo(obj);
		ret = mutex_lock_interruptible(&bo->lock);
		if (ret)
			goto failed_to_send_bo;

		if (!bo->sliced) {
			ret = -EINVAL;
			goto unlock_bo;
		}

		if (is_partial && pexec[i].resize > bo->base.size) {
			ret = -EINVAL;
			goto unlock_bo;
		}

		spin_lock_irqsave(&dbc->xfer_lock, flags);
		if (bo_queued(bo)) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			ret = -EINVAL;
			goto unlock_bo;
		}

		bo->req_id = dbc->next_req_id++;

		list_for_each_entry(slice, &bo->slices, slice) {
			for (j = 0; j < slice->nents; j++)
				slice->reqs[j].req_id = cpu_to_le16(bo->req_id);

			if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
				/* Configure the slice for no DMA transfer */
				ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
			else if (is_partial && pexec[i].resize < slice->offset + slice->size)
				/* Configure the slice to be partially DMA transferred */
				ret = copy_partial_exec_reqs(qdev, slice,
							     pexec[i].resize - slice->offset, dbc,
							     head, tail);
			else
				ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
			if (ret) {
				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
				goto unlock_bo;
			}
		}
		reinit_completion(&bo->xfer_done);
		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
		mutex_unlock(&bo->lock);
	}

	return 0;

unlock_bo:
	mutex_unlock(&bo->lock);
failed_to_send_bo:
	if (likely(obj))
		drm_gem_object_put(obj);
	for (j = 0; j < i; j++) {
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
		obj = &bo->base;
		list_del_init(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		drm_gem_object_put(obj);
	}
	return ret;
}

static void update_profiling_data(struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int i;

	for (i = 0; i < count; i++) {
		/*
		 * Since we already committed the BO to hardware, the only way
		 * this should fail is a pending signal. We can't cancel the
		 * submit to hardware, so we have to just skip the profiling
		 * data. In case the signal is not fatal to the process, we
		 * return success so that the user doesn't try to resubmit.
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj)
			break;
		bo = to_qaic_bo(obj);
		bo->perf_stats.req_received_ts = received_ts;
		bo->perf_stats.req_submit_ts = submit_ts;
		bo->perf_stats.queue_level_before = queue_level;
		queue_level += bo->total_slice_nents;
		drm_gem_object_put(obj);
	}
}

static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
				   bool is_partial)
{
	struct qaic_execute *args = data;
	struct qaic_execute_entry *exec;
	struct dma_bridge_chan *dbc;
	int usr_rcu_id, qdev_rcu_id;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	u8 __user *user_data;
	unsigned long n;
	u64 received_ts;
	u32 queue_level;
	u64 submit_ts;
	int rcu_id;
	u32 head;
	u32 tail;
	u64 size;
	int ret;

	received_ts = ktime_get_ns();

	size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
	n = (unsigned long)size * args->hdr.count;
	if (args->hdr.count == 0 || n / args->hdr.count != size)
		return -EINVAL;

	user_data = u64_to_user_ptr(args->data);

	exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	if (copy_from_user(exec, user_data, n)) {
		ret = -EFAULT;
		goto free_exec;
	}

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (!dbc->usr || dbc->usr->handle != usr->handle) {
		ret = -EPERM;
		goto release_ch_rcu;
	}

	head = readl(dbc->dbc_base + REQHP_OFF);
	tail = readl(dbc->dbc_base + REQTP_OFF);

	if (head == U32_MAX || tail == U32_MAX) {
		/* PCI link error */
		ret = -ENODEV;
		goto release_ch_rcu;
	}

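	/*
	 * Snapshot the request ring occupancy before this submission for the
	 * profiling data, e.g. (illustrative) nelem = 8, head = 6, tail = 2
	 * means 8 - (6 - 2) = 4 elements are still pending in the ring.
	 */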
	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);

	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
				     head, &tail);
	if (ret)
		goto release_ch_rcu;

	/* Finalize commit to hardware */
	submit_ts = ktime_get_ns();
	writel(tail, dbc->dbc_base + REQTP_OFF);

	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
			      submit_ts, queue_level);

	if (datapath_polling)
		schedule_work(&dbc->poll_work);

release_ch_rcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
free_exec:
	kfree(exec);
	return ret;
}

int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
}

int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
}

/*
 * Our interrupt handling is a bit more complicated than a simple ideal, but
 * sadly necessary.
 *
 * Each dbc has a completion queue. Entries in the queue correspond to DMA
 * requests which the device has processed. The hardware already has a built
 * in irq mitigation. When the device puts an entry into the queue, it will
 * only trigger an interrupt if the queue was empty. Therefore, when adding
 * the Nth event to a non-empty queue, the hardware doesn't trigger an
 * interrupt. This means the host doesn't get additional interrupts signaling
 * the same thing - the queue has something to process.
 * This behavior can be overridden in the DMA request.
 * This means that when the host receives an interrupt, it is required to
 * drain the queue.
 *
 * This behavior is what NAPI attempts to accomplish, although we can't use
 * NAPI as we don't have a netdev. We use threaded irqs instead.
 *
 * However, there is a situation where the host drains the queue fast enough
 * that every event causes an interrupt. Typically this is not a problem as
 * the rate of events would be low. However, that is not the case with
 * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
 * lprnet, the host receives roughly 80k interrupts per second from the device
 * (per /proc/interrupts). While NAPI documentation indicates the host should
 * just chug along, sadly that behavior causes instability in some hosts.
 *
 * Therefore, we implement an interrupt disable scheme similar to NAPI. The
 * key difference is that we will delay after draining the queue for a small
 * time to allow additional events to come in via polling. Using the above
 * lprnet workload, this reduces the number of interrupts processed from
 * ~80k/sec to about 64 in 5 minutes and appears to solve the system
 * instability.
 */
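/*
 * In short (a summary of the scheme above): the hard irq handler only checks
 * that the response queue is non-empty, disables the interrupt (unless it is
 * shared in single MSI mode) and defers to dbc_irq_threaded_fn(), which
 * drains the queue, briefly polls for stragglers, then re-enables the
 * interrupt and re-checks the queue to close the race. When datapath_polling
 * is set, irq_polling_work() wakes the same threaded handler instead.
 */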
irqreturn_t dbc_irq_handler(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	if (datapath_polling) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		/*
		 * Normally datapath_polling will not have irqs enabled, but
		 * when running with only one MSI the interrupt is shared with
		 * MHI so it cannot be disabled. Return ASAP instead.
		 */
		return IRQ_HANDLED;
	}

	if (!dbc->usr) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_HANDLED;
	}

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (head == tail) { /* queue empty */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (!dbc->qdev->single_msi)
		disable_irq_nosync(irq);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_WAKE_THREAD;
}

void irq_polling_work(struct work_struct *work)
{
	struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
	unsigned long flags;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	while (1) {
		if (dbc->qdev->dev_state != QAIC_ONLINE) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		if (!dbc->usr) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		if (list_empty(&dbc->xfer_list)) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);

		head = readl(dbc->dbc_base + RSPHP_OFF);
		if (head == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		tail = readl(dbc->dbc_base + RSPTP_OFF);
		if (tail == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		if (head != tail) {
			irq_wake_thread(dbc->irq, dbc);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		cond_resched();
		usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
	}
}

irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int event_count = NUM_EVENTS;
	int delay_count = NUM_DELAYS;
	struct qaic_device *qdev;
	struct qaic_bo *bo, *i;
	struct dbc_rsp *rsp;
	unsigned long flags;
	int rcu_id;
	u16 status;
	u16 req_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	qdev = dbc->qdev;

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) /* PCI link error */
		goto error_out;

read_fifo:

	if (!event_count) {
		event_count = NUM_EVENTS;
		cond_resched();
	}

	/*
	 * if this channel isn't assigned or gets unassigned during processing
	 * we have nothing further to do
	 */
	if (!dbc->usr)
		goto error_out;

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) /* PCI link error */
		goto error_out;

	if (head == tail) { /* queue empty */
		if (delay_count) {
			--delay_count;
			usleep_range(100, 200);
			goto read_fifo; /* check for a new event */
		}
		goto normal_out;
	}

	delay_count = NUM_DELAYS;
	while (head != tail) {
		if (!event_count)
			break;
		--event_count;
		rsp = dbc->rsp_q_base + head * sizeof(*rsp);
		req_id = le16_to_cpu(rsp->req_id);
		status = le16_to_cpu(rsp->status);
		if (status)
			pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		/*
		 * A BO can receive multiple interrupts, since a BO can be
		 * divided into multiple slices and a buffer receives as many
		 * interrupts as slices. So until it receives interrupts for
		 * all the slices we cannot mark that buffer complete.
		 */
		list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
			if (bo->req_id == req_id)
				bo->nr_slice_xfer_done++;
			else
				continue;

			if (bo->nr_slice_xfer_done < bo->nr_slice)
				break;

			/*
			 * At this point we have received all the interrupts for
			 * BO, which means BO execution is complete.
			 */
			dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
			bo->nr_slice_xfer_done = 0;
			list_del_init(&bo->xfer_list);
			bo->perf_stats.req_processed_ts = ktime_get_ns();
			complete_all(&bo->xfer_done);
			drm_gem_object_put(&bo->base);
			break;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		head = (head + 1) % dbc->nelem;
	}

	/*
	 * Update the head pointer of response queue and let the device know
	 * that we have consumed elements from the queue.
	 */
	writel(head, dbc->dbc_base + RSPHP_OFF);

	/* elements might have been put in the queue while we were processing */
	goto read_fifo;

normal_out:
	if (!qdev->single_msi && likely(!datapath_polling))
		enable_irq(irq);
	else if (unlikely(datapath_polling))
		schedule_work(&dbc->poll_work);
	/* checking the fifo and enabling irqs is a race, missed event check */
	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail != U32_MAX && head != tail) {
		if (!qdev->single_msi && likely(!datapath_polling))
			disable_irq_nosync(irq);
		goto read_fifo;
	}
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_HANDLED;

error_out:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	if (!qdev->single_msi && likely(!datapath_polling))
		enable_irq(irq);
	else if (unlikely(datapath_polling))
		schedule_work(&dbc->poll_work);

	return IRQ_HANDLED;
}

int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_wait *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long timeout;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	int rcu_id;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EPERM;
		goto unlock_ch_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_ch_srcu;
	}

	bo = to_qaic_bo(obj);
	timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
	timeout = msecs_to_jiffies(timeout);
	ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto put_obj;
	}
	if (ret > 0)
		ret = 0;

	if (!dbc->usr)
		ret = -EPERM;

put_obj:
	drm_gem_object_put(obj);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_perf_stats_entry *ent = NULL;
	struct qaic_perf_stats *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	int ret, i;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
	if (!ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
	if (ret) {
		ret = -EFAULT;
		goto free_ent;
	}

	for (i = 0; i < args->hdr.count; i++) {
		obj = drm_gem_object_lookup(file_priv, ent[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto free_ent;
		}
		bo = to_qaic_bo(obj);
		/*
		 * If the perf stats ioctl is called before the wait ioctl has
		 * completed, then the latency information is invalid.
		 */
		if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
			ent[i].device_latency_us = 0;
		} else {
			ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
							    bo->perf_stats.req_submit_ts), 1000);
		}
		ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
						    bo->perf_stats.req_received_ts), 1000);
		ent[i].queue_level_before = bo->perf_stats.queue_level_before;
		ent[i].num_queue_element = bo->total_slice_nents;
		drm_gem_object_put(obj);
	}

	if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
		ret = -EFAULT;

free_ent:
	kfree(ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	qaic_free_slices_bo(bo);
	qaic_unprepare_bo(qdev, bo);
	qaic_init_bo(bo, true);
	list_del(&bo->bo_list);
	drm_gem_object_put(&bo->base);
}

int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_detach_slice *args = data;
	int rcu_id, usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	unsigned long flags;
	struct qaic_bo *bo;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_dev_srcu;
	}

	bo = to_qaic_bo(obj);
	ret = mutex_lock_interruptible(&bo->lock);
	if (ret)
		goto put_bo;

	if (!bo->sliced) {
		ret = -EINVAL;
		goto unlock_bo;
	}

	dbc = bo->dbc;
	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EINVAL;
		goto unlock_ch_srcu;
	}

	/* Check if BO is committed to H/W for DMA */
	spin_lock_irqsave(&dbc->xfer_lock, flags);
	if (bo_queued(bo)) {
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		ret = -EBUSY;
		goto unlock_ch_srcu;
	}
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);

	detach_slice_bo(qdev, bo);

unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_bo:
	mutex_unlock(&bo->lock);
put_bo:
	drm_gem_object_put(obj);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{
	unsigned long flags;
	struct qaic_bo *bo;

	spin_lock_irqsave(&dbc->xfer_lock, flags);
	while (!list_empty(&dbc->xfer_list)) {
		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
		list_del_init(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		bo->nr_slice_xfer_done = 0;
		bo->req_id = 0;
		bo->perf_stats.req_received_ts = 0;
		bo->perf_stats.req_submit_ts = 0;
		bo->perf_stats.req_processed_ts = 0;
		bo->perf_stats.queue_level_before = 0;
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		complete_all(&bo->xfer_done);
		drm_gem_object_put(&bo->base);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
	}
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}

int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
	if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
		return -EPERM;

	qdev->dbc[dbc_id].usr = NULL;
	synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
	return 0;
}

/**
 * enable_dbc - Enable the DBC. DBCs are disabled by removing the user
 * context. Add the user context back to the DBC to enable it. This function
 * trusts the DBC ID passed and expects the DBC to be disabled.
 * @qdev: Qranium device handle
 * @dbc_id: ID of the DBC
 * @usr: User context
 */
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
	qdev->dbc[dbc_id].usr = usr;
}

void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];

	dbc->usr = NULL;
	empty_xfer_list(qdev, dbc);
	synchronize_srcu(&dbc->ch_lock);
	/*
	 * Threads holding the channel lock may have added more elements to
	 * the xfer_list. Flush those elements out of the xfer_list as well.
	 */
	empty_xfer_list(qdev, dbc);
}

void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{
	struct qaic_bo *bo, *bo_temp;
	struct dma_bridge_chan *dbc;

	dbc = &qdev->dbc[dbc_id];
	if (!dbc->in_use)
		return;

	wakeup_dbc(qdev, dbc_id);

	dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
	dbc->total_size = 0;
	dbc->req_q_base = NULL;
	dbc->dma_addr = 0;
	dbc->nelem = 0;
	dbc->usr = NULL;

	list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
		drm_gem_object_get(&bo->base);
		mutex_lock(&bo->lock);
		detach_slice_bo(qdev, bo);
		mutex_unlock(&bo->lock);
		drm_gem_object_put(&bo->base);
	}

	dbc->in_use = false;
	wake_up(&dbc->dbc_release);
}

void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
{
	if (!dbc || !head || !tail)
		return;

	*head = readl(dbc->dbc_base + REQHP_OFF);
	*tail = readl(dbc->dbc_base + REQTP_OFF);
}