// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define SEM_VAL_MASK	GENMASK_ULL(11, 0)
#define SEM_INDEX_MASK	GENMASK_ULL(4, 0)
#define BULK_XFER	BIT(3)
#define GEN_COMPLETION	BIT(4)
#define INBOUND_XFER	1
#define OUTBOUND_XFER	2
#define REQHP_OFF	0x0 /* we read this */
#define REQTP_OFF	0x4 /* we write this */
#define RSPHP_OFF	0x8 /* we write this */
#define RSPTP_OFF	0xc /* we read this */

#define ENCODE_SEM(val, index, sync, cmd, flags)	\
({							\
	FIELD_PREP(GENMASK(11, 0), (val)) |		\
	FIELD_PREP(GENMASK(20, 16), (index)) |		\
	FIELD_PREP(BIT(22), (sync)) |			\
	FIELD_PREP(GENMASK(26, 24), (cmd)) |		\
	FIELD_PREP(GENMASK(30, 29), (flags)) |		\
	FIELD_PREP(BIT(31), (cmd) ? 1 : 0);		\
})
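
/*
 * Illustrative example (not from the hardware spec): ENCODE_SEM(5, 2, 0, 1, 0)
 * evaluates to 0x81020005 - semaphore value 5 in bits 11:0, semaphore index 2
 * in bits 20:16, no presync, command 1 in bits 26:24, and bit 31 set because a
 * non-zero command is present.
 */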
#define NUM_EVENTS	128
#define NUM_DELAYS	10
#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())

static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");

static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
module_param(datapath_poll_interval_us, uint, 0600);
MODULE_PARM_DESC(datapath_poll_interval_us,
		 "Amount of time to sleep between activity when datapath polling is enabled");

struct dbc_req {
	/*
	 * A request ID is assigned to each memory handle going into the DMA
	 * queue. Since a single memory handle can enqueue multiple elements
	 * into the DMA queue, all of them share the same request ID.
	 */
	__le16	req_id;
	/* Future use */
	__u8	seq_id;
	/*
	 * Special encoded variable
	 * 7	0 - Do not force to generate MSI after DMA is completed
	 *	1 - Force to generate MSI after DMA is completed
	 * 6:5	Reserved
	 * 4	1 - Generate completion element in the response queue
	 *	0 - No Completion Code
	 * 3	0 - DMA request is a Link list transfer
	 *	1 - DMA request is a Bulk transfer
	 * 2	Reserved
	 * 1:0	00 - No DMA transfer involved
	 *	01 - DMA transfer is part of inbound transfer
	 *	10 - DMA transfer has outbound transfer
	 *	11 - NA
	 */
	__u8	cmd;
	__le32	resv;
	/* Source address for the transfer */
	__le64	src_addr;
	/* Destination address for the transfer */
	__le64	dest_addr;
	/* Length of transfer request */
	__le32	len;
	__le32	resv2;
	/* Doorbell address */
	__le64	db_addr;
	/*
	 * Special encoded variable
	 * 7	1 - Doorbell(db) write
	 *	0 - No doorbell write
	 * 6:2	Reserved
	 * 1:0	00 - 32 bit access, db address must be aligned to 32bit-boundary
	 *	01 - 16 bit access, db address must be aligned to 16bit-boundary
	 *	10 - 8 bit access, db address must be aligned to 8bit-boundary
	 *	11 - Reserved
	 */
	__u8	db_len;
	__u8	resv3;
	__le16	resv4;
	/* 32 bit data written to doorbell address */
	__le32	db_data;
	/*
	 * Special encoded variable
	 * All the fields of sem_cmdX are passed from user and all are ORed
	 * together to form sem_cmd.
	 * 11:0	 Semaphore value
	 * 15:12 Reserved
	 * 20:16 Semaphore index
	 * 21	 Reserved
	 * 22	 Semaphore Sync
	 * 23	 Reserved
	 * 26:24 Semaphore command
	 * 28:27 Reserved
	 * 29	 Semaphore DMA out bound sync fence
	 * 30	 Semaphore DMA in bound sync fence
	 * 31	 Enable semaphore command
	 */
	__le32	sem_cmd0;
	__le32	sem_cmd1;
	__le32	sem_cmd2;
	__le32	sem_cmd3;
} __packed;

struct dbc_rsp {
	/* Request ID of the memory handle whose DMA transaction is completed */
	__le16	req_id;
	/* Status of the DMA transaction. 0: success, anything else: failure */
	__le16	status;
} __packed;

inline int get_dbc_req_elem_size(void)
{
	return sizeof(struct dbc_req);
}

inline int get_dbc_rsp_elem_size(void)
{
	return sizeof(struct dbc_rsp);
}
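
/*
 * Called when the last kref on a slice is dropped: undo the accounting done
 * in qaic_map_one_slice(), free the cloned sg_table and the encoded DMA
 * requests, and release the reference the slice held on its parent BO.
 */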
static void free_slice(struct kref *kref)
{
	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);

	slice->bo->total_slice_nents -= slice->nents;
	list_del(&slice->slice);
	drm_gem_object_put(&slice->bo->base);
	sg_free_table(slice->sgt);
	kfree(slice->sgt);
	kfree(slice->reqs);
	kfree(slice);
}

static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
					struct sg_table *sgt_in, u64 size, u64 offset)
{
	int total_len, len, nents, offf = 0, offl = 0;
	struct scatterlist *sg, *sgn, *sgf, *sgl;
	struct sg_table *sgt;
	int ret, j;

	/* find out number of relevant nents needed for this mem */
	total_len = 0;
	sgf = NULL;
	sgl = NULL;
	nents = 0;

	size = size ? size : PAGE_SIZE;
	for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
		len = sg_dma_len(sg);

		if (!len)
			continue;
		if (offset >= total_len && offset < total_len + len) {
			sgf = sg;
			offf = offset - total_len;
		}
		if (sgf)
			nents++;
		if (offset + size >= total_len &&
		    offset + size <= total_len + len) {
			sgl = sg;
			offl = offset + size - total_len;
			break;
		}
		total_len += len;
	}

	if (!sgf || !sgl) {
		ret = -EINVAL;
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret)
		goto free_sgt;

	/* copy relevant sg node and fix page and length */
	sgn = sgf;
	for_each_sgtable_sg(sgt, sg, j) {
		memcpy(sg, sgn, sizeof(*sg));
		if (sgn == sgf) {
			sg_dma_address(sg) += offf;
			sg_dma_len(sg) -= offf;
			sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
		} else {
			offf = 0;
		}
		if (sgn == sgl) {
			sg_dma_len(sg) = offl - offf;
			sg_set_page(sg, sg_page(sgn), offl - offf, offf);
			sg_mark_end(sg);
			break;
		}
		sgn = sg_next(sgn);
	}

	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
out:
	*sgt_out = NULL;
	return ret;
}
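
/*
 * Illustrative example for clone_range_of_sgt_for_slice() above (numbers are
 * made up): with three 4K entries in sgt_in and a slice of offset 0x1800 and
 * size 0x1000, the clone has two entries covering the last 0x800 bytes of the
 * second source entry and the first 0x800 bytes of the third.
 */
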
static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
		       struct qaic_attach_slice_entry *req)
{
	__le64 db_addr = cpu_to_le64(req->db_addr);
	__le32 db_data = cpu_to_le32(req->db_data);
	struct scatterlist *sg;
	__u8 cmd = BULK_XFER;
	int presync_sem;
	u64 dev_addr;
	__u8 db_len;
	int i;

	if (!slice->no_xfer)
		cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);

	if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
		return -EINVAL;

	presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
	if (presync_sem > 1)
		return -EINVAL;

	presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
		      req->sem2.presync << 2 | req->sem3.presync << 3;

	switch (req->db_len) {
	case 32:
		db_len = BIT(7);
		break;
	case 16:
		db_len = BIT(7) | 1;
		break;
	case 8:
		db_len = BIT(7) | 2;
		break;
	case 0:
		db_len = 0; /* doorbell is not active for this command */
		break;
	default:
		return -EINVAL; /* should never hit this */
	}

	/*
	 * When we end up splitting up a single request (ie a buf slice) into
	 * multiple DMA requests, we have to manage the sync data carefully.
	 * There can only be one presync sem. That needs to be on every xfer
	 * so that the DMA engine doesn't transfer data before the receiver is
	 * ready. We only do the doorbell and postsync sems after the xfer.
	 * To guarantee previous xfers for the request are complete, we use a
	 * fence.
	 */
	dev_addr = req->dev_addr;
	for_each_sgtable_sg(slice->sgt, sg, i) {
		slice->reqs[i].cmd = cmd;
		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						      sg_dma_address(sg) : dev_addr);
		slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						       dev_addr : sg_dma_address(sg));
		/*
		 * sg_dma_len(sg) returns size of a DMA segment, maximum DMA
		 * segment size is set to UINT_MAX by qaic and hence return
		 * values of sg_dma_len(sg) can never exceed u32 range. So,
		 * by down sizing we are not corrupting the value.
		 */
		slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
		switch (presync_sem) {
		case BIT(0):
			slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
									 req->sem0.index,
									 req->sem0.presync,
									 req->sem0.cmd,
									 req->sem0.flags));
			break;
		case BIT(1):
			slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
									 req->sem1.index,
									 req->sem1.presync,
									 req->sem1.cmd,
									 req->sem1.flags));
			break;
		case BIT(2):
			slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
									 req->sem2.index,
									 req->sem2.presync,
									 req->sem2.cmd,
									 req->sem2.flags));
			break;
		case BIT(3):
			slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
									 req->sem3.index,
									 req->sem3.presync,
									 req->sem3.cmd,
									 req->sem3.flags));
			break;
		}
		dev_addr += sg_dma_len(sg);
	}
	/* add post transfer stuff to last segment */
	i--;
	slice->reqs[i].cmd |= GEN_COMPLETION;
	slice->reqs[i].db_addr = db_addr;
	slice->reqs[i].db_len = db_len;
	slice->reqs[i].db_data = db_data;
	/*
	 * Add a fence if we have more than one request going to the hardware
	 * representing the entirety of the user request, and the user request
	 * has no presync condition.
	 * Fences are expensive, so we try to avoid them. We rely on the
	 * hardware behavior to avoid needing one when there is a presync
	 * condition. When a presync exists, all requests for that same
	 * presync will be queued into a fifo. Thus, since we queue the
	 * post xfer activity only on the last request we queue, the hardware
	 * will ensure that the last queued request is processed last, thus
	 * making sure the post xfer activity happens at the right time without
	 * a fence.
	 */
	if (i && !presync_sem)
		req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
				    QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
	slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
							 req->sem0.presync, req->sem0.cmd,
							 req->sem0.flags));
	slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
							 req->sem1.presync, req->sem1.cmd,
							 req->sem1.flags));
	slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
							 req->sem2.presync, req->sem2.cmd,
							 req->sem2.flags));
	slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
							 req->sem3.presync, req->sem3.cmd,
							 req->sem3.flags));

	return 0;
}
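
/*
 * Create one slice of a BO: clone the portion of the BO's sg_table that backs
 * [offset, offset + size), encode a DMA request per cloned entry, and link the
 * slice to the BO, taking a GEM reference that is dropped in free_slice().
 */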
static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
			      struct qaic_attach_slice_entry *slice_ent)
{
	struct sg_table *sgt = NULL;
	struct bo_slice *slice;
	int ret;

	ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
	if (ret)
		goto out;

	slice = kmalloc(sizeof(*slice), GFP_KERNEL);
	if (!slice) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
	if (!slice->reqs) {
		ret = -ENOMEM;
		goto free_slice;
	}

	slice->no_xfer = !slice_ent->size;
	slice->sgt = sgt;
	slice->nents = sgt->nents;
	slice->dir = bo->dir;
	slice->bo = bo;
	slice->size = slice_ent->size;
	slice->offset = slice_ent->offset;

	ret = encode_reqs(qdev, slice, slice_ent);
	if (ret)
		goto free_req;

	bo->total_slice_nents += sgt->nents;
	kref_init(&slice->ref_count);
	drm_gem_object_get(&bo->base);
	list_add_tail(&slice->slice, &bo->slices);

	return 0;

free_req:
	kfree(slice->reqs);
free_slice:
	kfree(slice);
free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
out:
	return ret;
}
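
/*
 * Back a BO with freshly allocated pages: try the largest page order that
 * still fits the remaining size, fall back to smaller orders on allocation
 * failure, and trim the over-allocation (buf_extra) off the final
 * scatterlist entry.
 */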
static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
{
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct page **pages;
	int *pages_order;
	int buf_extra;
	int max_order;
	int nr_pages;
	int ret = 0;
	int i, j, k;
	int order;

	if (size) {
		nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
		/*
		 * calculate how much extra we are going to allocate, to remove
		 * later
		 */
		buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
		max_order = min(MAX_ORDER, get_order(size));
	} else {
		/* allocate a single page for book keeping */
		nr_pages = 1;
		buf_extra = 0;
		max_order = 0;
	}

	pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}
	pages_order = (void *)pages + sizeof(*pages) * nr_pages;

	/*
	 * Allocate requested memory using alloc_pages. It is possible to allocate
	 * the requested memory in multiple chunks by calling alloc_pages
	 * multiple times. Use SG table to handle multiple allocated pages.
	 */
	i = 0;
	while (nr_pages > 0) {
		order = min(get_order(nr_pages * PAGE_SIZE), max_order);
		while (1) {
			pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
					       __GFP_NOWARN | __GFP_ZERO |
					       (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
					       order);
			if (pages[i])
				break;
			if (!order--) {
				ret = -ENOMEM;
				goto free_partial_alloc;
			}
		}

		max_order = order;
		pages_order[i] = order;

		nr_pages -= 1 << order;
		if (nr_pages <= 0)
			/* account for over allocation */
			buf_extra += abs(nr_pages) * PAGE_SIZE;
		i++;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto free_partial_alloc;
	}

	if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	/* Populate the SG table with the allocated memory pages */
	sg = sgt->sgl;
	for (k = 0; k < i; k++, sg = sg_next(sg)) {
		/* Last entry requires special handling */
		if (k < i - 1) {
			sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
		} else {
			sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
			sg_mark_end(sg);
		}
	}

	kvfree(pages);
	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
free_partial_alloc:
	for (j = 0; j < i; j++)
		__free_pages(pages[j], pages_order[j]);
	kvfree(pages);
out:
	*sgt_out = NULL;
	return ret;
}

static bool invalid_sem(struct qaic_sem *sem)
{
	if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
	    !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
	    sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
	    sem->cmd > QAIC_SEM_WAIT_GT_0)
		return true;
	return false;
}

static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
			     u32 count, u64 total_size)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
		      slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
		    invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
		    invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
			return -EINVAL;

		if (slice_ent[i].offset + slice_ent[i].size > total_size)
			return -EINVAL;
	}

	return 0;
}
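
/*
 * Release the pages backing a BO allocated by create_sgt(). Imported dma-buf
 * BOs never take this path; they are torn down via drm_prime_gem_destroy().
 */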
static void qaic_free_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;

	for (sg = sgt->sgl; sg; sg = sg_next(sg))
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	sg_free_table(sgt);
	kfree(sgt);
}

static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
				const struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
}

static const struct vm_operations_struct drm_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct qaic_bo *bo = to_qaic_bo(obj);
	unsigned long offset = 0;
	struct scatterlist *sg;
	int ret = 0;

	if (obj->import_attach)
		return -EINVAL;

	for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
		if (sg_page(sg)) {
			ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
					      sg->length, vma->vm_page_prot);
			if (ret)
				goto out;
			offset += sg->length;
		}
	}

out:
	return ret;
}

static void qaic_free_object(struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	if (obj->import_attach) {
		/* DMABUF/PRIME Path */
		drm_prime_gem_destroy(obj, NULL);
	} else {
		/* Private buffer allocation path */
		qaic_free_sgt(bo->sgt);
	}

	mutex_destroy(&bo->lock);
	drm_gem_object_release(obj);
	kfree(bo);
}

static const struct drm_gem_object_funcs qaic_gem_funcs = {
	.free = qaic_free_object,
	.print_info = qaic_gem_print_info,
	.mmap = qaic_gem_object_mmap,
	.vm_ops = &drm_vm_ops,
};

static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
{
	if (reinit) {
		bo->sliced = false;
		reinit_completion(&bo->xfer_done);
	} else {
		mutex_init(&bo->lock);
		init_completion(&bo->xfer_done);
	}
	complete_all(&bo->xfer_done);
	INIT_LIST_HEAD(&bo->slices);
}

static struct qaic_bo *qaic_alloc_init_bo(void)
{
	struct qaic_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	qaic_init_bo(bo, false);

	return bo;
}

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_create_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	size_t size;
	int ret;

	if (args->pad)
		return -EINVAL;

	size = PAGE_ALIGN(args->size);
	if (size == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto unlock_dev_srcu;
	}
	obj = &bo->base;

	drm_gem_private_object_init(dev, obj, size);

	obj->funcs = &qaic_gem_funcs;
	ret = create_sgt(qdev, &bo->sgt, size);
	if (ret)
		goto free_bo;

	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
	if (ret)
		goto free_sgt;

	bo->handle = args->handle;
	drm_gem_object_put(obj);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

free_sgt:
	qaic_free_sgt(bo->sgt);
free_bo:
	kfree(bo);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_mmap_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	int ret;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_dev_srcu;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);

unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	size_t size;
	int ret;

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto out;
	}

	obj = &bo->base;
	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto attach_fail;
	}

	size = PAGE_ALIGN(attach->dmabuf->size);
	if (size == 0) {
		ret = -EINVAL;
		goto size_align_fail;
	}

	drm_gem_private_object_init(dev, obj, size);
	/*
	 * skipping dma_buf_map_attachment() as we do not know the direction
	 * just yet. Once the direction is known in the subsequent IOCTL to
	 * attach slicing, we can do it then.
	 */

	obj->funcs = &qaic_gem_funcs;
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

size_align_fail:
	dma_buf_detach(dma_buf, attach);
attach_fail:
	dma_buf_put(dma_buf);
	kfree(bo);
out:
	return ERR_PTR(ret);
}

static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
{
	struct drm_gem_object *obj = &bo->base;
	struct sg_table *sgt;
	int ret;

	if (obj->import_attach->dmabuf->size < hdr->size)
		return -EINVAL;

	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		return ret;
	}

	bo->sgt = sgt;

	return 0;
}

static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	if (bo->base.size < hdr->size)
		return -EINVAL;

	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
	if (ret)
		return -EFAULT;

	return 0;
}

static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
			   struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	if (bo->base.import_attach)
		ret = qaic_prepare_import_bo(bo, hdr);
	else
		ret = qaic_prepare_export_bo(qdev, bo, hdr);
	bo->dir = hdr->dir;
	bo->dbc = &qdev->dbc[hdr->dbc_id];
	bo->nr_slice = hdr->count;

	return ret;
}

static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
	bo->sgt = NULL;
}

static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
}

static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	if (bo->base.import_attach)
		qaic_unprepare_import_bo(bo);
	else
		qaic_unprepare_export_bo(qdev, bo);

	bo->dir = 0;
	bo->dbc = NULL;
	bo->nr_slice = 0;
}

static void qaic_free_slices_bo(struct qaic_bo *bo)
{
	struct bo_slice *slice, *temp;

	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
		kref_put(&slice->ref_count, free_slice);
	if (WARN_ON_ONCE(bo->total_slice_nents != 0))
		bo->total_slice_nents = 0;
	bo->nr_slice = 0;
}
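
/*
 * Map every user supplied slice entry onto the BO. If any slice fails, or the
 * combined number of scatterlist entries would not fit in the DBC request
 * ring, all slices created so far are released again.
 */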
static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr,
				  struct qaic_attach_slice_entry *slice_ent)
{
	int ret, i;

	for (i = 0; i < hdr->count; i++) {
		ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
		if (ret) {
			qaic_free_slices_bo(bo);
			return ret;
		}
	}

	if (bo->total_slice_nents > bo->dbc->nelem) {
		qaic_free_slices_bo(bo);
		return -ENOSPC;
	}

	return 0;
}

int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_attach_slice_entry *slice_ent;
	struct qaic_attach_slice *args = data;
	int rcu_id, usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long arg_size;
	struct qaic_user *usr;
	u8 __user *user_data;
	struct qaic_bo *bo;
	int ret;

	if (args->hdr.count == 0)
		return -EINVAL;

	arg_size = args->hdr.count * sizeof(*slice_ent);
	if (arg_size / args->hdr.count != sizeof(*slice_ent))
		return -EINVAL;

	if (args->hdr.size == 0)
		return -EINVAL;

	if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
		return -EINVAL;

	if (args->data == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	user_data = u64_to_user_ptr(args->data);

	slice_ent = kzalloc(arg_size, GFP_KERNEL);
	if (!slice_ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(slice_ent, user_data, arg_size);
	if (ret) {
		ret = -EFAULT;
		goto free_slice_ent;
	}

	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
	if (ret)
		goto free_slice_ent;

	obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
	if (!obj) {
		ret = -ENOENT;
		goto free_slice_ent;
	}

	bo = to_qaic_bo(obj);
	ret = mutex_lock_interruptible(&bo->lock);
	if (ret)
		goto put_bo;

	if (bo->sliced) {
		ret = -EINVAL;
		goto unlock_bo;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];
	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EINVAL;
		goto unlock_ch_srcu;
	}

	ret = qaic_prepare_bo(qdev, bo, &args->hdr);
	if (ret)
		goto unlock_ch_srcu;

	ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
	if (ret)
		goto unprepare_bo;

	if (args->hdr.dir == DMA_TO_DEVICE)
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);

	bo->sliced = true;
	list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	mutex_unlock(&bo->lock);
	kfree(slice_ent);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

unprepare_bo:
	qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_bo:
	mutex_unlock(&bo->lock);
put_bo:
	drm_gem_object_put(obj);
free_slice_ent:
	kfree(slice_ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
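
/*
 * The request ring keeps one slot empty so that a full ring can be told apart
 * from an empty one. For example (illustrative values), with q_size = 8,
 * head = 2 and tail = 6 there are 8 + (2 - 6 - 1) = 3 free slots.
 */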
static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
{
	u32 avail = head - tail - 1;

	if (head <= tail)
		avail += q_size;

	return avail;
}

static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
				 u32 head, u32 *ptail)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
	struct dbc_req *reqs = slice->reqs;
	u32 tail = *ptail;
	u32 avail;

	avail = fifo_space_avail(head, tail, dbc->nelem);
	if (avail < slice->nents)
		return -EAGAIN;

	if (tail + slice->nents > dbc->nelem) {
		avail = dbc->nelem - tail;
		avail = min_t(u32, avail, slice->nents);
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
		reqs += avail;
		avail = slice->nents - avail;
		if (avail)
			memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
	} else {
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
	}

	*ptail = (tail + slice->nents) % dbc->nelem;

	return 0;
}
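
/*
 * Variant of copy_exec_reqs() for partial executes: only the requests covering
 * the first `resize` bytes of the slice are queued. The final request is
 * copied with its length trimmed to the leftover bytes; a resize of zero turns
 * it into a no-DMA request that still carries the doorbell and semaphore
 * actions encoded on the slice's last element.
 */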
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
					 u64 resize, struct dma_bridge_chan *dbc, u32 head,
					 u32 *ptail)
{
	struct dbc_req *reqs = slice->reqs;
	struct dbc_req *last_req;
	u32 tail = *ptail;
	u64 last_bytes;
	u32 first_n;
	u32 avail;

	avail = fifo_space_avail(head, tail, dbc->nelem);

	/*
	 * After this for loop is complete, first_n represents the index
	 * of the last DMA request of this slice that needs to be
	 * transferred after resizing and last_bytes represents DMA size
	 * of that request.
	 */
	last_bytes = resize;
	for (first_n = 0; first_n < slice->nents; first_n++)
		if (last_bytes > le32_to_cpu(reqs[first_n].len))
			last_bytes -= le32_to_cpu(reqs[first_n].len);
		else
			break;

	if (avail < (first_n + 1))
		return -EAGAIN;

	if (first_n) {
		if (tail + first_n > dbc->nelem) {
			avail = dbc->nelem - tail;
			avail = min_t(u32, avail, first_n);
			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
			last_req = reqs + avail;
			avail = first_n - avail;
			if (avail)
				memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
		} else {
			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
		}
	}

	/*
	 * Copy over the last entry. Here we need to adjust len to the left over
	 * size, and set src and dst to the entry it is copied to.
	 */
	last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
	memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));

	/*
	 * last_bytes holds size of a DMA segment, maximum DMA segment size is
	 * set to UINT_MAX by qaic and hence last_bytes can never exceed u32
	 * range. So, by down sizing we are not corrupting the value.
	 */
	last_req->len = cpu_to_le32((u32)last_bytes);
	last_req->src_addr = reqs[first_n].src_addr;
	last_req->dest_addr = reqs[first_n].dest_addr;
	if (!last_bytes)
		/* Disable DMA transfer */
		last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;

	*ptail = (tail + first_n + 1) % dbc->nelem;

	return 0;
}

static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, struct dma_bridge_chan *dbc, u32 head,
				  u32 *tail)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct bo_slice *slice;
	unsigned long flags;
	struct qaic_bo *bo;
	bool queued;
	int i, j;
	int ret;

	for (i = 0; i < count; i++) {
		/*
		 * ref count will be decremented when the transfer of this
		 * buffer is complete. It is inside dbc_irq_threaded_fn().
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto failed_to_send_bo;
		}

		bo = to_qaic_bo(obj);
		ret = mutex_lock_interruptible(&bo->lock);
		if (ret)
			goto failed_to_send_bo;

		if (!bo->sliced) {
			ret = -EINVAL;
			goto unlock_bo;
		}

		if (is_partial && pexec[i].resize > bo->base.size) {
			ret = -EINVAL;
			goto unlock_bo;
		}

		spin_lock_irqsave(&dbc->xfer_lock, flags);
		queued = bo->queued;
		bo->queued = true;
		if (queued) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			ret = -EINVAL;
			goto unlock_bo;
		}

		bo->req_id = dbc->next_req_id++;

		list_for_each_entry(slice, &bo->slices, slice) {
			for (j = 0; j < slice->nents; j++)
				slice->reqs[j].req_id = cpu_to_le16(bo->req_id);

			if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
				/* Configure the slice for no DMA transfer */
				ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
			else if (is_partial && pexec[i].resize < slice->offset + slice->size)
				/* Configure the slice to be partially DMA transferred */
				ret = copy_partial_exec_reqs(qdev, slice,
							     pexec[i].resize - slice->offset, dbc,
							     head, tail);
			else
				ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
			if (ret) {
				bo->queued = false;
				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
				goto unlock_bo;
			}
		}
		reinit_completion(&bo->xfer_done);
		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
		mutex_unlock(&bo->lock);
	}

	return 0;

unlock_bo:
	mutex_unlock(&bo->lock);
failed_to_send_bo:
	if (likely(obj))
		drm_gem_object_put(obj);
	for (j = 0; j < i; j++) {
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
		obj = &bo->base;
		bo->queued = false;
		list_del(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		drm_gem_object_put(obj);
	}
	return ret;
}
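
/*
 * Stamp each submitted BO with when the execute request was received and when
 * it was committed to hardware, plus the request ring occupancy ahead of it,
 * so qaic_perf_stats_bo_ioctl() can later report per-BO latencies.
 */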
static void update_profiling_data(struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int i;

	for (i = 0; i < count; i++) {
		/*
		 * Since we already committed the BO to hardware, the only way
		 * this should fail is a pending signal. We can't cancel the
		 * submit to hardware, so we have to just skip the profiling
		 * data. In case the signal is not fatal to the process, we
		 * return success so that the user doesn't try to resubmit.
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj)
			break;
		bo = to_qaic_bo(obj);
		bo->perf_stats.req_received_ts = received_ts;
		bo->perf_stats.req_submit_ts = submit_ts;
		bo->perf_stats.queue_level_before = queue_level;
		queue_level += bo->total_slice_nents;
		drm_gem_object_put(obj);
	}
}

static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
				   bool is_partial)
{
	struct qaic_execute *args = data;
	struct qaic_execute_entry *exec;
	struct dma_bridge_chan *dbc;
	int usr_rcu_id, qdev_rcu_id;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	u8 __user *user_data;
	unsigned long n;
	u64 received_ts;
	u32 queue_level;
	u64 submit_ts;
	int rcu_id;
	u32 head;
	u32 tail;
	u64 size;
	int ret;

	received_ts = ktime_get_ns();

	size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
	n = (unsigned long)size * args->hdr.count;
	if (args->hdr.count == 0 || n / args->hdr.count != size)
		return -EINVAL;

	user_data = u64_to_user_ptr(args->data);

	exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	if (copy_from_user(exec, user_data, n)) {
		ret = -EFAULT;
		goto free_exec;
	}

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (!dbc->usr || dbc->usr->handle != usr->handle) {
		ret = -EPERM;
		goto release_ch_rcu;
	}

	head = readl(dbc->dbc_base + REQHP_OFF);
	tail = readl(dbc->dbc_base + REQTP_OFF);

	if (head == U32_MAX || tail == U32_MAX) {
		/* PCI link error */
		ret = -ENODEV;
		goto release_ch_rcu;
	}

	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);

	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
				     head, &tail);
	if (ret)
		goto release_ch_rcu;

	/* Finalize commit to hardware */
	submit_ts = ktime_get_ns();
	writel(tail, dbc->dbc_base + REQTP_OFF);

	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
			      submit_ts, queue_level);

	if (datapath_polling)
		schedule_work(&dbc->poll_work);

release_ch_rcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
free_exec:
	kfree(exec);
	return ret;
}

int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
}

int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
}

/*
 * Our interrupt handling is a bit more complicated than a simple ideal, but
 * sadly necessary.
 *
 * Each dbc has a completion queue. Entries in the queue correspond to DMA
 * requests which the device has processed. The hardware already has a built
 * in irq mitigation. When the device puts an entry into the queue, it will
 * only trigger an interrupt if the queue was empty. Therefore, when adding
 * the Nth event to a non-empty queue, the hardware doesn't trigger an
 * interrupt. This means the host doesn't get additional interrupts signaling
 * the same thing - the queue has something to process.
 * This behavior can be overridden in the DMA request.
 * This means that when the host receives an interrupt, it is required to
 * drain the queue.
 *
 * This behavior is what NAPI attempts to accomplish, although we can't use
 * NAPI as we don't have a netdev. We use threaded irqs instead.
 *
 * However, there is a situation where the host drains the queue fast enough
 * that every event causes an interrupt. Typically this is not a problem as
 * the rate of events would be low. However, that is not the case with
 * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
 * lprnet, the host receives roughly 80k interrupts per second from the device
 * (per /proc/interrupts). While NAPI documentation indicates the host should
 * just chug along, sadly that behavior causes instability in some hosts.
 *
 * Therefore, we implement an interrupt disable scheme similar to NAPI. The
 * key difference is that we will delay after draining the queue for a small
 * time to allow additional events to come in via polling. Using the above
 * lprnet workload, this reduces the number of interrupts processed from
 * ~80k/sec to about 64 in 5 minutes and appears to solve the system
 * instability.
 */
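
/*
 * Hard IRQ handler for a DBC. In polling mode it simply acknowledges the
 * interrupt; otherwise it checks that the response queue really has work and,
 * if so, masks the interrupt (unless a single shared MSI is in use) and defers
 * draining to dbc_irq_threaded_fn().
 */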
irqreturn_t dbc_irq_handler(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	if (datapath_polling) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		/*
		 * Normally datapath_polling will not have irqs enabled, but
		 * when running with only one MSI the interrupt is shared with
		 * MHI so it cannot be disabled. Return ASAP instead.
		 */
		return IRQ_HANDLED;
	}

	if (!dbc->usr) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_HANDLED;
	}

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (head == tail) { /* queue empty */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (!dbc->qdev->single_msi)
		disable_irq_nosync(irq);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_WAKE_THREAD;
}

void irq_polling_work(struct work_struct *work)
{
	struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
	unsigned long flags;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	while (1) {
		if (dbc->qdev->dev_state != QAIC_ONLINE) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		if (!dbc->usr) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		if (list_empty(&dbc->xfer_list)) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);

		head = readl(dbc->dbc_base + RSPHP_OFF);
		if (head == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		tail = readl(dbc->dbc_base + RSPTP_OFF);
		if (tail == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		if (head != tail) {
			irq_wake_thread(dbc->irq, dbc);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		cond_resched();
		usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
	}
}
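
/*
 * Threaded IRQ handler: drain the response queue, completing a BO once all of
 * its slices have been reported done. It allows NUM_EVENTS completions between
 * cond_resched() calls, sleeps briefly up to NUM_DELAYS times when the queue
 * looks empty to batch further events, and re-checks the queue after
 * re-enabling the interrupt to close the race with a late completion.
 */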
irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int event_count = NUM_EVENTS;
	int delay_count = NUM_DELAYS;
	struct qaic_device *qdev;
	struct qaic_bo *bo, *i;
	struct dbc_rsp *rsp;
	unsigned long flags;
	int rcu_id;
	u16 status;
	u16 req_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	qdev = dbc->qdev;

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) /* PCI link error */
		goto error_out;

read_fifo:

	if (!event_count) {
		event_count = NUM_EVENTS;
		cond_resched();
	}

	/*
	 * if this channel isn't assigned or gets unassigned during processing
	 * we have nothing further to do
	 */
	if (!dbc->usr)
		goto error_out;

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) /* PCI link error */
		goto error_out;

	if (head == tail) { /* queue empty */
		if (delay_count) {
			--delay_count;
			usleep_range(100, 200);
			goto read_fifo; /* check for a new event */
		}
		goto normal_out;
	}

	delay_count = NUM_DELAYS;
	while (head != tail) {
		if (!event_count)
			break;
		--event_count;
		rsp = dbc->rsp_q_base + head * sizeof(*rsp);
		req_id = le16_to_cpu(rsp->req_id);
		status = le16_to_cpu(rsp->status);
		if (status)
			pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		/*
		 * A BO can receive multiple interrupts, since a BO can be
		 * divided into multiple slices and a buffer receives as many
		 * interrupts as slices. So until it receives interrupts for
		 * all the slices we cannot mark that buffer complete.
		 */
		list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
			if (bo->req_id == req_id)
				bo->nr_slice_xfer_done++;
			else
				continue;

			if (bo->nr_slice_xfer_done < bo->nr_slice)
				break;

			/*
			 * At this point we have received all the interrupts for
			 * BO, which means BO execution is complete.
			 */
			dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
			bo->nr_slice_xfer_done = 0;
			bo->queued = false;
			list_del(&bo->xfer_list);
			bo->perf_stats.req_processed_ts = ktime_get_ns();
			complete_all(&bo->xfer_done);
			drm_gem_object_put(&bo->base);
			break;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		head = (head + 1) % dbc->nelem;
	}

	/*
	 * Update the head pointer of response queue and let the device know
	 * that we have consumed elements from the queue.
	 */
	writel(head, dbc->dbc_base + RSPHP_OFF);

	/* elements might have been put in the queue while we were processing */
	goto read_fifo;

normal_out:
	if (!qdev->single_msi && likely(!datapath_polling))
		enable_irq(irq);
	else if (unlikely(datapath_polling))
		schedule_work(&dbc->poll_work);
	/* checking the fifo and enabling irqs is a race, missed event check */
	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail != U32_MAX && head != tail) {
		if (!qdev->single_msi && likely(!datapath_polling))
			disable_irq_nosync(irq);
		goto read_fifo;
	}
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_HANDLED;

error_out:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	if (!qdev->single_msi && likely(!datapath_polling))
		enable_irq(irq);
	else if (unlikely(datapath_polling))
		schedule_work(&dbc->poll_work);

	return IRQ_HANDLED;
}

int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_wait *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long timeout;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	int rcu_id;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EPERM;
		goto unlock_ch_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_ch_srcu;
	}

	bo = to_qaic_bo(obj);
	timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
	timeout = msecs_to_jiffies(timeout);
	ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto put_obj;
	}
	if (ret > 0)
		ret = 0;

	if (!dbc->usr)
		ret = -EPERM;

put_obj:
	drm_gem_object_put(obj);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
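
/*
 * Report the per-BO profiling data gathered during execute: latencies are
 * derived from the stored nanosecond timestamps and converted to microseconds
 * (e.g. device_latency_us = (req_processed_ts - req_submit_ts) / 1000).
 */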
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_perf_stats_entry *ent = NULL;
	struct qaic_perf_stats *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	int ret, i;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
	if (!ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
	if (ret) {
		ret = -EFAULT;
		goto free_ent;
	}

	for (i = 0; i < args->hdr.count; i++) {
		obj = drm_gem_object_lookup(file_priv, ent[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto free_ent;
		}
		bo = to_qaic_bo(obj);
		/*
		 * If the perf stats ioctl is called before the wait ioctl has
		 * completed, the latency information is invalid.
		 */
		if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
			ent[i].device_latency_us = 0;
		} else {
			ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
							    bo->perf_stats.req_submit_ts), 1000);
		}
		ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
						    bo->perf_stats.req_received_ts), 1000);
		ent[i].queue_level_before = bo->perf_stats.queue_level_before;
		ent[i].num_queue_element = bo->total_slice_nents;
		drm_gem_object_put(obj);
	}

	if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
		ret = -EFAULT;

free_ent:
	kfree(ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	qaic_free_slices_bo(bo);
	qaic_unprepare_bo(qdev, bo);
	qaic_init_bo(bo, true);
	list_del(&bo->bo_list);
	drm_gem_object_put(&bo->base);
}

int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_detach_slice *args = data;
	int rcu_id, usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	unsigned long flags;
	struct qaic_bo *bo;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_dev_srcu;
	}

	bo = to_qaic_bo(obj);
	ret = mutex_lock_interruptible(&bo->lock);
	if (ret)
		goto put_bo;

	if (!bo->sliced) {
		ret = -EINVAL;
		goto unlock_bo;
	}

	dbc = bo->dbc;
	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EINVAL;
		goto unlock_ch_srcu;
	}

	/* Check if BO is committed to H/W for DMA */
	spin_lock_irqsave(&dbc->xfer_lock, flags);
	if (bo->queued) {
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		ret = -EBUSY;
		goto unlock_ch_srcu;
	}
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);

	detach_slice_bo(qdev, bo);

unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_bo:
	mutex_unlock(&bo->lock);
put_bo:
	drm_gem_object_put(obj);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
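
/*
 * Force-complete every BO still on the channel's transfer list, e.g. when the
 * channel is being torn down. The xfer_lock is dropped around each BO's
 * cleanup so the DMA sync, completion, and GEM reference drop happen outside
 * the spinlock.
 */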
static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{
	unsigned long flags;
	struct qaic_bo *bo;

	spin_lock_irqsave(&dbc->xfer_lock, flags);
	while (!list_empty(&dbc->xfer_list)) {
		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
		bo->queued = false;
		list_del(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		bo->nr_slice_xfer_done = 0;
		bo->req_id = 0;
		bo->perf_stats.req_received_ts = 0;
		bo->perf_stats.req_submit_ts = 0;
		bo->perf_stats.req_processed_ts = 0;
		bo->perf_stats.queue_level_before = 0;
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		complete_all(&bo->xfer_done);
		drm_gem_object_put(&bo->base);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
	}
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}

int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
	if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
		return -EPERM;

	qdev->dbc[dbc_id].usr = NULL;
	synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
	return 0;
}

/**
 * enable_dbc - Enable the DBC. DBCs are disabled by removing the context of
 * user. Add user context back to DBC to enable it. This function trusts the
 * DBC ID passed and expects the DBC to be disabled.
 * @qdev: Qranium device handle
 * @dbc_id: ID of the DBC
 * @usr: User context
 */
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
	qdev->dbc[dbc_id].usr = usr;
}

void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];

	dbc->usr = NULL;
	empty_xfer_list(qdev, dbc);
	synchronize_srcu(&dbc->ch_lock);
	/*
	 * Threads holding the channel lock may have added more elements to
	 * xfer_list while we were draining it. Flush those out as well.
	 */
	empty_xfer_list(qdev, dbc);
}

void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{
	struct qaic_bo *bo, *bo_temp;
	struct dma_bridge_chan *dbc;

	dbc = &qdev->dbc[dbc_id];
	if (!dbc->in_use)
		return;

	wakeup_dbc(qdev, dbc_id);

	dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
	dbc->total_size = 0;
	dbc->req_q_base = NULL;
	dbc->dma_addr = 0;
	dbc->nelem = 0;
	dbc->usr = NULL;

	list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
		drm_gem_object_get(&bo->base);
		mutex_lock(&bo->lock);
		detach_slice_bo(qdev, bo);
		mutex_unlock(&bo->lock);
		drm_gem_object_put(&bo->base);
	}

	dbc->in_use = false;
	wake_up(&dbc->dbc_release);
}