/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	ret = nouveau_ttm_fault_reserve_notify(bo);
	if (ret)
		goto error_unlock;

	nouveau_bo_del_io_reserve_lru(bo);
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	nouveau_bo_add_io_reserve_lru(bo);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

error_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
	.fault = nouveau_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

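	/* A buffer created with NOUVEAU_GEM_DOMAIN_NO_SHARE uses the
	 * reservation object of the VM it was allocated for; refuse to
	 * open it through a client whose VM uses a different one.
	 */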
	if (nvbo->no_share && uvmm &&
	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
		return -EPERM;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	/* only create a VMA on binding */
	if (!nouveau_cli_uvmm(cli))
		ret = nouveau_vma_new(nvbo, vmm, &vma);
	else
		ret = 0;
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	if (nouveau_cli_uvmm(cli))
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
	.free = nouveau_gem_object_del,
	.open = nouveau_gem_object_open,
	.close = nouveau_gem_object_close,
	.export = nouveau_gem_prime_export,
	.pin = nouveau_gem_prime_pin,
	.unpin = nouveau_gem_prime_unpin,
	.get_sg_table = nouveau_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.vm_ops = &nouveau_ttm_vm_ops,
};

int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct dma_resv *resv = NULL;
	struct nouveau_bo *nvbo;
	int ret;

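	/* VM-private (NO_SHARE) buffers share the dma_resv of the client's
	 * GPUVM, which only exists for clients using the VM_BIND uAPI.
	 */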
	if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
		if (unlikely(!uvmm))
			return -EINVAL;

		resv = drm_gpuvm_resv(&uvmm->base);
	}

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags, false);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
	nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		return ret;
	}

	if (resv)
		dma_resv_lock(resv, NULL);

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);

	if (resv)
		dma_resv_unlock(resv);

	if (ret)
		return ret;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	if (nvbo->no_share) {
		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
		drm_gem_object_get(nvbo->r_obj);
	}

	*pnvbo = nvbo;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
	    !nouveau_cli_uvmm(cli)) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	} else
		rep->offset = 0;

	rep->size = nvbo->bo.base.size;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* If uvmm wasn't initialized until now, disable it completely to
	 * prevent userspace from mixing up UAPIs.
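	 * (I.e. a client that allocates a legacy GEM object here can no
	 * longer initialize the VM_BIND uAPI afterwards.)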
	 */
	nouveau_cli_disable_uvmm_noinit(cli);

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}

static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_domains = 0;

	if (!domains)
		return -EINVAL;

	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->resource->mem_type == TTM_PL_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->resource->mem_type == TTM_PL_TT)
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				  b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}

static int
validate_list(struct nouveau_channel *chan,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_cli *cli = chan->cli;
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	} else if (ret > 0) {
		*apply_relocs = true;
	}

	return 0;
}

static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;
		long lret;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.base.size)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
					     DMA_RESV_USAGE_BOOKKEEP,
					     false, 15 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;

		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
				  ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}

int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (unlikely(nouveau_cli_uvmm(cli)))
		return nouveau_abi16_put(abi16, -ENOSYS);

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			  req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			  req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			  req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

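		/* IB-mode channel: submit each push entry directly; the
		 * NOUVEAU_GEM_PUSHBUF_NO_PREFETCH flag is encoded in the
		 * length field and must be masked out of the real length.
		 */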
		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			u64 addr = vma->addr + push[i].offset;
			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

			nv50_dma_push(chan, addr, length, no_prefetch);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(&chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  PFN_UP(nvbo->bo.base.size),
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(&chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(&chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(&fence, chan);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
				     dma_resv_usage_rw(write), true,
				     no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}