Lines matching refs:nvbo (references to the struct nouveau_bo pointer "nvbo" in the nouveau GEM code). Each entry gives the source line number, the matched source line, and the containing function; the trailing "local" or "argument" tag says whether nvbo is a local variable or a function parameter at that site.
79 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_object_del() local
80 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_gem_object_del()
90 ttm_bo_put(&nvbo->bo); in nouveau_gem_object_del()
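
The nouveau_gem_object_del() hits above (lines 79-90) only make sense given how the objects nest: the DRM GEM object is embedded in the TTM buffer object, which is embedded in struct nouveau_bo. A minimal sketch of that layout and of the final-unreference step, assuming nouveau_gem_object() is a container_of()-style accessor (a hypothetical reconstruction, not the driver's verbatim code). These and the following sketches assume the driver's internal headers and types are in scope.

    #include <drm/drm_gem.h>
    #include <drm/ttm/ttm_bo.h>

    /* Illustrative layout only: the real struct nouveau_bo (nouveau_bo.h)
     * carries far more state, some of which the later sketches rely on. */
    struct nouveau_bo {
        struct ttm_buffer_object bo;    /* bo.base is the drm_gem_object */
        /* ... driver-private fields elided ... */
    };

    /* Assumed shape of the accessor behind the
     * nvbo = nouveau_gem_object(gem) lines in the listing. */
    static inline struct nouveau_bo *
    nouveau_gem_object(struct drm_gem_object *gem)
    {
        return gem ? container_of(gem, struct nouveau_bo, bo.base) : NULL;
    }

    /* Final-unreference path suggested by lines 79-90: recover the
     * nouveau_bo and drop the TTM reference keeping it alive. */
    static void example_gem_object_del(struct drm_gem_object *gem)
    {
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        ttm_bo_put(&nvbo->bo);
    }
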
100 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_object_open() local
101 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_gem_object_open()
111 if (nvbo->no_share && uvmm && in nouveau_gem_object_open()
112 drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv) in nouveau_gem_object_open()
115 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_gem_object_open()
127 ret = nouveau_vma_new(nvbo, vmm, &vma); in nouveau_gem_object_open()
133 ttm_bo_unreserve(&nvbo->bo); in nouveau_gem_object_open()
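
nouveau_gem_object_open() (lines 100-133) shows the per-client mapping pattern: reserve the BO, create a VMA in that client's VMM, unreserve. A hedged sketch of just that pattern; the uvmm/no_share check on line 112 and the error handling are reduced to a comment, and the example_* name is hypothetical.

    /* Assumes the driver's internal headers ("nouveau_bo.h", "nouveau_vmm.h",
     * "nouveau_gem.h") are in scope. Illustrative only. */
    static int example_gem_object_open(struct nouveau_bo *nvbo,
                                       struct nouveau_vmm *vmm)
    {
        struct nouveau_vma *vma;
        int ret;

        /* No-share BOs must stay within the GPUVM whose reservation
         * object they borrow (the drm_gpuvm_resv() check on line 112). */

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
            return ret;

        ret = nouveau_vma_new(nvbo, vmm, &vma);  /* map into this client's VMM */

        ttm_bo_unreserve(&nvbo->bo);
        return ret;
    }
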
159 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) in nouveau_gem_object_unmap() argument
186 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_object_close() local
187 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_gem_object_close()
199 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_gem_object_close()
203 vma = nouveau_vma_find(nvbo, vmm); in nouveau_gem_object_close()
208 nouveau_gem_object_unmap(nvbo, vma); in nouveau_gem_object_close()
214 ttm_bo_unreserve(&nvbo->bo); in nouveau_gem_object_close()
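
nouveau_gem_object_close() (lines 186-214) is the mirror image: look up the client's VMA under the BO reservation and unmap it. A simplified sketch under the same assumptions as above.

    static void example_gem_object_close(struct nouveau_bo *nvbo,
                                         struct nouveau_vmm *vmm)
    {
        struct nouveau_vma *vma;

        if (ttm_bo_reserve(&nvbo->bo, false, false, NULL))
            return;

        vma = nouveau_vma_find(nvbo, vmm);
        if (vma)
            nouveau_gem_object_unmap(nvbo, vma);  /* see line 159 */

        ttm_bo_unreserve(&nvbo->bo);
    }
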
239 struct nouveau_bo *nvbo; in nouveau_gem_new() local
252 nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, in nouveau_gem_new()
254 if (IS_ERR(nvbo)) in nouveau_gem_new()
255 return PTR_ERR(nvbo); in nouveau_gem_new()
257 nvbo->bo.base.funcs = &nouveau_gem_object_funcs; in nouveau_gem_new()
258 nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE; in nouveau_gem_new()
262 ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size); in nouveau_gem_new()
264 drm_gem_object_release(&nvbo->bo.base); in nouveau_gem_new()
265 kfree(nvbo); in nouveau_gem_new()
272 ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv); in nouveau_gem_new()
284 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | in nouveau_gem_new()
287 nvbo->valid_domains &= domain; in nouveau_gem_new()
289 if (nvbo->no_share) { in nouveau_gem_new()
290 nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base); in nouveau_gem_new()
291 drm_gem_object_get(nvbo->r_obj); in nouveau_gem_new()
294 *pnvbo = nvbo; in nouveau_gem_new()
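
nouveau_gem_new() (lines 239-294) allocates the nouveau_bo, initializes the embedded GEM object, backs it with TTM and then clamps the valid placement domains to what the caller asked for. A condensed, hedged reconstruction of that flow; the truncated nouveau_bo_alloc() argument, the resv/no_share wiring and the GPU-family check around the domain clamp are assumptions or elisions, and error unwinding is trimmed.

    static int example_gem_new(struct nouveau_cli *cli, struct nouveau_drm *drm,
                               u64 size, int align, u32 domain,
                               u32 tile_mode, u32 tile_flags,
                               struct nouveau_bo **pnvbo)
    {
        struct nouveau_bo *nvbo;
        int ret;

        /* Last argument assumed to be tile_flags; it is truncated in the
         * listing (line 252). */
        nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
                                tile_flags);
        if (IS_ERR(nvbo))
            return PTR_ERR(nvbo);

        nvbo->bo.base.funcs = &nouveau_gem_object_funcs;

        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
        if (ret) {
            drm_gem_object_release(&nvbo->bo.base);
            kfree(nvbo);
            return ret;
        }

        /* The real call passes a reservation object for no-share BOs. */
        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
        if (ret)
            return ret;  /* cleanup assumed to happen in the BO destructor */

        /* Restrict later migrations to the domains the caller allowed.
         * The driver only narrows this on newer GPU families; that check
         * is elided here. */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        nvbo->valid_domains &= domain;

        *pnvbo = nvbo;
        return 0;
    }
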
303 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_info() local
307 if (is_power_of_2(nvbo->valid_domains)) in nouveau_gem_info()
308 rep->domain = nvbo->valid_domains; in nouveau_gem_info()
309 else if (nvbo->bo.resource->mem_type == TTM_PL_TT) in nouveau_gem_info()
313 rep->offset = nvbo->offset; in nouveau_gem_info()
316 vma = nouveau_vma_find(nvbo, vmm); in nouveau_gem_info()
324 rep->size = nvbo->bo.base.size; in nouveau_gem_info()
325 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node); in nouveau_gem_info()
326 rep->tile_mode = nvbo->mode; in nouveau_gem_info()
327 rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG; in nouveau_gem_info()
329 rep->tile_flags |= nvbo->kind << 8; in nouveau_gem_info()
332 rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16; in nouveau_gem_info()
334 rep->tile_flags |= nvbo->zeta; in nouveau_gem_info()
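
nouveau_gem_info() (lines 303-334) translates nvbo state into the drm_nouveau_gem_info uapi reply: placement domain, offset, size, mmap handle and tiling data. A trimmed sketch of which nvbo fields feed which reply fields; the per-client VMA offset (line 316) and the per-generation kind/comp/zeta packing are left as comments.

    #include <drm/nouveau_drm.h>    /* uapi struct drm_nouveau_gem_info */

    static void example_gem_info(struct nouveau_bo *nvbo,
                                 struct drm_nouveau_gem_info *rep)
    {
        if (is_power_of_2(nvbo->valid_domains))
            rep->domain = nvbo->valid_domains;  /* only one legal placement */
        else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
            rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
            rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        /* Clients with a per-client VM report the VMA address instead
         * (nouveau_vma_find() on line 316); elided here. */
        rep->offset = nvbo->offset;

        rep->size = nvbo->bo.base.size;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
        rep->tile_mode = nvbo->mode;
        rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
        /* Newer chips also pack nvbo->kind, nvbo->comp and nvbo->zeta into
         * tile_flags (lines 329-334). */
    }
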
344 struct nouveau_bo *nvbo = NULL; in nouveau_gem_ioctl_new() local
354 req->info.tile_flags, &nvbo); in nouveau_gem_ioctl_new()
358 ret = drm_gem_handle_create(file_priv, &nvbo->bo.base, in nouveau_gem_ioctl_new()
361 ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info); in nouveau_gem_ioctl_new()
367 drm_gem_object_put(&nvbo->bo.base); in nouveau_gem_ioctl_new()
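
The GEM_NEW ioctl handler (lines 344-367) glues the two sketches above together: create the BO, publish a handle for userspace, fill in the info reply, then drop the creation reference since the handle now holds its own. A small sketch of that ordering, reusing the hypothetical example_gem_new()/example_gem_info() helpers and assuming the usual drm_nouveau_gem_new uapi layout.

    static int example_ioctl_new(struct nouveau_cli *cli, struct nouveau_drm *drm,
                                 struct drm_file *file_priv,
                                 struct drm_nouveau_gem_new *req)
    {
        struct nouveau_bo *nvbo = NULL;
        int ret;

        ret = example_gem_new(cli, drm, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
            return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
                                    &req->info.handle);
        if (ret == 0)
            example_gem_info(nvbo, &req->info);

        /* Drop the creation reference; the handle keeps the object alive. */
        drm_gem_object_put(&nvbo->bo.base);
        return ret;
    }
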
375 struct nouveau_bo *nvbo = nouveau_gem_object(gem); in nouveau_gem_set_domain() local
376 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_gem_set_domain()
377 uint32_t domains = valid_domains & nvbo->valid_domains & in nouveau_gem_set_domain()
400 nouveau_bo_placement_set(nvbo, pref_domains, valid_domains); in nouveau_gem_set_domain()
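
nouveau_gem_set_domain() (lines 375-400) intersects the domains a pushbuf entry asks for with the domains the BO was created with, then reprograms the TTM placement. A simplified sketch; the real code also weighs read against write domains when both VRAM and GART remain.

    static int example_set_domain(struct nouveau_bo *nvbo,
                                  u32 read_domains, u32 write_domains,
                                  u32 valid_domains)
    {
        u32 domains = valid_domains & nvbo->valid_domains &
                      (write_domains ? write_domains : read_domains);
        u32 pref_domains = 0;

        if (!domains)
            return -EINVAL;

        /* Preference between VRAM and GART is more nuanced in the driver
         * (current placement is taken into account); simplified here. */
        if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
            pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
        else
            pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
        return 0;
    }
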
415 struct nouveau_bo *nvbo; in validate_fini_no_ticket() local
419 nvbo = list_entry(op->list.next, struct nouveau_bo, entry); in validate_fini_no_ticket()
420 b = &pbbo[nvbo->pbbo_index]; in validate_fini_no_ticket()
423 nouveau_bo_fence(nvbo, fence, !!b->write_domains); in validate_fini_no_ticket()
434 if (unlikely(nvbo->validate_mapped)) { in validate_fini_no_ticket()
435 ttm_bo_kunmap(&nvbo->kmap); in validate_fini_no_ticket()
436 nvbo->validate_mapped = false; in validate_fini_no_ticket()
439 list_del(&nvbo->entry); in validate_fini_no_ticket()
440 nvbo->reserved_by = NULL; in validate_fini_no_ticket()
441 ttm_bo_unreserve(&nvbo->bo); in validate_fini_no_ticket()
442 drm_gem_object_put(&nvbo->bo.base); in validate_fini_no_ticket()
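
validate_fini_no_ticket() (lines 415-442) is the cleanup loop that runs after submission (or on error): attach the end-of-batch fence, undo any kmap done for relocations, clear the ownership marker, unreserve and drop the lookup reference. A sketch with the list/op plumbing reduced to a plain list of nouveau_bo.entry nodes.

    static void example_validate_fini(struct list_head *list,
                                      struct nouveau_fence *fence,
                                      struct drm_nouveau_gem_pushbuf_bo *pbbo)
    {
        while (!list_empty(list)) {
            struct nouveau_bo *nvbo =
                list_entry(list->next, struct nouveau_bo, entry);
            struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

            /* Write accesses get an exclusive fence slot. */
            if (fence)
                nouveau_bo_fence(nvbo, fence, !!b->write_domains);

            /* Undo the kmap done while applying relocations. */
            if (unlikely(nvbo->validate_mapped)) {
                ttm_bo_kunmap(&nvbo->kmap);
                nvbo->validate_mapped = false;
            }

            list_del(&nvbo->entry);
            nvbo->reserved_by = NULL;
            ttm_bo_unreserve(&nvbo->bo);
            drm_gem_object_put(&nvbo->bo.base);
        }
    }
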
478 struct nouveau_bo *nvbo; in validate_init() local
486 nvbo = nouveau_gem_object(gem); in validate_init()
487 if (nvbo == res_bo) { in validate_init()
493 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { in validate_init()
501 ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket); in validate_init()
508 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, in validate_init()
511 res_bo = nvbo; in validate_init()
522 struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm); in validate_init()
531 b->user_priv = (uint64_t)(unsigned long)nvbo; in validate_init()
534 nvbo->reserved_by = file_priv; in validate_init()
535 nvbo->pbbo_index = i; in validate_init()
538 list_add_tail(&nvbo->entry, &both_list); in validate_init()
541 list_add_tail(&nvbo->entry, &vram_list); in validate_init()
544 list_add_tail(&nvbo->entry, &gart_list); in validate_init()
548 list_add_tail(&nvbo->entry, &both_list); in validate_init()
552 if (nvbo == res_bo) in validate_init()
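
validate_init() (lines 478-552) reserves every BO named in the pushbuf under a single ww_acquire_ctx ticket so the whole set can be locked without deadlocking against other submitters; on contention the contended BO is re-reserved via ttm_bo_reserve_slowpath() (line 508) and the loop restarts. A per-BO sketch of the bookkeeping; the backoff/restart machinery and the sorting onto VRAM/GART/both lists (lines 538-548) are described in comments only.

    static int example_reserve_one(struct nouveau_bo *nvbo,
                                   struct drm_file *file_priv,
                                   struct drm_nouveau_gem_pushbuf_bo *b,
                                   unsigned int index,
                                   struct ww_acquire_ctx *ticket,
                                   struct list_head *list)
    {
        int ret;

        /* The same BO listed twice in one pushbuf is rejected. */
        if (nvbo->reserved_by && nvbo->reserved_by == file_priv)
            return -EINVAL;

        ret = ttm_bo_reserve(&nvbo->bo, true, false, ticket);
        if (ret) {
            /* On ww-mutex contention the real code unwinds everything it
             * holds, reserves the contended BO with
             * ttm_bo_reserve_slowpath(), and restarts the loop. */
            return ret;
        }

        /* Remember which pushbuf entry this BO belongs to, and mark it as
         * held by this client until the validate_fini step runs. */
        b->user_priv = (uint64_t)(unsigned long)nvbo;
        nvbo->reserved_by = file_priv;
        nvbo->pbbo_index = index;

        /* The driver sorts the BO onto a VRAM, GART or "both" list based
         * on b->valid_domains; a single list is used here. */
        list_add_tail(&nvbo->entry, list);
        return 0;
    }
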
572 struct nouveau_bo *nvbo; in validate_list() local
575 list_for_each_entry(nvbo, list, entry) { in validate_list()
576 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; in validate_list()
578 ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains, in validate_list()
586 ret = nouveau_bo_validate(nvbo, true, false); in validate_list()
593 ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true); in validate_list()
601 if (nvbo->offset == b->presumed.offset && in validate_list()
602 ((nvbo->bo.resource->mem_type == TTM_PL_VRAM && in validate_list()
604 (nvbo->bo.resource->mem_type == TTM_PL_TT && in validate_list()
608 if (nvbo->bo.resource->mem_type == TTM_PL_TT) in validate_list()
612 b->presumed.offset = nvbo->offset; in validate_list()
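
validate_list() (lines 572-612) then walks the reserved BOs: choose a placement, let TTM migrate the BO, synchronize against the target channel, and refresh the offset userspace presumed when it built the push buffer. A sketch reusing the hypothetical example_set_domain() above in place of nouveau_gem_set_domain().

    static int example_validate_list(struct nouveau_channel *chan,
                                     struct list_head *list,
                                     struct drm_nouveau_gem_pushbuf_bo *pbbo)
    {
        struct nouveau_bo *nvbo;
        int ret;

        list_for_each_entry(nvbo, list, entry) {
            struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

            ret = example_set_domain(nvbo, b->read_domains,
                                     b->write_domains, b->valid_domains);
            if (ret)
                return ret;

            /* Let TTM move the BO into the chosen placement. */
            ret = nouveau_bo_validate(nvbo, true, false);
            if (ret)
                return ret;

            /* Order this submission against prior users of the BO. */
            ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
            if (ret)
                return ret;

            /* Tell userspace when its presumed GPU offset went stale so
             * relocations get applied. (The real check also verifies the
             * presumed domain against the current placement.) */
            if (nvbo->offset != b->presumed.offset) {
                if (nvbo->bo.resource->mem_type == TTM_PL_TT)
                    b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                else
                    b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->offset;
                b->presumed.valid = 0;
            }
        }
        return 0;
    }
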
668 struct nouveau_bo *nvbo; in nouveau_gem_pushbuf_reloc_apply() local
687 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; in nouveau_gem_pushbuf_reloc_apply()
690 nvbo->bo.base.size)) { in nouveau_gem_pushbuf_reloc_apply()
696 if (!nvbo->kmap.virtual) { in nouveau_gem_pushbuf_reloc_apply()
697 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), in nouveau_gem_pushbuf_reloc_apply()
698 &nvbo->kmap); in nouveau_gem_pushbuf_reloc_apply()
703 nvbo->validate_mapped = true; in nouveau_gem_pushbuf_reloc_apply()
721 lret = dma_resv_wait_timeout(nvbo->bo.base.resv, in nouveau_gem_pushbuf_reloc_apply()
737 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); in nouveau_gem_pushbuf_reloc_apply()
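
nouveau_gem_pushbuf_reloc_apply() (lines 668-737) patches relocation values into a BO that userspace referenced by pushbuf index: bounds-check the offset, kmap the BO once through TTM, wait out pending work on its reservation object, then write the value. A sketch of that sequence; the reloc value computation is omitted, and the dma_resv usage level and timeout are assumptions.

    static int example_apply_reloc(struct nouveau_bo *nvbo,
                                   u32 reloc_bo_offset, u32 data)
    {
        long lret;
        int ret;

        /* Reject relocations that would land outside the BO. */
        if (unlikely(reloc_bo_offset + 4 > nvbo->bo.base.size))
            return -EINVAL;

        /* Map the whole BO once; the validate_fini step unmaps it again. */
        if (!nvbo->kmap.virtual) {
            ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
                              &nvbo->kmap);
            if (ret)
                return ret;
            nvbo->validate_mapped = true;
        }

        /* Wait for outstanding work before patching the buffer. */
        lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
                                     DMA_RESV_USAGE_BOOKKEEP, false, 15 * HZ);
        if (lret == 0)
            return -EBUSY;
        if (lret < 0)
            return lret;

        nouveau_bo_wr32(nvbo, reloc_bo_offset >> 2, data);
        return 0;
    }
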
880 struct nouveau_bo *nvbo = (void *)(unsigned long) in nouveau_gem_ioctl_pushbuf() local
883 PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset); in nouveau_gem_ioctl_pushbuf()
894 struct nouveau_bo *nvbo = (void *)(unsigned long) in nouveau_gem_ioctl_pushbuf() local
901 if (!nvbo->kmap.virtual) { in nouveau_gem_ioctl_pushbuf()
902 ret = ttm_bo_kmap(&nvbo->bo, 0, in nouveau_gem_ioctl_pushbuf()
903 PFN_UP(nvbo->bo.base.size), in nouveau_gem_ioctl_pushbuf()
904 &nvbo->kmap); in nouveau_gem_ioctl_pushbuf()
909 nvbo->validate_mapped = true; in nouveau_gem_ioctl_pushbuf()
912 nouveau_bo_wr32(nvbo, (push[i].offset + in nouveau_gem_ioctl_pushbuf()
916 PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset); in nouveau_gem_ioctl_pushbuf()
985 struct nouveau_bo *nvbo; in nouveau_gem_ioctl_cpu_prep() local
994 nvbo = nouveau_gem_object(gem); in nouveau_gem_ioctl_cpu_prep()
996 lret = dma_resv_wait_timeout(nvbo->bo.base.resv, in nouveau_gem_ioctl_cpu_prep()
1006 nouveau_bo_sync_for_cpu(nvbo); in nouveau_gem_ioctl_cpu_prep()
1018 struct nouveau_bo *nvbo; in nouveau_gem_ioctl_cpu_fini() local
1023 nvbo = nouveau_gem_object(gem); in nouveau_gem_ioctl_cpu_fini()
1025 nouveau_bo_sync_for_device(nvbo); in nouveau_gem_ioctl_cpu_fini()
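
The CPU_PREP/CPU_FINI ioctls (lines 985-1025) bracket CPU access to a BO: CPU_PREP waits for GPU work on the reservation object and makes the BO coherent for the CPU, CPU_FINI hands it back to the device. A simplified sketch; the write-flag handling, the no-wait path and the errno mapping are reduced.

    static int example_cpu_prep(struct nouveau_bo *nvbo, bool write)
    {
        long lret;

        /* Writers wait for all fences, readers only for writers. */
        lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
                                     dma_resv_usage_rw(write), true, 30 * HZ);
        if (lret == 0)
            return -EBUSY;
        if (lret < 0)
            return lret;

        nouveau_bo_sync_for_cpu(nvbo);
        return 0;
    }

    static void example_cpu_fini(struct nouveau_bo *nvbo)
    {
        nouveau_bo_sync_for_device(nvbo);
    }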