Lines Matching +full:p +full:- +full:tile (free-text search hits, all from drivers/gpu/drm/nouveau/nouveau_bo.c)

30 #include <linux/dma-mapping.h>
52 * NV10-NV40 tiling helpers
60 int i = reg - drm->tile.reg; in nv10_bo_update_tile_region()
62 struct nvkm_fb_tile *tile = &fb->tile.region[i]; in nv10_bo_update_tile_region() local
64 nouveau_fence_unref(&reg->fence); in nv10_bo_update_tile_region()
66 if (tile->pitch) in nv10_bo_update_tile_region()
67 nvkm_fb_tile_fini(fb, i, tile); in nv10_bo_update_tile_region()
70 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile); in nv10_bo_update_tile_region()
72 nvkm_fb_tile_prog(fb, i, tile); in nv10_bo_update_tile_region()
79 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; in nv10_bo_get_tile_region() local
81 spin_lock(&drm->tile.lock); in nv10_bo_get_tile_region()
83 if (!tile->used && in nv10_bo_get_tile_region()
84 (!tile->fence || nouveau_fence_done(tile->fence))) in nv10_bo_get_tile_region()
85 tile->used = true; in nv10_bo_get_tile_region()
87 tile = NULL; in nv10_bo_get_tile_region()
89 spin_unlock(&drm->tile.lock); in nv10_bo_get_tile_region()
90 return tile; in nv10_bo_get_tile_region()
94 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, in nv10_bo_put_tile_region() argument
99 if (tile) { in nv10_bo_put_tile_region()
100 spin_lock(&drm->tile.lock); in nv10_bo_put_tile_region()
101 tile->fence = (struct nouveau_fence *)dma_fence_get(fence); in nv10_bo_put_tile_region()
102 tile->used = false; in nv10_bo_put_tile_region()
103 spin_unlock(&drm->tile.lock); in nv10_bo_put_tile_region()
113 struct nouveau_drm_tile *tile, *found = NULL; in nv10_bo_set_tiling() local
116 for (i = 0; i < fb->tile.regions; i++) { in nv10_bo_set_tiling()
117 tile = nv10_bo_get_tile_region(dev, i); in nv10_bo_set_tiling()
120 found = tile; in nv10_bo_set_tiling()
123 } else if (tile && fb->tile.region[i].pitch) { in nv10_bo_set_tiling()
124 /* Kill an unused tile region. */ in nv10_bo_set_tiling()
125 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); in nv10_bo_set_tiling()
128 nv10_bo_put_tile_region(dev, tile, NULL); in nv10_bo_set_tiling()
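The three helpers above make up the NV10-NV40 tile-region lifecycle: a free region is claimed under drm->tile.lock, (re)programmed via nvkm_fb_tile_init()/nvkm_fb_tile_prog(), and released together with the fence that last used it. A minimal sketch of how nv10_bo_set_tiling() appears to tie them together, reconstructed from the visible lines (the nvkm_fb lookup is left to the caller here, and error handling is omitted):

	/* Hedged sketch: scan all tile regions, claim the first free one for
	 * the new pitch, retire stale programming on the others, and program
	 * the claimed region last.  Mirrors the excerpted loop; not verbatim. */
	static struct nouveau_drm_tile *
	set_tiling_sketch(struct drm_device *dev, struct nvkm_fb *fb,
			  u32 addr, u32 size, u32 pitch, u32 zeta)
	{
		struct nouveau_drm_tile *tile, *found = NULL;
		int i;

		for (i = 0; i < fb->tile.regions; i++) {
			tile = nv10_bo_get_tile_region(dev, i);	/* marks ->used */

			if (pitch && !found) {
				found = tile;		/* keep this one */
				continue;
			} else if (tile && fb->tile.region[i].pitch) {
				/* Kill an unused tile region. */
				nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
			}

			nv10_bo_put_tile_region(dev, tile, NULL);
		}

		if (found)
			nv10_bo_update_tile_region(dev, found, addr, size,
						   pitch, zeta);
		return found;
	}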
139 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_ttm()
140 struct drm_device *dev = drm->dev; in nouveau_bo_del_ttm()
143 WARN_ON(nvbo->bo.pin_count > 0); in nouveau_bo_del_ttm()
145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); in nouveau_bo_del_ttm()
147 if (bo->base.import_attach) in nouveau_bo_del_ttm()
148 drm_prime_gem_destroy(&bo->base, bo->sg); in nouveau_bo_del_ttm()
154 if (bo->base.dev) { in nouveau_bo_del_ttm()
158 if (nvbo->no_share) in nouveau_bo_del_ttm()
159 drm_gem_object_put(nvbo->r_obj); in nouveau_bo_del_ttm()
161 drm_gem_object_release(&bo->base); in nouveau_bo_del_ttm()
163 dma_resv_fini(&bo->base._resv); in nouveau_bo_del_ttm()
172 x += y - 1; in roundup_64()
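The lone "x += y - 1" above is the first half of a 64-bit round-up helper; the rest is presumably the usual divide-and-multiply done via do_div() so 32-bit builds avoid a 64-by-32 modulo. A sketch under that assumption:

	static u64
	roundup_64(u64 x, u32 y)
	{
		x += y - 1;	/* bias so the truncating divide rounds up */
		do_div(x, y);	/* x = x / y; do_div() divides in place */
		return x * y;	/* back to a multiple of y */
	}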
180 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align()
181 struct nvif_device *device = &drm->client.device; in nouveau_bo_fixup_align()
183 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_fixup_align()
184 if (nvbo->mode) { in nouveau_bo_fixup_align()
185 if (device->info.chipset >= 0x40) { in nouveau_bo_fixup_align()
187 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
189 } else if (device->info.chipset >= 0x30) { in nouveau_bo_fixup_align()
191 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
193 } else if (device->info.chipset >= 0x20) { in nouveau_bo_fixup_align()
195 *size = roundup_64(*size, 64 * nvbo->mode); in nouveau_bo_fixup_align()
197 } else if (device->info.chipset >= 0x10) { in nouveau_bo_fixup_align()
199 *size = roundup_64(*size, 32 * nvbo->mode); in nouveau_bo_fixup_align()
203 *size = roundup_64(*size, (1 << nvbo->page)); in nouveau_bo_fixup_align()
204 *align = max((1 << nvbo->page), *align); in nouveau_bo_fixup_align()
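For pre-Tesla chips the excerpt shows the buffer size being padded according to the tile mode (64 * nvbo->mode on chipsets >= 0x20, 32 * nvbo->mode on 0x10), while Tesla and later pad to the page size chosen in nouveau_bo_alloc(); the *align constants themselves are cut off above. A small worked example under those visible rules:

	/* Illustrative numbers only (not from the excerpt):
	 * - chipset >= 0x40, nvbo->mode = 4, requested size = 100000 bytes:
	 *     size = roundup_64(100000, 64 * 4) = roundup_64(100000, 256)
	 *          = 100096
	 * - Tesla+, nvbo->page = 16 (64 KiB GPU pages), same request:
	 *     size  = roundup_64(100000, 1 << 16) = 131072
	 *     align = max(1 << 16, align)
	 */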
214 struct nouveau_drm *drm = cli->drm; in nouveau_bo_alloc()
216 struct nvif_mmu *mmu = &cli->mmu; in nouveau_bo_alloc()
217 struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm; in nouveau_bo_alloc()
218 int i, pi = -1; in nouveau_bo_alloc()
222 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
227 return ERR_PTR(-ENOMEM); in nouveau_bo_alloc()
229 INIT_LIST_HEAD(&nvbo->head); in nouveau_bo_alloc()
230 INIT_LIST_HEAD(&nvbo->entry); in nouveau_bo_alloc()
231 INIT_LIST_HEAD(&nvbo->vma_list); in nouveau_bo_alloc()
232 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
239 /* Determine if we can get a cache-coherent map, forcing in nouveau_bo_alloc()
243 nvbo->force_coherent = true; in nouveau_bo_alloc()
246 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); in nouveau_bo_alloc()
248 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { in nouveau_bo_alloc()
249 nvbo->kind = (tile_flags & 0x0000ff00) >> 8; in nouveau_bo_alloc()
250 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
252 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
255 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; in nouveau_bo_alloc()
256 } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_alloc()
257 nvbo->kind = (tile_flags & 0x00007f00) >> 8; in nouveau_bo_alloc()
258 nvbo->comp = (tile_flags & 0x00030000) >> 16; in nouveau_bo_alloc()
259 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { in nouveau_bo_alloc()
261 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
264 nvbo->zeta = (tile_flags & 0x00000007); in nouveau_bo_alloc()
266 nvbo->mode = tile_mode; in nouveau_bo_alloc()
270 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
273 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
278 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && in nouveau_bo_alloc()
279 (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
282 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
289 if (pi < 0 || !nvbo->comp || vmm->page[i].comp) in nouveau_bo_alloc()
293 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
299 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
303 if (nvbo->comp && !vmm->page[pi].comp) { in nouveau_bo_alloc()
304 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) in nouveau_bo_alloc()
305 nvbo->kind = mmu->kind[nvbo->kind]; in nouveau_bo_alloc()
306 nvbo->comp = 0; in nouveau_bo_alloc()
308 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
311 for (i = 0; i < vmm->page_nr; i++) { in nouveau_bo_alloc()
314 * size for the buffer up-front, and pre-allocate its in nouveau_bo_alloc()
319 if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) in nouveau_bo_alloc()
322 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) in nouveau_bo_alloc()
329 if (*size >= 1ULL << vmm->page[i].shift) in nouveau_bo_alloc()
334 return ERR_PTR(-EINVAL); in nouveau_bo_alloc()
336 nvbo->page = vmm->page[pi].shift; in nouveau_bo_alloc()
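The loops above choose the GPU page size for the buffer: page sizes the requested domain cannot supply are skipped (no VRAM backing for VRAM buffers, host pages larger than PAGE_SIZE for GART), entries that keep compression usable are preferred, and iteration stops at the largest page size that does not exceed the buffer. A simplified stand-alone sketch of that selection; struct page_desc stands in for the nvif vmm->page[] entries and is not a real nouveau type:

	struct page_desc { u8 shift; bool vram, host, comp; };

	/* Hedged sketch of the page-size pick in nouveau_bo_alloc(). */
	static int
	pick_page_index(const struct page_desc *pages, int nr, u64 size,
			bool want_vram, bool want_comp)
	{
		int i, pi = -1;

		for (i = 0; i < nr; i++) {
			/* Skip page sizes the chosen placement can't provide. */
			if (want_vram && !pages[i].vram)
				continue;
			if (!want_vram &&
			    (!pages[i].host || pages[i].shift > PAGE_SHIFT))
				continue;

			/* First usable entry, or one compatible with compression. */
			if (pi < 0 || !want_comp || pages[i].comp)
				pi = i;

			/* Stop once the buffer is at least one page large. */
			if (size >= 1ULL << pages[i].shift)
				break;
		}

		return pi;	/* -1: no usable page size, -EINVAL upstream */
	}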
357 INIT_LIST_HEAD(&nvbo->io_reserve_lru); in nouveau_bo_init()
359 ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type, in nouveau_bo_init()
360 &nvbo->placement, align >> PAGE_SHIFT, &ctx, in nouveau_bo_init()
368 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_init()
387 nvbo->bo.base.size = size; in nouveau_bo_new()
388 dma_resv_init(&nvbo->bo.base._resv); in nouveau_bo_new()
389 drm_vma_node_reset(&nvbo->bo.base.vma_node); in nouveau_bo_new()
394 drm_gem_gpuva_init(&nvbo->bo.base); in nouveau_bo_new()
484 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range()
485 u64 vram_size = drm->client.device.info.ram_size; in set_placement_range()
488 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && in set_placement_range()
489 nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && in set_placement_range()
490 nvbo->bo.base.size < vram_size / 4) { in set_placement_range()
494 * speed up when alpha-blending and depth-test are enabled in set_placement_range()
497 if (nvbo->zeta) { in set_placement_range()
504 for (i = 0; i < nvbo->placement.num_placement; ++i) { in set_placement_range()
505 nvbo->placements[i].fpfn = fpfn; in set_placement_range()
506 nvbo->placements[i].lpfn = lpfn; in set_placement_range()
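On Celsius-class boards, tiled buffers smaller than a quarter of VRAM get their placement range restricted so colour and depth buffers land in different halves of VRAM (per the excerpted comment, this speeds things up when alpha-blending and depth-test are enabled). The actual fpfn/lpfn values are not in the excerpt; a plausible sketch of the split, continuing inside set_placement_range():

	/* Assumed split (not shown above): zeta buffers in the upper half of
	 * VRAM, everything else in the lower half. */
	if (nvbo->zeta) {
		fpfn = (vram_size / 2) >> PAGE_SHIFT;
		lpfn = ~0;
	} else {
		fpfn = 0;
		lpfn = (vram_size / 2) >> PAGE_SHIFT;
	}

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = fpfn;
		nvbo->placements[i].lpfn = lpfn;
	}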
515 unsigned int *n = &nvbo->placement.num_placement; in nouveau_bo_placement_set()
516 struct ttm_place *pl = nvbo->placements; in nouveau_bo_placement_set()
540 nvbo->placement.placement = nvbo->placements; in nouveau_bo_placement_set()
546 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin_locked()
547 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin_locked()
551 dma_resv_assert_held(bo->base.resv); in nouveau_bo_pin_locked()
553 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_bo_pin_locked()
555 if (!nvbo->contig) { in nouveau_bo_pin_locked()
556 nvbo->contig = true; in nouveau_bo_pin_locked()
562 if (nvbo->bo.pin_count) { in nouveau_bo_pin_locked()
565 switch (bo->resource->mem_type) { in nouveau_bo_pin_locked()
577 NV_ERROR(drm, "bo %p pinned elsewhere: " in nouveau_bo_pin_locked()
579 bo->resource->mem_type, domain); in nouveau_bo_pin_locked()
580 ret = -EBUSY; in nouveau_bo_pin_locked()
582 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin_locked()
598 ttm_bo_pin(&nvbo->bo); in nouveau_bo_pin_locked()
600 switch (bo->resource->mem_type) { in nouveau_bo_pin_locked()
602 drm->gem.vram_available -= bo->base.size; in nouveau_bo_pin_locked()
605 drm->gem.gart_available -= bo->base.size; in nouveau_bo_pin_locked()
613 nvbo->contig = false; in nouveau_bo_pin_locked()
619 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin_locked()
620 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin_locked()
622 dma_resv_assert_held(bo->base.resv); in nouveau_bo_unpin_locked()
624 ttm_bo_unpin(&nvbo->bo); in nouveau_bo_unpin_locked()
625 if (!nvbo->bo.pin_count) { in nouveau_bo_unpin_locked()
626 switch (bo->resource->mem_type) { in nouveau_bo_unpin_locked()
628 drm->gem.vram_available += bo->base.size; in nouveau_bo_unpin_locked()
631 drm->gem.gart_available += bo->base.size; in nouveau_bo_unpin_locked()
641 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_pin()
655 struct ttm_buffer_object *bo = &nvbo->bo; in nouveau_bo_unpin()
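nouveau_bo_pin()/nouveau_bo_unpin() are the reserve-then-pin wrappers around the *_locked helpers above; pinning VRAM on Tesla+ temporarily forces nvbo->contig, and the pin/unpin paths adjust drm->gem.vram_available / gart_available. A hedged usage sketch; the (nvbo, domain, contig) parameter order is assumed from callers elsewhere in the driver, not shown in the excerpt:

	/* Sketch: pin a buffer into contiguous VRAM, use it, release it. */
	static int pin_for_display(struct nouveau_bo *nvbo)
	{
		int ret;

		ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
		if (ret)
			return ret;

		/* ... nvbo->offset is stable while the pin is held ... */

		nouveau_bo_unpin(nvbo);
		return 0;
	}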
672 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); in nouveau_bo_map()
676 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap); in nouveau_bo_map()
678 ttm_bo_unreserve(&nvbo->bo); in nouveau_bo_map()
688 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
694 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device()
695 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
698 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_device()
700 if (!ttm_dma->pages) { in nouveau_bo_sync_for_device()
701 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); in nouveau_bo_sync_for_device()
706 if (nvbo->force_coherent) in nouveau_bo_sync_for_device()
710 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_device()
711 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_device() local
714 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_device()
715 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_device()
720 dma_sync_single_for_device(drm->dev->dev, in nouveau_bo_sync_for_device()
721 ttm_dma->dma_address[i], in nouveau_bo_sync_for_device()
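Unless the buffer was forced coherent, nouveau_bo_sync_for_device() walks the TTM page array and merges physically contiguous runs of pages into a single dma_sync_single_for_device() call. The same loop, lifted into a stand-alone helper for clarity (parameter names mirror the ttm_dma fields in the excerpt):

	/* Hedged sketch of the run-coalescing CPU -> device sync. */
	static void
	sync_pages_for_device(struct device *dev, struct page **pages,
			      dma_addr_t *dma_address, unsigned long num_pages)
	{
		unsigned long i = 0;

		while (i < num_pages) {
			struct page *p = pages[i];
			unsigned long j, run = 1;

			/* Grow the run while struct page pointers stay adjacent. */
			for (j = i + 1; j < num_pages; ++j) {
				if (++p != pages[j])
					break;
				++run;
			}

			dma_sync_single_for_device(dev, dma_address[i],
						   run << PAGE_SHIFT,
						   DMA_TO_DEVICE);
			i += run;
		}
	}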
730 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu()
731 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
734 if (!ttm_dma || !ttm_dma->dma_address) in nouveau_bo_sync_for_cpu()
736 if (!ttm_dma->pages) { in nouveau_bo_sync_for_cpu()
737 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); in nouveau_bo_sync_for_cpu()
742 if (nvbo->force_coherent) in nouveau_bo_sync_for_cpu()
746 while (i < ttm_dma->num_pages) { in nouveau_bo_sync_for_cpu()
747 struct page *p = ttm_dma->pages[i]; in nouveau_bo_sync_for_cpu() local
750 for (j = i + 1; j < ttm_dma->num_pages; ++j) { in nouveau_bo_sync_for_cpu()
751 if (++p != ttm_dma->pages[j]) in nouveau_bo_sync_for_cpu()
757 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], in nouveau_bo_sync_for_cpu()
765 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_add_io_reserve_lru()
768 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
769 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
770 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
775 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_io_reserve_lru()
778 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
779 list_del_init(&nvbo->io_reserve_lru); in nouveau_bo_del_io_reserve_lru()
780 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
790 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); in nouveau_bo_validate()
803 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
817 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
831 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
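nouveau_bo_map() reserves the BO and kmaps its whole backing store into nvbo->kmap; the rd32/wr16/wr32 accessors above then go through ttm_kmap_obj_virtual(), checking is_iomem for I/O memory mappings. A hedged usage sketch (indices passed to the accessors are in 32-bit units, as the u32 *mem arithmetic suggests):

	/* Sketch: CPU-poke a mapped buffer object and read the value back. */
	static int poke_buffer(struct nouveau_bo *nvbo)
	{
		int ret;

		ret = nouveau_bo_map(nvbo);	/* reserve + ttm_bo_kmap() */
		if (ret)
			return ret;

		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
		if (nouveau_bo_rd32(nvbo, 0) != 0xdeadbeef)
			ret = -EIO;

		nouveau_bo_unmap(nvbo);
		return ret;
	}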
845 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_tt_create()
847 if (drm->agp.bridge) { in nouveau_ttm_tt_create()
848 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); in nouveau_ttm_tt_create()
863 return -EINVAL; in nouveau_ttm_tt_bind()
865 if (drm->agp.bridge) in nouveau_ttm_tt_bind()
877 if (drm->agp.bridge) { in nouveau_ttm_tt_unbind()
890 switch (bo->resource->mem_type) { in nouveau_bo_evict_flags()
900 *pl = nvbo->placement; in nouveau_bo_evict_flags()
907 struct nouveau_mem *old_mem = nouveau_mem(bo->resource); in nouveau_bo_move_prep()
909 struct nvif_vmm *vmm = &drm->client.vmm.vmm; in nouveau_bo_move_prep()
912 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0, in nouveau_bo_move_prep()
913 old_mem->mem.size, &old_mem->vma[0]); in nouveau_bo_move_prep()
917 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0, in nouveau_bo_move_prep()
918 new_mem->mem.size, &old_mem->vma[1]); in nouveau_bo_move_prep()
922 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
926 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
929 nvif_vmm_put(vmm, &old_mem->vma[1]); in nouveau_bo_move_prep()
930 nvif_vmm_put(vmm, &old_mem->vma[0]); in nouveau_bo_move_prep()
940 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move_m2mf()
941 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
942 struct nouveau_cli *cli = chan->cli; in nouveau_bo_move_m2mf()
950 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move_m2mf()
956 if (drm_drv_uses_atomic_modeset(drm->dev)) in nouveau_bo_move_m2mf()
957 mutex_lock(&cli->mutex); in nouveau_bo_move_m2mf()
959 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); in nouveau_bo_move_m2mf()
961 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); in nouveau_bo_move_m2mf()
965 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); in nouveau_bo_move_m2mf()
982 ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, in nouveau_bo_move_m2mf()
987 mutex_unlock(&cli->mutex); in nouveau_bo_move_m2mf()
1038 if (mthd->engine) in nouveau_bo_move_init()
1039 chan = drm->cechan; in nouveau_bo_move_init()
1041 chan = drm->channel; in nouveau_bo_move_init()
1045 ret = nvif_object_ctor(&chan->user, "ttmBoMove", in nouveau_bo_move_init()
1046 mthd->oclass | (mthd->engine << 16), in nouveau_bo_move_init()
1047 mthd->oclass, NULL, 0, in nouveau_bo_move_init()
1048 &drm->ttm.copy); in nouveau_bo_move_init()
1050 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
1052 nvif_object_dtor(&drm->ttm.copy); in nouveau_bo_move_init()
1056 drm->ttm.move = mthd->exec; in nouveau_bo_move_init()
1057 drm->ttm.chan = chan; in nouveau_bo_move_init()
1058 name = mthd->name; in nouveau_bo_move_init()
1061 } while ((++mthd)->exec); in nouveau_bo_move_init()
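nouveau_bo_move_init() probes a table of per-generation copy methods: each entry picks the copy-engine or GR channel, constructs its class, runs the entry's init hook, and on success records exec/chan as drm->ttm.move; the table ends with a NULL ->exec sentinel, as the closing do/while shows. The shape of such a table entry, inferred from the fields and calls visible above (the real table's name and contents are not in the excerpt):

	/* Hedged sketch of the method-table entry iterated by move_init(). */
	struct ttm_move_method {
		const char *name;	/* printed when selected */
		int engine;		/* non-zero: use drm->cechan */
		s32 oclass;		/* copy/M2MF class to instantiate */
		int (*init)(struct nouveau_channel *chan, u32 handle);
		int (*exec)(struct nouveau_channel *chan,
			    struct ttm_buffer_object *bo,
			    struct ttm_resource *old_reg,
			    struct ttm_resource *new_reg);
	};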
1075 if (bo->destroy != nouveau_bo_del_ttm) in nouveau_bo_move_ntfy()
1080 if (mem && new_reg->mem_type != TTM_PL_SYSTEM && in nouveau_bo_move_ntfy()
1081 mem->mem.page == nvbo->page) { in nouveau_bo_move_ntfy()
1082 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
1087 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_bo_move_ntfy()
1088 ret = dma_resv_wait_timeout(bo->base.resv, in nouveau_bo_move_ntfy()
1098 nvbo->offset = (new_reg->start << PAGE_SHIFT); in nouveau_bo_move_ntfy()
1106 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_bind()
1107 struct drm_device *dev = drm->dev; in nouveau_bo_vm_bind()
1109 u64 offset = new_reg->start << PAGE_SHIFT; in nouveau_bo_vm_bind()
1112 if (new_reg->mem_type != TTM_PL_VRAM) in nouveau_bo_vm_bind()
1115 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { in nouveau_bo_vm_bind()
1116 *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size, in nouveau_bo_vm_bind()
1117 nvbo->mode, nvbo->zeta); in nouveau_bo_vm_bind()
1128 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_cleanup()
1129 struct drm_device *dev = drm->dev; in nouveau_bo_vm_cleanup()
1133 ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE, in nouveau_bo_vm_cleanup()
1136 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE, in nouveau_bo_vm_cleanup()
1149 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move()
1151 struct drm_gem_object *obj = &bo->base; in nouveau_bo_move()
1152 struct ttm_resource *old_reg = bo->resource; in nouveau_bo_move()
1156 if (new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1157 ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); in nouveau_bo_move()
1168 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1175 if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1176 !bo->ttm)) { in nouveau_bo_move()
1181 if (old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1182 new_reg->mem_type == TTM_PL_TT) { in nouveau_bo_move()
1187 if (old_reg->mem_type == TTM_PL_TT && in nouveau_bo_move()
1188 new_reg->mem_type == TTM_PL_SYSTEM) { in nouveau_bo_move()
1189 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); in nouveau_bo_move()
1190 ttm_resource_free(bo, &bo->resource); in nouveau_bo_move()
1196 if (drm->ttm.move) { in nouveau_bo_move()
1197 if ((old_reg->mem_type == TTM_PL_SYSTEM && in nouveau_bo_move()
1198 new_reg->mem_type == TTM_PL_VRAM) || in nouveau_bo_move()
1199 (old_reg->mem_type == TTM_PL_VRAM && in nouveau_bo_move()
1200 new_reg->mem_type == TTM_PL_SYSTEM)) { in nouveau_bo_move()
1201 hop->fpfn = 0; in nouveau_bo_move()
1202 hop->lpfn = 0; in nouveau_bo_move()
1203 hop->mem_type = TTM_PL_TT; in nouveau_bo_move()
1204 hop->flags = 0; in nouveau_bo_move()
1205 return -EMULTIHOP; in nouveau_bo_move()
1210 ret = -ENODEV; in nouveau_bo_move()
1218 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1222 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); in nouveau_bo_move()
1226 nouveau_bo_move_ntfy(bo, bo->resource); in nouveau_bo_move()
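nouveau_bo_move() never copies straight between SYSTEM and VRAM: with an accelerated mover available, a transition that crosses that boundary fills in a GART hop placement and returns -EMULTIHOP so TTM redoes the move in two legs through TT. The visible hop setup, restated as a self-contained helper (the helper name is illustrative):

	/* Hedged sketch: request a TT bounce for SYSTEM <-> VRAM moves. */
	static int request_gart_hop(struct ttm_resource *old_reg,
				    struct ttm_resource *new_reg,
				    struct ttm_place *hop)
	{
		bool sys_to_vram = old_reg->mem_type == TTM_PL_SYSTEM &&
				   new_reg->mem_type == TTM_PL_VRAM;
		bool vram_to_sys = old_reg->mem_type == TTM_PL_VRAM &&
				   new_reg->mem_type == TTM_PL_SYSTEM;

		if (!sys_to_vram && !vram_to_sys)
			return 0;	/* direct move is fine */

		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = 0;
		return -EMULTIHOP;	/* TTM retries via the TT hop */
	}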
1238 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_free_locked()
1239 switch (reg->mem_type) { in nouveau_ttm_io_mem_free_locked()
1241 if (mem->kind) in nouveau_ttm_io_mem_free_locked()
1242 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1245 nvif_object_unmap_handle(&mem->mem.object); in nouveau_ttm_io_mem_free_locked()
1259 struct nvif_mmu *mmu = &drm->client.mmu; in nouveau_ttm_io_mem_reserve()
1262 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1264 switch (reg->mem_type) { in nouveau_ttm_io_mem_reserve()
1271 if (drm->agp.bridge) { in nouveau_ttm_io_mem_reserve()
1272 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1273 drm->agp.base; in nouveau_ttm_io_mem_reserve()
1274 reg->bus.is_iomem = !drm->agp.cma; in nouveau_ttm_io_mem_reserve()
1275 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1278 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || in nouveau_ttm_io_mem_reserve()
1279 !mem->kind) { in nouveau_ttm_io_mem_reserve()
1286 reg->bus.offset = (reg->start << PAGE_SHIFT) + in nouveau_ttm_io_mem_reserve()
1287 device->func->resource_addr(device, NVKM_BAR1_FB); in nouveau_ttm_io_mem_reserve()
1288 reg->bus.is_iomem = true; in nouveau_ttm_io_mem_reserve()
1291 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_ttm_io_mem_reserve()
1292 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) in nouveau_ttm_io_mem_reserve()
1293 reg->bus.caching = ttm_uncached; in nouveau_ttm_io_mem_reserve()
1295 reg->bus.caching = ttm_write_combined; in nouveau_ttm_io_mem_reserve()
1297 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_reserve()
1305 switch (mem->mem.object.oclass) { in nouveau_ttm_io_mem_reserve()
1309 args.nv50.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1310 args.nv50.comp = mem->comp; in nouveau_ttm_io_mem_reserve()
1316 args.gf100.kind = mem->kind; in nouveau_ttm_io_mem_reserve()
1324 ret = nvif_object_map_handle(&mem->mem.object, in nouveau_ttm_io_mem_reserve()
1329 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1333 reg->bus.offset = handle; in nouveau_ttm_io_mem_reserve()
1338 ret = -EINVAL; in nouveau_ttm_io_mem_reserve()
1342 if (ret == -ENOSPC) { in nouveau_ttm_io_mem_reserve()
1345 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, in nouveau_ttm_io_mem_reserve()
1349 list_del_init(&nvbo->io_reserve_lru); in nouveau_ttm_io_mem_reserve()
1350 drm_vma_node_unmap(&nvbo->bo.base.vma_node, in nouveau_ttm_io_mem_reserve()
1351 bdev->dev_mapping); in nouveau_ttm_io_mem_reserve()
1352 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); in nouveau_ttm_io_mem_reserve()
1353 nvbo->bo.resource->bus.offset = 0; in nouveau_ttm_io_mem_reserve()
1354 nvbo->bo.resource->bus.addr = NULL; in nouveau_ttm_io_mem_reserve()
1359 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
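If mapping through BAR1 runs out of space (-ENOSPC), nouveau_ttm_io_mem_reserve() evicts the least recently used BO from drm->ttm.io_reserve_lru, dropping its CPU mappings and BAR window, and retries; everything runs under drm->ttm.io_reserve_mutex. A sketch of that evict-and-retry pattern, with the elided mapping attempt abstracted behind a try_map callback (the goto-retry structure is assumed):

	/* Hedged sketch of the -ENOSPC retry path; try_map is a stand-in. */
	static int map_with_lru_eviction(struct nouveau_drm *drm,
					 struct ttm_device *bdev,
					 int (*try_map)(void *ctx), void *ctx)
	{
		int ret;

	retry:
		ret = try_map(ctx);
		if (ret == -ENOSPC) {
			struct nouveau_bo *nvbo;

			nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
							typeof(*nvbo),
							io_reserve_lru);
			if (nvbo) {
				/* Unmap CPU side, free its BAR mapping, retry. */
				list_del_init(&nvbo->io_reserve_lru);
				drm_vma_node_unmap(&nvbo->bo.base.vma_node,
						   bdev->dev_mapping);
				nouveau_ttm_io_mem_free_locked(drm,
							       nvbo->bo.resource);
				nvbo->bo.resource->bus.offset = 0;
				nvbo->bo.resource->bus.addr = NULL;
				goto retry;
			}
		}
		return ret;
	}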
1368 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1370 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1375 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_fault_reserve_notify()
1378 u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT; in nouveau_ttm_fault_reserve_notify()
1384 if (bo->resource->mem_type != TTM_PL_VRAM) { in nouveau_ttm_fault_reserve_notify()
1385 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1386 !nvbo->kind) in nouveau_ttm_fault_reserve_notify()
1389 if (bo->resource->mem_type != TTM_PL_SYSTEM) in nouveau_ttm_fault_reserve_notify()
1396 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1397 bo->resource->start + PFN_UP(bo->resource->size) < mappable) in nouveau_ttm_fault_reserve_notify()
1400 for (i = 0; i < nvbo->placement.num_placement; ++i) { in nouveau_ttm_fault_reserve_notify()
1401 nvbo->placements[i].fpfn = 0; in nouveau_ttm_fault_reserve_notify()
1402 nvbo->placements[i].lpfn = mappable; in nouveau_ttm_fault_reserve_notify()
1409 if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS)) in nouveau_ttm_fault_reserve_notify()
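Only part of VRAM is CPU-visible through BAR1 on pre-Tesla boards, so a fault on a VRAM BO that lies past the mappable boundary rewrites its placements to fit the window and revalidates; -EBUSY/-ERESTARTSYS from that validate are presumably turned into a retryable fault status. The clamping step, restated as a tiny helper:

	/* Sketch: constrain every placement entry to the BAR1-mappable pages,
	 * where mappable = resource_size(BAR1) >> PAGE_SHIFT as shown above. */
	static void clamp_to_mappable(struct nouveau_bo *nvbo, u32 mappable)
	{
		unsigned int i;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}
	}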
1424 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); in nouveau_ttm_tt_populate()
1429 if (slave && ttm->sg) { in nouveau_ttm_tt_populate()
1430 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address, in nouveau_ttm_tt_populate()
1431 ttm->num_pages); in nouveau_ttm_tt_populate()
1437 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); in nouveau_ttm_tt_populate()
1445 bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL); in nouveau_ttm_tt_unpopulate()
1454 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); in nouveau_ttm_tt_unpopulate()
1463 if (drm->agp.bridge) { in nouveau_ttm_tt_destroy()
1474 struct dma_resv *resv = nvbo->bo.base.resv; in nouveau_bo_fence()
1479 dma_resv_add_fence(resv, &fence->base, exclusive ? in nouveau_bo_fence()
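nouveau_bo_fence() publishes a fence on the BO's reservation object, with DMA_RESV_USAGE_WRITE for exclusive access (the non-exclusive arm is cut off above; READ usage there is an assumption). A hedged usage sketch; holding the resv lock and reserving fence slots first is a general dma-resv rule rather than something shown in the excerpt:

	/* Sketch: attach a just-emitted write fence to a reserved BO. */
	static int publish_write_fence(struct nouveau_bo *nvbo,
				       struct nouveau_fence *fence)
	{
		struct dma_resv *resv = nvbo->bo.base.resv;
		int ret;

		/* Caller must hold the resv lock (BO reserved). */
		ret = dma_resv_reserve_fences(resv, 1);
		if (ret)
			return ret;

		nouveau_bo_fence(nvbo, fence, true /* exclusive writer */);
		return 0;
	}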