Lines matching refs:drm (uses of the identifier drm in the nouveau DMEM device-memory code)
60 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
63 typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
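
The two typedefs at lines 60 and 63 are cut off by the listing width. As a point of reference, here is a minimal sketch of the callback table they feed, reconstructed only from what is visible elsewhere in this listing (the members assigned at lines 579-581 and the call sites at lines 152 and 637); the aperture enum name and the trailing parameters are assumptions, not the verbatim declarations.

/* Hedged sketch, not the verbatim kernel declarations. The enum name and the
 * parameters after "npages"/"length" are inferred from the call sites at
 * lines 152 and 637 (NOUVEAU_APER_HOST plus a DMA address).
 */
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper dst_aper, u64 dst_addr,
				      enum nouveau_aper src_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper dst_aper, u64 dst_addr);

struct nouveau_dmem_migrate_sketch {
	nouveau_migrate_copy_t copy_func;	/* assigned at line 579 */
	nouveau_clear_page_t clear_func;	/* assigned at line 580 */
	struct nouveau_channel *chan;		/* assigned at line 581 */
};
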
69 struct nouveau_drm *drm; member
81 struct nouveau_drm *drm; member
98 return chunk->drm; in page_to_drm()
113 struct nouveau_dmem *dmem = chunk->drm->dmem; in nouveau_dmem_page_free()
141 static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage, in nouveau_dmem_copy_one() argument
144 struct device *dev = drm->dev->dev; in nouveau_dmem_copy_one()
152 if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, in nouveau_dmem_copy_one()
163 struct nouveau_drm *drm = page_to_drm(vmf->page); in nouveau_dmem_migrate_to_ram() local
164 struct nouveau_dmem *dmem = drm->dmem; in nouveau_dmem_migrate_to_ram()
177 .pgmap_owner = drm->dev, in nouveau_dmem_migrate_to_ram()
205 ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr); in nouveau_dmem_migrate_to_ram()
215 dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); in nouveau_dmem_migrate_to_ram()
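
Lines 141-215 show the device-to-host copy used when a CPU fault pulls a page back to system RAM: map a destination page for DMA, have the copy engine write one page into it, and unmap once the copy has completed. A compressed, hedged reconstruction of that pattern follows; vram_addr_of() is a hypothetical stand-in for however the driver derives the source page's VRAM address, and the VRAM aperture on the source side is assumed by symmetry with NOUVEAU_APER_HOST at line 152.

/* Illustrative sketch only; fencing and most error handling are omitted. */
static u64 vram_addr_of(struct page *page);	/* hypothetical helper */

static int copy_one_to_host_sketch(struct nouveau_drm *drm, struct page *spage,
				   struct page *dpage, dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;			/* line 144 */

	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		return -EIO;

	/* Line 152: one page, destination aperture is host memory. */
	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
					 NOUVEAU_APER_VRAM /* assumed */,
					 vram_addr_of(spage))) {
		dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); /* cf. line 215 */
		return -EIO;
	}
	return 0;
}
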
227 nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) in nouveau_dmem_chunk_alloc() argument
250 chunk->drm = drm; in nouveau_dmem_chunk_alloc()
256 chunk->pagemap.owner = drm->dev; in nouveau_dmem_chunk_alloc()
258 ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0, in nouveau_dmem_chunk_alloc()
274 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_chunk_alloc()
275 list_add(&chunk->list, &drm->dmem->chunks); in nouveau_dmem_chunk_alloc()
276 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_chunk_alloc()
280 spin_lock(&drm->dmem->lock); in nouveau_dmem_chunk_alloc()
282 page->zone_device_data = drm->dmem->free_pages; in nouveau_dmem_chunk_alloc()
283 drm->dmem->free_pages = page; in nouveau_dmem_chunk_alloc()
287 spin_unlock(&drm->dmem->lock); in nouveau_dmem_chunk_alloc()
289 NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", in nouveau_dmem_chunk_alloc()
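
Worth noting across the chunk-allocation fragments: the owner cookie stored in the chunk's dev_pagemap at line 256 is the same pointer later passed as .pgmap_owner when a migration is set up (lines 177 and 703). That is how the migrate_vma machinery restricts itself to device-private pages belonging to this nouveau instance. A small sketch of the pairing; the pagemap type and the select flag are assumptions, as neither appears in the listing.

/* Chunk side, as in nouveau_dmem_chunk_alloc(): */
static void chunk_owner_sketch(struct nouveau_dmem_chunk *chunk,
			       struct nouveau_drm *drm)
{
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;	/* assumed; not in the listing */
	chunk->pagemap.owner = drm->dev;		/* line 256 */
}

/* Fault side: only pages whose pagemap owner matches .pgmap_owner are
 * collected for migration back to system RAM.
 */
static void fault_owner_sketch(struct migrate_vma *args, struct nouveau_drm *drm)
{
	args->pgmap_owner = drm->dev;				/* lines 177 and 703 */
	args->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;	/* assumed for the fault path */
}
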
307 nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm) in nouveau_dmem_page_alloc_locked() argument
313 spin_lock(&drm->dmem->lock); in nouveau_dmem_page_alloc_locked()
314 if (drm->dmem->free_pages) { in nouveau_dmem_page_alloc_locked()
315 page = drm->dmem->free_pages; in nouveau_dmem_page_alloc_locked()
316 drm->dmem->free_pages = page->zone_device_data; in nouveau_dmem_page_alloc_locked()
319 spin_unlock(&drm->dmem->lock); in nouveau_dmem_page_alloc_locked()
321 spin_unlock(&drm->dmem->lock); in nouveau_dmem_page_alloc_locked()
322 ret = nouveau_dmem_chunk_alloc(drm, &page); in nouveau_dmem_page_alloc_locked()
332 nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page) in nouveau_dmem_page_free_locked() argument
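
Lines 280-287 and 313-322 together describe a simple LIFO of free device pages, threaded through page->zone_device_data and guarded by drm->dmem->lock: chunk allocation pushes freshly created pages, the allocator pops them, and an empty list triggers a new chunk. A hedged sketch of the allocation side, with error handling and the per-chunk accounting omitted.

static struct page *page_alloc_locked_sketch(struct nouveau_drm *drm)
{
	struct page *page;

	spin_lock(&drm->dmem->lock);				/* line 313 */
	page = drm->dmem->free_pages;				/* line 315 */
	if (page) {
		/* Pop: the next free page is chained via zone_device_data. */
		drm->dmem->free_pages = page->zone_device_data;	/* line 316 */
		spin_unlock(&drm->dmem->lock);			/* line 319 */
		return page;
	}
	spin_unlock(&drm->dmem->lock);				/* line 321 */

	/* Empty list: carve a new chunk, which pushes its pages onto the
	 * list under the same lock (lines 280-287) and hands one back.
	 */
	if (nouveau_dmem_chunk_alloc(drm, &page))		/* line 322 */
		return NULL;
	return page;
}
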
339 nouveau_dmem_resume(struct nouveau_drm *drm) in nouveau_dmem_resume() argument
344 if (drm->dmem == NULL) in nouveau_dmem_resume()
347 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_resume()
348 list_for_each_entry(chunk, &drm->dmem->chunks, list) { in nouveau_dmem_resume()
353 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_resume()
357 nouveau_dmem_suspend(struct nouveau_drm *drm) in nouveau_dmem_suspend() argument
361 if (drm->dmem == NULL) in nouveau_dmem_suspend()
364 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_suspend()
365 list_for_each_entry(chunk, &drm->dmem->chunks, list) in nouveau_dmem_suspend()
367 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_suspend()
399 nouveau_dmem_copy_one(chunk->drm, in nouveau_dmem_evict_chunk()
405 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan); in nouveau_dmem_evict_chunk()
412 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); in nouveau_dmem_evict_chunk()
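
Lines 399-412 show the ordering used when a chunk is evicted before teardown: queue one copy per page, emit a single fence on the migration channel, and only unmap the DMA addresses after that. A compressed sketch of that ordering; the eviction bookkeeping that produces npages and dma_addrs is not part of the listing, and the fence wait is reduced to a comment.

static void evict_fence_sketch(struct nouveau_dmem_chunk *chunk,
			       unsigned long npages, dma_addr_t *dma_addrs)
{
	struct nouveau_fence *fence = NULL;
	unsigned long i;

	/* One nouveau_dmem_copy_one() per page was queued above (line 399). */
	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);	/* line 405 */
	/* Wait for the fence and drop the reference (details not in the listing). */

	for (i = 0; i < npages; i++)
		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i],
			       PAGE_SIZE, DMA_BIDIRECTIONAL);		/* line 412 */
}
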
417 nouveau_dmem_fini(struct nouveau_drm *drm) in nouveau_dmem_fini() argument
421 if (drm->dmem == NULL) in nouveau_dmem_fini()
424 mutex_lock(&drm->dmem->mutex); in nouveau_dmem_fini()
426 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { in nouveau_dmem_fini()
438 mutex_unlock(&drm->dmem->mutex); in nouveau_dmem_fini()
442 nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages, in nvc0b5_migrate_copy() argument
446 struct nvif_push *push = &drm->dmem->migrate.chan->chan.push; in nvc0b5_migrate_copy()
516 nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length, in nvc0b5_migrate_clear() argument
519 struct nvif_push *push = &drm->dmem->migrate.chan->chan.push; in nvc0b5_migrate_clear()
572 nouveau_dmem_migrate_init(struct nouveau_drm *drm) in nouveau_dmem_migrate_init() argument
574 switch (drm->ttm.copy.oclass) { in nouveau_dmem_migrate_init()
579 drm->dmem->migrate.copy_func = nvc0b5_migrate_copy; in nouveau_dmem_migrate_init()
580 drm->dmem->migrate.clear_func = nvc0b5_migrate_clear; in nouveau_dmem_migrate_init()
581 drm->dmem->migrate.chan = drm->ttm.chan; in nouveau_dmem_migrate_init()
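
nouveau_dmem_migrate_init() (lines 572-581) only binds the nvc0b5 copy/clear routines and the TTM copy channel when the bound copy-engine class is one they understand. A sketch of that dispatch; engine_class_supported() is a hypothetical placeholder for the switch cases at line 574, which the listing does not include.

static bool engine_class_supported(u32 oclass);	/* hypothetical predicate */

static int migrate_init_sketch(struct nouveau_drm *drm)
{
	/* Line 574 switches on the copy-engine class; the accepted classes
	 * are folded into the hypothetical predicate above.
	 */
	if (!engine_class_supported(drm->ttm.copy.oclass))
		return -ENODEV;

	drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;	/* line 579 */
	drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;	/* line 580 */
	drm->dmem->migrate.chan = drm->ttm.chan;		/* line 581 */
	return 0;
}
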
590 nouveau_dmem_init(struct nouveau_drm *drm) in nouveau_dmem_init() argument
595 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL) in nouveau_dmem_init()
598 if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL))) in nouveau_dmem_init()
601 drm->dmem->drm = drm; in nouveau_dmem_init()
602 mutex_init(&drm->dmem->mutex); in nouveau_dmem_init()
603 INIT_LIST_HEAD(&drm->dmem->chunks); in nouveau_dmem_init()
604 mutex_init(&drm->dmem->mutex); in nouveau_dmem_init()
605 spin_lock_init(&drm->dmem->lock); in nouveau_dmem_init()
608 ret = nouveau_dmem_migrate_init(drm); in nouveau_dmem_init()
610 kfree(drm->dmem); in nouveau_dmem_init()
611 drm->dmem = NULL; in nouveau_dmem_init()
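
nouveau_dmem_init() (lines 590-611) runs only on Pascal or newer, allocates the dmem state, initializes its locks and chunk list, and frees everything again if the migration setup fails. The listing shows mutex_init() at both lines 602 and 604; the sketch below keeps a single initialization.

static void dmem_init_sketch(struct nouveau_drm *drm)
{
	/* Only meaningful on Pascal or newer (line 595). */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL);	/* line 598 */
	if (!drm->dmem)
		return;

	drm->dmem->drm = drm;					/* line 601 */
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunks);
	spin_lock_init(&drm->dmem->lock);

	/* No usable copy engine: undo and run without device memory (lines 610-611). */
	if (nouveau_dmem_migrate_init(drm)) {
		kfree(drm->dmem);
		drm->dmem = NULL;
	}
}
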
615 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, in nouveau_dmem_migrate_copy_one() argument
619 struct device *dev = drm->dev->dev; in nouveau_dmem_migrate_copy_one()
627 dpage = nouveau_dmem_page_alloc_locked(drm); in nouveau_dmem_migrate_copy_one()
637 if (drm->dmem->migrate.copy_func(drm, 1, in nouveau_dmem_migrate_copy_one()
642 if (drm->dmem->migrate.clear_func(drm, page_size(dpage), in nouveau_dmem_migrate_copy_one()
657 nouveau_dmem_page_free_locked(drm, dpage); in nouveau_dmem_migrate_copy_one()
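
Lines 615-657 are the host-to-device counterpart of the copy at line 141, with one twist visible at lines 637-642: if a source page exists it is copied into the freshly allocated device page, otherwise the device page is cleared instead. A hedged sketch of just that branch; dpage_vram_addr is a hypothetical name for the destination page's VRAM address, which the listing does not show, and the VRAM aperture is assumed.

static int copy_or_clear_sketch(struct nouveau_drm *drm, struct page *spage,
				struct page *dpage, u64 dpage_vram_addr,
				dma_addr_t dma_addr)
{
	int ret;

	if (spage) {
		/* Line 637: copy the existing system page into device memory. */
		ret = drm->dmem->migrate.copy_func(drm, 1,
						   NOUVEAU_APER_VRAM /* assumed */,
						   dpage_vram_addr,
						   NOUVEAU_APER_HOST, dma_addr);
	} else {
		/* Line 642: nothing to copy, clear the fresh device page instead. */
		ret = drm->dmem->migrate.clear_func(drm, page_size(dpage),
						    NOUVEAU_APER_VRAM /* assumed */,
						    dpage_vram_addr);
	}
	if (ret)
		nouveau_dmem_page_free_locked(drm, dpage);	/* line 657 */
	return ret;
}
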
663 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm, in nouveau_dmem_migrate_chunk() argument
671 args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm, in nouveau_dmem_migrate_chunk()
673 if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma])) in nouveau_dmem_migrate_chunk()
678 nouveau_fence_new(&fence, drm->dmem->migrate.chan); in nouveau_dmem_migrate_chunk()
684 dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE, in nouveau_dmem_migrate_chunk()
691 nouveau_dmem_migrate_vma(struct nouveau_drm *drm, in nouveau_dmem_migrate_vma() argument
703 .pgmap_owner = drm->dev, in nouveau_dmem_migrate_vma()
710 if (drm->dmem == NULL) in nouveau_dmem_migrate_vma()
739 nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs, in nouveau_dmem_migrate_vma()
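
The last group (lines 691-739) is the system-RAM-to-VRAM direction: bail out when dmem was never set up (line 710), then walk the requested range in passes, with the migrate arguments owned by drm->dev (line 703) and each batch of collected pages handed on (line 739). A hedged sketch of that outer loop using the generic migrate_vma API; NPAGES_PER_PASS and migrate_chunk_sketch() are illustrative stand-ins, since the real batch size and the full argument list of nouveau_dmem_migrate_chunk() are not visible here, and the select flag is assumed for this direction.

#define NPAGES_PER_PASS 32	/* illustrative; the real batching is not shown */

/* Hypothetical helper standing in for nouveau_dmem_migrate_chunk() (line 739),
 * which copies or clears each collected page and calls migrate_vma_pages().
 */
static void migrate_chunk_sketch(struct nouveau_drm *drm, struct nouveau_svmm *svmm,
				 struct migrate_vma *args);

static int migrate_vma_sketch(struct nouveau_drm *drm, struct nouveau_svmm *svmm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	unsigned long src_pfns[NPAGES_PER_PASS], dst_pfns[NPAGES_PER_PASS];
	struct migrate_vma args = {
		.vma = vma,
		.src = src_pfns,
		.dst = dst_pfns,
		.pgmap_owner = drm->dev,		/* line 703 */
		.flags = MIGRATE_VMA_SELECT_SYSTEM,	/* assumed: source is system RAM */
	};
	unsigned long addr;

	if (drm->dmem == NULL)				/* line 710 */
		return -ENODEV;

	for (addr = start; addr < end; addr = args.end) {
		args.start = addr;
		args.end = min(end, addr + NPAGES_PER_PASS * PAGE_SIZE);
		if (migrate_vma_setup(&args))
			return -EFAULT;
		if (args.cpages)
			migrate_chunk_sketch(drm, svmm, &args);	/* cf. line 739 */
		migrate_vma_finalize(&args);
	}
	return 0;
}
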