Lines Matching refs:chunk

97 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
99 return chunk->drm;
104 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
106 chunk->pagemap.range.start;
108 return chunk->bo->offset + off;
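
These first matches are from the small helpers near the top of the file (all of these symbols belong to the kernel's nouveau_dmem code): lines 97-99 just return chunk->drm for a device-private page, and lines 104-108 compute that page's VRAM address. A minimal sketch of the second helper, reconstructed around the matched lines; the full expression feeding the continuation at line 106 is an assumption:

/*
 * Sketch: translate a device-private struct page into a VRAM address.
 * The page's byte offset from the start of the chunk's physical range is
 * added to the VRAM offset of the buffer object backing the chunk.
 */
unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
				chunk->pagemap.range.start;

	return chunk->bo->offset + off;
}
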
113 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
114 struct nouveau_dmem *dmem = chunk->drm->dmem;
120 WARN_ON(!chunk->callocated);
121 chunk->callocated--;
123 * FIXME when chunk->callocated reach 0 we should add the chunk to
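
Lines 113-123 sit in the pagemap's page_free() callback, the only place chunk->callocated is decremented. A hedged sketch of that callback, assuming the upstream pattern of chaining freed pages through zone_device_data onto a per-device free list protected by dmem->lock:

/* Sketch: page_free() callback of the chunk's dev_pagemap. */
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	struct nouveau_dmem *dmem = chunk->drm->dmem;

	spin_lock(&dmem->lock);
	/* Assumption: freed pages form a LIFO list through zone_device_data. */
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;

	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/* FIXME (line 123): once callocated hits 0 the chunk could be freed. */
	spin_unlock(&dmem->lock);
}
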
230 struct nouveau_dmem_chunk *chunk;
237 chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
238 if (chunk == NULL) {
251 chunk->drm = drm;
252 chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
253 chunk->pagemap.range.start = res->start;
254 chunk->pagemap.range.end = res->end;
255 chunk->pagemap.nr_range = 1;
256 chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
257 chunk->pagemap.owner = drm->dev;
260 &chunk->bo);
264 ptr = memremap_pages(&chunk->pagemap, numa_node_id());
271 list_add(&chunk->list, &drm->dmem->chunks);
274 pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
282 chunk->callocated++;
291 nouveau_bo_unpin_del(&chunk->bo);
293 release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
295 kfree(chunk);
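
Lines 230-295 are the chunk allocator. The matched lines give the order of operations: allocate the chunk, describe its physical range in chunk->pagemap, back it with a pinned VRAM buffer object, memremap the range as MEMORY_DEVICE_PRIVATE pages, and link the chunk into drm->dmem->chunks; lines 291-295 are the error unwind. A condensed sketch of that flow; the label names, the DMEM_CHUNK_SIZE constant, and the nouveau_dmem_bo_alloc() helper are stand-ins (the real BO allocation call, whose continuation is line 260, differs across kernel versions):

static int nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
	struct nouveau_dmem_chunk *chunk;
	struct resource *res;
	unsigned long pfn_first;
	void *ptr;
	int ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);		/* line 237 */
	if (chunk == NULL)
		return -ENOMEM;

	/* Carve an unused physical address range for the device pages. */
	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "nouveau_dmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out_free;
	}

	chunk->drm = drm;					/* lines 251-257 */
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.range.start = res->start;
	chunk->pagemap.range.end = res->end;
	chunk->pagemap.nr_range = 1;
	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
	chunk->pagemap.owner = drm->dev;

	/* Hypothetical stand-in for the pinned-VRAM BO allocation (line 260). */
	ret = nouveau_dmem_bo_alloc(drm, DMEM_CHUNK_SIZE, &chunk->bo);
	if (ret)
		goto out_release;

	ptr = memremap_pages(&chunk->pagemap, numa_node_id());	/* line 264 */
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		goto out_bo;
	}

	list_add(&chunk->list, &drm->dmem->chunks);	/* line 271, under dmem's mutex upstream */

	pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;	/* line 274 */
	/* Simplification: upstream pushes the chunk's other pages onto the
	 * free list and hands exactly one page back to the caller. */
	*ppage = pfn_to_page(pfn_first);
	chunk->callocated++;					/* line 282 */
	return 0;

out_bo:
	nouveau_bo_unpin_del(&chunk->bo);			/* line 291 */
out_release:
	release_mem_region(chunk->pagemap.range.start,
			   range_len(&chunk->pagemap.range));	/* line 293 */
out_free:
	kfree(chunk);						/* line 295 */
	return ret;
}
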
303 struct nouveau_dmem_chunk *chunk;
311 chunk = nouveau_page_to_chunk(page);
312 chunk->callocated++;
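
Lines 303-312 are from the device-page allocation path: a free page is popped off the per-device list when one exists, otherwise a fresh chunk is allocated, and either way the owning chunk's callocated count is bumped. A sketch under the same free-list assumption as above; the function name and the zone_device_page_init() call reflect recent upstream kernels and are assumptions here:

static struct page *nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	struct page *page = NULL;
	int ret;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		/* Reuse a previously freed page from this or another chunk. */
		page = drm->dmem->free_pages;
		drm->dmem->free_pages = page->zone_device_data;
		chunk = nouveau_page_to_chunk(page);	/* line 311 */
		chunk->callocated++;			/* line 312 */
		spin_unlock(&drm->dmem->lock);
	} else {
		spin_unlock(&drm->dmem->lock);
		ret = nouveau_dmem_chunk_alloc(drm, &page);
		if (ret)
			return NULL;
	}

	/* Re-initialize the ZONE_DEVICE page's refcount before handing it out. */
	zone_device_page_init(page);
	return page;
}
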
335 struct nouveau_dmem_chunk *chunk;
342 list_for_each_entry(chunk, &drm->dmem->chunks, list) {
343 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
353 struct nouveau_dmem_chunk *chunk;
359 list_for_each_entry(chunk, &drm->dmem->chunks, list)
360 nouveau_bo_unpin(chunk->bo);
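
Lines 335-360 come from the suspend/resume hooks, which walk the chunk list and pin or unpin each chunk's backing buffer object so the VRAM backing stays resident while device pages are live. A sketch of both walks; the chunk-list locking shown here is an assumption:

/* Sketch: re-pin every chunk's VRAM BO on resume... */
void nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);		/* assumed lock for the chunk list */
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);	/* line 343 */
		WARN_ON(ret);			/* pin failure is not really handled */
	}
	mutex_unlock(&drm->dmem->mutex);
}

/* ...and unpin them on suspend. */
void nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)	/* line 359 */
		nouveau_bo_unpin(chunk->bo);			/* line 360 */
	mutex_unlock(&drm->dmem->mutex);
}
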
365 * Evict all pages mapping a chunk.
368 nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
370 unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
379 migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
393 nouveau_dmem_copy_one(chunk->drm,
399 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
406 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
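
Lines 365-406 are the chunk eviction helper used on teardown: every still-allocated device-private page in the chunk is unmapped from CPU page tables, its contents are copied back to freshly allocated system pages, and the migration is completed under a fence so the DMA copies have finished before the pages are released. A condensed sketch of that path; it follows the migrate_device_range()/migrate_device_pages()/migrate_device_finalize() flow visible in the matched lines, but the fence helper names and the argument order of nouveau_dmem_copy_one() are reconstructed, not quoted:

static void nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
	unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;	/* line 370 */
	unsigned long *src_pfns, *dst_pfns;
	dma_addr_t *dma_addrs;
	struct nouveau_fence *fence = NULL;

	/* __GFP_NOFAIL: eviction must not fail partway through teardown. */
	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
	dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);

	/* Unmap and collect every migratable page in the chunk's pfn range. */
	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
			     npages);						/* line 379 */

	for (i = 0; i < npages; i++) {
		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
			struct page *dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);

			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
			/* DMA the VRAM page back into the new system page (line 393). */
			nouveau_dmem_copy_one(chunk->drm,
					      migrate_pfn_to_page(src_pfns[i]),
					      dpage, &dma_addrs[i]);
		}
	}

	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);	/* line 399 */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	nouveau_dmem_fence_done(&fence);
	migrate_device_finalize(src_pfns, dst_pfns, npages);

	kvfree(src_pfns);
	kvfree(dst_pfns);
	for (i = 0; i < npages; i++)
		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i],
			       PAGE_SIZE, DMA_BIDIRECTIONAL);		/* line 406 */
	kvfree(dma_addrs);
}

This ordering is why the teardown loop in the matches below (lines 420-428) evicts each chunk before unpinning its buffer object, removing it from the list, and tearing down its pagemap.
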
413 struct nouveau_dmem_chunk *chunk, *tmp;
420 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
421 nouveau_dmem_evict_chunk(chunk);
422 nouveau_bo_unpin_del(&chunk->bo);
423 WARN_ON(chunk->callocated);
424 list_del(&chunk->list);
425 memunmap_pages(&chunk->pagemap);
426 release_mem_region(chunk->pagemap.range.start,
427 range_len(&chunk->pagemap.range));
428 kfree(chunk);