/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

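/*
 * Round a BO's size/alignment up to the requirements of the target
 * chipset.  Pre-NV50 cards address tiled surfaces through fixed-size
 * memory tiles, so tiled BOs are padded to a multiple of the tile
 * pitch and aligned to a chipset-dependent boundary; NV50 and newer
 * simply round to the BO's (small or large) page size.
 */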
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

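/*
 * Allocate and initialise a nouveau_bo backed by a TTM buffer object.
 * Size and alignment are fixed up for tiling constraints before the BO
 * is handed to ttm_bo_init(); on failure TTM invokes the destructor
 * itself, so the error path must not free nvbo again.
 *
 * A typical caller might do (illustrative sketch only):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */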
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm() if it fails. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

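/*
 * Build the normal and "busy" placement lists for a BO.  The busy list
 * is the fallback TTM uses when the preferred placement can't be
 * satisfied without waiting; pinned BOs additionally get
 * TTM_PL_FLAG_NO_EVICT so the evictor leaves them alone.
 */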
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

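/*
 * Pin a BO into the memory type given by @memtype and account for the
 * aperture space it consumes.  Pins are refcounted: only the first pin
 * actually validates the BO, and each pin must be balanced by a later
 * nouveau_bo_unpin() so the NO_EVICT flag is eventually dropped.
 * Pinning a BO that is already pinned into a different memory type is
 * refused with -EINVAL.
 */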
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

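/*
 * Kmap the entire BO into kernel virtual address space.  The mapping
 * is cached in nvbo->kmap and used by the nouveau_bo_rd/wr accessors
 * below; callers must pair this with nouveau_bo_unmap().
 */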
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	return ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			       no_wait_reserve, no_wait_gpu);
}

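/*
 * 16- and 32-bit accessors for a kmapped BO.  ttm_bo_kmap() may hand
 * back either a cached kernel pointer or an ioremapped one, so each
 * access checks is_iomem and uses the matching plain-load or MMIO
 * helper.  @index is in units of the access size, not bytes.
 */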
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

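/*
 * Create the TTM backend for a BO's backing pages, depending on which
 * flavour of GART the card exposes: AGP uses TTM's AGP backend, while
 * PDMA/HW GARTs go through the nouveau sgdma code.
 */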
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
						dummy_read_page);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

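/*
 * Fermi (NVC0) M2MF copy: the engine moves up to 2047 lines per
 * request, so the copy is emitted as PAGE_SIZE-wide "lines", 2047
 * pages at a time, with the 64-bit source/destination offsets taken
 * from the temporary VMAs set up in nouveau_bo_move_m2mf().
 */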
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

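/*
 * NV50 M2MF copy: transfers are chunked into at most 4 MiB per
 * iteration, programmed as a 64-byte-pitch 2D copy (height = amount /
 * stride), and the linear vs tiled layout of each side is reprogrammed
 * before every chunk since either side may be a tiled VRAM surface.
 */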
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

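/*
 * NV04 M2MF copy: no per-BO virtual addresses on these cards, so the
 * source/destination are addressed by 32-bit offsets relative to the
 * channel's VRAM/GART context DMA objects, again in 2047-line chunks
 * of PAGE_SIZE.
 */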
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

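/*
 * Map a ttm_mem_reg into a channel's address space with a temporary
 * VMA so the M2MF engine can reach it during a move.  VRAM nodes are
 * mapped directly, anything else as a scatter/gather list.
 */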
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

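/*
 * Moving out of VRAM can't be done by the GPU in a single hop: the BO
 * is first copied into a temporary GART placement with M2MF, then
 * moved on to its final placement by ttm_bo_move_ttm().
 */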
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

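/*
 * The reverse path for moves into VRAM: TTM first binds the BO into a
 * temporary GART placement, then M2MF copies it to the final VRAM
 * placement.
 */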
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

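/*
 * Move notifier: keep every VMA that maps this BO in sync with its new
 * placement, remapping for VRAM, remapping as scatter/gather for
 * small-page GART placements, and unmapping for anything else.
 */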
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0,
					  new_mem->num_pages << PAGE_SHIFT,
					  new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

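/*
 * Top-level TTM move hook.  In order of preference: a no-op "fake"
 * copy when the BO has no backing pages yet, a memcpy before the
 * channel is up, then the hardware M2MF paths (with GART bounce
 * placements when one side is system memory), and finally a memcpy
 * fallback if the accelerated move fails.  Pre-NV50 tiling regions are
 * updated around the move.
 */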
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled (on nv50+),
	 * we've got nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure the bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

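/*
 * Populate a ttm_tt: allocate backing pages and set up DMA addresses.
 * AGP and swiotlb configurations delegate to their TTM helpers; the
 * normal path allocates from the TTM page pool and maps each page for
 * bidirectional PCI DMA, unwinding every mapping made so far if one of
 * them fails.
 */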
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#if __OS_HAS_AGP
	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						   0, PAGE_SIZE,
						   PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind the mappings made so far, down to and
			 * including page 0 (i itself failed to map)
			 */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#if __OS_HAS_AGP
	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

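/*
 * Create a VMA for this BO in the given address space and map the BO's
 * current backing into it.  The VMA is linked into nvbo->vma_list so
 * nouveau_bo_move_ntfy() can keep it up to date, and it starts with a
 * reference count of one; drop it with nouveau_bo_vma_del().
 */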
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}
1209