// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

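/*
 * Each struct ttm_placement below pairs a preferred placement list with a
 * busy_placement list: TTM tries the former first and falls back to the
 * latter when the preferred placements cannot be satisfied, e.g. under
 * memory pressure.
 */
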
struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

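/**
 * struct vmw_ttm_tt - vmwgfx-private TTM tt state
 *
 * @dma_ttm: The base struct ttm_dma_tt.
 * @dev_priv: Pointer to the device-private structure.
 * @gmr_id: The GMR or MOB id the tt is bound to. Valid while bound.
 * @mob: Pointer to a struct vmw_mob when the tt is bound to a MOB.
 * @mem_type: The memory type (VMW_PL_GMR or VMW_PL_MOB) the tt is bound to.
 * @sgt: Scatter-gather table backing the DMA mappings.
 * @vsgt: Mapping-mode-agnostic view of the page and DMA address lists.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether device DMA mappings have been set up.
 */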
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset into @vsgt, in pages, at which iteration starts
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

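/*
 * Example (illustrative sketch only): a typical walk over all DMA
 * addresses of a buffer object, assuming @bo is reserved and its pages
 * are mapped (see vmw_bo_sg_table() and vmw_bo_map_dma() below):
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		... use addr ...
 *	}
 *
 * Note the leading vmw_piter_next() call: per the convention documented
 * above, the iterator is not valid until advanced once.
 */
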
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free DMA mappings previously set up by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it violates the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

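/*
 * Illustrative sketch only: if CPU access to the pages were needed while
 * the DMA mapping is live, the DMA API would require bracketing it like
 * this (assuming @vmw_tt has been mapped by vmw_ttm_map_for_dma()):
 *
 *	struct device *dev = vmw_tt->dev_priv->dev->dev;
 *
 *	dma_sync_sg_for_cpu(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
 *			    DMA_BIDIRECTIONAL);
 *	... CPU reads / writes the pages ...
 *	dma_sync_sg_for_device(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
 *			       DMA_BIDIRECTIONAL);
 *
 * As the comment above vmw_ttm_map_for_dma() notes, the driver instead
 * only selects DMA modes for which these syncs are (at most) trivial.
 */
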
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mode and make
 * sure the TTM pages are visible to the device. Allocate storage for the
 * device mappings. If a mapping has already been performed, indicated by
 * the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}

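/*
 * Illustrative sketch only (hypothetical caller): reserving the buffer
 * object around the mapping, as the comment above requires:
 *
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_bo_map_dma(bo);
 *	ttm_bo_unreserve(bo);
 */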

/**
 * vmw_bo_unmap_dma - Tear down any device DMA mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object must
 * not be freed by the caller.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
					ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else {
		ret = ttm_pool_populate(ttm, ctx);
	}

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else {
		ttm_pool_unpopulate(ttm);
	}
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on both the
		 * number of slots and the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};
881