/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);

MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printf("    has_type: %d\n", man->has_type);
	printf("    use_type: %d\n", man->use_type);
	printf("    flags: 0x%08X\n", man->flags);
	printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printf("    size: %ju\n", (uintmax_t)man->size);
	printf("    available_caching: 0x%08X\n", man->available_caching);
	printf("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	printf("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		printf("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

#if 0
static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
    char *buffer)
{

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}
#endif

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	MPASS(atomic_read(&bo->list_kref) == 0);
	MPASS(atomic_read(&bo->kref) == 0);
	MPASS(atomic_read(&bo->cpu_writers) == 0);
	MPASS(bo->sync_obj == NULL);
	MPASS(bo->mem.mm_node == NULL);
	MPASS(list_empty(&bo->lru));
	MPASS(list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		free(bo, M_TTM_BO);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

int
ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
{
	const char *wmsg;
	int flags, ret;

	ret = 0;
	if (interruptible) {
		flags = PCATCH;
		wmsg = "ttbowi";
	} else {
		flags = 0;
		wmsg = "ttbowu";
	}
	while (!ttm_bo_is_reserved(bo)) {
		ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
		if (ret != 0)
			break;
	}
	return (ret);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	MPASS(ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		MPASS(list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		refcount_acquire(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			refcount_acquire(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	int ret;

	while (unlikely(atomic_read(&bo->reserved) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
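			 * (The unsigned subtraction below is a wrap-safe
			 * comparison: it is true when the holder's val_seq
			 * is not newer than our sequence, so the holder
			 * keeps priority and we return -EAGAIN.)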
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
		if (unlikely(ret))
			return ret;
	}

	atomic_set(&bo->reserved, 1);
	if (use_sequence) {
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wakeup(bo);

		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	u_int old;

	old = atomic_fetchadd_int(&bo->list_kref, -count);
	if (old <= count) {
		if (never_free)
			panic("ttm_bo_ref_buf");
		ttm_bo_release_list(bo);
	}
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	mtx_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	mtx_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return ret;
}

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wakeup(bo);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	mtx_lock(&glob->lru_lock);
	ttm_bo_unreserve_locked(bo);
	mtx_unlock(&glob->lru_lock);
}
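
#if 0
/*
 * Illustrative sketch (not compiled): the usual calling pattern for the
 * reservation interface above.  The buffer object pointer and the work
 * done while reserved are placeholders, not part of this driver.
 */
static void ttm_bo_reserve_usage_sketch(struct ttm_buffer_object *bo)
{
	int ret;

	/* Interruptible, blocking reserve, no deadlock-avoidance sequence. */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret != 0)
		return;		/* Interrupted; nothing is held. */

	/* ... operate on the reserved buffer, e.g. ttm_bo_validate() ... */

	ttm_bo_unreserve(bo);	/* Puts the bo back on its LRU lists. */
}
#endif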

/*
 * Called with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* FALLTHROUGH */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		printf("[TTM] Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printf("[TTM] Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with the bo reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver-specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);
	wakeup(bo);

	/*
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 *
	 * This function only needs protection against the final kref_put.
	 */
	mb();
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	mtx_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

	mtx_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		mtx_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		mtx_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	mtx_unlock(&bdev->fence_lock);

	if (!ret) {
		atomic_set(&bo->reserved, 0);
		wakeup(bo);
	}

	refcount_acquire(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	mtx_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
	    ((hz / 100) < 1) ? 1 : hz / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		mtx_unlock(&bdev->fence_lock);

		atomic_set(&bo->reserved, 0);
		wakeup(bo);
		mtx_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		mtx_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		mtx_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		mtx_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			mtx_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wakeup(bo);
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	mtx_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	mtx_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	refcount_acquire(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			refcount_acquire(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			mtx_unlock(&glob->lru_lock);

		if (refcount_release(&entry->list_kref))
			ttm_bo_release_list(entry);
		entry = nentry;

		if (ret || !entry)
			goto out;

		mtx_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	mtx_unlock(&glob->lru_lock);
out:
	if (entry && refcount_release(&entry->list_kref))
		ttm_bo_release_list(entry);
	return ret;
}

static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
{
	struct ttm_bo_device *bdev = arg;

	if (ttm_bo_delayed_delete(bdev, false)) {
		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
		    ((hz / 100) < 1) ? 1 : hz / 100);
	}
}

static void ttm_bo_release(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	rw_wlock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
		RB_REMOVE(ttm_bo_device_buffer_objects,
		    &bdev->addr_space_rb, bo);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	rw_wunlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	if (refcount_release(&bo->kref))
		ttm_bo_release(bo);
}

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	int pending;

	taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
	if (pending)
		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
	return (pending);
}

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched) {
		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
		    ((hz / 100) < 1) ? 1 : hz / 100);
	}
}
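
/*
 * Usage note (illustrative, not taken from this file): drivers typically
 * bracket operations that must not race with delayed destruction, such as
 * suspend or GPU reset, with this pair:
 *
 *	resched = ttm_bo_lock_delayed_workqueue(bdev);
 *	... quiesce the device ...
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 *
 * so a pending delayed-delete task is cancelled and, if one was pending,
 * rescheduled afterwards.
 */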

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	mtx_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			printf("[TTM] Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	MPASS(ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTART) {
			printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTART)
			printf("[TTM] Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	mtx_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	refcount_acquire(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		if (refcount_release(&bo->list_kref))
			ttm_bo_release_list(bo);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	mtx_unlock(&glob->lru_lock);

	MPASS(ret == 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTART)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
	return ret;
}

static
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	MPASS(ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	mtx_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;

	MPASS(ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
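
#if 0
/*
 * Illustrative sketch (not compiled): how a driver might call
 * ttm_bo_validate() to move a reserved buffer into VRAM.  The single-entry
 * placement list and the TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC flags are just
 * an example; the bo is assumed to already be reserved by the caller.
 */
static int ttm_bo_validate_usage_sketch(struct ttm_buffer_object *bo)
{
	struct ttm_placement placement;
	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;

	placement.fpfn = 0;			/* no page-range restriction */
	placement.lpfn = 0;
	placement.num_placement = 1;		/* preferred placements */
	placement.placement = &flags;
	placement.num_busy_placement = 1;	/* fallback under pressure */
	placement.busy_placement = &flags;

	return ttm_bo_validate(bo, &placement, true, false);
}
#endif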

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	MPASS(!((placement->fpfn || placement->lpfn) &&
	    (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));

	return 0;
}

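/**
 * ttm_bo_init
 *
 * Initialize a caller-allocated buffer object: account @acc_size against
 * the global memory limit, set up the lists and reference counts, reserve
 * the bo, allocate a device address-space node for ttm_bo_type_device and
 * ttm_bo_type_sg objects, validate the bo into @placement and unreserve it.
 * On error the bo is unreserved and unreferenced, which frees it through
 * @destroy (or free(9) when no destroy callback is given).
 */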
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		printf("[TTM] Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			free(bo, M_TTM_BO);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printf("[TTM] Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			free(bo, M_TTM_BO);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	refcount_init(&bo->kref, 1);
	refcount_init(&bo->list_kref, 1);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
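
/*
 * Usage note (illustrative): drivers that embed a ttm_buffer_object in a
 * larger, driver-private structure pass that structure's size here, e.g.
 * "ttm_bo_dma_acc_size(bdev, size, sizeof(struct mydrv_bo))", where
 * "struct mydrv_bo" is a hypothetical driver type.  ttm_bo_create() below
 * does the same for a bare ttm_buffer_object.
 */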

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct vm_object *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	mtx_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		mtx_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printf("[TTM] Cleanup eviction failed\n");
			}
		}
		mtx_lock(&glob->lru_lock);
	}
	mtx_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printf("[TTM] Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printf("[TTM] Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	MPASS(type < TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	MPASS(!man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	sx_init(&man->io_reserve_mutex, "ttmman");
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}

static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
{

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	vm_page_free(glob->dummy_read_page);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	if (refcount_release(&glob->kobj_ref))
		ttm_bo_global_kobj_release(glob);
}

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	sx_init(&glob->device_list_mutex, "ttmdlm");
	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
	    1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printf("[TTM] Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	refcount_init(&glob->kobj_ref, 1);
	return (0);

out_no_shrink:
	vm_page_free(glob->dummy_read_page);
out_no_drp:
	free(glob, M_DRM_GLOBAL);
	return ret;
}

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printf("[TTM] DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	sx_xlock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	sx_xunlock(&glob->device_list_mutex);

	if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	mtx_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	mtx_unlock(&glob->lru_lock);

	MPASS(drm_mm_clean(&bdev->addr_space_mm));
	rw_wlock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	rw_wunlock(&bdev->vm_lock);

	return ret;
}

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rw_init(&bdev->vm_lock, "ttmvml");
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	RB_INIT(&bdev->addr_space_rb);
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
	    ttm_bo_delayed_workqueue, bdev);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
	sx_xlock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	sx_xunlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
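
#if 0
/*
 * Illustrative sketch (not compiled): the initialization order a driver
 * would typically follow.  ttm_bo_device_init() sets up only the system
 * memory type; GPU-visible types such as TTM_PL_VRAM or TTM_PL_TT are then
 * registered with ttm_bo_init_mm().  The file page offset and the VRAM
 * size below are placeholder values chosen by the driver.
 */
static int ttm_bo_device_setup_sketch(struct ttm_bo_device *bdev,
    struct ttm_bo_global *glob, struct ttm_bo_driver *driver,
    uint64_t file_page_offset, unsigned long vram_pages)
{
	int ret;

	ret = ttm_bo_device_init(bdev, glob, driver, file_page_offset, false);
	if (ret != 0)
		return ret;

	/* Register a driver-managed memory type sized in pages. */
	return ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);
}
#endif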

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	/* off_t offset = (off_t)bo->addr_space_offset;XXXKIB */
	/* off_t holelen = ((off_t)bo->mem.num_pages) << PAGE_SHIFT;XXXKIB */

	if (!bdev->dev_mapping)
		return;
	/* unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); XXXKIB */
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* The caller acquired bdev->vm_lock. */
	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	rw_wlock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		rw_wunlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	rw_wunlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	rw_wunlock(&bdev->vm_lock);
	return ret;
}
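
/*
 * The addr_space_offset computed above is the byte offset that userspace
 * passes to mmap(2) to map this buffer object through the drm device node.
 */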

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			mtx_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			mtx_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		mtx_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			mtx_lock(&bdev->fence_lock);
			return ret;
		}
		mtx_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			mtx_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			mtx_lock(&bdev->fence_lock);
		} else {
			mtx_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			mtx_lock(&bdev->fence_lock);
		}
	}
	return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	mtx_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
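
/*
 * Usage note (illustrative): ttm_bo_synccpu_write_grab() and
 * ttm_bo_synccpu_write_release() are meant to be paired around CPU writes
 * to the buffer contents; the cpu_writers count taken here is what the
 * destroy path asserts against (see ttm_bo_release_list()).
 */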

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	mtx_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	refcount_acquire(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		if (refcount_release(&bo->list_kref))
			ttm_bo_release_list(bo);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	mtx_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	mtx_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	mtx_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wakeup(bo);
	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}