xref: /linux/drivers/gpu/drm/ttm/ttm_bo.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #include "ttm/ttm_module.h"
32 #include "ttm/ttm_bo_driver.h"
33 #include "ttm/ttm_placement.h"
34 #include <linux/jiffies.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
37 #include <linux/mm.h>
38 #include <linux/file.h>
39 #include <linux/module.h>
40 #include <linux/atomic.h>
41 
42 #define TTM_ASSERT_LOCKED(param)
43 #define TTM_DEBUG(fmt, arg...)
44 #define TTM_BO_HASH_ORDER 13
45 
46 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
47 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48 static void ttm_bo_global_kobj_release(struct kobject *kobj);
49 
50 static struct attribute ttm_bo_count = {
51 	.name = "bo_count",
52 	.mode = S_IRUGO
53 };
54 
55 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
56 {
57 	int i;
58 
59 	for (i = 0; i <= TTM_PL_PRIV5; i++)
60 		if (flags & (1 << i)) {
61 			*mem_type = i;
62 			return 0;
63 		}
64 	return -EINVAL;
65 }
66 
67 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
68 {
69 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
70 
71 	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
72 	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
73 	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
74 	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
75 	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
76 	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
77 		man->available_caching);
78 	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
79 		man->default_caching);
80 	if (mem_type != TTM_PL_SYSTEM)
81 		(*man->func->debug)(man, TTM_PFX);
82 }
83 
84 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
85 					struct ttm_placement *placement)
86 {
87 	int i, ret, mem_type;
88 
89 	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
90 		bo, bo->mem.num_pages, bo->mem.size >> 10,
91 		bo->mem.size >> 20);
92 	for (i = 0; i < placement->num_placement; i++) {
93 		ret = ttm_mem_type_from_flags(placement->placement[i],
94 						&mem_type);
95 		if (ret)
96 			return;
97 		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
98 			i, placement->placement[i], mem_type);
99 		ttm_mem_type_debug(bo->bdev, mem_type);
100 	}
101 }
102 
103 static ssize_t ttm_bo_global_show(struct kobject *kobj,
104 				  struct attribute *attr,
105 				  char *buffer)
106 {
107 	struct ttm_bo_global *glob =
108 		container_of(kobj, struct ttm_bo_global, kobj);
109 
110 	return snprintf(buffer, PAGE_SIZE, "%lu\n",
111 			(unsigned long) atomic_read(&glob->bo_count));
112 }
113 
114 static struct attribute *ttm_bo_global_attrs[] = {
115 	&ttm_bo_count,
116 	NULL
117 };
118 
119 static const struct sysfs_ops ttm_bo_global_ops = {
120 	.show = &ttm_bo_global_show
121 };
122 
123 static struct kobj_type ttm_bo_glob_kobj_type  = {
124 	.release = &ttm_bo_global_kobj_release,
125 	.sysfs_ops = &ttm_bo_global_ops,
126 	.default_attrs = ttm_bo_global_attrs
127 };
128 
129 
130 static inline uint32_t ttm_bo_type_flags(unsigned type)
131 {
132 	return 1 << (type);
133 }
134 
135 static void ttm_bo_release_list(struct kref *list_kref)
136 {
137 	struct ttm_buffer_object *bo =
138 	    container_of(list_kref, struct ttm_buffer_object, list_kref);
139 	struct ttm_bo_device *bdev = bo->bdev;
140 
141 	BUG_ON(atomic_read(&bo->list_kref.refcount));
142 	BUG_ON(atomic_read(&bo->kref.refcount));
143 	BUG_ON(atomic_read(&bo->cpu_writers));
144 	BUG_ON(bo->sync_obj != NULL);
145 	BUG_ON(bo->mem.mm_node != NULL);
146 	BUG_ON(!list_empty(&bo->lru));
147 	BUG_ON(!list_empty(&bo->ddestroy));
148 
149 	if (bo->ttm)
150 		ttm_tt_destroy(bo->ttm);
151 	atomic_dec(&bo->glob->bo_count);
152 	if (bo->destroy)
153 		bo->destroy(bo);
154 	else {
155 		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
156 		kfree(bo);
157 	}
158 }
159 
160 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
161 {
162 	if (interruptible) {
163 		return wait_event_interruptible(bo->event_queue,
164 					       atomic_read(&bo->reserved) == 0);
165 	} else {
166 		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
167 		return 0;
168 	}
169 }
170 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
171 
172 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
173 {
174 	struct ttm_bo_device *bdev = bo->bdev;
175 	struct ttm_mem_type_manager *man;
176 
177 	BUG_ON(!atomic_read(&bo->reserved));
178 
179 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
180 
181 		BUG_ON(!list_empty(&bo->lru));
182 
183 		man = &bdev->man[bo->mem.mem_type];
184 		list_add_tail(&bo->lru, &man->lru);
185 		kref_get(&bo->list_kref);
186 
187 		if (bo->ttm != NULL) {
188 			list_add_tail(&bo->swap, &bo->glob->swap_lru);
189 			kref_get(&bo->list_kref);
190 		}
191 	}
192 }
193 
194 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
195 {
196 	int put_count = 0;
197 
198 	if (!list_empty(&bo->swap)) {
199 		list_del_init(&bo->swap);
200 		++put_count;
201 	}
202 	if (!list_empty(&bo->lru)) {
203 		list_del_init(&bo->lru);
204 		++put_count;
205 	}
206 
207 	/*
208 	 * TODO: Add a driver hook to delete from
209 	 * driver-specific LRU's here.
210 	 */
211 
212 	return put_count;
213 }
214 
215 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
216 			  bool interruptible,
217 			  bool no_wait, bool use_sequence, uint32_t sequence)
218 {
219 	struct ttm_bo_global *glob = bo->glob;
220 	int ret;
221 
222 	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
223 		/**
224 		 * Deadlock avoidance for multi-bo reserving.
225 		 */
226 		if (use_sequence && bo->seq_valid) {
227 			/**
228 			 * We've already reserved this one.
229 			 */
230 			if (unlikely(sequence == bo->val_seq))
231 				return -EDEADLK;
232 			/**
233 			 * Already reserved by a thread that will not back
234 			 * off for us. We need to back off.
235 			 */
236 			if (unlikely(sequence - bo->val_seq < (1 << 31)))
237 				return -EAGAIN;
238 		}
239 
240 		if (no_wait)
241 			return -EBUSY;
242 
243 		spin_unlock(&glob->lru_lock);
244 		ret = ttm_bo_wait_unreserved(bo, interruptible);
245 		spin_lock(&glob->lru_lock);
246 
247 		if (unlikely(ret))
248 			return ret;
249 	}
250 
251 	if (use_sequence) {
252 		/**
253 		 * Wake up waiters that may need to recheck for deadlock,
254 		 * if we decreased the sequence number.
255 		 */
256 		if (unlikely((bo->val_seq - sequence < (1 << 31))
257 			     || !bo->seq_valid))
258 			wake_up_all(&bo->event_queue);
259 
260 		bo->val_seq = sequence;
261 		bo->seq_valid = true;
262 	} else {
263 		bo->seq_valid = false;
264 	}
265 
266 	return 0;
267 }
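
/*
 * Illustrative backoff sketch (hypothetical caller; the real multi-bo
 * reservation loop lives in ttm_execbuf_util.c): a thread that gets
 * -EAGAIN is expected to release every buffer it has already reserved
 * and restart with the same sequence number.
 *
 *	retry:
 *	list_for_each_entry(entry, &list, head) {
 *		ret = ttm_bo_reserve(entry->bo, true, false, true, seq);
 *		if (unlikely(ret == -EAGAIN)) {
 *			unreserve_all_reserved_so_far();
 *			goto retry;
 *		}
 *	}
 */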
269 
270 static void ttm_bo_ref_bug(struct kref *list_kref)
271 {
272 	BUG();
273 }
274 
275 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
276 			 bool never_free)
277 {
278 	kref_sub(&bo->list_kref, count,
279 		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
280 }
281 
282 int ttm_bo_reserve(struct ttm_buffer_object *bo,
283 		   bool interruptible,
284 		   bool no_wait, bool use_sequence, uint32_t sequence)
285 {
286 	struct ttm_bo_global *glob = bo->glob;
287 	int put_count = 0;
288 	int ret;
289 
290 	spin_lock(&glob->lru_lock);
291 	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
292 				    sequence);
293 	if (likely(ret == 0))
294 		put_count = ttm_bo_del_from_lru(bo);
295 	spin_unlock(&glob->lru_lock);
296 
297 	ttm_bo_list_ref_sub(bo, put_count, true);
298 
299 	return ret;
300 }
EXPORT_SYMBOL(ttm_bo_reserve);
301 
302 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
303 {
304 	ttm_bo_add_to_lru(bo);
305 	atomic_set(&bo->reserved, 0);
306 	wake_up_all(&bo->event_queue);
307 }
308 
309 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
310 {
311 	struct ttm_bo_global *glob = bo->glob;
312 
313 	spin_lock(&glob->lru_lock);
314 	ttm_bo_unreserve_locked(bo);
315 	spin_unlock(&glob->lru_lock);
316 }
317 EXPORT_SYMBOL(ttm_bo_unreserve);
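
/*
 * Usage sketch (illustrative only; the placement variable is a
 * hypothetical driver-side object, see ttm_bo_validate() below):
 * reservation brackets any placement change.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_bo_validate(bo, &placement, true, false, false);
 *	ttm_bo_unreserve(bo);
 */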
318 
319 /*
320  * Call with bo->mutex held.
321  */
322 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
323 {
324 	struct ttm_bo_device *bdev = bo->bdev;
325 	struct ttm_bo_global *glob = bo->glob;
326 	int ret = 0;
327 	uint32_t page_flags = 0;
328 
329 	TTM_ASSERT_LOCKED(&bo->mutex);
330 	bo->ttm = NULL;
331 
332 	if (bdev->need_dma32)
333 		page_flags |= TTM_PAGE_FLAG_DMA32;
334 
335 	switch (bo->type) {
336 	case ttm_bo_type_device:
337 		if (zero_alloc)
338 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
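		/* fall through */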
339 	case ttm_bo_type_kernel:
340 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
341 					page_flags, glob->dummy_read_page);
342 		if (unlikely(bo->ttm == NULL))
343 			ret = -ENOMEM;
344 		break;
345 	case ttm_bo_type_user:
346 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
347 					page_flags | TTM_PAGE_FLAG_USER,
348 					glob->dummy_read_page);
349 		if (unlikely(bo->ttm == NULL)) {
350 			ret = -ENOMEM;
351 			break;
352 		}
353 
354 		ret = ttm_tt_set_user(bo->ttm, current,
355 				      bo->buffer_start, bo->num_pages);
356 		if (unlikely(ret != 0)) {
357 			ttm_tt_destroy(bo->ttm);
358 			bo->ttm = NULL;
359 		}
360 		break;
361 	default:
362 		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
363 		ret = -EINVAL;
364 		break;
365 	}
366 
367 	return ret;
368 }
369 
370 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
371 				  struct ttm_mem_reg *mem,
372 				  bool evict, bool interruptible,
373 				  bool no_wait_reserve, bool no_wait_gpu)
374 {
375 	struct ttm_bo_device *bdev = bo->bdev;
376 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
377 	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
378 	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
379 	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
380 	int ret = 0;
381 
382 	if (old_is_pci || new_is_pci ||
383 	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
384 		ret = ttm_mem_io_lock(old_man, true);
385 		if (unlikely(ret != 0))
386 			goto out_err;
387 		ttm_bo_unmap_virtual_locked(bo);
388 		ttm_mem_io_unlock(old_man);
389 	}
390 
391 	/*
392 	 * Create and bind a ttm if required.
393 	 */
394 
395 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
396 		if (bo->ttm == NULL) {
397 			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
398 			ret = ttm_bo_add_ttm(bo, zero);
399 			if (ret)
400 				goto out_err;
401 		}
402 
403 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
404 		if (ret)
405 			goto out_err;
406 
407 		if (mem->mem_type != TTM_PL_SYSTEM) {
408 			ret = ttm_tt_bind(bo->ttm, mem);
409 			if (ret)
410 				goto out_err;
411 		}
412 
413 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
414 			if (bdev->driver->move_notify)
415 				bdev->driver->move_notify(bo, mem);
416 			bo->mem = *mem;
417 			mem->mm_node = NULL;
418 			goto moved;
419 		}
420 	}
421 
422 	if (bdev->driver->move_notify)
423 		bdev->driver->move_notify(bo, mem);
424 
425 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
426 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
427 		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
428 	else if (bdev->driver->move)
429 		ret = bdev->driver->move(bo, evict, interruptible,
430 					 no_wait_reserve, no_wait_gpu, mem);
431 	else
432 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
433 
434 	if (ret)
435 		goto out_err;
436 
437 moved:
438 	if (bo->evicted) {
439 		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
440 		if (ret)
441 			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
442 		bo->evicted = false;
443 	}
444 
445 	if (bo->mem.mm_node) {
446 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
447 		    bdev->man[bo->mem.mem_type].gpu_offset;
448 		bo->cur_placement = bo->mem.placement;
449 	} else
450 		bo->offset = 0;
451 
452 	return 0;
453 
454 out_err:
455 	new_man = &bdev->man[bo->mem.mem_type];
456 	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
457 		ttm_tt_unbind(bo->ttm);
458 		ttm_tt_destroy(bo->ttm);
459 		bo->ttm = NULL;
460 	}
461 
462 	return ret;
463 }
464 
465 /**
466  * Call with bo::reserved held.
467  * Will release GPU memory type usage on destruction.
468  * This is the place to put in driver specific hooks to release
469  * driver private resources.
470  * Will release the bo::reserved lock.
471  */
472 
473 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
474 {
475 	if (bo->ttm) {
476 		ttm_tt_unbind(bo->ttm);
477 		ttm_tt_destroy(bo->ttm);
478 		bo->ttm = NULL;
479 	}
480 	ttm_bo_mem_put(bo, &bo->mem);
481 
482 	atomic_set(&bo->reserved, 0);
483 
484 	/*
485 	 * Make the cleared reservation visible to waiters before waking them.
486 	 */
487 	smp_mb__after_atomic_dec();
488 	wake_up_all(&bo->event_queue);
489 }
490 
491 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
492 {
493 	struct ttm_bo_device *bdev = bo->bdev;
494 	struct ttm_bo_global *glob = bo->glob;
495 	struct ttm_bo_driver *driver;
496 	void *sync_obj = NULL;
497 	void *sync_obj_arg;
498 	int put_count;
499 	int ret;
500 
501 	spin_lock(&bdev->fence_lock);
502 	(void) ttm_bo_wait(bo, false, false, true);
503 	if (!bo->sync_obj) {
504 
505 		spin_lock(&glob->lru_lock);
506 
507 		/**
508 		 * Lock inversion between bo::reserved and bdev::fence_lock here,
509 		 * but that's OK, since we're only trylocking.
510 		 */
511 
512 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
513 
514 		if (unlikely(ret == -EBUSY))
515 			goto queue;
516 
517 		spin_unlock(&bdev->fence_lock);
518 		put_count = ttm_bo_del_from_lru(bo);
519 
520 		spin_unlock(&glob->lru_lock);
521 		ttm_bo_cleanup_memtype_use(bo);
522 
523 		ttm_bo_list_ref_sub(bo, put_count, true);
524 
525 		return;
526 	} else {
527 		spin_lock(&glob->lru_lock);
528 	}
529 queue:
530 	driver = bdev->driver;
531 	if (bo->sync_obj)
532 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
533 	sync_obj_arg = bo->sync_obj_arg;
534 
535 	kref_get(&bo->list_kref);
536 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
537 	spin_unlock(&glob->lru_lock);
538 	spin_unlock(&bdev->fence_lock);
539 
540 	if (sync_obj) {
541 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
542 		driver->sync_obj_unref(&sync_obj);
543 	}
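	/* Recheck the delayed-destroy list in ~10 ms (at least one jiffy). */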
544 	schedule_delayed_work(&bdev->wq,
545 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
546 }
547 
548 /**
549  * ttm_bo_cleanup_refs
550  * If bo idle, remove from delayed- and lru lists, and unref.
551  * If not idle, do nothing.
552  *
553  * @interruptible: Any sleeps should occur interruptibly.
554  * @no_wait_reserve: Never wait for reserve. Return -EBUSY instead.
555  * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
556  */
557 
558 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
559 			       bool interruptible,
560 			       bool no_wait_reserve,
561 			       bool no_wait_gpu)
562 {
563 	struct ttm_bo_device *bdev = bo->bdev;
564 	struct ttm_bo_global *glob = bo->glob;
565 	int put_count;
566 	int ret = 0;
567 
568 retry:
569 	spin_lock(&bdev->fence_lock);
570 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
571 	spin_unlock(&bdev->fence_lock);
572 
573 	if (unlikely(ret != 0))
574 		return ret;
575 
576 	spin_lock(&glob->lru_lock);
577 
578 	if (unlikely(list_empty(&bo->ddestroy))) {
579 		spin_unlock(&glob->lru_lock);
580 		return 0;
581 	}
582 
583 	ret = ttm_bo_reserve_locked(bo, interruptible,
584 				    no_wait_reserve, false, 0);
585 
586 	if (unlikely(ret != 0)) {
587 		spin_unlock(&glob->lru_lock);
588 		return ret;
589 	}
590 
591 	/**
592 	 * We can re-check for sync object without taking
593 	 * the bo::lock since setting the sync object also requires
594 	 * bo::reserved. A busy object at this point may be caused by
595 	 * another thread that recently started an accelerated
596 	 * eviction.
597 	 */
598 
599 	if (unlikely(bo->sync_obj)) {
600 		atomic_set(&bo->reserved, 0);
601 		wake_up_all(&bo->event_queue);
602 		spin_unlock(&glob->lru_lock);
603 		goto retry;
604 	}
605 
606 	put_count = ttm_bo_del_from_lru(bo);
607 	list_del_init(&bo->ddestroy);
608 	++put_count;
609 
610 	spin_unlock(&glob->lru_lock);
611 	ttm_bo_cleanup_memtype_use(bo);
612 
613 	ttm_bo_list_ref_sub(bo, put_count, true);
614 
615 	return 0;
616 }
617 
618 /**
619  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
620  * encountered buffers.
621  */
622 
623 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
624 {
625 	struct ttm_bo_global *glob = bdev->glob;
626 	struct ttm_buffer_object *entry = NULL;
627 	int ret = 0;
628 
629 	spin_lock(&glob->lru_lock);
630 	if (list_empty(&bdev->ddestroy))
631 		goto out_unlock;
632 
633 	entry = list_first_entry(&bdev->ddestroy,
634 		struct ttm_buffer_object, ddestroy);
635 	kref_get(&entry->list_kref);
636 
637 	for (;;) {
638 		struct ttm_buffer_object *nentry = NULL;
639 
640 		if (entry->ddestroy.next != &bdev->ddestroy) {
641 			nentry = list_first_entry(&entry->ddestroy,
642 				struct ttm_buffer_object, ddestroy);
643 			kref_get(&nentry->list_kref);
644 		}
645 
646 		spin_unlock(&glob->lru_lock);
647 		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
648 					  !remove_all);
649 		kref_put(&entry->list_kref, ttm_bo_release_list);
650 		entry = nentry;
651 
652 		if (ret || !entry)
653 			goto out;
654 
655 		spin_lock(&glob->lru_lock);
656 		if (list_empty(&entry->ddestroy))
657 			break;
658 	}
659 
660 out_unlock:
661 	spin_unlock(&glob->lru_lock);
662 out:
663 	if (entry)
664 		kref_put(&entry->list_kref, ttm_bo_release_list);
665 	return ret;
666 }
667 
668 static void ttm_bo_delayed_workqueue(struct work_struct *work)
669 {
670 	struct ttm_bo_device *bdev =
671 	    container_of(work, struct ttm_bo_device, wq.work);
672 
673 	if (ttm_bo_delayed_delete(bdev, false)) {
674 		schedule_delayed_work(&bdev->wq,
675 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
676 	}
677 }
678 
679 static void ttm_bo_release(struct kref *kref)
680 {
681 	struct ttm_buffer_object *bo =
682 	    container_of(kref, struct ttm_buffer_object, kref);
683 	struct ttm_bo_device *bdev = bo->bdev;
684 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
685 
686 	if (likely(bo->vm_node != NULL)) {
687 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
688 		drm_mm_put_block(bo->vm_node);
689 		bo->vm_node = NULL;
690 	}
691 	write_unlock(&bdev->vm_lock);
692 	ttm_mem_io_lock(man, false);
693 	ttm_mem_io_free_vm(bo);
694 	ttm_mem_io_unlock(man);
695 	ttm_bo_cleanup_refs_or_queue(bo);
696 	kref_put(&bo->list_kref, ttm_bo_release_list);
697 	write_lock(&bdev->vm_lock);
698 }
699 
700 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
701 {
702 	struct ttm_buffer_object *bo = *p_bo;
703 	struct ttm_bo_device *bdev = bo->bdev;
704 
705 	*p_bo = NULL;
706 	write_lock(&bdev->vm_lock);
707 	kref_put(&bo->kref, ttm_bo_release);
708 	write_unlock(&bdev->vm_lock);
709 }
710 EXPORT_SYMBOL(ttm_bo_unref);
711 
712 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
713 {
714 	return cancel_delayed_work_sync(&bdev->wq);
715 }
716 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
717 
718 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
719 {
720 	if (resched)
721 		schedule_delayed_work(&bdev->wq,
722 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
723 }
724 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
725 
726 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
727 			bool no_wait_reserve, bool no_wait_gpu)
728 {
729 	struct ttm_bo_device *bdev = bo->bdev;
730 	struct ttm_mem_reg evict_mem;
731 	struct ttm_placement placement;
732 	int ret = 0;
733 
734 	spin_lock(&bdev->fence_lock);
735 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
736 	spin_unlock(&bdev->fence_lock);
737 
738 	if (unlikely(ret != 0)) {
739 		if (ret != -ERESTARTSYS) {
740 			printk(KERN_ERR TTM_PFX
741 			       "Failed to expire sync object before "
742 			       "buffer eviction.\n");
743 		}
744 		goto out;
745 	}
746 
747 	BUG_ON(!atomic_read(&bo->reserved));
748 
749 	evict_mem = bo->mem;
750 	evict_mem.mm_node = NULL;
751 	evict_mem.bus.io_reserved_vm = false;
752 	evict_mem.bus.io_reserved_count = 0;
753 
754 	placement.fpfn = 0;
755 	placement.lpfn = 0;
756 	placement.num_placement = 0;
757 	placement.num_busy_placement = 0;
758 	bdev->driver->evict_flags(bo, &placement);
759 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
760 				no_wait_reserve, no_wait_gpu);
761 	if (ret) {
762 		if (ret != -ERESTARTSYS) {
763 			printk(KERN_ERR TTM_PFX
764 			       "Failed to find memory space for "
765 			       "buffer 0x%p eviction.\n", bo);
766 			ttm_bo_mem_space_debug(bo, &placement);
767 		}
768 		goto out;
769 	}
770 
771 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
772 				     no_wait_reserve, no_wait_gpu);
773 	if (ret) {
774 		if (ret != -ERESTARTSYS)
775 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
776 		ttm_bo_mem_put(bo, &evict_mem);
777 		goto out;
778 	}
779 	bo->evicted = true;
780 out:
781 	return ret;
782 }
783 
784 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
785 				uint32_t mem_type,
786 				bool interruptible, bool no_wait_reserve,
787 				bool no_wait_gpu)
788 {
789 	struct ttm_bo_global *glob = bdev->glob;
790 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
791 	struct ttm_buffer_object *bo;
792 	int ret, put_count = 0;
793 
794 retry:
795 	spin_lock(&glob->lru_lock);
796 	if (list_empty(&man->lru)) {
797 		spin_unlock(&glob->lru_lock);
798 		return -EBUSY;
799 	}
800 
801 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
802 	kref_get(&bo->list_kref);
803 
804 	if (!list_empty(&bo->ddestroy)) {
805 		spin_unlock(&glob->lru_lock);
806 		ret = ttm_bo_cleanup_refs(bo, interruptible,
807 					  no_wait_reserve, no_wait_gpu);
808 		kref_put(&bo->list_kref, ttm_bo_release_list);
809 
810 		if (likely(ret == 0 || ret == -ERESTARTSYS))
811 			return ret;
812 
813 		goto retry;
814 	}
815 
816 	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
817 
818 	if (unlikely(ret == -EBUSY)) {
819 		spin_unlock(&glob->lru_lock);
820 		if (likely(!no_wait_gpu))
821 			ret = ttm_bo_wait_unreserved(bo, interruptible);
822 
823 		kref_put(&bo->list_kref, ttm_bo_release_list);
824 
825 		/**
826 		 * We *need* to retry after releasing the lru lock.
827 		 */
828 
829 		if (unlikely(ret != 0))
830 			return ret;
831 		goto retry;
832 	}
833 
834 	put_count = ttm_bo_del_from_lru(bo);
835 	spin_unlock(&glob->lru_lock);
836 
837 	BUG_ON(ret != 0);
838 
839 	ttm_bo_list_ref_sub(bo, put_count, true);
840 
841 	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
842 	ttm_bo_unreserve(bo);
843 
844 	kref_put(&bo->list_kref, ttm_bo_release_list);
845 	return ret;
846 }
847 
848 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
849 {
850 	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
851 
852 	if (mem->mm_node)
853 		(*man->func->put_node)(man, mem);
854 }
855 EXPORT_SYMBOL(ttm_bo_mem_put);
856 
857 /**
858  * Repeatedly evict memory from the LRU for @mem_type until we create enough
859  * space, or we've evicted everything and there isn't enough space.
860  */
861 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
862 					uint32_t mem_type,
863 					struct ttm_placement *placement,
864 					struct ttm_mem_reg *mem,
865 					bool interruptible,
866 					bool no_wait_reserve,
867 					bool no_wait_gpu)
868 {
869 	struct ttm_bo_device *bdev = bo->bdev;
870 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
871 	int ret;
872 
873 	do {
874 		ret = (*man->func->get_node)(man, bo, placement, mem);
875 		if (unlikely(ret != 0))
876 			return ret;
877 		if (mem->mm_node)
878 			break;
879 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
880 						no_wait_reserve, no_wait_gpu);
881 		if (unlikely(ret != 0))
882 			return ret;
883 	} while (1);
884 	if (mem->mm_node == NULL)
885 		return -ENOMEM;
886 	mem->mem_type = mem_type;
887 	return 0;
888 }
889 
890 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
891 				      uint32_t cur_placement,
892 				      uint32_t proposed_placement)
893 {
894 	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
895 	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
896 
897 	/**
898 	 * Keep current caching if possible.
899 	 */
900 
901 	if ((cur_placement & caching) != 0)
902 		result |= (cur_placement & caching);
903 	else if ((man->default_caching & caching) != 0)
904 		result |= man->default_caching;
905 	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
906 		result |= TTM_PL_FLAG_CACHED;
907 	else if ((TTM_PL_FLAG_WC & caching) != 0)
908 		result |= TTM_PL_FLAG_WC;
909 	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
910 		result |= TTM_PL_FLAG_UNCACHED;
911 
912 	return result;
913 }
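
/*
 * Worked example (illustrative): if the buffer is currently mapped
 * cached and the proposed placement allows both cached and
 * write-combined mappings, the cached bit is kept, which avoids a
 * costly caching-state transition of the backing pages.
 */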
914 
915 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
916 				 bool disallow_fixed,
917 				 uint32_t mem_type,
918 				 uint32_t proposed_placement,
919 				 uint32_t *masked_placement)
920 {
921 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
922 
923 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
924 		return false;
925 
926 	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
927 		return false;
928 
929 	if ((proposed_placement & man->available_caching) == 0)
930 		return false;
931 
932 	cur_flags |= (proposed_placement & man->available_caching);
933 
934 	*masked_placement = cur_flags;
935 	return true;
936 }
937 
938 /**
939  * Creates space for memory region @mem according to its type.
940  *
941  * This function first searches for free space in compatible memory types in
942  * the priority order defined by the driver.  If free space isn't found, then
943  * ttm_bo_mem_force_space is attempted in priority order to evict and find
944  * space.
945  */
946 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
947 			struct ttm_placement *placement,
948 			struct ttm_mem_reg *mem,
949 			bool interruptible, bool no_wait_reserve,
950 			bool no_wait_gpu)
951 {
952 	struct ttm_bo_device *bdev = bo->bdev;
953 	struct ttm_mem_type_manager *man;
954 	uint32_t mem_type = TTM_PL_SYSTEM;
955 	uint32_t cur_flags = 0;
956 	bool type_found = false;
957 	bool type_ok = false;
958 	bool has_erestartsys = false;
959 	int i, ret;
960 
961 	mem->mm_node = NULL;
962 	for (i = 0; i < placement->num_placement; ++i) {
963 		ret = ttm_mem_type_from_flags(placement->placement[i],
964 						&mem_type);
965 		if (ret)
966 			return ret;
967 		man = &bdev->man[mem_type];
968 
969 		type_ok = ttm_bo_mt_compatible(man,
970 						bo->type == ttm_bo_type_user,
971 						mem_type,
972 						placement->placement[i],
973 						&cur_flags);
974 
975 		if (!type_ok)
976 			continue;
977 
978 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
979 						  cur_flags);
980 		/*
981 		 * Copy the access and other non-mapping-related flag bits
982 		 * from the memory placement flags into the current flags.
983 		 */
984 		ttm_flag_masked(&cur_flags, placement->placement[i],
985 				~TTM_PL_MASK_MEMTYPE);
986 
987 		if (mem_type == TTM_PL_SYSTEM)
988 			break;
989 
990 		if (man->has_type && man->use_type) {
991 			type_found = true;
992 			ret = (*man->func->get_node)(man, bo, placement, mem);
993 			if (unlikely(ret))
994 				return ret;
995 		}
996 		if (mem->mm_node)
997 			break;
998 	}
999 
1000 	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
1001 		mem->mem_type = mem_type;
1002 		mem->placement = cur_flags;
1003 		return 0;
1004 	}
1005 
1006 	if (!type_found)
1007 		return -EINVAL;
1008 
1009 	for (i = 0; i < placement->num_busy_placement; ++i) {
1010 		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
1011 						&mem_type);
1012 		if (ret)
1013 			return ret;
1014 		man = &bdev->man[mem_type];
1015 		if (!man->has_type)
1016 			continue;
1017 		if (!ttm_bo_mt_compatible(man,
1018 						bo->type == ttm_bo_type_user,
1019 						mem_type,
1020 						placement->busy_placement[i],
1021 						&cur_flags))
1022 			continue;
1023 
1024 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1025 						  cur_flags);
1026 		/*
1027 		 * Copy the access and other non-mapping-related flag bits
1028 		 * from the memory placement flags into the current flags.
1029 		 */
1030 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
1031 				~TTM_PL_MASK_MEMTYPE);
1032 
1034 		if (mem_type == TTM_PL_SYSTEM) {
1035 			mem->mem_type = mem_type;
1036 			mem->placement = cur_flags;
1037 			mem->mm_node = NULL;
1038 			return 0;
1039 		}
1040 
1041 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1042 						interruptible, no_wait_reserve, no_wait_gpu);
1043 		if (ret == 0 && mem->mm_node) {
1044 			mem->placement = cur_flags;
1045 			return 0;
1046 		}
1047 		if (ret == -ERESTARTSYS)
1048 			has_erestartsys = true;
1049 	}
1050 	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1051 	return ret;
1052 }
1053 EXPORT_SYMBOL(ttm_bo_mem_space);
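
/*
 * Caller sketch (hypothetical driver code): a driver that prefers VRAM
 * but may fall back to cached GTT under memory pressure could pass
 *
 *	static const uint32_t placements[] = {
 *		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *	};
 *	static const uint32_t busy_placements[] = {
 *		TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = ARRAY_SIZE(placements),
 *		.placement = placements,
 *		.num_busy_placement = ARRAY_SIZE(busy_placements),
 *		.busy_placement = busy_placements,
 *	};
 *
 * The first pass above tries @placement without evicting; the second
 * pass may evict through ttm_bo_mem_force_space().
 */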
1054 
1055 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1056 {
1057 	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1058 		return -EBUSY;
1059 
1060 	return wait_event_interruptible(bo->event_queue,
1061 					atomic_read(&bo->cpu_writers) == 0);
1062 }
1063 EXPORT_SYMBOL(ttm_bo_wait_cpu);
1064 
1065 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1066 			struct ttm_placement *placement,
1067 			bool interruptible, bool no_wait_reserve,
1068 			bool no_wait_gpu)
1069 {
1070 	int ret = 0;
1071 	struct ttm_mem_reg mem;
1072 	struct ttm_bo_device *bdev = bo->bdev;
1073 
1074 	BUG_ON(!atomic_read(&bo->reserved));
1075 
1076 	/*
1077 	 * FIXME: It's possible to pipeline buffer moves.
1078 	 * Have the driver move function wait for idle when necessary,
1079 	 * instead of doing it here.
1080 	 */
1081 	spin_lock(&bdev->fence_lock);
1082 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1083 	spin_unlock(&bdev->fence_lock);
1084 	if (ret)
1085 		return ret;
1086 	mem.num_pages = bo->num_pages;
1087 	mem.size = mem.num_pages << PAGE_SHIFT;
1088 	mem.page_alignment = bo->mem.page_alignment;
1089 	mem.bus.io_reserved_vm = false;
1090 	mem.bus.io_reserved_count = 0;
1091 	/*
1092 	 * Determine where to move the buffer.
1093 	 */
1094 	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1095 	if (ret)
1096 		goto out_unlock;
1097 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1098 out_unlock:
1099 	if (ret && mem.mm_node)
1100 		ttm_bo_mem_put(bo, &mem);
1101 	return ret;
1102 }
1103 
1104 static int ttm_bo_mem_compat(struct ttm_placement *placement,
1105 			     struct ttm_mem_reg *mem)
1106 {
1107 	int i;
1108 
1109 	if (mem->mm_node && placement->lpfn != 0 &&
1110 	    (mem->start < placement->fpfn ||
1111 	     mem->start + mem->num_pages > placement->lpfn))
1112 		return -1;
1113 
1114 	for (i = 0; i < placement->num_placement; i++) {
1115 		if ((placement->placement[i] & mem->placement &
1116 			TTM_PL_MASK_CACHING) &&
1117 			(placement->placement[i] & mem->placement &
1118 			TTM_PL_MASK_MEM))
1119 			return i;
1120 	}
1121 	return -1;
1122 }
1123 
1124 int ttm_bo_validate(struct ttm_buffer_object *bo,
1125 			struct ttm_placement *placement,
1126 			bool interruptible, bool no_wait_reserve,
1127 			bool no_wait_gpu)
1128 {
1129 	int ret;
1130 
1131 	BUG_ON(!atomic_read(&bo->reserved));
1132 	/* Check that range is valid */
1133 	if (placement->lpfn || placement->fpfn)
1134 		if (placement->fpfn > placement->lpfn ||
1135 			(placement->lpfn - placement->fpfn) < bo->num_pages)
1136 			return -EINVAL;
1137 	/*
1138 	 * Check whether we need to move buffer.
1139 	 */
1140 	ret = ttm_bo_mem_compat(placement, &bo->mem);
1141 	if (ret < 0) {
1142 		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1143 		if (ret)
1144 			return ret;
1145 	} else {
1146 		/*
1147 		 * Copy the access and other non-mapping-related flag bits
1148 		 * from the compatible memory placement flags into the active flags.
1149 		 */
1150 		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1151 				~TTM_PL_MASK_MEMTYPE);
1152 	}
1153 	/*
1154 	 * We might need to add a TTM.
1155 	 */
1156 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1157 		ret = ttm_bo_add_ttm(bo, true);
1158 		if (ret)
1159 			return ret;
1160 	}
1161 	return 0;
1162 }
1163 EXPORT_SYMBOL(ttm_bo_validate);
1164 
1165 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1166 				struct ttm_placement *placement)
1167 {
1168 	BUG_ON((placement->fpfn || placement->lpfn) &&
1169 	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1170 
1171 	return 0;
1172 }
1173 
1174 int ttm_bo_init(struct ttm_bo_device *bdev,
1175 		struct ttm_buffer_object *bo,
1176 		unsigned long size,
1177 		enum ttm_bo_type type,
1178 		struct ttm_placement *placement,
1179 		uint32_t page_alignment,
1180 		unsigned long buffer_start,
1181 		bool interruptible,
1182 		struct file *persistent_swap_storage,
1183 		size_t acc_size,
1184 		void (*destroy) (struct ttm_buffer_object *))
1185 {
1186 	int ret = 0;
1187 	unsigned long num_pages;
1188 
1189 	size += buffer_start & ~PAGE_MASK;
1190 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1191 	if (num_pages == 0) {
1192 		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1193 		if (destroy)
1194 			(*destroy)(bo);
1195 		else
1196 			kfree(bo);
1197 		return -EINVAL;
1198 	}
1199 	bo->destroy = destroy;
1200 
1201 	kref_init(&bo->kref);
1202 	kref_init(&bo->list_kref);
1203 	atomic_set(&bo->cpu_writers, 0);
1204 	atomic_set(&bo->reserved, 1);
1205 	init_waitqueue_head(&bo->event_queue);
1206 	INIT_LIST_HEAD(&bo->lru);
1207 	INIT_LIST_HEAD(&bo->ddestroy);
1208 	INIT_LIST_HEAD(&bo->swap);
1209 	INIT_LIST_HEAD(&bo->io_reserve_lru);
1210 	bo->bdev = bdev;
1211 	bo->glob = bdev->glob;
1212 	bo->type = type;
1213 	bo->num_pages = num_pages;
1214 	bo->mem.size = num_pages << PAGE_SHIFT;
1215 	bo->mem.mem_type = TTM_PL_SYSTEM;
1216 	bo->mem.num_pages = bo->num_pages;
1217 	bo->mem.mm_node = NULL;
1218 	bo->mem.page_alignment = page_alignment;
1219 	bo->mem.bus.io_reserved_vm = false;
1220 	bo->mem.bus.io_reserved_count = 0;
1221 	bo->buffer_start = buffer_start & PAGE_MASK;
1222 	bo->priv_flags = 0;
1223 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1224 	bo->seq_valid = false;
1225 	bo->persistent_swap_storage = persistent_swap_storage;
1226 	bo->acc_size = acc_size;
1227 	atomic_inc(&bo->glob->bo_count);
1228 
1229 	ret = ttm_bo_check_placement(bo, placement);
1230 	if (unlikely(ret != 0))
1231 		goto out_err;
1232 
1233 	/*
1234 	 * For ttm_bo_type_device buffers, allocate
1235 	 * address space from the device.
1236 	 */
1237 	if (bo->type == ttm_bo_type_device) {
1238 		ret = ttm_bo_setup_vm(bo);
1239 		if (ret)
1240 			goto out_err;
1241 	}
1242 
1243 	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1244 	if (ret)
1245 		goto out_err;
1246 
1247 	ttm_bo_unreserve(bo);
1248 	return 0;
1249 
1250 out_err:
1251 	ttm_bo_unreserve(bo);
1252 	ttm_bo_unref(&bo);
1253 
1254 	return ret;
1255 }
1256 EXPORT_SYMBOL(ttm_bo_init);
1257 
1258 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1259 				 unsigned long num_pages)
1260 {
1261 	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1262 	    PAGE_MASK;
1263 
1264 	return glob->ttm_bo_size + 2 * page_array_size;
1265 }
1266 
1267 int ttm_bo_create(struct ttm_bo_device *bdev,
1268 			unsigned long size,
1269 			enum ttm_bo_type type,
1270 			struct ttm_placement *placement,
1271 			uint32_t page_alignment,
1272 			unsigned long buffer_start,
1273 			bool interruptible,
1274 			struct file *persistent_swap_storage,
1275 			struct ttm_buffer_object **p_bo)
1276 {
1277 	struct ttm_buffer_object *bo;
1278 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1279 	int ret;
1280 
1281 	size_t acc_size =
1282 	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1283 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1284 	if (unlikely(ret != 0))
1285 		return ret;
1286 
1287 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1288 
1289 	if (unlikely(bo == NULL)) {
1290 		ttm_mem_global_free(mem_glob, acc_size);
1291 		return -ENOMEM;
1292 	}
1293 
1294 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1295 				buffer_start, interruptible,
1296 				persistent_swap_storage, acc_size, NULL);
1297 	if (likely(ret == 0))
1298 		*p_bo = bo;
1299 
1300 	return ret;
1301 }
1302 EXPORT_SYMBOL(ttm_bo_create);
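
/*
 * Usage sketch (hypothetical driver code; a zero page_alignment means
 * no alignment constraint beyond page granularity):
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel, &placement,
 *			    0, 0, false, NULL, &bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 */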
1303 
1304 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1305 					unsigned mem_type, bool allow_errors)
1306 {
1307 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1308 	struct ttm_bo_global *glob = bdev->glob;
1309 	int ret;
1310 
1311 	/*
1312 	 * Can't use standard list traversal since we're unlocking.
1313 	 */
1314 
1315 	spin_lock(&glob->lru_lock);
1316 	while (!list_empty(&man->lru)) {
1317 		spin_unlock(&glob->lru_lock);
1318 		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1319 		if (ret) {
1320 			if (allow_errors) {
1321 				return ret;
1322 			} else {
1323 				printk(KERN_ERR TTM_PFX
1324 					"Cleanup eviction failed\n");
1325 			}
1326 		}
1327 		spin_lock(&glob->lru_lock);
1328 	}
1329 	spin_unlock(&glob->lru_lock);
1330 	return 0;
1331 }
1332 
1333 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1334 {
1335 	struct ttm_mem_type_manager *man;
1336 	int ret = -EINVAL;
1337 
1338 	if (mem_type >= TTM_NUM_MEM_TYPES) {
1339 		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1340 		return ret;
1341 	}
1342 	man = &bdev->man[mem_type];
1343 
1344 	if (!man->has_type) {
1345 		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1346 		       "memory manager type %u\n", mem_type);
1347 		return ret;
1348 	}
1349 
1350 	man->use_type = false;
1351 	man->has_type = false;
1352 
1353 	ret = 0;
1354 	if (mem_type > 0) {
1355 		ttm_bo_force_list_clean(bdev, mem_type, false);
1356 
1357 		ret = (*man->func->takedown)(man);
1358 	}
1359 
1360 	return ret;
1361 }
1362 EXPORT_SYMBOL(ttm_bo_clean_mm);
1363 
1364 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1365 {
1366 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1367 
1368 	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1369 		printk(KERN_ERR TTM_PFX
1370 		       "Illegal memory manager memory type %u.\n",
1371 		       mem_type);
1372 		return -EINVAL;
1373 	}
1374 
1375 	if (!man->has_type) {
1376 		printk(KERN_ERR TTM_PFX
1377 		       "Memory type %u has not been initialized.\n",
1378 		       mem_type);
1379 		return 0;
1380 	}
1381 
1382 	return ttm_bo_force_list_clean(bdev, mem_type, true);
1383 }
1384 EXPORT_SYMBOL(ttm_bo_evict_mm);
1385 
1386 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1387 			unsigned long p_size)
1388 {
1389 	int ret = -EINVAL;
1390 	struct ttm_mem_type_manager *man;
1391 
1392 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1393 	man = &bdev->man[type];
1394 	BUG_ON(man->has_type);
1395 	man->io_reserve_fastpath = true;
1396 	man->use_io_reserve_lru = false;
1397 	mutex_init(&man->io_reserve_mutex);
1398 	INIT_LIST_HEAD(&man->io_reserve_lru);
1399 
1400 	ret = bdev->driver->init_mem_type(bdev, type, man);
1401 	if (ret)
1402 		return ret;
1403 	man->bdev = bdev;
1404 
1405 	ret = 0;
1406 	if (type != TTM_PL_SYSTEM) {
1407 		ret = (*man->func->init)(man, p_size);
1408 		if (ret)
1409 			return ret;
1410 	}
1411 	man->has_type = true;
1412 	man->use_type = true;
1413 	man->size = p_size;
1414 
1415 	INIT_LIST_HEAD(&man->lru);
1416 
1417 	return 0;
1418 }
1419 EXPORT_SYMBOL(ttm_bo_init_mm);
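
/*
 * Driver-side sketch (vram_size is a hypothetical byte count): managed
 * ranges are sized in pages, so a VRAM manager would be set up as
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */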
1420 
1421 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1422 {
1423 	struct ttm_bo_global *glob =
1424 		container_of(kobj, struct ttm_bo_global, kobj);
1425 
1426 	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1427 	__free_page(glob->dummy_read_page);
1428 	kfree(glob);
1429 }
1430 
1431 void ttm_bo_global_release(struct drm_global_reference *ref)
1432 {
1433 	struct ttm_bo_global *glob = ref->object;
1434 
1435 	kobject_del(&glob->kobj);
1436 	kobject_put(&glob->kobj);
1437 }
1438 EXPORT_SYMBOL(ttm_bo_global_release);
1439 
1440 int ttm_bo_global_init(struct drm_global_reference *ref)
1441 {
1442 	struct ttm_bo_global_ref *bo_ref =
1443 		container_of(ref, struct ttm_bo_global_ref, ref);
1444 	struct ttm_bo_global *glob = ref->object;
1445 	int ret;
1446 
1447 	mutex_init(&glob->device_list_mutex);
1448 	spin_lock_init(&glob->lru_lock);
1449 	glob->mem_glob = bo_ref->mem_glob;
1450 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1451 
1452 	if (unlikely(glob->dummy_read_page == NULL)) {
1453 		ret = -ENOMEM;
1454 		goto out_no_drp;
1455 	}
1456 
1457 	INIT_LIST_HEAD(&glob->swap_lru);
1458 	INIT_LIST_HEAD(&glob->device_list);
1459 
1460 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1461 	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1462 	if (unlikely(ret != 0)) {
1463 		printk(KERN_ERR TTM_PFX
1464 		       "Could not register buffer object swapout.\n");
1465 		goto out_no_shrink;
1466 	}
1467 
1468 	glob->ttm_bo_extra_size =
1469 		ttm_round_pot(sizeof(struct ttm_tt)) +
1470 		ttm_round_pot(sizeof(struct ttm_backend));
1471 
1472 	glob->ttm_bo_size = glob->ttm_bo_extra_size +
1473 		ttm_round_pot(sizeof(struct ttm_buffer_object));
1474 
1475 	atomic_set(&glob->bo_count, 0);
1476 
1477 	ret = kobject_init_and_add(
1478 		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1479 	if (unlikely(ret != 0))
1480 		kobject_put(&glob->kobj);
1481 	return ret;
1482 out_no_shrink:
1483 	__free_page(glob->dummy_read_page);
1484 out_no_drp:
1485 	kfree(glob);
1486 	return ret;
1487 }
1488 EXPORT_SYMBOL(ttm_bo_global_init);
1489 
1490 
1491 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1492 {
1493 	int ret = 0;
1494 	unsigned i = TTM_NUM_MEM_TYPES;
1495 	struct ttm_mem_type_manager *man;
1496 	struct ttm_bo_global *glob = bdev->glob;
1497 
1498 	while (i--) {
1499 		man = &bdev->man[i];
1500 		if (man->has_type) {
1501 			man->use_type = false;
1502 			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1503 				ret = -EBUSY;
1504 				printk(KERN_ERR TTM_PFX
1505 				       "DRM memory manager type %d "
1506 				       "is not clean.\n", i);
1507 			}
1508 			man->has_type = false;
1509 		}
1510 	}
1511 
1512 	mutex_lock(&glob->device_list_mutex);
1513 	list_del(&bdev->device_list);
1514 	mutex_unlock(&glob->device_list_mutex);
1515 
1516 	cancel_delayed_work_sync(&bdev->wq);
1517 
1518 	while (ttm_bo_delayed_delete(bdev, true))
1519 		;
1520 
1521 	spin_lock(&glob->lru_lock);
1522 	if (list_empty(&bdev->ddestroy))
1523 		TTM_DEBUG("Delayed destroy list was clean\n");
1524 
1525 	if (list_empty(&bdev->man[0].lru))
1526 		TTM_DEBUG("Swap list was clean\n");
1527 	spin_unlock(&glob->lru_lock);
1528 
1529 	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1530 	write_lock(&bdev->vm_lock);
1531 	drm_mm_takedown(&bdev->addr_space_mm);
1532 	write_unlock(&bdev->vm_lock);
1533 
1534 	return ret;
1535 }
1536 EXPORT_SYMBOL(ttm_bo_device_release);
1537 
1538 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1539 		       struct ttm_bo_global *glob,
1540 		       struct ttm_bo_driver *driver,
1541 		       uint64_t file_page_offset,
1542 		       bool need_dma32)
1543 {
1544 	int ret = -EINVAL;
1545 
1546 	rwlock_init(&bdev->vm_lock);
1547 	bdev->driver = driver;
1548 
1549 	memset(bdev->man, 0, sizeof(bdev->man));
1550 
1551 	/*
1552 	 * Initialize the system memory buffer type.
1553 	 * Other types need to be driver / IOCTL initialized.
1554 	 */
1555 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1556 	if (unlikely(ret != 0))
1557 		goto out_no_sys;
1558 
1559 	bdev->addr_space_rb = RB_ROOT;
1560 	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1561 	if (unlikely(ret != 0))
1562 		goto out_no_addr_mm;
1563 
1564 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1565 	bdev->nice_mode = true;
1566 	INIT_LIST_HEAD(&bdev->ddestroy);
1567 	bdev->dev_mapping = NULL;
1568 	bdev->glob = glob;
1569 	bdev->need_dma32 = need_dma32;
1570 	bdev->val_seq = 0;
1571 	spin_lock_init(&bdev->fence_lock);
1572 	mutex_lock(&glob->device_list_mutex);
1573 	list_add_tail(&bdev->device_list, &glob->device_list);
1574 	mutex_unlock(&glob->device_list_mutex);
1575 
1576 	return 0;
1577 out_no_addr_mm:
1578 	ttm_bo_clean_mm(bdev, 0);
1579 out_no_sys:
1580 	return ret;
1581 }
1582 EXPORT_SYMBOL(ttm_bo_device_init);
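
/*
 * Bring-up sketch (assumes the driver has already taken DRM global
 * references for the mem and bo globals; DRM_FILE_PAGE_OFFSET is a
 * driver-defined constant, not core TTM):
 *
 *	ret = ttm_bo_device_init(&dev_priv->bdev, glob, &driver,
 *				 DRM_FILE_PAGE_OFFSET, false);
 */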
1583 
1584 /*
1585  * buffer object vm functions.
1586  */
1587 
1588 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1589 {
1590 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1591 
1592 	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1593 		if (mem->mem_type == TTM_PL_SYSTEM)
1594 			return false;
1595 
1596 		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1597 			return false;
1598 
1599 		if (mem->placement & TTM_PL_FLAG_CACHED)
1600 			return false;
1601 	}
1602 	return true;
1603 }
1604 
1605 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1606 {
1607 	struct ttm_bo_device *bdev = bo->bdev;
1608 	loff_t offset = (loff_t) bo->addr_space_offset;
1609 	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1610 
1611 	if (!bdev->dev_mapping)
1612 		return;
1613 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1614 	ttm_mem_io_free_vm(bo);
1615 }
1616 
1617 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1618 {
1619 	struct ttm_bo_device *bdev = bo->bdev;
1620 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1621 
1622 	ttm_mem_io_lock(man, false);
1623 	ttm_bo_unmap_virtual_locked(bo);
1624 	ttm_mem_io_unlock(man);
1625 }
1628 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1629 
1630 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1631 {
1632 	struct ttm_bo_device *bdev = bo->bdev;
1633 	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1634 	struct rb_node *parent = NULL;
1635 	struct ttm_buffer_object *cur_bo;
1636 	unsigned long offset = bo->vm_node->start;
1637 	unsigned long cur_offset;
1638 
1639 	while (*cur) {
1640 		parent = *cur;
1641 		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1642 		cur_offset = cur_bo->vm_node->start;
1643 		if (offset < cur_offset)
1644 			cur = &parent->rb_left;
1645 		else if (offset > cur_offset)
1646 			cur = &parent->rb_right;
1647 		else
1648 			BUG();
1649 	}
1650 
1651 	rb_link_node(&bo->vm_rb, parent, cur);
1652 	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1653 }
1654 
1655 /**
1656  * ttm_bo_setup_vm:
1657  *
1658  * @bo: the buffer to allocate address space for
1659  *
1660  * Allocate address space in the drm device so that applications
1661  * can mmap the buffer and access the contents. This only
1662  * applies to ttm_bo_type_device objects as others are not
1663  * placed in the drm device address space.
1664  */
1665 
1666 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1667 {
1668 	struct ttm_bo_device *bdev = bo->bdev;
1669 	int ret;
1670 
1671 retry_pre_get:
1672 	ret = drm_mm_pre_get(&bdev->addr_space_mm);
1673 	if (unlikely(ret != 0))
1674 		return ret;
1675 
1676 	write_lock(&bdev->vm_lock);
1677 	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1678 					 bo->mem.num_pages, 0, 0);
1679 
1680 	if (unlikely(bo->vm_node == NULL)) {
1681 		ret = -ENOMEM;
1682 		goto out_unlock;
1683 	}
1684 
1685 	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1686 					      bo->mem.num_pages, 0);
1687 
1688 	if (unlikely(bo->vm_node == NULL)) {
1689 		write_unlock(&bdev->vm_lock);
1690 		goto retry_pre_get;
1691 	}
1692 
1693 	ttm_bo_vm_insert_rb(bo);
1694 	write_unlock(&bdev->vm_lock);
1695 	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1696 
1697 	return 0;
1698 out_unlock:
1699 	write_unlock(&bdev->vm_lock);
1700 	return ret;
1701 }
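
/*
 * The resulting addr_space_offset is the offset user space passes to
 * mmap(); e.g. a vm_node starting at page 0x10000 yields an offset of
 * ((uint64_t) 0x10000) << PAGE_SHIFT.
 */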
1702 
1703 int ttm_bo_wait(struct ttm_buffer_object *bo,
1704 		bool lazy, bool interruptible, bool no_wait)
1705 {
1706 	struct ttm_bo_driver *driver = bo->bdev->driver;
1707 	struct ttm_bo_device *bdev = bo->bdev;
1708 	void *sync_obj;
1709 	void *sync_obj_arg;
1710 	int ret = 0;
1711 
1712 	if (likely(bo->sync_obj == NULL))
1713 		return 0;
1714 
1715 	while (bo->sync_obj) {
1716 
1717 		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1718 			void *tmp_obj = bo->sync_obj;
1719 			bo->sync_obj = NULL;
1720 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1721 			spin_unlock(&bdev->fence_lock);
1722 			driver->sync_obj_unref(&tmp_obj);
1723 			spin_lock(&bdev->fence_lock);
1724 			continue;
1725 		}
1726 
1727 		if (no_wait)
1728 			return -EBUSY;
1729 
1730 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
1731 		sync_obj_arg = bo->sync_obj_arg;
1732 		spin_unlock(&bdev->fence_lock);
1733 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1734 					    lazy, interruptible);
1735 		if (unlikely(ret != 0)) {
1736 			driver->sync_obj_unref(&sync_obj);
1737 			spin_lock(&bdev->fence_lock);
1738 			return ret;
1739 		}
1740 		spin_lock(&bdev->fence_lock);
1741 		if (likely(bo->sync_obj == sync_obj &&
1742 			   bo->sync_obj_arg == sync_obj_arg)) {
1743 			void *tmp_obj = bo->sync_obj;
1744 			bo->sync_obj = NULL;
1745 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1746 				  &bo->priv_flags);
1747 			spin_unlock(&bdev->fence_lock);
1748 			driver->sync_obj_unref(&sync_obj);
1749 			driver->sync_obj_unref(&tmp_obj);
1750 			spin_lock(&bdev->fence_lock);
1751 		} else {
1752 			spin_unlock(&bdev->fence_lock);
1753 			driver->sync_obj_unref(&sync_obj);
1754 			spin_lock(&bdev->fence_lock);
1755 		}
1756 	}
1757 	return 0;
1758 }
1759 EXPORT_SYMBOL(ttm_bo_wait);
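
/*
 * Callers hold bdev::fence_lock around ttm_bo_wait(); a minimal
 * idle-check sketch matching the usage elsewhere in this file:
 *
 *	spin_lock(&bdev->fence_lock);
 *	ret = ttm_bo_wait(bo, false, true, false);
 *	spin_unlock(&bdev->fence_lock);
 */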
1760 
1761 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1762 {
1763 	struct ttm_bo_device *bdev = bo->bdev;
1764 	int ret = 0;
1765 
1766 	/*
1767 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1768 	 */
1769 
1770 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1771 	if (unlikely(ret != 0))
1772 		return ret;
1773 	spin_lock(&bdev->fence_lock);
1774 	ret = ttm_bo_wait(bo, false, true, no_wait);
1775 	spin_unlock(&bdev->fence_lock);
1776 	if (likely(ret == 0))
1777 		atomic_inc(&bo->cpu_writers);
1778 	ttm_bo_unreserve(bo);
1779 	return ret;
1780 }
1781 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1782 
1783 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1784 {
1785 	if (atomic_dec_and_test(&bo->cpu_writers))
1786 		wake_up_all(&bo->event_queue);
1787 }
1788 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
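
/*
 * Illustrative pairing (hypothetical caller; map, data and size are
 * placeholders): bracket CPU writes so eviction can wait for them.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	memcpy(map, data, size);
 *	ttm_bo_synccpu_write_release(bo);
 */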
1789 
1790 /**
1791  * A buffer object shrink method that tries to swap out the first
1792  * buffer object on the bo_global::swap_lru list.
1793  */
1794 
1795 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1796 {
1797 	struct ttm_bo_global *glob =
1798 	    container_of(shrink, struct ttm_bo_global, shrink);
1799 	struct ttm_buffer_object *bo;
1800 	int ret = -EBUSY;
1801 	int put_count;
1802 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1803 
1804 	spin_lock(&glob->lru_lock);
1805 	while (ret == -EBUSY) {
1806 		if (unlikely(list_empty(&glob->swap_lru))) {
1807 			spin_unlock(&glob->lru_lock);
1808 			return -EBUSY;
1809 		}
1810 
1811 		bo = list_first_entry(&glob->swap_lru,
1812 				      struct ttm_buffer_object, swap);
1813 		kref_get(&bo->list_kref);
1814 
1815 		if (!list_empty(&bo->ddestroy)) {
1816 			spin_unlock(&glob->lru_lock);
1817 			(void) ttm_bo_cleanup_refs(bo, false, false, false);
1818 			kref_put(&bo->list_kref, ttm_bo_release_list);
1819 			continue;
1820 		}
1821 
1822 		/**
1823 		 * Reserve buffer. Since we unlock while sleeping, we need
1824 		 * to re-check that nobody removed us from the swap-list while
1825 		 * we slept.
1826 		 */
1827 
1828 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1829 		if (unlikely(ret == -EBUSY)) {
1830 			spin_unlock(&glob->lru_lock);
1831 			ttm_bo_wait_unreserved(bo, false);
1832 			kref_put(&bo->list_kref, ttm_bo_release_list);
1833 			spin_lock(&glob->lru_lock);
1834 		}
1835 	}
1836 
1837 	BUG_ON(ret != 0);
1838 	put_count = ttm_bo_del_from_lru(bo);
1839 	spin_unlock(&glob->lru_lock);
1840 
1841 	ttm_bo_list_ref_sub(bo, put_count, true);
1842 
1843 	/**
1844 	 * Wait for GPU, then move to system cached.
1845 	 */
1846 
1847 	spin_lock(&bo->bdev->fence_lock);
1848 	ret = ttm_bo_wait(bo, false, false, false);
1849 	spin_unlock(&bo->bdev->fence_lock);
1850 
1851 	if (unlikely(ret != 0))
1852 		goto out;
1853 
1854 	if ((bo->mem.placement & swap_placement) != swap_placement) {
1855 		struct ttm_mem_reg evict_mem;
1856 
1857 		evict_mem = bo->mem;
1858 		evict_mem.mm_node = NULL;
1859 		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1860 		evict_mem.mem_type = TTM_PL_SYSTEM;
1861 
1862 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1863 					     false, false, false);
1864 		if (unlikely(ret != 0))
1865 			goto out;
1866 	}
1867 
1868 	ttm_bo_unmap_virtual(bo);
1869 
1870 	/**
1871 	 * Swap out. Buffer will be swapped in again as soon as
1872 	 * anyone tries to access a ttm page.
1873 	 */
1874 
1875 	if (bo->bdev->driver->swap_notify)
1876 		bo->bdev->driver->swap_notify(bo);
1877 
1878 	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1879 out:
1880 
1881 	/**
1883 	 * Unreserve without putting on LRU to avoid swapping out an
1884 	 * already swapped buffer.
1885 	 */
1886 
1887 	atomic_set(&bo->reserved, 0);
1888 	wake_up_all(&bo->event_queue);
1889 	kref_put(&bo->list_kref, ttm_bo_release_list);
1890 	return ret;
1891 }
1892 
1893 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1894 {
1895 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1896 		;
1897 }
1898 EXPORT_SYMBOL(ttm_bo_swapout_all);
1899