/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

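/*
 * Note on reference counting: each buffer object carries two krefs.
 * bo->kref tracks the object itself and its device address space
 * (released through ttm_bo_release()), while bo->list_kref pins the
 * object for as long as it sits on an LRU, swap or delayed-destroy
 * list (released through ttm_bo_release_list() below).
 */
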
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return -ERESTART;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bdev->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

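/*
 * Buffer object reservation: bo->reserved acts as a per-buffer lock,
 * taken by an atomic cmpxchg. When callers reserve several buffers as
 * part of one validation pass they pass use_sequence/sequence, so that
 * a competing reserver can detect, via the sequence comparison below,
 * that it should back off with -EAGAIN and release its reservations
 * rather than deadlock. (This is a reading of the code below; the
 * ttm_bo_reserve() kerneldoc in ttm_bo_driver.h is the authoritative
 * description.)
 */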
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
			(sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&bdev->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;
	int ret;

	spin_lock(&bdev->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Called with bo->mutex held.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {

			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}

	}

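	/*
	 * Notify the driver, then select the actual move mechanism:
	 * a ttm-to-ttm move can be done by simply rebinding the ttm
	 * (ttm_bo_move_ttm), drivers may provide an accelerated hook
	 * (driver->move), and ttm_bo_move_memcpy is the last-resort
	 * fallback.
	 */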
	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * If the bo is idle, remove it from the delayed-destroy and LRU lists
 * and drop the list reference.
 * If it is not idle and already on the delayed-destroy list, do nothing.
 * If it is not idle and not on the delayed-destroy list, add it there,
 * take a list_kref reference and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&bdev->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&bdev->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&bdev->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&bdev->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&bdev->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return ret;
}

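/*
 * Delayed-destroy work item: keep rescheduling itself (roughly every
 * 10 ms, clamped to at least one jiffy) as long as busy entries remain
 * on the ddestroy list.
 */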
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

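/*
 * Called via kref_put() with bdev->vm_lock held in write mode (see
 * ttm_bo_unref() below), which is why the lock is dropped across the
 * potentially sleeping cleanup and re-taken before returning.
 */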
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

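	/*
	 * Ask the driver where the buffer should be evicted to; if that
	 * placement cannot be satisfied, fall back to system memory.
	 */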
	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	spin_lock(&bdev->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&bdev->lru_lock);
	bo->evicted = true;
out:
	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

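	/*
	 * drm_mm_pre_get() preallocates free-list nodes so that the later
	 * drm_mm_get_block_atomic() should not need to allocate while
	 * lru_lock is held; if the preallocated nodes were consumed by a
	 * racing thread anyway, we drop the lock and retry from here.
	 */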
retry_pre_get:
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&bdev->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
					    false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&bdev->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&bdev->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&bdev->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		spin_unlock(&bdev->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&bdev->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&bdev->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&bdev->lru_lock);
					break;
				}
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->page_alignment);
				spin_unlock(&bdev->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->cpu_writers) == 0);

	if (ret == -ERESTARTSYS)
		ret = -ERESTART;

	return ret;
}

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&bdev->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&bdev->lru_lock);
	}
	return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
			     struct ttm_mem_reg *mem)
{
	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
		return 0;
	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
		return 0;

	return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move the buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags.
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);

int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
		       uint32_t set_flags, uint32_t clr_flags)
{
	uint32_t new_mask = set_flags | clr_flags;

	if ((bo->type == ttm_bo_type_user) &&
	    (clr_flags & TTM_PL_FLAG_CACHED)) {
		printk(KERN_ERR TTM_PFX
		       "User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
			printk(KERN_ERR TTM_PFX "Need to be root to modify"
			       " NO_EVICT status.\n");
			return -EINVAL;
		}

		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
			printk(KERN_ERR TTM_PFX
			       "Incompatible memory specification"
			       " for NO_EVICT buffer.\n");
			return -EINVAL;
		}
	}
	return 0;
}

int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
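	/*
	 * New objects start out reserved; the ttm_bo_unreserve() at the
	 * end of this function publishes the bo on the LRU lists.
	 */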
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return bdev->ttm_bo_size + 2 * page_array_size;
}

int ttm_buffer_object_create(struct ttm_bo_device *bdev,
			     unsigned long size,
			     enum ttm_bo_type type,
			     uint32_t flags,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     bool interruptible,
			     struct file *persistant_swap_storage,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	int ret;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;

	size_t acc_size =
	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size, false);
		return -ENOMEM;
	}

	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
				     page_alignment, buffer_start,
				     interruptible,
				     persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
			     uint32_t mem_type, bool allow_errors)
{
	int ret;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (ret && allow_errors)
		goto out;

	if (bo->mem.mem_type == mem_type)
		ret = ttm_bo_evict(bo, mem_type, false, false);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
		}
	}

out:
	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&bdev->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&bdev->lru_lock);
	}

	spin_unlock(&bdev->lru_lock);

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

		spin_lock(&bdev->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&bdev->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return -EINVAL;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->swap_lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&bdev->lru_lock);

	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	__free_page(bdev->dummy_read_page);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

/*
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing ttm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_global *mem_glob,
		       struct ttm_bo_driver *driver, uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
	spin_lock_init(&bdev->lru_lock);

	bdev->driver = driver;
	bdev->mem_glob = mem_glob;

	memset(bdev->man, 0, sizeof(bdev->man));

	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(bdev->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_err2;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
	bdev->need_dma32 = need_dma32;
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_err2;
	}

	bdev->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	return 0;
out_err2:
	ttm_bo_clean_mm(bdev, 0);
out_err1:
	__free_page(bdev->dummy_read_page);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
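		/*
		 * bo->lock was dropped across the wait, so only clear
		 * bo->sync_obj if no new fence was attached meanwhile.
		 */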
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return -ERESTART;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the ttm_bo_device::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_device *bdev =
	    container_of(shrink, struct ttm_bo_device, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&bdev->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&bdev->swap_lru))) {
			spin_unlock(&bdev->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&bdev->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/*
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&bdev->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/*
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->shrink) == 0)
		;
}