/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

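/*
 * Undo the reservations taken so far: put any buffer that was taken
 * off the LRU lists back on them, clear its reserved flag and wake
 * up anyone waiting on it. Caller must hold glob->lru_lock.
 */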
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

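/*
 * Take all reserved buffers off the LRU lists, remembering in
 * entry->put_count how many list references each ttm_bo_del_from_lru()
 * call dropped so they can be released later, outside the spinlock.
 * Caller must hold glob->lru_lock.
 */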
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

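/*
 * Release the LRU list references deferred in entry->put_count.
 * Called after glob->lru_lock has been dropped.
 */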
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

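/*
 * Drop glob->lru_lock and sleep interruptibly until @bo becomes
 * unreserved. The buffers reserved so far are first taken off the
 * LRU lists so they cannot be evicted while the lock is dropped.
 * On error, all reservations taken so far are backed off. Returns
 * with glob->lru_lock held again.
 */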
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}

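/*
 * Undo a (possibly partial) set of reservations taken with
 * ttm_eu_reserve_buffers(): every reserved buffer on the list is put
 * back on the LRU lists and unreserved, and its waiters are woken.
 */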
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 *
 * A usage sketch follows the function below.
 */
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/*
			 * Reserved by another thread: drop the LRU lock,
			 * wait for the reservation to be released, then
			 * retry this bo.
			 */
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			/*
			 * Deadlock avoidance: we hold the newer validation
			 * sequence, so back off all our reservations, wait
			 * for this bo to become unreserved and restart the
			 * whole sequence.
			 */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/*
			 * Buffer is marked for CPU access: release all
			 * reservations and wait for CPU access to finish
			 * before retrying.
			 */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_cpu(bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
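
/*
 * Example: a minimal sketch of how a driver's execbuf path might use
 * these helpers. It is illustrative only and not taken from any
 * in-tree driver; my_lookup_bo(), my_submit() and my_get_fence() are
 * hypothetical driver functions, and NUM_BUFS is an arbitrary count.
 * Note that on failure ttm_eu_reserve_buffers() returns with no
 * buffers left reserved, so no explicit backoff is needed there.
 *
 *	struct ttm_validate_buffer val_bufs[NUM_BUFS];
 *	struct list_head val_list;
 *	int i, ret;
 *
 *	INIT_LIST_HEAD(&val_list);
 *	for (i = 0; i < NUM_BUFS; ++i) {
 *		val_bufs[i].bo = my_lookup_bo(i);
 *		val_bufs[i].new_sync_obj_arg = NULL;
 *		list_add_tail(&val_bufs[i].head, &val_list);
 *	}
 *
 *	ret = ttm_eu_reserve_buffers(&val_list);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_submit(&val_list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&val_list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&val_list, my_get_fence());
 *	return 0;
 */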
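
/*
 * Attach @sync_obj to every buffer on the list as its new
 * synchronization object, unreserve the buffers, and, once both
 * spinlocks have been dropped, release the references to the old
 * sync objects.
 */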
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&bdev->fence_lock);
	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		bo->sync_obj_arg = entry->new_sync_obj_arg;
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);