xref: /linux/drivers/gpu/drm/ttm/ttm_execbuf_util.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

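/*
 * Undo the reservations taken so far: put buffers that were removed
 * from the LRU lists back on them, and unreserve each one. Called
 * with the global LRU lock held.
 */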
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		__ttm_bo_unreserve(bo);
	}
}

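/*
 * Take each reserved buffer off the LRU lists, recording in put_count
 * the number of list references to be dropped later by
 * ttm_eu_list_ref_sub(). Called with the global LRU lock held.
 */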
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

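/*
 * Release the LRU list references recorded in put_count by
 * ttm_eu_del_from_lru_locked(); done after the LRU lock has been
 * dropped.
 */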
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

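/*
 * Undo a (possibly partial) reservation made by ttm_eu_reserve_buffers():
 * unreserve every buffer on the list and finish the ww_acquire context,
 * if one was used.
 */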
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access (cpu_writers > 0),
 * all reservations taken so far are backed off and -EBUSY is returned.
 *
 * If reserving a buffer would deadlock with a concurrent validation
 * (the ww_mutex reservation returns -EDEADLK), every reservation taken
 * so far is dropped, the contended buffer is locked in the ww_mutex
 * slow path, and the whole list is retried. This prevents deadlocks
 * when multiple validators process the same buffers in different
 * orders. A usage sketch follows ttm_eu_reserve_buffers() below.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* Already reserved via the slow path after -EDEADLK? */
		if (entry->reserved)
			continue;

		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
				       ticket);

		if (ret == -EDEADLK) {
			/*
			 * We lost the race: back off every reservation,
			 * take the contended lock in the slow path, and
			 * if that succeeds start over from the top.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret) {
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
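/*
 * Usage sketch (illustrative only): one way a driver's submission path
 * might drive the helpers in this file. The buffer "bo", the function
 * my_validate_and_submit() and the sync_obj it produces are hypothetical
 * driver code, not TTM API:
 *
 *	struct ttm_validate_buffer val_buf;
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(list);
 *	void *sync_obj;
 *	int ret;
 *
 *	val_buf.bo = bo;
 *	list_add(&val_buf.head, &list);
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate_and_submit(&list, &sync_obj);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, sync_obj);
 */

/*
 * Attach a single sync object to every buffer on the list, put the
 * buffers back on the LRU, and unreserve them. Any old sync objects
 * are unreffed once the locks have been dropped, and the ww_acquire
 * context, if one was used, is finished.
 */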
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);