/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <drm/drm_print.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)

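/*
 * Slot size and slot count per release bo, indexed by the cur_idx chosen in
 * qxl_alloc_release_reserved(): 0 = drawable, 1 = surface cmd, 2 = cursor cmd.
 */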
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

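/*
 * Release fences only signal once the host has processed the command, so the
 * wait nudges the device: whenever the waiter wakes on release_event and the
 * fence has not signalled yet, qxl_io_notify_oom() asks the host to flush
 * pending releases (the comma expression keeps the wait condition false so
 * the sleep continues), until the fence signals or the timeout expires.
 */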
static signed long qxl_fence_wait(struct dma_fence *fence, bool intr,
				  signed long timeout)
{
	struct qxl_device *qdev;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);

	if (!wait_event_timeout(qdev->release_event,
				(dma_fence_is_signaled(fence) ||
				 (qxl_io_notify_oom(qdev), 0)),
				timeout))
		return 0;

	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

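/*
 * Allocate a qxl_release and publish it in the release idr. Returns the new
 * handle (>= 1) on success or a negative errno; the handle is also stored as
 * release->id, which qxl_release_from_id_locked() later uses to find the
 * release again.
 */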
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, list);
		qxl_bo_unref(&entry->bo);
		list_del(&entry->list);
		kfree(entry);
	}
	release->release_bo = NULL;
}

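/*
 * Tear down a release. If its fence was initialised (base.ops is set by
 * qxl_release_fence_buffer_objects()), signal it and drop the reference,
 * leaving the dma_fence machinery to free the object; otherwise the release
 * is freed directly.
 */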
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release bos; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

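/*
 * Track a bo in the release's bo list, taking a reference so the bo stays
 * alive until qxl_release_free_list() drops it again; adding a bo that is
 * already on the list is a no-op.
 */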
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, list) {
		if (entry->bo == bo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->bo = bo;
	list_add_tail(&entry->list, &release->bos);
	return 0;
}

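/*
 * Make sure a bo is resident in its placement domain (pinned bos cannot move
 * and need no validation), reserve room for the release fence that will be
 * attached later, and make sure surface bos have a surface id allocated.
 */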
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

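/*
 * Lock every bo on the release with drm_exec, restarting on contention, then
 * validate each one. On failure the drm_exec state is cleaned up here, so
 * callers only call qxl_release_backoff_reserve_list() to undo a successful
 * reserve.
 */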
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if the only object on the release is the release itself there is
	   no need to reserve, since these objects are pinned */
	if (list_is_singular(&release->bos))
		return 0;

	drm_exec_init(&release->exec, no_intr ? 0 :
		      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&release->exec) {
		list_for_each_entry(entry, &release->bos, list) {
			ret = drm_exec_prepare_obj(&release->exec,
						   &entry->bo->tbo.base,
						   1);
			drm_exec_retry_on_contention(&release->exec);
			if (ret)
				goto error;
		}
	}

	list_for_each_entry(entry, &release->bos, list) {
		ret = qxl_release_validate_bo(entry->bo);
		if (ret)
			goto error;
	}
	return 0;
error:
	drm_exec_fini(&release->exec);
	return ret;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if the only object on the release is the release itself there is
	   no need to reserve, since these objects are pinned */
	if (list_is_singular(&release->bos))
		return;

	drm_exec_fini(&release->exec);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command: the destroy
		   command reuses the create command's bo, 64 bytes into its
		   128-byte slot */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

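/*
 * Allocate a release plus the device-visible command slot it describes.
 * Command memory is suballocated from a per-type "current" release bo; when
 * that bo runs out of slots it is swapped out (and unpinned and unreferenced
 * once the mutex is dropped) and a fresh bo is created.
 */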
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

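/* look up a release by its idr handle; returns NULL if the id is unknown */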
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

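/*
 * Map the page of the release bo that holds this release's slot and return a
 * pointer to the qxl_release_info inside it. This uses the atomic page kmap,
 * so the mapping must be undone promptly with qxl_release_unmap().
 */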
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

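/*
 * Initialise the release's fence and add it to the reservation object of
 * every bo on the list, then drop the locks taken by
 * qxl_release_reserve_list(). The fence context is faked from the release id
 * (see the comment inside), which only stays safe while these fences never
 * leave the driver.
 */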
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_device *bdev;
	struct qxl_bo_list *entry;
	struct qxl_device *qdev;
	struct qxl_bo *bo;

	/* if the only object on the release is the release itself there is
	   nothing to fence, since these objects are pinned */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
	bdev = bo->tbo.bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, list) {
		bo = entry->bo;

		dma_resv_add_fence(bo->tbo.base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
	}
	drm_exec_fini(&release->exec);
}