/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"
/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

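/*
 * Wait for a release fence to signal.  The host processes releases
 * asynchronously, so each pass around the wait also rings the OOM doorbell
 * (the comma expression evaluates qxl_io_notify_oom() and yields 0, keeping
 * the wait going) to nudge the device into flushing its release ring.
 * Returns 0 on timeout, otherwise the remaining jiffies.
 */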
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);

	if (!wait_event_timeout(qdev->release_event,
				(dma_fence_is_signaled(fence) ||
				 (qxl_io_notify_oom(qdev), 0)),
				timeout))
		return 0;

	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

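/*
 * Allocate a qxl_release and assign it a handle from the release idr plus a
 * monotonically increasing seqno.  The embedded fence is left uninitialized
 * (base.ops == NULL) until qxl_release_fence_buffer_objects() runs.
 * Returns the positive idr handle on success, negative errno on failure.
 */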
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

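/*
 * Drop the reference each list entry holds on its bo and free the entries
 * themselves, leaving the release without any tracked buffer objects.
 */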
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, list);
		qxl_bo_unref(&entry->bo);
		list_del(&entry->list);
		kfree(entry);
	}
	release->release_bo = NULL;
}

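/*
 * Tear down a release: return its surface id (if any), remove it from the
 * idr, and drop its buffer list.  If the fence was initialized, signal it
 * and drop our reference; the release memory is then freed through the
 * fence machinery once the last reference goes away.  Otherwise the fence
 * was never armed and the release can simply be kfree()d.
 */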
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}

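/*
 * Allocate one page of VRAM to suballocate release slots from.
 */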
static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

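/*
 * Track @bo in the release's buffer list, taking a reference on it.
 * Adding the same bo twice is a no-op.  Returns 0 or -ENOMEM.
 */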
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, list) {
		if (entry->bo == bo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->bo = bo;
	list_add_tail(&entry->list, &release->bos);
	return 0;
}

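/*
 * Make a bo ready for command submission: validate its placement (unless it
 * is pinned), reserve a fence slot in its reservation object, and make sure
 * it has a surface id allocated on the device.
 */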
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

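/*
 * Lock every bo on the release with drm_exec, retrying on contention, and
 * validate each one.  A singular list holds only the release bo itself,
 * which is pinned, so no locking is needed in that case.
 */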
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if the only object on the release is the release itself, there is
	   no need to reserve, since these objects are pinned */
	if (list_is_singular(&release->bos))
		return 0;

	drm_exec_init(&release->exec, no_intr ? 0 :
		      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&release->exec) {
		list_for_each_entry(entry, &release->bos, list) {
			ret = drm_exec_prepare_obj(&release->exec,
						   &entry->bo->tbo.base,
						   1);
			drm_exec_retry_on_contention(&release->exec);
			if (ret)
				goto error;
		}
	}

	list_for_each_entry(entry, &release->bos, list) {
		ret = qxl_release_validate_bo(entry->bo);
		if (ret)
			goto error;
	}
	return 0;
error:
	drm_exec_fini(&release->exec);
	return ret;
}

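/*
 * Undo qxl_release_reserve_list(): drop the drm_exec locks without fencing
 * the buffers, e.g. on an error path after reservation succeeded.
 */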
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if the only object on the release is the release itself, there is
	   no need to reserve, since these objects are pinned */
	if (list_is_singular(&release->bos))
		return;

	drm_exec_fini(&release->exec);
}

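/*
 * Allocate a release for a surface command.  A DESTROY command reuses the
 * bo of its CREATE command, stashed 64 bytes past the create release, so
 * the destroy path does not need a fresh bo slot; everything else falls
 * through to qxl_alloc_release_reserved().
 */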
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

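/*
 * Allocate a release of the given type and carve a slot for it out of the
 * current per-type release bo.  When the current bo is full, a fresh
 * page-sized bo is allocated and the old one is unpinned and released.
 * The idr handle is written into the mapped release info so the host can
 * hand the id back on the release ring.
 */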
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

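/*
 * Look up a release by its idr handle.  Returns NULL (with an error
 * message) if the handle is unknown; no reference is taken on the release.
 */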
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

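/*
 * Atomically map the page of the release bo that holds this release's slot
 * and return a pointer to its qxl_release_info.  Pair with
 * qxl_release_unmap().
 */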
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

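/*
 * Undo qxl_release_map(): recover the page address from the info pointer
 * and drop the atomic mapping.
 */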
void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

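/*
 * Initialize the release's embedded fence and attach it to the reservation
 * object of every bo on the release, then move the bos to the LRU tail and
 * drop the drm_exec locks taken by qxl_release_reserve_list().
 */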
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_device *bdev;
	struct qxl_bo_list *entry;
	struct qxl_device *qdev;
	struct qxl_bo *bo;

	/* if the only object on the release is the release itself, there is
	   no need to reserve, since these objects are pinned */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
	bdev = bo->tbo.bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, list) {
		bo = entry->bo;

		dma_resv_add_fence(bo->tbo.base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
	}
	drm_exec_fini(&release->exec);
}