/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include <linux/delay.h>

#include <drm/drm_print.h>
#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

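/*
 * A qxl ring lives in memory shared with the device: a qxl_ring_header
 * holding the producer/consumer indices and notify thresholds, followed by
 * a power-of-two array of fixed-size elements.
 */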
struct ring {
	struct qxl_ring_header header;
	uint8_t elements[];
};

struct qxl_ring {
	struct ring *ring;
	int element_size;
	int n_elements;
	int prod_notify;
	wait_queue_head_t *push_event;
	spinlock_t lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	spin_lock_init(&ring->lock);
	return ring;
}

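/*
 * Returns nonzero when the ring has room for another element; when full,
 * arms notify_on_cons so the device interrupts us once it has consumed the
 * next element.
 */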
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

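/*
 * Copy one element into the ring and advance the producer index.  If the
 * ring is full, busy-wait when we cannot sleep, otherwise block on
 * push_event until the device has consumed an element.  The device is
 * kicked with an outb on prod_notify once prod reaches notify_on_prod.
 */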
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

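/*
 * Copy the element at the consumer index out of the ring, if any.  On an
 * empty ring, arm notify_on_prod so the device notifies us when it next
 * produces an element, and return false.
 */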
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		schedule_work(&qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

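/*
 * Drain the release ring.  Each id popped from the ring names a chain of
 * releases linked through info->next; walk the chain, freeing every
 * release, then wake anyone blocked on release_event.  Returns the number
 * of releases freed.
 */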
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		DRM_DEBUG_DRIVER("popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
					 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	wake_up_all(&qdev->release_event);
	DRM_DEBUG_DRIVER("%d\n", i);

	return i;
}

int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

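/*
 * Submit an asynchronous io command: wait for any previously submitted
 * command to be acked (irq_received_io_cmd catching up with
 * last_sent_io_cmd), write val to the port, then wait up to five seconds
 * for the completion interrupt.  A timeout is swallowed on the assumption
 * that the "hw" has gone away; -ERESTARTSYS is returned if an
 * interruptible wait is broken by a signal.
 */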
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail; the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

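/*
 * Ask the device to finish rendering (part of) a surface into its backing
 * memory.  The area must lie within the surface bounds; the primary
 * surface is always surface id 0.
 */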
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_bo->is_primary = false;
	drm_gem_object_put(&qdev->primary_bo->tbo.base);
	qdev->primary_bo = NULL;
}

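/*
 * Make bo the primary surface: fill the create_surface block of the shared
 * ram header from the bo's surface parameters and issue
 * QXL_IO_CREATE_PRIMARY_ASYNC.  A GEM reference is held on the bo for as
 * long as it remains the primary.
 */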
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	if (WARN_ON(qdev->primary_bo))
		return;

	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, 0);

	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_bo = bo;
	qdev->primary_bo->is_primary = true;
	drm_gem_object_get(&qdev->primary_bo->tbo.base);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

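/*
 * Allocate a surface id from the idr.  Id 0 is reserved for the primary
 * surface, so allocation starts at 1.  If we run past the number of
 * surfaces the device advertises, reap a couple of hw surfaces and retry.
 */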
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

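/*
 * Create the surface on the device side: build a QXL_SURFACE_CMD_CREATE
 * command in a release, push it on the command ring, and publish the bo in
 * the idr so the reaper can find it.
 */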
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	return 0;
}

static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	long ret;

	ret = qxl_bo_reserve(surf);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	if (stall) {
		ret = dma_resv_wait_timeout(surf->tbo.base.resv,
					    DMA_RESV_USAGE_BOOKKEEP, true,
					    15 * HZ);
		if (ret > 0)
			ret = 0;
		else if (ret == 0)
			ret = -EBUSY;
	} else {
		ret = dma_resv_test_signaled(surf->tbo.base.resv,
					     DMA_RESV_USAGE_BOOKKEEP);
		/* dma_resv_test_signaled() returns true when idle */
		ret = ret ? 0 : -EBUSY;
	}

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

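/*
 * Evict up to max_to_reap hw surfaces to free surface ids.  The idr scan
 * starts just past the most recently allocated id, so the least recently
 * allocated surfaces are tried first; if nothing can be reaped without
 * waiting, retry once more, this time stalling on each surface's fences.
 */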
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted half way - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}