Lines Matching refs:qdev
60 struct qxl_device *qdev; in qxl_fence_wait() local
63 qdev = container_of(fence->lock, struct qxl_device, release_lock); in qxl_fence_wait()
65 if (!wait_event_timeout(qdev->release_event, in qxl_fence_wait()
67 (qxl_io_notify_oom(qdev), 0)), in qxl_fence_wait()
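
The fence-wait matches above (lines 60-67) recover the qxl_device from the fence's spinlock with container_of() and then poll wait_event_timeout(), using a comma expression so that every evaluation of the condition also kicks the device's out-of-memory notification before waiting again. Below is a minimal stand-alone sketch of that pattern, assuming a hypothetical my_device wrapper, a my_notify_oom() stub and an arbitrary 100 ms poll interval; it illustrates the shape of the code, not the driver's exact implementation.

#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

/* Hypothetical device: only the two fields the pattern needs. */
struct my_device {
	spinlock_t release_lock;          /* lock handed to dma_fence_init() */
	wait_queue_head_t release_event;  /* woken when releases complete    */
};

static void my_notify_oom(struct my_device *dev)
{
	/* poke the device so it keeps draining its release ring */
}

/*
 * Wait for @fence.  When the fence is not yet signaled, the comma
 * expression notifies the device and evaluates to 0 ("not done"), so
 * the loop simply waits again until the fence signals.
 */
static void my_fence_wait(struct dma_fence *fence)
{
	struct my_device *dev =
		container_of(fence->lock, struct my_device, release_lock);

	while (!wait_event_timeout(dev->release_event,
				   dma_fence_is_signaled(fence) ||
				   (my_notify_oom(dev), 0),
				   msecs_to_jiffies(100)))
		;	/* timed out: loop, notify again, keep waiting */
}
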
84 qxl_release_alloc(struct qxl_device *qdev, int type, in qxl_release_alloc() argument
103 spin_lock(&qdev->release_idr_lock); in qxl_release_alloc()
104 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); in qxl_release_alloc()
105 release->base.seqno = ++qdev->release_seqno; in qxl_release_alloc()
106 spin_unlock(&qdev->release_idr_lock); in qxl_release_alloc()
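
qxl_release_alloc() (lines 84-106) hands each new release an IDR handle and a monotonically increasing sequence number, both under release_idr_lock; GFP_NOWAIT is used because the allocation happens while the spinlock is held. A simplified sketch of that spinlock-protected IDR pattern follows; my_release_table and the function names are illustrative assumptions, not the driver's types.

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Hypothetical bookkeeping: an IDR of live releases plus a sequence
 * counter, both serialized by one spinlock (release_idr_lock in qxl). */
struct my_release_table {
	spinlock_t lock;
	struct idr idr;
	u64 seqno;
};

static void my_release_table_init(struct my_release_table *t)
{
	spin_lock_init(&t->lock);
	idr_init(&t->idr);
	t->seqno = 0;
}

/*
 * Register @entry and return its handle (>= 1) or a negative errno.
 * GFP_NOWAIT because we must not sleep under the spinlock.
 */
static int my_release_alloc_id(struct my_release_table *t, void *entry,
			       u64 *seqno)
{
	int handle;

	spin_lock(&t->lock);
	handle = idr_alloc(&t->idr, entry, 1, 0, GFP_NOWAIT);
	*seqno = ++t->seqno;
	spin_unlock(&t->lock);

	return handle;
}
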
135 qxl_release_free(struct qxl_device *qdev, in qxl_release_free() argument
141 qxl_surface_id_dealloc(qdev, release->surface_release_id); in qxl_release_free()
143 spin_lock(&qdev->release_idr_lock); in qxl_release_free()
144 idr_remove(&qdev->release_idr, release->id); in qxl_release_free()
145 spin_unlock(&qdev->release_idr_lock); in qxl_release_free()
157 atomic_dec(&qdev->release_count); in qxl_release_free()
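
The free path (lines 135-157) mirrors this: the surface id is released, the handle is dropped from the same IDR under release_idr_lock, and an outstanding-release counter is decremented. Continuing the hypothetical my_release_table sketch above:

#include <linux/atomic.h>

/* Unregister a handle and retire the release; @outstanding models the
 * driver's qdev->release_count. */
static void my_release_free_id(struct my_release_table *t, int handle,
			       atomic_t *outstanding)
{
	spin_lock(&t->lock);
	idr_remove(&t->idr, handle);
	spin_unlock(&t->lock);

	atomic_dec(outstanding);
}
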
160 static int qxl_release_bo_alloc(struct qxl_device *qdev, in qxl_release_bo_alloc() argument
165 return qxl_bo_create(qdev, PAGE_SIZE, false, true, in qxl_release_bo_alloc()
255 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, in qxl_alloc_surface_release_reserved() argument
266 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); in qxl_alloc_surface_release_reserved()
276 info = qxl_release_map(qdev, *release); in qxl_alloc_surface_release_reserved()
278 qxl_release_unmap(qdev, *release, info); in qxl_alloc_surface_release_reserved()
282 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), in qxl_alloc_surface_release_reserved()
286 int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, in qxl_alloc_release_reserved() argument
312 idr_ret = qxl_release_alloc(qdev, type, release); in qxl_alloc_release_reserved()
318 atomic_inc(&qdev->release_count); in qxl_alloc_release_reserved()
320 mutex_lock(&qdev->release_mutex); in qxl_alloc_release_reserved()
321 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { in qxl_alloc_release_reserved()
322 free_bo = qdev->current_release_bo[cur_idx]; in qxl_alloc_release_reserved()
323 qdev->current_release_bo_offset[cur_idx] = 0; in qxl_alloc_release_reserved()
324 qdev->current_release_bo[cur_idx] = NULL; in qxl_alloc_release_reserved()
326 if (!qdev->current_release_bo[cur_idx]) { in qxl_alloc_release_reserved()
327 ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority); in qxl_alloc_release_reserved()
329 mutex_unlock(&qdev->release_mutex); in qxl_alloc_release_reserved()
334 qxl_release_free(qdev, *release); in qxl_alloc_release_reserved()
339 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); in qxl_alloc_release_reserved()
342 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; in qxl_alloc_release_reserved()
343 qdev->current_release_bo_offset[cur_idx]++; in qxl_alloc_release_reserved()
348 mutex_unlock(&qdev->release_mutex); in qxl_alloc_release_reserved()
357 qxl_release_free(qdev, *release); in qxl_alloc_release_reserved()
361 info = qxl_release_map(qdev, *release); in qxl_alloc_release_reserved()
363 qxl_release_unmap(qdev, *release, info); in qxl_alloc_release_reserved()
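
qxl_alloc_release_reserved() (lines 286-363) sub-allocates fixed-size release slots out of a per-type "current" buffer object under release_mutex: when the current BO has no free slot left it is retired and replaced, each release records its byte offset inside the BO (slot index times slot size), and on error the partially built release is torn down again with qxl_release_free(). The following is a hedged, stand-alone sketch of that slot-allocator shape; the my_* names, slot sizes and the kzalloc-backed "BO" are stand-ins chosen for illustration only.

#include <linux/mutex.h>
#include <linux/slab.h>

#define MY_SLOT_SIZE	64	/* bytes per release slot; illustrative value */
#define MY_SLOTS_PER_BO	64	/* slots carved out of each buffer object     */

/* Hypothetical stand-in for one release buffer object. */
struct my_bo {
	u8 data[MY_SLOT_SIZE * MY_SLOTS_PER_BO];
};

struct my_slot_pool {
	struct mutex lock;	  /* plays the role of release_mutex          */
	struct my_bo *current_bo; /* BO that slots are currently carved from  */
	unsigned int next_slot;	  /* index of the next free slot              */
};

/*
 * Reserve one slot.  On success, *bo_out/*offset_out tell the caller where
 * its release info may be written.  A full BO is retired and replaced.
 */
static int my_reserve_slot(struct my_slot_pool *p, struct my_bo **bo_out,
			   unsigned long *offset_out)
{
	struct my_bo *retired = NULL;

	mutex_lock(&p->lock);

	/* Roll over to a fresh BO once the current one is exhausted. */
	if (p->current_bo && p->next_slot + 1 >= MY_SLOTS_PER_BO) {
		retired = p->current_bo;
		p->current_bo = NULL;
		p->next_slot = 0;
	}

	if (!p->current_bo) {
		p->current_bo = kzalloc(sizeof(*p->current_bo), GFP_KERNEL);
		if (!p->current_bo) {
			mutex_unlock(&p->lock);
			kfree(retired);
			return -ENOMEM;
		}
	}

	*bo_out = p->current_bo;
	*offset_out = (unsigned long)p->next_slot * MY_SLOT_SIZE;
	p->next_slot++;

	mutex_unlock(&p->lock);
	kfree(retired);	/* the driver drops its reference with qxl_bo_unref() */
	return 0;
}
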
368 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, in qxl_release_from_id_locked() argument
373 spin_lock(&qdev->release_idr_lock); in qxl_release_from_id_locked()
374 release = idr_find(&qdev->release_idr, id); in qxl_release_from_id_locked()
375 spin_unlock(&qdev->release_idr_lock); in qxl_release_from_id_locked()
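
Lookup by handle (lines 368-375) is a plain idr_find() under the same spinlock. Continuing the my_release_table sketch:

/* Resolve a handle back to the entry registered in my_release_alloc_id(),
 * or NULL if the handle is unknown. */
static void *my_release_lookup(struct my_release_table *t, int handle)
{
	void *entry;

	spin_lock(&t->lock);
	entry = idr_find(&t->idr, handle);
	spin_unlock(&t->lock);

	return entry;
}
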
384 union qxl_release_info *qxl_release_map(struct qxl_device *qdev, in qxl_release_map() argument
391 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK); in qxl_release_map()
398 void qxl_release_unmap(struct qxl_device *qdev, in qxl_release_unmap() argument
406 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); in qxl_release_unmap()
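
qxl_release_map()/qxl_release_unmap() (lines 384-406) map only the single page that contains the release's info structure: the page is selected with release_offset & PAGE_MASK and the returned mapping is then advanced by the offset within that page. Below is a hedged sketch of that arithmetic using kmap_local_page() on a plain page array; my_map_info/my_unmap_info are assumptions standing in for the driver's qxl_bo_kmap_atomic_page()/qxl_bo_kunmap_atomic_page() helpers.

#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Map the structure that lives at byte @offset inside a buffer backed by
 * @pages.  Only the containing page is mapped.
 */
static void *my_map_info(struct page **pages, unsigned long offset)
{
	struct page *page = pages[offset >> PAGE_SHIFT]; /* page given by offset & PAGE_MASK */
	void *vaddr = kmap_local_page(page);

	return vaddr + (offset & ~PAGE_MASK);	/* step to the offset within the page */
}

static void my_unmap_info(void *info)
{
	/* mask back to the page-aligned address kmap_local_page() returned */
	kunmap_local((void *)((unsigned long)info & PAGE_MASK));
}
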
413 struct qxl_device *qdev; in qxl_release_fence_buffer_objects() local
423 qdev = container_of(bdev, struct qxl_device, mman.bdev); in qxl_release_fence_buffer_objects()
429 dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, in qxl_release_fence_buffer_objects()
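
Finally, qxl_release_fence_buffer_objects() (lines 413-429) recovers the qxl_device from the TTM device pointer with container_of() and initializes the release's embedded fence against the device-wide release_lock. A minimal sketch of that dma_fence_init() usage with hypothetical my_* names follows; the ops table is reduced to the two mandatory callbacks, whereas the driver supplies qxl_fence_ops with its own wait implementation.

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

struct my_device {
	spinlock_t release_lock;  /* one spinlock shared by all release fences */
	u64 fence_context;	  /* e.g. from dma_fence_context_alloc(1)      */
};

struct my_release {
	struct dma_fence base;	  /* fence embedded in the release            */
	u64 id;			  /* used as the fence seqno in this sketch   */
};

static const char *my_fence_get_driver_name(struct dma_fence *f)
{
	return "my-driver";
}

static const char *my_fence_get_timeline_name(struct dma_fence *f)
{
	return "release";
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name   = my_fence_get_driver_name,
	.get_timeline_name = my_fence_get_timeline_name,
	/* the real driver also overrides .wait (see qxl_fence_wait above) */
};

/* Arm the release's fence: all releases share the device's spinlock and
 * fence context, distinguished only by their sequence numbers. */
static void my_release_fence_init(struct my_device *dev, struct my_release *rel)
{
	dma_fence_init(&rel->base, &my_fence_ops, &dev->release_lock,
		       dev->fence_context, rel->id);
}
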