/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->base.base);

	*handle_p = handle;
	return 0;
}

int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	/* only 32 bpp dumb buffers are supported */
	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}

int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	obj = gem_to_virtio_gpu_obj(gobj);
	*offset_p = virtio_gpu_object_mmap_offset(obj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
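
/*
 * Usage sketch (illustration only, not part of this file): userspace
 * reaches the two dumb-buffer helpers above through the generic DRM
 * ioctls.  Assuming an open DRM fd, the flow is roughly:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *
 * DRM_IOCTL_MODE_CREATE_DUMB ends up in virtio_gpu_mode_dumb_create()
 * and DRM_IOCTL_MODE_MAP_DUMB in virtio_gpu_mode_dumb_mmap(); error
 * handling is omitted for brevity.
 */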

int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return 0;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
	return 0;
}

void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, objs);
}

struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles,
			      u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			/* drop the references taken so far, then the array */
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}

void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}
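
/*
 * Putting the array helpers together: a typical command submission
 * (sketch only; the real callers live elsewhere in the driver) looks
 * roughly like:
 *
 *	objs = virtio_gpu_array_from_handles(file, handles, nents);
 *	if (!objs)
 *		return -ENOENT;
 *	ret = virtio_gpu_array_lock_resv(objs);
 *	... queue the command referencing objs ...
 *	virtio_gpu_array_add_fence(objs, fence);
 *	virtio_gpu_array_unlock_resv(objs);
 *
 * and once the host signals completion, from the virtqueue irq path
 * where sleeping is not allowed:
 *
 *	virtio_gpu_array_put_free_delayed(vgdev, objs);
 */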

void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		/* drop the spinlock around the free; freeing can sleep */
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}
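
/*
 * Note: the delayed-free machinery above assumes the device init path
 * has set up the list, lock and work item, roughly (sketch of what the
 * driver init is expected to do):
 *
 *	INIT_LIST_HEAD(&vgdev->obj_free_list);
 *	spin_lock_init(&vgdev->obj_free_lock);
 *	INIT_WORK(&vgdev->obj_free_work,
 *		  virtio_gpu_array_put_free_work);
 */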