/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

MODULE_IMPORT_NS("DMA_BUF");

static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
			       unsigned int *nents,
			       struct virtio_gpu_object *bo,
			       struct dma_buf_attachment *attach)
{
	struct scatterlist *sl;
	struct sg_table *sgt;
	long i, ret;

	dma_resv_assert_held(attach->dmabuf->resv);

	/* Wait for pending kernel fences before mapping the attachment. */
	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret <= 0)
		return ret < 0 ? ret : -ETIMEDOUT;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	*ents = kvmalloc_array(sgt->nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	/* Translate the DMA-mapped sg table into virtio mem entries. */
	*nents = sgt->nents;
	for_each_sgtable_dma_sg(sgt, sl, i) {
		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
		(*ents)[i].padding = 0;
	}

	bo->sgt = sgt;
	return 0;
}

static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct dma_buf_attachment *attach = obj->import_attach;

	/* Only imported objects have an attachment to tear down. */
	if (attach) {
		struct dma_buf *dmabuf = attach->dmabuf;

		dma_resv_lock(dmabuf->resv, NULL);

		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		dma_resv_unlock(dmabuf->resv);

		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
	}

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
				    struct virtio_gpu_object *bo,
				    struct dma_buf_attachment *attach)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object_params params = { 0 };
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret) {
		virtgpu_dma_buf_free_obj(&bo->base.base);
		return ret;
	}

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret)
		goto err_pin;

	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
	if (ret)
		goto err_import;

	params.blob = true;
	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	params.size = attach->dmabuf->size;

	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
					    ents, nents);
	bo->guest_blob = true;
	bo->attached = true;

	dma_buf_unpin(attach);
	dma_resv_unlock(resv);

	return 0;

err_import:
	dma_buf_unpin(attach);
err_pin:
	dma_resv_unlock(resv);
	virtgpu_dma_buf_free_obj(&bo->base.base);
	return ret;
}

static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
	.free = virtgpu_dma_buf_free_obj,
};

static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (bo->created && kref_read(&obj->refcount)) {
		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		bo->sgt = NULL;
	}
}

static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = virtgpu_dma_buf_move_notify
};

struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct dma_buf_attachment *attach;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
		return drm_gem_prime_import(dev, buf);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	obj = &bo->base.base;
	obj->funcs = &virtgpu_gem_dma_buf_funcs;
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_dynamic_attach(buf, dev->dev,
					&virtgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		kfree(bo);
		return ERR_CAST(attach);
	}

	obj->import_attach = attach;
	get_dma_buf(buf);

	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
	if (ret < 0)
		return ERR_PTR(ret);

	return obj;
}

struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}