/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

#include "gem/i915_gem_dmabuf.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	unsigned int page_num; /* limited by sg_alloc_table */

	if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num))
		return -E2BIG;

	page_num = obj->base.size >> PAGE_SHIFT;
	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

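/*
 * Worked example for GEN8_DECODE_PTE() above (illustrative values, not
 * taken from the original source): a gen8+ GGTT entry packs the page
 * address in bits 63:12 and flag bits (present, cache attributes) in the
 * low bits. For a raw entry of 0x0000000123456003, masking with
 * GENMASK_ULL(63, 12) strips the flags and yields the page-aligned DMA
 * address 0x0000000123456000, which is then pinned and stored in the
 * corresponding sg list entry.
 */
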
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			intel_gvt_dma_unmap_guest_page(vgpu,
						       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = list_entry(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

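/*
 * The proxy GEM object created below owns no backing pages of its own;
 * vgpu_gem_get_pages() populates them on demand from the guest's GGTT
 * entries when the exported dma-buf is mapped. Note (an assumption about
 * the i915 packing, not stated in this file): tiling_and_stride carries
 * the stride in its upper bits and the I915_TILING_* mode in its low
 * bits, which works because a tiled stride is a multiple of the minimum
 * fence stride and so its low bits are always zero.
 */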
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (GRAPHICS_VER(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
		return true;
	else
		return false;
}

static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj,
					list);
		if (!dmabuf_obj->info)
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

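/*
 * Both lookup helpers walk vgpu->dmabuf_obj_list_head and are called with
 * vgpu->dmabuf_lock held by the ioctl paths below: pick_dmabuf_by_info()
 * matches the full framebuffer description so an unchanged plane can
 * reuse its cached dmabuf_obj, while pick_dmabuf_by_num() resolves the
 * dmabuf_id that was handed back to userspace by the query-plane ioctl.
 */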
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj,
					list);
		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

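/*
 * Handler for the VFIO_DEVICE_QUERY_GFX_PLANE ioctl. A probe call
 * (VFIO_GFX_PLANE_TYPE_DMABUF | VFIO_GFX_PLANE_TYPE_PROBE) merely reports
 * that dmabuf-based planes are supported; a plain DMABUF query decodes
 * the requested plane, reuses a cached dmabuf_obj when the framebuffer
 * description is unchanged, and otherwise allocates and publishes a new
 * one under an IDR-allocated dmabuf_id.
 */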
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				      VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

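/*
 * Reference lifecycle (summarizing the initref handling in the code):
 * query_plane takes one "initref" so the dmabuf_obj survives the window
 * between the two ioctls; the first get_dmabuf call takes a reference on
 * behalf of the exported GEM object and then drops initref. From that
 * point on, the dmabuf_obj lives exactly as long as exported buffers do,
 * released via vgpu_gem_release() -> dmabuf_obj_put().
 */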
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj,
					list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}
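
/*
 * Minimal userspace sketch of driving the two ioctls above (an
 * illustration, not part of this file; error handling omitted, and
 * "device_fd" is assumed to be an already-opened VFIO device fd):
 *
 *	struct vfio_device_gfx_plane_info plane = {
 *		.argsz = sizeof(plane),
 *		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *	int fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF,
 *		       &plane.dmabuf_id);
 *
 * On success "fd" is a dma-buf file descriptor for the guest framebuffer,
 * which the caller eventually closes to drop its reference.
 */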