1 /* 2 * Copyright 2017 Intel Corporation. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Zhiyuan Lv <zhiyuan.lv@intel.com> 25 * 26 * Contributors: 27 * Xiaoguang Chen 28 * Tina Zhang <tina.zhang@intel.com> 29 */ 30 31 #include <linux/dma-buf.h> 32 #include <linux/mdev.h> 33 34 #include <drm/drm_fourcc.h> 35 #include <drm/drm_plane.h> 36 37 #include "gem/i915_gem_dmabuf.h" 38 39 #include "i915_drv.h" 40 #include "gvt.h" 41 42 #include "display/skl_universal_plane_regs.h" 43 44 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) 45 46 static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj) 47 { 48 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 49 struct intel_vgpu *vgpu; 50 struct sg_table *st; 51 struct scatterlist *sg; 52 int i, j, ret; 53 gen8_pte_t __iomem *gtt_entries; 54 struct intel_vgpu_fb_info *fb_info; 55 unsigned int page_num; /* limited by sg_alloc_table */ 56 57 if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num)) 58 return -E2BIG; 59 60 page_num = obj->base.size >> PAGE_SHIFT; 61 fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info; 62 if (drm_WARN_ON(&dev_priv->drm, !fb_info)) 63 return -ENODEV; 64 65 vgpu = fb_info->obj->vgpu; 66 if (drm_WARN_ON(&dev_priv->drm, !vgpu)) 67 return -ENODEV; 68 69 st = kmalloc(sizeof(*st), GFP_KERNEL); 70 if (unlikely(!st)) 71 return -ENOMEM; 72 73 ret = sg_alloc_table(st, page_num, GFP_KERNEL); 74 if (ret) { 75 kfree(st); 76 return ret; 77 } 78 gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm + 79 (fb_info->start >> PAGE_SHIFT); 80 for_each_sg(st->sgl, sg, page_num, i) { 81 dma_addr_t dma_addr = 82 GEN8_DECODE_PTE(readq(>t_entries[i])); 83 if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) { 84 ret = -EINVAL; 85 goto out; 86 } 87 88 sg->offset = 0; 89 sg->length = PAGE_SIZE; 90 sg_dma_len(sg) = PAGE_SIZE; 91 sg_dma_address(sg) = dma_addr; 92 } 93 94 __i915_gem_object_set_pages(obj, st); 95 out: 96 if (ret) { 97 dma_addr_t dma_addr; 98 99 for_each_sg(st->sgl, sg, i, j) { 100 dma_addr = sg_dma_address(sg); 101 if (dma_addr) 102 intel_gvt_dma_unmap_guest_page(vgpu, dma_addr); 103 } 104 sg_free_table(st); 105 kfree(st); 106 } 107 108 return ret; 109 110 } 111 112 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, 113 struct sg_table *pages) 114 { 115 struct scatterlist *sg; 116 117 if (obj->base.dma_buf) { 118 struct intel_vgpu_fb_info *fb_info = obj->gvt_info; 119 struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; 120 struct intel_vgpu *vgpu = obj->vgpu; 121 int i; 122 123 for_each_sg(pages->sgl, sg, fb_info->size, i) 124 intel_gvt_dma_unmap_guest_page(vgpu, 125 sg_dma_address(sg)); 126 } 127 128 sg_free_table(pages); 129 kfree(pages); 130 } 131 132 static void dmabuf_gem_object_free(struct kref *kref) 133 { 134 struct intel_vgpu_dmabuf_obj *obj = 135 container_of(kref, struct intel_vgpu_dmabuf_obj, kref); 136 struct intel_vgpu *vgpu = obj->vgpu; 137 struct list_head *pos; 138 struct intel_vgpu_dmabuf_obj *dmabuf_obj; 139 140 if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) && 141 !list_empty(&vgpu->dmabuf_obj_list_head)) { 142 list_for_each(pos, &vgpu->dmabuf_obj_list_head) { 143 dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); 144 if (dmabuf_obj == obj) { 145 list_del(pos); 146 idr_remove(&vgpu->object_idr, 147 dmabuf_obj->dmabuf_id); 148 kfree(dmabuf_obj->info); 149 kfree(dmabuf_obj); 150 break; 151 } 152 } 153 } else { 154 /* Free the orphan dmabuf_objs here */ 155 kfree(obj->info); 156 kfree(obj); 157 } 158 } 159 160 161 static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj) 162 { 163 kref_get(&obj->kref); 164 } 165 166 static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj) 167 { 168 kref_put(&obj->kref, dmabuf_gem_object_free); 169 } 170 171 static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj) 172 { 173 174 struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info; 175 struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; 176 struct intel_vgpu *vgpu = obj->vgpu; 177 178 if (vgpu) { 179 mutex_lock(&vgpu->dmabuf_lock); 180 gem_obj->base.dma_buf = NULL; 181 dmabuf_obj_put(obj); 182 mutex_unlock(&vgpu->dmabuf_lock); 183 } else { 184 /* vgpu is NULL, as it has been removed already */ 185 gem_obj->base.dma_buf = NULL; 186 dmabuf_obj_put(obj); 187 } 188 } 189 190 static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = { 191 .name = "i915_gem_object_vgpu", 192 .flags = I915_GEM_OBJECT_IS_PROXY, 193 .get_pages = vgpu_gem_get_pages, 194 .put_pages = vgpu_gem_put_pages, 195 .release = vgpu_gem_release, 196 }; 197 198 static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, 199 struct intel_vgpu_fb_info *info) 200 { 201 static struct lock_class_key lock_class; 202 struct drm_i915_private *dev_priv = to_i915(dev); 203 struct drm_i915_gem_object *obj; 204 205 obj = i915_gem_object_alloc(); 206 if (obj == NULL) 207 return NULL; 208 209 drm_gem_private_object_init(dev, &obj->base, 210 roundup(info->size, PAGE_SIZE)); 211 i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0); 212 i915_gem_object_set_readonly(obj); 213 214 obj->read_domains = I915_GEM_DOMAIN_GTT; 215 obj->write_domain = 0; 216 if (GRAPHICS_VER(dev_priv) >= 9) { 217 unsigned int tiling_mode = 0; 218 unsigned int stride = 0; 219 220 switch (info->drm_format_mod) { 221 case DRM_FORMAT_MOD_LINEAR: 222 tiling_mode = I915_TILING_NONE; 223 break; 224 case I915_FORMAT_MOD_X_TILED: 225 tiling_mode = I915_TILING_X; 226 stride = info->stride; 227 break; 228 case I915_FORMAT_MOD_Y_TILED: 229 case I915_FORMAT_MOD_Yf_TILED: 230 tiling_mode = I915_TILING_Y; 231 stride = info->stride; 232 break; 233 default: 234 gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", 235 info->drm_format_mod); 236 } 237 obj->tiling_and_stride = tiling_mode | stride; 238 } else { 239 obj->tiling_and_stride = info->drm_format_mod ? 240 I915_TILING_X : 0; 241 } 242 243 return obj; 244 } 245 246 static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c) 247 { 248 if (c && c->x_hot <= c->width && c->y_hot <= c->height) 249 return true; 250 else 251 return false; 252 } 253 254 static int vgpu_get_plane_info(struct drm_device *dev, 255 struct intel_vgpu *vgpu, 256 struct intel_vgpu_fb_info *info, 257 int plane_id) 258 { 259 struct intel_vgpu_primary_plane_format p; 260 struct intel_vgpu_cursor_plane_format c; 261 int ret, tile_height = 1; 262 263 memset(info, 0, sizeof(*info)); 264 265 if (plane_id == DRM_PLANE_TYPE_PRIMARY) { 266 ret = intel_vgpu_decode_primary_plane(vgpu, &p); 267 if (ret) 268 return ret; 269 info->start = p.base; 270 info->start_gpa = p.base_gpa; 271 info->width = p.width; 272 info->height = p.height; 273 info->stride = p.stride; 274 info->drm_format = p.drm_format; 275 276 switch (p.tiled) { 277 case PLANE_CTL_TILED_LINEAR: 278 info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; 279 break; 280 case PLANE_CTL_TILED_X: 281 info->drm_format_mod = I915_FORMAT_MOD_X_TILED; 282 tile_height = 8; 283 break; 284 case PLANE_CTL_TILED_Y: 285 info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; 286 tile_height = 32; 287 break; 288 case PLANE_CTL_TILED_YF: 289 info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; 290 tile_height = 32; 291 break; 292 default: 293 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); 294 } 295 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { 296 ret = intel_vgpu_decode_cursor_plane(vgpu, &c); 297 if (ret) 298 return ret; 299 info->start = c.base; 300 info->start_gpa = c.base_gpa; 301 info->width = c.width; 302 info->height = c.height; 303 info->stride = c.width * (c.bpp / 8); 304 info->drm_format = c.drm_format; 305 info->drm_format_mod = 0; 306 info->x_pos = c.x_pos; 307 info->y_pos = c.y_pos; 308 309 if (validate_hotspot(&c)) { 310 info->x_hot = c.x_hot; 311 info->y_hot = c.y_hot; 312 } else { 313 info->x_hot = UINT_MAX; 314 info->y_hot = UINT_MAX; 315 } 316 } else { 317 gvt_vgpu_err("invalid plane id:%d\n", plane_id); 318 return -EINVAL; 319 } 320 321 info->size = info->stride * roundup(info->height, tile_height); 322 if (info->size == 0) { 323 gvt_vgpu_err("fb size is zero\n"); 324 return -EINVAL; 325 } 326 327 if (info->start & (PAGE_SIZE - 1)) { 328 gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start); 329 return -EFAULT; 330 } 331 332 if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) { 333 gvt_vgpu_err("invalid gma addr\n"); 334 return -EFAULT; 335 } 336 337 return 0; 338 } 339 340 static struct intel_vgpu_dmabuf_obj * 341 pick_dmabuf_by_info(struct intel_vgpu *vgpu, 342 struct intel_vgpu_fb_info *latest_info) 343 { 344 struct list_head *pos; 345 struct intel_vgpu_fb_info *fb_info; 346 struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL; 347 struct intel_vgpu_dmabuf_obj *ret = NULL; 348 349 list_for_each(pos, &vgpu->dmabuf_obj_list_head) { 350 dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); 351 if (!dmabuf_obj->info) 352 continue; 353 354 fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info; 355 if ((fb_info->start == latest_info->start) && 356 (fb_info->start_gpa == latest_info->start_gpa) && 357 (fb_info->size == latest_info->size) && 358 (fb_info->drm_format_mod == latest_info->drm_format_mod) && 359 (fb_info->drm_format == latest_info->drm_format) && 360 (fb_info->width == latest_info->width) && 361 (fb_info->height == latest_info->height)) { 362 ret = dmabuf_obj; 363 break; 364 } 365 } 366 367 return ret; 368 } 369 370 static struct intel_vgpu_dmabuf_obj * 371 pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id) 372 { 373 struct list_head *pos; 374 struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL; 375 struct intel_vgpu_dmabuf_obj *ret = NULL; 376 377 list_for_each(pos, &vgpu->dmabuf_obj_list_head) { 378 dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); 379 if (dmabuf_obj->dmabuf_id == id) { 380 ret = dmabuf_obj; 381 break; 382 } 383 } 384 385 return ret; 386 } 387 388 static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, 389 struct intel_vgpu_fb_info *fb_info) 390 { 391 gvt_dmabuf->drm_format = fb_info->drm_format; 392 gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; 393 gvt_dmabuf->width = fb_info->width; 394 gvt_dmabuf->height = fb_info->height; 395 gvt_dmabuf->stride = fb_info->stride; 396 gvt_dmabuf->size = fb_info->size; 397 gvt_dmabuf->x_pos = fb_info->x_pos; 398 gvt_dmabuf->y_pos = fb_info->y_pos; 399 gvt_dmabuf->x_hot = fb_info->x_hot; 400 gvt_dmabuf->y_hot = fb_info->y_hot; 401 } 402 403 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) 404 { 405 struct drm_device *dev = &vgpu->gvt->gt->i915->drm; 406 struct vfio_device_gfx_plane_info *gfx_plane_info = args; 407 struct intel_vgpu_dmabuf_obj *dmabuf_obj; 408 struct intel_vgpu_fb_info fb_info; 409 int ret = 0; 410 411 if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF | 412 VFIO_GFX_PLANE_TYPE_PROBE)) 413 return ret; 414 else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) || 415 (!gfx_plane_info->flags)) 416 return -EINVAL; 417 418 ret = vgpu_get_plane_info(dev, vgpu, &fb_info, 419 gfx_plane_info->drm_plane_type); 420 if (ret != 0) 421 goto out; 422 423 mutex_lock(&vgpu->dmabuf_lock); 424 /* If exists, pick up the exposed dmabuf_obj */ 425 dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info); 426 if (dmabuf_obj) { 427 update_fb_info(gfx_plane_info, &fb_info); 428 gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id; 429 430 /* This buffer may be released between query_plane ioctl and 431 * get_dmabuf ioctl. Add the refcount to make sure it won't 432 * be released between the two ioctls. 433 */ 434 if (!dmabuf_obj->initref) { 435 dmabuf_obj->initref = true; 436 dmabuf_obj_get(dmabuf_obj); 437 } 438 ret = 0; 439 gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n", 440 vgpu->id, kref_read(&dmabuf_obj->kref), 441 gfx_plane_info->dmabuf_id); 442 mutex_unlock(&vgpu->dmabuf_lock); 443 goto out; 444 } 445 446 mutex_unlock(&vgpu->dmabuf_lock); 447 448 /* Need to allocate a new one*/ 449 dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL); 450 if (unlikely(!dmabuf_obj)) { 451 gvt_vgpu_err("alloc dmabuf_obj failed\n"); 452 ret = -ENOMEM; 453 goto out; 454 } 455 456 dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info), 457 GFP_KERNEL); 458 if (unlikely(!dmabuf_obj->info)) { 459 gvt_vgpu_err("allocate intel vgpu fb info failed\n"); 460 ret = -ENOMEM; 461 goto out_free_dmabuf; 462 } 463 memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info)); 464 465 ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj; 466 467 dmabuf_obj->vgpu = vgpu; 468 469 ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT); 470 if (ret < 0) 471 goto out_free_info; 472 gfx_plane_info->dmabuf_id = ret; 473 dmabuf_obj->dmabuf_id = ret; 474 475 dmabuf_obj->initref = true; 476 477 kref_init(&dmabuf_obj->kref); 478 479 update_fb_info(gfx_plane_info, &fb_info); 480 481 INIT_LIST_HEAD(&dmabuf_obj->list); 482 mutex_lock(&vgpu->dmabuf_lock); 483 list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head); 484 mutex_unlock(&vgpu->dmabuf_lock); 485 486 gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id, 487 __func__, kref_read(&dmabuf_obj->kref), ret); 488 489 return 0; 490 491 out_free_info: 492 kfree(dmabuf_obj->info); 493 out_free_dmabuf: 494 kfree(dmabuf_obj); 495 out: 496 /* ENODEV means plane isn't ready, which might be a normal case. */ 497 return (ret == -ENODEV) ? 0 : ret; 498 } 499 500 /* To associate an exposed dmabuf with the dmabuf_obj */ 501 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) 502 { 503 struct drm_device *dev = &vgpu->gvt->gt->i915->drm; 504 struct intel_vgpu_dmabuf_obj *dmabuf_obj; 505 struct drm_i915_gem_object *obj; 506 struct dma_buf *dmabuf; 507 int dmabuf_fd; 508 int ret = 0; 509 510 mutex_lock(&vgpu->dmabuf_lock); 511 512 dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id); 513 if (dmabuf_obj == NULL) { 514 gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id); 515 ret = -EINVAL; 516 goto out; 517 } 518 519 obj = vgpu_create_gem(dev, dmabuf_obj->info); 520 if (obj == NULL) { 521 gvt_vgpu_err("create gvt gem obj failed\n"); 522 ret = -ENOMEM; 523 goto out; 524 } 525 526 obj->gvt_info = dmabuf_obj->info; 527 528 dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR); 529 if (IS_ERR(dmabuf)) { 530 gvt_vgpu_err("export dma-buf failed\n"); 531 ret = PTR_ERR(dmabuf); 532 goto out_free_gem; 533 } 534 535 ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR); 536 if (ret < 0) { 537 gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret); 538 goto out_free_dmabuf; 539 } 540 dmabuf_fd = ret; 541 542 dmabuf_obj_get(dmabuf_obj); 543 544 if (dmabuf_obj->initref) { 545 dmabuf_obj->initref = false; 546 dmabuf_obj_put(dmabuf_obj); 547 } 548 549 mutex_unlock(&vgpu->dmabuf_lock); 550 551 gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n" 552 " file count: %ld, GEM ref: %d\n", 553 vgpu->id, dmabuf_obj->dmabuf_id, 554 kref_read(&dmabuf_obj->kref), 555 dmabuf_fd, 556 file_count(dmabuf->file), 557 kref_read(&obj->base.refcount)); 558 559 i915_gem_object_put(obj); 560 561 return dmabuf_fd; 562 563 out_free_dmabuf: 564 dma_buf_put(dmabuf); 565 out_free_gem: 566 i915_gem_object_put(obj); 567 out: 568 mutex_unlock(&vgpu->dmabuf_lock); 569 return ret; 570 } 571 572 void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) 573 { 574 struct list_head *pos, *n; 575 struct intel_vgpu_dmabuf_obj *dmabuf_obj; 576 577 mutex_lock(&vgpu->dmabuf_lock); 578 list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) { 579 dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); 580 dmabuf_obj->vgpu = NULL; 581 582 idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); 583 list_del(pos); 584 585 /* dmabuf_obj might be freed in dmabuf_obj_put */ 586 if (dmabuf_obj->initref) { 587 dmabuf_obj->initref = false; 588 dmabuf_obj_put(dmabuf_obj); 589 } 590 591 } 592 mutex_unlock(&vgpu->dmabuf_lock); 593 } 594