/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/mdev.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>

#include "display/skl_universal_plane_regs.h"

#include "gem/i915_gem_dmabuf.h"

#include "gvt.h"
#include "i915_drv.h"

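/*
 * On gen8+, bits 63:12 of a GGTT PTE carry the page address while the low
 * bits carry flag/attribute bits, so masking with GENMASK_ULL(63, 12)
 * recovers the DMA address of the page the entry points at.
 */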
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

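/*
 * Back the proxy GEM object with the guest framebuffer pages: walk the GGTT
 * entries covering the guest framebuffer, pin each guest page through the
 * GVT DMA interface and record the resulting DMA address in a freshly
 * allocated sg_table. On any pin failure, every page mapped so far is
 * unmapped again before the table is torn down.
 */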
static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	unsigned int page_num; /* limited by sg_alloc_table */

	if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num))
		return -E2BIG;

	page_num = obj->base.size >> PAGE_SHIFT;
	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

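/*
 * Tear down the sg_table built by vgpu_gem_get_pages(). While the object is
 * still exported as a dma-buf, each guest page pinned at get_pages() time
 * is unmapped again before the table itself is freed.
 */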
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			intel_gvt_dma_unmap_guest_page(vgpu,
						       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

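/*
 * kref release callback for an intel_vgpu_dmabuf_obj. If the owning vGPU is
 * still active, the object is also unlinked from the vGPU's dmabuf list and
 * its id released; otherwise the object has already been orphaned by
 * intel_vgpu_dmabuf_cleanup() and only its memory needs freeing.
 */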
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
	    !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

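/*
 * GEM release hook: the GEM object is going away, so break its link to the
 * dma-buf and drop the reference it held on the backing dmabuf_obj. The
 * vGPU's dmabuf_lock is taken only while the vGPU still exists; once it has
 * been removed, the orphaned dmabuf_obj can be put directly.
 */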
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

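/*
 * Wrap the decoded guest framebuffer in a proxy GEM object. The object is
 * created read-only in the GTT domain, and on gen9+ the guest's framebuffer
 * modifier is translated into the matching i915 tiling mode and stride so
 * that importers see the correct layout.
 */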
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (GRAPHICS_VER(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

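/* A cursor hotspot is only valid when it falls inside the cursor plane. */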
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

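/*
 * Decode the guest's primary or cursor plane into an intel_vgpu_fb_info,
 * deriving the framebuffer size from the stride and the tile-rounded
 * height, and validating that the framebuffer address is page aligned and
 * lies inside a valid GGTT range.
 */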
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

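/*
 * Look up an already-exposed dmabuf_obj whose decoded framebuffer matches
 * the latest plane state, so that a repeated query can reuse it instead of
 * exposing a new object.
 */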
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (!dmabuf_obj->info)
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

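/* Look up a dmabuf_obj on the vGPU's list by its dmabuf id. */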
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

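/* Copy the decoded framebuffer attributes into the user-visible plane info. */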
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

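/*
 * Back the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: decode the requested guest
 * plane, reuse a matching dmabuf_obj when one is already exposed, and
 * otherwise allocate a new one, assign it an id and add it to the vGPU's
 * list. An extra "initref" reference keeps the object alive between this
 * call and the following VFIO_DEVICE_GET_GFX_DMABUF ioctl.
 */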
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				      VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: reuse dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/*
 * Associate an exposed dma-buf with its dmabuf_obj: wrap the decoded
 * framebuffer in a proxy GEM object, export it as a dma-buf and return an
 * installed file descriptor. The reference taken here keeps the dmabuf_obj
 * alive for the exported buffer, replacing the temporary initref from the
 * query step.
 */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

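/*
 * Called on vGPU removal: orphan every dmabuf_obj still on the list by
 * clearing its vgpu pointer, release its id, and drop the initref taken at
 * query time. Objects still referenced by an exported dma-buf survive until
 * their final dmabuf_obj_put().
 */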
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}