/* xref: /linux/drivers/gpu/drm/virtio/virtgpu_gem.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e) */
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"

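/*
 * virtio_gpu_gem_free_object - release the driver's reference on a GEM object
 *
 * Drops the reference held on the virtio_gpu_object backing @gem_obj; the
 * buffer is destroyed once the last reference is gone.
 */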
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);

	if (obj)
		virtio_gpu_object_unref(&obj);
}

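/*
 * virtio_gpu_alloc_object - allocate a virtio-gpu buffer object
 *
 * Thin wrapper around virtio_gpu_object_create(); returns the new object
 * or an ERR_PTR() on failure.
 */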
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
						  size_t size, bool kernel,
						  bool pinned)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;

	ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
	if (ret)
		return ERR_PTR(ret);

	return obj;
}

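/*
 * virtio_gpu_gem_create - allocate an object and create a GEM handle for it
 *
 * On success *obj_p points at the new GEM object and *handle_p holds the
 * handle that now owns the allocation reference.
 */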
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  uint64_t size,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->gem_base);
		return ret;
	}

	*obj_p = &obj->gem_base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->gem_base);

	*handle_p = handle;
	return 0;
}

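/*
 * virtio_gpu_mode_dumb_create - create a dumb buffer
 *
 * Allocates a page-aligned buffer for the requested width/height/bpp,
 * creates an XRGB8888 host resource for it and attaches the buffer's
 * backing pages to that resource.
 */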
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;
	int ret;
	uint32_t pitch;
	uint32_t resid;
	uint32_t format;

	pitch = args->width * ((args->bpp + 1) / 8);
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	format = virtio_gpu_translate_format(DRM_FORMAT_XRGB8888);
	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid, format,
				       args->width, args->height);

	/* attach the object to the resource */
	obj = gem_to_virtio_gpu_obj(gobj);
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto fail;

	obj->dumb = true;
	args->pitch = pitch;
	return ret;

fail:
	return ret;
}

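/*
 * virtio_gpu_mode_dumb_mmap - look up the mmap offset of a dumb buffer
 *
 * Resolves @handle for @file_priv and returns the object's fake mmap
 * offset in *offset_p.
 */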
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	obj = gem_to_virtio_gpu_obj(gobj);
	*offset_p = virtio_gpu_object_mmap_offset(obj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

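/*
 * virtio_gpu_gem_object_open - per-file open hook for a GEM object
 *
 * On virgl-capable devices, reserve the object and attach its host
 * resource to the opening file's rendering context; a no-op otherwise.
 */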
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return 0;

	r = virtio_gpu_object_reserve(qobj, false);
	if (r)
		return r;

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
	return 0;
}

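/*
 * virtio_gpu_gem_object_close - per-file close hook for a GEM object
 *
 * Mirrors virtio_gpu_gem_object_open(): on virgl-capable devices, detach
 * the object's host resource from the closing file's context.
 */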
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return;

	r = virtio_gpu_object_reserve(qobj, false);
	if (r)
		return;

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
}