// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

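/*
 * Free callback for VRAM GEM objects.  If the resource made it to the
 * host, tear down any host-visible mapping first (checked under
 * host_visible_lock, which also guards vram_node in
 * virtio_gpu_vram_map()), then ask the host to unref the resource.
 * Note: the struct itself is not freed here; that appears to happen
 * later, once the host acknowledges the unref (see
 * virtio_gpu_cleanup_object() in virtgpu_object.c).
 */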
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
	}
}

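/*
 * No fault handler is needed: virtio_gpu_vram_mmap() inserts all PFNs
 * up front with io_remap_pfn_range(), so only the generic GEM
 * open/close refcounting hooks are wired up.
 */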
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

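/*
 * Map a host-visible blob into userspace.  This only works for objects
 * created with VIRTGPU_BLOB_FLAG_USE_MAPPABLE, and only after the host
 * has successfully mapped the resource into the host-visible region.
 */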
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

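	/*
	 * The MAP response handler (in virtgpu_vq.c) moves map_state from
	 * STATE_INITIALIZING to STATE_OK or STATE_ERR and wakes resp_wq;
	 * wait for that before trusting map_info and vram_node.
	 */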
	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

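	/* Apply the caching attributes the host reported for this mapping. */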
	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}

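/*
 * GEM vtable for VRAM objects.  It doubles as the type marker that
 * virtio_gpu_is_vram() compares against.
 */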
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};

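/*
 * VRAM objects are the only ones using the vtable above, so a pointer
 * comparison is enough to distinguish them from shmem-backed objects.
 */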
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}

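/*
 * Reserve a slice of the device's host-visible address space and ask
 * the host to map the blob there.  vram_node.start ends up holding the
 * guest-physical base address that virtio_gpu_vram_mmap() hands to
 * io_remap_pfn_range(); the host is given the offset relative to the
 * start of the host-visible region instead.
 */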
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

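	/* Carve a node out of the host-visible window for this object. */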
	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

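	/*
	 * The object array holds a reference on the BO while the MAP
	 * command is in flight; it is released when the command completes.
	 */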
	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/* TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}

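/*
 * Create a VRAM (host-memory blob) object.  drm_gem_private_object_init()
 * is used because no guest pages back the object; the storage lives on
 * the host and only becomes CPU-visible through the host-visible region
 * once mapped.
 */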
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

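	/*
	 * Create the blob on the host.  For mappable blobs, also request
	 * the host-visible mapping right away so that a later mmap() can
	 * succeed without further round trips.
	 */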
	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}