// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_mmio_gem.h"

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>

#include "xe_device_types.h"

/**
 * DOC: Exposing MMIO regions to userspace
 *
 * In certain cases, the driver may allow userspace to mmap a portion of the hardware registers.
 *
 * This can be done as follows:
 * 1. Call xe_mmio_gem_create() to create a GEM object with an mmap-able fake offset.
 * 2. Use xe_mmio_gem_mmap_offset() on the created GEM object to retrieve the fake offset.
 * 3. Provide the fake offset to userspace.
 * 4. Userspace can call mmap with the fake offset. The length provided to mmap
 *    must match the size of the GEM object.
 * 5. When the region is no longer needed, call xe_mmio_gem_destroy() to release the GEM object.
 *
 * An illustrative sketch of this flow follows this comment.
 *
 * NOTE: The exposed MMIO region must be page-aligned with regard to its BAR offset and size.
 *
 * WARNING: Exposing MMIO regions to userspace can have security and stability implications.
 * Make sure not to expose any sensitive registers.
 */
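
/*
 * Illustrative sketch of the flow described above. The calling context (e.g.
 * an ioctl handler) and the variable names are hypothetical and not part of
 * this file:
 *
 *	struct xe_mmio_gem *gem;
 *	u64 offset;
 *
 *	gem = xe_mmio_gem_create(xe, file, phys_addr, size);
 *	if (IS_ERR(gem))
 *		return PTR_ERR(gem);
 *
 *	offset = xe_mmio_gem_mmap_offset(gem);
 *
 * The offset is then handed back to userspace (for example through an ioctl
 * argument). Once the region must no longer be accessible:
 *
 *	xe_mmio_gem_destroy(gem);
 */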

static void xe_mmio_gem_free(struct drm_gem_object *);
static int xe_mmio_gem_mmap(struct drm_gem_object *, struct vm_area_struct *);
static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *);

struct xe_mmio_gem {
	struct drm_gem_object base;
	phys_addr_t phys_addr;
};

static const struct vm_operations_struct vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
	.fault = xe_mmio_gem_vm_fault,
};

static const struct drm_gem_object_funcs xe_mmio_gem_funcs = {
	.free = xe_mmio_gem_free,
	.mmap = xe_mmio_gem_mmap,
	.vm_ops = &vm_ops,
};

static inline struct xe_mmio_gem *to_xe_mmio_gem(struct drm_gem_object *obj)
{
	return container_of(obj, struct xe_mmio_gem, base);
}

/**
 * xe_mmio_gem_create - Expose an MMIO region to userspace
 * @xe: The xe device
 * @file: DRM file descriptor
 * @phys_addr: Start of the exposed MMIO region
 * @size: The size of the exposed MMIO region
 *
 * This function creates a GEM object that exposes an MMIO region with an mmap-able
 * fake offset.
 *
 * See: "Exposing MMIO regions to userspace"
 *
 * Return: Pointer to the created GEM object on success, ERR_PTR on failure.
 */
struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
				       phys_addr_t phys_addr, size_t size)
{
	struct xe_mmio_gem *obj;
	struct drm_gem_object *base;
	int err;

	if ((phys_addr % PAGE_SIZE != 0) || (size % PAGE_SIZE != 0))
		return ERR_PTR(-EINVAL);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	base = &obj->base;
	base->funcs = &xe_mmio_gem_funcs;
	obj->phys_addr = phys_addr;

	drm_gem_private_object_init(&xe->drm, base, size);

	err = drm_gem_create_mmap_offset(base);
	if (err)
		goto free_gem;

	err = drm_vma_node_allow(&base->vma_node, file);
	if (err)
		goto free_gem;

	return obj;

free_gem:
	xe_mmio_gem_free(base);
	return ERR_PTR(err);
}

/**
 * xe_mmio_gem_mmap_offset - Return the mmap-able fake offset
 * @gem: the GEM object created with xe_mmio_gem_create()
 *
 * This function returns the mmap-able fake offset allocated during
 * xe_mmio_gem_create().
 *
 * See: "Exposing MMIO regions to userspace"
 */
u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem)
{
	return drm_vma_node_offset_addr(&gem->base.vma_node);
}
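
/*
 * Illustrative only (not part of this file): userspace consumes the fake
 * offset by passing it directly to mmap(2) on the DRM file descriptor, with
 * MAP_SHARED and a length equal to the GEM object's size (both are enforced
 * by xe_mmio_gem_mmap() below):
 *
 *	void *regs = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  drm_fd, offset);
 */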

static void xe_mmio_gem_free(struct drm_gem_object *base)
{
	struct xe_mmio_gem *obj = to_xe_mmio_gem(base);

	drm_gem_object_release(base);
	kfree(obj);
}

/**
 * xe_mmio_gem_destroy - Destroy the GEM object that exposes an MMIO region
 * @gem: the GEM object to destroy
 *
 * This function releases resources associated with the GEM object created by
 * xe_mmio_gem_create().
 *
 * See: "Exposing MMIO regions to userspace"
 */
void xe_mmio_gem_destroy(struct xe_mmio_gem *gem)
{
	xe_mmio_gem_free(&gem->base);
}

static int xe_mmio_gem_mmap(struct drm_gem_object *base, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != base->size)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 */
	vma->vm_pgoff = 0;
	vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
			  VM_DONTCOPY | VM_NORESERVE);

	/* Defer actual mapping to the fault handler. */
	return 0;
}

static void xe_mmio_gem_release_dummy_page(struct drm_device *dev, void *res)
{
	__free_page((struct page *)res);
}

static vm_fault_t xe_mmio_gem_vm_fault_dummy_page(struct vm_area_struct *vma)
{
	struct drm_gem_object *base = vma->vm_private_data;
	struct drm_device *dev = base->dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *page;
	unsigned long pfn;
	unsigned long i;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	if (drmm_add_action_or_reset(dev, xe_mmio_gem_release_dummy_page, page))
		return VM_FAULT_OOM;

	pfn = page_to_pfn(page);

	/* Map the entire VMA to the same dummy page */
	for (i = 0; i < base->size; i += PAGE_SIZE) {
		unsigned long addr = vma->vm_start + i;

		ret = vmf_insert_pfn(vma, addr, pfn);
		if (ret & VM_FAULT_ERROR)
			break;
	}

	return ret;
}

static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *base = vma->vm_private_data;
	struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
	struct drm_device *dev = base->dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long i;
	int idx;

	if (!drm_dev_enter(dev, &idx)) {
		/*
		 * Provide a dummy page to avoid SIGBUS for events such as hot-unplug.
		 * This gives userspace the option to recover instead of crashing.
		 * It is assumed that userspace will receive the notification via some
		 * other channel (e.g. drm uevent).
		 */
		return xe_mmio_gem_vm_fault_dummy_page(vma);
	}

	for (i = 0; i < base->size; i += PAGE_SIZE) {
		unsigned long addr = vma->vm_start + i;
		unsigned long phys_addr = obj->phys_addr + i;

		ret = vmf_insert_pfn(vma, addr, PHYS_PFN(phys_addr));
		if (ret & VM_FAULT_ERROR)
			break;
	}

	drm_dev_exit(idx);
	return ret;
}