xref: /linux/drivers/gpu/drm/xe/xe_device.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>

#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_dma_buf.h"
#include "xe_drv.h"
#include "xe_engine.h"
#include "xe_exec.h"
#include "xe_gt.h"
#include "xe_irq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_vm.h"
#include "xe_vm_madvise.h"
#include "xe_wait_user_fence.h"

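/*
 * Per-client open: allocate an xe_file for this DRM file and initialize the
 * xarrays (and their locks) used to track the VMs and engines created by the
 * client.
 */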
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_file *xef;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return -ENOMEM;

	xef->drm = file;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->engine.lock);
	xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);

	file->driver_priv = xef;
	return 0;
}

static void device_kill_persitent_engines(struct xe_device *xe,
					  struct xe_file *xef);

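/*
 * Per-client close: kill and drop every engine still tracked by this client
 * (including its persistent engines), close and put its VMs, then free the
 * xe_file.
 */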
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_engine *e;
	unsigned long idx;

	mutex_lock(&xef->engine.lock);
	xa_for_each(&xef->engine.xa, idx, e) {
		xe_engine_kill(e);
		xe_engine_put(e);
	}
	mutex_unlock(&xef->engine.lock);
	mutex_destroy(&xef->engine.lock);
	device_kill_persitent_engines(xe, xef);

	mutex_lock(&xef->vm.lock);
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);
	mutex_unlock(&xef->vm.lock);
	mutex_destroy(&xef->vm.lock);

	kfree(xef);
}

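/* All xe ioctls are available on render nodes (DRM_RENDER_ALLOW). */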
static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
};

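/* Standard file_operations wiring; the heavy lifting is done by the DRM core. */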
static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
//	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
};

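/*
 * Last-reference release of the drm_device: clear the PCI drvdata pointer so
 * no stale reference to the freed xe_device is left behind.
 */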
static void xe_driver_release(struct drm_device *dev)
{
	struct xe_device *xe = to_xe_device(dev);

	pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
}

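/*
 * Top-level DRM driver description: a GEM render driver with syncobj and
 * timeline-syncobj support. DRIVER_MODESET is not advertised here, so no
 * display/KMS paths are exposed by this struct.
 */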
static struct drm_driver driver = {
	/*
	 * Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
	.release = &xe_driver_release,

	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

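/*
 * drmm-managed destructor registered by xe_device_create(): tears down the
 * ordered workqueue, the persistent-engines lock and the TTM device once the
 * drm_device goes away.
 */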
static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	destroy_workqueue(xe->ordered_wq);
	mutex_destroy(&xe->persitent_engines.lock);
	ttm_device_fini(&xe->ttm);
}

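/*
 * Allocate the xe_device embedded in a drm_device and set up the purely
 * software state: TTM device, locks, xarrays, lists, the ordered workqueue
 * and the device info taken from the PCI device. Hardware bring-up happens
 * later, in xe_device_probe().
 */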
struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err_put;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.enable_guc = enable_guc;

	spin_lock_init(&xe->irq.lock);

	init_waitqueue_head(&xe->ufence_wq);

	mutex_init(&xe->usm.lock);
	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC1);

	mutex_init(&xe->persitent_engines.lock);
	INIT_LIST_HEAD(&xe->persitent_engines.list);

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);

	mutex_init(&xe->sb_lock);
	xe->enabled_irq_mask = ~0;

	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err_put;

	mutex_init(&xe->mem_access.lock);
	return xe;

err_put:
	drm_dev_put(&xe->drm);

	return ERR_PTR(err);
}

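/*
 * Bring the device up: allocate the GTs, set up MMIO, probe the pcode,
 * install interrupts, probe VRAM, initialize each GT, and finally register
 * the DRM device and its debugfs entries. Failures after the interrupt
 * install unwind through err_irq_shutdown.
 */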
int xe_device_probe(struct xe_device *xe)
{
	struct xe_gt *gt;
	int err;
	u8 id;

	xe->info.mem_region_mask = 1;

	for_each_gt(gt, xe, id) {
		err = xe_gt_alloc(xe, gt);
		if (err)
			return err;
	}

	err = xe_mmio_init(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_pcode_probe(gt);
		if (err)
			return err;
	}

	err = xe_irq_install(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = xe_mmio_probe_vram(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_noalloc(gt);
		if (err)
			goto err_irq_shutdown;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_irq_shutdown;

	xe_debugfs_register(xe);

	return 0;

err_irq_shutdown:
	xe_irq_shutdown(xe);
	return err;
}

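/*
 * Device removal: only the interrupts need explicit teardown here; most other
 * state is released via the drmm/devm actions registered in xe_device_create().
 */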
void xe_device_remove(struct xe_device *xe)
{
	xe_irq_shutdown(xe);
}

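/* Device shutdown: nothing to do here yet; the function is intentionally empty. */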
void xe_device_shutdown(struct xe_device *xe)
{
}

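/*
 * Persistent engines are tracked on a device-level list protected by
 * persitent_engines.lock; they are killed when the client (xe_file) that
 * created them closes, via device_kill_persitent_engines().
 */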
void xe_device_add_persitent_engines(struct xe_device *xe, struct xe_engine *e)
{
	mutex_lock(&xe->persitent_engines.lock);
	list_add_tail(&e->persitent.link, &xe->persitent_engines.list);
	mutex_unlock(&xe->persitent_engines.lock);
}

void xe_device_remove_persitent_engines(struct xe_device *xe,
					struct xe_engine *e)
{
	mutex_lock(&xe->persitent_engines.lock);
	if (!list_empty(&e->persitent.link))
		list_del(&e->persitent.link);
	mutex_unlock(&xe->persitent_engines.lock);
}

static void device_kill_persitent_engines(struct xe_device *xe,
					  struct xe_file *xef)
{
	struct xe_engine *e, *next;

	mutex_lock(&xe->persitent_engines.lock);
	list_for_each_entry_safe(e, next, &xe->persitent_engines.list,
				 persitent.link)
		if (e->persitent.xef == xef) {
			xe_engine_kill(e);
			list_del_init(&e->persitent.link);
		}
	mutex_unlock(&xe->persitent_engines.lock);
}

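/*
 * Device write memory barrier: wmb() orders the CPU-side writes; on discrete
 * parts an additional MMIO write to a scratch register (SOFTWARE_FLAGS_SPR33)
 * is issued, presumably to push pending writes all the way to the device.
 */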
#define SOFTWARE_FLAGS_SPR33         _MMIO(0x4F084)

void xe_device_wmb(struct xe_device *xe)
{
	struct xe_gt *gt = xe_device_get_gt(xe, 0);

	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33.reg, 0);
}

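/*
 * Number of CCS bytes needed to back @size bytes of data when the device uses
 * flat CCS; returns 0 if flat CCS is not present.
 */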
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0;
}

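/*
 * Memory-access reference counting: xe_device_mem_access_get()/put() keep the
 * device resumed while callers touch device memory. The first get takes a
 * runtime PM reference (if the device is currently active) and the last put
 * releases it.
 */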
void xe_device_mem_access_get(struct xe_device *xe)
{
	bool resumed = xe_pm_runtime_resume_if_suspended(xe);

	mutex_lock(&xe->mem_access.lock);
	if (xe->mem_access.ref++ == 0)
		xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);
	mutex_unlock(&xe->mem_access.lock);

	/* Resuming the device above took an extra runtime PM reference; drop it */
	if (resumed)
		xe_pm_runtime_put(xe);

	XE_WARN_ON(xe->mem_access.ref == U32_MAX);
}

void xe_device_mem_access_put(struct xe_device *xe)
{
	mutex_lock(&xe->mem_access.lock);
	if (--xe->mem_access.ref == 0 && xe->mem_access.hold_rpm)
		xe_pm_runtime_put(xe);
	mutex_unlock(&xe->mem_access.lock);

	XE_WARN_ON(xe->mem_access.ref < 0);
}
360