// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>

#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_dma_buf.h"
#include "xe_drv.h"
#include "xe_engine.h"
#include "xe_exec.h"
#include "xe_gt.h"
#include "xe_irq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vm_madvise.h"
#include "xe_wait_user_fence.h"

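/*
 * Per-fd state: on open, allocate the struct xe_file that tracks every
 * VM and engine created through this file in mutex-guarded xarrays
 * (IDs are allocated starting at 1, per XA_FLAGS_ALLOC1).
 */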
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_file *xef;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return -ENOMEM;

	xef->drm = file;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->engine.lock);
	xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);

	file->driver_priv = xef;
	return 0;
}

static void device_kill_persistent_engines(struct xe_device *xe,
					   struct xe_file *xef);

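/*
 * On last close of the fd: kill and drop every engine the file still
 * holds, reap its persistent engines, then tear down its VMs and free
 * the per-file state.
 */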
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_engine *e;
	unsigned long idx;

	mutex_lock(&xef->engine.lock);
	xa_for_each(&xef->engine.xa, idx, e) {
		xe_engine_kill(e);
		xe_engine_put(e);
	}
	mutex_unlock(&xef->engine.lock);
	xa_destroy(&xef->engine.xa);
	mutex_destroy(&xef->engine.lock);
	device_kill_persistent_engines(xe, xef);

	mutex_lock(&xef->vm.lock);
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);
	mutex_unlock(&xef->vm.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	kfree(xef);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_GET_PROPERTY, xe_engine_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
};

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

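/*
 * Final drm_device release: detach the xe_device from the PCI device's
 * drvdata.
 */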
static void xe_driver_release(struct drm_device *dev)
{
	struct xe_device *xe = to_xe_device(dev);

	pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
}

static struct drm_driver driver = {
	/*
	 * Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
	.release = &xe_driver_release,

	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

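/*
 * drmm cleanup action registered in xe_device_create(): destroys the
 * ordered workqueue and finalizes the TTM device.
 */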
static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	ttm_device_fini(&xe->ttm);
}

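/**
 * xe_device_create - Allocate and minimally initialize an xe_device
 * @pdev: PCI device backing this xe instance
 * @ent: matching PCI ID table entry
 *
 * Kicks out conflicting firmware framebuffers, allocates the
 * drm_device-embedded struct xe_device and sets up the TTM device plus
 * the locks, lists and ordered workqueue that the rest of probe relies
 * on. MMIO, IRQ and GT bring-up happens later, in xe_device_probe().
 *
 * Return: the new xe_device, or an ERR_PTR() on failure.
 */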
struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err_put;

	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err_put;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.enable_guc = enable_guc;

	spin_lock_init(&xe->irq.lock);

	init_waitqueue_head(&xe->ufence_wq);

	drmm_mutex_init(&xe->drm, &xe->usm.lock);
	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC1);

	drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
	INIT_LIST_HEAD(&xe->persistent_engines.list);

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	if (!xe->ordered_wq) {
		drm_err(&xe->drm, "Failed to allocate xe-ordered-wq\n");
		err = -ENOMEM;
		goto err_put;
	}

	drmm_mutex_init(&xe->drm, &xe->sb_lock);
	xe->enabled_irq_mask = ~0;

	return xe;

err_put:
	drm_dev_put(&xe->drm);

	return ERR_PTR(err);
}

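/*
 * drmm cleanup action registered at the end of probe: sanitizes each
 * GT when the device is torn down.
 */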
static void xe_device_sanitize(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

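/**
 * xe_device_probe - Bring up the device proper
 * @xe: device created by xe_device_create()
 *
 * Runs the ordered bring-up sequence: tile allocation, MMIO mapping,
 * pcode probe, IRQ installation, early and full GT init, the VRAM,
 * system and stolen memory managers, and finally DRM registration and
 * debugfs. IRQs are shut down again on any failure past their
 * installation.
 *
 * Return: 0 on success, negative error code on failure.
 */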
int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 id;

	xe->info.mem_region_mask = 1;

	for_each_tile(tile, xe, id) {
		err = xe_tile_alloc(tile);
		if (err)
			return err;
	}

	err = xe_mmio_init(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_pcode_probe(gt);
		if (err)
			return err;
	}

	err = xe_irq_install(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = xe_mmio_probe_vram(xe);
	if (err)
		goto err_irq_shutdown;

	xe_ttm_sys_mgr_init(xe);

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			goto err_irq_shutdown;
	}

	/* Allocate and map stolen after potential VRAM resize */
	xe_ttm_stolen_mgr_init(xe);

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_irq_shutdown;

	xe_debugfs_register(xe);

	err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
	if (err)
		return err;

	return 0;

err_irq_shutdown:
	xe_irq_shutdown(xe);
	return err;
}

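/*
 * Driver unbind: IRQs are the only thing torn down explicitly here;
 * the rest unwinds through the drmm/devm actions registered during
 * device creation and probe.
 */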
void xe_device_remove(struct xe_device *xe)
{
	xe_irq_shutdown(xe);
}

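/* Platform shutdown hook; currently a no-op. */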
void xe_device_shutdown(struct xe_device *xe)
{
}

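/*
 * Engines marked persistent keep running after their handle is
 * destroyed; track them on a device-global list so they can still be
 * killed when the file that created them closes.
 */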
void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e)
{
	mutex_lock(&xe->persistent_engines.lock);
	list_add_tail(&e->persistent.link, &xe->persistent_engines.list);
	mutex_unlock(&xe->persistent_engines.lock);
}

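/*
 * Unlink an engine from the persistent list; the list_empty() check
 * tolerates engines that have already been unlinked.
 */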
void xe_device_remove_persistent_engines(struct xe_device *xe,
					 struct xe_engine *e)
{
	mutex_lock(&xe->persistent_engines.lock);
	if (!list_empty(&e->persistent.link))
		list_del(&e->persistent.link);
	mutex_unlock(&xe->persistent_engines.lock);
}

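/*
 * Called from xe_file_close(): kill every persistent engine created
 * through @xef and unlink it from the device list.
 */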
static void device_kill_persistent_engines(struct xe_device *xe,
					   struct xe_file *xef)
{
	struct xe_engine *e, *next;

	mutex_lock(&xe->persistent_engines.lock);
	list_for_each_entry_safe(e, next, &xe->persistent_engines.list,
				 persistent.link)
		if (e->persistent.xef == xef) {
			xe_engine_kill(e);
			list_del_init(&e->persistent.link);
		}
	mutex_unlock(&xe->persistent_engines.lock);
}

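/**
 * xe_device_wmb - Device-wide write memory barrier
 * @xe: the xe device
 *
 * Issues a CPU wmb(); on discrete parts it additionally posts a write
 * to the SOFTWARE_FLAGS_SPR33 scratch register, presumably so that
 * preceding writes are pushed all the way out to the device rather
 * than sitting in host-side buffers.
 */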
void xe_device_wmb(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);

	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
}

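/*
 * Number of compression control surface (CCS) bytes needed to back
 * @size bytes of main memory when the device uses flat CCS, 0
 * otherwise: one CCS byte covers NUM_BYTES_PER_CCS_BYTE main-memory
 * bytes (e.g. at a 1:256 ratio, 1 MiB of memory needs 4 KiB of CCS).
 */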
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0;
}

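/**
 * xe_device_mem_access_get - Keep the device awake for memory access
 * @xe: the xe device
 *
 * The first reference resumes a suspended device and, if the device is
 * active, pins it with a runtime PM reference that the matching last
 * xe_device_mem_access_put() drops again.
 *
 * A typical (hypothetical) caller pattern:
 *
 *	xe_device_mem_access_get(xe);
 *	...access VRAM / MMIO...
 *	xe_device_mem_access_put(xe);
 */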
void xe_device_mem_access_get(struct xe_device *xe)
{
	bool resumed = xe_pm_runtime_resume_if_suspended(xe);
	int ref = atomic_inc_return(&xe->mem_access.ref);

	if (ref == 1)
		xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);

	/* If the resume above bumped the RPM usage counter, drop that
	 * reference again now that our own hold (if any) is in place.
	 */
	if (resumed)
		xe_pm_runtime_put(xe);

	XE_WARN_ON(ref == S32_MAX);
}

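/**
 * xe_device_mem_access_put - Release a memory access reference
 * @xe: the xe device
 *
 * Counterpart to xe_device_mem_access_get(); the final put releases
 * the runtime PM hold taken by the first get, if one was taken.
 */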
void xe_device_mem_access_put(struct xe_device *xe)
{
	bool hold = xe->mem_access.hold_rpm;
	int ref = atomic_dec_return(&xe->mem_access.ref);

	if (!ref && hold)
		xe_pm_runtime_put(xe);

	XE_WARN_ON(ref < 0);
}