xref: /linux/drivers/gpu/drm/xe/xe_pci.c (revision 8cdcef1c2f82d207aa8b2a02298fbc17191c6261)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/xe_pciids.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_display.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_macros.h"
#include "xe_module.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_gt_desc {
	enum xe_gt_type type;
	u32 mmio_adj_limit;
	u32 mmio_adj_offset;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_graphics_desc *graphics;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_media_desc *media;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 require_force_probe:1;
	u8 is_dgfx:1;
	u8 has_display:1;
	u8 has_heci_gscfi:1;

	u8 has_llc:1;
	u8 has_sriov:1;
	u8 bypass_mtcfg:1;
	u8 supports_mmio_ext:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = (x),	\
	.platform_name = #x
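
/*
 * Illustrative expansion (not part of the driver): PLATFORM(XE_TIGERLAKE)
 * becomes .platform = XE_TIGERLAKE, .platform_name = "XE_TIGERLAKE",
 * since #x stringizes the enum name.
 */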

#define NOP(x)	x
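
/*
 * NOP is handed to the XE_*_IDS() macros from xe_pciids.h as an identity
 * wrapper: illustratively, XE_RPLS_IDS(NOP) expands to the bare,
 * comma-separated device IDs used to build the u16 arrays below.
 */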

static const struct xe_graphics_desc graphics_xelp = {
	.name = "Xe_LP",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const struct xe_graphics_desc graphics_xelpp = {
	.name = "Xe_LP+",
	.ver = 12,
	.rel = 10,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.has_flat_ccs = true, \
	.dma_mask_size = 46, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.name = "Xe_HPG",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.name = "Xe_HPC",
	.ver = 12,
	.rel = 60,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.dma_mask_size = 52,
	.max_remote_tiles = 1,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_flat_ccs = 0,
	.supports_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.name = "Xe_LPG",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
	.has_flat_ccs = 0,
};

#define XE2_GFX_FEATURES \
	.dma_mask_size = 46, \
	.has_asid = 1, \
	.has_flat_ccs = 0 /* FIXME: implementation missing */, \
	.has_range_tlb_invalidation = 1, \
	.supports_usm = 0 /* FIXME: implementation missing */, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
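
/*
 * Note: GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0) above sets the four
 * contiguous CCS bits in one go, equivalent to OR-ing BIT(XE_HW_ENGINE_CCS0)
 * through BIT(XE_HW_ENGINE_CCS3) individually.
 */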

static const struct xe_graphics_desc graphics_xe2 = {
	.name = "Xe2_LPG",

	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.name = "Xe_M",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
		BIT(XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xehpm = {
	.name = "Xe_HPM",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
		BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_VECS1),
};

static const struct xe_media_desc media_xelpmp = {
	.name = "Xe_LPM+",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
		BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_media_desc media_xe2 = {
	.name = "Xe2_LPM",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
};

static const struct xe_device_desc tgl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_TIGERLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ROCKETLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_S),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_P),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_N),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.graphics = &graphics_xelpp,
	.media = &media_xem,
	DGFX_FEATURES,
	PLATFORM(XE_DG1),
	.has_display = true,
	.require_force_probe = true,
	.has_heci_gscfi = 1,
};

static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(XE_DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.graphics = &graphics_xehpc,
	DGFX_FEATURES,
	PLATFORM(XE_PVC),
	.has_display = false,
	.require_force_probe = true,
	.has_heci_gscfi = 1,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(XE_METEORLAKE),
	.has_display = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(XE_LUNARLAKE),
	.require_force_probe = true,
};

#undef PLATFORM
__diag_pop();

/* Map of GMD_ID values to graphics IP */
static struct gmdid_map graphics_ip_map[] = {
	{ 1270, &graphics_xelpg },
	{ 1271, &graphics_xelpg },
	{ 2004, &graphics_xe2 },
};

/* Map of GMD_ID values to media IP */
static struct gmdid_map media_ip_map[] = {
	{ 1300, &media_xelpmp },
	{ 2000, &media_xe2 },
};

#define INTEL_VGA_DEVICE(id, info) {			\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, id),		\
	PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16,	\
	(unsigned long) info }
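
/*
 * Illustrative expansion: INTEL_VGA_DEVICE(0x9a49, &tgl_desc) matches PCI
 * vendor PCI_VENDOR_ID_INTEL / device 0x9a49 with a display class code,
 * and stashes the xe_device_desc pointer in driver_data for
 * xe_pci_probe() to pick up.
 */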

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, a subplatform-specific ID list must come before
 * the more general platform match it overlaps with, otherwise the wrong
 * info struct above would be used.
 */
static const struct pci_device_id pciidlist[] = {
	XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

#undef INTEL_VGA_DEVICE

/* Is device_id present in the comma-separated list of IDs? */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}
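
/*
 * For example (hypothetical IDs): with xe.force_probe="4680,!4690",
 * id_forced(0x4680) and id_blocked(0x4690) below both return true, while
 * "*" force-probes every device and "!*" blocks every device.
 */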

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

static void peek_gmdid(struct xe_device *xe, u32 gmdid_offset, u32 *ver, u32 *revid)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	void __iomem *map = pci_iomap_range(pdev, 0, gmdid_offset, sizeof(u32));
	u32 val;

	if (!map) {
		drm_err(&xe->drm, "Failed to read GMD_ID (%#x) from PCI BAR.\n",
			gmdid_offset);
		*ver = 0;
		*revid = 0;

		return;
	}

	val = ioread32(map);
	pci_iounmap(pdev, map);

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 +
		REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
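
/*
 * Worked example (illustrative): a GMD_ID readout with arch 12 and release
 * 70 yields ver = 12 * 100 + 70 = 1270, which handle_gmdid() below resolves
 * via graphics_ip_map to the Xe_LPG descriptor.
 */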

/*
 * Pre-GMD_ID platform: device descriptor already points to the appropriate
 * graphics descriptor. Simply forward the description and calculate the version
 * appropriately. "graphics" should be present in all such platforms, while
 * media is optional.
 */
static void handle_pre_gmdid(struct xe_device *xe,
			     const struct xe_device_desc *desc,
			     const struct xe_graphics_desc **graphics,
			     const struct xe_media_desc **media)
{
	*graphics = desc->graphics;
	xe->info.graphics_verx100 = (*graphics)->ver * 100 + (*graphics)->rel;

	*media = desc->media;
	if (*media)
		xe->info.media_verx100 = (*media)->ver * 100 + (*media)->rel;
}
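
/*
 * E.g. for DG2 the descriptor carries graphics_xehpg (ver 12, rel 55), so
 * the computation above gives graphics_verx100 = 1255.
 */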

/*
 * GMD_ID platform: read IP version from hardware and select graphics descriptor
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_device_desc *desc,
			 const struct xe_graphics_desc **graphics,
			 const struct xe_media_desc **media,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	peek_gmdid(xe, GMD_ID.addr, &ver, graphics_revid);
	for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
		if (ver == graphics_ip_map[i].ver) {
			xe->info.graphics_verx100 = ver;
			*graphics = graphics_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.graphics_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	peek_gmdid(xe, GMD_ID.addr + 0x380000, &ver, media_revid);

	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
		if (ver == media_ip_map[i].ver) {
			xe->info.media_verx100 = ver;
			*media = media_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.media_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc,
			const struct xe_subplatform_desc *subplatform_desc)
{
	const struct xe_graphics_desc *graphics_desc = NULL;
	const struct xe_media_desc *media_desc = NULL;
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers. desc->graphics will only
	 * ever be set at this point for platforms before GMD_ID. In that case
	 * the IP descriptions and versions are simply derived from that.
	 */
	if (desc->graphics) {
		handle_pre_gmdid(xe, desc, &graphics_desc, &media_desc);
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		handle_gmdid(xe, desc, &graphics_desc, &media_desc,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}
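
	/*
	 * E.g. DG2 takes the first branch (dg2_desc supplies graphics_xehpg
	 * directly), while Meteor Lake takes the second and reads its IP
	 * versions from the GMD_ID registers.
	 */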

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load.  Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_desc)
		return -ENODEV;

	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.graphics_name = graphics_desc->name;
	xe->info.media_name = media_desc ? media_desc->name : "none";
	xe->info.has_llc = desc->has_llc;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.bypass_mtcfg = desc->bypass_mtcfg;
	xe->info.supports_mmio_ext = desc->supports_mmio_ext;
	xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;

	xe->info.dma_mask_size = graphics_desc->dma_mask_size;
	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.supports_usm = graphics_desc->supports_usm;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;

	xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				  xe_modparam.enable_display &&
				  desc->has_display;
	/*
	 * All platforms have at least one primary GT.  Any platform with media
	 * version 13 or higher has an additional dedicated media GT.  And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 *
	 * FIXME: 'tile_count' here is misnamed since the rest of the driver
	 * treats it as the number of GTs rather than just the number of tiles.
	 */
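	/*
	 * E.g. Xe_HPC sets max_remote_tiles = 1, so tile_count below comes
	 * out as 2; platforms with media version 13 or newer additionally
	 * get a media GT per tile in the loop that follows.
	 */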
	xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;

	for_each_tile(tile, xe, id) {
		tile->xe = xe;
		tile->id = id;

		tile->primary_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->primary_gt))
			return PTR_ERR(tile->primary_gt);

		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.__engine_mask = graphics_desc->hw_engine_mask;
		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.__engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and set up the media GT for platforms with
		 * standalone media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.__engine_mask = media_desc->hw_engine_mask;
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms.  We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe;

	xe = pci_get_drvdata(pdev);
	if (!xe) /* driver load aborted, nothing to clean up */
		return;

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
	pci_set_drvdata(pdev, NULL);
}

static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_drvdata(pdev, xe);
	err = pci_enable_device(pdev);
	if (err)
		goto err_drm_put;

	pci_set_master(pdev);

	xe_sriov_probe_early(xe, desc->has_sriov);

	err = xe_info_init(xe, desc, subplatform_desc);
	if (err)
		goto err_pci_disable;

	xe_display_probe(xe);

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.enable_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.display),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_device_probe(xe);
	if (err)
		goto err_pci_disable;

	xe_pm_init(xe);

	return 0;

err_pci_disable:
	pci_disable_device(pdev);

err_drm_put:
	drm_dev_put(&xe->drm);

	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = xe_pm_suspend(pdev_to_xe_device(pdev));
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif