xref: /linux/drivers/gpu/drm/xe/xe_pci.c (revision e77a8005748547fb1f10645097f13ccdd804d7e5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_pci.h"
7 
8 #include <kunit/static_stub.h>
9 #include <linux/device/driver.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/pm_runtime.h>
13 
14 #include <drm/drm_color_mgmt.h>
15 #include <drm/drm_drv.h>
16 #include <drm/intel/pciids.h>
17 
18 #include "display/xe_display.h"
19 #include "regs/xe_gt_regs.h"
20 #include "xe_device.h"
21 #include "xe_drv.h"
22 #include "xe_gt.h"
23 #include "xe_gt_sriov_vf.h"
24 #include "xe_guc.h"
25 #include "xe_macros.h"
26 #include "xe_mmio.h"
27 #include "xe_module.h"
28 #include "xe_pci_sriov.h"
29 #include "xe_pci_types.h"
30 #include "xe_pm.h"
31 #include "xe_sriov.h"
32 #include "xe_step.h"
33 #include "xe_tile.h"
34 
35 enum toggle_d3cold {
36 	D3COLD_DISABLE,
37 	D3COLD_ENABLE,
38 };
39 
40 struct xe_subplatform_desc {
41 	enum xe_subplatform subplatform;
42 	const char *name;
43 	const u16 *pciidlist;
44 };
45 
46 struct xe_device_desc {
47 	/* Should only ever be set for platforms without GMD_ID */
48 	const struct xe_graphics_desc *graphics;
49 	/* Should only ever be set for platforms without GMD_ID */
50 	const struct xe_media_desc *media;
51 
52 	const char *platform_name;
53 	const struct xe_subplatform_desc *subplatforms;
54 
55 	enum xe_platform platform;
56 
57 	u8 require_force_probe:1;
58 	u8 is_dgfx:1;
59 
60 	u8 has_display:1;
61 	u8 has_heci_gscfi:1;
62 	u8 has_heci_cscfi:1;
63 	u8 has_llc:1;
64 	u8 has_mmio_ext:1;
65 	u8 has_sriov:1;
66 	u8 skip_guc_pc:1;
67 	u8 skip_mtcfg:1;
68 	u8 skip_pcode:1;
69 };
70 
71 __diag_push();
72 __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
73 
74 #define PLATFORM(x)		\
75 	.platform = XE_##x,	\
76 	.platform_name = #x
77 
78 #define NOP(x)	x
79 
80 static const struct xe_graphics_desc graphics_xelp = {
81 	.name = "Xe_LP",
82 	.ver = 12,
83 	.rel = 0,
84 
85 	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
86 
87 	.dma_mask_size = 39,
88 	.va_bits = 48,
89 	.vm_max_level = 3,
90 };
91 
92 static const struct xe_graphics_desc graphics_xelpp = {
93 	.name = "Xe_LP+",
94 	.ver = 12,
95 	.rel = 10,
96 
97 	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
98 
99 	.dma_mask_size = 39,
100 	.va_bits = 48,
101 	.vm_max_level = 3,
102 };
103 
104 #define XE_HP_FEATURES \
105 	.has_range_tlb_invalidation = true, \
106 	.dma_mask_size = 46, \
107 	.va_bits = 48, \
108 	.vm_max_level = 3
109 
110 static const struct xe_graphics_desc graphics_xehpg = {
111 	.name = "Xe_HPG",
112 	.ver = 12,
113 	.rel = 55,
114 
115 	.hw_engine_mask =
116 		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
117 		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
118 		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
119 
120 	XE_HP_FEATURES,
121 	.vram_flags = XE_VRAM_FLAGS_NEED64K,
122 
123 	.has_flat_ccs = 1,
124 };
125 
126 static const struct xe_graphics_desc graphics_xehpc = {
127 	.name = "Xe_HPC",
128 	.ver = 12,
129 	.rel = 60,
130 
131 	.hw_engine_mask =
132 		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
133 		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
134 		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
135 		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
136 		BIT(XE_HW_ENGINE_BCS8) |
137 		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
138 		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
139 
140 	XE_HP_FEATURES,
141 	.dma_mask_size = 52,
142 	.max_remote_tiles = 1,
143 	.va_bits = 57,
144 	.vm_max_level = 4,
145 	.vram_flags = XE_VRAM_FLAGS_NEED64K,
146 
147 	.has_asid = 1,
148 	.has_atomic_enable_pte_bit = 1,
149 	.has_usm = 1,
150 };
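/*
 * Note: graphics_xehpc above pulls in XE_HP_FEATURES and then overrides some
 * of its defaults (dma_mask_size, va_bits, vm_max_level) with later
 * designated initializers; this intentional pattern is why -Woverride-init
 * is suppressed around these tables via __diag_ignore_all() above.
 */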
151 
152 static const struct xe_graphics_desc graphics_xelpg = {
153 	.name = "Xe_LPG",
154 	.hw_engine_mask =
155 		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
156 		BIT(XE_HW_ENGINE_CCS0),
157 
158 	XE_HP_FEATURES,
159 };
160 
161 #define XE2_GFX_FEATURES \
162 	.dma_mask_size = 46, \
163 	.has_asid = 1, \
164 	.has_atomic_enable_pte_bit = 1, \
165 	.has_flat_ccs = 1, \
166 	.has_indirect_ring_state = 1, \
167 	.has_range_tlb_invalidation = 1, \
168 	.has_usm = 1, \
169 	.va_bits = 48, \
170 	.vm_max_level = 4, \
171 	.hw_engine_mask = \
172 		BIT(XE_HW_ENGINE_RCS0) | \
173 		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
174 		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
175 
176 static const struct xe_graphics_desc graphics_xe2 = {
177 	.name = "Xe2_LPG / Xe2_HPG",
178 
179 	XE2_GFX_FEATURES,
180 };
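/*
 * For illustration: the GENMASK() usage in XE2_GFX_FEATURES relies on the
 * per-class XE_HW_ENGINE_* enum values being consecutive, so that e.g.
 * GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0) selects all four compute
 * command streamers, equivalent to OR-ing the individual BIT()s.
 */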
181 
182 static const struct xe_media_desc media_xem = {
183 	.name = "Xe_M",
184 	.ver = 12,
185 	.rel = 0,
186 
187 	.hw_engine_mask =
188 		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
189 		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
190 };
191 
192 static const struct xe_media_desc media_xehpm = {
193 	.name = "Xe_HPM",
194 	.ver = 12,
195 	.rel = 55,
196 
197 	.hw_engine_mask =
198 		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
199 		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
200 };
201 
202 static const struct xe_media_desc media_xelpmp = {
203 	.name = "Xe_LPM+",
204 	.hw_engine_mask =
205 		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
206 		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
207 		BIT(XE_HW_ENGINE_GSCCS0)
208 };
209 
210 static const struct xe_media_desc media_xe2 = {
211 	.name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
212 	.hw_engine_mask =
213 		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
214 		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
215 		BIT(XE_HW_ENGINE_GSCCS0)
216 };
217 
218 static const struct xe_device_desc tgl_desc = {
219 	.graphics = &graphics_xelp,
220 	.media = &media_xem,
221 	PLATFORM(TIGERLAKE),
222 	.has_display = true,
223 	.has_llc = true,
224 	.require_force_probe = true,
225 };
226 
227 static const struct xe_device_desc rkl_desc = {
228 	.graphics = &graphics_xelp,
229 	.media = &media_xem,
230 	PLATFORM(ROCKETLAKE),
231 	.has_display = true,
232 	.has_llc = true,
233 	.require_force_probe = true,
234 };
235 
236 static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
237 
238 static const struct xe_device_desc adl_s_desc = {
239 	.graphics = &graphics_xelp,
240 	.media = &media_xem,
241 	PLATFORM(ALDERLAKE_S),
242 	.has_display = true,
243 	.has_llc = true,
244 	.require_force_probe = true,
245 	.subplatforms = (const struct xe_subplatform_desc[]) {
246 		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
247 		{},
248 	},
249 };
250 
251 static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
252 
253 static const struct xe_device_desc adl_p_desc = {
254 	.graphics = &graphics_xelp,
255 	.media = &media_xem,
256 	PLATFORM(ALDERLAKE_P),
257 	.has_display = true,
258 	.has_llc = true,
259 	.require_force_probe = true,
260 	.subplatforms = (const struct xe_subplatform_desc[]) {
261 		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
262 		{},
263 	},
264 };
265 
266 static const struct xe_device_desc adl_n_desc = {
267 	.graphics = &graphics_xelp,
268 	.media = &media_xem,
269 	PLATFORM(ALDERLAKE_N),
270 	.has_display = true,
271 	.has_llc = true,
272 	.require_force_probe = true,
273 };
274 
275 #define DGFX_FEATURES \
276 	.is_dgfx = 1
277 
278 static const struct xe_device_desc dg1_desc = {
279 	.graphics = &graphics_xelpp,
280 	.media = &media_xem,
281 	DGFX_FEATURES,
282 	PLATFORM(DG1),
283 	.has_display = true,
284 	.has_heci_gscfi = 1,
285 	.require_force_probe = true,
286 };
287 
288 static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
289 static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
290 static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
291 
292 #define DG2_FEATURES \
293 	DGFX_FEATURES, \
294 	PLATFORM(DG2), \
295 	.has_heci_gscfi = 1, \
296 	.subplatforms = (const struct xe_subplatform_desc[]) { \
297 		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
298 		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
299 		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
300 		{ } \
301 	}
302 
303 static const struct xe_device_desc ats_m_desc = {
304 	.graphics = &graphics_xehpg,
305 	.media = &media_xehpm,
306 	.require_force_probe = true,
307 
308 	DG2_FEATURES,
309 	.has_display = false,
310 };
311 
312 static const struct xe_device_desc dg2_desc = {
313 	.graphics = &graphics_xehpg,
314 	.media = &media_xehpm,
315 	.require_force_probe = true,
316 
317 	DG2_FEATURES,
318 	.has_display = true,
319 };
320 
321 static const __maybe_unused struct xe_device_desc pvc_desc = {
322 	.graphics = &graphics_xehpc,
323 	DGFX_FEATURES,
324 	PLATFORM(PVC),
325 	.has_display = false,
326 	.has_heci_gscfi = 1,
327 	.require_force_probe = true,
328 };
329 
330 static const struct xe_device_desc mtl_desc = {
331 	/* .graphics and .media determined via GMD_ID */
332 	.require_force_probe = true,
333 	PLATFORM(METEORLAKE),
334 	.has_display = true,
335 };
336 
337 static const struct xe_device_desc lnl_desc = {
338 	PLATFORM(LUNARLAKE),
339 	.has_display = true,
340 };
341 
342 static const struct xe_device_desc bmg_desc = {
343 	DGFX_FEATURES,
344 	PLATFORM(BATTLEMAGE),
345 	.has_display = true,
346 	.has_heci_cscfi = 1,
347 };
348 
349 static const struct xe_device_desc ptl_desc = {
350 	PLATFORM(PANTHERLAKE),
351 	.has_display = true,
352 	.require_force_probe = true,
353 };
354 
355 #undef PLATFORM
356 __diag_pop();
357 
358 /* Map of GMD_ID values to graphics IP */
359 static const struct gmdid_map graphics_ip_map[] = {
360 	{ 1270, &graphics_xelpg },
361 	{ 1271, &graphics_xelpg },
362 	{ 1274, &graphics_xelpg },	/* Xe_LPG+ */
363 	{ 2001, &graphics_xe2 },
364 	{ 2004, &graphics_xe2 },
365 	{ 3000, &graphics_xe2 },
366 	{ 3001, &graphics_xe2 },
367 };
368 
369 /* Map of GMD_ID values to media IP */
370 static const struct gmdid_map media_ip_map[] = {
371 	{ 1300, &media_xelpmp },
372 	{ 1301, &media_xe2 },
373 	{ 2000, &media_xe2 },
374 	{ 3000, &media_xe2 },
375 };
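/*
 * The keys in both maps are GMD_ID versions encoded as arch * 100 + release,
 * matching the value computed in read_gmdid() below; e.g. 1301 corresponds
 * to media IP version 13.01.
 */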
376 
377 /*
378  * Make sure any device matches here are ordered from most specific to
379  * most general. For example, a match based on subsystem and subvendor
380  * IDs must come before a more general match on the PCI device ID alone,
381  * otherwise the wrong descriptor above would be used.
382  */
383 static const struct pci_device_id pciidlist[] = {
384 	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
385 	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
386 	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
387 	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
388 	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
389 	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
390 	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
391 	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
392 	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
393 	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
394 	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
395 	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
396 	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
397 	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
398 	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
399 	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
400 	{ }
401 };
402 MODULE_DEVICE_TABLE(pci, pciidlist);
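/*
 * The descriptor passed as the second argument to INTEL_VGA_DEVICE() ends up
 * in the entry's driver_data, which xe_pci_probe() reads back via
 * ent->driver_data to pick the matching xe_device_desc.
 */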
403 
404 /* is device_id present in comma separated list of ids */
405 static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
406 {
407 	char *s, *p, *tok;
408 	bool ret;
409 
410 	if (!devices || !*devices)
411 		return false;
412 
413 	/* match everything */
414 	if (negative && strcmp(devices, "!*") == 0)
415 		return true;
416 	if (!negative && strcmp(devices, "*") == 0)
417 		return true;
418 
419 	s = kstrdup(devices, GFP_KERNEL);
420 	if (!s)
421 		return false;
422 
423 	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
424 		u16 val;
425 
426 		if (negative && tok[0] == '!')
427 			tok++;
428 		else if ((negative && tok[0] != '!') ||
429 			 (!negative && tok[0] == '!'))
430 			continue;
431 
432 		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
433 			ret = true;
434 			break;
435 		}
436 	}
437 
438 	kfree(s);
439 
440 	return ret;
441 }
442 
443 static bool id_forced(u16 device_id)
444 {
445 	return device_id_in_list(device_id, xe_modparam.force_probe, false);
446 }
447 
448 static bool id_blocked(u16 device_id)
449 {
450 	return device_id_in_list(device_id, xe_modparam.force_probe, true);
451 }
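/*
 * Usage sketch for the force_probe modparam, as parsed above (the hex device
 * IDs shown are only examples):
 *
 *	xe.force_probe=56a0,56a1	- force-probe these device IDs
 *	xe.force_probe=!4905		- block probe of this device ID
 *	xe.force_probe=*		- force-probe every device
 *	xe.force_probe=!*		- block every device
 */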
452 
453 static const struct xe_subplatform_desc *
454 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
455 {
456 	const struct xe_subplatform_desc *sp;
457 	const u16 *id;
458 
459 	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
460 		for (id = sp->pciidlist; *id; id++)
461 			if (*id == xe->info.devid)
462 				return sp;
463 
464 	return NULL;
465 }
466 
467 enum xe_gmdid_type {
468 	GMDID_GRAPHICS,
469 	GMDID_MEDIA
470 };
471 
472 static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
473 {
474 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
475 	struct xe_reg gmdid_reg = GMD_ID;
476 	u32 val;
477 
478 	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
479 
480 	if (IS_SRIOV_VF(xe)) {
481 		struct xe_gt *gt = xe_root_mmio_gt(xe);
482 
483 		/*
484 		 * To get the value of the GMDID register, VFs must obtain it
485 		 * from the GuC using MMIO communication.
486 		 *
487 		 * Note that at this point the xe_gt is not fully initialized
488 		 * and only basic access to MMIO registers is possible. To use
489 		 * our existing GuC communication functions we must perform at
490 		 * least basic xe_gt and xe_guc initialization.
491 		 *
492 		 * Since to obtain the value of GMDID_MEDIA we need to use the
493 		 * media GuC, temporarily tweak the gt type.
494 		 */
495 		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
496 
497 		if (type == GMDID_MEDIA) {
498 			gt->info.id = 1;
499 			gt->info.type = XE_GT_TYPE_MEDIA;
500 		} else {
501 			gt->info.id = 0;
502 			gt->info.type = XE_GT_TYPE_MAIN;
503 		}
504 
505 		xe_guc_comm_init_early(&gt->uc.guc);
506 
507 		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
508 		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);
509 
510 		/*
511 		 * Only undo xe_gt.info here, the remaining changes made above
512 		 * will be overwritten as part of the regular initialization.
513 		 */
514 		gt->info.id = 0;
515 		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
516 	} else {
517 		/*
518 		 * GMD_ID is a GT register, but at this point in the driver
519 		 * init we haven't fully initialized the GT yet so we need to
520 		 * read the register with the tile's MMIO accessor.  That means
521 		 * we need to apply the GSI offset manually since it won't get
522 		 * automatically added as it would if we were using a GT mmio
523 		 * accessor.
524 		 */
525 		if (type == GMDID_MEDIA)
526 			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;
527 
528 		val = xe_mmio_read32(mmio, gmdid_reg);
529 	}
530 
531 	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
532 	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
533 }
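/*
 * Worked example (register values assumed for illustration): a GMD_ID read
 * reporting architecture 12, release 70, revid 4 yields
 * *ver = 12 * 100 + 70 = 1270 and *revid = 4; 1270 then selects
 * graphics_xelpg via graphics_ip_map above.
 */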
534 
535 /*
536  * Pre-GMD_ID platform: device descriptor already points to the appropriate
537  * graphics descriptor. Simply forward the description and calculate the version
538  * appropriately. "graphics" should be present in all such platforms, while
539  * media is optional.
540  */
541 static void handle_pre_gmdid(struct xe_device *xe,
542 			     const struct xe_graphics_desc *graphics,
543 			     const struct xe_media_desc *media)
544 {
545 	xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;
546 
547 	if (media)
548 		xe->info.media_verx100 = media->ver * 100 + media->rel;
549 
550 }
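/*
 * Example: for dg1_desc the pre-GMD_ID graphics descriptor is graphics_xelpp
 * (ver 12, rel 10), so graphics_verx100 becomes 1210; media_xem (12.0) gives
 * media_verx100 = 1200.
 */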
551 
552 /*
553  * GMD_ID platform: read IP version from hardware and select graphics descriptor
554  * based on the result.
555  */
556 static void handle_gmdid(struct xe_device *xe,
557 			 const struct xe_graphics_desc **graphics,
558 			 const struct xe_media_desc **media,
559 			 u32 *graphics_revid,
560 			 u32 *media_revid)
561 {
562 	u32 ver;
563 
564 	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
565 
566 	for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
567 		if (ver == graphics_ip_map[i].ver) {
568 			xe->info.graphics_verx100 = ver;
569 			*graphics = graphics_ip_map[i].ip;
570 
571 			break;
572 		}
573 	}
574 
575 	if (!xe->info.graphics_verx100) {
576 		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
577 			ver / 100, ver % 100);
578 	}
579 
580 	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
581 
582 	/* Media may legitimately be fused off / not present */
583 	if (ver == 0)
584 		return;
585 
586 	for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
587 		if (ver == media_ip_map[i].ver) {
588 			xe->info.media_verx100 = ver;
589 			*media = media_ip_map[i].ip;
590 
591 			break;
592 		}
593 	}
594 
595 	if (!xe->info.media_verx100) {
596 		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
597 			ver / 100, ver % 100);
598 	}
599 }
600 
601 /*
602  * Initialize device info content that only depends on static driver_data
603  * passed to the driver at probe time from PCI ID table.
604  */
605 static int xe_info_init_early(struct xe_device *xe,
606 			      const struct xe_device_desc *desc,
607 			      const struct xe_subplatform_desc *subplatform_desc)
608 {
609 	int err;
610 
611 	xe->info.platform_name = desc->platform_name;
612 	xe->info.platform = desc->platform;
613 	xe->info.subplatform = subplatform_desc ?
614 		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
615 
616 	xe->info.is_dgfx = desc->is_dgfx;
617 	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
618 	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
619 	xe->info.has_llc = desc->has_llc;
620 	xe->info.has_mmio_ext = desc->has_mmio_ext;
621 	xe->info.has_sriov = desc->has_sriov;
622 	xe->info.skip_guc_pc = desc->skip_guc_pc;
623 	xe->info.skip_mtcfg = desc->skip_mtcfg;
624 	xe->info.skip_pcode = desc->skip_pcode;
625 
626 	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
627 				 xe_modparam.probe_display &&
628 				 desc->has_display;
629 
630 	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
631 	if (err)
632 		return err;
633 
634 	return 0;
635 }
636 
637 /*
638  * Initialize device info content that does require knowledge about
639  * graphics / media IP version.
640  * Make sure that GT / tile structures allocated by the driver match the data
641  * present in device info.
642  */
643 static int xe_info_init(struct xe_device *xe,
644 			const struct xe_graphics_desc *graphics_desc,
645 			const struct xe_media_desc *media_desc)
646 {
647 	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
648 	struct xe_tile *tile;
649 	struct xe_gt *gt;
650 	u8 id;
651 
652 	/*
653 	 * If this platform supports GMD_ID, we'll detect the proper IP
654 	 * descriptor to use from hardware registers. desc->graphics will only
655 	 * ever be set at this point for platforms before GMD_ID. In that case
656 	 * the IP descriptions and versions are simply derived from that.
657 	 */
658 	if (graphics_desc) {
659 		handle_pre_gmdid(xe, graphics_desc, media_desc);
660 		xe->info.step = xe_step_pre_gmdid_get(xe);
661 	} else {
662 		xe_assert(xe, !media_desc);
663 		handle_gmdid(xe, &graphics_desc, &media_desc,
664 			     &graphics_gmdid_revid, &media_gmdid_revid);
665 		xe->info.step = xe_step_gmdid_get(xe,
666 						  graphics_gmdid_revid,
667 						  media_gmdid_revid);
668 	}
669 
670 	/*
671 	 * If we couldn't detect the graphics IP, that's considered a fatal
672 	 * error and we should abort driver load.  Failing to detect media
673 	 * IP is non-fatal; we'll just proceed without enabling media support.
674 	 */
675 	if (!graphics_desc)
676 		return -ENODEV;
677 
678 	xe->info.graphics_name = graphics_desc->name;
679 	xe->info.media_name = media_desc ? media_desc->name : "none";
680 	xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;
681 
682 	xe->info.dma_mask_size = graphics_desc->dma_mask_size;
683 	xe->info.vram_flags = graphics_desc->vram_flags;
684 	xe->info.va_bits = graphics_desc->va_bits;
685 	xe->info.vm_max_level = graphics_desc->vm_max_level;
686 	xe->info.has_asid = graphics_desc->has_asid;
687 	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
688 	if (xe->info.platform != XE_PVC)
689 		xe->info.has_device_atomics_on_smem = 1;
690 
691 	/* Runtime detection may change this later */
692 	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
693 
694 	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
695 	xe->info.has_usm = graphics_desc->has_usm;
696 
697 	/*
698 	 * All platforms have at least one primary GT.  Any platform with media
699 	 * version 13 or higher has an additional dedicated media GT.  And
700 	 * depending on the graphics IP there may be additional "remote tiles."
701 	 * All of these together determine the overall GT count.
702 	 *
703 	 * FIXME: 'tile_count' here is misnamed since the rest of the driver
704 	 * treats it as the number of GTs rather than just the number of tiles.
705 	 */
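	/*
	 * Example: a single-tile GMD_ID platform with standalone media
	 * (media version 13+) gets tile_count = 1 here, while the loop
	 * below allocates a media GT as well, giving gt_count = 2.
	 */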
706 	xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
707 
708 	for_each_remote_tile(tile, xe, id) {
709 		int err;
710 
711 		err = xe_tile_init_early(tile, xe, id);
712 		if (err)
713 			return err;
714 	}
715 
716 	for_each_tile(tile, xe, id) {
717 		gt = tile->primary_gt;
718 		gt->info.id = xe->info.gt_count++;
719 		gt->info.type = XE_GT_TYPE_MAIN;
720 		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
721 		gt->info.engine_mask = graphics_desc->hw_engine_mask;
722 
723 		if (MEDIA_VER(xe) < 13 && media_desc)
724 			gt->info.engine_mask |= media_desc->hw_engine_mask;
725 
726 		if (MEDIA_VER(xe) < 13 || !media_desc)
727 			continue;
728 
729 		/*
730 		 * Allocate and setup media GT for platforms with standalone
731 		 * media.
732 		 */
733 		tile->media_gt = xe_gt_alloc(tile);
734 		if (IS_ERR(tile->media_gt))
735 			return PTR_ERR(tile->media_gt);
736 
737 		gt = tile->media_gt;
738 		gt->info.type = XE_GT_TYPE_MEDIA;
739 		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
740 		gt->info.engine_mask = media_desc->hw_engine_mask;
741 
742 		/*
743 		 * FIXME: At the moment multi-tile and standalone media are
744 		 * mutually exclusive on current platforms.  We'll need to
745 		 * come up with a better way to number GTs if we ever wind
746 		 * up with platforms that support both together.
747 		 */
748 		drm_WARN_ON(&xe->drm, id != 0);
749 		gt->info.id = xe->info.gt_count++;
750 	}
751 
752 	return 0;
753 }
754 
755 static void xe_pci_remove(struct pci_dev *pdev)
756 {
757 	struct xe_device *xe;
758 
759 	xe = pdev_to_xe_device(pdev);
760 	if (!xe) /* driver load aborted, nothing to cleanup */
761 		return;
762 
763 	if (IS_SRIOV_PF(xe))
764 		xe_pci_sriov_configure(pdev, 0);
765 
766 	xe_device_remove(xe);
767 	xe_pm_runtime_fini(xe);
768 	pci_set_drvdata(pdev, NULL);
769 }
770 
771 /*
772  * Probe the PCI device, initialize various parts of the driver.
773  *
774  * Fault injection is used to test the error paths of some initialization
775  * functions called either directly from xe_pci_probe() or indirectly for
776  * example through xe_device_probe(). Those functions use the kernel fault
777  * injection capabilities infrastructure, see
778  * Documentation/fault-injection/fault-injection.rst for details. The macro
779  * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
780  * at runtime and use a provided return value. The first requirement for
781  * error injectable functions is proper handling of the error code by the
782  * caller for recovery, which is always the case here. The second
783  * requirement is that no state is changed before the first error return.
784  * It is not strictly fulfilled for all initialization functions using the
785  * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
786  * error cases at probe time, the error code is simply propagated up by the
787  * caller. Therefore there is no consequence on those specific callers when
788  * function error injection skips the whole function.
789  */
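/*
 * For reference, a minimal sketch of how a probe-path helper opts into error
 * injection (xe_dummy_init is a hypothetical name, not a function in this
 * driver):
 *
 *	int xe_dummy_init(struct xe_device *xe)
 *	{
 *		...
 *	}
 *	ALLOW_ERROR_INJECTION(xe_dummy_init, ERRNO);
 *
 * ERRNO tells the fault injection framework that the function reports
 * failure as a negative errno return value.
 */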
790 static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
791 {
792 	const struct xe_device_desc *desc = (const void *)ent->driver_data;
793 	const struct xe_subplatform_desc *subplatform_desc;
794 	struct xe_device *xe;
795 	int err;
796 
797 	if (desc->require_force_probe && !id_forced(pdev->device)) {
798 		dev_info(&pdev->dev,
799 			 "Your graphics device %04x is not officially supported\n"
800 			 "by xe driver in this kernel version. To force Xe probe,\n"
801 			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
802 			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
803 			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
804 			 pdev->device, pdev->device, pdev->device,
805 			 pdev->device, pdev->device);
806 		return -ENODEV;
807 	}
808 
809 	if (id_blocked(pdev->device)) {
810 		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
811 			 pdev->vendor, pdev->device);
812 		return -ENODEV;
813 	}
814 
815 	if (xe_display_driver_probe_defer(pdev))
816 		return -EPROBE_DEFER;
817 
818 	err = pcim_enable_device(pdev);
819 	if (err)
820 		return err;
821 
822 	xe = xe_device_create(pdev, ent);
823 	if (IS_ERR(xe))
824 		return PTR_ERR(xe);
825 
826 	pci_set_drvdata(pdev, &xe->drm);
827 
828 	xe_pm_assert_unbounded_bridge(xe);
829 	subplatform_desc = find_subplatform(xe, desc);
830 
831 	pci_set_master(pdev);
832 
833 	err = xe_info_init_early(xe, desc, subplatform_desc);
834 	if (err)
835 		return err;
836 
837 	err = xe_device_probe_early(xe);
838 	if (err)
839 		return err;
840 
841 	err = xe_info_init(xe, desc->graphics, desc->media);
842 	if (err)
843 		return err;
844 
845 	err = xe_display_probe(xe);
846 	if (err)
847 		return err;
848 
849 	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
850 		desc->platform_name,
851 		subplatform_desc ? subplatform_desc->name : "",
852 		xe->info.devid, xe->info.revid,
853 		xe->info.is_dgfx,
854 		xe->info.graphics_name,
855 		xe->info.graphics_verx100 / 100,
856 		xe->info.graphics_verx100 % 100,
857 		xe->info.media_name,
858 		xe->info.media_verx100 / 100,
859 		xe->info.media_verx100 % 100,
860 		str_yes_no(xe->info.probe_display),
861 		xe->info.dma_mask_size, xe->info.tile_count,
862 		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);
863 
864 	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
865 		xe_step_name(xe->info.step.graphics),
866 		xe_step_name(xe->info.step.media),
867 		xe_step_name(xe->info.step.basedie));
868 
869 	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
870 		str_yes_no(xe_device_has_sriov(xe)),
871 		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
872 
873 	err = xe_pm_init_early(xe);
874 	if (err)
875 		return err;
876 
877 	err = xe_device_probe(xe);
878 	if (err)
879 		return err;
880 
881 	err = xe_pm_init(xe);
882 	if (err)
883 		goto err_driver_cleanup;
884 
885 	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
886 		str_yes_no(xe->d3cold.capable));
887 
888 	return 0;
889 
890 err_driver_cleanup:
891 	xe_pci_remove(pdev);
892 	return err;
893 }
894 
895 static void xe_pci_shutdown(struct pci_dev *pdev)
896 {
897 	xe_device_shutdown(pdev_to_xe_device(pdev));
898 }
899 
900 #ifdef CONFIG_PM_SLEEP
901 static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
902 {
903 	struct xe_device *xe = pdev_to_xe_device(pdev);
904 	struct pci_dev *root_pdev;
905 
906 	if (!xe->d3cold.capable)
907 		return;
908 
909 	root_pdev = pcie_find_root_port(pdev);
910 	if (!root_pdev)
911 		return;
912 
913 	switch (toggle) {
914 	case D3COLD_DISABLE:
915 		pci_d3cold_disable(root_pdev);
916 		break;
917 	case D3COLD_ENABLE:
918 		pci_d3cold_enable(root_pdev);
919 		break;
920 	}
921 }
922 
923 static int xe_pci_suspend(struct device *dev)
924 {
925 	struct pci_dev *pdev = to_pci_dev(dev);
926 	int err;
927 
928 	err = xe_pm_suspend(pdev_to_xe_device(pdev));
929 	if (err)
930 		return err;
931 
932 	/*
933 	 * Enabling D3Cold is needed for S2Idle/S0ix.
934 	 * It is safe to allow it here since xe_pm_suspend has evicted
935 	 * the local memory and the direct complete optimization is disabled.
936 	 */
937 	d3cold_toggle(pdev, D3COLD_ENABLE);
938 
939 	pci_save_state(pdev);
940 	pci_disable_device(pdev);
941 
942 	return 0;
943 }
944 
945 static int xe_pci_resume(struct device *dev)
946 {
947 	struct pci_dev *pdev = to_pci_dev(dev);
948 	int err;
949 
950 	/* Give back the D3Cold decision to the runtime PM */
951 	d3cold_toggle(pdev, D3COLD_DISABLE);
952 
953 	err = pci_set_power_state(pdev, PCI_D0);
954 	if (err)
955 		return err;
956 
957 	pci_restore_state(pdev);
958 
959 	err = pci_enable_device(pdev);
960 	if (err)
961 		return err;
962 
963 	pci_set_master(pdev);
964 
965 	err = xe_pm_resume(pdev_to_xe_device(pdev));
966 	if (err)
967 		return err;
968 
969 	return 0;
970 }
971 
972 static int xe_pci_runtime_suspend(struct device *dev)
973 {
974 	struct pci_dev *pdev = to_pci_dev(dev);
975 	struct xe_device *xe = pdev_to_xe_device(pdev);
976 	int err;
977 
978 	err = xe_pm_runtime_suspend(xe);
979 	if (err)
980 		return err;
981 
982 	pci_save_state(pdev);
983 
984 	if (xe->d3cold.allowed) {
985 		d3cold_toggle(pdev, D3COLD_ENABLE);
986 		pci_disable_device(pdev);
987 		pci_ignore_hotplug(pdev);
988 		pci_set_power_state(pdev, PCI_D3cold);
989 	} else {
990 		d3cold_toggle(pdev, D3COLD_DISABLE);
991 		pci_set_power_state(pdev, PCI_D3hot);
992 	}
993 
994 	return 0;
995 }
996 
997 static int xe_pci_runtime_resume(struct device *dev)
998 {
999 	struct pci_dev *pdev = to_pci_dev(dev);
1000 	struct xe_device *xe = pdev_to_xe_device(pdev);
1001 	int err;
1002 
1003 	err = pci_set_power_state(pdev, PCI_D0);
1004 	if (err)
1005 		return err;
1006 
1007 	pci_restore_state(pdev);
1008 
1009 	if (xe->d3cold.allowed) {
1010 		err = pci_enable_device(pdev);
1011 		if (err)
1012 			return err;
1013 
1014 		pci_set_master(pdev);
1015 	}
1016 
1017 	return xe_pm_runtime_resume(xe);
1018 }
1019 
1020 static int xe_pci_runtime_idle(struct device *dev)
1021 {
1022 	struct pci_dev *pdev = to_pci_dev(dev);
1023 	struct xe_device *xe = pdev_to_xe_device(pdev);
1024 
1025 	xe_pm_d3cold_allowed_toggle(xe);
1026 
1027 	return 0;
1028 }
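/*
 * The runtime idle callback re-evaluates whether D3Cold may be used ahead of
 * the next runtime suspend; xe_pci_runtime_suspend() then takes the D3cold
 * or D3hot path based on xe->d3cold.allowed.
 */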
1029 
1030 static const struct dev_pm_ops xe_pm_ops = {
1031 	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
1032 	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
1033 };
1034 #endif
1035 
1036 static struct pci_driver xe_pci_driver = {
1037 	.name = DRIVER_NAME,
1038 	.id_table = pciidlist,
1039 	.probe = xe_pci_probe,
1040 	.remove = xe_pci_remove,
1041 	.shutdown = xe_pci_shutdown,
1042 	.sriov_configure = xe_pci_sriov_configure,
1043 #ifdef CONFIG_PM_SLEEP
1044 	.driver.pm = &xe_pm_ops,
1045 #endif
1046 };
1047 
1048 int xe_register_pci_driver(void)
1049 {
1050 	return pci_register_driver(&xe_pci_driver);
1051 }
1052 
1053 void xe_unregister_pci_driver(void)
1054 {
1055 	pci_unregister_driver(&xe_pci_driver);
1056 }
1057 
1058 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1059 #include "tests/xe_pci.c"
1060 #endif
1061