// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x
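
/*
 * Illustrative notes, not generated code: PLATFORM(TIGERLAKE) expands to
 * '.platform = XE_TIGERLAKE, .platform_name = "TIGERLAKE"'.  NOP is the
 * identity macro; passing it to the INTEL_*_IDS() list macros from
 * <drm/intel/pciids.h> yields the bare PCI device IDs used in the
 * subplatform ID tables below.
 */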

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_inval = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_range_tlb_inval = 1, \
	.has_usm = 1, \
	.has_64bit_timestamp = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
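
/*
 * Note (illustrative): GENMASK(hi, lo) sets the contiguous bits hi..lo
 * inclusive, so GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0) above is
 * equivalent to ORing BIT(XE_HW_ENGINE_CCS0) through
 * BIT(XE_HW_ENGINE_CCS3) individually.
 */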

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2002, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
	{ 3003, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
	{ 3002, "Xe3_LPM", &media_xelpmp },
};

static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_gsc_nvm = 1, \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.max_gt_per_tile = 1,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
	.has_sriov = true,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.max_gt_per_tile = 1,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = false,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_gt_per_tile = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
	.has_mbx_power_limits = false,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.max_gt_per_tile = 2,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = true,
	.has_gsc_nvm = 1,
	.has_heci_cscfi = 1,
	.has_late_bind = true,
	.has_sriov = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
};

#undef PLATFORM
__diag_pop();

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, a subplatform match based on subsystem and
 * subvendor IDs would need to come before the more general platform
 * PCI ID matches, otherwise the wrong descriptor above would be used.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
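
/*
 * Schematic expansion (illustrative): INTEL_TGL_IDS(INTEL_VGA_DEVICE,
 * &tgl_desc) emits one pci_device_id entry per Tiger Lake device ID,
 * with .driver_data pointing at tgl_desc; xe_pci_probe() recovers the
 * descriptor via ent->driver_data.
 */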

/* Is device_id present in the comma-separated list of IDs? */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}
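
/*
 * Example (illustrative, hypothetical IDs): with
 * xe.force_probe="4f80,!4f81", the non-negative pass matches "4f80" so
 * device 0x4f80 is force-probed, while the negative pass strips the '!'
 * from "!4f81" and blocks probe of 0x4f81.  The special strings "*" and
 * "!*" force or block every device, respectively.
 */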

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since obtaining the value of GMDID_MEDIA requires the media
		 * GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here; the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor.  That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
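
/*
 * Example (illustrative): a GMD_ID value with ARCH == 12 and RELEASE == 71
 * decodes to verx100 = 12 * 100 + 71 = 1271, which find_graphics_ip()
 * below resolves to the "Xe_LPG" entry in graphics_ips[].
 */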

static const struct xe_ip *find_graphics_ip(unsigned int verx100)
{
	KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++)
		if (graphics_ips[i].verx100 == verx100)
			return &graphics_ips[i];
	return NULL;
}

static const struct xe_ip *find_media_ip(unsigned int verx100)
{
	KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100);

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++)
		if (media_ips[i].verx100 == verx100)
			return &media_ips[i];
	return NULL;
}

/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	*graphics_ip = find_graphics_ip(ver);
	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	*media_ip = find_media_ip(ver);
	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
	xe->info.has_gsc_nvm = desc->has_gsc_nvm;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_late_bind = desc->has_late_bind;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;

	xe_assert(xe, desc->max_gt_per_tile > 0);
	xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
	xe->info.max_gt_per_tile = desc->max_gt_per_tile;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Possibly override the number of tiles based on the configuration register.
 */
static void xe_info_probe_tile_count(struct xe_device *xe)
{
	struct xe_mmio *mmio;
	u8 tile_count;
	u32 mtcfg;

	KUNIT_STATIC_STUB_REDIRECT(xe_info_probe_tile_count, xe);

	/*
	 * Probe for tile count only for platforms that support multiple
	 * tiles.
	 */
	if (xe->info.tile_count == 1)
		return;

	if (xe->info.skip_mtcfg)
		return;

	mmio = xe_root_tile_mmio(xe);

	/*
	 * Although the per-tile mmio regs are not yet initialized, this
	 * is fine because it goes through the root tile's mmio, which is
	 * guaranteed to be initialized earlier in xe_mmio_probe_early().
	 */
	mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
	tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

	if (tile_count < xe->info.tile_count) {
		drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
			 xe->info.tile_count, tile_count);
		xe->info.tile_count = tile_count;
	}
}
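
/*
 * Note (illustrative): the MTCFG probe above can only lower
 * xe->info.tile_count from the descriptor's 1 + max_remote_tiles default,
 * never raise it; a TILE_COUNT field of 0 reduces a multi-tile-capable
 * part to a single tile.
 */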

/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load.  Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
	xe->info.has_usm = graphics_desc->has_usm;
	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;

	xe_info_probe_tile_count(xe);

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT.  Any platform with media
	 * version 13 or higher has an additional dedicated media GT.  And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		int err;

		gt = tile->primary_gt;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.id = tile->id * xe->info.max_gt_per_tile;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		err = xe_tile_alloc_vram(tile);
		if (err)
			return err;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;
	}
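
	/*
	 * Example (illustrative): with max_gt_per_tile == 2, tile 0 holds
	 * GT ids 0 (primary) and 1 (media) and tile 1 holds ids 2 and 3;
	 * media GTs only exist on platforms with media version 13+.
	 */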

	/*
	 * Now that we have tiles and GTs defined, let's loop over valid GTs
	 * in order to define gt_count.
	 */
	for_each_gt(gt, xe, id)
		xe->info.gt_count++;

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_boot_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}

/*
 * Probe the PCI device and initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error-injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because, for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence for those specific callers when
 * function error injection skips the whole function.
 */
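/*
 * Illustrative usage pattern of such an error-injectable init function
 * (xe_foo_init is hypothetical; real instances live next to their
 * definitions elsewhere in the driver):
 *
 *	static int xe_foo_init(struct xe_device *xe) { ... }
 *	ALLOW_ERROR_INJECTION(xe_foo_init, ERRNO);
 */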
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	xe_configfs_check_device(pdev);

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	/*
	 * In Boot Survivability mode, no DRM card is exposed and the driver
	 * is loaded with the bare minimum needed to allow firmware to be
	 * flashed through MEI. Return success if survivability mode is
	 * enabled due to a pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_boot_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_boot_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow it here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif