xref: /linux/drivers/gpu/drm/xe/xe_pci.c (revision e9ef810dfee7a2227da9d423aecb0ced35faddbe)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_pci.h"
7 
8 #include <kunit/static_stub.h>
9 #include <linux/device/driver.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/pm_runtime.h>
13 
14 #include <drm/drm_color_mgmt.h>
15 #include <drm/drm_drv.h>
16 #include <drm/intel/pciids.h>
17 
18 #include "display/xe_display.h"
19 #include "regs/xe_gt_regs.h"
20 #include "xe_device.h"
21 #include "xe_drv.h"
22 #include "xe_gt.h"
23 #include "xe_gt_sriov_vf.h"
24 #include "xe_guc.h"
25 #include "xe_macros.h"
26 #include "xe_mmio.h"
27 #include "xe_module.h"
28 #include "xe_pci_sriov.h"
29 #include "xe_pci_types.h"
30 #include "xe_pm.h"
31 #include "xe_sriov.h"
32 #include "xe_step.h"
33 #include "xe_survivability_mode.h"
34 #include "xe_tile.h"
35 
36 enum toggle_d3cold {
37 	D3COLD_DISABLE,
38 	D3COLD_ENABLE,
39 };
40 
41 __diag_push();
42 __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
43 
44 #define PLATFORM(x)		\
45 	.platform = XE_##x,	\
46 	.platform_name = #x
47 
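/* Expands the INTEL_*_IDS() macros below into bare lists of PCI device IDs */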
48 #define NOP(x)	x
49 
50 static const struct xe_graphics_desc graphics_xelp = {
51 	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
52 
53 	.va_bits = 48,
54 	.vm_max_level = 3,
55 };
56 
57 #define XE_HP_FEATURES \
58 	.has_range_tlb_invalidation = true, \
59 	.va_bits = 48, \
60 	.vm_max_level = 3
61 
62 static const struct xe_graphics_desc graphics_xehpg = {
63 	.hw_engine_mask =
64 		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
65 		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
66 		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
67 
68 	XE_HP_FEATURES,
69 	.vram_flags = XE_VRAM_FLAGS_NEED64K,
70 
71 	.has_flat_ccs = 1,
72 };
73 
74 static const struct xe_graphics_desc graphics_xehpc = {
75 	.hw_engine_mask =
76 		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
77 		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
78 		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
79 		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
80 		BIT(XE_HW_ENGINE_BCS8) |
81 		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
82 		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
83 
84 	XE_HP_FEATURES,
85 	.va_bits = 57,
86 	.vm_max_level = 4,
87 	.vram_flags = XE_VRAM_FLAGS_NEED64K,
88 
89 	.has_asid = 1,
90 	.has_atomic_enable_pte_bit = 1,
91 	.has_usm = 1,
92 };
93 
94 static const struct xe_graphics_desc graphics_xelpg = {
95 	.hw_engine_mask =
96 		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
97 		BIT(XE_HW_ENGINE_CCS0),
98 
99 	XE_HP_FEATURES,
100 };
101 
102 #define XE2_GFX_FEATURES \
103 	.has_asid = 1, \
104 	.has_atomic_enable_pte_bit = 1, \
105 	.has_flat_ccs = 1, \
106 	.has_range_tlb_invalidation = 1, \
107 	.has_usm = 1, \
108 	.has_64bit_timestamp = 1, \
109 	.va_bits = 48, \
110 	.vm_max_level = 4, \
111 	.hw_engine_mask = \
112 		BIT(XE_HW_ENGINE_RCS0) | \
113 		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
114 		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
115 
116 static const struct xe_graphics_desc graphics_xe2 = {
117 	XE2_GFX_FEATURES,
118 };
119 
120 static const struct xe_media_desc media_xem = {
121 	.hw_engine_mask =
122 		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
123 		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
124 };
125 
126 static const struct xe_media_desc media_xelpmp = {
127 	.hw_engine_mask =
128 		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
129 		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
130 		BIT(XE_HW_ENGINE_GSCCS0)
131 };
132 
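/*
 * Platforms that predate the GMD_ID registers have their graphics/media IP
 * versions fixed per platform via the pre_gmdid_*_ip pointers below; newer
 * platforms read the IP versions from hardware at probe time (see
 * handle_gmdid()).
 */
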
133 /* Pre-GMDID Graphics IPs */
134 static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
135 static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
136 static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
137 static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };
138 
139 /* GMDID-based Graphics IPs */
140 static const struct xe_ip graphics_ips[] = {
141 	{ 1270, "Xe_LPG", &graphics_xelpg },
142 	{ 1271, "Xe_LPG", &graphics_xelpg },
143 	{ 1274, "Xe_LPG+", &graphics_xelpg },
144 	{ 2001, "Xe2_HPG", &graphics_xe2 },
145 	{ 2002, "Xe2_HPG", &graphics_xe2 },
146 	{ 2004, "Xe2_LPG", &graphics_xe2 },
147 	{ 3000, "Xe3_LPG", &graphics_xe2 },
148 	{ 3001, "Xe3_LPG", &graphics_xe2 },
149 	{ 3003, "Xe3_LPG", &graphics_xe2 },
150 };
151 
152 /* Pre-GMDID Media IPs */
153 static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
154 static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };
155 
156 /* GMDID-based Media IPs */
157 static const struct xe_ip media_ips[] = {
158 	{ 1300, "Xe_LPM+", &media_xelpmp },
159 	{ 1301, "Xe2_HPM", &media_xelpmp },
160 	{ 2000, "Xe2_LPM", &media_xelpmp },
161 	{ 3000, "Xe3_LPM", &media_xelpmp },
162 	{ 3002, "Xe3_LPM", &media_xelpmp },
163 };
164 
165 static const struct xe_device_desc tgl_desc = {
166 	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
167 	.pre_gmdid_media_ip = &media_ip_xem,
168 	PLATFORM(TIGERLAKE),
169 	.dma_mask_size = 39,
170 	.has_display = true,
171 	.has_llc = true,
172 	.max_gt_per_tile = 1,
173 	.require_force_probe = true,
174 };
175 
176 static const struct xe_device_desc rkl_desc = {
177 	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
178 	.pre_gmdid_media_ip = &media_ip_xem,
179 	PLATFORM(ROCKETLAKE),
180 	.dma_mask_size = 39,
181 	.has_display = true,
182 	.has_llc = true,
183 	.max_gt_per_tile = 1,
184 	.require_force_probe = true,
185 };
186 
187 static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
188 
189 static const struct xe_device_desc adl_s_desc = {
190 	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
191 	.pre_gmdid_media_ip = &media_ip_xem,
192 	PLATFORM(ALDERLAKE_S),
193 	.dma_mask_size = 39,
194 	.has_display = true,
195 	.has_llc = true,
196 	.max_gt_per_tile = 1,
197 	.require_force_probe = true,
198 	.subplatforms = (const struct xe_subplatform_desc[]) {
199 		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
200 		{},
201 	},
202 };
203 
204 static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
205 
206 static const struct xe_device_desc adl_p_desc = {
207 	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
208 	.pre_gmdid_media_ip = &media_ip_xem,
209 	PLATFORM(ALDERLAKE_P),
210 	.dma_mask_size = 39,
211 	.has_display = true,
212 	.has_llc = true,
213 	.max_gt_per_tile = 1,
214 	.require_force_probe = true,
215 	.subplatforms = (const struct xe_subplatform_desc[]) {
216 		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
217 		{},
218 	},
219 };
220 
221 static const struct xe_device_desc adl_n_desc = {
222 	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
223 	.pre_gmdid_media_ip = &media_ip_xem,
224 	PLATFORM(ALDERLAKE_N),
225 	.dma_mask_size = 39,
226 	.has_display = true,
227 	.has_llc = true,
228 	.max_gt_per_tile = 1,
229 	.require_force_probe = true,
230 };
231 
232 #define DGFX_FEATURES \
233 	.is_dgfx = 1
234 
235 static const struct xe_device_desc dg1_desc = {
236 	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
237 	.pre_gmdid_media_ip = &media_ip_xem,
238 	DGFX_FEATURES,
239 	PLATFORM(DG1),
240 	.dma_mask_size = 39,
241 	.has_display = true,
242 	.has_gsc_nvm = 1,
243 	.has_heci_gscfi = 1,
244 	.max_gt_per_tile = 1,
245 	.require_force_probe = true,
246 };
247 
248 static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
249 static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
250 static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
251 
252 #define DG2_FEATURES \
253 	DGFX_FEATURES, \
254 	PLATFORM(DG2), \
255 	.has_gsc_nvm = 1, \
256 	.has_heci_gscfi = 1, \
257 	.subplatforms = (const struct xe_subplatform_desc[]) { \
258 		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
259 		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
260 		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
261 		{ } \
262 	}
263 
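/* ATS-M reuses the DG2 platform/subplatform definitions but has no display */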
264 static const struct xe_device_desc ats_m_desc = {
265 	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
266 	.pre_gmdid_media_ip = &media_ip_xehpm,
267 	.dma_mask_size = 46,
268 	.max_gt_per_tile = 1,
269 	.require_force_probe = true,
270 
271 	DG2_FEATURES,
272 	.has_display = false,
273 };
274 
275 static const struct xe_device_desc dg2_desc = {
276 	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
277 	.pre_gmdid_media_ip = &media_ip_xehpm,
278 	.dma_mask_size = 46,
279 	.max_gt_per_tile = 1,
280 	.require_force_probe = true,
281 
282 	DG2_FEATURES,
283 	.has_display = true,
284 	.has_fan_control = true,
285 	.has_mbx_power_limits = false,
286 };
287 
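/* PVC is not currently listed in pciidlist below, hence __maybe_unused */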
288 static const __maybe_unused struct xe_device_desc pvc_desc = {
289 	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
290 	DGFX_FEATURES,
291 	PLATFORM(PVC),
292 	.dma_mask_size = 52,
293 	.has_display = false,
294 	.has_gsc_nvm = 1,
295 	.has_heci_gscfi = 1,
296 	.max_gt_per_tile = 1,
297 	.max_remote_tiles = 1,
298 	.require_force_probe = true,
299 	.has_mbx_power_limits = false,
300 };
301 
302 static const struct xe_device_desc mtl_desc = {
303 	/* Graphics and media IP versions are determined via GMD_ID */
304 	.require_force_probe = true,
305 	PLATFORM(METEORLAKE),
306 	.dma_mask_size = 46,
307 	.has_display = true,
308 	.has_pxp = true,
309 	.max_gt_per_tile = 2,
310 };
311 
312 static const struct xe_device_desc lnl_desc = {
313 	PLATFORM(LUNARLAKE),
314 	.dma_mask_size = 46,
315 	.has_display = true,
316 	.has_pxp = true,
317 	.max_gt_per_tile = 2,
318 	.needs_scratch = true,
319 };
320 
321 static const struct xe_device_desc bmg_desc = {
322 	DGFX_FEATURES,
323 	PLATFORM(BATTLEMAGE),
324 	.dma_mask_size = 46,
325 	.has_display = true,
326 	.has_fan_control = true,
327 	.has_mbx_power_limits = true,
328 	.has_gsc_nvm = 1,
329 	.has_heci_cscfi = 1,
330 	.has_sriov = true,
331 	.max_gt_per_tile = 2,
332 	.needs_scratch = true,
333 };
334 
335 static const struct xe_device_desc ptl_desc = {
336 	PLATFORM(PANTHERLAKE),
337 	.dma_mask_size = 46,
338 	.has_display = true,
339 	.has_sriov = true,
340 	.max_gt_per_tile = 2,
341 	.needs_scratch = true,
342 };
343 
344 #undef PLATFORM
345 __diag_pop();
346 
347 /*
348  * Make sure any device matches here go from most specific to most
349  * general.  For example, a match based on subsystem and subvendor IDs
350  * must come before a more general match on the device ID alone,
351  * otherwise the wrong descriptor above would be used.
352  */
353 static const struct pci_device_id pciidlist[] = {
354 	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
355 	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
356 	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
357 	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
358 	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
359 	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
360 	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
361 	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
362 	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
363 	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
364 	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
365 	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
366 	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
367 	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
368 	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
369 	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
370 	{ }
371 };
372 MODULE_DEVICE_TABLE(pci, pciidlist);
373 
374 /* Is device_id present in the comma-separated list of ids? */
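/*
 * The list uses the xe.force_probe module parameter format: comma-separated
 * hexadecimal PCI device IDs, optionally prefixed with '!' to block rather
 * than force a probe (e.g. "56a0,!56a1" - illustrative IDs). "*" matches
 * every device and "!*" blocks every device.
 */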
375 static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
376 {
377 	char *s, *p, *tok;
378 	bool ret;
379 
380 	if (!devices || !*devices)
381 		return false;
382 
383 	/* match everything */
384 	if (negative && strcmp(devices, "!*") == 0)
385 		return true;
386 	if (!negative && strcmp(devices, "*") == 0)
387 		return true;
388 
389 	s = kstrdup(devices, GFP_KERNEL);
390 	if (!s)
391 		return false;
392 
393 	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
394 		u16 val;
395 
396 		if (negative && tok[0] == '!')
397 			tok++;
398 		else if ((negative && tok[0] != '!') ||
399 			 (!negative && tok[0] == '!'))
400 			continue;
401 
402 		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
403 			ret = true;
404 			break;
405 		}
406 	}
407 
408 	kfree(s);
409 
410 	return ret;
411 }
412 
413 static bool id_forced(u16 device_id)
414 {
415 	return device_id_in_list(device_id, xe_modparam.force_probe, false);
416 }
417 
418 static bool id_blocked(u16 device_id)
419 {
420 	return device_id_in_list(device_id, xe_modparam.force_probe, true);
421 }
422 
423 static const struct xe_subplatform_desc *
424 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
425 {
426 	const struct xe_subplatform_desc *sp;
427 	const u16 *id;
428 
429 	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
430 		for (id = sp->pciidlist; *id; id++)
431 			if (*id == xe->info.devid)
432 				return sp;
433 
434 	return NULL;
435 }
436 
437 enum xe_gmdid_type {
438 	GMDID_GRAPHICS,
439 	GMDID_MEDIA
440 };
441 
442 static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
443 {
444 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
445 	struct xe_reg gmdid_reg = GMD_ID;
446 	u32 val;
447 
448 	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
449 
450 	if (IS_SRIOV_VF(xe)) {
451 		struct xe_gt *gt = xe_root_mmio_gt(xe);
452 
453 		/*
454 		 * To get the value of the GMDID register, VFs must obtain it
455 		 * from the GuC using MMIO communication.
456 		 *
457 		 * Note that at this point the xe_gt is not fully initialized
458 		 * and only basic access to MMIO registers is possible. To use
459 		 * our existing GuC communication functions we must perform at
460 		 * least basic xe_gt and xe_guc initialization.
461 		 *
462 		 * Since to obtain the value of GMDID_MEDIA we need to use the
463 		 * media GuC, temporarily tweak the gt type.
464 		 */
465 		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
466 
467 		if (type == GMDID_MEDIA) {
468 			gt->info.id = 1;
469 			gt->info.type = XE_GT_TYPE_MEDIA;
470 		} else {
471 			gt->info.id = 0;
472 			gt->info.type = XE_GT_TYPE_MAIN;
473 		}
474 
475 		xe_gt_mmio_init(gt);
476 		xe_guc_comm_init_early(&gt->uc.guc);
477 
478 		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
479 		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);
480 
481 		/*
482 		 * Only undo xe_gt.info here, the remaining changes made above
483 		 * will be overwritten as part of the regular initialization.
484 		 */
485 		gt->info.id = 0;
486 		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
487 	} else {
488 		/*
489 		 * GMD_ID is a GT register, but at this point in the driver
490 		 * init we haven't fully initialized the GT yet so we need to
491 		 * read the register with the tile's MMIO accessor.  That means
492 		 * we need to apply the GSI offset manually since it won't get
493 		 * automatically added as it would if we were using a GT mmio
494 		 * accessor.
495 		 */
496 		if (type == GMDID_MEDIA)
497 			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;
498 
499 		val = xe_mmio_read32(mmio, gmdid_reg);
500 	}
501 
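	/*
	 * verx100 combines the architecture and release fields: e.g. an arch
	 * of 12 and a release of 71 yield 1271, which is then matched against
	 * the graphics_ips/media_ips tables above.
	 */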
502 	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
503 	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
504 }
505 
506 /*
507  * Read IP version from hardware and select graphics/media IP descriptors
508  * based on the result.
509  */
510 static void handle_gmdid(struct xe_device *xe,
511 			 const struct xe_ip **graphics_ip,
512 			 const struct xe_ip **media_ip,
513 			 u32 *graphics_revid,
514 			 u32 *media_revid)
515 {
516 	u32 ver;
517 
518 	*graphics_ip = NULL;
519 	*media_ip = NULL;
520 
521 	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
522 
523 	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
524 		if (ver == graphics_ips[i].verx100) {
525 			*graphics_ip = &graphics_ips[i];
526 
527 			break;
528 		}
529 	}
530 
531 	if (!*graphics_ip) {
532 		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
533 			ver / 100, ver % 100);
534 	}
535 
536 	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
537 	/* Media may legitimately be fused off / not present */
538 	if (ver == 0)
539 		return;
540 
541 	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
542 		if (ver == media_ips[i].verx100) {
543 			*media_ip = &media_ips[i];
544 
545 			break;
546 		}
547 	}
548 
549 	if (!*media_ip) {
550 		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
551 			ver / 100, ver % 100);
552 	}
553 }
554 
555 /*
556  * Initialize device info content that only depends on static driver_data
557  * passed to the driver at probe time from PCI ID table.
558  */
559 static int xe_info_init_early(struct xe_device *xe,
560 			      const struct xe_device_desc *desc,
561 			      const struct xe_subplatform_desc *subplatform_desc)
562 {
563 	int err;
564 
565 	xe->info.platform_name = desc->platform_name;
566 	xe->info.platform = desc->platform;
567 	xe->info.subplatform = subplatform_desc ?
568 		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
569 
570 	xe->info.dma_mask_size = desc->dma_mask_size;
571 	xe->info.is_dgfx = desc->is_dgfx;
572 	xe->info.has_fan_control = desc->has_fan_control;
573 	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
574 	xe->info.has_gsc_nvm = desc->has_gsc_nvm;
575 	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
576 	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
577 	xe->info.has_llc = desc->has_llc;
578 	xe->info.has_pxp = desc->has_pxp;
579 	xe->info.has_sriov = desc->has_sriov;
580 	xe->info.skip_guc_pc = desc->skip_guc_pc;
581 	xe->info.skip_mtcfg = desc->skip_mtcfg;
582 	xe->info.skip_pcode = desc->skip_pcode;
583 	xe->info.needs_scratch = desc->needs_scratch;
584 
585 	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
586 				 xe_modparam.probe_display &&
587 				 desc->has_display;
588 
589 	xe_assert(xe, desc->max_gt_per_tile > 0);
590 	xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
591 	xe->info.max_gt_per_tile = desc->max_gt_per_tile;
592 	xe->info.tile_count = 1 + desc->max_remote_tiles;
593 
594 	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
595 	if (err)
596 		return err;
597 
598 	return 0;
599 }
600 
601 /*
602  * Initialize device info content that does require knowledge about
603  * graphics / media IP version.
604  * Make sure that GT / tile structures allocated by the driver match the data
605  * present in device info.
606  */
607 static int xe_info_init(struct xe_device *xe,
608 			const struct xe_device_desc *desc)
609 {
610 	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
611 	const struct xe_ip *graphics_ip;
612 	const struct xe_ip *media_ip;
613 	const struct xe_graphics_desc *graphics_desc;
614 	const struct xe_media_desc *media_desc;
615 	struct xe_tile *tile;
616 	struct xe_gt *gt;
617 	u8 id;
618 
619 	/*
620 	 * If this platform supports GMD_ID, we'll detect the proper IP
621 	 * descriptor to use from hardware registers.
622 	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
623 	 * platforms before GMD_ID. In that case the IP descriptions and
624 	 * versions are simply derived from that.
625 	 */
626 	if (desc->pre_gmdid_graphics_ip) {
627 		graphics_ip = desc->pre_gmdid_graphics_ip;
628 		media_ip = desc->pre_gmdid_media_ip;
629 		xe->info.step = xe_step_pre_gmdid_get(xe);
630 	} else {
631 		xe_assert(xe, !desc->pre_gmdid_media_ip);
632 		handle_gmdid(xe, &graphics_ip, &media_ip,
633 			     &graphics_gmdid_revid, &media_gmdid_revid);
634 		xe->info.step = xe_step_gmdid_get(xe,
635 						  graphics_gmdid_revid,
636 						  media_gmdid_revid);
637 	}
638 
639 	/*
640 	 * If we couldn't detect the graphics IP, that's considered a fatal
641 	 * error and we should abort driver load.  Failing to detect media
642 	 * IP is non-fatal; we'll just proceed without enabling media support.
643 	 */
644 	if (!graphics_ip)
645 		return -ENODEV;
646 
647 	xe->info.graphics_verx100 = graphics_ip->verx100;
648 	xe->info.graphics_name = graphics_ip->name;
649 	graphics_desc = graphics_ip->desc;
650 
651 	if (media_ip) {
652 		xe->info.media_verx100 = media_ip->verx100;
653 		xe->info.media_name = media_ip->name;
654 		media_desc = media_ip->desc;
655 	} else {
656 		xe->info.media_name = "none";
657 		media_desc = NULL;
658 	}
659 
660 	xe->info.vram_flags = graphics_desc->vram_flags;
661 	xe->info.va_bits = graphics_desc->va_bits;
662 	xe->info.vm_max_level = graphics_desc->vm_max_level;
663 	xe->info.has_asid = graphics_desc->has_asid;
664 	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
665 	if (xe->info.platform != XE_PVC)
666 		xe->info.has_device_atomics_on_smem = 1;
667 
668 	/* Runtime detection may change this later */
669 	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
670 
671 	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
672 	xe->info.has_usm = graphics_desc->has_usm;
673 	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
674 
675 	for_each_remote_tile(tile, xe, id) {
676 		int err;
677 
678 		err = xe_tile_init_early(tile, xe, id);
679 		if (err)
680 			return err;
681 	}
682 
683 	/*
684 	 * All platforms have at least one primary GT.  Any platform with media
685 	 * version 13 or higher has an additional dedicated media GT.  And
686 	 * depending on the graphics IP there may be additional "remote tiles."
687 	 * All of these together determine the overall GT count.
688 	 */
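	/*
	 * GT ids are derived from the tile id below: with max_gt_per_tile == 2,
	 * tile 0 uses GT ids 0 (primary) and 1 (media), tile 1 uses 2 and 3,
	 * and so on.
	 */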
689 	for_each_tile(tile, xe, id) {
690 		gt = tile->primary_gt;
691 		gt->info.type = XE_GT_TYPE_MAIN;
692 		gt->info.id = tile->id * xe->info.max_gt_per_tile;
693 		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
694 		gt->info.engine_mask = graphics_desc->hw_engine_mask;
695 		xe->info.gt_count++;
696 
697 		if (MEDIA_VER(xe) < 13 && media_desc)
698 			gt->info.engine_mask |= media_desc->hw_engine_mask;
699 
700 		if (MEDIA_VER(xe) < 13 || !media_desc)
701 			continue;
702 
703 		/*
704 		 * Allocate and set up the media GT for platforms with standalone
705 		 * media.
706 		 */
707 		tile->media_gt = xe_gt_alloc(tile);
708 		if (IS_ERR(tile->media_gt))
709 			return PTR_ERR(tile->media_gt);
710 
711 		gt = tile->media_gt;
712 		gt->info.type = XE_GT_TYPE_MEDIA;
713 		gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
714 		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
715 		gt->info.engine_mask = media_desc->hw_engine_mask;
716 		xe->info.gt_count++;
717 	}
718 
719 	return 0;
720 }
721 
722 static void xe_pci_remove(struct pci_dev *pdev)
723 {
724 	struct xe_device *xe = pdev_to_xe_device(pdev);
725 
726 	if (IS_SRIOV_PF(xe))
727 		xe_pci_sriov_configure(pdev, 0);
728 
729 	if (xe_survivability_mode_is_enabled(xe))
730 		return;
731 
732 	xe_device_remove(xe);
733 	xe_pm_fini(xe);
734 }
735 
736 /*
737  * Probe the PCI device, initialize various parts of the driver.
738  *
739  * Fault injection is used to test the error paths of some initialization
740  * functions called either directly from xe_pci_probe() or indirectly for
741  * example through xe_device_probe(). Those functions use the kernel fault
742  * injection capabilities infrastructure, see
743  * Documentation/fault-injection/fault-injection.rst for details. The macro
744  * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
745  * at runtime and use a provided return value. The first requirement for
746  * error injectable functions is proper handling of the error code by the
747  * caller for recovery, which is always the case here. The second
748  * requirement is that no state is changed before the first error return.
749  * It is not strictly fulfilled for all initialization functions using the
750  * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
751  * error cases at probe time, the error code is simply propagated up by the
752  * caller. Therefore there is no consequence on those specific callers when
753  * function error injection skips the whole function.
754  */
755 static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
756 {
757 	const struct xe_device_desc *desc = (const void *)ent->driver_data;
758 	const struct xe_subplatform_desc *subplatform_desc;
759 	struct xe_device *xe;
760 	int err;
761 
762 	if (desc->require_force_probe && !id_forced(pdev->device)) {
763 		dev_info(&pdev->dev,
764 			 "Your graphics device %04x is not officially supported\n"
765 			 "by xe driver in this kernel version. To force Xe probe,\n"
766 			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
767 			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
768 			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
769 			 pdev->device, pdev->device, pdev->device,
770 			 pdev->device, pdev->device);
771 		return -ENODEV;
772 	}
773 
774 	if (id_blocked(pdev->device)) {
775 		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
776 			 pdev->vendor, pdev->device);
777 		return -ENODEV;
778 	}
779 
780 	if (xe_display_driver_probe_defer(pdev))
781 		return -EPROBE_DEFER;
782 
783 	err = pcim_enable_device(pdev);
784 	if (err)
785 		return err;
786 
787 	xe = xe_device_create(pdev, ent);
788 	if (IS_ERR(xe))
789 		return PTR_ERR(xe);
790 
791 	pci_set_drvdata(pdev, &xe->drm);
792 
793 	xe_pm_assert_unbounded_bridge(xe);
794 	subplatform_desc = find_subplatform(xe, desc);
795 
796 	pci_set_master(pdev);
797 
798 	err = xe_info_init_early(xe, desc, subplatform_desc);
799 	if (err)
800 		return err;
801 
802 	err = xe_device_probe_early(xe);
803 	/*
804 	 * In Boot Survivability mode, no DRM card is exposed and the driver
805 	 * is loaded with the bare minimum needed to allow firmware to be
806 	 * flashed through MEI. Return success if survivability mode is
807 	 * enabled, whether due to a pcode failure or a configfs request.
808 	 */
809 	if (xe_survivability_mode_is_enabled(xe))
810 		return 0;
811 
812 	if (err)
813 		return err;
814 
815 	err = xe_info_init(xe, desc);
816 	if (err)
817 		return err;
818 
819 	err = xe_display_probe(xe);
820 	if (err)
821 		return err;
822 
823 	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
824 		desc->platform_name,
825 		subplatform_desc ? subplatform_desc->name : "",
826 		xe->info.devid, xe->info.revid,
827 		xe->info.is_dgfx,
828 		xe->info.graphics_name,
829 		xe->info.graphics_verx100 / 100,
830 		xe->info.graphics_verx100 % 100,
831 		xe->info.media_name,
832 		xe->info.media_verx100 / 100,
833 		xe->info.media_verx100 % 100,
834 		str_yes_no(xe->info.probe_display),
835 		xe->info.dma_mask_size, xe->info.tile_count,
836 		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);
837 
838 	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
839 		xe_step_name(xe->info.step.graphics),
840 		xe_step_name(xe->info.step.media),
841 		xe_step_name(xe->info.step.basedie));
842 
843 	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
844 		str_yes_no(xe_device_has_sriov(xe)),
845 		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
846 
847 	err = xe_pm_init_early(xe);
848 	if (err)
849 		return err;
850 
851 	err = xe_device_probe(xe);
852 	if (err)
853 		return err;
854 
855 	err = xe_pm_init(xe);
856 	if (err)
857 		goto err_driver_cleanup;
858 
859 	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
860 		str_yes_no(xe->d3cold.capable));
861 
862 	return 0;
863 
864 err_driver_cleanup:
865 	xe_pci_remove(pdev);
866 	return err;
867 }
868 
869 static void xe_pci_shutdown(struct pci_dev *pdev)
870 {
871 	xe_device_shutdown(pdev_to_xe_device(pdev));
872 }
873 
874 #ifdef CONFIG_PM_SLEEP
875 static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
876 {
877 	struct xe_device *xe = pdev_to_xe_device(pdev);
878 	struct pci_dev *root_pdev;
879 
880 	if (!xe->d3cold.capable)
881 		return;
882 
883 	root_pdev = pcie_find_root_port(pdev);
884 	if (!root_pdev)
885 		return;
886 
887 	switch (toggle) {
888 	case D3COLD_DISABLE:
889 		pci_d3cold_disable(root_pdev);
890 		break;
891 	case D3COLD_ENABLE:
892 		pci_d3cold_enable(root_pdev);
893 		break;
894 	}
895 }
896 
897 static int xe_pci_suspend(struct device *dev)
898 {
899 	struct pci_dev *pdev = to_pci_dev(dev);
900 	struct xe_device *xe = pdev_to_xe_device(pdev);
901 	int err;
902 
903 	if (xe_survivability_mode_is_enabled(xe))
904 		return -EBUSY;
905 
906 	err = xe_pm_suspend(xe);
907 	if (err)
908 		return err;
909 
910 	/*
911 	 * Enabling D3Cold is needed for S2Idle/S0ix.
912 	 * It is safe to allow it here since xe_pm_suspend has evicted
913 	 * the local memory and the direct complete optimization is disabled.
914 	 */
915 	d3cold_toggle(pdev, D3COLD_ENABLE);
916 
917 	pci_save_state(pdev);
918 	pci_disable_device(pdev);
919 	pci_set_power_state(pdev, PCI_D3cold);
920 
921 	return 0;
922 }
923 
924 static int xe_pci_resume(struct device *dev)
925 {
926 	struct pci_dev *pdev = to_pci_dev(dev);
927 	int err;
928 
929 	/* Give back the D3Cold decision to runtime PM */
930 	d3cold_toggle(pdev, D3COLD_DISABLE);
931 
932 	err = pci_set_power_state(pdev, PCI_D0);
933 	if (err)
934 		return err;
935 
936 	pci_restore_state(pdev);
937 
938 	err = pci_enable_device(pdev);
939 	if (err)
940 		return err;
941 
942 	pci_set_master(pdev);
943 
944 	err = xe_pm_resume(pdev_to_xe_device(pdev));
945 	if (err)
946 		return err;
947 
948 	return 0;
949 }
950 
951 static int xe_pci_runtime_suspend(struct device *dev)
952 {
953 	struct pci_dev *pdev = to_pci_dev(dev);
954 	struct xe_device *xe = pdev_to_xe_device(pdev);
955 	int err;
956 
957 	err = xe_pm_runtime_suspend(xe);
958 	if (err)
959 		return err;
960 
961 	pci_save_state(pdev);
962 
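	/*
	 * With D3cold allowed the device is fully powered off, so runtime
	 * resume has to re-enable it and restore bus mastering; in D3hot the
	 * PCI function stays enabled.
	 */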
963 	if (xe->d3cold.allowed) {
964 		d3cold_toggle(pdev, D3COLD_ENABLE);
965 		pci_disable_device(pdev);
966 		pci_ignore_hotplug(pdev);
967 		pci_set_power_state(pdev, PCI_D3cold);
968 	} else {
969 		d3cold_toggle(pdev, D3COLD_DISABLE);
970 		pci_set_power_state(pdev, PCI_D3hot);
971 	}
972 
973 	return 0;
974 }
975 
976 static int xe_pci_runtime_resume(struct device *dev)
977 {
978 	struct pci_dev *pdev = to_pci_dev(dev);
979 	struct xe_device *xe = pdev_to_xe_device(pdev);
980 	int err;
981 
982 	err = pci_set_power_state(pdev, PCI_D0);
983 	if (err)
984 		return err;
985 
986 	pci_restore_state(pdev);
987 
988 	if (xe->d3cold.allowed) {
989 		err = pci_enable_device(pdev);
990 		if (err)
991 			return err;
992 
993 		pci_set_master(pdev);
994 	}
995 
996 	return xe_pm_runtime_resume(xe);
997 }
998 
999 static int xe_pci_runtime_idle(struct device *dev)
1000 {
1001 	struct pci_dev *pdev = to_pci_dev(dev);
1002 	struct xe_device *xe = pdev_to_xe_device(pdev);
1003 
1004 	xe_pm_d3cold_allowed_toggle(xe);
1005 
1006 	return 0;
1007 }
1008 
1009 static const struct dev_pm_ops xe_pm_ops = {
1010 	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
1011 	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
1012 };
1013 #endif
1014 
1015 static struct pci_driver xe_pci_driver = {
1016 	.name = DRIVER_NAME,
1017 	.id_table = pciidlist,
1018 	.probe = xe_pci_probe,
1019 	.remove = xe_pci_remove,
1020 	.shutdown = xe_pci_shutdown,
1021 	.sriov_configure = xe_pci_sriov_configure,
1022 #ifdef CONFIG_PM_SLEEP
1023 	.driver.pm = &xe_pm_ops,
1024 #endif
1025 };
1026 
1027 int xe_register_pci_driver(void)
1028 {
1029 	return pci_register_driver(&xe_pci_driver);
1030 }
1031 
1032 void xe_unregister_pci_driver(void)
1033 {
1034 	pci_unregister_driver(&xe_pci_driver);
1035 }
1036 
1037 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1038 #include "tests/xe_pci.c"
1039 #endif
1040