xref: /linux/drivers/gpu/drm/xe/xe_gt_topology.c (revision 08df80a3c51674ab73ae770885a383ca553fbbbf)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_gt_topology.h"
7 
8 #include <linux/bitmap.h>
9 
10 #include "regs/xe_gt_regs.h"
11 #include "xe_gt.h"
12 #include "xe_mmio.h"
13 
14 #define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
15 #define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)
16 
17 static void
18 load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
19 {
20 	va_list argp;
21 	u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
22 	int i;
23 
24 	if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
25 		numregs = XE_MAX_DSS_FUSE_REGS;
26 
27 	va_start(argp, numregs);
28 	for (i = 0; i < numregs; i++)
29 		fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
30 	va_end(argp);
31 
32 	bitmap_from_arr32(mask, fuse_val, numregs * 32);
33 }
34 
35 static void
36 load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
37 {
38 	struct xe_device *xe = gt_to_xe(gt);
39 	u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
40 	u32 val = 0;
41 	int i;
42 
43 	BUILD_BUG_ON(XE_MAX_EU_FUSE_REGS > 1);
44 
45 	/*
46 	 * Pre-Xe_HP platforms inverted the bit meaning (disable instead
47 	 * of enable).
48 	 */
49 	if (GRAPHICS_VERx100(xe) < 1250)
50 		reg_val = ~reg_val & XELP_EU_MASK;
51 
52 	/* On PVC, one bit = one EU */
53 	if (GRAPHICS_VERx100(xe) == 1260) {
54 		val = reg_val;
55 	} else {
56 		/* All other platforms, one bit = 2 EU */
57 		for (i = 0; i < fls(reg_val); i++)
58 			if (reg_val & BIT(i))
59 				val |= 0x3 << 2 * i;
60 	}
61 
62 	bitmap_from_arr32(mask, &val, XE_MAX_EU_FUSE_BITS);
63 }
64 
/*
 * Report how many geometry and compute DSS fuse registers this platform
 * provides.  Platform checks are ordered from newest to oldest.
 */
static void
get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
{
	int geom, comp;

	if (GRAPHICS_VER(xe) > 20) {
		geom = 3;
		comp = 3;
	} else if (GRAPHICS_VERx100(xe) == 1260) {
		geom = 0;
		comp = 2;
	} else if (GRAPHICS_VERx100(xe) >= 1250) {
		geom = 1;
		comp = 1;
	} else {
		geom = 1;
		comp = 0;
	}

	*geometry_regs = geom;
	*compute_regs = comp;
}
82 
83 void
84 xe_gt_topology_init(struct xe_gt *gt)
85 {
86 	struct xe_device *xe = gt_to_xe(gt);
87 	struct drm_printer p = drm_debug_printer("GT topology");
88 	int num_geometry_regs, num_compute_regs;
89 
90 	get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
91 
92 	/*
93 	 * Register counts returned shouldn't exceed the number of registers
94 	 * passed as parameters below.
95 	 */
96 	drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
97 	drm_WARN_ON(&xe->drm, num_compute_regs > 3);
98 
99 	load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
100 		      num_geometry_regs,
101 		      XELP_GT_GEOMETRY_DSS_ENABLE,
102 		      XE2_GT_GEOMETRY_DSS_1,
103 		      XE2_GT_GEOMETRY_DSS_2);
104 	load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
105 		      XEHP_GT_COMPUTE_DSS_ENABLE,
106 		      XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
107 		      XE2_GT_COMPUTE_DSS_2);
108 	load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
109 
110 	xe_gt_topology_dump(gt, &p);
111 }
112 
/**
 * xe_gt_topology_dump - Print the GT's fuse topology masks
 * @gt: GT whose geometry DSS, compute DSS and per-DSS EU masks are printed
 * @p: drm_printer receiving the output
 */
void
xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
	drm_printf(p, "dss mask (geometry): %*pb\n", XE_MAX_DSS_FUSE_BITS,
		   gt->fuse_topo.g_dss_mask);
	drm_printf(p, "dss mask (compute):  %*pb\n", XE_MAX_DSS_FUSE_BITS,
		   gt->fuse_topo.c_dss_mask);

	drm_printf(p, "EU mask per DSS:     %*pb\n", XE_MAX_EU_FUSE_BITS,
		   gt->fuse_topo.eu_mask_per_dss);

}
125 
126 /*
127  * Used to obtain the index of the first DSS.  Can start searching from the
128  * beginning of a specific dss group (e.g., gslice, cslice, etc.) if
129  * groupsize and groupnum are non-zero.
130  */
131 unsigned int
132 xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
133 {
134 	return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
135 }
136 
137 bool xe_dss_mask_empty(const xe_dss_mask_t mask)
138 {
139 	return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS);
140 }
141 
142 /**
143  * xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
144  * @gt: GT to check
145  * @quad: Which quadrant of the DSS space to check
146  *
147  * Since Xe_HP platforms can have up to four CCS engines, those engines
148  * are each logically associated with a quarter of the possible DSS.  If there
149  * are no DSS present in one of the four quadrants of the DSS space, the
150  * corresponding CCS engine is also not available for use.
151  *
152  * Returns false if all DSS in a quadrant of the GT are fused off, else true.
153  */
154 bool xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad)
155 {
156 	struct xe_device *xe = gt_to_xe(gt);
157 	xe_dss_mask_t all_dss;
158 	int g_dss_regs, c_dss_regs, dss_per_quad, quad_first;
159 
160 	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
161 		  XE_MAX_DSS_FUSE_BITS);
162 
163 	get_num_dss_regs(xe, &g_dss_regs, &c_dss_regs);
164 	dss_per_quad = 32 * max(g_dss_regs, c_dss_regs) / 4;
165 
166 	quad_first = xe_dss_mask_group_ffs(all_dss, dss_per_quad, quad);
167 
168 	return quad_first < (quad + 1) * dss_per_quad;
169 }
170