/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_SSEU_H__
#define __INTEL_SSEU_H__

#include <linux/types.h>
#include <linux/kernel.h>

#include "i915_gem.h"

struct drm_i915_private;
struct intel_gt;
struct drm_printer;

/*
 * Maximum number of slices on older platforms.  Slices no longer exist
 * starting on Xe_HP ("gslices," "cslices," etc. are a different concept and
 * are not expressed through fusing).
 */
#define GEN_MAX_HSW_SLICES		3

/*
 * Maximum number of subslices that can exist within a HSW-style slice.  This
 * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
 * I915_MAX_SS_FUSE_BITS value below).
 */
#define GEN_MAX_SS_PER_HSW_SLICE	8

/*
 * Maximum number of 32-bit registers used by hardware to express the
 * enabled/disabled subslices.
 */
#define I915_MAX_SS_FUSE_REGS	2
#define I915_MAX_SS_FUSE_BITS	(I915_MAX_SS_FUSE_REGS * 32)

/* Maximum number of EUs that can exist within a subslice or DSS. */
#define GEN_MAX_EUS_PER_SS		16

#define SSEU_MAX(a, b)			((a) > (b) ? (a) : (b))

/* The maximum number of bits needed to express each subslice/DSS independently */
#define GEN_SS_MASK_SIZE		SSEU_MAX(I915_MAX_SS_FUSE_BITS, \
						 GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE)

#define GEN_SSEU_STRIDE(max_entries)	DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
#define GEN_MAX_SUBSLICE_STRIDE		GEN_SSEU_STRIDE(GEN_SS_MASK_SIZE)
#define GEN_MAX_EU_STRIDE		GEN_SSEU_STRIDE(GEN_MAX_EUS_PER_SS)
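
/*
 * Worked example (values follow directly from the definitions above):
 * I915_MAX_SS_FUSE_BITS is 2 * 32 = 64 and GEN_MAX_HSW_SLICES *
 * GEN_MAX_SS_PER_HSW_SLICE is 3 * 8 = 24, so GEN_SS_MASK_SIZE is 64.
 * GEN_MAX_SUBSLICE_STRIDE is therefore DIV_ROUND_UP(64, 8) = 8 bytes and
 * GEN_MAX_EU_STRIDE is DIV_ROUND_UP(16, 8) = 2 bytes.
 */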

#define GEN_DSS_PER_GSLICE	4
#define GEN_DSS_PER_CSLICE	8
#define GEN_DSS_PER_MSLICE	8

#define GEN_MAX_GSLICES		(I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_GSLICE)
#define GEN_MAX_CSLICES		(I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_CSLICE)
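
/*
 * With the 64-bit fuse mask above these work out to at most 64 / 4 = 16
 * gslices and 64 / 8 = 8 cslices; they are upper bounds, not counts of
 * what a given platform actually exposes.
 */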

typedef union {
	u8 hsw[GEN_MAX_HSW_SLICES];

	/* Bitmap compatible with linux/bitmap.h; may exceed size of u64 */
	unsigned long xehp[BITS_TO_LONGS(I915_MAX_SS_FUSE_BITS)];
} intel_sseu_ss_mask_t;

#define XEHP_BITMAP_BITS(mask)	((int)BITS_PER_TYPE(typeof(mask.xehp)))
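
/*
 * Illustrative sketch (not part of this header's API): pre-Xe_HP code
 * reads one slice's subslices as a byte, e.g.
 *
 *	u8 ss_mask = sseu->subslice_mask.hsw[slice];
 *
 * whereas Xe_HP-style code treats the same storage as a linux/bitmap.h
 * bitmap covering every DSS, e.g.
 *
 *	for_each_set_bit(dss, sseu->subslice_mask.xehp,
 *			 XEHP_BITMAP_BITS(sseu->subslice_mask))
 *		process_dss(dss);
 *
 * where process_dss() stands in for whatever per-DSS work the caller needs.
 */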

struct sseu_dev_info {
	u8 slice_mask;
	intel_sseu_ss_mask_t subslice_mask;
	intel_sseu_ss_mask_t geometry_subslice_mask;
	intel_sseu_ss_mask_t compute_subslice_mask;
	union {
		u16 hsw[GEN_MAX_HSW_SLICES][GEN_MAX_SS_PER_HSW_SLICE];
		u16 xehp[I915_MAX_SS_FUSE_BITS];
	} eu_mask;

	u16 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) have 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
	/*
	 * For Xe_HP and beyond, the hardware no longer has traditional slices
	 * so we just report the entire DSS pool under a fake "slice 0."
	 */
	u8 has_xehp_dss:1;

	/* Topology fields */
	u8 max_slices;
	u8 max_subslices;
	u8 max_eus_per_subslice;
};
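
/*
 * Illustrative access patterns, mirroring the unions above: pre-Xe_HP
 * code reads the EU mask of one subslice as
 * sseu->eu_mask.hsw[slice][subslice], while Xe_HP-style code indexes by
 * the flat DSS number, sseu->eu_mask.xehp[dss].
 */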

/*
 * Powergating configuration for a particular (context,engine).
 */
struct intel_sseu {
	u8 slice_mask;
	u8 subslice_mask;
	u8 min_eus_per_subslice;
	u8 max_eus_per_subslice;
};

static inline struct intel_sseu
intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
{
	struct intel_sseu value = {
		.slice_mask = sseu->slice_mask,
		.subslice_mask = sseu->subslice_mask.hsw[0],
		.min_eus_per_subslice = sseu->max_eus_per_subslice,
		.max_eus_per_subslice = sseu->max_eus_per_subslice,
	};

	return value;
}
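
/*
 * Illustrative sketch (assuming the device topology is reachable as
 * gt->info.sseu, as elsewhere in the driver): a context's default
 * powergating request can be seeded from the device defaults with
 *
 *	struct intel_sseu ctx_sseu = intel_sseu_from_device_info(&gt->info.sseu);
 *
 * Note that min_eus_per_subslice is initialised from the device's
 * max_eus_per_subslice, so the default request keeps all EUs enabled.
 */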

static inline bool
intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	if (slice >= sseu->max_slices ||
	    subslice >= sseu->max_subslices)
		return false;

	if (sseu->has_xehp_dss)
		return test_bit(subslice, sseu->subslice_mask.xehp);
	else
		return sseu->subslice_mask.hsw[slice] & BIT(subslice);
}
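
/*
 * Illustrative sketch: walking the topology while skipping fused-off
 * subslices might look like
 *
 *	for (s = 0; s < sseu->max_slices; s++)
 *		for (ss = 0; ss < sseu->max_subslices; ss++)
 *			if (intel_sseu_has_subslice(sseu, s, ss))
 *				handle_ss(s, ss);
 *
 * with handle_ss() standing in for the caller's per-subslice work.  On
 * Xe_HP-style parts the slice argument only participates in the bounds
 * check, since all DSS are reported under the single fake slice 0.
 */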

/*
 * Used to obtain the index of the first DSS.  Can start searching from the
 * beginning of a specific dss group (e.g., gslice, cslice, etc.) if
 * groupsize and groupnum are non-zero.
 */
static inline unsigned int
intel_sseu_find_first_xehp_dss(const struct sseu_dev_info *sseu, int groupsize,
			       int groupnum)
{
	return find_next_bit(sseu->subslice_mask.xehp,
			     XEHP_BITMAP_BITS(sseu->subslice_mask),
			     groupnum * groupsize);
}
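
/*
 * Illustrative sketch: the first enabled DSS overall is
 *
 *	intel_sseu_find_first_xehp_dss(sseu, 0, 0);
 *
 * and the first enabled DSS at or beyond the start of gslice n is
 *
 *	intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, n);
 *
 * Like find_next_bit(), this returns the bitmap size when no enabled DSS
 * is found in the searched range.
 */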

void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
			 u8 max_subslices, u8 max_eus_per_subslice);

unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);

unsigned int
intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice);

intel_sseu_ss_mask_t
intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);

void intel_sseu_info_init(struct intel_gt *gt);

u32 intel_sseu_make_rpcs(struct intel_gt *gt,
			 const struct intel_sseu *req_sseu);

void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
void intel_sseu_print_topology(struct drm_i915_private *i915,
			       const struct sseu_dev_info *sseu,
			       struct drm_printer *p);

u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask, int dss_per_slice);

int intel_sseu_copy_eumask_to_user(void __user *to,
				   const struct sseu_dev_info *sseu);
int intel_sseu_copy_ssmask_to_user(void __user *to,
				   const struct sseu_dev_info *sseu);

void intel_sseu_print_ss_info(const char *type,
			      const struct sseu_dev_info *sseu,
			      struct seq_file *m);

#endif /* __INTEL_SSEU_H__ */