xref: /linux/drivers/gpu/drm/xe/xe_gt_ccs_mode.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <drm/drm_managed.h>
7 
8 #include "regs/xe_gt_regs.h"
9 #include "xe_assert.h"
10 #include "xe_gt.h"
11 #include "xe_gt_ccs_mode.h"
12 #include "xe_gt_printk.h"
13 #include "xe_gt_sysfs.h"
14 #include "xe_mmio.h"
15 #include "xe_pm.h"
16 #include "xe_sriov.h"
17 #include "xe_sriov_pf.h"
18 
/*
 * Program the CCS_MODE register so that the available compute slices
 * (cslices) are distributed evenly across @num_engines compute engines.
 *
 * @gt: GT whose CCS_MODE register is written
 * @num_engines: number of compute engines to enable; asserted to be
 *               non-zero, at most the number of non-fused slices, and
 *               an exact divisor of that slice count
 */
static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
{
	u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
	int num_slices = hweight32(CCS_INSTANCES(gt));
	struct xe_device *xe = gt_to_xe(gt);
	int width, cslice = 0;
	u32 config = 0;

	xe_assert(xe, xe_gt_ccs_mode_enabled(gt));

	xe_assert(xe, num_engines && num_engines <= num_slices);
	xe_assert(xe, !(num_slices % num_engines));

	/*
	 * Loop over all available slices and assign each a user engine.
	 * For example, if there are four compute slices available, the
	 * assignment of compute slices to compute engines would be,
	 *
	 * With 1 engine (ccs0):
	 *   slice 0, 1, 2, 3: ccs0
	 *
	 * With 2 engines (ccs0, ccs1):
	 *   slice 0, 2: ccs0
	 *   slice 1, 3: ccs1
	 *
	 * With 4 engines (ccs0, ccs1, ccs2, ccs3):
	 *   slice 0: ccs0
	 *   slice 1: ccs1
	 *   slice 2: ccs2
	 *   slice 3: ccs3
	 */
	for (width = num_slices / num_engines; width; width--) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;

		for_each_hw_engine(hwe, gt, id) {
			/* Only compute engines participate in slice routing */
			if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
				continue;

			/* Engines beyond the user-requested count stay unassigned */
			if (hwe->logical_instance >= num_engines)
				break;

			/* Track the enabled-engine bitmap for the debug print below */
			config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;

			/* If a slice is fused off, leave disabled */
			while ((CCS_INSTANCES(gt) & BIT(cslice)) == 0)
				cslice++;

			/*
			 * Route this cslice to the current engine instance:
			 * clear its field (initialized to "disabled" above),
			 * then write the engine instance number into it.
			 * Note that @cslice advances across both loops, so
			 * each pass of the outer loop assigns the next
			 * group of slices round-robin to the same engines.
			 */
			mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
			mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
			cslice++;
		}
	}

	/*
	 * Mask bits need to be set for the register. Though only Xe2+
	 * platforms require setting of mask bits, it won't harm for older
	 * platforms as these bits are unused there.
	 */
	mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
	xe_mmio_write32(&gt->mmio, CCS_MODE, mode);

	xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
		  mode, config, num_engines, num_slices);
}
84 
85 void xe_gt_apply_ccs_mode(struct xe_gt *gt)
86 {
87 	if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
88 		return;
89 
90 	__xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
91 }
92 
93 static bool gt_ccs_mode_default(struct xe_gt *gt)
94 {
95 	return gt->ccs_mode == 1;
96 }
97 
98 static ssize_t
99 num_cslices_show(struct device *kdev,
100 		 struct device_attribute *attr, char *buf)
101 {
102 	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
103 
104 	return sysfs_emit(buf, "%u\n", hweight32(CCS_INSTANCES(gt)));
105 }
106 
107 static DEVICE_ATTR_RO(num_cslices);
108 
109 static ssize_t
110 ccs_mode_show(struct device *kdev,
111 	      struct device_attribute *attr, char *buf)
112 {
113 	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
114 
115 	return sysfs_emit(buf, "%u\n", gt->ccs_mode);
116 }
117 
/*
 * sysfs write of 'ccs_mode': request a new number of enabled compute
 * engines. Validates the value against the available compute slices,
 * then — only while no DRM clients are open — applies it via a GT
 * reset. Returns @count on success (including a no-op write of the
 * current value) or a negative errno on failure.
 */
static ssize_t
ccs_mode_store(struct device *kdev, struct device_attribute *attr,
	       const char *buff, size_t count)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
	struct xe_device *xe = gt_to_xe(gt);
	u32 num_engines, num_slices;
	int ret;

	ret = kstrtou32(buff, 0, &num_engines);
	if (ret)
		return ret;

	/*
	 * Ensure number of engines specified is valid and there is an
	 * exact multiple of engines for slices.
	 */
	num_slices = hweight32(CCS_INSTANCES(gt));
	if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
		xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
			  num_engines, num_slices);
		return -EINVAL;
	}

	/* CCS mode can only be updated when there are no drm clients */
	/* Scope-based lock: filelist_mutex is held until this function returns */
	guard(mutex)(&xe->drm.filelist_mutex);
	if (!list_empty(&xe->drm.filelist)) {
		xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
		return -EBUSY;
	}

	/* Writing the already-active value is accepted as a no-op */
	if (gt->ccs_mode == num_engines)
		return count;

	/*
	 * Changing default CCS mode is only allowed when there
	 * are no VFs. Try to lockdown PF to find out.
	 */
	/*
	 * The lockdown taken here when leaving the default mode is the
	 * counterpart of the end_lockdown below when returning to it.
	 */
	if (gt_ccs_mode_default(gt) && IS_SRIOV_PF(xe)) {
		ret = xe_sriov_pf_lockdown(xe);
		if (ret) {
			xe_gt_dbg(gt, "Can't change CCS Mode: VFs are enabled\n");
			return ret;
		}
	}

	xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
	gt->ccs_mode = num_engines;
	xe_gt_record_user_engines(gt);
	/* Hold a runtime-PM reference across the reset; dropped at scope exit */
	guard(xe_pm_runtime)(xe);
	xe_gt_reset(gt);

	/* We may end PF lockdown once CCS mode is default again */
	if (gt_ccs_mode_default(gt) && IS_SRIOV_PF(xe))
		xe_sriov_pf_end_lockdown(xe);

	return count;
}

static DEVICE_ATTR_RW(ccs_mode);
178 
/* Attributes created/removed as a set in the per-gt sysfs directory */
static const struct attribute *gt_ccs_mode_attrs[] = {
	&dev_attr_ccs_mode.attr,
	&dev_attr_num_cslices.attr,
	NULL,	/* sentinel */
};
184 
185 static void xe_gt_ccs_mode_sysfs_fini(void *arg)
186 {
187 	struct xe_gt *gt = arg;
188 
189 	sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
190 }
191 
192 /**
193  * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
194  * @gt: GT structure
195  *
196  * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
197  * number of compute hardware engines to which the available compute slices
198  * are to be allocated. This user configuration change triggers a gt reset
199  * and it is expected that there are no open drm clients while doing so.
200  * The number of available compute slices is exposed to user through a per-gt
201  * 'num_cslices' sysfs interface.
202  *
203  * Returns: Returns error value for failure and 0 for success.
204  */
205 int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
206 {
207 	struct xe_device *xe = gt_to_xe(gt);
208 	int err;
209 
210 	if (!xe_gt_ccs_mode_enabled(gt) || IS_SRIOV_VF(xe))
211 		return 0;
212 
213 	err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
214 	if (err)
215 		return err;
216 
217 	return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
218 }
219