// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
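/*
 * Program the CCS_MODE register to distribute the available compute slices
 * across the first @num_engines compute engines. Slices are assigned
 * round-robin; fused-off slices are skipped and left disabled.
 */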
static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
{
	u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
	int num_slices = hweight32(CCS_MASK(gt));
	struct xe_device *xe = gt_to_xe(gt);
	int width, cslice = 0;
	u32 config = 0;

	xe_assert(xe, xe_gt_ccs_mode_enabled(gt));

	xe_assert(xe, num_engines && num_engines <= num_slices);
	xe_assert(xe, !(num_slices % num_engines));

	/*
	 * Loop over all available slices and assign each a user engine.
	 * For example, if there are four compute slices available, the
	 * assignment of compute slices to compute engines would be,
	 *
	 * With 1 engine (ccs0):
	 * slice 0, 1, 2, 3: ccs0
	 *
	 * With 2 engines (ccs0, ccs1):
	 * slice 0, 2: ccs0
	 * slice 1, 3: ccs1
	 *
	 * With 4 engines (ccs0, ccs1, ccs2, ccs3):
	 * slice 0: ccs0
	 * slice 1: ccs1
	 * slice 2: ccs2
	 * slice 3: ccs3
	 */
	for (width = num_slices / num_engines; width; width--) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;

		for_each_hw_engine(hwe, gt, id) {
			if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
				continue;

			if (hwe->logical_instance >= num_engines)
				break;

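			/* Record this engine in the debug mask printed below */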
			config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;

			/* If a slice is fused off, leave disabled */
			while ((CCS_MASK(gt) & BIT(cslice)) == 0)
				cslice++;

			mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
			mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
			cslice++;
		}
	}

	xe_mmio_write32(gt, CCS_MODE, mode);

	xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
		  mode, config, num_engines, num_slices);
}

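/*
 * Apply the selected CCS mode, if any. VFs never program CCS_MODE
 * themselves; that configuration is left to the PF.
 */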
void xe_gt_apply_ccs_mode(struct xe_gt *gt)
{
	if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	__xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
}

static ssize_t
num_cslices_show(struct device *kdev,
		 struct device_attribute *attr, char *buf)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);

	return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
}

static DEVICE_ATTR_RO(num_cslices);

static ssize_t
ccs_mode_show(struct device *kdev,
	      struct device_attribute *attr, char *buf)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);

	return sysfs_emit(buf, "%u\n", gt->ccs_mode);
}

static ssize_t
ccs_mode_store(struct device *kdev, struct device_attribute *attr,
	       const char *buff, size_t count)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
	struct xe_device *xe = gt_to_xe(gt);
	u32 num_engines, num_slices;
	int ret;

	if (IS_SRIOV(xe)) {
		xe_gt_dbg(gt, "Can't change compute mode when running as %s\n",
			  xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
		return -EOPNOTSUPP;
	}

	ret = kstrtou32(buff, 0, &num_engines);
	if (ret)
		return ret;

	/*
	 * Ensure the number of engines specified is valid and that the
	 * slice count is an exact multiple of the engine count.
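	 * For example, with four compute slices the accepted values are
	 * 1, 2 and 4.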
	 */
	num_slices = hweight32(CCS_MASK(gt));
	if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
		xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
			  num_engines, num_slices);
		return -EINVAL;
	}

	/* CCS mode can only be updated when there are no drm clients */
	spin_lock(&xe->clients.lock);
	if (xe->clients.count) {
		spin_unlock(&xe->clients.lock);
		return -EBUSY;
	}

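	/*
	 * The new setting only takes effect on the next GT reset, whose
	 * re-init path re-applies CCS_MODE from gt->ccs_mode.
	 */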
	if (gt->ccs_mode != num_engines) {
		xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
		gt->ccs_mode = num_engines;
		xe_gt_record_user_engines(gt);
		xe_gt_reset_async(gt);
	}

	spin_unlock(&xe->clients.lock);

	return count;
}

static DEVICE_ATTR_RW(ccs_mode);

static const struct attribute *gt_ccs_mode_attrs[] = {
	&dev_attr_ccs_mode.attr,
	&dev_attr_num_cslices.attr,
	NULL,
};

static void xe_gt_ccs_mode_sysfs_fini(void *arg)
{
	struct xe_gt *gt = arg;

	sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
}

/**
 * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
 * @gt: GT structure
 *
 * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
 * number of compute hardware engines to which the available compute slices
 * are to be allocated. This user configuration change triggers a gt reset,
 * and it is expected that there are no open drm clients while doing so.
 * The number of available compute slices is exposed to the user through a
 * per-gt 'num_cslices' sysfs interface.
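 *
 * For example, assuming the usual per-gt sysfs location (the exact path
 * depends on the card, tile and gt indices):
 *
 *   $ cat /sys/class/drm/card0/device/tile0/gt0/num_cslices
 *   4
 *   $ echo 2 > /sys/class/drm/card0/device/tile0/gt0/ccs_mode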
 *
 * Returns: negative error code on failure, 0 on success.
 */
int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

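	/* Skip GTs where reconfiguring the CCS mode is not applicable */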
	if (!xe_gt_ccs_mode_enabled(gt))
		return 0;

	err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
	if (err)
		return err;

	return devm_add_action_or_reset(xe->drm.dev, xe_gt_ccs_mode_sysfs_fini, gt);
}