// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_sysfs.h"
#include "xe_mmio.h"

static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
{
	u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
	int num_slices = hweight32(CCS_MASK(gt));
	struct xe_device *xe = gt_to_xe(gt);
	int width, cslice = 0;
	u32 config = 0;

	xe_assert(xe, xe_gt_ccs_mode_enabled(gt));

	xe_assert(xe, num_engines && num_engines <= num_slices);
	xe_assert(xe, !(num_slices % num_engines));

	/*
	 * Loop over all available slices and assign each a user engine.
	 * For example, if there are four compute slices available, the
	 * assignment of compute slices to compute engines would be,
	 *
	 * With 1 engine (ccs0):
	 *   slice 0, 1, 2, 3: ccs0
	 *
	 * With 2 engines (ccs0, ccs1):
	 *   slice 0, 2: ccs0
	 *   slice 1, 3: ccs1
	 *
	 * With 4 engines (ccs0, ccs1, ccs2, ccs3):
	 *   slice 0: ccs0
	 *   slice 1: ccs1
	 *   slice 2: ccs2
	 *   slice 3: ccs3
	 */
	for (width = num_slices / num_engines; width; width--) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;

		for_each_hw_engine(hwe, gt, id) {
			if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
				continue;

			if (hwe->logical_instance >= num_engines)
				break;

			config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;

			/* If a slice is fused off, leave disabled */
			while ((CCS_MASK(gt) & BIT(cslice)) == 0)
				cslice++;

			mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
			mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
			cslice++;
		}
	}

	xe_mmio_write32(gt, CCS_MODE, mode);

	xe_gt_info(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
		   mode, config, num_engines, num_slices);
}
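
/*
 * Illustrative walk-through of __xe_gt_apply_ccs_mode(), assuming all four
 * compute slices are fused in (CCS_MASK == 0xf) and num_engines == 2: the
 * outer loop runs twice and each pass visits ccs0 and ccs1 in order, so the
 * CCS_MODE cslice fields end up holding engine instances 0, 1, 0, 1 (slices
 * 0/2 serviced by ccs0, slices 1/3 by ccs1), matching the table in the
 * comment above, while config is reported as
 * (BIT(0) | BIT(1)) << XE_HW_ENGINE_CCS0.
 */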

void xe_gt_apply_ccs_mode(struct xe_gt *gt)
{
	if (!gt->ccs_mode)
		return;

	__xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
}

static ssize_t
num_cslices_show(struct device *kdev,
		 struct device_attribute *attr, char *buf)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);

	return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
}

static DEVICE_ATTR_RO(num_cslices);

static ssize_t
ccs_mode_show(struct device *kdev,
	      struct device_attribute *attr, char *buf)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);

	return sysfs_emit(buf, "%u\n", gt->ccs_mode);
}

static ssize_t
ccs_mode_store(struct device *kdev, struct device_attribute *attr,
	       const char *buff, size_t count)
{
	struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
	struct xe_device *xe = gt_to_xe(gt);
	u32 num_engines, num_slices;
	int ret;

	ret = kstrtou32(buff, 0, &num_engines);
	if (ret)
		return ret;

	/*
	 * Ensure number of engines specified is valid and there is an
	 * exact multiple of engines for slices.
	 */
	num_slices = hweight32(CCS_MASK(gt));
	if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
		xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
			  num_engines, num_slices);
		return -EINVAL;
	}

	/* CCS mode can only be updated when there are no drm clients */
	spin_lock(&xe->clients.lock);
	if (xe->clients.count) {
		spin_unlock(&xe->clients.lock);
		return -EBUSY;
	}

	if (gt->ccs_mode != num_engines) {
		xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
		gt->ccs_mode = num_engines;
		xe_gt_reset_async(gt);
	}

	spin_unlock(&xe->clients.lock);

	return count;
}

static DEVICE_ATTR_RW(ccs_mode);

static const struct attribute *gt_ccs_mode_attrs[] = {
	&dev_attr_ccs_mode.attr,
	&dev_attr_num_cslices.attr,
	NULL,
};

static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
}

/**
 * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
 * @gt: GT structure
 *
 * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
 * number of compute hardware engines to which the available compute slices
 * are to be allocated. This user configuration change triggers a gt reset
 * and it is expected that there are no open drm clients while doing so.
 * The number of available compute slices is exposed to the user through a
 * per-gt 'num_cslices' sysfs interface.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	if (!xe_gt_ccs_mode_enabled(gt))
		return 0;

	err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
	if (err)
		return err;

	return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}
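
/*
 * Illustrative usage of the interfaces registered above, assuming the gt
 * sysfs directory lives at /sys/class/drm/card0/device/tile0/gt0 (the exact
 * card/tile/gt indices vary per system):
 *
 *   cat /sys/class/drm/card0/device/tile0/gt0/num_cslices
 *   echo 2 > /sys/class/drm/card0/device/tile0/gt0/ccs_mode
 *
 * The write fails with -EBUSY while drm clients are open, and with -EINVAL
 * if the requested engine count is zero, larger than the slice count, or
 * does not evenly divide it.
 */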