// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "xe_force_wake.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_sysfs.h"
#include "xe_guc_pc.h"
#include "regs/xe_gt_regs.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"

/**
 * DOC: Xe GT Idle
 *
 * Contains functions that init GT idle features like C6
 *
 * device/gt#/gtidle/name - name of the state
 * device/gt#/gtidle/idle_residency_ms - Provides residency of the idle state in ms
 * device/gt#/gtidle/idle_status - Provides current idle state
 */
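
/*
 * Example (illustrative): reading these attributes from userspace. The sysfs
 * path below is an assumption and may differ between devices and kernel
 * versions; the values shown are sample output only.
 *
 *   $ cat /sys/class/drm/card0/device/tile0/gt0/gtidle/name
 *   gt0-rc
 *   $ cat /sys/class/drm/card0/device/tile0/gt0/gtidle/idle_status
 *   gt-c6
 *   $ cat /sys/class/drm/card0/device/tile0/gt0/gtidle/idle_residency_ms
 *   123456
 */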

static struct xe_gt_idle *dev_to_gtidle(struct device *dev)
{
	struct kobject *kobj = &dev->kobj;

	return &kobj_to_gt(kobj->parent)->gtidle;
}

static struct xe_gt *gtidle_to_gt(struct xe_gt_idle *gtidle)
{
	return container_of(gtidle, struct xe_gt, gtidle);
}

static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
{
	return &gtidle_to_gt(gtidle)->uc.guc.pc;
}

static struct xe_device *
pc_to_xe(struct xe_guc_pc *pc)
{
	struct xe_guc *guc = container_of(pc, struct xe_guc, pc);
	struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);

	return gt_to_xe(gt);
}

static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
{
	switch (state) {
	case GT_IDLE_C0:
		return "gt-c0";
	case GT_IDLE_C6:
		return "gt-c6";
	default:
		return "unknown";
	}
}

static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
{
	u64 delta, overflow_residency, prev_residency;

	overflow_residency = BIT_ULL(32);

	/*
	 * Counter wrap handling
	 * Store previous hw counter values for counter wrap-around handling
	 * Relying on sufficient frequency of queries otherwise counters can still wrap.
	 */
	prev_residency = gtidle->prev_residency;
	gtidle->prev_residency = cur_residency;

	/* delta */
	if (cur_residency >= prev_residency)
		delta = cur_residency - prev_residency;
	else
		delta = cur_residency + (overflow_residency - prev_residency);

	/* Add delta to extended raw driver copy of idle residency */
	cur_residency = gtidle->cur_residency + delta;
	gtidle->cur_residency = cur_residency;

	/* residency multiplier in ns, convert to ms */
	cur_residency = mul_u64_u32_div(cur_residency, gtidle->residency_multiplier, 1e6);

	return cur_residency;
}
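
/*
 * Worked example for the wrap handling above (illustrative numbers, assuming
 * a 32-bit hardware counter): with a previous raw reading of 0xFFFFFFF0 and a
 * current raw reading of 0x10 the counter has wrapped once, so
 * delta = 0x10 + (BIT_ULL(32) - 0xFFFFFFF0) = 0x20 ticks. The delta is
 * accumulated into the 64-bit driver copy, which is then multiplied by
 * residency_multiplier (ns per tick) and divided by 1e6 (ns per ms) to
 * report milliseconds.
 */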

void xe_gt_idle_enable_pg(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 pg_enable;
	int i, j;

	if (IS_SRIOV_VF(xe))
		return;

	/* Disable CPG for PVC */
	if (xe->info.platform == XE_PVC)
		return;

	xe_device_assert_mem_access(gt_to_xe(gt));

	pg_enable = RENDER_POWERGATE_ENABLE | MEDIA_POWERGATE_ENABLE;

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if ((gt->info.engine_mask & BIT(i)))
			pg_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
				      VDN_MFXVDENC_POWERGATE_ENABLE(j));
	}

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
	if (xe->info.skip_guc_pc) {
		/*
		 * GuC sets the hysteresis value when GuC PC is enabled
		 * else set it to 25 (25 * 1.28us)
		 */
		xe_mmio_write32(gt, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
		xe_mmio_write32(gt, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
	}

	xe_mmio_write32(gt, POWERGATE_ENABLE, pg_enable);
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
}

void xe_gt_idle_disable_pg(struct xe_gt *gt)
{
	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_device_assert_mem_access(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));

	xe_mmio_write32(gt, POWERGATE_ENABLE, 0);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
}

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buff)
{
	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
	ssize_t ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = sysfs_emit(buff, "%s\n", gtidle->name);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}
static DEVICE_ATTR_RO(name);

static ssize_t idle_status_show(struct device *dev,
				struct device_attribute *attr, char *buff)
{
	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
	enum xe_gt_idle_state state;

	xe_pm_runtime_get(pc_to_xe(pc));
	state = gtidle->idle_status(pc);
	xe_pm_runtime_put(pc_to_xe(pc));

	return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
static DEVICE_ATTR_RO(idle_status);

static ssize_t idle_residency_ms_show(struct device *dev,
				      struct device_attribute *attr, char *buff)
{
	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
	struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
	u64 residency;

	xe_pm_runtime_get(pc_to_xe(pc));
	residency = gtidle->idle_residency(pc);
	xe_pm_runtime_put(pc_to_xe(pc));

	return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
}
static DEVICE_ATTR_RO(idle_residency_ms);

static const struct attribute *gt_idle_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_idle_status.attr,
	&dev_attr_idle_residency_ms.attr,
	NULL,
};

static void gt_idle_fini(void *arg)
{
	struct kobject *kobj = arg;
	struct xe_gt *gt = kobj_to_gt(kobj->parent);

	xe_gt_idle_disable_pg(gt);

	if (gt_to_xe(gt)->info.skip_guc_pc) {
		XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		xe_gt_idle_disable_c6(gt);
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	sysfs_remove_files(kobj, gt_idle_attrs);
	kobject_put(kobj);
}

int xe_gt_idle_init(struct xe_gt_idle *gtidle)
{
	struct xe_gt *gt = gtidle_to_gt(gtidle);
	struct xe_device *xe = gt_to_xe(gt);
	struct kobject *kobj;
	int err;

	if (IS_SRIOV_VF(xe))
		return 0;

	kobj = kobject_create_and_add("gtidle", gt->sysfs);
	if (!kobj)
		return -ENOMEM;

	if (xe_gt_is_media_type(gt)) {
		snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
		gtidle->idle_residency = xe_guc_pc_mc6_residency;
	} else {
		snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-rc", gt->info.id);
		gtidle->idle_residency = xe_guc_pc_rc6_residency;
	}

	/* Multiplier for Residency counter in units of 1.28us */
	gtidle->residency_multiplier = 1280;
	gtidle->idle_status = xe_guc_pc_c_status;

	err = sysfs_create_files(kobj, gt_idle_attrs);
	if (err) {
		kobject_put(kobj);
		return err;
	}

	xe_gt_idle_enable_pg(gt);

	return devm_add_action_or_reset(xe->drm.dev, gt_idle_fini, kobj);
}

void xe_gt_idle_enable_c6(struct xe_gt *gt)
{
	xe_device_assert_mem_access(gt_to_xe(gt));
	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	/* Units of 1280 ns for a total of 5s */
	xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA);
	/* Enable RC6 */
	xe_mmio_write32(gt, RC_CONTROL,
			RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}

void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
	xe_device_assert_mem_access(gt_to_xe(gt));
	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_mmio_write32(gt, RC_CONTROL, 0);
	xe_mmio_write32(gt, RC_STATE, 0);
}
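
/*
 * Note on the hysteresis value programmed in xe_gt_idle_enable_c6() above:
 * 0x3B9ACA is 3,906,250 decimal, and at 1280 ns per unit that gives
 * 3,906,250 * 1280 ns = 5,000,000,000 ns, i.e. the 5 s stated in the comment.
 */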