// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"

/*
 * Fold the time elapsed since the last update into the busy or idle
 * accumulator, depending on whether any job is currently in flight.
 */
static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfreq)
{
	ktime_t now, last;

	now = ktime_get();
	last = pfdevfreq->time_last_update;

	if (pfdevfreq->busy_count > 0)
		pfdevfreq->busy_time += ktime_sub(now, last);
	else
		pfdevfreq->idle_time += ktime_sub(now, last);

	pfdevfreq->time_last_update = now;
}

static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
				   u32 flags)
{
	struct panfrost_device *ptdev = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	int err;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	err = dev_pm_opp_set_rate(dev, *freq);
	if (!err)
		ptdev->pfdevfreq.current_frequency = *freq;

	return err;
}

static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
{
	pfdevfreq->busy_time = 0;
	pfdevfreq->idle_time = 0;
	pfdevfreq->time_last_update = ktime_get();
}

static int panfrost_devfreq_get_dev_status(struct device *dev,
					   struct devfreq_dev_status *status)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
	unsigned long irqflags;

	status->current_frequency = clk_get_rate(pfdev->clock);

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
						   pfdevfreq->idle_time));

	status->busy_time = ktime_to_ns(pfdevfreq->busy_time);

	panfrost_devfreq_reset(pfdevfreq);

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);

	dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
		status->busy_time, status->total_time,
		status->busy_time / (status->total_time / 100),
		status->current_frequency / 1000 / 1000);

	return 0;
}

static struct devfreq_dev_profile panfrost_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = panfrost_devfreq_target,
	.get_dev_status = panfrost_devfreq_get_dev_status,
};

static int panfrost_read_speedbin(struct device *dev)
{
	u32 val;
	int ret;

	ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", &val);
	if (ret) {
		/*
		 * -ENOENT means that this platform doesn't support speedbins,
		 * as it didn't declare any speed-bin nvmem cell; -EOPNOTSUPP
		 * means nvmem support is not available. In either case, keep
		 * going without a speed bin. Any other error means that we
		 * were supposed to read the bin value but failed to do so.
		 */
		if (ret != -ENOENT && ret != -EOPNOTSUPP) {
			DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
			return ret;
		}

		return 0;
	}
	DRM_DEV_DEBUG(dev, "Using speed-bin = 0x%x\n", val);

	return devm_pm_opp_set_supported_hw(dev, &val, 1);
}

int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
	int ret;
	struct dev_pm_opp *opp;
	unsigned long cur_freq;
	struct device *dev = &pfdev->pdev->dev;
	struct devfreq *devfreq;
	struct thermal_cooling_device *cooling;
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
	unsigned long freq = ULONG_MAX;

	if (pfdev->comp->num_supplies > 1) {
		/*
		 * GPUs with more than 1 supply require platform-specific
		 * handling: continue without devfreq
		 */
		DRM_DEV_INFO(dev, "More than 1 supply is not supported yet\n");
		return 0;
	}

	ret = panfrost_read_speedbin(dev);
	if (ret)
		return ret;

	ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
	if (ret) {
		/* Continue if the optional regulator is missing */
		if (ret != -ENODEV) {
			if (ret != -EPROBE_DEFER)
				DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
			return ret;
		}
	}

	ret = devm_pm_opp_of_add_table(dev);
	if (ret) {
		/* Optional, continue without devfreq */
		if (ret == -ENODEV)
			ret = 0;
		return ret;
	}
	pfdevfreq->opp_of_table_added = true;

	spin_lock_init(&pfdevfreq->lock);

	panfrost_devfreq_reset(pfdevfreq);

	cur_freq = clk_get_rate(pfdev->clock);

	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panfrost_devfreq_profile.initial_freq = cur_freq;

	/*
	 * We could wait until panfrost_devfreq_target() to set this value, but
	 * since the simple_ondemand governor works asynchronously, there's a
	 * chance that, by the time someone opens the device's fdinfo file, the
	 * current frequency hasn't been updated yet, so just do an early set
	 * here.
	 */
	pfdevfreq->current_frequency = cur_freq;

	/*
	 * Set the recommended OPP; this will enable and configure the
	 * regulator, if any, and avoid a switch off by
	 * regulator_late_cleanup().
	 */
	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	if (ret) {
		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
		return ret;
	}

	/* Find the fastest defined rate */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	pfdevfreq->fast_rate = freq;

	dev_pm_opp_put(opp);

	/*
	 * Set up default thresholds for the simple_ondemand governor.
	 * The values are chosen based on experiments.
	 */
	pfdevfreq->gov_data.upthreshold = 45;
	pfdevfreq->gov_data.downdifferential = 5;

	devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &pfdevfreq->gov_data);
	if (IS_ERR(devfreq)) {
		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
		return PTR_ERR(devfreq);
	}
	pfdevfreq->devfreq = devfreq;

	cooling = devfreq_cooling_em_register(devfreq, NULL);
	if (IS_ERR(cooling))
		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
	else
		pfdevfreq->cooling = cooling;

	return 0;
}

void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (pfdevfreq->cooling) {
		devfreq_cooling_unregister(pfdevfreq->cooling);
		pfdevfreq->cooling = NULL;
	}
}

void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (!pfdevfreq->devfreq)
		return;

	panfrost_devfreq_reset(pfdevfreq);

	devfreq_resume_device(pfdevfreq->devfreq);
}

void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (!pfdevfreq->devfreq)
		return;

	devfreq_suspend_device(pfdevfreq->devfreq);
}

/* Snapshot utilization and bump the busy counter used by the governor. */
void panfrost_devfreq_record_busy(struct panfrost_devfreq *pfdevfreq)
{
	unsigned long irqflags;

	if (!pfdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	pfdevfreq->busy_count++;

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}

/* Snapshot utilization and drop one busy reference; warn on underflow. */
void panfrost_devfreq_record_idle(struct panfrost_devfreq *pfdevfreq)
{
	unsigned long irqflags;

	if (!pfdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	WARN_ON(--pfdevfreq->busy_count < 0);

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}