// SPDX-License-Identifier: MIT
#include <linux/clk.h>
#include <linux/math64.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

#include <drm/drm_managed.h>

#include <subdev/clk.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "priv.h"
#include "gk20a_devfreq.h"
#include "gk20a.h"
#include "gp10b.h"

#define PMU_BUSY_CYCLES_NORM_MAX		1000U

#define PWR_PMU_IDLE_COUNTER_TOTAL		0U
#define PWR_PMU_IDLE_COUNTER_BUSY		4U

#define PWR_PMU_IDLE_COUNT_REG_OFFSET		0x0010A508U
#define PWR_PMU_IDLE_COUNT_REG_SIZE		16U
#define PWR_PMU_IDLE_COUNT_MASK			0x7FFFFFFFU
#define PWR_PMU_IDLE_COUNT_RESET_VALUE		(0x1U << 31U)

#define PWR_PMU_IDLE_INTR_REG_OFFSET		0x0010A9E8U
#define PWR_PMU_IDLE_INTR_ENABLE_VALUE		0U

#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET	0x0010A9ECU
#define PWR_PMU_IDLE_INTR_STATUS_MASK		0x00000001U
#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE	0x1U

#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET	0x0010A8A0U
#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE		4U
#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE	0x7FFFFFFFU

#define PWR_PMU_IDLE_CTRL_REG_OFFSET		0x0010A50CU
#define PWR_PMU_IDLE_CTRL_REG_SIZE		16U
#define PWR_PMU_IDLE_CTRL_VALUE_MASK		0x3U
#define PWR_PMU_IDLE_CTRL_VALUE_BUSY		0x2U
#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS		0x3U
#define PWR_PMU_IDLE_CTRL_FILTER_MASK		(0x1U << 2)
#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED	0x0U

#define PWR_PMU_IDLE_MASK_REG_OFFSET		0x0010A504U
#define PWR_PMU_IDLE_MASK_REG_SIZE		16U
#define PWM_PMU_IDLE_MASK_GR_ENABLED		0x1U
#define PWM_PMU_IDLE_MASK_CE_2_ENABLED		0x200000U

/**
 * struct gk20a_devfreq - Device frequency management
 */
struct gk20a_devfreq {
	/** @devfreq: devfreq device. */
	struct devfreq *devfreq;

	/** @regs: Device registers. */
	void __iomem *regs;

	/** @gov_data: Governor data. */
	struct devfreq_simple_ondemand_data gov_data;

	/** @busy_time: Busy time. */
	ktime_t busy_time;

	/** @total_time: Total time. */
	ktime_t total_time;

	/** @time_last_update: Last update time. */
	ktime_t time_last_update;
};

static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev)
{
	struct nouveau_drm *drm = dev_get_drvdata(dev);
	struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
	struct nvkm_clk *base = nvkm_clk(subdev);

	switch (drm->nvkm->chipset) {
	case 0x13b: return gp10b_clk(base)->devfreq;
	default: return gk20a_clk(base)->devfreq;
	}
}
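/*
 * Configure two PMU idle counters: the TOTAL counter counts every cycle
 * (CTRL value ALWAYS), while the BUSY counter counts only cycles where
 * the units selected by its idle mask (GR and CE2) are busy. The ratio
 * of the two over a sampling window gives the GPU load.
 */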
static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq)
{
	u32 data;

	// Set pmu idle intr status bit on total counter overflow
	writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE,
	       gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET);

	writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE,
	       gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET +
	       (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE));

	// Setup counter for total cycles
	data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
		     (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
	data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
	data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
	writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
	       (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));

	// Setup counter for busy cycles
	writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED,
	       gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET +
	       (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE));

	data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
		     (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
	data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
	data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
	writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
	       (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
}

static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
{
	u32 ret;

	ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
		    (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));

	return ret & PWR_PMU_IDLE_COUNT_MASK;
}

static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
{
	writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
	       (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
}

static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq)
{
	u32 ret;

	ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);

	return ret & PWR_PMU_IDLE_INTR_STATUS_MASK;
}

static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq)
{
	writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE,
	       gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
}
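/*
 * Sample the idle counters and convert them into the busy/total times
 * reported to devfreq. The busy fraction is normalized to
 * PMU_BUSY_CYCLES_NORM_MAX; if the overflow interrupt status is set,
 * the counters wrapped and the window is treated as fully busy.
 */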
static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq)
{
	ktime_t now, last;
	u64 busy_cycles, total_cycles;
	u32 norm, intr_status;

	now = ktime_get();
	last = gdevfreq->time_last_update;
	gdevfreq->total_time = ktime_us_delta(now, last);

	busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
	total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
	intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq);

	gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
	gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);

	if (intr_status != 0UL) {
		norm = PMU_BUSY_CYCLES_NORM_MAX;
		gk20a_pmu_clear_idle_intr_status(gdevfreq);
	} else if (total_cycles == 0ULL || busy_cycles > total_cycles) {
		norm = PMU_BUSY_CYCLES_NORM_MAX;
	} else {
		norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX,
				      total_cycles);
	}

	gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX);
	gdevfreq->time_last_update = now;
}

static int gk20a_devfreq_target(struct device *dev, unsigned long *freq,
				u32 flags)
{
	struct nouveau_drm *drm = dev_get_drvdata(dev);
	struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
	struct nvkm_clk *base = nvkm_clk(subdev);
	struct nvkm_pstate *pstates = base->func->pstates;
	int nr_pstates = base->func->nr_pstates;
	int i, ret;

	for (i = 0; i < nr_pstates - 1; i++)
		if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq)
			break;

	ret = nvkm_clk_ustate(base, pstates[i].pstate, 0);
	ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1);
	if (ret) {
		nvkm_error(subdev, "cannot update clock\n");
		return ret;
	}

	*freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;

	return 0;
}

static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct nouveau_drm *drm = dev_get_drvdata(dev);
	struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
	struct nvkm_clk *base = nvkm_clk(subdev);

	*freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;

	return 0;
}
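/*
 * Restart a sampling window: clear both idle counters and the overflow
 * status, and take a fresh timestamp.
 */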
static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq)
{
	gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
	gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
	gk20a_pmu_clear_idle_intr_status(gdevfreq);

	gdevfreq->busy_time = 0;
	gdevfreq->total_time = 0;
	gdevfreq->time_last_update = ktime_get();
}

static int gk20a_devfreq_get_dev_status(struct device *dev,
					struct devfreq_dev_status *status)
{
	struct nouveau_drm *drm = dev_get_drvdata(dev);
	struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);

	gk20a_devfreq_get_cur_freq(dev, &status->current_frequency);

	gk20a_devfreq_update_utilization(gdevfreq);

	status->busy_time = ktime_to_ns(gdevfreq->busy_time);
	status->total_time = ktime_to_ns(gdevfreq->total_time);

	gk20a_devfreq_reset(gdevfreq);

	NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n",
		 status->busy_time, status->total_time,
		 status->busy_time / (status->total_time / 100),
		 status->current_frequency / 1000 / 1000);

	return 0;
}

static struct devfreq_dev_profile gk20a_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50,
	.target = gk20a_devfreq_target,
	.get_cur_freq = gk20a_devfreq_get_cur_freq,
	.get_dev_status = gk20a_devfreq_get_dev_status,
};
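/*
 * Build the OPP table from the clock subdev's pstate list, program the
 * perfmon counters, and register a devfreq device using the
 * simple_ondemand governor.
 */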
int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq)
{
	struct nvkm_device *device = base->subdev.device;
	struct nouveau_drm *drm = dev_get_drvdata(device->dev);
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct nvkm_pstate *pstates = base->func->pstates;
	int nr_pstates = base->func->nr_pstates;
	struct gk20a_devfreq *new_gdevfreq;
	int i;

	new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL);
	if (!new_gdevfreq)
		return -ENOMEM;

	new_gdevfreq->regs = tdev->regs;

	for (i = 0; i < nr_pstates; i++)
		dev_pm_opp_add(base->subdev.device->dev,
			       pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0);

	gk20a_pmu_init_perfmon_counter(new_gdevfreq);
	gk20a_devfreq_reset(new_gdevfreq);

	gk20a_devfreq_profile.initial_freq =
		nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;

	new_gdevfreq->gov_data.upthreshold = 45;
	new_gdevfreq->gov_data.downdifferential = 5;

	new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev,
							&gk20a_devfreq_profile,
							DEVFREQ_GOV_SIMPLE_ONDEMAND,
							&new_gdevfreq->gov_data);
	if (IS_ERR(new_gdevfreq->devfreq))
		return PTR_ERR(new_gdevfreq->devfreq);

	*gdevfreq = new_gdevfreq;

	return 0;
}

int gk20a_devfreq_resume(struct device *dev)
{
	struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);

	if (!gdevfreq || !gdevfreq->devfreq)
		return 0;

	return devfreq_resume_device(gdevfreq->devfreq);
}

int gk20a_devfreq_suspend(struct device *dev)
{
	struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);

	if (!gdevfreq || !gdevfreq->devfreq)
		return 0;

	return devfreq_suspend_device(gdevfreq->devfreq);
}