1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved. 4 * 5 * Author: 6 * Mikko Perttunen <mperttunen@nvidia.com> 7 * 8 * This software is licensed under the terms of the GNU General Public 9 * License version 2, as published by the Free Software Foundation, and 10 * may be copied, distributed, and modified under those terms. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 */ 18 19 #include <linux/debugfs.h> 20 #include <linux/bitops.h> 21 #include <linux/clk.h> 22 #include <linux/delay.h> 23 #include <linux/err.h> 24 #include <linux/interrupt.h> 25 #include <linux/io.h> 26 #include <linux/irq.h> 27 #include <linux/irqdomain.h> 28 #include <linux/module.h> 29 #include <linux/of.h> 30 #include <linux/platform_device.h> 31 #include <linux/reset.h> 32 #include <linux/thermal.h> 33 34 #include <dt-bindings/thermal/tegra114-soctherm.h> 35 #include <dt-bindings/thermal/tegra124-soctherm.h> 36 37 #include "../thermal_core.h" 38 #include "soctherm.h" 39 40 #define SENSOR_CONFIG0 0 41 #define SENSOR_CONFIG0_STOP BIT(0) 42 #define SENSOR_CONFIG0_CPTR_OVER BIT(2) 43 #define SENSOR_CONFIG0_OVER BIT(3) 44 #define SENSOR_CONFIG0_TCALC_OVER BIT(4) 45 #define SENSOR_CONFIG0_TALL_MASK (0xfffff << 8) 46 #define SENSOR_CONFIG0_TALL_SHIFT 8 47 48 #define SENSOR_CONFIG1 4 49 #define SENSOR_CONFIG1_TSAMPLE_MASK 0x3ff 50 #define SENSOR_CONFIG1_TSAMPLE_SHIFT 0 51 #define SENSOR_CONFIG1_TIDDQ_EN_MASK (0x3f << 15) 52 #define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15 53 #define SENSOR_CONFIG1_TEN_COUNT_MASK (0x3f << 24) 54 #define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24 55 #define SENSOR_CONFIG1_TEMP_ENABLE BIT(31) 56 57 /* 58 * SENSOR_CONFIG2 is defined in soctherm.h 59 * because, it will be used by tegra_soctherm_fuse.c 60 */ 61 62 #define SENSOR_STATUS0 0xc 63 #define SENSOR_STATUS0_VALID_MASK BIT(31) 64 #define SENSOR_STATUS0_CAPTURE_MASK 0xffff 65 66 #define SENSOR_STATUS1 0x10 67 #define SENSOR_STATUS1_TEMP_VALID_MASK BIT(31) 68 #define SENSOR_STATUS1_TEMP_MASK 0xffff 69 70 #define READBACK_VALUE_MASK 0xff00 71 #define READBACK_VALUE_SHIFT 8 72 #define READBACK_ADD_HALF BIT(7) 73 #define READBACK_NEGATE BIT(0) 74 75 /* 76 * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h 77 * because it will be used by tegraxxx_soctherm.c 78 */ 79 #define THERMCTL_LVL0_CPU0_EN_MASK BIT(8) 80 #define THERMCTL_LVL0_CPU0_CPU_THROT_MASK (0x3 << 5) 81 #define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT 0x1 82 #define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY 0x2 83 #define THERMCTL_LVL0_CPU0_GPU_THROT_MASK (0x3 << 3) 84 #define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT 0x1 85 #define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY 0x2 86 #define THERMCTL_LVL0_CPU0_MEM_THROT_MASK BIT(2) 87 #define THERMCTL_LVL0_CPU0_STATUS_MASK 0x3 88 89 #define THERMCTL_LVL0_UP_STATS 0x10 90 #define THERMCTL_LVL0_DN_STATS 0x14 91 92 #define THERMCTL_INTR_STATUS 0x84 93 94 #define TH_INTR_MD0_MASK BIT(25) 95 #define TH_INTR_MU0_MASK BIT(24) 96 #define TH_INTR_GD0_MASK BIT(17) 97 #define TH_INTR_GU0_MASK BIT(16) 98 #define TH_INTR_CD0_MASK BIT(9) 99 #define TH_INTR_CU0_MASK BIT(8) 100 #define TH_INTR_PD0_MASK BIT(1) 101 #define TH_INTR_PU0_MASK BIT(0) 102 #define TH_INTR_IGNORE_MASK 0xFCFCFCFC 103 104 #define THERMCTL_STATS_CTL 0x94 105 #define STATS_CTL_CLR_DN 0x8 106 #define STATS_CTL_EN_DN 0x4 107 #define STATS_CTL_CLR_UP 0x2 108 #define STATS_CTL_EN_UP 0x1 109 110 #define OC1_CFG 0x310 111 #define OC1_CFG_LONG_LATENCY_MASK BIT(6) 112 #define OC1_CFG_HW_RESTORE_MASK BIT(5) 113 #define OC1_CFG_PWR_GOOD_MASK_MASK BIT(4) 114 #define OC1_CFG_THROTTLE_MODE_MASK (0x3 << 2) 115 #define OC1_CFG_ALARM_POLARITY_MASK BIT(1) 116 #define OC1_CFG_EN_THROTTLE_MASK BIT(0) 117 118 #define OC1_CNT_THRESHOLD 0x314 119 #define OC1_THROTTLE_PERIOD 0x318 120 #define OC1_ALARM_COUNT 0x31c 121 #define OC1_FILTER 0x320 122 #define OC1_STATS 0x3a8 123 124 #define OC_INTR_STATUS 0x39c 125 #define OC_INTR_ENABLE 0x3a0 126 #define OC_INTR_DISABLE 0x3a4 127 #define OC_STATS_CTL 0x3c4 128 #define OC_STATS_CTL_CLR_ALL 0x2 129 #define OC_STATS_CTL_EN_ALL 0x1 130 131 #define OC_INTR_OC1_MASK BIT(0) 132 #define OC_INTR_OC2_MASK BIT(1) 133 #define OC_INTR_OC3_MASK BIT(2) 134 #define OC_INTR_OC4_MASK BIT(3) 135 #define OC_INTR_OC5_MASK BIT(4) 136 137 #define THROT_GLOBAL_CFG 0x400 138 #define THROT_GLOBAL_ENB_MASK BIT(0) 139 140 #define CPU_PSKIP_STATUS 0x418 141 #define XPU_PSKIP_STATUS_M_MASK (0xff << 12) 142 #define XPU_PSKIP_STATUS_N_MASK (0xff << 4) 143 #define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK BIT(1) 144 #define XPU_PSKIP_STATUS_ENABLED_MASK BIT(0) 145 146 #define THROT_PRIORITY_LOCK 0x424 147 #define THROT_PRIORITY_LOCK_PRIORITY_MASK 0xff 148 149 #define THROT_STATUS 0x428 150 #define THROT_STATUS_BREACH_MASK BIT(12) 151 #define THROT_STATUS_STATE_MASK (0xff << 4) 152 #define THROT_STATUS_ENABLED_MASK BIT(0) 153 154 #define THROT_PSKIP_CTRL_LITE_CPU 0x430 155 #define THROT_PSKIP_CTRL_ENABLE_MASK BIT(31) 156 #define THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8) 157 #define THROT_PSKIP_CTRL_DIVISOR_MASK 0xff 158 #define THROT_PSKIP_CTRL_VECT_GPU_MASK (0x7 << 16) 159 #define THROT_PSKIP_CTRL_VECT_CPU_MASK (0x7 << 8) 160 #define THROT_PSKIP_CTRL_VECT2_CPU_MASK 0x7 161 162 #define THROT_VECT_NONE 0x0 /* 3'b000 */ 163 #define THROT_VECT_LOW 0x1 /* 3'b001 */ 164 #define THROT_VECT_MED 0x3 /* 3'b011 */ 165 #define THROT_VECT_HIGH 0x7 /* 3'b111 */ 166 167 #define THROT_PSKIP_RAMP_LITE_CPU 0x434 168 #define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31) 169 #define THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8) 170 #define THROT_PSKIP_RAMP_STEP_MASK 0xff 171 172 #define THROT_PRIORITY_LITE 0x444 173 #define THROT_PRIORITY_LITE_PRIO_MASK 0xff 174 175 #define THROT_DELAY_LITE 0x448 176 #define THROT_DELAY_LITE_DELAY_MASK 0xff 177 178 /* car register offsets needed for enabling HW throttling */ 179 #define CAR_SUPER_CCLKG_DIVIDER 0x36c 180 #define CDIVG_USE_THERM_CONTROLS_MASK BIT(30) 181 182 /* ccroc register offsets needed for enabling HW throttling for Tegra132 */ 183 #define CCROC_SUPER_CCLKG_DIVIDER 0x024 184 185 #define CCROC_GLOBAL_CFG 0x148 186 187 #define CCROC_THROT_PSKIP_RAMP_CPU 0x150 188 #define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31) 189 #define CCROC_THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8) 190 #define CCROC_THROT_PSKIP_RAMP_STEP_MASK 0xff 191 192 #define CCROC_THROT_PSKIP_CTRL_CPU 0x154 193 #define CCROC_THROT_PSKIP_CTRL_ENB_MASK BIT(31) 194 #define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8) 195 #define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK 0xff 196 197 /* get val from register(r) mask bits(m) */ 198 #define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1)) 199 /* set val(v) to mask bits(m) of register(r) */ 200 #define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \ 201 (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1))) 202 203 /* get dividend from the depth */ 204 #define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1) 205 206 /* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h 207 * level vector 208 * NONE 3'b000 209 * LOW 3'b001 210 * MED 3'b011 211 * HIGH 3'b111 212 */ 213 #define THROT_LEVEL_TO_DEPTH(level) ((0x1 << (level)) - 1) 214 215 /* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */ 216 #define THROT_OFFSET 0x30 217 #define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \ 218 (THROT_OFFSET * throt) + (8 * dev)) 219 #define THROT_PSKIP_RAMP(throt, dev) (THROT_PSKIP_RAMP_LITE_CPU + \ 220 (THROT_OFFSET * throt) + (8 * dev)) 221 222 /* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */ 223 #define THROT_PRIORITY_CTRL(throt) (THROT_PRIORITY_LITE + \ 224 (THROT_OFFSET * throt)) 225 #define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \ 226 (THROT_OFFSET * throt)) 227 228 #define ALARM_OFFSET 0x14 229 #define ALARM_CFG(throt) (OC1_CFG + \ 230 (ALARM_OFFSET * (throt - THROTTLE_OC1))) 231 232 #define ALARM_CNT_THRESHOLD(throt) (OC1_CNT_THRESHOLD + \ 233 (ALARM_OFFSET * (throt - THROTTLE_OC1))) 234 235 #define ALARM_THROTTLE_PERIOD(throt) (OC1_THROTTLE_PERIOD + \ 236 (ALARM_OFFSET * (throt - THROTTLE_OC1))) 237 238 #define ALARM_ALARM_COUNT(throt) (OC1_ALARM_COUNT + \ 239 (ALARM_OFFSET * (throt - THROTTLE_OC1))) 240 241 #define ALARM_FILTER(throt) (OC1_FILTER + \ 242 (ALARM_OFFSET * (throt - THROTTLE_OC1))) 243 244 #define ALARM_STATS(throt) (OC1_STATS + \ 245 (4 * (throt - THROTTLE_OC1))) 246 247 /* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/ 248 #define CCROC_THROT_OFFSET 0x0c 249 #define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \ 250 (CCROC_THROT_OFFSET * vect)) 251 #define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect) (CCROC_THROT_PSKIP_RAMP_CPU + \ 252 (CCROC_THROT_OFFSET * vect)) 253 254 /* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */ 255 #define THERMCTL_LVL_REGS_SIZE 0x20 256 #define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE)) 257 258 #define OC_THROTTLE_MODE_DISABLED 0 259 #define OC_THROTTLE_MODE_BRIEF 2 260 261 static const int min_low_temp = -127000; 262 static const int max_high_temp = 127000; 263 264 enum soctherm_throttle_id { 265 THROTTLE_LIGHT = 0, 266 THROTTLE_HEAVY, 267 THROTTLE_OC1, 268 THROTTLE_OC2, 269 THROTTLE_OC3, 270 THROTTLE_OC4, 271 THROTTLE_OC5, /* OC5 is reserved */ 272 THROTTLE_SIZE, 273 }; 274 275 enum soctherm_oc_irq_id { 276 TEGRA_SOC_OC_IRQ_1, 277 TEGRA_SOC_OC_IRQ_2, 278 TEGRA_SOC_OC_IRQ_3, 279 TEGRA_SOC_OC_IRQ_4, 280 TEGRA_SOC_OC_IRQ_5, 281 TEGRA_SOC_OC_IRQ_MAX, 282 }; 283 284 enum soctherm_throttle_dev_id { 285 THROTTLE_DEV_CPU = 0, 286 THROTTLE_DEV_GPU, 287 THROTTLE_DEV_SIZE, 288 }; 289 290 static const char *const throt_names[] = { 291 [THROTTLE_LIGHT] = "light", 292 [THROTTLE_HEAVY] = "heavy", 293 [THROTTLE_OC1] = "oc1", 294 [THROTTLE_OC2] = "oc2", 295 [THROTTLE_OC3] = "oc3", 296 [THROTTLE_OC4] = "oc4", 297 [THROTTLE_OC5] = "oc5", 298 }; 299 300 struct tegra_soctherm; 301 struct tegra_thermctl_zone { 302 void __iomem *reg; 303 struct device *dev; 304 struct tegra_soctherm *ts; 305 struct thermal_zone_device *tz; 306 const struct tegra_tsensor_group *sg; 307 }; 308 309 struct soctherm_oc_cfg { 310 u32 active_low; 311 u32 throt_period; 312 u32 alarm_cnt_thresh; 313 u32 alarm_filter; 314 u32 mode; 315 bool intr_en; 316 }; 317 318 struct soctherm_throt_cfg { 319 const char *name; 320 unsigned int id; 321 u8 priority; 322 u8 cpu_throt_level; 323 u32 cpu_throt_depth; 324 u32 gpu_throt_level; 325 struct soctherm_oc_cfg oc_cfg; 326 struct thermal_cooling_device *cdev; 327 bool init; 328 }; 329 330 struct tegra_soctherm { 331 struct reset_control *reset; 332 struct clk *clock_tsensor; 333 struct clk *clock_soctherm; 334 void __iomem *regs; 335 void __iomem *clk_regs; 336 void __iomem *ccroc_regs; 337 338 int thermal_irq; 339 int edp_irq; 340 341 u32 *calib; 342 struct thermal_zone_device **thermctl_tzs; 343 struct tegra_soctherm_soc *soc; 344 345 struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE]; 346 347 struct dentry *debugfs_dir; 348 349 struct mutex thermctl_lock; 350 }; 351 352 struct soctherm_oc_irq_chip_data { 353 struct mutex irq_lock; /* serialize OC IRQs */ 354 struct irq_chip irq_chip; 355 struct irq_domain *domain; 356 int irq_enable; 357 }; 358 359 static struct soctherm_oc_irq_chip_data soc_irq_cdata; 360 361 /* Ensure that TEGRA114_* and TEGRA124_* counterparts are equal */ 362 static_assert(TEGRA114_SOCTHERM_SENSOR_CPU == TEGRA124_SOCTHERM_SENSOR_CPU); 363 static_assert(TEGRA114_SOCTHERM_SENSOR_MEM == TEGRA124_SOCTHERM_SENSOR_MEM); 364 static_assert(TEGRA114_SOCTHERM_SENSOR_GPU == TEGRA124_SOCTHERM_SENSOR_GPU); 365 static_assert(TEGRA114_SOCTHERM_SENSOR_PLLX == TEGRA124_SOCTHERM_SENSOR_PLLX); 366 367 /** 368 * ccroc_writel() - writes a value to a CCROC register 369 * @ts: pointer to a struct tegra_soctherm 370 * @value: the value to write 371 * @reg: the register offset 372 * 373 * Writes @v to @reg. No return value. 374 */ 375 static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg) 376 { 377 writel(value, (ts->ccroc_regs + reg)); 378 } 379 380 /** 381 * ccroc_readl() - reads specified register from CCROC IP block 382 * @ts: pointer to a struct tegra_soctherm 383 * @reg: register address to be read 384 * 385 * Return: the value of the register 386 */ 387 static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg) 388 { 389 return readl(ts->ccroc_regs + reg); 390 } 391 392 static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i) 393 { 394 const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i]; 395 void __iomem *base = tegra->regs + sensor->base; 396 unsigned int val; 397 398 val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT; 399 writel(val, base + SENSOR_CONFIG0); 400 401 val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT; 402 val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT; 403 val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT; 404 val |= SENSOR_CONFIG1_TEMP_ENABLE; 405 writel(val, base + SENSOR_CONFIG1); 406 407 writel(tegra->calib[i], base + SENSOR_CONFIG2); 408 } 409 410 /* 411 * Translate from soctherm readback format to millicelsius. 412 * The soctherm readback format in bits is as follows: 413 * TTTTTTTT H______N 414 * where T's contain the temperature in Celsius, 415 * H denotes an addition of 0.5 Celsius and N denotes negation 416 * of the final value. 417 */ 418 static int translate_temp(u16 val) 419 { 420 int t; 421 422 t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000; 423 if (val & READBACK_ADD_HALF) 424 t += 500; 425 if (val & READBACK_NEGATE) 426 t *= -1; 427 428 return t; 429 } 430 431 static int tegra_thermctl_get_temp(struct thermal_zone_device *tz, int *out_temp) 432 { 433 struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz); 434 u32 val; 435 436 val = readl(zone->reg); 437 val = REG_GET_MASK(val, zone->sg->sensor_temp_mask); 438 *out_temp = translate_temp(val); 439 440 return 0; 441 } 442 443 /** 444 * enforce_temp_range() - check and enforce temperature range [min, max] 445 * @dev: struct device * of the SOC_THERM instance 446 * @trip_temp: the trip temperature to check 447 * 448 * Checks and enforces the permitted temperature range that SOC_THERM 449 * HW can support This is 450 * done while taking care of precision. 451 * 452 * Return: The precision adjusted capped temperature in millicelsius. 453 */ 454 static int enforce_temp_range(struct device *dev, int trip_temp) 455 { 456 int temp; 457 458 temp = clamp_val(trip_temp, min_low_temp, max_high_temp); 459 if (temp != trip_temp) 460 dev_dbg(dev, "soctherm: trip temperature %d forced to %d\n", 461 trip_temp, temp); 462 return temp; 463 } 464 465 /** 466 * thermtrip_program() - Configures the hardware to shut down the 467 * system if a given sensor group reaches a given temperature 468 * @dev: ptr to the struct device for the SOC_THERM IP block 469 * @sg: pointer to the sensor group to set the thermtrip temperature for 470 * @trip_temp: the temperature in millicelsius to trigger the thermal trip at 471 * 472 * Sets the thermal trip threshold of the given sensor group to be the 473 * @trip_temp. If this threshold is crossed, the hardware will shut 474 * down. 475 * 476 * Note that, although @trip_temp is specified in millicelsius, the 477 * hardware is programmed in degrees Celsius. 478 * 479 * Return: 0 upon success, or %-EINVAL upon failure. 480 */ 481 static int thermtrip_program(struct device *dev, 482 const struct tegra_tsensor_group *sg, 483 int trip_temp) 484 { 485 struct tegra_soctherm *ts = dev_get_drvdata(dev); 486 int temp; 487 u32 r; 488 489 if (!sg || !sg->thermtrip_threshold_mask) 490 return -EINVAL; 491 492 temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain; 493 494 r = readl(ts->regs + THERMCTL_THERMTRIP_CTL); 495 r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp); 496 r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1); 497 r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0); 498 writel(r, ts->regs + THERMCTL_THERMTRIP_CTL); 499 500 return 0; 501 } 502 503 /** 504 * throttrip_program() - Configures the hardware to throttle the 505 * pulse if a given sensor group reaches a given temperature 506 * @dev: ptr to the struct device for the SOC_THERM IP block 507 * @sg: pointer to the sensor group to set the thermtrip temperature for 508 * @stc: pointer to the throttle need to be triggered 509 * @trip_temp: the temperature in millicelsius to trigger the thermal trip at 510 * 511 * Sets the thermal trip threshold and throttle event of the given sensor 512 * group. If this threshold is crossed, the hardware will trigger the 513 * throttle. 514 * 515 * Note that, although @trip_temp is specified in millicelsius, the 516 * hardware is programmed in degrees Celsius. 517 * 518 * Return: 0 upon success, or %-EINVAL upon failure. 519 */ 520 static int throttrip_program(struct device *dev, 521 const struct tegra_tsensor_group *sg, 522 struct soctherm_throt_cfg *stc, 523 int trip_temp) 524 { 525 struct tegra_soctherm *ts = dev_get_drvdata(dev); 526 int temp, cpu_throt, gpu_throt; 527 unsigned int throt; 528 u32 r, reg_off; 529 530 if (!sg || !stc || !stc->init) 531 return -EINVAL; 532 533 temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain; 534 535 /* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */ 536 throt = stc->id; 537 reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1); 538 539 if (throt == THROTTLE_LIGHT) { 540 cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT; 541 gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT; 542 } else { 543 cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY; 544 gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY; 545 if (throt != THROTTLE_HEAVY) 546 dev_warn(dev, 547 "invalid throt id %d - assuming HEAVY", 548 throt); 549 } 550 551 r = readl(ts->regs + reg_off); 552 r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp); 553 r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp); 554 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt); 555 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt); 556 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1); 557 writel(r, ts->regs + reg_off); 558 559 return 0; 560 } 561 562 static struct soctherm_throt_cfg * 563 find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name) 564 { 565 unsigned int i; 566 567 for (i = 0; ts->throt_cfgs[i].name; i++) 568 if (!strcmp(ts->throt_cfgs[i].name, name)) 569 return &ts->throt_cfgs[i]; 570 571 return NULL; 572 } 573 574 static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id) 575 { 576 int i, temp = min_low_temp; 577 struct tsensor_group_thermtrips *tt = ts->soc->thermtrips; 578 579 if (id >= TEGRA124_SOCTHERM_SENSOR_NUM) 580 return temp; 581 582 if (tt) { 583 for (i = 0; i < ts->soc->num_ttgs; i++) { 584 if (tt[i].id == id) 585 return tt[i].temp; 586 } 587 } 588 589 return temp; 590 } 591 592 static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, 593 const struct thermal_trip *trip, int temp) 594 { 595 struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz); 596 struct tegra_soctherm *ts = zone->ts; 597 const struct tegra_tsensor_group *sg = zone->sg; 598 struct device *dev = zone->dev; 599 600 if (!tz) 601 return -EINVAL; 602 603 if (trip->type == THERMAL_TRIP_CRITICAL) { 604 /* 605 * If thermtrips property is set in DT, 606 * doesn't need to program critical type trip to HW, 607 * if not, program critical trip to HW. 608 */ 609 if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id)) 610 return thermtrip_program(dev, sg, temp); 611 else 612 return 0; 613 614 } else if (trip->type == THERMAL_TRIP_HOT) { 615 int i; 616 617 for (i = 0; i < THROTTLE_SIZE; i++) { 618 struct thermal_cooling_device *cdev; 619 struct soctherm_throt_cfg *stc; 620 621 if (!ts->throt_cfgs[i].init) 622 continue; 623 624 cdev = ts->throt_cfgs[i].cdev; 625 if (thermal_trip_is_bound_to_cdev(tz, trip, cdev)) 626 stc = find_throttle_cfg_by_name(ts, cdev->type); 627 else 628 continue; 629 630 return throttrip_program(dev, sg, stc, temp); 631 } 632 } 633 634 return 0; 635 } 636 637 static void thermal_irq_enable(struct tegra_thermctl_zone *zn) 638 { 639 u32 r; 640 641 /* multiple zones could be handling and setting trips at once */ 642 mutex_lock(&zn->ts->thermctl_lock); 643 r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE); 644 r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN); 645 writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE); 646 mutex_unlock(&zn->ts->thermctl_lock); 647 } 648 649 static void thermal_irq_disable(struct tegra_thermctl_zone *zn) 650 { 651 u32 r; 652 653 /* multiple zones could be handling and setting trips at once */ 654 mutex_lock(&zn->ts->thermctl_lock); 655 r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE); 656 r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0); 657 writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE); 658 mutex_unlock(&zn->ts->thermctl_lock); 659 } 660 661 static int tegra_thermctl_set_trips(struct thermal_zone_device *tz, int lo, int hi) 662 { 663 struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz); 664 u32 r; 665 666 thermal_irq_disable(zone); 667 668 r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset); 669 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0); 670 writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset); 671 672 lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain; 673 hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain; 674 dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo); 675 676 r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi); 677 r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo); 678 r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1); 679 writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset); 680 681 thermal_irq_enable(zone); 682 683 return 0; 684 } 685 686 static const struct thermal_zone_device_ops tegra_of_thermal_ops = { 687 .get_temp = tegra_thermctl_get_temp, 688 .set_trip_temp = tegra_thermctl_set_trip_temp, 689 .set_trips = tegra_thermctl_set_trips, 690 }; 691 692 static int get_hot_trip_cb(struct thermal_trip *trip, void *arg) 693 { 694 const struct thermal_trip **trip_ret = arg; 695 696 if (trip->type != THERMAL_TRIP_HOT) 697 return 0; 698 699 *trip_ret = trip; 700 /* Return nonzero to terminate the search. */ 701 return 1; 702 } 703 704 static const struct thermal_trip *get_hot_trip(struct thermal_zone_device *tz) 705 { 706 const struct thermal_trip *trip = NULL; 707 708 thermal_zone_for_each_trip(tz, get_hot_trip_cb, &trip); 709 710 return trip; 711 } 712 713 /** 714 * tegra_soctherm_set_hwtrips() - set HW trip point from DT data 715 * @dev: struct device * of the SOC_THERM instance 716 * @sg: pointer to the sensor group to set the thermtrip temperature for 717 * @tz: struct thermal_zone_device * 718 * 719 * Configure the SOC_THERM HW trip points, setting "THERMTRIP" 720 * "THROTTLE" trip points , using "thermtrips", "critical" or "hot" 721 * type trip_temp 722 * from thermal zone. 723 * After they have been configured, THERMTRIP or THROTTLE will take 724 * action when the configured SoC thermal sensor group reaches a 725 * certain temperature. 726 * 727 * Return: 0 upon success, or a negative error code on failure. 728 * "Success" does not mean that trips was enabled; it could also 729 * mean that no node was found in DT. 730 * THERMTRIP has been enabled successfully when a message similar to 731 * this one appears on the serial console: 732 * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC" 733 * THROTTLE has been enabled successfully when a message similar to 734 * this one appears on the serial console: 735 * ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC" 736 */ 737 static int tegra_soctherm_set_hwtrips(struct device *dev, 738 const struct tegra_tsensor_group *sg, 739 struct thermal_zone_device *tz) 740 { 741 struct tegra_soctherm *ts = dev_get_drvdata(dev); 742 const struct thermal_trip *hot_trip; 743 struct soctherm_throt_cfg *stc; 744 int i, temperature, ret; 745 746 /* Get thermtrips. If missing, try to get critical trips. */ 747 temperature = tsensor_group_thermtrip_get(ts, sg->id); 748 if (min_low_temp == temperature) 749 if (thermal_zone_get_crit_temp(tz, &temperature)) 750 temperature = max_high_temp; 751 752 ret = thermtrip_program(dev, sg, temperature); 753 if (ret) { 754 dev_err(dev, "thermtrip: %s: error during enable\n", sg->name); 755 return ret; 756 } 757 758 dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n", 759 sg->name, temperature); 760 761 hot_trip = get_hot_trip(tz); 762 if (!hot_trip) { 763 dev_info(dev, "throttrip: %s: missing hot temperature\n", 764 sg->name); 765 return 0; 766 } 767 768 for (i = 0; i < THROTTLE_OC1; i++) { 769 struct thermal_cooling_device *cdev; 770 771 if (!ts->throt_cfgs[i].init) 772 continue; 773 774 cdev = ts->throt_cfgs[i].cdev; 775 if (thermal_trip_is_bound_to_cdev(tz, hot_trip, cdev)) 776 stc = find_throttle_cfg_by_name(ts, cdev->type); 777 else 778 continue; 779 780 ret = throttrip_program(dev, sg, stc, temperature); 781 if (ret) { 782 dev_err(dev, "throttrip: %s: error during enable\n", 783 sg->name); 784 return ret; 785 } 786 787 dev_info(dev, 788 "throttrip: will throttle when %s reaches %d mC\n", 789 sg->name, temperature); 790 break; 791 } 792 793 if (i == THROTTLE_SIZE) 794 dev_info(dev, "throttrip: %s: missing throttle cdev\n", 795 sg->name); 796 797 return 0; 798 } 799 800 static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id) 801 { 802 struct tegra_soctherm *ts = dev_id; 803 u32 r; 804 805 /* Case for no lock: 806 * Although interrupts are enabled in set_trips, there is still no need 807 * to lock here because the interrupts are disabled before programming 808 * new trip points. Hence there cant be a interrupt on the same sensor. 809 * An interrupt can however occur on a sensor while trips are being 810 * programmed on a different one. This beign a LEVEL interrupt won't 811 * cause a new interrupt but this is taken care of by the re-reading of 812 * the STATUS register in the thread function. 813 */ 814 r = readl(ts->regs + THERMCTL_INTR_STATUS); 815 writel(r, ts->regs + THERMCTL_INTR_DISABLE); 816 817 return IRQ_WAKE_THREAD; 818 } 819 820 /** 821 * soctherm_thermal_isr_thread() - Handles a thermal interrupt request 822 * @irq: The interrupt number being requested; not used 823 * @dev_id: Opaque pointer to tegra_soctherm; 824 * 825 * Clears the interrupt status register if there are expected 826 * interrupt bits set. 827 * The interrupt(s) are then handled by updating the corresponding 828 * thermal zones. 829 * 830 * An error is logged if any unexpected interrupt bits are set. 831 * 832 * Disabled interrupts are re-enabled. 833 * 834 * Return: %IRQ_HANDLED. Interrupt was handled and no further processing 835 * is needed. 836 */ 837 static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id) 838 { 839 struct tegra_soctherm *ts = dev_id; 840 struct thermal_zone_device *tz; 841 u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0; 842 843 st = readl(ts->regs + THERMCTL_INTR_STATUS); 844 845 /* deliberately clear expected interrupts handled in SW */ 846 cp |= st & TH_INTR_CD0_MASK; 847 cp |= st & TH_INTR_CU0_MASK; 848 849 gp |= st & TH_INTR_GD0_MASK; 850 gp |= st & TH_INTR_GU0_MASK; 851 852 pl |= st & TH_INTR_PD0_MASK; 853 pl |= st & TH_INTR_PU0_MASK; 854 855 me |= st & TH_INTR_MD0_MASK; 856 me |= st & TH_INTR_MU0_MASK; 857 858 ex |= cp | gp | pl | me; 859 if (ex) { 860 writel(ex, ts->regs + THERMCTL_INTR_STATUS); 861 st &= ~ex; 862 863 if (cp) { 864 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU]; 865 thermal_zone_device_update(tz, 866 THERMAL_EVENT_UNSPECIFIED); 867 } 868 869 if (gp) { 870 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU]; 871 thermal_zone_device_update(tz, 872 THERMAL_EVENT_UNSPECIFIED); 873 } 874 875 if (pl) { 876 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX]; 877 thermal_zone_device_update(tz, 878 THERMAL_EVENT_UNSPECIFIED); 879 } 880 881 if (me) { 882 tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM]; 883 thermal_zone_device_update(tz, 884 THERMAL_EVENT_UNSPECIFIED); 885 } 886 } 887 888 /* deliberately ignore expected interrupts NOT handled in SW */ 889 ex |= TH_INTR_IGNORE_MASK; 890 st &= ~ex; 891 892 if (st) { 893 /* Whine about any other unexpected INTR bits still set */ 894 pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st); 895 writel(st, ts->regs + THERMCTL_INTR_STATUS); 896 } 897 898 return IRQ_HANDLED; 899 } 900 901 /** 902 * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt 903 * @ts: pointer to a struct tegra_soctherm 904 * @alarm: The soctherm throttle id 905 * @enable: Flag indicating enable the soctherm over-current 906 * interrupt or disable it 907 * 908 * Enables a specific over-current pins @alarm to raise an interrupt if the flag 909 * is set and the alarm corresponds to OC1, OC2, OC3, or OC4. 910 */ 911 static void soctherm_oc_intr_enable(struct tegra_soctherm *ts, 912 enum soctherm_throttle_id alarm, 913 bool enable) 914 { 915 u32 r; 916 917 if (!enable) 918 return; 919 920 r = readl(ts->regs + OC_INTR_ENABLE); 921 switch (alarm) { 922 case THROTTLE_OC1: 923 r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1); 924 break; 925 case THROTTLE_OC2: 926 r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1); 927 break; 928 case THROTTLE_OC3: 929 r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1); 930 break; 931 case THROTTLE_OC4: 932 r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1); 933 break; 934 default: 935 r = 0; 936 break; 937 } 938 writel(r, ts->regs + OC_INTR_ENABLE); 939 } 940 941 /** 942 * soctherm_handle_alarm() - Handles soctherm alarms 943 * @alarm: The soctherm throttle id 944 * 945 * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing 946 * a warning or informative message. 947 * 948 * Return: -EINVAL for @alarm = THROTTLE_OC3, otherwise 0 (success). 949 */ 950 static int soctherm_handle_alarm(enum soctherm_throttle_id alarm) 951 { 952 int rv = -EINVAL; 953 954 switch (alarm) { 955 case THROTTLE_OC1: 956 pr_debug("soctherm: Successfully handled OC1 alarm\n"); 957 rv = 0; 958 break; 959 960 case THROTTLE_OC2: 961 pr_debug("soctherm: Successfully handled OC2 alarm\n"); 962 rv = 0; 963 break; 964 965 case THROTTLE_OC3: 966 pr_debug("soctherm: Successfully handled OC3 alarm\n"); 967 rv = 0; 968 break; 969 970 case THROTTLE_OC4: 971 pr_debug("soctherm: Successfully handled OC4 alarm\n"); 972 rv = 0; 973 break; 974 975 default: 976 break; 977 } 978 979 if (rv) 980 pr_err("soctherm: ERROR in handling %s alarm\n", 981 throt_names[alarm]); 982 983 return rv; 984 } 985 986 /** 987 * soctherm_edp_isr_thread() - log an over-current interrupt request 988 * @irq: OC irq number. Currently not being used. See description 989 * @arg: a void pointer for callback, currently not being used 990 * 991 * Over-current events are handled in hardware. This function is called to log 992 * and handle any OC events that happened. Additionally, it checks every 993 * over-current interrupt registers for registers are set but 994 * was not expected (i.e. any discrepancy in interrupt status) by the function, 995 * the discrepancy will logged. 996 * 997 * Return: %IRQ_HANDLED 998 */ 999 static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg) 1000 { 1001 struct tegra_soctherm *ts = arg; 1002 u32 st, ex, oc1, oc2, oc3, oc4; 1003 1004 st = readl(ts->regs + OC_INTR_STATUS); 1005 1006 /* deliberately clear expected interrupts handled in SW */ 1007 oc1 = st & OC_INTR_OC1_MASK; 1008 oc2 = st & OC_INTR_OC2_MASK; 1009 oc3 = st & OC_INTR_OC3_MASK; 1010 oc4 = st & OC_INTR_OC4_MASK; 1011 ex = oc1 | oc2 | oc3 | oc4; 1012 1013 pr_err("soctherm: OC ALARM 0x%08x\n", ex); 1014 if (ex) { 1015 writel(st, ts->regs + OC_INTR_STATUS); 1016 st &= ~ex; 1017 1018 if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1)) 1019 soctherm_oc_intr_enable(ts, THROTTLE_OC1, true); 1020 1021 if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2)) 1022 soctherm_oc_intr_enable(ts, THROTTLE_OC2, true); 1023 1024 if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3)) 1025 soctherm_oc_intr_enable(ts, THROTTLE_OC3, true); 1026 1027 if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4)) 1028 soctherm_oc_intr_enable(ts, THROTTLE_OC4, true); 1029 1030 if (oc1 && soc_irq_cdata.irq_enable & BIT(0)) 1031 handle_nested_irq( 1032 irq_find_mapping(soc_irq_cdata.domain, 0)); 1033 1034 if (oc2 && soc_irq_cdata.irq_enable & BIT(1)) 1035 handle_nested_irq( 1036 irq_find_mapping(soc_irq_cdata.domain, 1)); 1037 1038 if (oc3 && soc_irq_cdata.irq_enable & BIT(2)) 1039 handle_nested_irq( 1040 irq_find_mapping(soc_irq_cdata.domain, 2)); 1041 1042 if (oc4 && soc_irq_cdata.irq_enable & BIT(3)) 1043 handle_nested_irq( 1044 irq_find_mapping(soc_irq_cdata.domain, 3)); 1045 } 1046 1047 if (st) { 1048 pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st); 1049 writel(st, ts->regs + OC_INTR_STATUS); 1050 } 1051 1052 return IRQ_HANDLED; 1053 } 1054 1055 /** 1056 * soctherm_edp_isr() - Disables any active interrupts 1057 * @irq: The interrupt request number 1058 * @arg: Opaque pointer to an argument 1059 * 1060 * Writes to the OC_INTR_DISABLE register the over current interrupt status, 1061 * masking any asserted interrupts. Doing this prevents the same interrupts 1062 * from triggering this isr repeatedly. The thread woken by this isr will 1063 * handle asserted interrupts and subsequently unmask/re-enable them. 1064 * 1065 * The OC_INTR_DISABLE register indicates which OC interrupts 1066 * have been disabled. 1067 * 1068 * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread 1069 */ 1070 static irqreturn_t soctherm_edp_isr(int irq, void *arg) 1071 { 1072 struct tegra_soctherm *ts = arg; 1073 u32 r; 1074 1075 if (!ts) 1076 return IRQ_NONE; 1077 1078 r = readl(ts->regs + OC_INTR_STATUS); 1079 writel(r, ts->regs + OC_INTR_DISABLE); 1080 1081 return IRQ_WAKE_THREAD; 1082 } 1083 1084 /** 1085 * soctherm_oc_irq_lock() - locks the over-current interrupt request 1086 * @data: Interrupt request data 1087 * 1088 * Looks up the chip data from @data and locks the mutex associated with 1089 * a particular over-current interrupt request. 1090 */ 1091 static void soctherm_oc_irq_lock(struct irq_data *data) 1092 { 1093 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data); 1094 1095 mutex_lock(&d->irq_lock); 1096 } 1097 1098 /** 1099 * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request 1100 * @data: Interrupt request data 1101 * 1102 * Looks up the interrupt request data @data and unlocks the mutex associated 1103 * with a particular over-current interrupt request. 1104 */ 1105 static void soctherm_oc_irq_sync_unlock(struct irq_data *data) 1106 { 1107 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data); 1108 1109 mutex_unlock(&d->irq_lock); 1110 } 1111 1112 /** 1113 * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue 1114 * @data: irq_data structure of the chip 1115 * 1116 * Sets the irq_enable bit of SOC_THERM allowing SOC_THERM 1117 * to respond to over-current interrupts. 1118 * 1119 */ 1120 static void soctherm_oc_irq_enable(struct irq_data *data) 1121 { 1122 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data); 1123 1124 d->irq_enable |= BIT(data->hwirq); 1125 } 1126 1127 /** 1128 * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests 1129 * @data: The interrupt request information 1130 * 1131 * Clears the interrupt request enable bit of the overcurrent 1132 * interrupt request chip data. 1133 * 1134 * Return: Nothing is returned (void) 1135 */ 1136 static void soctherm_oc_irq_disable(struct irq_data *data) 1137 { 1138 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data); 1139 1140 d->irq_enable &= ~BIT(data->hwirq); 1141 } 1142 1143 static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type) 1144 { 1145 return 0; 1146 } 1147 1148 /** 1149 * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper 1150 * @h: Interrupt request domain 1151 * @virq: Virtual interrupt request number 1152 * @hw: Hardware interrupt request number 1153 * 1154 * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM 1155 * interrupt request is called, the irq_domain takes the request's virtual 1156 * request number (much like a virtual memory address) and maps it to a 1157 * physical hardware request number. 1158 * 1159 * When a mapping doesn't already exist for a virtual request number, the 1160 * irq_domain calls this function to associate the virtual request number with 1161 * a hardware request number. 1162 * 1163 * Return: 0 1164 */ 1165 static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq, 1166 irq_hw_number_t hw) 1167 { 1168 struct soctherm_oc_irq_chip_data *data = h->host_data; 1169 1170 irq_set_chip_data(virq, data); 1171 irq_set_chip(virq, &data->irq_chip); 1172 irq_set_nested_thread(virq, 1); 1173 return 0; 1174 } 1175 1176 /** 1177 * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts 1178 * @d: Interrupt request domain 1179 * @ctrlr: Controller device tree node 1180 * @intspec: Array of u32s from DTs "interrupt" property 1181 * @intsize: Number of values inside the intspec array 1182 * @out_hwirq: HW IRQ value associated with this interrupt 1183 * @out_type: The IRQ SENSE type for this interrupt. 1184 * 1185 * This Device Tree IRQ specifier translation function will translate a 1186 * specific "interrupt" as defined by 2 DT values where the cell values map 1187 * the hwirq number + 1 and linux irq flags. Since the output is the hwirq 1188 * number, this function will subtract 1 from the value listed in DT. 1189 * 1190 * Return: 0 1191 */ 1192 static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d, 1193 struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, 1194 irq_hw_number_t *out_hwirq, unsigned int *out_type) 1195 { 1196 if (WARN_ON(intsize < 2)) 1197 return -EINVAL; 1198 1199 /* 1200 * The HW value is 1 index less than the DT IRQ values. 1201 * i.e. OC4 goes to HW index 3. 1202 */ 1203 *out_hwirq = intspec[0] - 1; 1204 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; 1205 return 0; 1206 } 1207 1208 static const struct irq_domain_ops soctherm_oc_domain_ops = { 1209 .map = soctherm_oc_irq_map, 1210 .xlate = soctherm_irq_domain_xlate_twocell, 1211 }; 1212 1213 /** 1214 * soctherm_oc_int_init() - Initial enabling of the over 1215 * current interrupts 1216 * @fwnode: The devicetree node for soctherm 1217 * @num_irqs: The number of new interrupt requests 1218 * 1219 * Sets the over current interrupt request chip data 1220 * 1221 * Return: 0 on success or if overcurrent interrupts are not enabled, 1222 * -ENOMEM (out of memory), or irq_base if the function failed to 1223 * allocate the irqs 1224 */ 1225 static int soctherm_oc_int_init(struct fwnode_handle *fwnode, int num_irqs) 1226 { 1227 if (!num_irqs) { 1228 pr_info("%s(): OC interrupts are not enabled\n", __func__); 1229 return 0; 1230 } 1231 1232 mutex_init(&soc_irq_cdata.irq_lock); 1233 soc_irq_cdata.irq_enable = 0; 1234 1235 soc_irq_cdata.irq_chip.name = "soc_therm_oc"; 1236 soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock; 1237 soc_irq_cdata.irq_chip.irq_bus_sync_unlock = 1238 soctherm_oc_irq_sync_unlock; 1239 soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable; 1240 soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable; 1241 soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type; 1242 soc_irq_cdata.irq_chip.irq_set_wake = NULL; 1243 1244 soc_irq_cdata.domain = irq_domain_create_linear(fwnode, num_irqs, &soctherm_oc_domain_ops, 1245 &soc_irq_cdata); 1246 if (!soc_irq_cdata.domain) { 1247 pr_err("%s: Failed to create IRQ domain\n", __func__); 1248 return -ENOMEM; 1249 } 1250 1251 pr_debug("%s(): OC interrupts enabled successful\n", __func__); 1252 return 0; 1253 } 1254 1255 #ifdef CONFIG_DEBUG_FS 1256 static int regs_show(struct seq_file *s, void *data) 1257 { 1258 struct platform_device *pdev = s->private; 1259 struct tegra_soctherm *ts = platform_get_drvdata(pdev); 1260 const struct tegra_tsensor *tsensors = ts->soc->tsensors; 1261 const struct tegra_tsensor_group **ttgs = ts->soc->ttgs; 1262 u32 r, state; 1263 int i, level; 1264 1265 seq_puts(s, "-----TSENSE (convert HW)-----\n"); 1266 1267 for (i = 0; i < ts->soc->num_tsensors; i++) { 1268 r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1); 1269 state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE); 1270 1271 seq_printf(s, "%s: ", tsensors[i].name); 1272 seq_printf(s, "En(%d) ", state); 1273 1274 if (!state) { 1275 seq_puts(s, "\n"); 1276 continue; 1277 } 1278 1279 state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK); 1280 seq_printf(s, "tiddq(%d) ", state); 1281 state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK); 1282 seq_printf(s, "ten_count(%d) ", state); 1283 state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK); 1284 seq_printf(s, "tsample(%d) ", state + 1); 1285 1286 r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1); 1287 state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK); 1288 seq_printf(s, "Temp(%d/", state); 1289 state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK); 1290 seq_printf(s, "%d) ", translate_temp(state)); 1291 1292 r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0); 1293 state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK); 1294 seq_printf(s, "Capture(%d/", state); 1295 state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK); 1296 seq_printf(s, "%d) ", state); 1297 1298 r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0); 1299 state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP); 1300 seq_printf(s, "Stop(%d) ", state); 1301 state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK); 1302 seq_printf(s, "Tall(%d) ", state); 1303 state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER); 1304 seq_printf(s, "Over(%d/", state); 1305 state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER); 1306 seq_printf(s, "%d/", state); 1307 state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER); 1308 seq_printf(s, "%d) ", state); 1309 1310 r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2); 1311 state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK); 1312 seq_printf(s, "Therm_A/B(%d/", state); 1313 state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK); 1314 seq_printf(s, "%d)\n", (s16)state); 1315 } 1316 1317 r = readl(ts->regs + SENSOR_PDIV); 1318 seq_printf(s, "PDIV: 0x%x\n", r); 1319 1320 r = readl(ts->regs + SENSOR_HOTSPOT_OFF); 1321 seq_printf(s, "HOTSPOT: 0x%x\n", r); 1322 1323 seq_puts(s, "\n"); 1324 seq_puts(s, "-----SOC_THERM-----\n"); 1325 1326 r = readl(ts->regs + SENSOR_TEMP1); 1327 state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK); 1328 seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state)); 1329 state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK); 1330 seq_printf(s, " GPU(%d) ", translate_temp(state)); 1331 r = readl(ts->regs + SENSOR_TEMP2); 1332 state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK); 1333 seq_printf(s, " PLLX(%d) ", translate_temp(state)); 1334 state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK); 1335 seq_printf(s, " MEM(%d)\n", translate_temp(state)); 1336 1337 for (i = 0; i < ts->soc->num_ttgs; i++) { 1338 seq_printf(s, "%s:\n", ttgs[i]->name); 1339 for (level = 0; level < 4; level++) { 1340 s32 v; 1341 u32 mask; 1342 u16 off = ttgs[i]->thermctl_lvl0_offset; 1343 1344 r = readl(ts->regs + THERMCTL_LVL_REG(off, level)); 1345 1346 mask = ttgs[i]->thermctl_lvl0_up_thresh_mask; 1347 state = REG_GET_MASK(r, mask); 1348 v = sign_extend32(state, ts->soc->bptt - 1); 1349 v *= ts->soc->thresh_grain; 1350 seq_printf(s, " %d: Up/Dn(%d /", level, v); 1351 1352 mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask; 1353 state = REG_GET_MASK(r, mask); 1354 v = sign_extend32(state, ts->soc->bptt - 1); 1355 v *= ts->soc->thresh_grain; 1356 seq_printf(s, "%d ) ", v); 1357 1358 mask = THERMCTL_LVL0_CPU0_EN_MASK; 1359 state = REG_GET_MASK(r, mask); 1360 seq_printf(s, "En(%d) ", state); 1361 1362 mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK; 1363 state = REG_GET_MASK(r, mask); 1364 seq_puts(s, "CPU Throt"); 1365 if (!state) 1366 seq_printf(s, "(%s) ", "none"); 1367 else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT) 1368 seq_printf(s, "(%s) ", "L"); 1369 else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY) 1370 seq_printf(s, "(%s) ", "H"); 1371 else 1372 seq_printf(s, "(%s) ", "H+L"); 1373 1374 mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK; 1375 state = REG_GET_MASK(r, mask); 1376 seq_puts(s, "GPU Throt"); 1377 if (!state) 1378 seq_printf(s, "(%s) ", "none"); 1379 else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT) 1380 seq_printf(s, "(%s) ", "L"); 1381 else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY) 1382 seq_printf(s, "(%s) ", "H"); 1383 else 1384 seq_printf(s, "(%s) ", "H+L"); 1385 1386 mask = THERMCTL_LVL0_CPU0_STATUS_MASK; 1387 state = REG_GET_MASK(r, mask); 1388 seq_printf(s, "Status(%s)\n", 1389 state == 0 ? "LO" : 1390 state == 1 ? "In" : 1391 state == 2 ? "Res" : "HI"); 1392 } 1393 } 1394 1395 r = readl(ts->regs + THERMCTL_STATS_CTL); 1396 seq_printf(s, "STATS: Up(%s) Dn(%s)\n", 1397 r & STATS_CTL_EN_UP ? "En" : "--", 1398 r & STATS_CTL_EN_DN ? "En" : "--"); 1399 1400 for (level = 0; level < 4; level++) { 1401 u16 off; 1402 1403 off = THERMCTL_LVL0_UP_STATS; 1404 r = readl(ts->regs + THERMCTL_LVL_REG(off, level)); 1405 seq_printf(s, " Level_%d Up(%d) ", level, r); 1406 1407 off = THERMCTL_LVL0_DN_STATS; 1408 r = readl(ts->regs + THERMCTL_LVL_REG(off, level)); 1409 seq_printf(s, "Dn(%d)\n", r); 1410 } 1411 1412 r = readl(ts->regs + THERMCTL_THERMTRIP_CTL); 1413 state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask); 1414 seq_printf(s, "Thermtrip Any En(%d)\n", state); 1415 for (i = 0; i < ts->soc->num_ttgs; i++) { 1416 state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask); 1417 seq_printf(s, " %s En(%d) ", ttgs[i]->name, state); 1418 state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask); 1419 state *= ts->soc->thresh_grain; 1420 seq_printf(s, "Thresh(%d)\n", state); 1421 } 1422 1423 r = readl(ts->regs + THROT_GLOBAL_CFG); 1424 seq_puts(s, "\n"); 1425 seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r); 1426 1427 seq_puts(s, "---------------------------------------------------\n"); 1428 r = readl(ts->regs + THROT_STATUS); 1429 state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK); 1430 seq_printf(s, "THROT STATUS: breach(%d) ", state); 1431 state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK); 1432 seq_printf(s, "state(%d) ", state); 1433 state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK); 1434 seq_printf(s, "enabled(%d)\n", state); 1435 1436 r = readl(ts->regs + CPU_PSKIP_STATUS); 1437 if (ts->soc->use_ccroc) { 1438 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK); 1439 seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state); 1440 } else { 1441 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK); 1442 seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state); 1443 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK); 1444 seq_printf(s, "N(%d) ", state); 1445 state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK); 1446 seq_printf(s, "enabled(%d)\n", state); 1447 } 1448 1449 return 0; 1450 } 1451 1452 DEFINE_SHOW_ATTRIBUTE(regs); 1453 1454 static void soctherm_debug_init(struct platform_device *pdev) 1455 { 1456 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 1457 struct dentry *root; 1458 1459 root = debugfs_create_dir("soctherm", NULL); 1460 1461 tegra->debugfs_dir = root; 1462 1463 debugfs_create_file("reg_contents", 0644, root, pdev, ®s_fops); 1464 } 1465 #else 1466 static inline void soctherm_debug_init(struct platform_device *pdev) {} 1467 #endif 1468 1469 static int soctherm_clk_enable(struct platform_device *pdev, bool enable) 1470 { 1471 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 1472 int err; 1473 1474 if (!tegra->clock_soctherm || !tegra->clock_tsensor) 1475 return -EINVAL; 1476 1477 reset_control_assert(tegra->reset); 1478 1479 if (enable) { 1480 err = clk_prepare_enable(tegra->clock_soctherm); 1481 if (err) { 1482 reset_control_deassert(tegra->reset); 1483 return err; 1484 } 1485 1486 err = clk_prepare_enable(tegra->clock_tsensor); 1487 if (err) { 1488 clk_disable_unprepare(tegra->clock_soctherm); 1489 reset_control_deassert(tegra->reset); 1490 return err; 1491 } 1492 } else { 1493 clk_disable_unprepare(tegra->clock_tsensor); 1494 clk_disable_unprepare(tegra->clock_soctherm); 1495 } 1496 1497 reset_control_deassert(tegra->reset); 1498 1499 return 0; 1500 } 1501 1502 static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev, 1503 unsigned long *max_state) 1504 { 1505 *max_state = 1; 1506 return 0; 1507 } 1508 1509 static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev, 1510 unsigned long *cur_state) 1511 { 1512 struct tegra_soctherm *ts = cdev->devdata; 1513 u32 r; 1514 1515 r = readl(ts->regs + THROT_STATUS); 1516 if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK)) 1517 *cur_state = 1; 1518 else 1519 *cur_state = 0; 1520 1521 return 0; 1522 } 1523 1524 static int throt_set_cdev_state(struct thermal_cooling_device *cdev, 1525 unsigned long cur_state) 1526 { 1527 return 0; 1528 } 1529 1530 static const struct thermal_cooling_device_ops throt_cooling_ops = { 1531 .get_max_state = throt_get_cdev_max_state, 1532 .get_cur_state = throt_get_cdev_cur_state, 1533 .set_cur_state = throt_set_cdev_state, 1534 }; 1535 1536 static int soctherm_thermtrips_parse(struct platform_device *pdev) 1537 { 1538 struct device *dev = &pdev->dev; 1539 struct tegra_soctherm *ts = dev_get_drvdata(dev); 1540 struct tsensor_group_thermtrips *tt = ts->soc->thermtrips; 1541 const int max_num_prop = ts->soc->num_ttgs * 2; 1542 u32 *tlb; 1543 int i, j, n, ret; 1544 1545 if (!tt) 1546 return -ENOMEM; 1547 1548 n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips"); 1549 if (n <= 0) { 1550 dev_info(dev, 1551 "missing thermtrips, will use critical trips as shut down temp\n"); 1552 return n; 1553 } 1554 1555 n = min(max_num_prop, n); 1556 1557 tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL); 1558 if (!tlb) 1559 return -ENOMEM; 1560 ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips", 1561 tlb, n); 1562 if (ret) { 1563 dev_err(dev, "invalid num ele: thermtrips:%d\n", ret); 1564 return ret; 1565 } 1566 1567 i = 0; 1568 for (j = 0; j < n; j = j + 2) { 1569 if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM) 1570 continue; 1571 1572 tt[i].id = tlb[j]; 1573 tt[i].temp = tlb[j + 1]; 1574 i++; 1575 } 1576 1577 return 0; 1578 } 1579 1580 static void soctherm_oc_cfg_parse(struct device *dev, 1581 struct device_node *np_oc, 1582 struct soctherm_throt_cfg *stc) 1583 { 1584 u32 val; 1585 1586 if (of_property_read_bool(np_oc, "nvidia,polarity-active-low")) 1587 stc->oc_cfg.active_low = 1; 1588 else 1589 stc->oc_cfg.active_low = 0; 1590 1591 if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) { 1592 stc->oc_cfg.intr_en = 1; 1593 stc->oc_cfg.alarm_cnt_thresh = val; 1594 } 1595 1596 if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val)) 1597 stc->oc_cfg.throt_period = val; 1598 1599 if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val)) 1600 stc->oc_cfg.alarm_filter = val; 1601 1602 /* BRIEF throttling by default, do not support STICKY */ 1603 stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF; 1604 } 1605 1606 static int soctherm_throt_cfg_parse(struct device *dev, 1607 struct device_node *np, 1608 struct soctherm_throt_cfg *stc) 1609 { 1610 struct tegra_soctherm *ts = dev_get_drvdata(dev); 1611 int ret; 1612 u32 val; 1613 1614 ret = of_property_read_u32(np, "nvidia,priority", &val); 1615 if (ret) { 1616 dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name); 1617 return -EINVAL; 1618 } 1619 stc->priority = val; 1620 1621 ret = of_property_read_u32(np, ts->soc->use_ccroc ? 1622 "nvidia,cpu-throt-level" : 1623 "nvidia,cpu-throt-percent", &val); 1624 if (!ret) { 1625 if (ts->soc->use_ccroc && 1626 val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH) 1627 stc->cpu_throt_level = val; 1628 else if (!ts->soc->use_ccroc && val <= 100) 1629 stc->cpu_throt_depth = val; 1630 else 1631 goto err; 1632 } else { 1633 goto err; 1634 } 1635 1636 ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val); 1637 if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH) 1638 stc->gpu_throt_level = val; 1639 else 1640 goto err; 1641 1642 return 0; 1643 1644 err: 1645 dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n", 1646 stc->name); 1647 return -EINVAL; 1648 } 1649 1650 /** 1651 * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations 1652 * and register them as cooling devices. 1653 * @pdev: Pointer to platform_device struct 1654 */ 1655 static void soctherm_init_hw_throt_cdev(struct platform_device *pdev) 1656 { 1657 struct device *dev = &pdev->dev; 1658 struct tegra_soctherm *ts = dev_get_drvdata(dev); 1659 struct device_node *np_stc; 1660 const char *name; 1661 int i; 1662 1663 for (i = 0; i < THROTTLE_SIZE; i++) { 1664 ts->throt_cfgs[i].name = throt_names[i]; 1665 ts->throt_cfgs[i].id = i; 1666 ts->throt_cfgs[i].init = false; 1667 } 1668 1669 np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs"); 1670 if (!np_stc) { 1671 dev_info(dev, 1672 "throttle-cfg: no throttle-cfgs - not enabling\n"); 1673 return; 1674 } 1675 1676 for_each_child_of_node_scoped(np_stc, np_stcc) { 1677 struct soctherm_throt_cfg *stc; 1678 struct thermal_cooling_device *tcd; 1679 int err; 1680 1681 name = np_stcc->name; 1682 stc = find_throttle_cfg_by_name(ts, name); 1683 if (!stc) { 1684 dev_err(dev, 1685 "throttle-cfg: could not find %s\n", name); 1686 continue; 1687 } 1688 1689 if (stc->init) { 1690 dev_err(dev, "throttle-cfg: %s: redefined!\n", name); 1691 break; 1692 } 1693 1694 err = soctherm_throt_cfg_parse(dev, np_stcc, stc); 1695 if (err) 1696 continue; 1697 1698 if (stc->id >= THROTTLE_OC1) { 1699 soctherm_oc_cfg_parse(dev, np_stcc, stc); 1700 stc->init = true; 1701 } else { 1702 1703 tcd = thermal_of_cooling_device_register(np_stcc, 1704 (char *)name, ts, 1705 &throt_cooling_ops); 1706 if (IS_ERR_OR_NULL(tcd)) { 1707 dev_err(dev, 1708 "throttle-cfg: %s: failed to register cooling device\n", 1709 name); 1710 continue; 1711 } 1712 stc->cdev = tcd; 1713 stc->init = true; 1714 } 1715 1716 } 1717 1718 of_node_put(np_stc); 1719 } 1720 1721 /** 1722 * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config 1723 * @ts: pointer to a struct tegra_soctherm 1724 * @level: describing the level LOW/MED/HIGH of throttling 1725 * 1726 * It's necessary to set up the CPU-local CCROC NV_THERM instance with 1727 * the M/N values desired for each level. This function does this. 1728 * 1729 * This function pre-programs the CCROC NV_THERM levels in terms of 1730 * pre-configured "Low", "Medium" or "Heavy" throttle levels which are 1731 * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY. 1732 */ 1733 static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level) 1734 { 1735 u8 depth, dividend; 1736 u32 r; 1737 1738 switch (level) { 1739 case TEGRA_SOCTHERM_THROT_LEVEL_LOW: 1740 depth = 50; 1741 break; 1742 case TEGRA_SOCTHERM_THROT_LEVEL_MED: 1743 depth = 75; 1744 break; 1745 case TEGRA_SOCTHERM_THROT_LEVEL_HIGH: 1746 depth = 80; 1747 break; 1748 case TEGRA_SOCTHERM_THROT_LEVEL_NONE: 1749 return; 1750 default: 1751 return; 1752 } 1753 1754 dividend = THROT_DEPTH_DIVIDEND(depth); 1755 1756 /* setup PSKIP in ccroc nv_therm registers */ 1757 r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level)); 1758 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff); 1759 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf); 1760 ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level)); 1761 1762 r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level)); 1763 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1); 1764 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend); 1765 r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff); 1766 ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level)); 1767 } 1768 1769 /** 1770 * throttlectl_cpu_level_select() - program CPU pulse skipper config 1771 * @ts: pointer to a struct tegra_soctherm 1772 * @throt: the LIGHT/HEAVY of throttle event id 1773 * 1774 * Pulse skippers are used to throttle clock frequencies. This 1775 * function programs the pulse skippers based on @throt and platform 1776 * data. This function is used on SoCs which have CPU-local pulse 1777 * skipper control, such as T13x. It programs soctherm's interface to 1778 * Denver:CCROC NV_THERM in terms of Low, Medium and HIGH throttling 1779 * vectors. PSKIP_BYPASS mode is set as required per HW spec. 1780 */ 1781 static void throttlectl_cpu_level_select(struct tegra_soctherm *ts, 1782 enum soctherm_throttle_id throt) 1783 { 1784 u32 r, throt_vect; 1785 1786 /* Denver:CCROC NV_THERM interface N:3 Mapping */ 1787 switch (ts->throt_cfgs[throt].cpu_throt_level) { 1788 case TEGRA_SOCTHERM_THROT_LEVEL_LOW: 1789 throt_vect = THROT_VECT_LOW; 1790 break; 1791 case TEGRA_SOCTHERM_THROT_LEVEL_MED: 1792 throt_vect = THROT_VECT_MED; 1793 break; 1794 case TEGRA_SOCTHERM_THROT_LEVEL_HIGH: 1795 throt_vect = THROT_VECT_HIGH; 1796 break; 1797 default: 1798 throt_vect = THROT_VECT_NONE; 1799 break; 1800 } 1801 1802 r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); 1803 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1); 1804 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect); 1805 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect); 1806 writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); 1807 1808 /* bypass sequencer in soc_therm as it is programmed in ccroc */ 1809 r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1); 1810 writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU)); 1811 } 1812 1813 /** 1814 * throttlectl_cpu_mn() - program CPU pulse skipper configuration 1815 * @ts: pointer to a struct tegra_soctherm 1816 * @throt: the LIGHT/HEAVY of throttle event id 1817 * 1818 * Pulse skippers are used to throttle clock frequencies. This 1819 * function programs the pulse skippers based on @throt and platform 1820 * data. This function is used for CPUs that have "remote" pulse 1821 * skipper control, e.g., the CPU pulse skipper is controlled by the 1822 * SOC_THERM IP block. (SOC_THERM is located outside the CPU 1823 * complex.) 1824 */ 1825 static void throttlectl_cpu_mn(struct tegra_soctherm *ts, 1826 enum soctherm_throttle_id throt) 1827 { 1828 u32 r; 1829 int depth; 1830 u8 dividend; 1831 1832 depth = ts->throt_cfgs[throt].cpu_throt_depth; 1833 dividend = THROT_DEPTH_DIVIDEND(depth); 1834 1835 r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); 1836 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1); 1837 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend); 1838 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff); 1839 writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU)); 1840 1841 r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU)); 1842 r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff); 1843 r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf); 1844 writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU)); 1845 } 1846 1847 /** 1848 * throttlectl_gpu_level_select() - selects throttling level for GPU 1849 * @ts: pointer to a struct tegra_soctherm 1850 * @throt: the LIGHT/HEAVY of throttle event id 1851 * 1852 * This function programs soctherm's interface to GK20a NV_THERM to select 1853 * pre-configured "Low", "Medium" or "Heavy" throttle levels. 1854 * 1855 * Return: boolean true if HW was programmed 1856 */ 1857 static void throttlectl_gpu_level_select(struct tegra_soctherm *ts, 1858 enum soctherm_throttle_id throt) 1859 { 1860 u32 r, level, throt_vect; 1861 1862 level = ts->throt_cfgs[throt].gpu_throt_level; 1863 throt_vect = THROT_LEVEL_TO_DEPTH(level); 1864 r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU)); 1865 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1); 1866 r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect); 1867 writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU)); 1868 } 1869 1870 static int soctherm_oc_cfg_program(struct tegra_soctherm *ts, 1871 enum soctherm_throttle_id throt) 1872 { 1873 u32 r; 1874 struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg; 1875 1876 if (oc->mode == OC_THROTTLE_MODE_DISABLED) 1877 return -EINVAL; 1878 1879 r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1); 1880 r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode); 1881 r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low); 1882 r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1); 1883 writel(r, ts->regs + ALARM_CFG(throt)); 1884 writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt)); 1885 writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt)); 1886 writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt)); 1887 soctherm_oc_intr_enable(ts, throt, oc->intr_en); 1888 1889 return 0; 1890 } 1891 1892 /** 1893 * soctherm_throttle_program() - programs pulse skippers' configuration 1894 * @ts: pointer to a struct tegra_soctherm 1895 * @throt: the LIGHT/HEAVY of the throttle event id. 1896 * 1897 * Pulse skippers are used to throttle clock frequencies. 1898 * This function programs the pulse skippers. 1899 */ 1900 static void soctherm_throttle_program(struct tegra_soctherm *ts, 1901 enum soctherm_throttle_id throt) 1902 { 1903 u32 r; 1904 struct soctherm_throt_cfg stc = ts->throt_cfgs[throt]; 1905 1906 if (!stc.init) 1907 return; 1908 1909 if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt))) 1910 return; 1911 1912 /* Setup PSKIP parameters */ 1913 if (ts->soc->use_ccroc) 1914 throttlectl_cpu_level_select(ts, throt); 1915 else 1916 throttlectl_cpu_mn(ts, throt); 1917 1918 throttlectl_gpu_level_select(ts, throt); 1919 1920 r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority); 1921 writel(r, ts->regs + THROT_PRIORITY_CTRL(throt)); 1922 1923 r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0); 1924 writel(r, ts->regs + THROT_DELAY_CTRL(throt)); 1925 1926 r = readl(ts->regs + THROT_PRIORITY_LOCK); 1927 r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK); 1928 if (r >= stc.priority) 1929 return; 1930 r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK, 1931 stc.priority); 1932 writel(r, ts->regs + THROT_PRIORITY_LOCK); 1933 } 1934 1935 static void tegra_soctherm_throttle(struct device *dev) 1936 { 1937 struct tegra_soctherm *ts = dev_get_drvdata(dev); 1938 u32 v; 1939 int i; 1940 1941 /* configure LOW, MED and HIGH levels for CCROC NV_THERM */ 1942 if (ts->soc->use_ccroc) { 1943 throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW); 1944 throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED); 1945 throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH); 1946 } 1947 1948 /* Thermal HW throttle programming */ 1949 for (i = 0; i < THROTTLE_SIZE; i++) 1950 soctherm_throttle_program(ts, i); 1951 1952 v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1); 1953 if (ts->soc->use_ccroc) { 1954 ccroc_writel(ts, v, CCROC_GLOBAL_CFG); 1955 1956 v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER); 1957 v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1); 1958 ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER); 1959 } else { 1960 writel(v, ts->regs + THROT_GLOBAL_CFG); 1961 1962 v = readl(ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER); 1963 v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1); 1964 writel(v, ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER); 1965 } 1966 1967 /* initialize stats collection */ 1968 v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN | 1969 STATS_CTL_CLR_UP | STATS_CTL_EN_UP; 1970 writel(v, ts->regs + THERMCTL_STATS_CTL); 1971 } 1972 1973 static int soctherm_interrupts_init(struct platform_device *pdev, 1974 struct tegra_soctherm *tegra) 1975 { 1976 int ret; 1977 1978 ret = soctherm_oc_int_init(dev_fwnode(&pdev->dev), TEGRA_SOC_OC_IRQ_MAX); 1979 if (ret < 0) { 1980 dev_err(&pdev->dev, "soctherm_oc_int_init failed\n"); 1981 return ret; 1982 } 1983 1984 tegra->thermal_irq = platform_get_irq(pdev, 0); 1985 if (tegra->thermal_irq < 0) { 1986 dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n"); 1987 return 0; 1988 } 1989 1990 tegra->edp_irq = platform_get_irq(pdev, 1); 1991 if (tegra->edp_irq < 0) { 1992 dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n"); 1993 return 0; 1994 } 1995 1996 ret = devm_request_threaded_irq(&pdev->dev, 1997 tegra->thermal_irq, 1998 soctherm_thermal_isr, 1999 soctherm_thermal_isr_thread, 2000 IRQF_ONESHOT, 2001 dev_name(&pdev->dev), 2002 tegra); 2003 if (ret < 0) { 2004 dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n"); 2005 return ret; 2006 } 2007 2008 ret = devm_request_threaded_irq(&pdev->dev, 2009 tegra->edp_irq, 2010 soctherm_edp_isr, 2011 soctherm_edp_isr_thread, 2012 IRQF_ONESHOT, 2013 "soctherm_edp", 2014 tegra); 2015 if (ret < 0) { 2016 dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n"); 2017 return ret; 2018 } 2019 2020 return 0; 2021 } 2022 2023 static void soctherm_init(struct platform_device *pdev) 2024 { 2025 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 2026 const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs; 2027 int i; 2028 u32 pdiv, hotspot; 2029 2030 /* Initialize raw sensors */ 2031 for (i = 0; i < tegra->soc->num_tsensors; ++i) 2032 enable_tsensor(tegra, i); 2033 2034 /* program pdiv and hotspot offsets per THERM */ 2035 pdiv = readl(tegra->regs + SENSOR_PDIV); 2036 hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF); 2037 for (i = 0; i < tegra->soc->num_ttgs; ++i) { 2038 pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask, 2039 ttgs[i]->pdiv); 2040 /* hotspot offset from PLLX, doesn't need to configure PLLX */ 2041 if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX) 2042 continue; 2043 hotspot = REG_SET_MASK(hotspot, 2044 ttgs[i]->pllx_hotspot_mask, 2045 ttgs[i]->pllx_hotspot_diff); 2046 } 2047 writel(pdiv, tegra->regs + SENSOR_PDIV); 2048 writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF); 2049 2050 /* Configure hw throttle */ 2051 tegra_soctherm_throttle(&pdev->dev); 2052 } 2053 2054 static const struct of_device_id tegra_soctherm_of_match[] = { 2055 #ifdef CONFIG_ARCH_TEGRA_114_SOC 2056 { 2057 .compatible = "nvidia,tegra114-soctherm", 2058 .data = &tegra114_soctherm, 2059 }, 2060 #endif 2061 #ifdef CONFIG_ARCH_TEGRA_124_SOC 2062 { 2063 .compatible = "nvidia,tegra124-soctherm", 2064 .data = &tegra124_soctherm, 2065 }, 2066 #endif 2067 #ifdef CONFIG_ARCH_TEGRA_132_SOC 2068 { 2069 .compatible = "nvidia,tegra132-soctherm", 2070 .data = &tegra132_soctherm, 2071 }, 2072 #endif 2073 #ifdef CONFIG_ARCH_TEGRA_210_SOC 2074 { 2075 .compatible = "nvidia,tegra210-soctherm", 2076 .data = &tegra210_soctherm, 2077 }, 2078 #endif 2079 { }, 2080 }; 2081 MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match); 2082 2083 static int tegra_soctherm_probe(struct platform_device *pdev) 2084 { 2085 const struct of_device_id *match; 2086 struct tegra_soctherm *tegra; 2087 struct thermal_zone_device *z; 2088 struct tsensor_shared_calib shared_calib; 2089 struct tegra_soctherm_soc *soc; 2090 unsigned int i; 2091 int err; 2092 2093 match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node); 2094 if (!match) 2095 return -ENODEV; 2096 2097 soc = (struct tegra_soctherm_soc *)match->data; 2098 if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM) 2099 return -EINVAL; 2100 2101 tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL); 2102 if (!tegra) 2103 return -ENOMEM; 2104 2105 mutex_init(&tegra->thermctl_lock); 2106 dev_set_drvdata(&pdev->dev, tegra); 2107 2108 tegra->soc = soc; 2109 2110 tegra->regs = devm_platform_ioremap_resource_byname(pdev, "soctherm-reg"); 2111 if (IS_ERR(tegra->regs)) { 2112 dev_err(&pdev->dev, "can't get soctherm registers"); 2113 return PTR_ERR(tegra->regs); 2114 } 2115 2116 if (!tegra->soc->use_ccroc) { 2117 tegra->clk_regs = devm_platform_ioremap_resource_byname(pdev, "car-reg"); 2118 if (IS_ERR(tegra->clk_regs)) { 2119 dev_err(&pdev->dev, "can't get car clk registers"); 2120 return PTR_ERR(tegra->clk_regs); 2121 } 2122 } else { 2123 tegra->ccroc_regs = devm_platform_ioremap_resource_byname(pdev, "ccroc-reg"); 2124 if (IS_ERR(tegra->ccroc_regs)) { 2125 dev_err(&pdev->dev, "can't get ccroc registers"); 2126 return PTR_ERR(tegra->ccroc_regs); 2127 } 2128 } 2129 2130 tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm"); 2131 if (IS_ERR(tegra->reset)) { 2132 dev_err(&pdev->dev, "can't get soctherm reset\n"); 2133 return PTR_ERR(tegra->reset); 2134 } 2135 2136 tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor"); 2137 if (IS_ERR(tegra->clock_tsensor)) { 2138 dev_err(&pdev->dev, "can't get tsensor clock\n"); 2139 return PTR_ERR(tegra->clock_tsensor); 2140 } 2141 2142 tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm"); 2143 if (IS_ERR(tegra->clock_soctherm)) { 2144 dev_err(&pdev->dev, "can't get soctherm clock\n"); 2145 return PTR_ERR(tegra->clock_soctherm); 2146 } 2147 2148 tegra->calib = devm_kcalloc(&pdev->dev, 2149 soc->num_tsensors, sizeof(u32), 2150 GFP_KERNEL); 2151 if (!tegra->calib) 2152 return -ENOMEM; 2153 2154 /* calculate shared calibration data */ 2155 err = tegra_calc_shared_calib(soc->tfuse, &shared_calib); 2156 if (err) 2157 return err; 2158 2159 /* calculate tsensor calibration data */ 2160 for (i = 0; i < soc->num_tsensors; ++i) { 2161 err = tegra_calc_tsensor_calib(&soc->tsensors[i], 2162 &shared_calib, 2163 &tegra->calib[i]); 2164 if (err) 2165 return err; 2166 } 2167 2168 tegra->thermctl_tzs = devm_kcalloc(&pdev->dev, 2169 soc->num_ttgs, sizeof(z), 2170 GFP_KERNEL); 2171 if (!tegra->thermctl_tzs) 2172 return -ENOMEM; 2173 2174 err = soctherm_clk_enable(pdev, true); 2175 if (err) 2176 return err; 2177 2178 soctherm_thermtrips_parse(pdev); 2179 2180 soctherm_init_hw_throt_cdev(pdev); 2181 2182 soctherm_init(pdev); 2183 2184 for (i = 0; i < soc->num_ttgs; ++i) { 2185 struct tegra_thermctl_zone *zone = 2186 devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL); 2187 if (!zone) { 2188 err = -ENOMEM; 2189 goto disable_clocks; 2190 } 2191 2192 zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset; 2193 zone->dev = &pdev->dev; 2194 zone->sg = soc->ttgs[i]; 2195 zone->ts = tegra; 2196 2197 z = devm_thermal_of_zone_register(&pdev->dev, 2198 soc->ttgs[i]->id, zone, 2199 &tegra_of_thermal_ops); 2200 if (IS_ERR(z)) { 2201 err = PTR_ERR(z); 2202 dev_err(&pdev->dev, "failed to register sensor: %d\n", 2203 err); 2204 goto disable_clocks; 2205 } 2206 2207 zone->tz = z; 2208 tegra->thermctl_tzs[soc->ttgs[i]->id] = z; 2209 2210 /* Configure hw trip points */ 2211 err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z); 2212 if (err) 2213 goto disable_clocks; 2214 } 2215 2216 err = soctherm_interrupts_init(pdev, tegra); 2217 2218 soctherm_debug_init(pdev); 2219 2220 return 0; 2221 2222 disable_clocks: 2223 soctherm_clk_enable(pdev, false); 2224 2225 return err; 2226 } 2227 2228 static void tegra_soctherm_remove(struct platform_device *pdev) 2229 { 2230 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 2231 2232 debugfs_remove_recursive(tegra->debugfs_dir); 2233 2234 soctherm_clk_enable(pdev, false); 2235 } 2236 2237 static int __maybe_unused soctherm_suspend(struct device *dev) 2238 { 2239 struct platform_device *pdev = to_platform_device(dev); 2240 2241 soctherm_clk_enable(pdev, false); 2242 2243 return 0; 2244 } 2245 2246 static int __maybe_unused soctherm_resume(struct device *dev) 2247 { 2248 struct platform_device *pdev = to_platform_device(dev); 2249 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 2250 struct tegra_soctherm_soc *soc = tegra->soc; 2251 int err, i; 2252 2253 err = soctherm_clk_enable(pdev, true); 2254 if (err) { 2255 dev_err(&pdev->dev, 2256 "Resume failed: enable clocks failed\n"); 2257 return err; 2258 } 2259 2260 soctherm_init(pdev); 2261 2262 for (i = 0; i < soc->num_ttgs; ++i) { 2263 struct thermal_zone_device *tz; 2264 2265 tz = tegra->thermctl_tzs[soc->ttgs[i]->id]; 2266 err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz); 2267 if (err) { 2268 dev_err(&pdev->dev, 2269 "Resume failed: set hwtrips failed\n"); 2270 return err; 2271 } 2272 } 2273 2274 return 0; 2275 } 2276 2277 static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume); 2278 2279 static struct platform_driver tegra_soctherm_driver = { 2280 .probe = tegra_soctherm_probe, 2281 .remove = tegra_soctherm_remove, 2282 .driver = { 2283 .name = "tegra_soctherm", 2284 .pm = &tegra_soctherm_pm, 2285 .of_match_table = tegra_soctherm_of_match, 2286 }, 2287 }; 2288 module_platform_driver(tegra_soctherm_driver); 2289 2290 MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>"); 2291 MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver"); 2292 MODULE_LICENSE("GPL v2"); 2293