/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 * Copyright (C) 2014 Samsung Electronics
 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 * Lukasz Majewski <l.majewski@samsung.com>
 *
 * Copyright (C) 2011 Samsung Electronics
 * Donggeun Kim <dg77.kim@samsung.com>
 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_tmu.h"
#include "../thermal_core.h"

/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO		0x0
#define EXYNOS_TMU_REG_CONTROL		0x20
#define EXYNOS_TMU_REG_STATUS		0x28
#define EXYNOS_TMU_REG_CURRENT_TEMP	0x40
#define EXYNOS_TMU_REG_INTEN		0x70
#define EXYNOS_TMU_REG_INTSTAT		0x74
#define EXYNOS_TMU_REG_INTCLEAR		0x78

#define EXYNOS_TMU_TEMP_MASK		0xff
#define EXYNOS_TMU_REF_VOLTAGE_SHIFT	24
#define EXYNOS_TMU_REF_VOLTAGE_MASK	0x1f
#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK	0xf
#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT	8
#define EXYNOS_TMU_CORE_EN_SHIFT	0

/* Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON1	0x10

/* Exynos4210 specific registers */
#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP	0x44
#define EXYNOS4210_TMU_REG_TRIG_LEVEL0		0x50

/* Exynos5250, Exynos4412, Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON2	0x14
#define EXYNOS_THD_TEMP_RISE		0x50
#define EXYNOS_THD_TEMP_FALL		0x54
#define EXYNOS_EMUL_CON			0x80

#define EXYNOS_TRIMINFO_RELOAD_ENABLE	1
#define EXYNOS_TRIMINFO_25_SHIFT	0
#define EXYNOS_TRIMINFO_85_SHIFT	8
#define EXYNOS_TMU_TRIP_MODE_SHIFT	13
#define EXYNOS_TMU_TRIP_MODE_MASK	0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT	12

#define EXYNOS_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS_TMU_INTEN_RISE1_SHIFT	4
#define EXYNOS_TMU_INTEN_RISE2_SHIFT	8
#define EXYNOS_TMU_INTEN_RISE3_SHIFT	12
#define EXYNOS_TMU_INTEN_FALL0_SHIFT	16

#define EXYNOS_EMUL_TIME		0x57F0
#define EXYNOS_EMUL_TIME_MASK		0xffff
#define EXYNOS_EMUL_TIME_SHIFT		16
#define EXYNOS_EMUL_DATA_SHIFT		8
#define EXYNOS_EMUL_DATA_MASK		0xFF
#define EXYNOS_EMUL_ENABLE		0x1

/* Exynos5260 specific */
#define EXYNOS5260_TMU_REG_INTEN	0xC0
#define EXYNOS5260_TMU_REG_INTSTAT	0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR	0xC8
#define EXYNOS5260_EMUL_CON		0x100

/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE	6
#define EXYNOS4412_MUX_ADDR_SHIFT	20

/* Exynos5433 specific registers */
#define EXYNOS5433_TMU_REG_CONTROL1		0x024
#define EXYNOS5433_TMU_SAMPLING_INTERVAL	0x02c
#define EXYNOS5433_TMU_COUNTER_VALUE0		0x030
#define EXYNOS5433_TMU_COUNTER_VALUE1		0x034
#define EXYNOS5433_TMU_REG_CURRENT_TEMP1	0x044
#define EXYNOS5433_THD_TEMP_RISE3_0		0x050
#define EXYNOS5433_THD_TEMP_RISE7_4		0x054
#define EXYNOS5433_THD_TEMP_FALL3_0		0x060
#define EXYNOS5433_THD_TEMP_FALL7_4		0x064
#define EXYNOS5433_TMU_REG_INTEN		0x0c0
#define EXYNOS5433_TMU_REG_INTPEND		0x0c8
#define EXYNOS5433_TMU_EMUL_CON			0x110
#define EXYNOS5433_TMU_PD_DET_EN		0x130

#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT	16
#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT	23
#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK	\
			(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK	BIT(23)

#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING	0
#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING	1

#define EXYNOS5433_PD_DET_EN			1

/* Exynos5440 specific registers */
#define EXYNOS5440_TMU_S0_7_TRIM		0x000
#define EXYNOS5440_TMU_S0_7_CTRL		0x020
#define EXYNOS5440_TMU_S0_7_DEBUG		0x040
#define EXYNOS5440_TMU_S0_7_TEMP		0x0f0
#define EXYNOS5440_TMU_S0_7_TH0			0x110
#define EXYNOS5440_TMU_S0_7_TH1			0x130
#define EXYNOS5440_TMU_S0_7_TH2			0x150
#define EXYNOS5440_TMU_S0_7_IRQEN		0x210
#define EXYNOS5440_TMU_S0_7_IRQ			0x230
/* exynos5440 common registers */
#define EXYNOS5440_TMU_IRQ_STATUS		0x000
#define EXYNOS5440_TMU_PMIN			0x004

#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT	1
#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT	2
#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT	3
#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT	4
#define EXYNOS5440_TMU_TH_RISE4_SHIFT		24
#define EXYNOS5440_EFUSE_SWAP_OFFSET		8

/* Exynos7 specific registers */
#define EXYNOS7_THD_TEMP_RISE7_6		0x50
#define EXYNOS7_THD_TEMP_FALL7_6		0x60
#define EXYNOS7_TMU_REG_INTEN			0x110
#define EXYNOS7_TMU_REG_INTPEND			0x118
#define EXYNOS7_TMU_REG_EMUL_CON		0x160

#define EXYNOS7_TMU_TEMP_MASK			0x1ff
#define EXYNOS7_PD_DET_EN_SHIFT			23
#define EXYNOS7_TMU_INTEN_RISE0_SHIFT		0
#define EXYNOS7_TMU_INTEN_RISE1_SHIFT		1
#define EXYNOS7_TMU_INTEN_RISE2_SHIFT		2
#define EXYNOS7_TMU_INTEN_RISE3_SHIFT		3
#define EXYNOS7_TMU_INTEN_RISE4_SHIFT		4
#define EXYNOS7_TMU_INTEN_RISE5_SHIFT		5
#define EXYNOS7_TMU_INTEN_RISE6_SHIFT		6
#define EXYNOS7_TMU_INTEN_RISE7_SHIFT		7
#define EXYNOS7_EMUL_DATA_SHIFT			7
#define EXYNOS7_EMUL_DATA_MASK			0x1ff

#define EXYNOS_FIRST_POINT_TRIM			25
#define EXYNOS_SECOND_POINT_TRIM		85

#define EXYNOS_NOISE_CANCEL_MODE		4

#define MCELSIUS	1000
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @sclk: pointer to the clock structure for accessing the tmu special clk.
 * @efuse_value: SoC defined fuse value
 * @min_efuse_value: minimum valid trimming data
 * @max_efuse_value: maximum valid trimming data
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @tzd: pointer to the thermal zone device registered with the thermal core.
 * @ntrip: number of supported trip points.
 * @enabled: current status of TMU device
 * @tmu_initialize: SoC specific TMU initialization method
 * @tmu_control: SoC specific TMU control method
 * @tmu_read: SoC specific TMU temperature read method
 * @tmu_set_emulation: SoC specific TMU emulation setting method
 * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec, *sclk;
	u32 efuse_value;
	u32 min_efuse_value;
	u32 max_efuse_value;
	u16 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_zone_device *tzd;
	unsigned int ntrip;
	bool enabled;

	int (*tmu_initialize)(struct platform_device *pdev);
	void (*tmu_control)(struct platform_device *pdev, bool on);
	int (*tmu_read)(struct exynos_tmu_data *data);
	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};

static void exynos_report_trigger(struct exynos_tmu_data *p)
{
	char data[10], *envp[] = { data, NULL };
	struct thermal_zone_device *tz = p->tzd;
	int temp;
	unsigned int i;

	if (!tz) {
		pr_err("No thermal zone device defined\n");
		return;
	}

	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

	mutex_lock(&tz->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		tz->ops->get_trip_temp(tz, i, &temp);
		if (tz->last_temperature < temp)
			break;
	}

	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&tz->lock);
}

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (pdata->cal_type == TYPE_ONE_POINT_TRIMMING)
		return temp + data->temp_error1 - EXYNOS_FIRST_POINT_TRIM;

	return (temp - EXYNOS_FIRST_POINT_TRIM) *
		(data->temp_error2 - data->temp_error1) /
		(EXYNOS_SECOND_POINT_TRIM - EXYNOS_FIRST_POINT_TRIM) +
		data->temp_error1;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (pdata->cal_type == TYPE_ONE_POINT_TRIMMING)
		return temp_code - data->temp_error1 + EXYNOS_FIRST_POINT_TRIM;

	return (temp_code - data->temp_error1) *
		(EXYNOS_SECOND_POINT_TRIM - EXYNOS_FIRST_POINT_TRIM) /
		(data->temp_error2 - data->temp_error1) +
		EXYNOS_FIRST_POINT_TRIM;
}

static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
	    (data->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > data->max_efuse_value))
		data->temp_error1 = data->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(data->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;
}

static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
{
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	unsigned long temp;
	int i;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		return 0;
	}

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL)
			continue;

		temp = trips[i].temperature / MCELSIUS;
		if (falling)
			temp -= (trips[i].hysteresis / MCELSIUS);
		else
			threshold &= ~(0xff << 8 * i);

		threshold |= temp_to_code(data, temp) << 8 * i;
	}

	return threshold;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	int ret;

	if (of_thermal_get_ntrips(data->tzd) > data->ntrip) {
		dev_info(&pdev->dev,
			 "More trip points than supported by this TMU.\n");
		dev_info(&pdev->dev,
			 "%d trip points should be configured in polling mode.\n",
			 (of_thermal_get_ntrips(data->tzd) - data->ntrip));
	}

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	ret = data->tmu_initialize(pdev);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS3250)
		con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
	con |= (EXYNOS_NOISE_CANCEL_MODE << EXYNOS_TMU_TRIP_MODE_SHIFT);

	return con;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_control(pdev, on);
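	/*
	 * exynos_get_temp() bails out with -EINVAL while this flag is
	 * false, so it must track the hardware enable state programmed
	 * by tmu_control() above.
	 */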
	data->enabled = on;
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	int ret = 0, threshold_code, i;
	unsigned long reference, temp;
	unsigned int status;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));

	/* Write temperature code for threshold */
	reference = trips[0].temperature / MCELSIUS;
	threshold_code = temp_to_code(data, reference);
	if (threshold_code < 0) {
		ret = threshold_code;
		goto out;
	}
	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		temp = trips[i].temperature / MCELSIUS;
		writeb(temp - reference, data->base +
			EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos4412_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(data->tzd);
	unsigned int status, trim_info, con, ctrl, rising_threshold;
	int ret = 0, threshold_code, i;
	unsigned long crit_temp = 0;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* On exynos5420 the triminfo register is in the shared space */
	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
	else
		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL) {
			crit_temp = trips[i].temperature;
			break;
		}
	}

	if (i == of_thermal_get_ntrips(data->tzd)) {
		pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
		       __func__);
		ret = -EINVAL;
		goto out;
	}

	threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
	/* 1-4 level to be assigned in th0 reg */
	rising_threshold &= ~(0xff << 8 * i);
	rising_threshold |= threshold_code << 8 * i;
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
	con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);

out:
	return ret;
}

static int exynos5433_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	struct thermal_zone_device *tz = data->tzd;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int temp, temp_hist;
	int ret = 0, threshold_code, i, sensor_id, cal_type;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	sanitize_temp_error(data, trim_info);

	/* Read the temperature sensor id */
	sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
				>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
	dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);

	/* Read the calibration mode */
	writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
	cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
				>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;

	switch (cal_type) {
	case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
		pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
		break;
	default:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	}

	dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
		 cal_type ? 2 : 1);
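
	/*
	 * Each Exynos5433 THD_TEMP_* register packs four 8-bit trip codes,
	 * one byte per level: trips 0-3 land in *_RISE3_0/*_FALL3_0 and
	 * trips 4-7 in *_RISE7_4/*_FALL7_4, hence the j * 8 shifts in the
	 * loop below.
	 */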

	/* Write temperature code for rising and falling threshold */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		int rising_reg_offset, falling_reg_offset;
		int j = 0;

		switch (i) {
		case 0:
		case 1:
		case 2:
		case 3:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
			j = i;
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
			j = i - 4;
			break;
		default:
			continue;
		}

		/* Write temperature code for rising threshold */
		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;
		threshold_code = temp_to_code(data, temp);

		rising_threshold = readl(data->base + rising_reg_offset);
		rising_threshold |= (threshold_code << j * 8);
		writel(rising_threshold, data->base + rising_reg_offset);

		/* Write temperature code for falling threshold */
		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);
		threshold_code = temp_to_code(data, temp_hist);

		falling_threshold = readl(data->base + falling_reg_offset);
		falling_threshold &= ~(0xff << j * 8);
		falling_threshold |= (threshold_code << j * 8);
		writel(falling_threshold, data->base + falling_reg_offset);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	unsigned int trim_info = 0, con, rising_threshold;
	int threshold_code;
	int crit_temp = 0;

	/*
	 * For exynos5440 soc triminfo value is swapped between TMU0 and
	 * TMU2, so the below logic is needed.
	 */
	switch (data->id) {
	case 0:
		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 1:
		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 2:
		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
	}
	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
		threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
		/* 5th level to be assigned in th2 reg */
		rising_threshold =
			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
	}
	/* Clear the PMIN in the common TMU register */
	if (!data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);

	return 0;
}

static int exynos7_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;
	int temp, temp_hist;
	unsigned int reg_off, bit_off;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
	if (!data->temp_error1 ||
	    (data->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > data->max_efuse_value))
		data->temp_error1 = data->efuse_value & EXYNOS_TMU_TEMP_MASK;

	/* Write temperature code for rising and falling threshold */
	for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
		/*
		 * On exynos7 there are 4 rising and 4 falling threshold
		 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
		 * register holds the value of two threshold levels (at bit
		 * offsets 0 and 16). Based on the fact that there are at most
		 * eight possible trigger levels, calculate the register and
		 * bit offsets where the threshold levels are to be written.
		 *
		 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
		 * [24:16] - Threshold level 7
		 * [8:0] - Threshold level 6
		 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
		 * [24:16] - Threshold level 5
		 * [8:0] - Threshold level 4
		 *
		 * and similarly for falling thresholds.
		 *
		 * Based on the above, calculate the register and bit offsets
		 * for rising/falling threshold levels and populate them.
		 */
		reg_off = ((7 - i) / 2) * 4;
		bit_off = ((8 - i) % 2);

		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;

		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);

		/* Set 9-bit temperature code for rising threshold levels */
		threshold_code = temp_to_code(data, temp);
		rising_threshold = readl(data->base +
			EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
		rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		rising_threshold |= threshold_code << (16 * bit_off);
		writel(rising_threshold,
		       data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);

		/* Set 9-bit temperature code for falling threshold levels */
		threshold_code = temp_to_code(data, temp_hist);
		falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		falling_threshold |= threshold_code << (16 * bit_off);
		writel(falling_threshold,
		       data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS_TMU_INTEN_RISE0_SHIFT);

		if (data->soc != SOC_ARCH_EXYNOS4210)
			interrupt_en |=
				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en, pd_det_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
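	/*
	 * PD_DET_EN presumably arms the sensor's power-down detection
	 * logic (an assumption based on the register name, not the TRM);
	 * here it is simply toggled together with the core enable bit.
	 */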

	writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
	writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
		interrupt_en |=
			interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
}

static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static int exynos_get_temp(void *p, int *temp)
{
	struct exynos_tmu_data *data = p;
	int value, ret = 0;

	if (!data || !data->tmu_read || !data->enabled)
		return -EINVAL;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	value = data->tmu_read(data);
	if (value < 0)
		ret = value;
	else
		*temp = code_to_temp(data, value) * MCELSIUS;

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}

#ifdef CONFIG_THERMAL_EMULATION
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
			    int temp)
{
	if (temp) {
		temp /= MCELSIUS;

		if (data->soc != SOC_ARCH_EXYNOS5440) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		if (data->soc == SOC_ARCH_EXYNOS7) {
			val &= ~(EXYNOS7_EMUL_DATA_MASK <<
				EXYNOS7_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS7_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		} else {
			val &= ~(EXYNOS_EMUL_DATA_MASK <<
				EXYNOS_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		}
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	return val;
}

static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;
	u32 emul_con;

	if (data->soc == SOC_ARCH_EXYNOS5260)
		emul_con = EXYNOS5260_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS5433)
		emul_con = EXYNOS5433_TMU_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS7)
		emul_con = EXYNOS7_TMU_REG_EMUL_CON;
	else
		emul_con = EXYNOS_EMUL_CON;

	val = readl(data->base + emul_con);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + emul_con);
}

static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;

	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}

static int exynos_tmu_set_emulation(void *drv_data, int temp)
{
	struct exynos_tmu_data *data = drv_data;
	int ret = -EINVAL;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_set_emulation(data, temp);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, int temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);

	/* "temp_code" should range between 75 and 175 */
	return (ret < 75 || ret > 175) ? -ENODATA : ret;
}

static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}

static int exynos5440_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
}

static int exynos7_tmu_read(struct exynos_tmu_data *data)
{
	return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
		EXYNOS7_TMU_TEMP_MASK;
}

static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	data->tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;
	u32 tmu_intstat, tmu_intclear;

	if (data->soc == SOC_ARCH_EXYNOS5260) {
		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
	} else if (data->soc == SOC_ARCH_EXYNOS7) {
		tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
	} else if (data->soc == SOC_ARCH_EXYNOS5433) {
		tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
	} else {
		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
	}

	val_irq = readl(data->base + tmu_intstat);
	/*
	 * Clear the interrupts. Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register. Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + tmu_intclear);
}

static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;

	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
	/* clear the interrupts */
	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
}

static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS3250,
	}, {
		.compatible = "samsung,exynos4210-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS4210,
	}, {
		.compatible = "samsung,exynos4412-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS4412,
	}, {
		.compatible = "samsung,exynos5250-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5250,
	}, {
		.compatible = "samsung,exynos5260-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5260,
	}, {
		.compatible = "samsung,exynos5420-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5420,
	}, {
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (const void *)SOC_ARCH_EXYNOS5420_TRIMINFO,
	}, {
		.compatible = "samsung,exynos5433-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5433,
	}, {
		.compatible = "samsung,exynos5440-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5440,
	}, {
		.compatible = "samsung,exynos7-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS7,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static int exynos_of_sensor_conf(struct device_node *np,
				 struct exynos_tmu_platform_data *pdata)
{
	u32 value;
	int ret;

	of_node_get(np);

	ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
	pdata->gain = (u8)value;
	of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
	pdata->reference_voltage = (u8)value;

	of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);

	of_node_put(np);
	return 0;
}

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct exynos_tmu_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	exynos_of_sensor_conf(pdev->dev.of_node, pdata);
	data->pdata = pdata;
	data->soc = (enum soc_type)of_device_get_match_data(&pdev->dev);

	switch (data->soc) {
	case SOC_ARCH_EXYNOS4210:
		data->tmu_initialize = exynos4210_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4210_tmu_read;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 4;
		data->efuse_value = 55;
		data->min_efuse_value = 40;
		data->max_efuse_value = 100;
		break;
	case SOC_ARCH_EXYNOS3250:
	case SOC_ARCH_EXYNOS4412:
	case SOC_ARCH_EXYNOS5250:
	case SOC_ARCH_EXYNOS5260:
	case SOC_ARCH_EXYNOS5420:
	case SOC_ARCH_EXYNOS5420_TRIMINFO:
		data->tmu_initialize = exynos4412_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 4;
		data->efuse_value = 55;
		if (data->soc != SOC_ARCH_EXYNOS5420 &&
		    data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO)
			data->min_efuse_value = 40;
		else
			data->min_efuse_value = 0;
		data->max_efuse_value = 100;
		break;
	case SOC_ARCH_EXYNOS5433:
		data->tmu_initialize = exynos5433_tmu_initialize;
		data->tmu_control = exynos5433_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 8;
		data->efuse_value = 75;
		data->min_efuse_value = 40;
		data->max_efuse_value = 150;
		break;
	case SOC_ARCH_EXYNOS5440:
		data->tmu_initialize = exynos5440_tmu_initialize;
		data->tmu_control = exynos5440_tmu_control;
		data->tmu_read = exynos5440_tmu_read;
		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
		data->ntrip = 4;
		data->efuse_value = 0x5d2d;
		data->min_efuse_value = 16;
		data->max_efuse_value = 76;
		break;
	case SOC_ARCH_EXYNOS7:
		data->tmu_initialize = exynos7_tmu_initialize;
		data->tmu_control = exynos7_tmu_control;
		data->tmu_read = exynos7_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 8;
		data->efuse_value = 75;
		data->min_efuse_value = 15;
		data->max_efuse_value = 100;
		break;
	default:
		dev_err(&pdev->dev, "Platform not supported\n");
		return -EINVAL;
	}

	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
	    data->soc != SOC_ARCH_EXYNOS5440)
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					 resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static const struct thermal_zone_of_device_ops exynos_sensor_ops = {
	.get_temp = exynos_get_temp,
	.set_emul_temp = exynos_tmu_set_emulation,
};

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	ret = exynos_map_dt_data(pdev);
	if (ret)
		goto err_sensor;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		ret = PTR_ERR(data->clk);
		goto err_sensor;
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			ret = PTR_ERR(data->clk_sec);
			goto err_sensor;
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to get clock\n");
			goto err_sensor;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		goto err_clk_sec;
	}

	switch (data->soc) {
	case SOC_ARCH_EXYNOS5433:
	case SOC_ARCH_EXYNOS7:
		data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
		if (IS_ERR(data->sclk)) {
			dev_err(&pdev->dev, "Failed to get sclk\n");
			ret = PTR_ERR(data->sclk);
			goto err_clk;
		} else {
			ret = clk_prepare_enable(data->sclk);
			if (ret) {
				dev_err(&pdev->dev, "Failed to enable sclk\n");
				goto err_clk;
			}
		}
		break;
	default:
		break;
	}

	/*
	 * data->tzd must be registered before calling exynos_tmu_initialize(),
	 * requesting irq and calling exynos_tmu_control().
	 */
	data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
						    &exynos_sensor_ops);
	if (IS_ERR(data->tzd)) {
		ret = PTR_ERR(data->tzd);
		dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
		goto err_sclk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_thermal;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
			       IRQF_TRIGGER_RISING | IRQF_SHARED,
			       dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_thermal;
	}

	exynos_tmu_control(pdev, true);
	return 0;

err_thermal:
	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
err_sclk:
	clk_disable_unprepare(data->sclk);
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
err_sensor:
	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tzd = data->tzd;

	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
	exynos_tmu_control(pdev, false);

	clk_disable_unprepare(data->sclk);
	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");