/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2014 Samsung Electronics
 *  Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 *  Lukasz Majewski <l.majewski@samsung.com>
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_tmu.h"
#include "../thermal_core.h"

/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO		0x0
#define EXYNOS_TMU_REG_CONTROL		0x20
#define EXYNOS_TMU_REG_STATUS		0x28
#define EXYNOS_TMU_REG_CURRENT_TEMP	0x40
#define EXYNOS_TMU_REG_INTEN		0x70
#define EXYNOS_TMU_REG_INTSTAT		0x74
#define EXYNOS_TMU_REG_INTCLEAR		0x78

#define EXYNOS_TMU_TEMP_MASK		0xff
#define EXYNOS_TMU_REF_VOLTAGE_SHIFT	24
#define EXYNOS_TMU_REF_VOLTAGE_MASK	0x1f
#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK	0xf
#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT	8
#define EXYNOS_TMU_CORE_EN_SHIFT	0

/* Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON1	0x10

/* Exynos4210 specific registers */
#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP	0x44
#define EXYNOS4210_TMU_REG_TRIG_LEVEL0	0x50

/* Exynos5250, Exynos4412, Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON2	0x14
#define EXYNOS_THD_TEMP_RISE		0x50
#define EXYNOS_THD_TEMP_FALL		0x54
#define EXYNOS_EMUL_CON			0x80

#define EXYNOS_TRIMINFO_RELOAD_ENABLE	1
#define EXYNOS_TRIMINFO_25_SHIFT	0
#define EXYNOS_TRIMINFO_85_SHIFT	8
#define EXYNOS_TMU_TRIP_MODE_SHIFT	13
#define EXYNOS_TMU_TRIP_MODE_MASK	0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT	12

#define EXYNOS_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS_TMU_INTEN_RISE1_SHIFT	4
#define EXYNOS_TMU_INTEN_RISE2_SHIFT	8
#define EXYNOS_TMU_INTEN_RISE3_SHIFT	12
#define EXYNOS_TMU_INTEN_FALL0_SHIFT	16

#define EXYNOS_EMUL_TIME	0x57F0
#define EXYNOS_EMUL_TIME_MASK	0xffff
#define EXYNOS_EMUL_TIME_SHIFT	16
#define EXYNOS_EMUL_DATA_SHIFT	8
#define EXYNOS_EMUL_DATA_MASK	0xFF
#define EXYNOS_EMUL_ENABLE	0x1

/* Exynos5260 specific */
#define EXYNOS5260_TMU_REG_INTEN	0xC0
#define EXYNOS5260_TMU_REG_INTSTAT	0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR	0xC8
#define EXYNOS5260_EMUL_CON		0x100

/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE	6
#define EXYNOS4412_MUX_ADDR_SHIFT	20

/* Exynos5433 specific registers */
#define EXYNOS5433_TMU_REG_CONTROL1		0x024
#define EXYNOS5433_TMU_SAMPLING_INTERVAL	0x02c
#define EXYNOS5433_TMU_COUNTER_VALUE0		0x030
#define EXYNOS5433_TMU_COUNTER_VALUE1		0x034
#define EXYNOS5433_TMU_REG_CURRENT_TEMP1	0x044
#define EXYNOS5433_THD_TEMP_RISE3_0		0x050
#define EXYNOS5433_THD_TEMP_RISE7_4		0x054
#define EXYNOS5433_THD_TEMP_FALL3_0		0x060
#define EXYNOS5433_THD_TEMP_FALL7_4		0x064
#define EXYNOS5433_TMU_REG_INTEN		0x0c0
#define EXYNOS5433_TMU_REG_INTPEND		0x0c8
#define EXYNOS5433_TMU_EMUL_CON			0x110
#define EXYNOS5433_TMU_PD_DET_EN		0x130

#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT	16
#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT	23
#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK	\
			(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK	BIT(23)

#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING	0
#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING	1

#define EXYNOS5433_PD_DET_EN			1

/* exynos5440 specific registers */
#define EXYNOS5440_TMU_S0_7_TRIM		0x000
#define EXYNOS5440_TMU_S0_7_CTRL		0x020
#define EXYNOS5440_TMU_S0_7_DEBUG		0x040
#define EXYNOS5440_TMU_S0_7_TEMP		0x0f0
#define EXYNOS5440_TMU_S0_7_TH0			0x110
#define EXYNOS5440_TMU_S0_7_TH1			0x130
#define EXYNOS5440_TMU_S0_7_TH2			0x150
#define EXYNOS5440_TMU_S0_7_IRQEN		0x210
#define EXYNOS5440_TMU_S0_7_IRQ			0x230
/* exynos5440 common registers */
#define EXYNOS5440_TMU_IRQ_STATUS		0x000
#define EXYNOS5440_TMU_PMIN			0x004

#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT	1
#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT	2
#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT	3
#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT	4
#define EXYNOS5440_TMU_TH_RISE4_SHIFT		24
#define EXYNOS5440_EFUSE_SWAP_OFFSET		8

/* Exynos7 specific registers */
#define EXYNOS7_THD_TEMP_RISE7_6		0x50
#define EXYNOS7_THD_TEMP_FALL7_6		0x60
#define EXYNOS7_TMU_REG_INTEN			0x110
#define EXYNOS7_TMU_REG_INTPEND			0x118
#define EXYNOS7_TMU_REG_EMUL_CON		0x160

#define EXYNOS7_TMU_TEMP_MASK			0x1ff
#define EXYNOS7_PD_DET_EN_SHIFT			23
#define EXYNOS7_TMU_INTEN_RISE0_SHIFT		0
#define EXYNOS7_TMU_INTEN_RISE1_SHIFT		1
#define EXYNOS7_TMU_INTEN_RISE2_SHIFT		2
#define EXYNOS7_TMU_INTEN_RISE3_SHIFT		3
#define EXYNOS7_TMU_INTEN_RISE4_SHIFT		4
#define EXYNOS7_TMU_INTEN_RISE5_SHIFT		5
#define EXYNOS7_TMU_INTEN_RISE6_SHIFT		6
#define EXYNOS7_TMU_INTEN_RISE7_SHIFT		7
#define EXYNOS7_EMUL_DATA_SHIFT			7
#define EXYNOS7_EMUL_DATA_MASK			0x1ff

#define EXYNOS_FIRST_POINT_TRIM			25
#define EXYNOS_SECOND_POINT_TRIM		85

#define EXYNOS_NOISE_CANCEL_MODE		4

#define MCELSIUS	1000
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @sclk: pointer to the clock structure for accessing the tmu special clk.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @tzd: pointer to thermal_zone_device structure registered with core thermal.
 * @ntrip: number of supported trip points.
 * @enabled: current status of TMU device
 * @tmu_initialize: SoC specific TMU initialization method
 * @tmu_control: SoC specific TMU control method
 * @tmu_read: SoC specific TMU temperature read method
 * @tmu_set_emulation: SoC specific TMU emulation setting method
 * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec, *sclk;
	u16 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_zone_device *tzd;
	unsigned int ntrip;
	bool enabled;

	int (*tmu_initialize)(struct platform_device *pdev);
	void (*tmu_control)(struct platform_device *pdev, bool on);
	int (*tmu_read)(struct exynos_tmu_data *data);
	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};

static void exynos_report_trigger(struct exynos_tmu_data *p)
{
	char data[10], *envp[] = { data, NULL };
	struct thermal_zone_device *tz = p->tzd;
	int temp;
	unsigned int i;

	if (!tz) {
		pr_err("No thermal zone device defined\n");
		return;
	}

	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

	mutex_lock(&tz->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		tz->ops->get_trip_temp(tz, i, &temp);
		if (tz->last_temperature < temp)
			break;
	}

	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&tz->lock);
}

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (pdata->cal_type == TYPE_ONE_POINT_TRIMMING)
		return temp + data->temp_error1 - EXYNOS_FIRST_POINT_TRIM;

	return (temp - EXYNOS_FIRST_POINT_TRIM) *
		(data->temp_error2 - data->temp_error1) /
		(EXYNOS_SECOND_POINT_TRIM - EXYNOS_FIRST_POINT_TRIM) +
		data->temp_error1;
}
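
/*
 * Worked example (values are purely illustrative, not taken from any
 * particular chip): with two-point trimming and fused codes
 * temp_error1 = 0x50 (at 25 degC) and temp_error2 = 0x8a (at 85 degC),
 * temp_to_code() above maps 55 degC to
 * (55 - 25) * (0x8a - 0x50) / (85 - 25) + 0x50 = 0x6d,
 * and code_to_temp() below performs the inverse linear mapping.
 */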

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (pdata->cal_type == TYPE_ONE_POINT_TRIMMING)
		return temp_code - data->temp_error1 + EXYNOS_FIRST_POINT_TRIM;

	return (temp_code - data->temp_error1) *
		(EXYNOS_SECOND_POINT_TRIM - EXYNOS_FIRST_POINT_TRIM) /
		(data->temp_error2 - data->temp_error1) +
		EXYNOS_FIRST_POINT_TRIM;
}

static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;
}

static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
{
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	unsigned long temp;
	int i;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		return 0;
	}

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL)
			continue;

		temp = trips[i].temperature / MCELSIUS;
		if (falling)
			temp -= (trips[i].hysteresis / MCELSIUS);
		else
			threshold &= ~(0xff << 8 * i);

		threshold |= temp_to_code(data, temp) << 8 * i;
	}

	return threshold;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	int ret;

	if (of_thermal_get_ntrips(data->tzd) > data->ntrip) {
		dev_info(&pdev->dev,
			 "More trip points than supported by this TMU.\n");
		dev_info(&pdev->dev,
			 "%d trip points should be configured in polling mode.\n",
			 (of_thermal_get_ntrips(data->tzd) - data->ntrip));
	}

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	ret = data->tmu_initialize(pdev);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS3250)
		con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
	con |= (EXYNOS_NOISE_CANCEL_MODE << EXYNOS_TMU_TRIP_MODE_SHIFT);

	return con;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_control(pdev, on);
	data->enabled = on;
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	int ret = 0, threshold_code, i;
	unsigned long reference, temp;
	unsigned int status;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));

	/* Write temperature code for threshold */
	reference = trips[0].temperature / MCELSIUS;
	threshold_code = temp_to_code(data, reference);
	if (threshold_code < 0) {
		ret = threshold_code;
		goto out;
	}
	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		temp = trips[i].temperature / MCELSIUS;
		writeb(temp - reference, data->base +
			EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos4412_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(data->tzd);
	unsigned int status, trim_info, con, ctrl, rising_threshold;
	int ret = 0, threshold_code, i;
	unsigned long crit_temp = 0;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* On exynos5420 the triminfo register is in the shared space */
	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
	else
		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL) {
			crit_temp = trips[i].temperature;
			break;
		}
	}

	if (i == of_thermal_get_ntrips(data->tzd)) {
		pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
		       __func__);
		ret = -EINVAL;
		goto out;
	}

	threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
	/* 1-4 level to be assigned in th0 reg */
	rising_threshold &= ~(0xff << 8 * i);
	rising_threshold |= threshold_code << 8 * i;
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
	con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);

out:
	return ret;
}

static int exynos5433_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	struct thermal_zone_device *tz = data->tzd;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int temp, temp_hist;
	int ret = 0, threshold_code, i, sensor_id, cal_type;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	sanitize_temp_error(data, trim_info);

	/* Read the temperature sensor id */
	sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
				>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
	dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);

	/* Read the calibration mode */
	writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
	cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
				>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;

	switch (cal_type) {
	case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
		pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
		break;
	default:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	}

	dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
			cal_type ? 2 : 1);

	/* Write temperature code for rising and falling threshold */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		int rising_reg_offset, falling_reg_offset;
		int j = 0;

		switch (i) {
		case 0:
		case 1:
		case 2:
		case 3:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
			j = i;
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
			j = i - 4;
			break;
		default:
			continue;
		}

		/* Write temperature code for rising threshold */
		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;
		threshold_code = temp_to_code(data, temp);

		rising_threshold = readl(data->base + rising_reg_offset);
		rising_threshold |= (threshold_code << j * 8);
		writel(rising_threshold, data->base + rising_reg_offset);

		/* Write temperature code for falling threshold */
		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);
		threshold_code = temp_to_code(data, temp_hist);

		falling_threshold = readl(data->base + falling_reg_offset);
		falling_threshold &= ~(0xff << j * 8);
		falling_threshold |= (threshold_code << j * 8);
		writel(falling_threshold, data->base + falling_reg_offset);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	unsigned int trim_info = 0, con, rising_threshold;
	int threshold_code;
	int crit_temp = 0;

	/*
	 * For exynos5440 soc triminfo value is swapped between TMU0 and
	 * TMU2, so the below logic is needed.
	 */
	switch (data->id) {
	case 0:
		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
				 EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 1:
		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 2:
		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
				 EXYNOS5440_TMU_S0_7_TRIM);
	}
	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
		threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
		/* 5th level to be assigned in th2 reg */
		rising_threshold =
			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
	}
	/* Clear the PMIN in the common TMU register */
	if (!data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);

	return 0;
}

static int exynos7_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;
	int temp, temp_hist;
	unsigned int reg_off, bit_off;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	/* Write temperature code for rising and falling threshold */
	for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
		/*
		 * On exynos7 there are 4 rising and 4 falling threshold
		 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
		 * register holds the value of two threshold levels (at bit
		 * offsets 0 and 16). Based on the fact that there are at most
		 * eight possible trigger levels, calculate the register and
		 * bit offsets where the threshold levels are to be written.
		 *
		 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
		 * [24:16] - Threshold level 7
		 * [8:0] - Threshold level 6
		 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
		 * [24:16] - Threshold level 5
		 * [8:0] - Threshold level 4
		 *
		 * and similarly for falling thresholds.
		 *
		 * Based on the above, calculate the register and bit offsets
		 * for rising/falling threshold levels and populate them.
		 */
		reg_off = ((7 - i) / 2) * 4;
		bit_off = ((8 - i) % 2);

		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;

		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);

		/* Set 9-bit temperature code for rising threshold levels */
		threshold_code = temp_to_code(data, temp);
		rising_threshold = readl(data->base +
			EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
		rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		rising_threshold |= threshold_code << (16 * bit_off);
		writel(rising_threshold,
		       data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);

		/* Set 9-bit temperature code for falling threshold levels */
		threshold_code = temp_to_code(data, temp_hist);
		falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		falling_threshold |= threshold_code << (16 * bit_off);
		writel(falling_threshold,
		       data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS_TMU_INTEN_RISE0_SHIFT);

		if (data->soc != SOC_ARCH_EXYNOS4210)
			interrupt_en |=
				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en, pd_det_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;

	writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
	writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
		interrupt_en |=
			interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
}

static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			 << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			 << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			 << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			 << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static int exynos_get_temp(void *p, int *temp)
{
	struct exynos_tmu_data *data = p;
	int value, ret = 0;

	if (!data || !data->tmu_read || !data->enabled)
		return -EINVAL;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	value = data->tmu_read(data);
	if (value < 0)
		ret = value;
	else
		*temp = code_to_temp(data, value) * MCELSIUS;

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}

#ifdef CONFIG_THERMAL_EMULATION
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
			    int temp)
{
	if (temp) {
		temp /= MCELSIUS;

		if (data->soc != SOC_ARCH_EXYNOS5440) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		if (data->soc == SOC_ARCH_EXYNOS7) {
			val &= ~(EXYNOS7_EMUL_DATA_MASK <<
				EXYNOS7_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS7_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		} else {
			val &= ~(EXYNOS_EMUL_DATA_MASK <<
				EXYNOS_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		}
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	return val;
}

static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;
	u32 emul_con;

	if (data->soc == SOC_ARCH_EXYNOS5260)
		emul_con = EXYNOS5260_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS5433)
		emul_con = EXYNOS5433_TMU_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS7)
		emul_con = EXYNOS7_TMU_REG_EMUL_CON;
	else
		emul_con = EXYNOS_EMUL_CON;

	val = readl(data->base + emul_con);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + emul_con);
}

static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;

	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}

static int exynos_tmu_set_emulation(void *drv_data, int temp)
{
	struct exynos_tmu_data *data = drv_data;
	int ret = -EINVAL;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_set_emulation(data, temp);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, int temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);

	/* "temp_code" should range between 75 and 175 */
	return (ret < 75 || ret > 175) ? -ENODATA : ret;
}

static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}

static int exynos5440_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
}

static int exynos7_tmu_read(struct exynos_tmu_data *data)
{
	return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
		EXYNOS7_TMU_TEMP_MASK;
}

static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	data->tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;
	u32 tmu_intstat, tmu_intclear;

	if (data->soc == SOC_ARCH_EXYNOS5260) {
		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
	} else if (data->soc == SOC_ARCH_EXYNOS7) {
		tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
	} else if (data->soc == SOC_ARCH_EXYNOS5433) {
		tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
	} else {
		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
	}

	val_irq = readl(data->base + tmu_intstat);
	/*
	 * Clear the interrupts.  Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register.  Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + tmu_intclear);
}

static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;

	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
	/* clear the interrupts */
	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
}

static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS3250,
	}, {
		.compatible = "samsung,exynos4210-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS4210,
	}, {
		.compatible = "samsung,exynos4412-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS4412,
	}, {
		.compatible = "samsung,exynos5250-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5250,
	}, {
		.compatible = "samsung,exynos5260-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5260,
	}, {
		.compatible = "samsung,exynos5420-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5420,
	}, {
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (const void *)SOC_ARCH_EXYNOS5420_TRIMINFO,
	}, {
		.compatible = "samsung,exynos5433-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5433,
	}, {
		.compatible = "samsung,exynos5440-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS5440,
	}, {
		.compatible = "samsung,exynos7-tmu",
		.data = (const void *)SOC_ARCH_EXYNOS7,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static int exynos_of_sensor_conf(struct device_node *np,
				 struct exynos_tmu_platform_data *pdata)
{
	u32 value;
	int ret;

	of_node_get(np);

	ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
	pdata->gain = (u8)value;
	of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
	pdata->reference_voltage = (u8)value;

	of_property_read_u32(np, "samsung,tmu_efuse_value",
			     &pdata->efuse_value);
	of_property_read_u32(np, "samsung,tmu_min_efuse_value",
			     &pdata->min_efuse_value);
	of_property_read_u32(np, "samsung,tmu_max_efuse_value",
			     &pdata->max_efuse_value);

	of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);

	of_node_put(np);
	return 0;
}
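
/*
 * Illustrative device-tree fragment (all values hypothetical) showing the
 * properties consumed by exynos_of_sensor_conf() above:
 *
 *	tmu@100c0000 {
 *		samsung,tmu_gain = <8>;
 *		samsung,tmu_reference_voltage = <16>;
 *		samsung,tmu_efuse_value = <55>;
 *		samsung,tmu_min_efuse_value = <40>;
 *		samsung,tmu_max_efuse_value = <100>;
 *		samsung,tmu_cal_type = <1>;
 *	};
 */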

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct exynos_tmu_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	exynos_of_sensor_conf(pdev->dev.of_node, pdata);
	data->pdata = pdata;
	data->soc = (enum soc_type)of_device_get_match_data(&pdev->dev);

	switch (data->soc) {
	case SOC_ARCH_EXYNOS4210:
		data->tmu_initialize = exynos4210_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4210_tmu_read;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 4;
		break;
	case SOC_ARCH_EXYNOS3250:
	case SOC_ARCH_EXYNOS4412:
	case SOC_ARCH_EXYNOS5250:
	case SOC_ARCH_EXYNOS5260:
	case SOC_ARCH_EXYNOS5420:
	case SOC_ARCH_EXYNOS5420_TRIMINFO:
		data->tmu_initialize = exynos4412_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 4;
		break;
	case SOC_ARCH_EXYNOS5433:
		data->tmu_initialize = exynos5433_tmu_initialize;
		data->tmu_control = exynos5433_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 8;
		break;
	case SOC_ARCH_EXYNOS5440:
		data->tmu_initialize = exynos5440_tmu_initialize;
		data->tmu_control = exynos5440_tmu_control;
		data->tmu_read = exynos5440_tmu_read;
		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
		data->ntrip = 4;
		break;
	case SOC_ARCH_EXYNOS7:
		data->tmu_initialize = exynos7_tmu_initialize;
		data->tmu_control = exynos7_tmu_control;
		data->tmu_read = exynos7_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 8;
		break;
	default:
		dev_err(&pdev->dev, "Platform not supported\n");
		return -EINVAL;
	}

	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
	    data->soc != SOC_ARCH_EXYNOS5440)
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					 resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static const struct thermal_zone_of_device_ops exynos_sensor_ops = {
	.get_temp = exynos_get_temp,
	.set_emul_temp = exynos_tmu_set_emulation,
};

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	ret = exynos_map_dt_data(pdev);
	if (ret)
		goto err_sensor;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		ret = PTR_ERR(data->clk);
		goto err_sensor;
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			ret = PTR_ERR(data->clk_sec);
			goto err_sensor;
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to get clock\n");
			goto err_sensor;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		goto err_clk_sec;
	}

	switch (data->soc) {
	case SOC_ARCH_EXYNOS5433:
	case SOC_ARCH_EXYNOS7:
		data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
		if (IS_ERR(data->sclk)) {
			dev_err(&pdev->dev, "Failed to get sclk\n");
			ret = PTR_ERR(data->sclk);
			goto err_clk;
		} else {
			ret = clk_prepare_enable(data->sclk);
			if (ret) {
				dev_err(&pdev->dev, "Failed to enable sclk\n");
				goto err_clk;
			}
		}
		break;
	default:
		break;
	}

	/*
	 * data->tzd must be registered before calling exynos_tmu_initialize(),
	 * requesting irq and calling exynos_tmu_control().
	 */
	data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
						    &exynos_sensor_ops);
	if (IS_ERR(data->tzd)) {
		ret = PTR_ERR(data->tzd);
		dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
		goto err_sclk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_thermal;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_thermal;
	}

	exynos_tmu_control(pdev, true);
	return 0;

err_thermal:
	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
err_sclk:
	clk_disable_unprepare(data->sclk);
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
err_sensor:
	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tzd = data->tzd;

	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
	exynos_tmu_control(pdev, false);

	clk_disable_unprepare(data->sclk);
	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name		= "exynos-tmu",
		.pm		= EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");