/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 * Copyright (C) 2011 Samsung Electronics
 * Donggeun Kim <dg77.kim@samsung.com>
 * Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"

/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 * driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}

	return temp_code;
}
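
/*
 * Worked example of the two-point calibration above (illustrative values
 * only, not taken from any specific SoC): if the fused codes are
 * temp_error1 = 50 at first_point_trim = 25 degC and temp_error2 = 110 at
 * second_point_trim = 85 degC, then a temperature of 55 degC maps to
 * (55 - 25) * (110 - 50) / (85 - 25) + 50 = 80. code_to_temp() below
 * performs the inverse mapping.
 */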

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}

	return temp;
}

static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	const struct exynos_tmu_registers *reg = data->pdata->registers;
	unsigned int val_irq;

	val_irq = readl(data->base + reg->tmu_intstat);
	/*
	 * Clear the interrupts. Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register. Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + reg->tmu_intclear);
}
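
/*
 * Program the TMU: verify the sensor reports ready, reload and read the
 * fused trimming values used for calibration (falling back to the efuse
 * defaults when they are missing or out of range), and write the threshold
 * registers derived from the platform trigger levels, arming the hardware
 * trip (HW_TRIP) level when one is configured.
 */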

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con, ctrl;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);

	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
		status = readb(data->base + EXYNOS_TMU_REG_STATUS);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * On the Exynos5440 SoC the triminfo values of TMU0 and
		 * TMU2 are swapped, so the logic below is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
			EXYNOS5440_EFUSE_SWAP_OFFSET + EXYNOS5440_TMU_S0_7_TRIM);
			break;
		case 1:
			trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
			break;
		case 2:
			trim_info = readl(data->base -
			EXYNOS5440_EFUSE_SWAP_OFFSET + EXYNOS5440_TMU_S0_7_TRIM);
		}
	} else {
		/* On exynos5420 the triminfo register is in the shared space */
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
			trim_info = readl(data->base_second +
						EXYNOS_TMU_REG_TRIMINFO);
		else
			trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	}
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;

	rising_threshold = readl(data->base + reg->threshold_th0);

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		writeb(threshold_code,
			data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
		for (i = 0; i < pdata->non_hw_trigger_levels; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		exynos_tmu_clear_irqs(data);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			rising_threshold &= ~(0xff << 8 * i);
			rising_threshold |= threshold_code << 8 * i;
			if (data->soc != SOC_ARCH_EXYNOS5440) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				falling_threshold |= threshold_code << 8 * i;
			}
		}

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		exynos_tmu_clear_irqs(data);

		/* if last threshold limit is also present */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (data->soc != SOC_ARCH_EXYNOS5440) {
				/* 1-4 level to be assigned in th0 reg */
				rising_threshold &= ~(0xff << 8 * i);
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + EXYNOS_THD_TEMP_RISE);
			} else {
				/* 5th level to be assigned in th2 reg */
				rising_threshold =
				threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
				writel(rising_threshold,
					data->base + EXYNOS5440_TMU_S0_7_TH2);
			}
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/* Clear the PMIN in the common TMU register */
	if (data->soc == SOC_ARCH_EXYNOS5440 && !data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}
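
/*
 * Program the control register (reference voltage, buffer slope/gain and
 * optional noise-cancel mode) and then either enable the sensor core
 * together with the configured rising (and, where supported, falling) trip
 * interrupts, or disable the core and mask all interrupts.
 */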
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	if (pdata->test_mux)
		con |= (pdata->test_mux << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
	}

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	u8 temp_code;
	int temp;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	temp_code = readb(data->base + reg->tmu_cur_temp);

	if (data->soc == SOC_ARCH_EXYNOS4210)
		/* temp_code should range between 75 and 175 */
		if (temp_code < 75 || temp_code > 175) {
			temp = -ENODATA;
			goto out;
		}

	temp = code_to_temp(data, temp_code);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return temp;
}

#ifdef CONFIG_THERMAL_EMULATION
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

	if (temp) {
		temp /= MCELSIUS;

		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
		val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */
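
/*
 * Interrupt handling is split in two: exynos_tmu_irq() below disables the
 * interrupt line and defers to this work item, which reports the trigger to
 * the Exynos thermal core, clears the TMU interrupt status and finally
 * re-enables the IRQ. On Exynos5440 the shared IRQ status register is
 * checked first to see whether this particular sensor raised the interrupt.
 */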

static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	exynos_tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5260-tmu",
		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev, int id)
{
	struct exynos_tmu_init_data *data_table;
	struct exynos_tmu_platform_data *tmu_data;
	const struct of_device_id *match;

	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
	if (!match)
		return NULL;
	data_table = (struct exynos_tmu_init_data *) match->data;
	if (!data_table || id >= data_table->tmu_count)
		return NULL;
	tmu_data = data_table->tmu_data;
	return (struct exynos_tmu_platform_data *) (tmu_data + id);
}
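
/*
 * Gather everything this instance needs from the device tree: the optional
 * "vtmu" regulator, the "tmuctrl" alias id, the interrupt, the controller's
 * register window and, when the SoC shares registers between instances, the
 * second (common) register window.
 */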
static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			return PTR_ERR(data->clk_sec);
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
			return ret;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clock\n");
		goto err_clk_sec;
	}

	if (pdata->type == SOC_ARCH_EXYNOS3250 ||
	    pdata->type == SOC_ARCH_EXYNOS4210 ||
	    pdata->type == SOC_ARCH_EXYNOS4412 ||
	    pdata->type == SOC_ARCH_EXYNOS5250 ||
	    pdata->type == SOC_ARCH_EXYNOS5260 ||
	    pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
	    pdata->type == SOC_ARCH_EXYNOS5440)
		data->soc = pdata->type;
	else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);
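
	/*
	 * From here on the hardware is live: interrupts may fire as soon as
	 * the handler is requested below, so the thermal zone is registered
	 * before the IRQ is hooked up.
	 */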

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		ret = -ENOMEM;
		goto err_clk;
	}
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
			pdata->trigger_enable[3];

	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
			pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;
	/* Register the sensor with thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_unregister_thermal(data->reg_conf);

	exynos_tmu_control(pdev, false);

	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name = "exynos-tmu",
		.owner = THIS_MODULE,
		.pm = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove = exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");