/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"

/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *	driver
 * @id: identifier of this instance of the TMU controller.
 * @pdata: pointer to the TMU platform/configuration data
 * @base: base address of this instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SoC type.
 * @irq_work: work structure for deferred TMU interrupt handling.
 * @lock: mutex serializing access to the TMU registers.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to the structure registered with the core thermal layer.
 * @tmu_initialize: SoC specific TMU initialization method
 * @tmu_control: SoC specific TMU control method
 * @tmu_read: SoC specific TMU temperature read method
 * @tmu_set_emulation: SoC specific TMU emulation setting method
 * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
	int (*tmu_initialize)(struct platform_device *pdev);
	void (*tmu_control)(struct platform_device *pdev, bool on);
	int (*tmu_read)(struct exynos_tmu_data *data);
	void (*tmu_set_emulation)(struct exynos_tmu_data *data,
				  unsigned long temp);
	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};

/*
 * The TMU handles temperatures as calibrated temperature codes.
 * A temperature is converted to a code (and back) according to the
 * calibration type: two-point trimming interpolates linearly between the
 * two fused calibration points, while one-point trimming applies a constant
 * offset derived from the first point.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}

	return temp_code;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}

	return temp;
}

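/*
 * Validate the fused trimming values read from TRIMINFO and fall back to
 * the default efuse_value from the platform data when the first-point value
 * is outside the allowed efuse window or either value is missing.
 */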
static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;
}

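/*
 * Build a rising/falling threshold register value: each non-HW trigger
 * level occupies one byte holding the level's temperature code, with the
 * hysteresis (threshold_falling) subtracted for the falling case.
 */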
static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int i;

	for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
		u8 temp = pdata->trigger_levels[i];

		if (falling)
			temp -= pdata->threshold_falling;
		else
			threshold &= ~(0xff << 8 * i);

		threshold |= temp_to_code(data, temp) << 8 * i;
	}

	return threshold;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	int ret;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	ret = data->tmu_initialize(pdev);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

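/*
 * Assemble the TMU control register value from the platform data:
 * reference voltage, buffer slope selection (gain), optional noise
 * cancellation (trip) mode and, when set, the test MUX address.
 */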
static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (pdata->test_mux)
		con |= (pdata->test_mux << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
	}

	return con;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_control(pdev, on);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

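/*
 * Exynos4210 uses a single 8-bit base threshold (written as a temperature
 * code) plus raw per-level offsets; the effective trip temperature is
 * threshold + trigger_levels[i] (see exynos_tmu_probe()).
 */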
static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status;
	int ret = 0, threshold_code, i;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));

	/* Write temperature code for threshold */
	threshold_code = temp_to_code(data, pdata->threshold);
	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

	for (i = 0; i < pdata->non_hw_trigger_levels; i++)
		writeb(pdata->trigger_levels[i], data->base +
		       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos4412_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status, trim_info, con, ctrl, rising_threshold;
	int ret = 0, threshold_code, i;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

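	/*
	 * Ask the TMU to reload the e-fused trimming values into TRIMINFO
	 * (TRIMINFO_RELOAD_ENABLE) so that they are valid when read below.
	 */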
	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* On exynos5420 the triminfo register is in the shared space */
	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
	else
		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);

	data->tmu_clear_irqs(data);

	/* Program the last threshold level too, if it is a hardware trip */
	i = pdata->max_trigger_level - 1;
	if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
		threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
		/* levels 1-4 are assigned in the th0 (rising) register */
		rising_threshold &= ~(0xff << 8 * i);
		rising_threshold |= threshold_code << 8 * i;
		writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
		con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
	}
out:
	return ret;
}

static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int trim_info = 0, con, rising_threshold;
	int ret = 0, threshold_code, i;

	/*
	 * For exynos5440 soc triminfo value is swapped between TMU0 and
	 * TMU2, so the below logic is needed.
	 */
	switch (data->id) {
	case 0:
		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
				 EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 1:
		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 2:
		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
	}
	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);

	data->tmu_clear_irqs(data);

	/* Program the last threshold level too, if it is a hardware trip */
	i = pdata->max_trigger_level - 1;
	if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
		threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
		/* the 5th level is assigned in the th2 register */
		rising_threshold =
			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
	}
	/* Clear the PMIN in the common TMU register */
	if (!data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
	return ret;
}

static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT |
			pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT |
			pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT |
			pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT;
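		/*
		 * The falling-trip enable bits mirror the rising ones,
		 * shifted up by the FALL0 offset.
		 */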
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT |
			pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT |
			pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT |
			pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT;
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
}

static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	int ret;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	ret = data->tmu_read(data);
	if (ret >= 0)
		ret = code_to_temp(data, ret);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return ret;
}

#ifdef CONFIG_THERMAL_EMULATION
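/*
 * Emulation support: get_emul_con_reg() encodes an emulated temperature
 * code (and, where supported, the EMUL_TIME delay) into the emulation
 * control register value; a temperature of 0 disables emulation again.
 */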
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
			    unsigned long temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	if (temp) {
		temp /= MCELSIUS;

		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
		val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	return val;
}

static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
					 unsigned long temp)
{
	unsigned int val;
	u32 emul_con;

	if (data->soc == SOC_ARCH_EXYNOS5260)
		emul_con = EXYNOS5260_EMUL_CON;
	else
		emul_con = EXYNOS_EMUL_CON;

	val = readl(data->base + emul_con);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + emul_con);
}

static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
					 unsigned long temp)
{
	unsigned int val;

	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}

static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_set_emulation(data, temp);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);

	/* "temp_code" should range between 75 and 175 */
	return (ret < 75 || ret > 175) ? -ENODATA : ret;
}

static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}

static int exynos5440_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
}

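/*
 * Deferred (workqueue) part of the TMU interrupt handling: report the
 * trigger to the thermal core, clear the TMU interrupt status and then
 * re-enable the IRQ line that was masked in exynos_tmu_irq().
 */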
static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	data->tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;
	u32 tmu_intstat, tmu_intclear;

	if (data->soc == SOC_ARCH_EXYNOS5260) {
		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
	} else {
		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
	}

	val_irq = readl(data->base + tmu_intstat);
	/*
	 * Clear the interrupts.  Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register.  Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + tmu_intclear);
}

static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;

	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
	/* clear the interrupts */
	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
}

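/*
 * Hard IRQ handler: only mask the line and defer the register access,
 * which needs the mutex and clocks, to exynos_tmu_work().
 */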
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5260-tmu",
		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

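/*
 * match->data points at an exynos_tmu_init_data table; return the
 * per-controller platform data entry selected by the instance id.
 */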
static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev, int id)
{
	struct exynos_tmu_init_data *data_table;
	struct exynos_tmu_platform_data *tmu_data;
	const struct of_device_id *match;

	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
	if (!match)
		return NULL;
	data_table = (struct exynos_tmu_init_data *) match->data;
	if (!data_table || id >= data_table->tmu_count)
		return NULL;
	tmu_data = data_table->tmu_data;
	return (struct exynos_tmu_platform_data *) (tmu_data + id);
}

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		/* data->soc is not set yet at this point, so check the pdata type */
		if (pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			return PTR_ERR(data->clk_sec);
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
			return ret;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clock\n");
		goto err_clk_sec;
	}

	data->soc = pdata->type;

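	/* Hook up the SoC specific TMU callbacks */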
	switch (data->soc) {
	case SOC_ARCH_EXYNOS4210:
		data->tmu_initialize = exynos4210_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4210_tmu_read;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS3250:
	case SOC_ARCH_EXYNOS4412:
	case SOC_ARCH_EXYNOS5250:
	case SOC_ARCH_EXYNOS5260:
	case SOC_ARCH_EXYNOS5420:
	case SOC_ARCH_EXYNOS5420_TRIMINFO:
		data->tmu_initialize = exynos4412_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		break;
	case SOC_ARCH_EXYNOS5440:
		data->tmu_initialize = exynos5440_tmu_initialize;
		data->tmu_control = exynos5440_tmu_control;
		data->tmu_read = exynos5440_tmu_read;
		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
		break;
	default:
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		ret = -ENOMEM;
		goto err_clk;
	}
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
			pdata->trigger_enable[3];

	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
					pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;
	/* Register the sensor with thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_unregister_thermal(data->reg_conf);

	exynos_tmu_control(pdev, false);

	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");