xref: /linux/drivers/thermal/samsung/exynos_tmu.c (revision 37f9034f99c3c1ba9087357fbbc2b79fc1a30e72)
1 /*
2  * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3  *
4  *  Copyright (C) 2011 Samsung Electronics
5  *  Donggeun Kim <dg77.kim@samsung.com>
6  *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 #include <linux/clk.h>
25 #include <linux/io.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/of.h>
29 #include <linux/of_address.h>
30 #include <linux/of_irq.h>
31 #include <linux/platform_device.h>
32 #include <linux/regulator/consumer.h>
33 
34 #include "exynos_thermal_common.h"
35 #include "exynos_tmu.h"
36 #include "exynos_tmu_data.h"
37 
38 /**
 39  * struct exynos_tmu_data : A structure to hold the private data of the TMU
 40  *	driver
 41  * @id: identifier of this instance of the TMU controller.
 42  * @pdata: pointer to the tmu platform/configuration data
 43  * @base: base address of this instance of the TMU controller.
 44  * @base_second: base address of the common registers of the TMU controller.
 45  * @irq: irq number of the TMU controller.
 46  * @soc: id of the SoC type.
47  * @irq_work: pointer to the irq work structure.
48  * @lock: lock to implement synchronization.
49  * @clk: pointer to the clock structure.
50  * @clk_sec: pointer to the clock structure for accessing the base_second.
51  * @temp_error1: fused value of the first point trim.
52  * @temp_error2: fused value of the second point trim.
53  * @regulator: pointer to the TMU regulator structure.
54  * @reg_conf: pointer to structure to register with core thermal.
55  * @tmu_initialize: SoC specific TMU initialization method
56  * @tmu_control: SoC specific TMU control method
57  */
58 struct exynos_tmu_data {
59 	int id;
60 	struct exynos_tmu_platform_data *pdata;
61 	void __iomem *base;
62 	void __iomem *base_second;
63 	int irq;
64 	enum soc_type soc;
65 	struct work_struct irq_work;
66 	struct mutex lock;
67 	struct clk *clk, *clk_sec;
68 	u8 temp_error1, temp_error2;
69 	struct regulator *regulator;
70 	struct thermal_sensor_conf *reg_conf;
71 	int (*tmu_initialize)(struct platform_device *pdev);
72 	void (*tmu_control)(struct platform_device *pdev, bool on);
73 };
74 
75 /*
76  * Convert a temperature in degrees Celsius to the temperature code the
77  * TMU works with; the mapping depends on the calibration (trimming) type.
78  */
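/*
 * Worked example (purely illustrative trim values, not taken from any
 * SoC datasheet): with two-point trimming, first_point_trim = 25,
 * second_point_trim = 85, temp_error1 = 50 and temp_error2 = 110,
 * a temperature of 55 degrees Celsius maps to the code
 * (55 - 25) * (110 - 50) / (85 - 25) + 50 = 80.
 */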
79 static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
80 {
81 	struct exynos_tmu_platform_data *pdata = data->pdata;
82 	int temp_code;
83 
84 	switch (pdata->cal_type) {
85 	case TYPE_TWO_POINT_TRIMMING:
86 		temp_code = (temp - pdata->first_point_trim) *
87 			(data->temp_error2 - data->temp_error1) /
88 			(pdata->second_point_trim - pdata->first_point_trim) +
89 			data->temp_error1;
90 		break;
91 	case TYPE_ONE_POINT_TRIMMING:
92 		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
93 		break;
94 	default:
95 		temp_code = temp + pdata->default_temp_offset;
96 		break;
97 	}
98 
99 	return temp_code;
100 }
101 
102 /*
103  * Calculate a temperature value from a temperature code.
104  * The returned temperature is in degrees Celsius.
105  */
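/*
 * This is the inverse of temp_to_code(); with the illustrative trim
 * values used in the example above, a temperature code of 80 maps back to
 * (80 - 50) * (85 - 25) / (110 - 50) + 25 = 55 degrees Celsius.
 */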
106 static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
107 {
108 	struct exynos_tmu_platform_data *pdata = data->pdata;
109 	int temp;
110 
111 	switch (pdata->cal_type) {
112 	case TYPE_TWO_POINT_TRIMMING:
113 		temp = (temp_code - data->temp_error1) *
114 			(pdata->second_point_trim - pdata->first_point_trim) /
115 			(data->temp_error2 - data->temp_error1) +
116 			pdata->first_point_trim;
117 		break;
118 	case TYPE_ONE_POINT_TRIMMING:
119 		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
120 		break;
121 	default:
122 		temp = temp_code - pdata->default_temp_offset;
123 		break;
124 	}
125 
126 	return temp;
127 }
128 
129 static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
130 {
131 	const struct exynos_tmu_registers *reg = data->pdata->registers;
132 	unsigned int val_irq;
133 
134 	val_irq = readl(data->base + reg->tmu_intstat);
135 	/*
136 	 * Clear the interrupts.  Please note that the documentation for
137 	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
138 	 * states that INTCLEAR register has a different placing of bits
139 	 * responsible for FALL IRQs than INTSTAT register.  Exynos5420
140 	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
141 	 * support FALL IRQs at all).
142 	 */
143 	writel(val_irq, data->base + reg->tmu_intclear);
144 }
145 
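/*
 * Sanity-check the fused trim values read from the TRIMINFO register and
 * fall back to the platform-provided efuse_value when a point is missing
 * or the first point lies outside [min_efuse_value, max_efuse_value].
 */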
146 static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
147 {
148 	struct exynos_tmu_platform_data *pdata = data->pdata;
149 
150 	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
151 	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
152 				EXYNOS_TMU_TEMP_MASK);
153 
154 	if (!data->temp_error1 ||
155 		(pdata->min_efuse_value > data->temp_error1) ||
156 		(data->temp_error1 > pdata->max_efuse_value))
157 		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
158 
159 	if (!data->temp_error2)
160 		data->temp_error2 =
161 			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
162 			EXYNOS_TMU_TEMP_MASK;
163 }
164 
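/*
 * Pack the software (non-HW) trigger temperatures into a threshold
 * register, one temperature code per byte.  For the falling-threshold
 * register the configured hysteresis (threshold_falling) is subtracted
 * from each trigger level first.
 */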
165 static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
166 {
167 	struct exynos_tmu_platform_data *pdata = data->pdata;
168 	int i;
169 
170 	for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
171 		u8 temp = pdata->trigger_levels[i];
172 
173 		if (falling)
174 			temp -= pdata->threshold_falling;
175 		else
176 			threshold &= ~(0xff << 8 * i);
177 
178 		threshold |= temp_to_code(data, temp) << 8 * i;
179 	}
180 
181 	return threshold;
182 }
183 
184 static int exynos_tmu_initialize(struct platform_device *pdev)
185 {
186 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
187 	int ret;
188 
189 	mutex_lock(&data->lock);
190 	clk_enable(data->clk);
191 	if (!IS_ERR(data->clk_sec))
192 		clk_enable(data->clk_sec);
193 	ret = data->tmu_initialize(pdev);
194 	clk_disable(data->clk);
195 	mutex_unlock(&data->lock);
196 	if (!IS_ERR(data->clk_sec))
197 		clk_disable(data->clk_sec);
198 
199 	return ret;
200 }
201 
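/*
 * Build the value for the TMU control register: select the test MUX
 * address (when configured), the reference voltage, the sensing buffer
 * slope (gain) and, if requested, the noise-cancellation trip mode.
 */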
202 static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
203 {
204 	struct exynos_tmu_platform_data *pdata = data->pdata;
205 
206 	if (pdata->test_mux)
207 		con |= (pdata->test_mux << EXYNOS4412_MUX_ADDR_SHIFT);
208 
209 	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
210 	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
211 
212 	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
213 	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
214 
215 	if (pdata->noise_cancel_mode) {
216 		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
217 		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
218 	}
219 
220 	return con;
221 }
222 
223 static void exynos_tmu_control(struct platform_device *pdev, bool on)
224 {
225 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
226 
227 	mutex_lock(&data->lock);
228 	clk_enable(data->clk);
229 	data->tmu_control(pdev, on);
230 	clk_disable(data->clk);
231 	mutex_unlock(&data->lock);
232 }
233 
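/*
 * Exynos4210 keeps a single base threshold temperature (written as a
 * temperature code) plus one byte-wide register per trigger level; the
 * per-level values written below are offsets from that base threshold.
 */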
234 static int exynos4210_tmu_initialize(struct platform_device *pdev)
235 {
236 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
237 	struct exynos_tmu_platform_data *pdata = data->pdata;
238 	unsigned int status;
239 	int ret = 0, threshold_code, i;
240 
241 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
242 	if (!status) {
243 		ret = -EBUSY;
244 		goto out;
245 	}
246 
247 	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
248 
249 	/* Write temperature code for threshold */
250 	threshold_code = temp_to_code(data, pdata->threshold);
251 	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
252 
253 	for (i = 0; i < pdata->non_hw_trigger_levels; i++)
254 		writeb(pdata->trigger_levels[i], data->base +
255 		       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
256 
257 	exynos_tmu_clear_irqs(data);
258 out:
259 	return ret;
260 }
261 
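/*
 * On Exynos3250/4412/5250 the fused trim values are first reloaded
 * (TRIMINFO_RELOAD).  The rising and falling trigger temperatures are then
 * packed into single threshold registers and, if the highest trigger level
 * is a HW_TRIP, it is programmed as well and the thermal-trip logic
 * (THERM_TRIP_EN) is armed.
 */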
262 static int exynos4412_tmu_initialize(struct platform_device *pdev)
263 {
264 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
265 	struct exynos_tmu_platform_data *pdata = data->pdata;
266 	unsigned int status, trim_info, con, ctrl, rising_threshold;
267 	int ret = 0, threshold_code, i;
268 
269 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
270 	if (!status) {
271 		ret = -EBUSY;
272 		goto out;
273 	}
274 
275 	if (data->soc == SOC_ARCH_EXYNOS3250 ||
276 	    data->soc == SOC_ARCH_EXYNOS4412 ||
277 	    data->soc == SOC_ARCH_EXYNOS5250) {
278 		if (data->soc == SOC_ARCH_EXYNOS3250) {
279 			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
280 			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
281 			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
282 		}
283 		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
284 		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
285 		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
286 	}
287 
288 	/* On exynos5420 the triminfo register is in the shared space */
289 	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
290 		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
291 	else
292 		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
293 
294 	sanitize_temp_error(data, trim_info);
295 
296 	/* Write temperature code for rising and falling threshold */
297 	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
298 	rising_threshold = get_th_reg(data, rising_threshold, false);
299 	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
300 	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);
301 
302 	exynos_tmu_clear_irqs(data);
303 
304 	/* If the last (HW trip) trigger level is also set */
305 	i = pdata->max_trigger_level - 1;
306 	if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
307 		threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
308 		/* Trigger levels 1-4 are packed into the rising threshold register */
309 		rising_threshold &= ~(0xff << 8 * i);
310 		rising_threshold |= threshold_code << 8 * i;
311 		writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
312 		con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
313 		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
314 		writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
315 	}
316 out:
317 	return ret;
318 }
319 
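/*
 * Exynos5440 exposes one register bank per sensor (S0..7): TH0 holds the
 * packed rising thresholds, TH1 (cleared here) the falling ones and TH2
 * the extra fifth rising level used for the HW trip.
 */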
320 static int exynos5440_tmu_initialize(struct platform_device *pdev)
321 {
322 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
323 	struct exynos_tmu_platform_data *pdata = data->pdata;
324 	unsigned int trim_info = 0, con, rising_threshold;
325 	int ret = 0, threshold_code, i;
326 
327 	/*
328 	 * On the Exynos5440 SoC the TRIMINFO values of TMU0 and TMU2 are
329 	 * swapped, hence the offset adjustments below.
330 	 */
331 	switch (data->id) {
332 	case 0:
333 		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
334 				 EXYNOS5440_TMU_S0_7_TRIM);
335 		break;
336 	case 1:
337 		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
338 		break;
339 	case 2:
340 		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
341 				  EXYNOS5440_TMU_S0_7_TRIM);
342 	}
343 	sanitize_temp_error(data, trim_info);
344 
345 	/* Write temperature code for rising and falling threshold */
346 	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
347 	rising_threshold = get_th_reg(data, rising_threshold, false);
348 	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
349 	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);
350 
351 	exynos_tmu_clear_irqs(data);
352 
353 	/* If the last (HW trip) trigger level is also set */
354 	i = pdata->max_trigger_level - 1;
355 	if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
356 		threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
357 		/* The 5th level is programmed into the TH2 register */
358 		rising_threshold =
359 			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
360 		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
361 		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
362 		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
363 		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
364 	}
365 	/* Clear the PMIN in the common TMU register */
366 	if (!data->id)
367 		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
368 	return ret;
369 }
370 
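/*
 * Enable or disable the sensing core and the per-level rising (and, where
 * supported, falling) threshold interrupts according to the platform
 * trigger_enable[] flags.
 */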
371 static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
372 {
373 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
374 	struct exynos_tmu_platform_data *pdata = data->pdata;
375 	unsigned int con, interrupt_en;
376 
377 	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
378 
379 	if (on) {
380 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
381 		interrupt_en =
382 			pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT |
383 			pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT |
384 			pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT |
385 			pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT;
386 		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
387 			interrupt_en |=
388 				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
389 	} else {
390 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
391 		interrupt_en = 0; /* Disable all interrupts */
392 	}
393 	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
394 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
395 }
396 
397 static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
398 {
399 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
400 	struct exynos_tmu_platform_data *pdata = data->pdata;
401 	unsigned int con, interrupt_en;
402 
403 	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
404 
405 	if (on) {
406 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
407 		interrupt_en =
408 			pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT |
409 			pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT |
410 			pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT |
411 			pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT;
412 		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
413 			interrupt_en |=
414 				interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
415 	} else {
416 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
417 		interrupt_en = 0; /* Disable all interrupts */
418 	}
419 	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
420 	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
421 }
422 
423 static int exynos_tmu_read(struct exynos_tmu_data *data)
424 {
425 	struct exynos_tmu_platform_data *pdata = data->pdata;
426 	const struct exynos_tmu_registers *reg = pdata->registers;
427 	u8 temp_code;
428 	int temp;
429 
430 	mutex_lock(&data->lock);
431 	clk_enable(data->clk);
432 
433 	temp_code = readb(data->base + reg->tmu_cur_temp);
434 
435 	/* On Exynos4210 the temperature code must lie between 75 and 175 */
436 	if (data->soc == SOC_ARCH_EXYNOS4210 &&
437 	    (temp_code < 75 || temp_code > 175)) {
438 		temp = -ENODATA;
439 		goto out;
440 	}
441 
442 	temp = code_to_temp(data, temp_code);
443 out:
444 	clk_disable(data->clk);
445 	mutex_unlock(&data->lock);
446 
447 	return temp;
448 }
449 
450 #ifdef CONFIG_THERMAL_EMULATION
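/*
 * Program the emulation register: temp is given in millicelsius, a
 * non-zero value enables emulation at the corresponding temperature code
 * (optionally with the EMUL_TIME delay), zero disables emulation again.
 */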
451 static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
452 {
453 	struct exynos_tmu_data *data = drv_data;
454 	struct exynos_tmu_platform_data *pdata = data->pdata;
455 	const struct exynos_tmu_registers *reg = pdata->registers;
456 	unsigned int val;
457 	int ret = -EINVAL;
458 
459 	if (!TMU_SUPPORTS(pdata, EMULATION))
460 		goto out;
461 
462 	if (temp && temp < MCELSIUS)
463 		goto out;
464 
465 	mutex_lock(&data->lock);
466 	clk_enable(data->clk);
467 
468 	val = readl(data->base + reg->emul_con);
469 
470 	if (temp) {
471 		temp /= MCELSIUS;
472 
473 		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
474 			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
475 			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
476 		}
477 		val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
478 		val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
479 			EXYNOS_EMUL_ENABLE;
480 	} else {
481 		val &= ~EXYNOS_EMUL_ENABLE;
482 	}
483 
484 	writel(val, data->base + reg->emul_con);
485 
486 	clk_disable(data->clk);
487 	mutex_unlock(&data->lock);
488 	return 0;
489 out:
490 	return ret;
491 }
492 #else
493 static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
494 	{ return -EINVAL; }
495 #endif /* CONFIG_THERMAL_EMULATION */
496 
497 static void exynos_tmu_work(struct work_struct *work)
498 {
499 	struct exynos_tmu_data *data = container_of(work,
500 			struct exynos_tmu_data, irq_work);
501 	unsigned int val_type;
502 
503 	if (!IS_ERR(data->clk_sec))
504 		clk_enable(data->clk_sec);
505 	/* Find which sensor generated this interrupt */
506 	if (data->soc == SOC_ARCH_EXYNOS5440) {
507 		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
508 		if (!((val_type >> data->id) & 0x1))
509 			goto out;
510 	}
511 	if (!IS_ERR(data->clk_sec))
512 		clk_disable(data->clk_sec);
513 
514 	exynos_report_trigger(data->reg_conf);
515 	mutex_lock(&data->lock);
516 	clk_enable(data->clk);
517 
518 	/* TODO: take action based on particular interrupt */
519 	exynos_tmu_clear_irqs(data);
520 
521 	clk_disable(data->clk);
522 	mutex_unlock(&data->lock);
523 out:
524 	enable_irq(data->irq);
525 }
526 
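/*
 * The hard IRQ handler only masks the interrupt line and defers the real
 * work to exynos_tmu_work(), which reports the trigger, clears the
 * interrupt sources and finally re-enables the line.
 */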
527 static irqreturn_t exynos_tmu_irq(int irq, void *id)
528 {
529 	struct exynos_tmu_data *data = id;
530 
531 	disable_irq_nosync(irq);
532 	schedule_work(&data->irq_work);
533 
534 	return IRQ_HANDLED;
535 }
536 
537 static const struct of_device_id exynos_tmu_match[] = {
538 	{
539 		.compatible = "samsung,exynos3250-tmu",
540 		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
541 	},
542 	{
543 		.compatible = "samsung,exynos4210-tmu",
544 		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
545 	},
546 	{
547 		.compatible = "samsung,exynos4412-tmu",
548 		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
549 	},
550 	{
551 		.compatible = "samsung,exynos5250-tmu",
552 		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
553 	},
554 	{
555 		.compatible = "samsung,exynos5260-tmu",
556 		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
557 	},
558 	{
559 		.compatible = "samsung,exynos5420-tmu",
560 		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
561 	},
562 	{
563 		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
564 		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
565 	},
566 	{
567 		.compatible = "samsung,exynos5440-tmu",
568 		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
569 	},
570 	{},
571 };
572 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
573 
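/*
 * Look up the per-SoC init data attached to the matched compatible string
 * and return the platform data slot that corresponds to this controller
 * instance (id).
 */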
574 static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
575 			struct platform_device *pdev, int id)
576 {
577 	struct exynos_tmu_init_data *data_table;
578 	struct exynos_tmu_platform_data *tmu_data;
579 	const struct of_device_id *match;
580 
581 	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
582 	if (!match)
583 		return NULL;
584 	data_table = (struct exynos_tmu_init_data *) match->data;
585 	if (!data_table || id >= data_table->tmu_count)
586 		return NULL;
587 	tmu_data = data_table->tmu_data;
588 	return (struct exynos_tmu_platform_data *) (tmu_data + id);
589 }
590 
591 static int exynos_map_dt_data(struct platform_device *pdev)
592 {
593 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
594 	struct exynos_tmu_platform_data *pdata;
595 	struct resource res;
596 	int ret;
597 
598 	if (!data || !pdev->dev.of_node)
599 		return -ENODEV;
600 
601 	/*
602 	 * Try enabling the regulator if one is available.
603 	 * TODO: Make the regulator an SoC feature, so that enabling it
604 	 * becomes a compulsory call.
605 	 */
606 	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
607 	if (!IS_ERR(data->regulator)) {
608 		ret = regulator_enable(data->regulator);
609 		if (ret) {
610 			dev_err(&pdev->dev, "failed to enable vtmu\n");
611 			return ret;
612 		}
613 	} else {
614 		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
615 	}
616 
617 	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
618 	if (data->id < 0)
619 		data->id = 0;
620 
621 	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
622 	if (data->irq <= 0) {
623 		dev_err(&pdev->dev, "failed to get IRQ\n");
624 		return -ENODEV;
625 	}
626 
627 	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
628 		dev_err(&pdev->dev, "failed to get Resource 0\n");
629 		return -ENODEV;
630 	}
631 
632 	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
633 	if (!data->base) {
634 		dev_err(&pdev->dev, "Failed to ioremap memory\n");
635 		return -EADDRNOTAVAIL;
636 	}
637 
638 	pdata = exynos_get_driver_data(pdev, data->id);
639 	if (!pdata) {
640 		dev_err(&pdev->dev, "No platform init data supplied.\n");
641 		return -ENODEV;
642 	}
643 	data->pdata = pdata;
644 	/*
645 	 * If this TMU instance shares registers with other instances,
646 	 * map the common register region as well.
647 	 */
648 	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
649 		return 0;
650 
651 	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
652 		dev_err(&pdev->dev, "failed to get Resource 1\n");
653 		return -ENODEV;
654 	}
655 
656 	data->base_second = devm_ioremap(&pdev->dev, res.start,
657 					resource_size(&res));
658 	if (!data->base_second) {
659 		dev_err(&pdev->dev, "Failed to ioremap memory\n");
660 		return -ENOMEM;
661 	}
662 
663 	return 0;
664 }
665 
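/*
 * Probe order: map the DT resources and platform data, grab and prepare
 * the clocks, pick the per-SoC initialize/control callbacks, initialize
 * and enable the TMU, register the sensor with the Exynos thermal core
 * and finally request the interrupt.
 */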
666 static int exynos_tmu_probe(struct platform_device *pdev)
667 {
668 	struct exynos_tmu_data *data;
669 	struct exynos_tmu_platform_data *pdata;
670 	struct thermal_sensor_conf *sensor_conf;
671 	int ret, i;
672 
673 	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
674 					GFP_KERNEL);
675 	if (!data)
676 		return -ENOMEM;
677 
678 	platform_set_drvdata(pdev, data);
679 	mutex_init(&data->lock);
680 
681 	ret = exynos_map_dt_data(pdev);
682 	if (ret)
683 		return ret;
684 
685 	pdata = data->pdata;
686 
687 	INIT_WORK(&data->irq_work, exynos_tmu_work);
688 
689 	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
690 	if (IS_ERR(data->clk)) {
691 		dev_err(&pdev->dev, "Failed to get clock\n");
692 		return PTR_ERR(data->clk);
693 	}
694 
695 	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
696 	if (IS_ERR(data->clk_sec)) {
697 		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
698 			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
699 			return PTR_ERR(data->clk_sec);
700 		}
701 	} else {
702 		ret = clk_prepare(data->clk_sec);
703 		if (ret) {
704 			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
705 			return ret;
706 		}
707 	}
708 
709 	ret = clk_prepare(data->clk);
710 	if (ret) {
711 		dev_err(&pdev->dev, "Failed to prepare clock\n");
712 		goto err_clk_sec;
713 	}
714 
715 	data->soc = pdata->type;
716 
717 	switch (data->soc) {
718 	case SOC_ARCH_EXYNOS4210:
719 		data->tmu_initialize = exynos4210_tmu_initialize;
720 		data->tmu_control = exynos4210_tmu_control;
721 		break;
722 	case SOC_ARCH_EXYNOS3250:
723 	case SOC_ARCH_EXYNOS4412:
724 	case SOC_ARCH_EXYNOS5250:
725 	case SOC_ARCH_EXYNOS5260:
726 	case SOC_ARCH_EXYNOS5420:
727 	case SOC_ARCH_EXYNOS5420_TRIMINFO:
728 		data->tmu_initialize = exynos4412_tmu_initialize;
729 		data->tmu_control = exynos4210_tmu_control;
730 		break;
731 	case SOC_ARCH_EXYNOS5440:
732 		data->tmu_initialize = exynos5440_tmu_initialize;
733 		data->tmu_control = exynos5440_tmu_control;
734 		break;
735 	default:
736 		ret = -EINVAL;
737 		dev_err(&pdev->dev, "Platform not supported\n");
738 		goto err_clk;
739 	}
740 
741 	ret = exynos_tmu_initialize(pdev);
742 	if (ret) {
743 		dev_err(&pdev->dev, "Failed to initialize TMU\n");
744 		goto err_clk;
745 	}
746 
747 	exynos_tmu_control(pdev, true);
748 
749 	/* Allocate a structure to register with the exynos core thermal */
750 	sensor_conf = devm_kzalloc(&pdev->dev,
751 				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
752 	if (!sensor_conf) {
753 		ret = -ENOMEM;
754 		goto err_clk;
755 	}
756 	sprintf(sensor_conf->name, "therm_zone%d", data->id);
757 	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
758 	sensor_conf->write_emul_temp =
759 		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
760 	sensor_conf->driver_data = data;
761 	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
762 			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
763 			pdata->trigger_enable[3];
764 
765 	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
766 		sensor_conf->trip_data.trip_val[i] =
767 			pdata->threshold + pdata->trigger_levels[i];
768 		sensor_conf->trip_data.trip_type[i] =
769 					pdata->trigger_type[i];
770 	}
771 
772 	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
773 
774 	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
775 	for (i = 0; i < pdata->freq_tab_count; i++) {
776 		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
777 					pdata->freq_tab[i].freq_clip_max;
778 		sensor_conf->cooling_data.freq_data[i].temp_level =
779 					pdata->freq_tab[i].temp_level;
780 	}
781 	sensor_conf->dev = &pdev->dev;
782 	/* Register the sensor with the thermal management interface */
783 	ret = exynos_register_thermal(sensor_conf);
784 	if (ret) {
785 		dev_err(&pdev->dev, "Failed to register thermal interface\n");
786 		goto err_clk;
787 	}
788 	data->reg_conf = sensor_conf;
789 
790 	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
791 		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
792 	if (ret) {
793 		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
794 		goto err_clk;
795 	}
796 
797 	return 0;
798 err_clk:
799 	clk_unprepare(data->clk);
800 err_clk_sec:
801 	if (!IS_ERR(data->clk_sec))
802 		clk_unprepare(data->clk_sec);
803 	return ret;
804 }
805 
806 static int exynos_tmu_remove(struct platform_device *pdev)
807 {
808 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
809 
810 	exynos_unregister_thermal(data->reg_conf);
811 
812 	exynos_tmu_control(pdev, false);
813 
814 	clk_unprepare(data->clk);
815 	if (!IS_ERR(data->clk_sec))
816 		clk_unprepare(data->clk_sec);
817 
818 	if (!IS_ERR(data->regulator))
819 		regulator_disable(data->regulator);
820 
821 	return 0;
822 }
823 
824 #ifdef CONFIG_PM_SLEEP
825 static int exynos_tmu_suspend(struct device *dev)
826 {
827 	exynos_tmu_control(to_platform_device(dev), false);
828 
829 	return 0;
830 }
831 
832 static int exynos_tmu_resume(struct device *dev)
833 {
834 	struct platform_device *pdev = to_platform_device(dev);
835 
836 	exynos_tmu_initialize(pdev);
837 	exynos_tmu_control(pdev, true);
838 
839 	return 0;
840 }
841 
842 static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
843 			 exynos_tmu_suspend, exynos_tmu_resume);
844 #define EXYNOS_TMU_PM	(&exynos_tmu_pm)
845 #else
846 #define EXYNOS_TMU_PM	NULL
847 #endif
848 
849 static struct platform_driver exynos_tmu_driver = {
850 	.driver = {
851 		.name   = "exynos-tmu",
852 		.owner  = THIS_MODULE,
853 		.pm     = EXYNOS_TMU_PM,
854 		.of_match_table = exynos_tmu_match,
855 	},
856 	.probe = exynos_tmu_probe,
857 	.remove	= exynos_tmu_remove,
858 };
859 
860 module_platform_driver(exynos_tmu_driver);
861 
862 MODULE_DESCRIPTION("EXYNOS TMU Driver");
863 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
864 MODULE_LICENSE("GPL");
865 MODULE_ALIAS("platform:exynos-tmu");
866