xref: /linux/drivers/thermal/samsung/exynos_tmu.c (revision 56c64da7aa31c7e0422ec54e5d0ed60a98f28712)
/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"

/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};

/*
 * The TMU reports temperature as a mapped temperature code rather than in
 * degree Celsius. The conversion between the two depends on the
 * calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}

	return temp_code;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}

	return temp;
}
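/*
 * Illustrative example (not part of the driver): assume two-point trimming
 * with hypothetical fused codes temp_error1 = 80 (trimmed at
 * first_point_trim = 25 degC) and temp_error2 = 140 (trimmed at
 * second_point_trim = 85 degC). Then temp_to_code(55) returns
 * (55 - 25) * (140 - 80) / (85 - 25) + 80 = 110, and code_to_temp(110)
 * inverts it back to 55 degC. With one-point trimming only temp_error1 is
 * used and the mapping is a plain offset.
 */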

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con, ctrl;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);

	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
		status = readb(data->base + reg->tmu_status);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) {
		for (i = 0; i < reg->triminfo_ctrl_count; i++) {
			if (pdata->triminfo_reload[i]) {
				ctrl = readl(data->base +
						reg->triminfo_ctrl[i]);
				ctrl |= pdata->triminfo_reload[i];
				writel(ctrl, data->base +
						reg->triminfo_ctrl[i]);
			}
		}
	}

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * On the exynos5440 SoC the triminfo value is swapped between
		 * TMU0 and TMU2, so the logic below is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
			break;
		case 1:
			trim_info = readl(data->base + reg->triminfo_data);
			break;
		case 2:
			trim_info = readl(data->base -
			EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
		}
	} else {
		/* On exynos5420 the triminfo register is in the shared space */
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
			trim_info = readl(data->base_second +
							reg->triminfo_data);
		else
			trim_info = readl(data->base + reg->triminfo_data);
	}
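	/*
	 * The fused TRIMINFO word packs both trim codes: the low field masked
	 * by EXYNOS_TMU_TEMP_MASK holds the first-point trim code (nominally
	 * the 25 degC reading) and the field at EXYNOS_TRIMINFO_85_SHIFT
	 * holds the second-point trim code (nominally the 85 degC reading).
	 */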
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;

	rising_threshold = readl(data->base + reg->threshold_th0);

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		writeb(threshold_code,
			data->base + reg->threshold_temp);
		for (i = 0; i < pdata->non_hw_trigger_levels; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			rising_threshold &= ~(0xff << 8 * i);
			rising_threshold |= threshold_code << 8 * i;
			if (pdata->threshold_falling) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				falling_threshold |= threshold_code << 8 * i;
			}
		}
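		/*
		 * Sketch of the resulting layout (illustrative): each trigger
		 * level occupies one byte of the 32-bit threshold register,
		 * level i in bits [8*i+7:8*i]. With codes c0..c3 for four
		 * levels, TH0 ends up as (c3 << 24) | (c2 << 16) |
		 * (c1 << 8) | c0; TH1 is packed the same way with the
		 * falling-threshold codes.
		 */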

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
			(reg->intclr_fall_mask << reg->intclr_fall_shift),
				data->base + reg->tmu_intclear);

		/* if the last threshold limit (the hardware trip) is also set */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
				/* levels 1-4 are assigned in the th0 reg */
				rising_threshold &= ~(0xff << 8 * i);
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + reg->threshold_th0);
			} else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
				/* the 5th level is assigned in the th2 reg */
				rising_threshold =
				threshold_code << reg->threshold_th3_l0_shift;
				writel(rising_threshold,
					data->base + reg->threshold_th2);
			}
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << reg->therm_trip_en_shift);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/* Clear the PMIN in the common TMU register */
	if (reg->tmu_pmin && !data->id)
		writel(0, data->base_second + reg->tmu_pmin);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	if (pdata->test_mux)
		con |= (pdata->test_mux << reg->test_mux_addr_shift);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(reg->therm_trip_mode_mask <<
					reg->therm_trip_mode_shift);
		con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
	}

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
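		/*
		 * Note: the statement below mirrors the rising-interrupt
		 * enable bits into the falling-interrupt bit positions, so
		 * the same trigger levels are armed for both directions when
		 * the SoC supports falling trips.
		 */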
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	u8 temp_code;
	int temp;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	temp_code = readb(data->base + reg->tmu_cur_temp);

	if (data->soc == SOC_ARCH_EXYNOS4210)
		/* temp_code should range between 75 and 175 */
		if (temp_code < 75 || temp_code > 175) {
			temp = -ENODATA;
			goto out;
		}

	temp = code_to_temp(data, temp_code);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return temp;
}

#ifdef CONFIG_THERMAL_EMULATION
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

	if (temp) {
		temp /= MCELSIUS;

		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
			val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
		val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
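/*
 * Usage sketch (an assumption based on the thermal core, not defined in this
 * file): this callback is reached through sensor_conf->write_emul_temp when
 * user space writes a millicelsius value to the thermal zone's emul_temp
 * sysfs attribute; writing 0 clears EXYNOS_EMUL_ENABLE and returns the
 * sensor to real readings.
 */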
#else
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val_irq, val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (reg->tmu_irqstatus) {
		val_type = readl(data->base_second + reg->tmu_irqstatus);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	val_irq = readl(data->base + reg->tmu_intstat);
	/* clear the interrupts */
	writel(val_irq, data->base + reg->tmu_intclear);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

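/*
 * Hard IRQ handler: the register accesses above need clocks and a mutex, so
 * the handler only masks the line and defers the real work to
 * exynos_tmu_work(), which re-enables the interrupt when it is done.
 */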
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5260-tmu",
		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev, int id)
{
	struct exynos_tmu_init_data *data_table;
	struct exynos_tmu_platform_data *tmu_data;
	const struct of_device_id *match;

	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
	if (!match)
		return NULL;
	data_table = (struct exynos_tmu_init_data *) match->data;
	if (!data_table || id >= data_table->tmu_count)
		return NULL;
	tmu_data = data_table->tmu_data;
	return (struct exynos_tmu_platform_data *) (tmu_data + id);
}

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

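/*
 * Roughly the device tree pieces consumed above and in probe below (an
 * illustrative sketch; the addresses, interrupt cells, clock phandle and
 * alias number are hypothetical, not taken from a real dts):
 *
 *	aliases {
 *		tmuctrl0 = &tmu_cpu0;
 *	};
 *
 *	tmu_cpu0: tmu@10060000 {
 *		compatible = "samsung,exynos5420-tmu";
 *		reg = <0x10060000 0x100>;
 *		interrupts = <0 65 0>;
 *		clocks = <&clock 318>;
 *		clock-names = "tmu_apbif";
 *		vtmu-supply = <&ldo10_reg>;
 *	};
 *
 * The "samsung,exynos5420-tmu-ext-triminfo" variant adds a second reg entry
 * (mapped as base_second) and a "tmu_triminfo_apbif" clock for the shared
 * TRIMINFO block.
 */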
static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			return PTR_ERR(data->clk_sec);
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
			return ret;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clock\n");
		goto err_clk_sec;
	}

	if (pdata->type == SOC_ARCH_EXYNOS3250 ||
	    pdata->type == SOC_ARCH_EXYNOS4210 ||
	    pdata->type == SOC_ARCH_EXYNOS4412 ||
	    pdata->type == SOC_ARCH_EXYNOS5250 ||
	    pdata->type == SOC_ARCH_EXYNOS5260 ||
	    pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
	    pdata->type == SOC_ARCH_EXYNOS5440)
		data->soc = pdata->type;
	else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		ret = -ENOMEM;
		goto err_clk;
	}
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
			pdata->trigger_enable[3];

	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
					pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;
	/* Register the sensor with thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_unregister_thermal(data->reg_conf);

	exynos_tmu_control(pdev, false);

	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");