exynos_tmu.c: diff between 56c64da7aa31c7e0422ec54e5d0ed60a98f28712 (old) and b835ced1fd05c43bd4a706050963678bc6e95bc7 (new)
 /*
  * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
  *
  * Copyright (C) 2011 Samsung Electronics
  *  Donggeun Kim <dg77.kim@samsung.com>
  *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify

 [... 108 unchanged lines hidden ...]

 	default:
 		temp = temp_code - pdata->default_temp_offset;
 		break;
 	}

 	return temp;
 }

+static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
+{
+	const struct exynos_tmu_registers *reg = data->pdata->registers;
+	unsigned int val_irq;
+
+	val_irq = readl(data->base + reg->tmu_intstat);
+	/*
+	 * Clear the interrupts. Please note that the documentation for
+	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
+	 * states that INTCLEAR register has a different placing of bits
+	 * responsible for FALL IRQs than INTSTAT register. Exynos5420
+	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
+	 * support FALL IRQs at all).
+	 */
+	writel(val_irq, data->base + reg->tmu_intclear);
+}
+
 static int exynos_tmu_initialize(struct platform_device *pdev)
 {
 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
 	struct exynos_tmu_platform_data *pdata = data->pdata;
 	const struct exynos_tmu_registers *reg = pdata->registers;
 	unsigned int status, trim_info = 0, con, ctrl;
 	unsigned int rising_threshold = 0, falling_threshold = 0;
 	int ret = 0, threshold_code, i;

 [... 69 unchanged lines hidden ...]

 		/* Write temperature code for threshold */
 		threshold_code = temp_to_code(data, pdata->threshold);
 		writeb(threshold_code,
 			data->base + reg->threshold_temp);
 		for (i = 0; i < pdata->non_hw_trigger_levels; i++)
 			writeb(pdata->trigger_levels[i], data->base +
 				reg->threshold_th0 + i * sizeof(reg->threshold_th0));

-		writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
+		exynos_tmu_clear_irqs(data);
 	} else {
 		/* Write temperature code for rising and falling threshold */
 		for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
 			threshold_code = temp_to_code(data,
 						pdata->trigger_levels[i]);
 			rising_threshold &= ~(0xff << 8 * i);
 			rising_threshold |= threshold_code << 8 * i;
 			if (pdata->threshold_falling) {

 [... 4 unchanged lines hidden ...]

 			}
 		}

 		writel(rising_threshold,
 				data->base + reg->threshold_th0);
 		writel(falling_threshold,
 				data->base + reg->threshold_th1);

-		writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
-			(reg->intclr_fall_mask << reg->intclr_fall_shift),
-				data->base + reg->tmu_intclear);
+		exynos_tmu_clear_irqs(data);

 		/* if last threshold limit is also present */
 		i = pdata->max_trigger_level - 1;
 		if (pdata->trigger_levels[i] &&
 				(pdata->trigger_type[i] == HW_TRIP)) {
 			threshold_code = temp_to_code(data,
 						pdata->trigger_levels[i]);
 			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {

 [... 149 unchanged lines hidden ...]

 #endif /* CONFIG_THERMAL_EMULATION */

 static void exynos_tmu_work(struct work_struct *work)
 {
 	struct exynos_tmu_data *data = container_of(work,
 			struct exynos_tmu_data, irq_work);
 	struct exynos_tmu_platform_data *pdata = data->pdata;
 	const struct exynos_tmu_registers *reg = pdata->registers;
-	unsigned int val_irq, val_type;
+	unsigned int val_type;

 	if (!IS_ERR(data->clk_sec))
 		clk_enable(data->clk_sec);
 	/* Find which sensor generated this interrupt */
 	if (reg->tmu_irqstatus) {
 		val_type = readl(data->base_second + reg->tmu_irqstatus);
 		if (!((val_type >> data->id) & 0x1))
 			goto out;
 	}
 	if (!IS_ERR(data->clk_sec))
 		clk_disable(data->clk_sec);

 	exynos_report_trigger(data->reg_conf);
 	mutex_lock(&data->lock);
 	clk_enable(data->clk);

 	/* TODO: take action based on particular interrupt */
-	val_irq = readl(data->base + reg->tmu_intstat);
-	/* clear the interrupts */
-	writel(val_irq, data->base + reg->tmu_intclear);
+	exynos_tmu_clear_irqs(data);

 	clk_disable(data->clk);
 	mutex_unlock(&data->lock);
 out:
 	enable_irq(data->irq);
 }

 static irqreturn_t exynos_tmu_irq(int irq, void *id)

 [... 326 unchanged lines hidden ...]
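
The exynos_tmu_clear_irqs() helper introduced by this diff centralizes the common acknowledge-what-is-pending idiom: read the raw interrupt status register and write the very same bitmask back to the clear register, so only the interrupts that actually fired are acknowledged. The stand-alone sketch below illustrates that idiom outside the driver; the register offsets, the fake_readl()/fake_writel() backing store and the tmu_ack_pending_irqs() name are assumptions made purely for the example and are not part of exynos_tmu.c.

#include <stdint.h>
#include <stdio.h>

/* Illustrative register offsets -- assumptions, not a real TMU layout. */
#define TMU_INTSTAT	0x74
#define TMU_INTCLEAR	0x78

/* Fake MMIO backing store so the sketch runs without hardware. */
static uint32_t fake_regs[0x100 / 4];

static uint32_t fake_readl(unsigned int off)
{
	return fake_regs[off / 4];
}

static void fake_writel(uint32_t val, unsigned int off)
{
	/* Model write-1-to-clear semantics of the INTCLEAR register. */
	if (off == TMU_INTCLEAR)
		fake_regs[TMU_INTSTAT / 4] &= ~val;
	else
		fake_regs[off / 4] = val;
}

/* Same idiom as exynos_tmu_clear_irqs(): ack exactly what is pending. */
static void tmu_ack_pending_irqs(void)
{
	uint32_t pending = fake_readl(TMU_INTSTAT);

	fake_writel(pending, TMU_INTCLEAR);
}

int main(void)
{
	fake_regs[TMU_INTSTAT / 4] = 0x111;	/* pretend three IRQs fired */
	tmu_ack_pending_irqs();
	printf("INTSTAT after ack: %#x\n",
	       (unsigned int)fake_readl(TMU_INTSTAT));
	return 0;
}

Because the mask written to INTCLEAR is taken from INTSTAT rather than from a hard-coded rise/fall mask, the same helper works in exynos_tmu_initialize() and in the interrupt work handler, which is what lets the diff drop the two open-coded writel() sequences.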