1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019 Samsung Electronics Co., Ltd. 4 * Author: Lukasz Luba <l.luba@partner.samsung.com> 5 */ 6 7 #include <linux/clk.h> 8 #include <linux/devfreq.h> 9 #include <linux/devfreq-event.h> 10 #include <linux/device.h> 11 #include <linux/interrupt.h> 12 #include <linux/io.h> 13 #include <linux/mfd/syscon.h> 14 #include <linux/module.h> 15 #include <linux/moduleparam.h> 16 #include <linux/of_device.h> 17 #include <linux/pm_opp.h> 18 #include <linux/platform_device.h> 19 #include <linux/regmap.h> 20 #include <linux/regulator/consumer.h> 21 #include <linux/slab.h> 22 #include "../jedec_ddr.h" 23 #include "../of_memory.h" 24 25 static int irqmode; 26 module_param(irqmode, int, 0644); 27 MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)"); 28 29 #define EXYNOS5_DREXI_TIMINGAREF (0x0030) 30 #define EXYNOS5_DREXI_TIMINGROW0 (0x0034) 31 #define EXYNOS5_DREXI_TIMINGDATA0 (0x0038) 32 #define EXYNOS5_DREXI_TIMINGPOWER0 (0x003C) 33 #define EXYNOS5_DREXI_TIMINGROW1 (0x00E4) 34 #define EXYNOS5_DREXI_TIMINGDATA1 (0x00E8) 35 #define EXYNOS5_DREXI_TIMINGPOWER1 (0x00EC) 36 #define CDREX_PAUSE (0x2091c) 37 #define CDREX_LPDDR3PHY_CON3 (0x20a20) 38 #define CDREX_LPDDR3PHY_CLKM_SRC (0x20700) 39 #define EXYNOS5_TIMING_SET_SWI BIT(28) 40 #define USE_MX_MSPLL_TIMINGS (1) 41 #define USE_BPLL_TIMINGS (0) 42 #define EXYNOS5_AREF_NORMAL (0x2e) 43 44 #define DREX_PPCCLKCON (0x0130) 45 #define DREX_PEREV2CONFIG (0x013c) 46 #define DREX_PMNC_PPC (0xE000) 47 #define DREX_CNTENS_PPC (0xE010) 48 #define DREX_CNTENC_PPC (0xE020) 49 #define DREX_INTENS_PPC (0xE030) 50 #define DREX_INTENC_PPC (0xE040) 51 #define DREX_FLAG_PPC (0xE050) 52 #define DREX_PMCNT2_PPC (0xE130) 53 54 /* 55 * A value for register DREX_PMNC_PPC which should be written to reset 56 * the cycle counter CCNT (a reference wall clock). It sets zero to the 57 * CCNT counter. 
 */
#define CC_RESET		BIT(2)

/*
 * A value for register DREX_PMNC_PPC which does the reset of all performance
 * counters to zero.
 */
#define PPC_COUNTER_RESET	BIT(1)

/*
 * Enables all configured counters (including cycle counter). The value should
 * be written to the register DREX_PMNC_PPC.
 */
#define PPC_ENABLE		BIT(0)

/* A value for register DREX_PPCCLKCON which enables performance events clock.
 * Must be written before first access to the performance counters register
 * set, otherwise it could crash.
 */
#define PEREV_CLK_EN		BIT(0)

/*
 * Values which are used to enable counters, interrupts or configure flags of
 * the performance counters. They configure counter 2 and cycle counter.
 */
#define PERF_CNT2		BIT(2)
#define PERF_CCNT		BIT(31)

/*
 * Performance event types which are used for setting the preferred event
 * to track in the counters.
 * There is a set of different types, the values are from range 0 to 0x6f.
 * These settings should be written to the configuration register which manages
 * the type of the event (register DREX_PEREV2CONFIG).
 */
#define READ_TRANSFER_CH0	(0x6d)
#define READ_TRANSFER_CH1	(0x6f)

/* Initial value loaded into PMCNT2; samples gathered = 0xffffffff - start */
#define PERF_COUNTER_START_VALUE 0xff000000
/* Overflow-interval threshold (ns) separating "high load" from "low load" */
#define PERF_EVENT_UP_DOWN_THRESHOLD 900000000ULL

/**
 * struct dmc_opp_table - Operating level description
 * @freq_hz: frequency of the operating point, in Hz
 * @volt_uv: voltage of the operating point, in uV
 *
 * Covers frequency and voltage settings of the DMC operating mode.
 */
struct dmc_opp_table {
	u32 freq_hz;
	u32 volt_uv;
};

/**
 * struct exynos5_dmc - main structure describing DMC device
 *
 * The main structure for the Dynamic Memory Controller which covers clocks,
 * memory regions, HW information, parameters and current operating mode.
 */
struct exynos5_dmc {
	struct device *dev;
	struct devfreq *df;
	struct devfreq_simple_ondemand_data gov_data;
	void __iomem *base_drexi0;	/* DREX channel 0 registers */
	void __iomem *base_drexi1;	/* DREX channel 1 registers */
	struct regmap *clk_regmap;	/* CMU (clock controller) regmap */
	/* Protects curr_rate and frequency/voltage setting section */
	struct mutex lock;
	unsigned long curr_rate;	/* current DMC frequency, in Hz */
	unsigned long curr_volt;	/* current MIF voltage, in uV */
	unsigned long bypass_rate;
	struct dmc_opp_table *opp;	/* array of opp_count entries */
	struct dmc_opp_table opp_bypass;
	int opp_count;
	u32 timings_arr_size;
	/* Per-OPP precomputed timing register values (one entry per OPP) */
	u32 *timing_row;
	u32 *timing_data;
	u32 *timing_power;
	const struct lpddr3_timings *timings;
	const struct lpddr3_min_tck *min_tck;
	/* Timing register values used while running from the bypass clock */
	u32 bypass_timing_row;
	u32 bypass_timing_data;
	u32 bypass_timing_power;
	struct regulator *vdd_mif;
	struct clk *fout_spll;
	struct clk *fout_bpll;
	struct clk *mout_spll;
	struct clk *mout_bpll;
	struct clk *mout_mclk_cdrex;
	struct clk *mout_mx_mspll_ccore;
	struct clk *mx_mspll_ccore_phy;
	struct clk *mout_mx_mspll_ccore_phy;
	struct devfreq_event_dev **counter;	/* performance event devices */
	int num_counters;
	/* Timestamps of the last counter overflow, per DREX channel */
	u64 last_overflow_ts[2];
	/* Utilization reported to devfreq when running in IRQ mode */
	unsigned long load;
	unsigned long total;
	bool in_irq_mode;
};

/* Builds one bitfield descriptor for a timing register */
#define TIMING_FIELD(t_name, t_bit_beg, t_bit_end) \
	{ .name = t_name, .bit_beg = t_bit_beg, .bit_end = t_bit_end }

/* Shifts a timing value into position for its register field */
#define TIMING_VAL2REG(timing, t_val)			\
({							\
		u32 __val;				\
		__val = (t_val) << (timing)->bit_beg;	\
		__val;					\
})

struct timing_reg {
	char *name;	/* JEDEC timing parameter name */
	int bit_beg;	/* first (lowest) bit of the field */
	int bit_end;	/* last (highest) bit of the field */
	unsigned int val;
};

/* Field layout of TIMINGROW0/1 registers */
static const struct timing_reg timing_row_reg_fields[] = {
	TIMING_FIELD("tRFC", 24, 31),
	TIMING_FIELD("tRRD", 20, 23),
	TIMING_FIELD("tRP", 16, 19),
	TIMING_FIELD("tRCD", 12, 15),
	TIMING_FIELD("tRC", 6, 11),
	TIMING_FIELD("tRAS", 0, 5),
};

/* Field layout of TIMINGDATA0/1 registers */
static const struct timing_reg timing_data_reg_fields[] = {
	TIMING_FIELD("tWTR", 28, 31),
	TIMING_FIELD("tWR", 24, 27),
	TIMING_FIELD("tRTP", 20, 23),
	TIMING_FIELD("tW2W-C2C", 14, 14),
	TIMING_FIELD("tR2R-C2C", 12, 12),
	TIMING_FIELD("WL", 8, 11),
	TIMING_FIELD("tDQSCK", 4, 7),
	TIMING_FIELD("RL", 0, 3),
};

/* Field layout of TIMINGPOWER0/1 registers */
static const struct timing_reg timing_power_reg_fields[] = {
	TIMING_FIELD("tFAW", 26, 31),
	TIMING_FIELD("tXSR", 16, 25),
	TIMING_FIELD("tXP", 8, 15),
	TIMING_FIELD("tCKE", 4, 7),
	TIMING_FIELD("tMRD", 0, 3),
};

#define TIMING_COUNT (ARRAY_SIZE(timing_row_reg_fields) + \
		      ARRAY_SIZE(timing_data_reg_fields) + \
		      ARRAY_SIZE(timing_power_reg_fields))

/* Re-arm every registered performance-event device; 0 or first error */
static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
{
	int i, ret;

	for (i = 0; i < dmc->num_counters; i++) {
		if (!dmc->counter[i])
			continue;
		ret = devfreq_event_set_event(dmc->counter[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Enable every registered performance-event device; 0 or first error */
static int exynos5_counters_enable_edev(struct exynos5_dmc *dmc)
{
	int i, ret;

	for (i = 0; i < dmc->num_counters; i++) {
		if (!dmc->counter[i])
			continue;
		ret = devfreq_event_enable_edev(dmc->counter[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Disable every registered performance-event device; 0 or first error */
static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
{
	int i, ret;

	for (i = 0; i < dmc->num_counters; i++) {
		if (!dmc->counter[i])
			continue;
		ret = devfreq_event_disable_edev(dmc->counter[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/**
 * find_target_freq_idx() - Finds requested frequency in local DMC configuration
 * @dmc: device for which the information is checked
 * @target_rate: requested frequency in KHz
 *
 * Seeks in the local DMC driver structure for the requested frequency value
 * and returns index or error value.
254 */ 255 static int find_target_freq_idx(struct exynos5_dmc *dmc, 256 unsigned long target_rate) 257 { 258 int i; 259 260 for (i = dmc->opp_count - 1; i >= 0; i--) 261 if (dmc->opp[i].freq_hz <= target_rate) 262 return i; 263 264 return -EINVAL; 265 } 266 267 /** 268 * exynos5_switch_timing_regs() - Changes bank register set for DRAM timings 269 * @dmc: device for which the new settings is going to be applied 270 * @set: boolean variable passing set value 271 * 272 * Changes the register set, which holds timing parameters. 273 * There is two register sets: 0 and 1. The register set 0 274 * is used in normal operation when the clock is provided from main PLL. 275 * The bank register set 1 is used when the main PLL frequency is going to be 276 * changed and the clock is taken from alternative, stable source. 277 * This function switches between these banks according to the 278 * currently used clock source. 279 */ 280 static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set) 281 { 282 unsigned int reg; 283 int ret; 284 285 ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, ®); 286 if (ret) 287 return ret; 288 289 if (set) 290 reg |= EXYNOS5_TIMING_SET_SWI; 291 else 292 reg &= ~EXYNOS5_TIMING_SET_SWI; 293 294 regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg); 295 296 return 0; 297 } 298 299 /** 300 * exynos5_init_freq_table() - Initialized PM OPP framework 301 * @dmc: DMC device for which the frequencies are used for OPP init 302 * @profile: devfreq device's profile 303 * 304 * Populate the devfreq device's OPP table based on current frequency, voltage. 
305 */ 306 static int exynos5_init_freq_table(struct exynos5_dmc *dmc, 307 struct devfreq_dev_profile *profile) 308 { 309 int i, ret; 310 int idx; 311 unsigned long freq; 312 313 ret = dev_pm_opp_of_add_table(dmc->dev); 314 if (ret < 0) { 315 dev_err(dmc->dev, "Failed to get OPP table\n"); 316 return ret; 317 } 318 319 dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev); 320 321 dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count, 322 sizeof(struct dmc_opp_table), GFP_KERNEL); 323 if (!dmc->opp) 324 goto err_opp; 325 326 idx = dmc->opp_count - 1; 327 for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) { 328 struct dev_pm_opp *opp; 329 330 opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq); 331 if (IS_ERR(opp)) 332 goto err_opp; 333 334 dmc->opp[idx - i].freq_hz = freq; 335 dmc->opp[idx - i].volt_uv = dev_pm_opp_get_voltage(opp); 336 337 dev_pm_opp_put(opp); 338 } 339 340 return 0; 341 342 err_opp: 343 dev_pm_opp_of_remove_table(dmc->dev); 344 345 return -EINVAL; 346 } 347 348 /** 349 * exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings 350 * @dmc: device for which the new settings is going to be applied 351 * @param: DRAM parameters which passes timing data 352 * 353 * Low-level function for changing timings for DRAM memory clocking from 354 * 'bypass' clock source (fixed frequency @400MHz). 355 * It uses timing bank registers set 1. 
 */
static void exynos5_set_bypass_dram_timings(struct exynos5_dmc *dmc)
{
	/* Conservative auto-refresh period, safe for the fixed bypass rate */
	writel(EXYNOS5_AREF_NORMAL,
	       dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);

	/* Mirror the bypass timings into bank set 1 of both DREX channels */
	writel(dmc->bypass_timing_row,
	       dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW1);
	writel(dmc->bypass_timing_row,
	       dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW1);
	writel(dmc->bypass_timing_data,
	       dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA1);
	writel(dmc->bypass_timing_data,
	       dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA1);
	writel(dmc->bypass_timing_power,
	       dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER1);
	writel(dmc->bypass_timing_power,
	       dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER1);
}

/**
 * exynos5_dram_change_timings() - Low-level changes of the DRAM final timings
 * @dmc: device for which the new settings is going to be applied
 * @target_rate: target frequency of the DMC
 *
 * Low-level function for changing timings for DRAM memory operating from main
 * clock source (BPLL), which can have different frequencies. Thus, each
 * frequency must have corresponding timings register values in order to keep
 * the needed delays.
 * It uses timing bank registers set 0.
386 */ 387 static int exynos5_dram_change_timings(struct exynos5_dmc *dmc, 388 unsigned long target_rate) 389 { 390 int idx; 391 392 for (idx = dmc->opp_count - 1; idx >= 0; idx--) 393 if (dmc->opp[idx].freq_hz <= target_rate) 394 break; 395 396 if (idx < 0) 397 return -EINVAL; 398 399 writel(EXYNOS5_AREF_NORMAL, 400 dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF); 401 402 writel(dmc->timing_row[idx], 403 dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW0); 404 writel(dmc->timing_row[idx], 405 dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW0); 406 writel(dmc->timing_data[idx], 407 dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA0); 408 writel(dmc->timing_data[idx], 409 dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA0); 410 writel(dmc->timing_power[idx], 411 dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER0); 412 writel(dmc->timing_power[idx], 413 dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER0); 414 415 return 0; 416 } 417 418 /** 419 * exynos5_dmc_align_target_voltage() - Sets the final voltage for the DMC 420 * @dmc: device for which it is going to be set 421 * @target_volt: new voltage which is chosen to be final 422 * 423 * Function tries to align voltage to the safe level for 'normal' mode. 424 * It checks the need of higher voltage and changes the value. The target 425 * voltage might be lower that currently set and still the system will be 426 * stable. 427 */ 428 static int exynos5_dmc_align_target_voltage(struct exynos5_dmc *dmc, 429 unsigned long target_volt) 430 { 431 int ret = 0; 432 433 if (dmc->curr_volt <= target_volt) 434 return 0; 435 436 ret = regulator_set_voltage(dmc->vdd_mif, target_volt, 437 target_volt); 438 if (!ret) 439 dmc->curr_volt = target_volt; 440 441 return ret; 442 } 443 444 /** 445 * exynos5_dmc_align_bypass_voltage() - Sets the voltage for the DMC 446 * @dmc: device for which it is going to be set 447 * @target_volt: new voltage which is chosen to be final 448 * 449 * Function tries to align voltage to the safe level for the 'bypass' mode. 
450 * It checks the need of higher voltage and changes the value. 451 * The target voltage must not be less than currently needed, because 452 * for current frequency the device might become unstable. 453 */ 454 static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc, 455 unsigned long target_volt) 456 { 457 int ret = 0; 458 unsigned long bypass_volt = dmc->opp_bypass.volt_uv; 459 460 target_volt = max(bypass_volt, target_volt); 461 462 if (dmc->curr_volt >= target_volt) 463 return 0; 464 465 ret = regulator_set_voltage(dmc->vdd_mif, target_volt, 466 target_volt); 467 if (!ret) 468 dmc->curr_volt = target_volt; 469 470 return ret; 471 } 472 473 /** 474 * exynos5_dmc_align_bypass_dram_timings() - Chooses and sets DRAM timings 475 * @dmc: device for which it is going to be set 476 * @target_rate: new frequency which is chosen to be final 477 * 478 * Function changes the DRAM timings for the temporary 'bypass' mode. 479 */ 480 static int exynos5_dmc_align_bypass_dram_timings(struct exynos5_dmc *dmc, 481 unsigned long target_rate) 482 { 483 int idx = find_target_freq_idx(dmc, target_rate); 484 485 if (idx < 0) 486 return -EINVAL; 487 488 exynos5_set_bypass_dram_timings(dmc); 489 490 return 0; 491 } 492 493 /** 494 * exynos5_dmc_switch_to_bypass_configuration() - Switching to temporary clock 495 * @dmc: DMC device for which the switching is going to happen 496 * @target_rate: new frequency which is going to be set as a final 497 * @target_volt: new voltage which is going to be set as a final 498 * 499 * Function configures DMC and clocks for operating in temporary 'bypass' mode. 500 * This mode is used only temporary but if required, changes voltage and timings 501 * for DRAM chips. It switches the main clock to stable clock source for the 502 * period of the main PLL reconfiguration. 
503 */ 504 static int 505 exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc, 506 unsigned long target_rate, 507 unsigned long target_volt) 508 { 509 int ret; 510 511 /* 512 * Having higher voltage for a particular frequency does not harm 513 * the chip. Use it for the temporary frequency change when one 514 * voltage manipulation might be avoided. 515 */ 516 ret = exynos5_dmc_align_bypass_voltage(dmc, target_volt); 517 if (ret) 518 return ret; 519 520 /* 521 * Longer delays for DRAM does not cause crash, the opposite does. 522 */ 523 ret = exynos5_dmc_align_bypass_dram_timings(dmc, target_rate); 524 if (ret) 525 return ret; 526 527 /* 528 * Delays are long enough, so use them for the new coming clock. 529 */ 530 ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS); 531 532 return ret; 533 } 534 535 /** 536 * exynos5_dmc_change_freq_and_volt() - Changes voltage and frequency of the DMC 537 * using safe procedure 538 * @dmc: device for which the frequency is going to be changed 539 * @target_rate: requested new frequency 540 * @target_volt: requested voltage which corresponds to the new frequency 541 * 542 * The DMC frequency change procedure requires a few steps. 543 * The main requirement is to change the clock source in the clk mux 544 * for the time of main clock PLL locking. The assumption is that the 545 * alternative clock source set as parent is stable. 546 * The second parent's clock frequency is fixed to 400MHz, it is named 'bypass' 547 * clock. This requires alignment in DRAM timing parameters for the new 548 * T-period. There is two bank sets for keeping DRAM 549 * timings: set 0 and set 1. The set 0 is used when main clock source is 550 * chosen. The 2nd set of regs is used for 'bypass' clock. Switching between 551 * the two bank sets is part of the process. 552 * The voltage must also be aligned to the minimum required level. There is 553 * this intermediate step with switching to 'bypass' parent clock source. 
554 * if the old voltage is lower, it requires an increase of the voltage level. 555 * The complexity of the voltage manipulation is hidden in low level function. 556 * In this function there is last alignment of the voltage level at the end. 557 */ 558 static int 559 exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc, 560 unsigned long target_rate, 561 unsigned long target_volt) 562 { 563 int ret; 564 565 ret = exynos5_dmc_switch_to_bypass_configuration(dmc, target_rate, 566 target_volt); 567 if (ret) 568 return ret; 569 570 /* 571 * Voltage is set at least to a level needed for this frequency, 572 * so switching clock source is safe now. 573 */ 574 clk_prepare_enable(dmc->fout_spll); 575 clk_prepare_enable(dmc->mout_spll); 576 clk_prepare_enable(dmc->mout_mx_mspll_ccore); 577 578 ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_mx_mspll_ccore); 579 if (ret) 580 goto disable_clocks; 581 582 /* 583 * We are safe to increase the timings for current bypass frequency. 584 * Thanks to this the settings will be ready for the upcoming clock 585 * source change. 586 */ 587 exynos5_dram_change_timings(dmc, target_rate); 588 589 clk_set_rate(dmc->fout_bpll, target_rate); 590 591 ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS); 592 if (ret) 593 goto disable_clocks; 594 595 ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll); 596 if (ret) 597 goto disable_clocks; 598 599 /* 600 * Make sure if the voltage is not from 'bypass' settings and align to 601 * the right level for power efficiency. 602 */ 603 ret = exynos5_dmc_align_target_voltage(dmc, target_volt); 604 605 disable_clocks: 606 clk_disable_unprepare(dmc->mout_mx_mspll_ccore); 607 clk_disable_unprepare(dmc->mout_spll); 608 clk_disable_unprepare(dmc->fout_spll); 609 610 return ret; 611 } 612 613 /** 614 * exynos5_dmc_get_volt_freq() - Gets the frequency and voltage from the OPP 615 * table. 
616 * @dmc: device for which the frequency is going to be changed 617 * @freq: requested frequency in KHz 618 * @target_rate: returned frequency which is the same or lower than 619 * requested 620 * @target_volt: returned voltage which corresponds to the returned 621 * frequency 622 * 623 * Function gets requested frequency and checks OPP framework for needed 624 * frequency and voltage. It populates the values 'target_rate' and 625 * 'target_volt' or returns error value when OPP framework fails. 626 */ 627 static int exynos5_dmc_get_volt_freq(struct exynos5_dmc *dmc, 628 unsigned long *freq, 629 unsigned long *target_rate, 630 unsigned long *target_volt, u32 flags) 631 { 632 struct dev_pm_opp *opp; 633 634 opp = devfreq_recommended_opp(dmc->dev, freq, flags); 635 if (IS_ERR(opp)) 636 return PTR_ERR(opp); 637 638 *target_rate = dev_pm_opp_get_freq(opp); 639 *target_volt = dev_pm_opp_get_voltage(opp); 640 dev_pm_opp_put(opp); 641 642 return 0; 643 } 644 645 /** 646 * exynos5_dmc_target() - Function responsible for changing frequency of DMC 647 * @dev: device for which the frequency is going to be changed 648 * @freq: requested frequency in KHz 649 * @flags: flags provided for this frequency change request 650 * 651 * An entry function provided to the devfreq framework which provides frequency 652 * change of the DMC. The function gets the possible rate from OPP table based 653 * on requested frequency. It calls the next function responsible for the 654 * frequency and voltage change. In case of failure, does not set 'curr_rate' 655 * and returns error value to the framework. 
656 */ 657 static int exynos5_dmc_target(struct device *dev, unsigned long *freq, 658 u32 flags) 659 { 660 struct exynos5_dmc *dmc = dev_get_drvdata(dev); 661 unsigned long target_rate = 0; 662 unsigned long target_volt = 0; 663 int ret; 664 665 ret = exynos5_dmc_get_volt_freq(dmc, freq, &target_rate, &target_volt, 666 flags); 667 668 if (ret) 669 return ret; 670 671 if (target_rate == dmc->curr_rate) 672 return 0; 673 674 mutex_lock(&dmc->lock); 675 676 ret = exynos5_dmc_change_freq_and_volt(dmc, target_rate, target_volt); 677 678 if (ret) { 679 mutex_unlock(&dmc->lock); 680 return ret; 681 } 682 683 dmc->curr_rate = target_rate; 684 685 mutex_unlock(&dmc->lock); 686 return 0; 687 } 688 689 /** 690 * exynos5_counters_get() - Gets the performance counters values. 691 * @dmc: device for which the counters are going to be checked 692 * @load_count: variable which is populated with counter value 693 * @total_count: variable which is used as 'wall clock' reference 694 * 695 * Function which provides performance counters values. It sums up counters for 696 * two DMC channels. The 'total_count' is used as a reference and max value. 697 * The ratio 'load_count/total_count' shows the busy percentage [0%, 100%]. 
 */
static int exynos5_counters_get(struct exynos5_dmc *dmc,
				unsigned long *load_count,
				unsigned long *total_count)
{
	unsigned long total = 0;
	struct devfreq_event_data event;
	int ret, i;

	*load_count = 0;

	/* Take into account only read+write counters, but stop all */
	for (i = 0; i < dmc->num_counters; i++) {
		if (!dmc->counter[i])
			continue;

		ret = devfreq_event_get_event(dmc->counter[i], &event);
		if (ret < 0)
			return ret;

		/* Sum the load across channels ... */
		*load_count += event.load_count;

		/* ... but take the largest wall-clock reference seen */
		if (total < event.total_count)
			total = event.total_count;
	}

	*total_count = total;

	return 0;
}

/**
 * exynos5_dmc_start_perf_events() - Setup and start performance event counters
 * @dmc: device for which the counters are going to be checked
 * @beg_value: initial value for the counter
 *
 * Function which enables needed counters, interrupts and sets initial values
 * then starts the counters.
 */
static void exynos5_dmc_start_perf_events(struct exynos5_dmc *dmc,
					  u32 beg_value)
{
	/* Enable interrupts for counter 2 */
	writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENS_PPC);
	writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENS_PPC);

	/* Enable counter 2 and CCNT */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENS_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENS_PPC);

	/* Clear overflow flag for all counters */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);

	/* Reset all counters */
	writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi1 + DREX_PMNC_PPC);

	/*
	 * Set start value for the counters, the number of samples that
	 * will be gathered is calculated as: 0xffffffff - beg_value
	 */
	writel(beg_value, dmc->base_drexi0 + DREX_PMCNT2_PPC);
	writel(beg_value, dmc->base_drexi1 + DREX_PMCNT2_PPC);

	/* Start all counters */
	writel(PPC_ENABLE, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(PPC_ENABLE, dmc->base_drexi1 + DREX_PMNC_PPC);
}

/**
 * exynos5_dmc_perf_events_calc() - Calculate utilization
 * @dmc: device for which the counters are going to be checked
 * @diff_ts: time between last interrupt and current one
 *
 * Function which calculates needed utilization for the devfreq governor.
 * It prepares values for 'busy_time' and 'total_time' based on elapsed time
 * between interrupts, which approximates utilization.
 */
static void exynos5_dmc_perf_events_calc(struct exynos5_dmc *dmc, u64 diff_ts)
{
	/*
	 * This is a simple algorithm for managing traffic on DMC.
	 * When there is almost no load the counters overflow every 4s,
	 * no mater the DMC frequency.
	 * The high load might be approximated using linear function.
	 * Knowing that, simple calculation can provide 'busy_time' and
	 * 'total_time' to the devfreq governor which picks up target
	 * frequency.
	 * We want a fast ramp up and slow decay in frequency change function.
	 */
	if (diff_ts < PERF_EVENT_UP_DOWN_THRESHOLD) {
		/*
		 * Set higher utilization for the simple_ondemand governor.
		 * The governor should increase the frequency of the DMC.
		 */
		dmc->load = 70;
		dmc->total = 100;
	} else {
		/*
		 * Set low utilization for the simple_ondemand governor.
		 * The governor should decrease the frequency of the DMC.
		 */
		dmc->load = 35;
		dmc->total = 100;
	}

	dev_dbg(dmc->dev, "diff_ts=%llu\n", diff_ts);
}

/**
 * exynos5_dmc_perf_events_check() - Checks the status of the counters
 * @dmc: device for which the counters are going to be checked
 *
 * Function which is called from threaded IRQ to check the counters state
 * and to call approximation for the needed utilization.
 */
static void exynos5_dmc_perf_events_check(struct exynos5_dmc *dmc)
{
	u32 val;
	u64 diff_ts, ts;

	ts = ktime_get_ns();

	/* Stop all counters */
	writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);

	/* Check the source in interrupt flag registers (which channel) */
	val = readl(dmc->base_drexi0 + DREX_FLAG_PPC);
	if (val) {
		diff_ts = ts - dmc->last_overflow_ts[0];
		dmc->last_overflow_ts[0] = ts;
		dev_dbg(dmc->dev, "drex0 0xE050 val= 0x%08x\n", val);
	} else {
		/* Channel 0 did not overflow, so it must be channel 1 */
		val = readl(dmc->base_drexi1 + DREX_FLAG_PPC);
		diff_ts = ts - dmc->last_overflow_ts[1];
		dmc->last_overflow_ts[1] = ts;
		dev_dbg(dmc->dev, "drex1 0xE050 val= 0x%08x\n", val);
	}

	exynos5_dmc_perf_events_calc(dmc, diff_ts);

	/* Re-arm the counters for the next overflow interval */
	exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
}

/**
 * exynos5_dmc_enable_perf_events() - Enable performance events
 * @dmc: device for which the counters are going to be checked
 *
 * Function which sets up the needed environment and enables counters.
 */
static void exynos5_dmc_enable_perf_events(struct exynos5_dmc *dmc)
{
	u64 ts;

	/* Enable Performance Event Clock */
	writel(PEREV_CLK_EN, dmc->base_drexi0 + DREX_PPCCLKCON);
	writel(PEREV_CLK_EN, dmc->base_drexi1 + DREX_PPCCLKCON);

	/* Select read transfers as performance event2 */
	writel(READ_TRANSFER_CH0, dmc->base_drexi0 + DREX_PEREV2CONFIG);
	writel(READ_TRANSFER_CH1, dmc->base_drexi1 + DREX_PEREV2CONFIG);

	/* Seed the overflow timestamps for both channels */
	ts = ktime_get_ns();
	dmc->last_overflow_ts[0] = ts;
	dmc->last_overflow_ts[1] = ts;

	/* Devfreq shouldn't be faster than initialization, play safe though. */
	dmc->load = 99;
	dmc->total = 100;
}

/**
 * exynos5_dmc_disable_perf_events() - Disable performance events
 * @dmc: device for which the counters are going to be checked
 *
 * Function which stops, disables performance event counters and interrupts.
 */
static void exynos5_dmc_disable_perf_events(struct exynos5_dmc *dmc)
{
	/* Stop all counters */
	writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
	writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);

	/* Disable interrupts for counter 2 */
	writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENC_PPC);
	writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENC_PPC);

	/* Disable counter 2 and CCNT */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENC_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENC_PPC);

	/* Clear overflow flag for all counters */
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
	writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
}

/**
 * exynos5_dmc_get_status() - Read current DMC performance statistics.
 * @dev: device for which the statistics are requested
 * @stat: structure which has statistic fields
 *
 * Function reads the DMC performance counters and calculates 'busy_time'
 * and 'total_time'.
To protect from overflow, the values are shifted right 903 * by 10. After read out the counters are setup to count again. 904 */ 905 static int exynos5_dmc_get_status(struct device *dev, 906 struct devfreq_dev_status *stat) 907 { 908 struct exynos5_dmc *dmc = dev_get_drvdata(dev); 909 unsigned long load, total; 910 int ret; 911 912 if (dmc->in_irq_mode) { 913 mutex_lock(&dmc->lock); 914 stat->current_frequency = dmc->curr_rate; 915 mutex_unlock(&dmc->lock); 916 917 stat->busy_time = dmc->load; 918 stat->total_time = dmc->total; 919 } else { 920 ret = exynos5_counters_get(dmc, &load, &total); 921 if (ret < 0) 922 return -EINVAL; 923 924 /* To protect from overflow, divide by 1024 */ 925 stat->busy_time = load >> 10; 926 stat->total_time = total >> 10; 927 928 ret = exynos5_counters_set_event(dmc); 929 if (ret < 0) { 930 dev_err(dev, "could not set event counter\n"); 931 return ret; 932 } 933 } 934 935 return 0; 936 } 937 938 /** 939 * exynos5_dmc_get_cur_freq() - Function returns current DMC frequency 940 * @dev: device for which the framework checks operating frequency 941 * @freq: returned frequency value 942 * 943 * It returns the currently used frequency of the DMC. The real operating 944 * frequency might be lower when the clock source value could not be divided 945 * to the requested value. 946 */ 947 static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq) 948 { 949 struct exynos5_dmc *dmc = dev_get_drvdata(dev); 950 951 mutex_lock(&dmc->lock); 952 *freq = dmc->curr_rate; 953 mutex_unlock(&dmc->lock); 954 955 return 0; 956 } 957 958 /** 959 * exynos5_dmc_df_profile - Devfreq governor's profile structure 960 * 961 * It provides to the devfreq framework needed functions and polling period. 
 */
static struct devfreq_dev_profile exynos5_dmc_df_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.target = exynos5_dmc_target,
	.get_dev_status = exynos5_dmc_get_status,
	.get_cur_freq = exynos5_dmc_get_cur_freq,
};

/**
 * exynos5_dmc_align_init_freq() - Align initial frequency value
 * @dmc: device for which the frequency is going to be set
 * @bootloader_init_freq: initial frequency set by the bootloader in KHz
 *
 * The initial bootloader frequency, which is present during boot, might be
 * different that supported frequency values in the driver. It is possible
 * due to different PLL settings or used PLL as a source.
 * This function provides the 'initial_freq' for the devfreq framework
 * statistics engine which supports only registered values. Thus, some alignment
 * must be made.
 */
static unsigned long
exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
			    unsigned long bootloader_init_freq)
{
	unsigned long aligned_freq;
	int idx;

	idx = find_target_freq_idx(dmc, bootloader_init_freq);
	if (idx >= 0)
		aligned_freq = dmc->opp[idx].freq_hz;
	else
		/* No OPP at or below the boot rate; fall back to the highest */
		aligned_freq = dmc->opp[dmc->opp_count - 1].freq_hz;

	return aligned_freq;
}

/**
 * create_timings_aligned() - Create register values and align with standard
 * @dmc: device for which the frequency is going to be set
 * @reg_timing_row: output value for the TIMINGROW register
 * @reg_timing_data: output value for the TIMINGDATA register
 * @reg_timing_power: output value for the TIMINGPOWER register
 * @clk_period_ps: the period of the clock, known as tCK
 *
 * The function calculates timings and creates a register value ready for
 * a frequency transition. The register contains a few timings. They are
 * shifted by a known offset. The timing value is calculated based on memory
 * specification: minimal time required and minimal cycles required.
1008 */ 1009 static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row, 1010 u32 *reg_timing_data, u32 *reg_timing_power, 1011 u32 clk_period_ps) 1012 { 1013 u32 val; 1014 const struct timing_reg *reg; 1015 1016 if (clk_period_ps == 0) 1017 return -EINVAL; 1018 1019 *reg_timing_row = 0; 1020 *reg_timing_data = 0; 1021 *reg_timing_power = 0; 1022 1023 val = dmc->timings->tRFC / clk_period_ps; 1024 val += dmc->timings->tRFC % clk_period_ps ? 1 : 0; 1025 val = max(val, dmc->min_tck->tRFC); 1026 reg = &timing_row_reg_fields[0]; 1027 *reg_timing_row |= TIMING_VAL2REG(reg, val); 1028 1029 val = dmc->timings->tRRD / clk_period_ps; 1030 val += dmc->timings->tRRD % clk_period_ps ? 1 : 0; 1031 val = max(val, dmc->min_tck->tRRD); 1032 reg = &timing_row_reg_fields[1]; 1033 *reg_timing_row |= TIMING_VAL2REG(reg, val); 1034 1035 val = dmc->timings->tRPab / clk_period_ps; 1036 val += dmc->timings->tRPab % clk_period_ps ? 1 : 0; 1037 val = max(val, dmc->min_tck->tRPab); 1038 reg = &timing_row_reg_fields[2]; 1039 *reg_timing_row |= TIMING_VAL2REG(reg, val); 1040 1041 val = dmc->timings->tRCD / clk_period_ps; 1042 val += dmc->timings->tRCD % clk_period_ps ? 1 : 0; 1043 val = max(val, dmc->min_tck->tRCD); 1044 reg = &timing_row_reg_fields[3]; 1045 *reg_timing_row |= TIMING_VAL2REG(reg, val); 1046 1047 val = dmc->timings->tRC / clk_period_ps; 1048 val += dmc->timings->tRC % clk_period_ps ? 1 : 0; 1049 val = max(val, dmc->min_tck->tRC); 1050 reg = &timing_row_reg_fields[4]; 1051 *reg_timing_row |= TIMING_VAL2REG(reg, val); 1052 1053 val = dmc->timings->tRAS / clk_period_ps; 1054 val += dmc->timings->tRAS % clk_period_ps ? 1 : 0; 1055 val = max(val, dmc->min_tck->tRAS); 1056 reg = &timing_row_reg_fields[5]; 1057 *reg_timing_row |= TIMING_VAL2REG(reg, val); 1058 1059 /* data related timings */ 1060 val = dmc->timings->tWTR / clk_period_ps; 1061 val += dmc->timings->tWTR % clk_period_ps ? 
1 : 0; 1062 val = max(val, dmc->min_tck->tWTR); 1063 reg = &timing_data_reg_fields[0]; 1064 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1065 1066 val = dmc->timings->tWR / clk_period_ps; 1067 val += dmc->timings->tWR % clk_period_ps ? 1 : 0; 1068 val = max(val, dmc->min_tck->tWR); 1069 reg = &timing_data_reg_fields[1]; 1070 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1071 1072 val = dmc->timings->tRTP / clk_period_ps; 1073 val += dmc->timings->tRTP % clk_period_ps ? 1 : 0; 1074 val = max(val, dmc->min_tck->tRTP); 1075 reg = &timing_data_reg_fields[2]; 1076 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1077 1078 val = dmc->timings->tW2W_C2C / clk_period_ps; 1079 val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0; 1080 val = max(val, dmc->min_tck->tW2W_C2C); 1081 reg = &timing_data_reg_fields[3]; 1082 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1083 1084 val = dmc->timings->tR2R_C2C / clk_period_ps; 1085 val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0; 1086 val = max(val, dmc->min_tck->tR2R_C2C); 1087 reg = &timing_data_reg_fields[4]; 1088 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1089 1090 val = dmc->timings->tWL / clk_period_ps; 1091 val += dmc->timings->tWL % clk_period_ps ? 1 : 0; 1092 val = max(val, dmc->min_tck->tWL); 1093 reg = &timing_data_reg_fields[5]; 1094 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1095 1096 val = dmc->timings->tDQSCK / clk_period_ps; 1097 val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0; 1098 val = max(val, dmc->min_tck->tDQSCK); 1099 reg = &timing_data_reg_fields[6]; 1100 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1101 1102 val = dmc->timings->tRL / clk_period_ps; 1103 val += dmc->timings->tRL % clk_period_ps ? 1 : 0; 1104 val = max(val, dmc->min_tck->tRL); 1105 reg = &timing_data_reg_fields[7]; 1106 *reg_timing_data |= TIMING_VAL2REG(reg, val); 1107 1108 /* power related timings */ 1109 val = dmc->timings->tFAW / clk_period_ps; 1110 val += dmc->timings->tFAW % clk_period_ps ? 
1 : 0; 1111 val = max(val, dmc->min_tck->tFAW); 1112 reg = &timing_power_reg_fields[0]; 1113 *reg_timing_power |= TIMING_VAL2REG(reg, val); 1114 1115 val = dmc->timings->tXSR / clk_period_ps; 1116 val += dmc->timings->tXSR % clk_period_ps ? 1 : 0; 1117 val = max(val, dmc->min_tck->tXSR); 1118 reg = &timing_power_reg_fields[1]; 1119 *reg_timing_power |= TIMING_VAL2REG(reg, val); 1120 1121 val = dmc->timings->tXP / clk_period_ps; 1122 val += dmc->timings->tXP % clk_period_ps ? 1 : 0; 1123 val = max(val, dmc->min_tck->tXP); 1124 reg = &timing_power_reg_fields[2]; 1125 *reg_timing_power |= TIMING_VAL2REG(reg, val); 1126 1127 val = dmc->timings->tCKE / clk_period_ps; 1128 val += dmc->timings->tCKE % clk_period_ps ? 1 : 0; 1129 val = max(val, dmc->min_tck->tCKE); 1130 reg = &timing_power_reg_fields[3]; 1131 *reg_timing_power |= TIMING_VAL2REG(reg, val); 1132 1133 val = dmc->timings->tMRD / clk_period_ps; 1134 val += dmc->timings->tMRD % clk_period_ps ? 1 : 0; 1135 val = max(val, dmc->min_tck->tMRD); 1136 reg = &timing_power_reg_fields[4]; 1137 *reg_timing_power |= TIMING_VAL2REG(reg, val); 1138 1139 return 0; 1140 } 1141 1142 /** 1143 * of_get_dram_timings() - helper function for parsing DT settings for DRAM 1144 * @dmc: device for which the frequency is going to be set 1145 * 1146 * The function parses DT entries with DRAM information. 
1147 */ 1148 static int of_get_dram_timings(struct exynos5_dmc *dmc) 1149 { 1150 int ret = 0; 1151 int idx; 1152 struct device_node *np_ddr; 1153 u32 freq_mhz, clk_period_ps; 1154 1155 np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0); 1156 if (!np_ddr) { 1157 dev_warn(dmc->dev, "could not find 'device-handle' in DT\n"); 1158 return -EINVAL; 1159 } 1160 1161 dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT, 1162 sizeof(u32), GFP_KERNEL); 1163 if (!dmc->timing_row) 1164 return -ENOMEM; 1165 1166 dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT, 1167 sizeof(u32), GFP_KERNEL); 1168 if (!dmc->timing_data) 1169 return -ENOMEM; 1170 1171 dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT, 1172 sizeof(u32), GFP_KERNEL); 1173 if (!dmc->timing_power) 1174 return -ENOMEM; 1175 1176 dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev, 1177 DDR_TYPE_LPDDR3, 1178 &dmc->timings_arr_size); 1179 if (!dmc->timings) { 1180 of_node_put(np_ddr); 1181 dev_warn(dmc->dev, "could not get timings from DT\n"); 1182 return -EINVAL; 1183 } 1184 1185 dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev); 1186 if (!dmc->min_tck) { 1187 of_node_put(np_ddr); 1188 dev_warn(dmc->dev, "could not get tck from DT\n"); 1189 return -EINVAL; 1190 } 1191 1192 /* Sorted array of OPPs with frequency ascending */ 1193 for (idx = 0; idx < dmc->opp_count; idx++) { 1194 freq_mhz = dmc->opp[idx].freq_hz / 1000000; 1195 clk_period_ps = 1000000 / freq_mhz; 1196 1197 ret = create_timings_aligned(dmc, &dmc->timing_row[idx], 1198 &dmc->timing_data[idx], 1199 &dmc->timing_power[idx], 1200 clk_period_ps); 1201 } 1202 1203 of_node_put(np_ddr); 1204 1205 /* Take the highest frequency's timings as 'bypass' */ 1206 dmc->bypass_timing_row = dmc->timing_row[idx - 1]; 1207 dmc->bypass_timing_data = dmc->timing_data[idx - 1]; 1208 dmc->bypass_timing_power = dmc->timing_power[idx - 1]; 1209 1210 return ret; 1211 } 1212 1213 /** 1214 * exynos5_dmc_init_clks() - Initialize 
clocks needed for DMC operation.
 * @dmc: DMC structure containing needed fields
 *
 * Get the needed clocks defined in DT device, enable and set the right parents.
 * Read current frequency and initialize the initial rate for governor.
 */
static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
{
	int ret;
	unsigned long target_volt = 0;
	unsigned long target_rate = 0;
	unsigned int tmp;

	dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll");
	if (IS_ERR(dmc->fout_spll))
		return PTR_ERR(dmc->fout_spll);

	dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll");
	if (IS_ERR(dmc->fout_bpll))
		return PTR_ERR(dmc->fout_bpll);

	dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex");
	if (IS_ERR(dmc->mout_mclk_cdrex))
		return PTR_ERR(dmc->mout_mclk_cdrex);

	dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll");
	if (IS_ERR(dmc->mout_bpll))
		return PTR_ERR(dmc->mout_bpll);

	dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev,
						"mout_mx_mspll_ccore");
	if (IS_ERR(dmc->mout_mx_mspll_ccore))
		return PTR_ERR(dmc->mout_mx_mspll_ccore);

	/* Fall back to "mout_sclk_spll" when "ff_dout_spll2" is absent. */
	dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2");
	if (IS_ERR(dmc->mout_spll)) {
		dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll");
		if (IS_ERR(dmc->mout_spll))
			return PTR_ERR(dmc->mout_spll);
	}

	/*
	 * Read the boot frequency (in Hz) and align it to a registered OPP
	 * so the devfreq statistics start from a known value.
	 */
	dmc->curr_rate = clk_get_rate(dmc->mout_mclk_cdrex);
	dmc->curr_rate = exynos5_dmc_align_init_freq(dmc, dmc->curr_rate);
	exynos5_dmc_df_profile.initial_freq = dmc->curr_rate;

	ret = exynos5_dmc_get_volt_freq(dmc, &dmc->curr_rate, &target_rate,
					&target_volt, 0);
	if (ret)
		return ret;

	dmc->curr_volt = target_volt;

	/* The originals discarded these results; propagate failures. */
	ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
	if (ret)
		return ret;

	dmc->bypass_rate = clk_get_rate(dmc->mout_mx_mspll_ccore);

	ret = clk_prepare_enable(dmc->fout_bpll);
	if (ret)
		return ret;

	ret = clk_prepare_enable(dmc->mout_bpll);
	if (ret) {
		/* Unwind: probe() returns directly on our failure. */
		clk_disable_unprepare(dmc->fout_bpll);
		return ret;
	}

	/*
	 * Some bootloaders do not set clock routes correctly.
	 * Stop one path in clocks to PHY.
	 */
	regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, &tmp);
	tmp &= ~(BIT(1) | BIT(0));
	regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, tmp);

	return 0;
}

/**
 * exynos5_performance_counters_init() - Initializes performance DMC's counters
 * @dmc: DMC for which it does the setup
 *
 * Initialization of performance counters in DMC for estimating usage.
 * The counter's values are used for calculation of a memory bandwidth and based
 * on that the governor changes the frequency.
 * The counters are not used when the governor is GOVERNOR_USERSPACE.
1295 */ 1296 static int exynos5_performance_counters_init(struct exynos5_dmc *dmc) 1297 { 1298 int counters_size; 1299 int ret, i; 1300 1301 dmc->num_counters = devfreq_event_get_edev_count(dmc->dev); 1302 if (dmc->num_counters < 0) { 1303 dev_err(dmc->dev, "could not get devfreq-event counters\n"); 1304 return dmc->num_counters; 1305 } 1306 1307 counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters; 1308 dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL); 1309 if (!dmc->counter) 1310 return -ENOMEM; 1311 1312 for (i = 0; i < dmc->num_counters; i++) { 1313 dmc->counter[i] = 1314 devfreq_event_get_edev_by_phandle(dmc->dev, i); 1315 if (IS_ERR_OR_NULL(dmc->counter[i])) 1316 return -EPROBE_DEFER; 1317 } 1318 1319 ret = exynos5_counters_enable_edev(dmc); 1320 if (ret < 0) { 1321 dev_err(dmc->dev, "could not enable event counter\n"); 1322 return ret; 1323 } 1324 1325 ret = exynos5_counters_set_event(dmc); 1326 if (ret < 0) { 1327 exynos5_counters_disable_edev(dmc); 1328 dev_err(dmc->dev, "could not set event counter\n"); 1329 return ret; 1330 } 1331 1332 return 0; 1333 } 1334 1335 /** 1336 * exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC 1337 * @dmc: device which is used for changing this feature 1338 * @set: a boolean state passing enable/disable request 1339 * 1340 * There is a need of pausing DREX DMC when divider or MUX in clock tree 1341 * changes its configuration. In such situation access to the memory is blocked 1342 * in DMC automatically. This feature is used when clock frequency change 1343 * request appears and touches clock tree. 
1344 */ 1345 static inline int exynos5_dmc_set_pause_on_switching(struct exynos5_dmc *dmc) 1346 { 1347 unsigned int val; 1348 int ret; 1349 1350 ret = regmap_read(dmc->clk_regmap, CDREX_PAUSE, &val); 1351 if (ret) 1352 return ret; 1353 1354 val |= 1UL; 1355 regmap_write(dmc->clk_regmap, CDREX_PAUSE, val); 1356 1357 return 0; 1358 } 1359 1360 static irqreturn_t dmc_irq_thread(int irq, void *priv) 1361 { 1362 int res; 1363 struct exynos5_dmc *dmc = priv; 1364 1365 mutex_lock(&dmc->df->lock); 1366 exynos5_dmc_perf_events_check(dmc); 1367 res = update_devfreq(dmc->df); 1368 mutex_unlock(&dmc->df->lock); 1369 1370 if (res) 1371 dev_warn(dmc->dev, "devfreq failed with %d\n", res); 1372 1373 return IRQ_HANDLED; 1374 } 1375 1376 /** 1377 * exynos5_dmc_probe() - Probe function for the DMC driver 1378 * @pdev: platform device for which the driver is going to be initialized 1379 * 1380 * Initialize basic components: clocks, regulators, performance counters, etc. 1381 * Read out product version and based on the information setup 1382 * internal structures for the controller (frequency and voltage) and for DRAM 1383 * memory parameters: timings for each operating frequency. 1384 * Register new devfreq device for controlling DVFS of the DMC. 
 */
static int exynos5_dmc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct exynos5_dmc *dmc;
	int irq[2];

	dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
	if (!dmc)
		return -ENOMEM;

	mutex_init(&dmc->lock);

	dmc->dev = dev;
	platform_set_drvdata(pdev, dmc);

	/* Two DREX register banks: resource 0 and 1. */
	dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmc->base_drexi0))
		return PTR_ERR(dmc->base_drexi0);

	dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmc->base_drexi1))
		return PTR_ERR(dmc->base_drexi1);

	/* Clock-controller regmap used for CDREX_* register accesses. */
	dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
							  "samsung,syscon-clk");
	if (IS_ERR(dmc->clk_regmap))
		return PTR_ERR(dmc->clk_regmap);

	ret = exynos5_init_freq_table(dmc, &exynos5_dmc_df_profile);
	if (ret) {
		dev_warn(dev, "couldn't initialize frequency settings\n");
		return ret;
	}

	/* MIF supply regulator; used for voltage scaling with frequency. */
	dmc->vdd_mif = devm_regulator_get(dev, "vdd");
	if (IS_ERR(dmc->vdd_mif)) {
		ret = PTR_ERR(dmc->vdd_mif);
		return ret;
	}

	/* Enables fout_bpll/mout_bpll; failures below must unwind them. */
	ret = exynos5_dmc_init_clks(dmc);
	if (ret)
		return ret;

	ret = of_get_dram_timings(dmc);
	if (ret) {
		dev_warn(dev, "couldn't initialize timings settings\n");
		goto remove_clocks;
	}

	ret = exynos5_dmc_set_pause_on_switching(dmc);
	if (ret) {
		dev_warn(dev, "couldn't get access to PAUSE register\n");
		goto remove_clocks;
	}

	/* There is two modes in which the driver works: polling or IRQ */
	irq[0] = platform_get_irq_byname(pdev, "drex_0");
	irq[1] = platform_get_irq_byname(pdev, "drex_1");
	if (irq[0] > 0 && irq[1] > 0 && irqmode) {
		/* IRQ mode needs both DREX interrupts and the module param. */
		ret = devm_request_threaded_irq(dev, irq[0], NULL,
						dmc_irq_thread, IRQF_ONESHOT,
						dev_name(dev), dmc);
		if (ret) {
			dev_err(dev, "couldn't grab IRQ\n");
			goto remove_clocks;
		}

		ret = devm_request_threaded_irq(dev, irq[1], NULL,
						dmc_irq_thread, IRQF_ONESHOT,
						dev_name(dev), dmc);
		if (ret) {
			dev_err(dev, "couldn't grab IRQ\n");
			goto remove_clocks;
		}

		/*
		 * Setup default thresholds for the devfreq governor.
		 * The values are chosen based on experiments.
		 */
		dmc->gov_data.upthreshold = 55;
		dmc->gov_data.downdifferential = 5;

		exynos5_dmc_enable_perf_events(dmc);

		dmc->in_irq_mode = 1;
	} else {
		/* Polling mode: sample via devfreq-event counters instead. */
		ret = exynos5_performance_counters_init(dmc);
		if (ret) {
			dev_warn(dev, "couldn't probe performance counters\n");
			goto remove_clocks;
		}

		/*
		 * Setup default thresholds for the devfreq governor.
		 * The values are chosen based on experiments.
		 */
		dmc->gov_data.upthreshold = 10;
		dmc->gov_data.downdifferential = 5;

		exynos5_dmc_df_profile.polling_ms = 100;
	}

	dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &dmc->gov_data);

	if (IS_ERR(dmc->df)) {
		ret = PTR_ERR(dmc->df);
		goto err_devfreq_add;
	}

	/* Start counting only after devfreq is ready to consume events. */
	if (dmc->in_irq_mode)
		exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);

	dev_info(dev, "DMC initialized, in irq mode: %d\n", dmc->in_irq_mode);

	return 0;

err_devfreq_add:
	/* Undo whichever monitoring scheme was enabled above. */
	if (dmc->in_irq_mode)
		exynos5_dmc_disable_perf_events(dmc);
	else
		exynos5_counters_disable_edev(dmc);
remove_clocks:
	clk_disable_unprepare(dmc->mout_bpll);
	clk_disable_unprepare(dmc->fout_bpll);

	return ret;
}

/**
 * exynos5_dmc_remove() - Remove function for the platform device
 * @pdev: platform device which is going to be removed
 *
 * The function relies on 'devm' framework function which automatically
 * clean the device's resources.
It just calls explicitly disable function for 1525 * the performance counters. 1526 */ 1527 static int exynos5_dmc_remove(struct platform_device *pdev) 1528 { 1529 struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev); 1530 1531 if (dmc->in_irq_mode) 1532 exynos5_dmc_disable_perf_events(dmc); 1533 else 1534 exynos5_counters_disable_edev(dmc); 1535 1536 clk_disable_unprepare(dmc->mout_bpll); 1537 clk_disable_unprepare(dmc->fout_bpll); 1538 1539 dev_pm_opp_remove_table(dmc->dev); 1540 1541 return 0; 1542 } 1543 1544 static const struct of_device_id exynos5_dmc_of_match[] = { 1545 { .compatible = "samsung,exynos5422-dmc", }, 1546 { }, 1547 }; 1548 MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match); 1549 1550 static struct platform_driver exynos5_dmc_platdrv = { 1551 .probe = exynos5_dmc_probe, 1552 .remove = exynos5_dmc_remove, 1553 .driver = { 1554 .name = "exynos5-dmc", 1555 .of_match_table = exynos5_dmc_of_match, 1556 }, 1557 }; 1558 module_platform_driver(exynos5_dmc_platdrv); 1559 MODULE_DESCRIPTION("Driver for Exynos5422 Dynamic Memory Controller dynamic frequency and voltage change"); 1560 MODULE_LICENSE("GPL v2"); 1561 MODULE_AUTHOR("Lukasz Luba"); 1562