// SPDX-License-Identifier: GPL-2.0
/*
 * Tegra20 External Memory Controller driver
 *
 * Author: Dmitry Osipenko <digetx@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>

#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>

#include "../jedec_ddr.h"
#include "../of_memory.h"

#include "mc.h"

#define EMC_INTSTATUS				0x000
#define EMC_INTMASK				0x004
#define EMC_DBG					0x008
#define EMC_ADR_CFG_0				0x010
#define EMC_TIMING_CONTROL			0x028
#define EMC_RC					0x02c
#define EMC_RFC					0x030
#define EMC_RAS					0x034
#define EMC_RP					0x038
#define EMC_R2W					0x03c
#define EMC_W2R					0x040
#define EMC_R2P					0x044
#define EMC_W2P					0x048
#define EMC_RD_RCD				0x04c
#define EMC_WR_RCD				0x050
#define EMC_RRD					0x054
#define EMC_REXT				0x058
#define EMC_WDV					0x05c
#define EMC_QUSE				0x060
#define EMC_QRST				0x064
#define EMC_QSAFE				0x068
#define EMC_RDV					0x06c
#define EMC_REFRESH				0x070
#define EMC_BURST_REFRESH_NUM			0x074
#define EMC_PDEX2WR				0x078
#define EMC_PDEX2RD				0x07c
#define EMC_PCHG2PDEN				0x080
#define EMC_ACT2PDEN				0x084
#define EMC_AR2PDEN				0x088
#define EMC_RW2PDEN				0x08c
#define EMC_TXSR				0x090
#define EMC_TCKE				0x094
#define EMC_TFAW				0x098
#define EMC_TRPAB				0x09c
#define EMC_TCLKSTABLE				0x0a0
#define EMC_TCLKSTOP				0x0a4
#define EMC_TREFBW				0x0a8
#define EMC_QUSE_EXTRA				0x0ac
#define EMC_ODT_WRITE				0x0b0
#define EMC_ODT_READ				0x0b4
#define EMC_MRR					0x0ec
#define EMC_FBIO_CFG5				0x104
#define EMC_FBIO_CFG6				0x114
#define EMC_STAT_CONTROL			0x160
#define EMC_STAT_LLMC_CONTROL			0x178
#define EMC_STAT_PWR_CLOCK_LIMIT		0x198
#define EMC_STAT_PWR_CLOCKS			0x19c
#define EMC_STAT_PWR_COUNT			0x1a0
#define EMC_AUTO_CAL_INTERVAL			0x2a8
#define EMC_CFG_2				0x2b8
#define EMC_CFG_DIG_DLL				0x2bc
#define EMC_DLL_XFORM_DQS			0x2c0
#define EMC_DLL_XFORM_QUSE			0x2c4
#define EMC_ZCAL_REF_CNT			0x2e0
#define EMC_ZCAL_WAIT_CNT			0x2e4
#define EMC_CFG_CLKTRIM_0			0x2d0
#define EMC_CFG_CLKTRIM_1			0x2d4
#define EMC_CFG_CLKTRIM_2			0x2d8

#define EMC_CLKCHANGE_REQ_ENABLE		BIT(0)
#define EMC_CLKCHANGE_PD_ENABLE			BIT(1)
#define EMC_CLKCHANGE_SR_ENABLE			BIT(2)

#define EMC_TIMING_UPDATE			BIT(0)

#define EMC_REFRESH_OVERFLOW_INT		BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT		BIT(4)
#define EMC_MRR_DIVLD_INT			BIT(5)

#define EMC_DBG_READ_MUX_ASSEMBLY		BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE		BIT(1)
#define EMC_DBG_FORCE_UPDATE			BIT(2)
#define EMC_DBG_READ_DQM_CTRL			BIT(9)
#define EMC_DBG_CFG_PRIORITY			BIT(24)

#define EMC_FBIO_CFG5_DRAM_WIDTH_X16		BIT(4)
#define EMC_FBIO_CFG5_DRAM_TYPE			GENMASK(1, 0)

#define EMC_MRR_DEV_SELECTN			GENMASK(31, 30)
#define EMC_MRR_MRR_MA				GENMASK(23, 16)
#define EMC_MRR_MRR_DATA			GENMASK(15, 0)

#define EMC_ADR_CFG_0_EMEM_NUMDEV		GENMASK(25, 24)

#define EMC_PWR_GATHER_CLEAR			(1 << 8)
#define EMC_PWR_GATHER_DISABLE			(2 << 8)
#define EMC_PWR_GATHER_ENABLE			(3 << 8)
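/*
 * Note: the EMC_PWR_GATHER_* values above appear to encode a two-bit
 * control field at bits 9:8 of EMC_STAT_CONTROL: CLEAR zeroes the power
 * counters, DISABLE freezes them and ENABLE (re)starts the gathering.
 * They are cycled through in tegra_emc_devfreq_get_dev_status().
 */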
enum emc_dram_type {
	DRAM_TYPE_RESERVED,
	DRAM_TYPE_DDR1,
	DRAM_TYPE_LPDDR2,
	DRAM_TYPE_DDR2,
};

static const u16 emc_timing_registers[] = {
	EMC_RC,
	EMC_RFC,
	EMC_RAS,
	EMC_RP,
	EMC_R2W,
	EMC_W2R,
	EMC_R2P,
	EMC_W2P,
	EMC_RD_RCD,
	EMC_WR_RCD,
	EMC_RRD,
	EMC_REXT,
	EMC_WDV,
	EMC_QUSE,
	EMC_QRST,
	EMC_QSAFE,
	EMC_RDV,
	EMC_REFRESH,
	EMC_BURST_REFRESH_NUM,
	EMC_PDEX2WR,
	EMC_PDEX2RD,
	EMC_PCHG2PDEN,
	EMC_ACT2PDEN,
	EMC_AR2PDEN,
	EMC_RW2PDEN,
	EMC_TXSR,
	EMC_TCKE,
	EMC_TFAW,
	EMC_TRPAB,
	EMC_TCLKSTABLE,
	EMC_TCLKSTOP,
	EMC_TREFBW,
	EMC_QUSE_EXTRA,
	EMC_FBIO_CFG6,
	EMC_ODT_WRITE,
	EMC_ODT_READ,
	EMC_FBIO_CFG5,
	EMC_CFG_DIG_DLL,
	EMC_DLL_XFORM_DQS,
	EMC_DLL_XFORM_QUSE,
	EMC_ZCAL_REF_CNT,
	EMC_ZCAL_WAIT_CNT,
	EMC_AUTO_CAL_INTERVAL,
	EMC_CFG_CLKTRIM_0,
	EMC_CFG_CLKTRIM_1,
	EMC_CFG_CLKTRIM_2,
};

struct emc_timing {
	unsigned long rate;
	u32 data[ARRAY_SIZE(emc_timing_registers)];
};

enum emc_rate_request_type {
	EMC_RATE_DEVFREQ,
	EMC_RATE_DEBUG,
	EMC_RATE_ICC,
	EMC_RATE_TYPE_MAX,
};

struct emc_rate_request {
	unsigned long min_rate;
	unsigned long max_rate;
};

struct tegra_emc {
	struct device *dev;
	struct tegra_mc *mc;
	struct icc_provider provider;
	struct notifier_block clk_nb;
	struct clk *clk;
	void __iomem *regs;
	unsigned int dram_bus_width;

	struct emc_timing *timings;
	unsigned int num_timings;

	struct {
		struct dentry *root;
		unsigned long min_rate;
		unsigned long max_rate;
	} debugfs;

	/*
	 * There are multiple sources in the EMC driver which could request
	 * a min/max clock rate; these rates are contained in this array.
	 */
	struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];

	/* protect shared rate-change code path */
	struct mutex rate_lock;

	struct devfreq_simple_ondemand_data ondemand_data;

	/* memory chip identity information */
	union lpddr2_basic_config4 basic_conf4;
	unsigned int manufacturer_id;
	unsigned int revision_id1;
	unsigned int revision_id2;

	bool mrr_error;
};
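/*
 * Illustration of how the per-source requests combine (numbers are made
 * up): if devfreq requests a min_rate of 300 MHz and ICC requests a
 * min_rate of 600 MHz, emc_request_rate() picks the highest floor and
 * the lowest ceiling among all sources, so the effective floor becomes
 * 600 MHz.
 */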
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
	struct tegra_emc *emc = data;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	u32 status;

	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
	if (!status)
		return IRQ_NONE;

	/* notify about HW problem */
	if (status & EMC_REFRESH_OVERFLOW_INT)
		dev_err_ratelimited(emc->dev,
				    "refresh request overflow timeout\n");

	/* clear interrupts */
	writel_relaxed(status, emc->regs + EMC_INTSTATUS);

	return IRQ_HANDLED;
}

static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
						unsigned long rate)
{
	struct emc_timing *timing = NULL;
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate >= rate) {
			timing = &emc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu\n", rate);
		return NULL;
	}

	return timing;
}

static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
{
	struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
	unsigned int i;

	if (!timing)
		return -EINVAL;

	dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
		__func__, timing->rate, rate);

	/* program shadow registers */
	for (i = 0; i < ARRAY_SIZE(timing->data); i++)
		writel_relaxed(timing->data[i],
			       emc->regs + emc_timing_registers[i]);

	/* wait until programming has settled */
	readl_relaxed(emc->regs + emc_timing_registers[i - 1]);

	return 0;
}

static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
	int err;
	u32 v;

	dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);

	if (flush) {
		/* manually initiate memory timing update */
		writel_relaxed(EMC_TIMING_UPDATE,
			       emc->regs + EMC_TIMING_CONTROL);
		return 0;
	}

	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
						v & EMC_CLKCHANGE_COMPLETE_INT,
						1, 100);
	if (err) {
		dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
		return err;
	}

	return 0;
}

static int tegra_emc_clk_change_notify(struct notifier_block *nb,
				       unsigned long msg, void *data)
{
	struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
	struct clk_notifier_data *cnd = data;
	int err;

	switch (msg) {
	case PRE_RATE_CHANGE:
		err = emc_prepare_timing_change(emc, cnd->new_rate);
		break;

	case ABORT_RATE_CHANGE:
		err = emc_prepare_timing_change(emc, cnd->old_rate);
		if (err)
			break;

		err = emc_complete_timing_change(emc, true);
		break;

	case POST_RATE_CHANGE:
		err = emc_complete_timing_change(emc, false);
		break;

	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}
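/*
 * A timing node parsed below looks roughly like this (values are purely
 * illustrative, not taken from a real board):
 *
 *	emc-table@166000 {
 *		compatible = "nvidia,tegra20-emc-table";
 *		clock-frequency = <166000>;	// DDR bus rate in kHz
 *		nvidia,emc-registers = < ... >;	// one u32 per entry of
 *						// emc_timing_registers[]
 *	};
 *
 * A "clock-frequency" of 166000 kHz yields a timing->rate of 332 MHz,
 * since the EMC clock runs at twice the DDR bus rate.
 */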
static int load_one_timing_from_dt(struct tegra_emc *emc,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	u32 rate;
	int err;

	if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) {
		dev_err(emc->dev, "incompatible DT node: %pOF\n", node);
		return -EINVAL;
	}

	err = of_property_read_u32(node, "clock-frequency", &rate);
	if (err) {
		dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
			node, err);
		return err;
	}

	err = of_property_read_u32_array(node, "nvidia,emc-registers",
					 timing->data,
					 ARRAY_SIZE(emc_timing_registers));
	if (err) {
		dev_err(emc->dev,
			"timing %pOF: failed to read emc timing data: %d\n",
			node, err);
		return err;
	}

	/*
	 * The EMC clock rate is twice the bus rate, and the bus rate is
	 * measured in kHz.
	 */
	timing->rate = rate * 2 * 1000;

	dev_dbg(emc->dev, "%s: %pOF: EMC rate %lu\n",
		__func__, node, timing->rate);

	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;

	if (a->rate > b->rate)
		return 1;

	return 0;
}

static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
					  struct device_node *node)
{
	struct emc_timing *timing;
	int child_count;
	int err;

	child_count = of_get_child_count(node);
	if (!child_count) {
		dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node);
		return -EINVAL;
	}

	emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
				    GFP_KERNEL);
	if (!emc->timings)
		return -ENOMEM;

	timing = emc->timings;

	for_each_child_of_node_scoped(node, child) {
		if (of_node_name_eq(child, "lpddr2"))
			continue;

		err = load_one_timing_from_dt(emc, timing++, child);
		if (err)
			return err;

		emc->num_timings++;
	}

	sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
	     NULL);

	dev_info_once(emc->dev,
		      "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
		      emc->num_timings,
		      tegra_read_ram_code(),
		      emc->timings[0].rate / 1000000,
		      emc->timings[emc->num_timings - 1].rate / 1000000);

	return 0;
}
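/*
 * When the "nvidia,use-ram-code" property is present, the timing tables
 * are grouped per board strapping, conceptually (illustrative sketch
 * only):
 *
 *	emc-tables@0 {
 *		nvidia,ram-code = <0>;
 *		lpddr2 { ... chip identity matched against MRR data ... };
 *		emc-table@166000 { ... };
 *	};
 *
 * The lookup below first tries to match the RAM code read from the
 * fuses and falls back to matching the LPDDR2 chip identity registers.
 */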
static struct device_node *
tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	struct device_node *np;
	u32 value, ram_code;
	int err;

	if (emc->mrr_error) {
		dev_warn(dev, "memory timings skipped due to MRR error\n");
		return NULL;
	}

	if (of_get_child_count(dev->of_node) == 0) {
		dev_info_once(dev, "device-tree doesn't have memory timings\n");
		return NULL;
	}

	if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
		return of_node_get(dev->of_node);

	ram_code = tegra_read_ram_code();

	for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
	     np = of_find_node_by_name(np, "emc-tables")) {
		err = of_property_read_u32(np, "nvidia,ram-code", &value);
		if (err || value != ram_code) {
			struct device_node *lpddr2_np;
			bool cfg_mismatches = false;

			lpddr2_np = of_find_node_by_name(np, "lpddr2");
			if (lpddr2_np) {
				const struct lpddr2_info *info;

				info = of_lpddr2_get_info(lpddr2_np, dev);
				if (info) {
					if (info->manufacturer_id >= 0 &&
					    info->manufacturer_id != emc->manufacturer_id)
						cfg_mismatches = true;

					if (info->revision_id1 >= 0 &&
					    info->revision_id1 != emc->revision_id1)
						cfg_mismatches = true;

					if (info->revision_id2 >= 0 &&
					    info->revision_id2 != emc->revision_id2)
						cfg_mismatches = true;

					if (info->density != emc->basic_conf4.density)
						cfg_mismatches = true;

					if (info->io_width != emc->basic_conf4.io_width)
						cfg_mismatches = true;

					if (info->arch_type != emc->basic_conf4.arch_type)
						cfg_mismatches = true;
				} else {
					dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
					cfg_mismatches = true;
				}

				of_node_put(lpddr2_np);
			} else {
				cfg_mismatches = true;
			}

			if (cfg_mismatches) {
				of_node_put(np);
				continue;
			}
		}

		return np;
	}

	dev_err(dev, "no memory timings for RAM code %u found in device tree\n",
		ram_code);

	return NULL;
}

static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
					unsigned int emem_dev,
					unsigned int register_addr,
					unsigned int *register_data)
{
	u32 memory_dev = emem_dev ? 1 : 2;
	u32 val, mr_mask = 0xff;
	int err;

	/* clear data-valid interrupt status */
	writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);

	/* issue mode register read request */
	val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
	val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);

	writel_relaxed(val, emc->regs + EMC_MRR);

	/* wait for the LPDDR2 data-valid interrupt */
	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
						val & EMC_MRR_DIVLD_INT,
						1, 100);
	if (err) {
		dev_err(emc->dev, "mode register %u read failed: %d\n",
			register_addr, err);
		emc->mrr_error = true;
		return err;
	}

	/* read out mode register data */
	val = readl_relaxed(emc->regs + EMC_MRR);
	*register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;

	return 0;
}

static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
				      unsigned int emem_dev,
				      bool print_out)
{
	/* these registers are standard for all LPDDR JEDEC memory chips */
	emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
	emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
	emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
	emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);

	if (!print_out)
		return;

	dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
		 emem_dev, emc->manufacturer_id,
		 lpddr2_jedec_manufacturer(emc->manufacturer_id),
		 emc->revision_id1, emc->revision_id2,
		 4 >> emc->basic_conf4.arch_type,
		 64 << emc->basic_conf4.density,
		 32 >> emc->basic_conf4.io_width);
}
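/*
 * Decoding example for the dev_info() above (illustrative values): a
 * basic_conf4 of arch_type=0, density=4, io_width=0 prints as an
 * LPDDR2-S4 part of 64 << 4 = 1024 Mbit with a 32 >> 0 = 32-bit I/O
 * width, following the field encoding of the LPDDR2 MR8 (Basic
 * Configuration 4) mode register.
 */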
static int emc_setup_hw(struct tegra_emc *emc)
{
	u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	static bool print_sdram_info_once;
	enum emc_dram_type dram_type;
	const char *dram_type_str;
	unsigned int emem_numdev;

	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);

	/*
	 * Depending on the memory type, DRAM should enter either self-refresh
	 * or power-down state on EMC clock change.
	 */
	if (!(emc_cfg & EMC_CLKCHANGE_PD_ENABLE) &&
	    !(emc_cfg & EMC_CLKCHANGE_SR_ENABLE)) {
		dev_err(emc->dev,
			"bootloader didn't specify DRAM auto-suspend mode\n");
		return -EINVAL;
	}

	/* enable EMC and CAR to handshake on PLL divider/source changes */
	emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
	writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);

	/* initialize interrupt */
	writel_relaxed(intmask, emc->regs + EMC_INTMASK);
	writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);

	/* ensure that unwanted debug features are disabled */
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
	emc_dbg |= EMC_DBG_CFG_PRIORITY;
	emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
	emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
	emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);

	if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
		emc->dram_bus_width = 16;
	else
		emc->dram_bus_width = 32;

	dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);

	switch (dram_type) {
	case DRAM_TYPE_RESERVED:
		dram_type_str = "INVALID";
		break;
	case DRAM_TYPE_DDR1:
		dram_type_str = "DDR1";
		break;
	case DRAM_TYPE_LPDDR2:
		dram_type_str = "LPDDR2";
		break;
	case DRAM_TYPE_DDR2:
		dram_type_str = "DDR2";
		break;
	}

	emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
	emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;

	dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
		      emc->dram_bus_width, emem_numdev, dram_type_str,
		      emem_numdev == 2 ? "devices" : "device");

	if (dram_type == DRAM_TYPE_LPDDR2) {
		while (emem_numdev--)
			emc_read_lpddr_sdram_info(emc, emem_numdev,
						  !print_sdram_info_once);
		print_sdram_info_once = true;
	}

	return 0;
}

static long emc_round_rate(unsigned long rate,
			   unsigned long min_rate,
			   unsigned long max_rate,
			   void *arg)
{
	struct emc_timing *timing = NULL;
	struct tegra_emc *emc = arg;
	unsigned int i;

	if (!emc->num_timings)
		return clk_get_rate(emc->clk);

	min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
			continue;

		if (emc->timings[i].rate > max_rate) {
			i = max(i, 1u) - 1;

			if (emc->timings[i].rate < min_rate)
				break;
		}

		if (emc->timings[i].rate < min_rate)
			continue;

		timing = &emc->timings[i];
		break;
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
			rate, min_rate, max_rate);
		return -EINVAL;
	}

	return timing->rate;
}
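/*
 * Rounding example (invented numbers): with sorted timings of 333 and
 * 666 MHz, a request for 400 MHz constrained to a max_rate of 600 MHz
 * steps back from the too-fast 666 MHz entry and resolves to 333 MHz.
 */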
static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
{
	unsigned int i;

	for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
		emc->requested_rate[i].min_rate = 0;
		emc->requested_rate[i].max_rate = ULONG_MAX;
	}
}

static int emc_request_rate(struct tegra_emc *emc,
			    unsigned long new_min_rate,
			    unsigned long new_max_rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = emc->requested_rate;
	unsigned long min_rate = 0, max_rate = ULONG_MAX;
	unsigned int i;
	int err;

	/* select minimum and maximum rates among the requested rates */
	for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
		if (i == type) {
			min_rate = max(new_min_rate, min_rate);
			max_rate = min(new_max_rate, max_rate);
		} else {
			min_rate = max(req->min_rate, min_rate);
			max_rate = min(req->max_rate, max_rate);
		}
	}

	if (min_rate > max_rate) {
		dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
				    __func__, type, min_rate, max_rate);
		return -ERANGE;
	}

	/*
	 * EMC rate-changes should go via the OPP API because it manages
	 * voltage changes.
	 */
	err = dev_pm_opp_set_rate(emc->dev, min_rate);
	if (err)
		return err;

	emc->requested_rate[type].min_rate = new_min_rate;
	emc->requested_rate[type].max_rate = new_max_rate;

	return 0;
}

static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = &emc->requested_rate[type];
	int ret;

	mutex_lock(&emc->rate_lock);
	ret = emc_request_rate(emc, rate, req->max_rate, type);
	mutex_unlock(&emc->rate_lock);

	return ret;
}

static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = &emc->requested_rate[type];
	int ret;

	mutex_lock(&emc->rate_lock);
	ret = emc_request_rate(emc, req->min_rate, rate, type);
	mutex_unlock(&emc->rate_lock);

	return ret;
}

/*
 * debugfs interface
 *
 * The memory controller driver exposes some files in debugfs that can be used
 * to control the EMC frequency. The top-level directory can be found here:
 *
 *   /sys/kernel/debug/emc
 *
 * It contains the following files:
 *
 * - available_rates: This file contains a list of valid, space-separated
 *   EMC frequencies.
 *
 * - min_rate: Writing a value to this file sets the given frequency as the
 *   floor of the permitted range. If this is higher than the currently
 *   configured EMC frequency, this will cause the frequency to be
 *   increased so that it stays within the valid range.
 *
 * - max_rate: Similarly to the min_rate file, writing a value to this file
 *   sets the given frequency as the ceiling of the permitted range. If
 *   the value is lower than the currently configured EMC frequency, this
 *   will cause the frequency to be decreased so that it stays within the
 *   valid range.
 */
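/*
 * Example session (rates shown are illustrative; writes must match one
 * of the available rates exactly):
 *
 *	# cat /sys/kernel/debug/emc/available_rates
 *	333000000 666000000
 *	# echo 333000000 > /sys/kernel/debug/emc/max_rate
 */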
static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++)
		if (rate == emc->timings[i].rate)
			return true;

	return false;
}

static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
{
	struct tegra_emc *emc = s->private;
	const char *prefix = "";
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
		prefix = " ";
	}

	seq_puts(s, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);

static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.min_rate;

	return 0;
}

static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.min_rate = rate;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
			tegra_emc_debug_min_rate_get,
			tegra_emc_debug_min_rate_set, "%llu\n");

static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.max_rate;

	return 0;
}

static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.max_rate = rate;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
			tegra_emc_debug_max_rate_get,
			tegra_emc_debug_max_rate_set, "%llu\n");

static void tegra_emc_debugfs_init(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	unsigned int i;
	int err;

	emc->debugfs.min_rate = ULONG_MAX;
	emc->debugfs.max_rate = 0;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < emc->debugfs.min_rate)
			emc->debugfs.min_rate = emc->timings[i].rate;

		if (emc->timings[i].rate > emc->debugfs.max_rate)
			emc->debugfs.max_rate = emc->timings[i].rate;
	}

	if (!emc->num_timings) {
		emc->debugfs.min_rate = clk_get_rate(emc->clk);
		emc->debugfs.max_rate = emc->debugfs.min_rate;
	}

	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
				 emc->debugfs.max_rate);
	if (err < 0) {
		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
			emc->debugfs.min_rate, emc->debugfs.max_rate,
			emc->clk);
	}

	emc->debugfs.root = debugfs_create_dir("emc", NULL);

	debugfs_create_file("available_rates", 0444, emc->debugfs.root,
			    emc, &tegra_emc_debug_available_rates_fops);
	debugfs_create_file("min_rate", 0644, emc->debugfs.root,
			    emc, &tegra_emc_debug_min_rate_fops);
	debugfs_create_file("max_rate", 0644, emc->debugfs.root,
			    emc, &tegra_emc_debug_max_rate_fops);
}

static inline struct tegra_emc *
to_tegra_emc_provider(struct icc_provider *provider)
{
	return container_of(provider, struct tegra_emc, provider);
}
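/*
 * Bandwidth-to-clock conversion example for emc_icc_set() below
 * (illustrative numbers): on a 32-bit (4-byte) DRAM bus, a peak
 * bandwidth request of 3200000 kByte/s converts to 3.2e9 / 4 =
 * 800 MHz EMC clock, since the DDR peak data rate equals the EMC rate.
 */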
static struct icc_node_data *
emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
	struct icc_provider *provider = data;
	struct icc_node_data *ndata;
	struct icc_node *node;

	/* External Memory is the only possible ICC route */
	list_for_each_entry(node, &provider->nodes, node_list) {
		if (node->id != TEGRA_ICC_EMEM)
			continue;

		ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
		if (!ndata)
			return ERR_PTR(-ENOMEM);

		/*
		 * SRC and DST nodes should have matching TAG in order to have
		 * it set by default for a requested path.
		 */
		ndata->tag = TEGRA_MC_ICC_TAG_ISO;
		ndata->node = node;

		return ndata;
	}

	return ERR_PTR(-EPROBE_DEFER);
}

static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
	unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
	unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
	unsigned long long rate = max(avg_bw, peak_bw);
	unsigned int dram_data_bus_width_bytes;
	int err;

	/*
	 * Tegra20 EMC runs on x2 clock rate of SDRAM bus because DDR data
	 * is sampled on both clock edges. This means that the EMC clock rate
	 * equals the peak data-rate.
	 */
	dram_data_bus_width_bytes = emc->dram_bus_width / 8;
	do_div(rate, dram_data_bus_width_bytes);
	rate = min_t(u64, rate, U32_MAX);

	err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
	if (err)
		return err;

	return 0;
}

static int tegra_emc_interconnect_init(struct tegra_emc *emc)
{
	const struct tegra_mc_soc *soc;
	struct icc_node *node;
	int err;

	emc->mc = devm_tegra_memory_controller_get(emc->dev);
	if (IS_ERR(emc->mc))
		return PTR_ERR(emc->mc);

	soc = emc->mc->soc;

	emc->provider.dev = emc->dev;
	emc->provider.set = emc_icc_set;
	emc->provider.data = &emc->provider;
	emc->provider.aggregate = soc->icc_ops->aggregate;
	emc->provider.xlate_extended = emc_of_icc_xlate_extended;

	icc_provider_init(&emc->provider);

	/* create External Memory Controller node */
	node = icc_node_create(TEGRA_ICC_EMC);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto err_msg;
	}

	node->name = "External Memory Controller";
	icc_node_add(node, &emc->provider);

	/* link External Memory Controller to External Memory (DRAM) */
	err = icc_link_create(node, TEGRA_ICC_EMEM);
	if (err)
		goto remove_nodes;

	/* create External Memory node */
	node = icc_node_create(TEGRA_ICC_EMEM);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto remove_nodes;
	}

	node->name = "External Memory (DRAM)";
	icc_node_add(node, &emc->provider);

	err = icc_provider_register(&emc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&emc->provider);
err_msg:
	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);

	return err;
}

static void devm_tegra_emc_unset_callback(void *data)
{
	tegra20_clk_set_emc_round_callback(NULL, NULL);
}

static void devm_tegra_emc_unreg_clk_notifier(void *data)
{
	struct tegra_emc *emc = data;

	clk_notifier_unregister(emc->clk, &emc->clk_nb);
}
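/*
 * tegra20_clk_set_emc_round_callback() plugs emc_round_rate() into the
 * Tegra20 CAR clock driver, so that every EMC rate request coming
 * through the common clock framework is snapped to one of the supported
 * timings; the devm actions above undo the registrations when the
 * device goes away.
 */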
static int tegra_emc_init_clk(struct tegra_emc *emc)
{
	int err;

	tegra20_clk_set_emc_round_callback(emc_round_rate, emc);

	err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
				       NULL);
	if (err)
		return err;

	emc->clk = devm_clk_get(emc->dev, NULL);
	if (IS_ERR(emc->clk)) {
		dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
		return PTR_ERR(emc->clk);
	}

	err = clk_notifier_register(emc->clk, &emc->clk_nb);
	if (err) {
		dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
		return err;
	}

	err = devm_add_action_or_reset(emc->dev,
				       devm_tegra_emc_unreg_clk_notifier, emc);
	if (err)
		return err;

	return 0;
}

static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long rate;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}

	rate = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
}

static int tegra_emc_devfreq_get_dev_status(struct device *dev,
					    struct devfreq_dev_status *stat)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	/* freeze counters */
	writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);

	/*
	 * busy_time: number of clocks EMC request was accepted
	 * total_time: number of clocks PWR_GATHER control was set to ENABLE
	 */
	stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
	stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
	stat->current_frequency = clk_get_rate(emc->clk);

	/* clear counters and restart */
	writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
	writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);

	return 0;
}

static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
	.polling_ms = 30,
	.target = tegra_emc_devfreq_target,
	.get_dev_status = tegra_emc_devfreq_get_dev_status,
};
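/*
 * Load accounting example for the simple_ondemand governor (invented
 * numbers): PWR_COUNT = 150000 busy clocks out of PWR_CLOCKS = 1000000
 * total clocks gives a 15% load, which is below the 20% upthreshold set
 * below, so the governor would pick a lower OPP.
 */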
static int tegra_emc_devfreq_init(struct tegra_emc *emc)
{
	struct devfreq *devfreq;

	/*
	 * PWR_COUNT is 1/2 of PWR_CLOCKS at max, and thus, the up-threshold
	 * should be less than 50. Secondly, multiple active memory clients
	 * may cause over 20% of lost clock cycles due to stalls caused by
	 * competing memory accesses. This means that the threshold should
	 * be set to less than 30 in order to have a properly working
	 * governor.
	 */
	emc->ondemand_data.upthreshold = 20;

	/*
	 * Reset the statistics-gathering state, select global bandwidth for
	 * the statistics collection mode and set the clock counter saturation
	 * limit to maximum.
	 */
	writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
	writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
	writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);

	devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &emc->ondemand_data);
	if (IS_ERR(devfreq)) {
		dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq);
		return PTR_ERR(devfreq);
	}

	return 0;
}

static int tegra_emc_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {};
	struct device_node *np;
	struct tegra_emc *emc;
	int irq, err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "please update your device tree\n");
		return irq;
	}

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc)
		return -ENOMEM;

	mutex_init(&emc->rate_lock);
	emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
	emc->dev = &pdev->dev;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	np = tegra_emc_find_node_by_ram_code(emc);
	if (np) {
		err = tegra_emc_load_timings_from_dt(emc, np);
		of_node_put(np);
		if (err)
			return err;
	}

	err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
			       dev_name(&pdev->dev), emc);
	if (err) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		return err;
	}

	err = tegra_emc_init_clk(emc);
	if (err)
		return err;

	opp_params.init_state = true;

	err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
	if (err)
		return err;

	platform_set_drvdata(pdev, emc);
	tegra_emc_rate_requests_init(emc);
	tegra_emc_debugfs_init(emc);
	tegra_emc_interconnect_init(emc);
	tegra_emc_devfreq_init(emc);

	/*
	 * Don't allow the kernel module to be unloaded. Unloading adds some
	 * extra complexity which isn't really worth the effort in the case
	 * of this driver.
	 */
	try_module_get(THIS_MODULE);

	return 0;
}

static const struct of_device_id tegra_emc_of_match[] = {
	{ .compatible = "nvidia,tegra20-emc", },
	{},
};
MODULE_DEVICE_TABLE(of, tegra_emc_of_match);

static struct platform_driver tegra_emc_driver = {
	.probe = tegra_emc_probe,
	.driver = {
		.name = "tegra20-emc",
		.of_match_table = tegra_emc_of_match,
		.suppress_bind_attrs = true,
		.sync_state = icc_sync_state,
	},
};
module_platform_driver(tegra_emc_driver);

MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL v2");