// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of.h>

#include <soc/tegra/mc.h>

#include "tegra210-emc.h"
#include "tegra210-mc.h"
/*
 * Enable flags for specifying verbosity.
 */
#define INFO		(1 << 0)
#define STEPS		(1 << 1)
#define SUB_STEPS	(1 << 2)
#define PRELOCK		(1 << 3)
#define PRELOCK_STEPS	(1 << 4)
#define ACTIVE_EN	(1 << 5)
#define PRAMP_UP	(1 << 6)
#define PRAMP_DN	(1 << 7)
#define EMA_WRITES	(1 << 10)
#define EMA_UPDATES	(1 << 11)
#define PER_TRAIN	(1 << 16)
#define CC_PRINT	(1 << 17)
#define CCFIFO		(1 << 29)
#define REGS		(1 << 30)
#define REG_LISTS	(1 << 31)

#define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)

#define DVFS_CLOCK_CHANGE_VERSION	21021
#define EMC_PRELOCK_VERSION		2101

enum {
	DVFS_SEQUENCE = 1,
	WRITE_TRAINING_SEQUENCE = 2,
	PERIODIC_TRAINING_SEQUENCE = 3,
	DVFS_PT1 = 10,
	DVFS_UPDATE = 11,
	TRAINING_PT1 = 12,
	TRAINING_UPDATE = 13,
	PERIODIC_TRAINING_UPDATE = 14
};

/*
 * PTFV defines - basically just indices into the per-table PTFV array.
 */
#define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX		0
#define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX		1
#define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX		2
#define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX		3
#define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX		4
#define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX		5
#define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX		6
#define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX		7
#define PTFV_DVFS_SAMPLES_INDEX			9
#define PTFV_MOVAVG_WEIGHT_INDEX		10
#define PTFV_CONFIG_CTRL_INDEX			11

#define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA	(1 << 0)

/*
 * Do arithmetic in fixed point.
 */
#define MOVAVG_PRECISION_FACTOR		100

/*
 * The division portion of the average operation.
 */
#define __AVERAGE_PTFV(dev)						\
	({ next->ptfv_list[(dev)] =					\
	   next->ptfv_list[(dev)] /					\
	   next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })

/*
 * Convert val to fixed point and add it to the temporary average.
 */
#define __INCREMENT_PTFV(dev, val)					\
	({ next->ptfv_list[(dev)] +=					\
	   ((val) * MOVAVG_PRECISION_FACTOR); })

/*
 * Convert a moving average back to integral form and return the value.
 */
#define __MOVAVG_AC(timing, dev)					\
	((timing)->ptfv_list[(dev)] /					\
	 MOVAVG_PRECISION_FACTOR)

/* Weighted update. */
#define __WEIGHTED_UPDATE_PTFV(dev, nval)				\
	do {								\
		int w = PTFV_MOVAVG_WEIGHT_INDEX;			\
		int dqs = (dev);					\
									\
		next->ptfv_list[dqs] =					\
			((nval * MOVAVG_PRECISION_FACTOR) +		\
			 (next->ptfv_list[dqs] *			\
			  next->ptfv_list[w])) /			\
			(next->ptfv_list[w] + 1);			\
									\
		emc_dbg(emc, EMA_UPDATES, "%s: (s=%u) EMA: %u\n",	\
			__stringify(dev), nval, next->ptfv_list[dqs]);	\
	} while (0)

/* Access a particular average. */
#define __MOVAVG(timing, dev)						\
	((timing)->ptfv_list[(dev)])

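/*
 * A worked example of the fixed-point scheme above, with hypothetical
 * values: with MOVAVG_PRECISION_FACTOR = 100 and ptfv_list holding 5 in
 * PTFV_DVFS_SAMPLES_INDEX, accumulating the samples 60, 61, 59, 60 and
 * 60 via __INCREMENT_PTFV() yields 30000; __AVERAGE_PTFV() divides by 5,
 * leaving 6000, and __MOVAVG_AC() converts back to 60. The weighted
 * update computes (nval * 100 + old * W) / (W + 1), with the weight W
 * taken from ptfv_list[PTFV_MOVAVG_WEIGHT_INDEX], i.e. an exponential
 * moving average in the same fixed-point representation.
 */
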
static bool tegra210_emc_compare_update_delay(struct tegra210_emc_timing *timing,
					      u32 measured, u32 idx)
{
	u32 *curr = &timing->current_dram_clktree[idx];
	u32 rate_mhz = timing->rate / 1000;
	u32 tmdel;

	tmdel = abs(*curr - measured);

	if (tmdel * 128 * rate_mhz / 1000000 > timing->tree_margin) {
		*curr = measured;
		return true;
	}

	return false;
}

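/*
 * For illustration, with hypothetical numbers: timing->rate is in kHz,
 * so rate_mhz is the DRAM rate in MHz, and tmdel is the difference
 * between the stored and the newly measured clock-tree delay for entry
 * idx. With tmdel = 40, rate_mhz = 1600 and tree_margin = 0, the check
 * computes 40 * 128 * 1600 / 1000000 = 8 > 0, so the new measurement is
 * saved and the caller is told that compensation is needed.
 */
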
static void tegra210_emc_get_clktree_delay(struct tegra210_emc *emc,
					   u32 delay[DRAM_CLKTREE_NUM])
{
	struct tegra210_emc_timing *curr = emc->last;
	u32 rate_mhz = curr->rate / 1000;
	u32 msb, lsb, dqsosc, delay_us;
	unsigned int c, d, idx;
	unsigned long clocks;

	clocks = tegra210_emc_actual_osc_clocks(curr->run_clocks);
	delay_us = 2 + (clocks / rate_mhz);

	tegra210_emc_start_periodic_compensation(emc);
	udelay(delay_us);

	for (d = 0; d < emc->num_devices; d++) {
		/* Read DQSOSC from MRR18/19 */
		msb = tegra210_emc_mrr_read(emc, 2 - d, 19);
		lsb = tegra210_emc_mrr_read(emc, 2 - d, 18);

		for (c = 0; c < emc->num_channels; c++) {
			/* C[c]D[d]U[0] */
			idx = c * 4 + d * 2;

			dqsosc = (msb & 0x00ff) << 8;
			dqsosc |= (lsb & 0x00ff) >> 0;

			/* Check for unpopulated channels */
			if (dqsosc)
				delay[idx] = (clocks * 1000000) /
					     (rate_mhz * 2 * dqsosc);

			/* C[c]D[d]U[1] */
			idx++;

			dqsosc = (msb & 0xff00) << 0;
			dqsosc |= (lsb & 0xff00) >> 8;

			/* Check for unpopulated channels */
			if (dqsosc)
				delay[idx] = (clocks * 1000000) /
					     (rate_mhz * 2 * dqsosc);

			msb >>= 16;
			lsb >>= 16;
		}
	}
}

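/*
 * A worked example of the arithmetic above, with hypothetical numbers:
 * with clocks = 1024, rate_mhz = 1600 and a DQSOSC readout of 5000, the
 * entry becomes 1024 * 1000000 / (1600 * 2 * 5000) = 64. A readout of
 * zero means the channel/device/byte combination is not populated and
 * the previous value of delay[idx] is left untouched.
 */
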
static bool periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
					  struct tegra210_emc_timing *last,
					  struct tegra210_emc_timing *next)
{
#define __COPY_EMA(nt, lt, dev)						\
	({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) *			\
	   (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })

	u32 i, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
	u32 delay[DRAM_CLKTREE_NUM], idx;
	bool over = false;

	if (!next->periodic_training)
		return false;

	if (type == DVFS_SEQUENCE) {
		if (last->periodic_training &&
		    (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
		     PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
			/*
			 * If the previous frequency was using periodic
			 * calibration then we can reuse the previous
			 * frequency's EMA data.
			 */
			for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
				__COPY_EMA(next, last, idx);
		} else {
			/* Reset the EMA. */
			for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
				__MOVAVG(next, idx) = 0;

			for (i = 0; i < samples; i++) {
				/* Generate next sample of data. */
				tegra210_emc_get_clktree_delay(emc, delay);

				for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++)
					__INCREMENT_PTFV(idx, delay[idx]);
			}
		}

		for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) {
			/* Do the division part of the moving average */
			__AVERAGE_PTFV(idx);
			over |= tegra210_emc_compare_update_delay(next,
						__MOVAVG_AC(next, idx), idx);
		}
	}

	if (type == PERIODIC_TRAINING_SEQUENCE) {
		tegra210_emc_get_clktree_delay(emc, delay);

		for (idx = 0; idx < DRAM_CLKTREE_NUM; idx++) {
			__WEIGHTED_UPDATE_PTFV(idx, delay[idx]);
			over |= tegra210_emc_compare_update_delay(next,
						__MOVAVG_AC(next, idx), idx);
		}
	}

	return over;
}

static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
{
	u32 emc_cfg, emc_cfg_o, emc_cfg_update, value;
	static const u32 list[] = {
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
		EMC_DATA_BRLSHFT_0,
		EMC_DATA_BRLSHFT_1
	};
	struct tegra210_emc_timing *last = emc->last;
	unsigned int items = ARRAY_SIZE(list), i;

	if (last->periodic_training) {
		emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");

		value = emc_readl(emc, EMC_DBG);
		emc_cfg_o = emc_readl(emc, EMC_CFG);
		emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
					EMC_CFG_DRAM_ACPD |
					EMC_CFG_DRAM_CLKSTOP_PD);

		/*
		 * 1. Power optimizations should be off.
		 */
		emc_writel(emc, emc_cfg, EMC_CFG);

		/* Does emc_timing_update() for above changes. */
		tegra210_emc_dll_disable(emc);

		for (i = 0; i < emc->num_channels; i++)
			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
						     EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
						     0);

		for (i = 0; i < emc->num_channels; i++)
			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
						     EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
						     0);

		emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
		value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
		value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
		emc_writel(emc, value, EMC_CFG_UPDATE);

		/*
		 * 2. osc kick off - this assumes training and dvfs have set
		 *    correct MR23.
		 *
		 * 3. Let dram capture its clock tree delays.
		 *
		 * 4. Check delta wrt previous values (save value if margin
		 *    exceeds what is set in table).
		 */
		if (periodic_compensation_handler(emc, PERIODIC_TRAINING_SEQUENCE,
						  last, last)) {
			/*
			 * 5. Apply compensation w.r.t. trained values (if
			 *    clock tree has drifted more than the set margin).
			 */
			for (i = 0; i < items; i++) {
				value = tegra210_emc_compensate(last, list[i]);
				emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
					list[i], value);
				emc_writel(emc, value, list[i]);
			}
		}

		emc_writel(emc, emc_cfg_o, EMC_CFG);

		/*
		 * 6. Timing update actually applies the new trimmers.
		 */
		tegra210_emc_timing_update(emc);

		/* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
		emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);

		/* 6.2. Restore the DLL. */
		tegra210_emc_dll_enable(emc);
	}

	return 0;
}

/*
 * Do the clock change sequence.
 */
static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
{
	/* state variables */
	static bool fsp_for_next_freq;
	/* constant configuration parameters */
	const bool save_restore_clkstop_pd = true;
	const u32 zqcal_before_cc_cutoff = 2400;
	const bool cya_allow_ref_cc = false;
	const bool cya_issue_pc_ref = false;
	const bool opt_cc_short_zcal = true;
	const bool ref_b4_sref_en = false;
	const u32 tZQCAL_lpddr4 = 1000000;
	const bool opt_short_zcal = true;
	const bool opt_do_sw_qrst = true;
	const u32 opt_dvfs_mode = MAN_SR;
	/*
	 * This is the timing table for the source frequency. It does _not_
	 * necessarily correspond to the actual timing values in the EMC at the
	 * moment. If the boot BCT differs from the table then this can happen.
	 * However, we need it for accessing the dram_timings (which are not
	 * really registers) array for the current frequency.
	 */
	struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
	u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
	u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
	u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
	u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
	u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
	u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
	bool opt_zcal_en_cc = false, is_lpddr3 = false;
	bool compensate_trimmer_applicable = false;
	u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
	u32 src_clk_period, dst_clk_period; /* in picoseconds */
	bool shared_zq_resistor = false;
	u32 value, dram_type;
	u32 opt_dll_mode = 0;
	unsigned long delay;
	unsigned int i;

	emc_dbg(emc, INFO, "Running clock change.\n");

	/* XXX fake == last */
	fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
	fsp_for_next_freq = !fsp_for_next_freq;

	value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
	dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;

	if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
		shared_zq_resistor = true;

	if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
	     last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
	    dram_type == DRAM_TYPE_LPDDR4)
		opt_zcal_en_cc = true;

	if (dram_type == DRAM_TYPE_DDR3)
		opt_dll_mode = tegra210_emc_get_dll_state(next);

	if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
	    (dram_type == DRAM_TYPE_LPDDR2))
		is_lpddr3 = true;

	emc_readl(emc, EMC_CFG);
	emc_readl(emc, EMC_AUTO_CAL_CONFIG);

	src_clk_period = 1000000000 / last->rate;
	dst_clk_period = 1000000000 / next->rate;

	if (dst_clk_period <= zqcal_before_cc_cutoff)
		tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
	else
		tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;

	tZQCAL_lpddr4_fc_adj /= dst_clk_period;

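	/*
	 * A worked example of the adjustment above, with hypothetical rates:
	 * rates are in kHz, so a 1600000 kHz target gives dst_clk_period =
	 * 1000000000 / 1600000 = 625 ps. That is below the 2400 ps cutoff,
	 * so the tFC penalty is paid before the clock change and
	 * tZQCAL_lpddr4_fc_adj = (1000000 - tFC_lpddr4) / 625 destination
	 * clocks; above the cutoff the full 1 us tZQCAL is converted instead.
	 */
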
	emc_dbg = emc_readl(emc, EMC_DBG);
	emc_pin = emc_readl(emc, EMC_PIN);
	emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);

	emc_cfg = next->burst_regs[EMC_CFG_INDEX];
	emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
		     EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
	emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
	emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
			      EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
			      EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
			      EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
			      EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);

	emc_dbg(emc, INFO, "Clock change version: %d\n",
		DVFS_CLOCK_CHANGE_VERSION);
	emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
	emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
	emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
	emc_dbg(emc, INFO, "DLL clksrc: 0x%08x\n", next->dll_clk_src);
	emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
		next->rate);
	emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
		src_clk_period, dst_clk_period);
	emc_dbg(emc, INFO, "  shared_zq_resistor: %d\n", !!shared_zq_resistor);
	emc_dbg(emc, INFO, "  num_channels: %u\n", emc->num_channels);
	emc_dbg(emc, INFO, "  opt_dll_mode: %d\n", opt_dll_mode);

	/*
	 * Step 1:
	 *   Pre DVFS SW sequence.
	 */
	emc_dbg(emc, STEPS, "Step 1\n");
	emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");

	value = emc_readl(emc, EMC_CFG_DIG_DLL);
	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
	emc_writel(emc, value, EMC_CFG_DIG_DLL);

	tegra210_emc_timing_update(emc);

	for (i = 0; i < emc->num_channels; i++)
		tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
					     EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);

	emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");

	emc_auto_cal_config = next->emc_auto_cal_config;
	auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
	emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
	emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
	emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
	emc_auto_cal_config |= auto_cal_en;
	emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
	emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */

	emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");

	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
	emc_writel(emc, emc_cfg, EMC_CFG);
	emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);

	if (next->periodic_training) {
		tegra210_emc_reset_dram_clktree_values(next);

		for (i = 0; i < emc->num_channels; i++)
			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
						     EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
						     0);

		for (i = 0; i < emc->num_channels; i++)
			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
						     EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
						     0);

		if (periodic_compensation_handler(emc, DVFS_SEQUENCE, fake, next))
			compensate_trimmer_applicable = true;
	}

	emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
	emc_writel(emc, emc_cfg, EMC_CFG);
	emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
	emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
		   EMC_CFG_PIPE_CLK);
	emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
			~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
		   EMC_FDPD_CTRL_CMD_NO_RAMP);

	bg_reg_mode_change =
		((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
		  EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
		 (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
		  EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
		((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
		  EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
		 (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
		  EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
	enable_bglp_reg =
		(next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
		 EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
	enable_bg_reg =
		(next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
		 EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;

	if (bg_reg_mode_change) {
		if (enable_bg_reg)
			emc_writel(emc, last->burst_regs
				   [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
				   ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
				   EMC_PMACRO_BG_BIAS_CTRL_0);

		if (enable_bglp_reg)
			emc_writel(emc, last->burst_regs
				   [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
				   ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
				   EMC_PMACRO_BG_BIAS_CTRL_0);
	}

	/* Check if we need to turn on VREF generator. */
	if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
	     ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) != 0)) ||
	    (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
	     ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
		u32 pad_tx_ctrl =
			next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
		u32 last_pad_tx_ctrl =
			last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
		u32 next_dq_e_ivref, next_dqs_e_ivref;

		next_dqs_e_ivref = pad_tx_ctrl &
				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
		next_dq_e_ivref = pad_tx_ctrl &
				  EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
		value = (last_pad_tx_ctrl &
			 ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
			 ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
			next_dq_e_ivref | next_dqs_e_ivref;
		emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
		udelay(1);
	} else if (bg_reg_mode_change) {
		udelay(1);
	}

	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);

	/*
	 * Step 2:
	 *   Prelock the DLL.
	 */
	emc_dbg(emc, STEPS, "Step 2\n");

	if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
	    EMC_CFG_DIG_DLL_CFG_DLL_EN) {
		emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
		value = tegra210_emc_dll_prelock(emc, clksrc);
		emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
	} else {
		emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
		tegra210_emc_dll_disable(emc);
	}

	/*
	 * Step 3:
	 *   Prepare autocal for the clock change.
	 */
	emc_dbg(emc, STEPS, "Step 3\n");

	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
	emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
	emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
	emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
	emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
	emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
	emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
	emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);

	emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
				auto_cal_en);
	emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);

	/*
	 * Step 4:
	 *   Update EMC_CFG. (??)
	 */
	emc_dbg(emc, STEPS, "Step 4\n");

	if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
		ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
	else
		emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);

	/*
	 * Step 5:
	 *   Prepare reference variables for ZQCAL regs.
	 */
	emc_dbg(emc, STEPS, "Step 5\n");

	if (dram_type == DRAM_TYPE_LPDDR4)
		zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
	else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
		zq_wait_long = max(next->min_mrs_wait,
				   div_o3(360000, dst_clk_period)) + 4;
	else if (dram_type == DRAM_TYPE_DDR3)
		zq_wait_long = max((u32)256,
				   div_o3(320000, dst_clk_period) + 2);
	else
		zq_wait_long = 0;

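	/*
	 * For reference, at a hypothetical dst_clk_period of 625 ps: the
	 * constants above are ZQ calibration intervals in picoseconds, so
	 * the LPDDR4 case yields zq_wait_long = max(1, 1000000 / 625) =
	 * 1600 destination clocks, i.e. a 1 us calibration window.
	 */
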
	/*
	 * Step 6:
	 *   Training code - removed.
	 */
	emc_dbg(emc, STEPS, "Step 6\n");

	/*
	 * Step 7:
	 *   Program FSP reference registers and send MRWs to new FSPWR.
	 */
	emc_dbg(emc, STEPS, "Step 7\n");
	emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P");

	/* WAR 200024907 */
	if (dram_type == DRAM_TYPE_LPDDR4) {
		u32 nRTP = 16;

		if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
			nRTP = 14;

		if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
			nRTP = 12;

		if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
			nRTP = 10;

		if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
			nRTP = 8;

		deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);

		/*
		 * Originally there was a + .5 in the tRPST calculation.
		 * However since we can't do FP in the kernel and the tRTM
		 * computation was in a floating point ceiling function, adding
		 * one to tRTP should be ok. There is no other source of non
		 * integer values, so the result was always going to be
		 * something of the form: f_ceil(N + .5) = N + 1;
		 */
		tRPST = (last->emc_mrw & 0x80) >> 7;
		tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
		       max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
		       1 + nRTP;

		emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
			next->burst_regs[EMC_RP_INDEX]);

		if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
			if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
				    last->burst_regs[EMC_RP_INDEX])) {
				R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
				RP_war = last->burst_regs[EMC_RP_INDEX];
				TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];

				if (R2P_war > 63) {
					RP_war = R2P_war +
						 last->burst_regs[EMC_RP_INDEX] - 63;

					if (TRPab_war < RP_war)
						TRPab_war = RP_war;

					R2P_war = 63;
				}
			} else {
				R2P_war = last->burst_regs[EMC_R2P_INDEX];
				RP_war = last->burst_regs[EMC_RP_INDEX];
				TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
			}

			if (RP_war < deltaTWATM) {
				W2P_war = last->burst_regs[EMC_W2P_INDEX]
					  + deltaTWATM - RP_war;
				if (W2P_war > 63) {
					RP_war = RP_war + W2P_war - 63;
					if (TRPab_war < RP_war)
						TRPab_war = RP_war;
					W2P_war = 63;
				}
			} else {
				W2P_war = last->burst_regs[EMC_W2P_INDEX];
			}

			if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
			    (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
			    (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
			    (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
				emc_writel(emc, RP_war, EMC_RP);
				emc_writel(emc, R2P_war, EMC_R2P);
				emc_writel(emc, W2P_war, EMC_W2P);
				emc_writel(emc, TRPab_war, EMC_TRPAB);
			}

			tegra210_emc_timing_update(emc);
		} else {
			emc_dbg(emc, INFO, "Skipped WAR\n");
		}
	}

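	/*
	 * fsp_for_next_freq alternates on every clock change so that the
	 * sequence ping-pongs between the two LPDDR4 frequency set points
	 * (per the LPDDR4 spec, FSP-WR and FSP-OP live in MR13 bits 6 and
	 * 7, which is presumably why bits 6 and 7 of the MRW3 opcode are
	 * masked off and re-applied below).
	 */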
	if (!fsp_for_next_freq) {
		mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
		mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
	} else {
		mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
		mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
	}

	if (dram_type == DRAM_TYPE_LPDDR4) {
		emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
		emc_writel(emc, next->emc_mrw, EMC_MRW);
		emc_writel(emc, next->emc_mrw2, EMC_MRW2);
	}

	/*
	 * Step 8:
	 *   Program the shadow registers.
	 */
	emc_dbg(emc, STEPS, "Step 8\n");
	emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");

	for (i = 0; i < next->num_burst; i++) {
		const u16 *offsets = emc->offsets->burst;
		u16 offset;

		if (!offsets[i])
			continue;

		value = next->burst_regs[i];
		offset = offsets[i];

		if (dram_type != DRAM_TYPE_LPDDR4 &&
		    (offset == EMC_MRW6 || offset == EMC_MRW7 ||
		     offset == EMC_MRW8 || offset == EMC_MRW9 ||
		     offset == EMC_MRW10 || offset == EMC_MRW11 ||
		     offset == EMC_MRW12 || offset == EMC_MRW13 ||
		     offset == EMC_MRW14 || offset == EMC_MRW15 ||
		     offset == EMC_TRAINING_CTRL))
			continue;

		/* Pain... And suffering. */
		if (offset == EMC_CFG) {
			value &= ~EMC_CFG_DRAM_ACPD;
			value &= ~EMC_CFG_DYN_SELF_REF;

			if (dram_type == DRAM_TYPE_LPDDR4) {
				value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
				value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
			}
		} else if (offset == EMC_MRS_WAIT_CNT &&
			   dram_type == DRAM_TYPE_LPDDR2 &&
			   opt_zcal_en_cc && !opt_cc_short_zcal &&
			   opt_short_zcal) {
			value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
					   EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
				((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
				 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
		} else if (offset == EMC_ZCAL_WAIT_CNT &&
			   dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
			   !opt_cc_short_zcal && opt_short_zcal) {
			value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
					   EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
				((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
				 EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT);
		} else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
			value = 0; /* EMC_ZCAL_INTERVAL reset value. */
		} else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
			value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
		} else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
			value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
		} else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
			value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
			value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
				   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
				   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
				   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
		} else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
			value &= 0xf800f800;
		} else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
			value &= 0xfffffff0;
		}

		emc_writel(emc, value, offset);
	}

	/* SW addition: do EMC refresh adjustment here. */
	tegra210_emc_adjust_timing(emc, next);

	if (dram_type == DRAM_TYPE_LPDDR4) {
		value = (23 << EMC_MRW_MRW_MA_SHIFT) |
			(next->run_clocks & EMC_MRW_MRW_OP_MASK);
		emc_writel(emc, value, EMC_MRW);
	}

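	/*
	 * The write above programs MR23, which in LPDDR4 holds the DQS
	 * oscillator run time that the periodic compensation code relies
	 * on (see tegra210_emc_get_clktree_delay() reading MR18/MR19).
	 */
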
	/* Per channel burst registers. */
	emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");

	for (i = 0; i < next->num_burst_per_ch; i++) {
		const struct tegra210_emc_per_channel_regs *burst =
			emc->offsets->burst_per_channel;

		if (!burst[i].offset)
			continue;

		if (dram_type != DRAM_TYPE_LPDDR4 &&
		    (burst[i].offset == EMC_MRW6 ||
		     burst[i].offset == EMC_MRW7 ||
		     burst[i].offset == EMC_MRW8 ||
		     burst[i].offset == EMC_MRW9 ||
		     burst[i].offset == EMC_MRW10 ||
		     burst[i].offset == EMC_MRW11 ||
		     burst[i].offset == EMC_MRW12 ||
		     burst[i].offset == EMC_MRW13 ||
		     burst[i].offset == EMC_MRW14 ||
		     burst[i].offset == EMC_MRW15))
			continue;

		/* Filter out second channel if not in DUAL_CHANNEL mode. */
		if (emc->num_channels < 2 && burst[i].bank >= 1)
			continue;

		emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
			next->burst_reg_per_ch[i], burst[i].offset);
		emc_channel_writel(emc, burst[i].bank,
				   next->burst_reg_per_ch[i],
				   burst[i].offset);
	}

	/* Vref regs. */
	emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");

	for (i = 0; i < next->vref_num; i++) {
		const struct tegra210_emc_per_channel_regs *vref =
			emc->offsets->vref_per_channel;

		if (!vref[i].offset)
			continue;

		if (emc->num_channels < 2 && vref[i].bank >= 1)
			continue;

		emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
			next->vref_perch_regs[i], vref[i].offset);
		emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
				   vref[i].offset);
	}

	/* Trimmers. */
	emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");

	for (i = 0; i < next->num_trim; i++) {
		const u16 *offsets = emc->offsets->trim;

		if (!offsets[i])
			continue;

		if (compensate_trimmer_applicable &&
		    (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
		     offsets[i] == EMC_DATA_BRLSHFT_0 ||
		     offsets[i] == EMC_DATA_BRLSHFT_1)) {
			value = tegra210_emc_compensate(next, offsets[i]);
			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
				value, offsets[i]);
			emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
				(u32)(u64)offsets[i], value);
			emc_writel(emc, value, offsets[i]);
		} else {
			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
				next->trim_regs[i], offsets[i]);
			emc_writel(emc, next->trim_regs[i], offsets[i]);
		}
	}

	/* Per channel trimmers. */
	emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");

	for (i = 0; i < next->num_trim_per_ch; i++) {
		const struct tegra210_emc_per_channel_regs *trim =
			&emc->offsets->trim_per_channel[0];
		unsigned int offset;

		if (!trim[i].offset)
			continue;

		if (emc->num_channels < 2 && trim[i].bank >= 1)
			continue;

		offset = trim[i].offset;

		if (compensate_trimmer_applicable &&
		    (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
		     offset == EMC_DATA_BRLSHFT_0 ||
		     offset == EMC_DATA_BRLSHFT_1)) {
			value = tegra210_emc_compensate(next, offset);
			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
				value, offset);
			emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
				value);
			emc_channel_writel(emc, trim[i].bank, value, offset);
		} else {
			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
				next->trim_perch_regs[i], offset);
			emc_channel_writel(emc, trim[i].bank,
					   next->trim_perch_regs[i], offset);
		}
	}

	emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");

	for (i = 0; i < next->num_mc_regs; i++) {
		const u16 *offsets = emc->offsets->burst_mc;
		u32 *values = next->burst_mc_regs;

		emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
			values[i], offsets[i]);
		mc_writel(emc->mc, values[i], offsets[i]);
	}

	/* Registers to be programmed on the faster clock. */
	if (next->rate < last->rate) {
		const u16 *la = emc->offsets->la_scale;

		emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");

		for (i = 0; i < next->num_up_down; i++) {
			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
				next->la_scale_regs[i], la[i]);
			mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
		}
	}
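
	/*
	 * Note that the latency-allowance scale registers are always
	 * written while the higher of the two rates is active: here,
	 * before the switch, when slowing down, and in step 25 below,
	 * after the switch, when speeding up.
	 */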

	/* Flush all the burst register writes. */
	mc_readl(emc->mc, MC_EMEM_ADR_CFG);

	/*
	 * Step 9:
	 *   LPDDR4 section A.
	 */
	emc_dbg(emc, STEPS, "Step 9\n");

	value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
	value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;

	if (dram_type == DRAM_TYPE_LPDDR4) {
		emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
		emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);

		value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
				   EMC_DBG_WRITE_ACTIVE_ONLY);

		emc_writel(emc, value, EMC_DBG);
		emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
		emc_writel(emc, emc_dbg, EMC_DBG);
	}

	/*
	 * Step 10:
	 *   LPDDR4 and DDR3 common section.
	 */
	emc_dbg(emc, STEPS, "Step 10\n");

	if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
		if (dram_type == DRAM_TYPE_LPDDR4)
			ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
		else
			ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);

		if (dram_type == DRAM_TYPE_LPDDR4 &&
		    dst_clk_period <= zqcal_before_cc_cutoff) {
			ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
			ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
					    0xFFFF3F3F) |
					   (last->burst_regs[EMC_MRW6_INDEX] &
					    0x0000C0C0), EMC_MRW6, 0);
			ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
					    0xFFFF0707) |
					   (last->burst_regs[EMC_MRW14_INDEX] &
					    0x00003838), EMC_MRW14, 0);

			if (emc->num_devices > 1) {
				ccfifo_writel(emc,
					      (next->burst_regs[EMC_MRW7_INDEX] &
					       0xFFFF3F3F) |
					      (last->burst_regs[EMC_MRW7_INDEX] &
					       0x0000C0C0), EMC_MRW7, 0);
				ccfifo_writel(emc,
					      (next->burst_regs[EMC_MRW15_INDEX] &
					       0xFFFF0707) |
					      (last->burst_regs[EMC_MRW15_INDEX] &
					       0x00003838), EMC_MRW15, 0);
			}

			if (opt_zcal_en_cc) {
				if (emc->num_devices < 2)
					ccfifo_writel(emc,
						      2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
						      EMC_ZQ_CAL_ZQ_CAL_CMD,
						      EMC_ZQ_CAL, 0);
				else if (shared_zq_resistor)
					ccfifo_writel(emc,
						      2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
						      EMC_ZQ_CAL_ZQ_CAL_CMD,
						      EMC_ZQ_CAL, 0);
				else
					ccfifo_writel(emc,
						      EMC_ZQ_CAL_ZQ_CAL_CMD,
						      EMC_ZQ_CAL, 0);
			}
		}
	}

	if (dram_type == DRAM_TYPE_LPDDR4) {
		value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
		ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
		ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
	}

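	/*
	 * The two CCFIFO entries above flip MR13 to the new frequency set
	 * point tRP source clocks after the self-refresh entry and then
	 * wait out the frequency-change time (tFC_lpddr4 is in picoseconds,
	 * so tFC_lpddr4 / src_clk_period is a count of source clocks; the
	 * zero/zero write only carries the delay, like the zqlatch WAR
	 * further down).
	 */
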
	if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
		delay = 30;

		if (cya_allow_ref_cc) {
			delay += (1000 * fake->dram_timings[T_RP]) /
				 src_clk_period;
			delay += 4000 * fake->dram_timings[T_RFC];
		}

		ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
					       EMC_PIN_PIN_CKEB |
					       EMC_PIN_PIN_CKE),
			      EMC_PIN, delay);
	}

	/* calculate reference delay multiplier */
	value = 1;

	if (ref_b4_sref_en)
		value++;

	if (cya_allow_ref_cc)
		value++;

	if (cya_issue_pc_ref)
		value++;

	if (dram_type != DRAM_TYPE_LPDDR4) {
		delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
			 (1000 * fake->dram_timings[T_RFC] / src_clk_period));
		delay = value * delay + 20;
	} else {
		delay = 0;
	}

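	/*
	 * With the configuration flags fixed as above, value stays 1, so
	 * on non-LPDDR4 DRAM the wait is simply tRP + tRFC (table values,
	 * presumably in nanoseconds given the 1000x scaling to picoseconds,
	 * divided by the source clock period) plus a 20-clock safety pad.
	 */
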
	/*
	 * Step 11:
	 *   Ramp down.
	 */
	emc_dbg(emc, STEPS, "Step 11\n");

	ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);

	value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
	ccfifo_writel(emc, value, EMC_DBG, 0);

	ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
							   0);

	/*
	 * Step 12:
	 *   And finally - trigger the clock change.
	 */
	emc_dbg(emc, STEPS, "Step 12\n");

	ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
	value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
	ccfifo_writel(emc, value, EMC_DBG, 0);

	/*
	 * Step 13:
	 *   Ramp up.
	 */
	emc_dbg(emc, STEPS, "Step 13\n");

	ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
	ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);

	/*
	 * Step 14:
	 *   Bringup CKE pins.
	 */
	emc_dbg(emc, STEPS, "Step 14\n");

	if (dram_type == DRAM_TYPE_LPDDR4) {
		value = emc_pin | EMC_PIN_PIN_CKE;

		if (emc->num_devices <= 1)
			value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
		else
			value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;

		ccfifo_writel(emc, value, EMC_PIN, 0);
	}

	/*
	 * Step 15: (two step 15s ??)
	 *   Calculate zqlatch wait time; has dependency on ramping times.
	 */
	emc_dbg(emc, STEPS, "Step 15\n");

	if (dst_clk_period <= zqcal_before_cc_cutoff) {
		s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
			(s32)dst_clk_period;
		zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
	} else {
		zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
			div_o3(1000 * next->dram_timings[T_PDEX],
			       dst_clk_period);
	}

	emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
	emc_dbg(emc, INFO, "dst_clk_period = %u\n",
		dst_clk_period);
	emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
		next->dram_timings[T_PDEX]);
	emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
		max_t(s32, 0, zq_latch_dvfs_wait_time));

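	/*
	 * In other words, with hypothetical numbers: when ZQ calibration
	 * was started before the clock change, the time already spent
	 * ramping counts against tZQCAL. If tZQCAL_lpddr4_fc_adj is 1312
	 * clocks and the ramps took 100000 ps at dst_clk_period = 625 ps
	 * (160 clocks), the latch only needs to wait 1312 - 160 = 1152
	 * more clocks.
	 */
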
	if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
		delay = div_o3(1000 * next->dram_timings[T_PDEX],
			       dst_clk_period);

		if (emc->num_devices < 2) {
			if (dst_clk_period > zqcal_before_cc_cutoff)
				ccfifo_writel(emc,
					      2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
					      EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
					      delay);

			value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
			ccfifo_writel(emc, value, EMC_MRW3, delay);
			ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
			ccfifo_writel(emc, 0, EMC_REF, 0);
			ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
				      EMC_ZQ_CAL_ZQ_LATCH_CMD,
				      EMC_ZQ_CAL,
				      max_t(s32, 0, zq_latch_dvfs_wait_time));
		} else if (shared_zq_resistor) {
			if (dst_clk_period > zqcal_before_cc_cutoff)
				ccfifo_writel(emc,
					      2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
					      EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
					      delay);

			ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
				      EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
				      max_t(s32, 0, zq_latch_dvfs_wait_time) +
				      delay);
			ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
				      EMC_ZQ_CAL_ZQ_LATCH_CMD,
				      EMC_ZQ_CAL, 0);

			value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
			ccfifo_writel(emc, value, EMC_MRW3, 0);
			ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
			ccfifo_writel(emc, 0, EMC_REF, 0);

			ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
				      EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
				      tZQCAL_lpddr4 / dst_clk_period);
		} else {
			if (dst_clk_period > zqcal_before_cc_cutoff)
				ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
					      EMC_ZQ_CAL, delay);

			value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
			ccfifo_writel(emc, value, EMC_MRW3, delay);
			ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
			ccfifo_writel(emc, 0, EMC_REF, 0);

			ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
				      max_t(s32, 0, zq_latch_dvfs_wait_time));
		}
	}

	/* WAR: delay for zqlatch */
	ccfifo_writel(emc, 0, 0, 10);

	/*
	 * Step 16:
	 *   LPDDR4 Conditional Training Kickoff. Removed.
	 */

	/*
	 * Step 17:
	 *   MANSR exit self refresh.
	 */
	emc_dbg(emc, STEPS, "Step 17\n");

	if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
		ccfifo_writel(emc, 0, EMC_SELF_REF, 0);

	/*
	 * Step 18:
	 *   Send MRWs to LPDDR3/DDR3.
	 */
	emc_dbg(emc, STEPS, "Step 18\n");

	if (dram_type == DRAM_TYPE_LPDDR2) {
		ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
		ccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);
		if (is_lpddr3)
			ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
	} else if (dram_type == DRAM_TYPE_DDR3) {
		if (opt_dll_mode)
			ccfifo_writel(emc, next->emc_emrs &
				      ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
		ccfifo_writel(emc, next->emc_emrs2 &
			      ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
		ccfifo_writel(emc, next->emc_mrs |
			      EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
	}

	/*
	 * Step 19:
	 *   ZQCAL for LPDDR3/DDR3.
	 */
	emc_dbg(emc, STEPS, "Step 19\n");

	if (opt_zcal_en_cc) {
		if (dram_type == DRAM_TYPE_LPDDR2) {
			value = opt_cc_short_zcal ? 90000 : 360000;
			value = div_o3(value, dst_clk_period);
			value = value <<
				EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
				value <<
				EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
			ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);

			value = opt_cc_short_zcal ? 0x56 : 0xab;
			ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
					   EMC_MRW_USE_MRW_EXT_CNT |
					   10 << EMC_MRW_MRW_MA_SHIFT |
					   value << EMC_MRW_MRW_OP_SHIFT,
				      EMC_MRW, 0);

			if (emc->num_devices > 1) {
				value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
					EMC_MRW_USE_MRW_EXT_CNT |
					10 << EMC_MRW_MRW_MA_SHIFT |
					value << EMC_MRW_MRW_OP_SHIFT;
				ccfifo_writel(emc, value, EMC_MRW, 0);
			}
		} else if (dram_type == DRAM_TYPE_DDR3) {
			value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;

			ccfifo_writel(emc, value |
					   2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
					   EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
				      0);

			if (emc->num_devices > 1) {
				value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
					EMC_ZQ_CAL_ZQ_CAL_CMD;
				ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
			}
		}
	}

	if (bg_reg_mode_change) {
		tegra210_emc_set_shadow_bypass(emc, ACTIVE);

		if (ramp_up_wait <= 1250000)
			delay = (1250000 - ramp_up_wait) / dst_clk_period;
		else
			delay = 0;

		ccfifo_writel(emc,
			      next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
			      EMC_PMACRO_BG_BIAS_CTRL_0, delay);
		tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
	}

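	/*
	 * The 1250000 above appears to be picoseconds, matching the units
	 * of ramp_up_wait: the bias-regulator write is held off until
	 * 1.25 us after the start of ramp-up, with the time consumed by
	 * the ramp itself already credited against that interval.
	 */
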
	/*
	 * Step 20:
	 *   Issue ref and optional QRST.
	 */
	emc_dbg(emc, STEPS, "Step 20\n");

	if (dram_type != DRAM_TYPE_LPDDR4)
		ccfifo_writel(emc, 0, EMC_REF, 0);

	if (opt_do_sw_qrst) {
		ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
		ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
	}

	/*
	 * Step 21:
	 *   Restore ZCAL and ZCAL interval.
	 */
	emc_dbg(emc, STEPS, "Step 21\n");

	if (save_restore_clkstop_pd || opt_zcal_en_cc) {
		ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
			      EMC_DBG, 0);
		if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
			ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
				      EMC_ZCAL_INTERVAL, 0);

		if (save_restore_clkstop_pd)
			ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
					   ~EMC_CFG_DYN_SELF_REF,
				      EMC_CFG, 0);
		ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
	}

	/*
	 * Step 22:
	 *   Restore EMC_CFG_PIPE_CLK.
	 */
	emc_dbg(emc, STEPS, "Step 22\n");

	ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);

	if (bg_reg_mode_change) {
		if (enable_bg_reg)
			emc_writel(emc,
				   next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
				   ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
				   EMC_PMACRO_BG_BIAS_CTRL_0);
		else
			emc_writel(emc,
				   next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
				   ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
				   EMC_PMACRO_BG_BIAS_CTRL_0);
	}

	/*
	 * Step 23:
	 */
	emc_dbg(emc, STEPS, "Step 23\n");

	value = emc_readl(emc, EMC_CFG_DIG_DLL);
	value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
	value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
		(2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
	emc_writel(emc, value, EMC_CFG_DIG_DLL);

	tegra210_emc_do_clock_change(emc, clksrc);

	/*
	 * Step 24:
	 *   Save training results. Removed.
	 */

	/*
	 * Step 25:
	 *   Program MC updown registers.
	 */
	emc_dbg(emc, STEPS, "Step 25\n");

	if (next->rate > last->rate) {
		for (i = 0; i < next->num_up_down; i++)
			mc_writel(emc->mc, next->la_scale_regs[i],
				  emc->offsets->la_scale[i]);

		tegra210_emc_timing_update(emc);
	}

	/*
	 * Step 26:
	 *   Restore ZCAL registers.
	 */
	emc_dbg(emc, STEPS, "Step 26\n");

	if (dram_type == DRAM_TYPE_LPDDR4) {
		tegra210_emc_set_shadow_bypass(emc, ACTIVE);
		emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
			   EMC_ZCAL_WAIT_CNT);
		emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
			   EMC_ZCAL_INTERVAL);
		tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
	}

	if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
	    !opt_short_zcal && opt_cc_short_zcal) {
		udelay(2);

		tegra210_emc_set_shadow_bypass(emc, ACTIVE);
		if (dram_type == DRAM_TYPE_LPDDR2)
			emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
				   EMC_MRS_WAIT_CNT);
		else if (dram_type == DRAM_TYPE_DDR3)
			emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
				   EMC_ZCAL_WAIT_CNT);
		tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
	}

	/*
	 * Step 27:
	 *   Restore EMC_CFG, FDPD registers.
	 */
	emc_dbg(emc, STEPS, "Step 27\n");

	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
	emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
	emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
		   EMC_FDPD_CTRL_CMD_NO_RAMP);
	emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);

	/*
	 * Step 28:
	 *   Training recover. Removed.
	 */
	emc_dbg(emc, STEPS, "Step 28\n");

	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
	emc_writel(emc,
		   next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
		   EMC_PMACRO_AUTOCAL_CFG_COMMON);
	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);

	/*
	 * Step 29:
	 *   Power fix WAR.
	 */
	emc_dbg(emc, STEPS, "Step 29\n");

	emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
			EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
		   EMC_PMACRO_CFG_PM_GLOBAL_0);
	emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
		   EMC_PMACRO_TRAINING_CTRL_0);
	emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
		   EMC_PMACRO_TRAINING_CTRL_1);
	emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);

	/*
	 * Step 30:
	 *   Re-enable autocal.
	 */
	emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");

	if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
		value = emc_readl(emc, EMC_CFG_DIG_DLL);
		value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
		value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
		value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
		value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
		value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
			(2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
		emc_writel(emc, value, EMC_CFG_DIG_DLL);
		tegra210_emc_timing_update(emc);
	}

	emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);

	/* Done! Yay. */
}

const struct tegra210_emc_sequence tegra210_emc_r21021 = {
	.revision = 0x7,
	.set_clock = tegra210_emc_r21021_set_clock,
	.periodic_compensation = tegra210_emc_r21021_periodic_compensation,
};