/*
 * EMIF programming
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <asm/emif.h>
#include <asm/arch/clocks.h>
#include <asm/arch/sys_proto.h>
#include <asm/omap_common.h>
#include <asm/utils.h>

void set_lpmode_selfrefresh(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 reg;

        reg = readl(&emif->emif_pwr_mgmt_ctrl);
        reg &= ~EMIF_REG_LP_MODE_MASK;
        reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
        reg &= ~EMIF_REG_SR_TIM_MASK;
        writel(reg, &emif->emif_pwr_mgmt_ctrl);

        /* dummy read for the new SR_TIM to be loaded */
        readl(&emif->emif_pwr_mgmt_ctrl);
}

void force_emif_self_refresh(void)
{
        set_lpmode_selfrefresh(EMIF1_BASE);
        set_lpmode_selfrefresh(EMIF2_BASE);
}

inline u32 emif_num(u32 base)
{
        if (base == EMIF1_BASE)
                return 1;
        else if (base == EMIF2_BASE)
                return 2;
        else
                return 0;
}

static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
{
        u32 mr;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        mr_addr |= cs << EMIF_REG_CS_SHIFT;
        writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
        if (omap_revision() == OMAP4430_ES2_0)
                mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
        else
                mr = readl(&emif->emif_lpddr2_mode_reg_data);
        debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
              cs, mr_addr, mr);
        if (((mr & 0x0000ff00) >>  8) == (mr & 0xff) &&
            ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
            ((mr & 0xff000000) >> 24) == (mr & 0xff))
                return mr & 0xff;
        else
                return mr;
}
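
/*
 * Illustrative note (not part of the original code): the EMIF can return
 * the 8-bit mode-register value replicated on the byte lanes of the 32-bit
 * data register, so a consistent read of, say, MR5 = 0x03 may come back as
 * 0x03030303; get_mr() collapses such reads to the plain 8-bit value and
 * returns anything else unchanged. This is why callers such as
 * is_lpddr2_sdram_present() treat any value above 0xFF as a bad read.
 */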

static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        mr_addr |= cs << EMIF_REG_CS_SHIFT;
        writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
        writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
}

void emif_reset_phy(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 iodft;

        iodft = readl(&emif->emif_iodft_tlgc);
        iodft |= EMIF_REG_RESET_PHY_MASK;
        writel(iodft, &emif->emif_iodft_tlgc);
}

static void do_lpddr2_init(u32 base, u32 cs)
{
        u32 mr_addr;

        /* Wait till device auto initialization is complete */
        while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
                ;
        set_mr(base, cs, LPDDR2_MR10, MR10_ZQ_ZQINIT);
        /*
         * Allow tZQINIT = 1 us for the ZQ initialization calibration to
         * complete. sdelay(2000) spins for 2000 loop iterations, each of
         * which takes at least one CPU cycle, so the delay covers 1 us
         * even on a 2 GHz CPU.
         */

        sdelay(2000);

        if (omap_revision() >= OMAP5430_ES1_0)
                set_mr(base, cs, LPDDR2_MR1, MR1_BL_8_BT_SEQ_WRAP_EN_NWR_8);
        else
                set_mr(base, cs, LPDDR2_MR1, MR1_BL_8_BT_SEQ_WRAP_EN_NWR_3);

        set_mr(base, cs, LPDDR2_MR16, MR16_REF_FULL_ARRAY);

        /*
         * Enable refresh along with writing MR2
         * Encoding of RL in MR2 is (RL - 2)
         */
        mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
        set_mr(base, cs, mr_addr, RL_FINAL - 2);

        if (omap_revision() >= OMAP5430_ES1_0)
                set_mr(base, cs, LPDDR2_MR3, 0x1);
}

static void lpddr2_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 *ext_phy_ctrl_base = 0;
        u32 *emif_ext_phy_ctrl_base = 0;
        u32 i = 0;

        /* Not NVM */
        clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);

        /*
         * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
         * when EMIF_SDRAM_CONFIG register is written
         */
        setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

        /*
         * Set the SDRAM_CONFIG and PHY_CTRL for the
         * un-locked frequency & default RL
         */
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);

        ext_phy_ctrl_base = (u32 *) &(regs->emif_ddr_ext_phy_ctrl_1);
        emif_ext_phy_ctrl_base = (u32 *) &(emif->emif_ddr_ext_phy_ctrl_1);

        if (omap_revision() >= OMAP5430_ES1_0) {
                /* Configure external phy control timing registers */
                for (i = 0; i < EMIF_EXT_PHY_CTRL_TIMING_REG; i++) {
                        writel(*ext_phy_ctrl_base, emif_ext_phy_ctrl_base++);
                        /* Update shadow registers */
                        writel(*ext_phy_ctrl_base++, emif_ext_phy_ctrl_base++);
                }

                /*
                 * external phy 6-24 registers do not change with
                 * ddr frequency
                 */
                for (i = 0; i < EMIF_EXT_PHY_CTRL_CONST_REG; i++) {
                        writel(ext_phy_ctrl_const_base[i],
                                                emif_ext_phy_ctrl_base++);
                        /* Update shadow registers */
                        writel(ext_phy_ctrl_const_base[i],
                                                emif_ext_phy_ctrl_base++);
                }
        }

        do_lpddr2_init(base, CS0);
        if (regs->sdram_config & EMIF_REG_EBANK_MASK)
                do_lpddr2_init(base, CS1);

        writel(regs->sdram_config, &emif->emif_sdram_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);

        /* Enable refresh now */
        clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
}

void emif_update_timings(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
        if (omap_revision() == OMAP4430_ES1_0) {
                /* ES1 bug: EMIF should be in force idle during freq_update */
                writel(0, &emif->emif_pwr_mgmt_ctrl);
        } else {
                writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
                writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
        }
        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
        writel(regs->zq_config, &emif->emif_zq_config);
        writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);

        if (omap_revision() >= OMAP5430_ES1_0) {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
                        &emif->emif_l3_config);
        } else if (omap_revision() >= OMAP4460_ES1_0) {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
                        &emif->emif_l3_config);
        } else {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
                        &emif->emif_l3_config);
        }
}

static void ddr3_leveling(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* keep sdram in self-refresh */
        writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
                & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
        __udelay(130);

        /*
         * Set invert_clkout (if activated) in DDR_PHYCTRL_1.
         * Inverting the clock adds an additional half-cycle delay on the
         * command interface. The additional half cycle is usually meant to
         * enable leveling in the situation where DQS is later than CK on
         * the board. It also helps provide some additional margin for
         * leveling.
         */
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
        __udelay(130);

        writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
                & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);

        /* Launch Full leveling */
        writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

        /* Wait till full leveling is complete */
        readl(&emif->emif_rd_wr_lvl_ctl);
        __udelay(130);

        /* Read data eye leveling no of samples */
        config_data_eye_leveling_samples(base);

        /* Launch 8 incremental WR_LVL - to compensate for PHY limitation */
        writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT, &emif->emif_rd_wr_lvl_ctl);
        __udelay(130);

        /* Launch Incremental leveling */
        writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
        __udelay(130);
}

static void ddr3_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 *ext_phy_ctrl_base = 0;
        u32 *emif_ext_phy_ctrl_base = 0;
        u32 i = 0;

        /*
         * Set SDRAM_CONFIG and PHY control registers to the locked frequency
         * and RL = 7. As the default values of the mode registers are not
         * defined, the contents of the mode registers must be fully
         * initialized. The hardware takes care of this initialization.
         */
        writel(regs->sdram_config_init, &emif->emif_sdram_config);

        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

        /* Update timing registers */
        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

        writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);

        ext_phy_ctrl_base = (u32 *) &(regs->emif_ddr_ext_phy_ctrl_1);
        emif_ext_phy_ctrl_base = (u32 *) &(emif->emif_ddr_ext_phy_ctrl_1);

        /* Configure external phy control timing registers */
        for (i = 0; i < EMIF_EXT_PHY_CTRL_TIMING_REG; i++) {
                writel(*ext_phy_ctrl_base, emif_ext_phy_ctrl_base++);
                /* Update shadow registers */
                writel(*ext_phy_ctrl_base++, emif_ext_phy_ctrl_base++);
        }

        /*
         * external phy 6-24 registers do not change with
         * ddr frequency
         */
        for (i = 0; i < EMIF_EXT_PHY_CTRL_CONST_REG; i++) {
                writel(ddr3_ext_phy_ctrl_const_base[i],
                                        emif_ext_phy_ctrl_base++);
                /* Update shadow registers */
                writel(ddr3_ext_phy_ctrl_const_base[i],
                                        emif_ext_phy_ctrl_base++);
        }

        /* enable leveling */
        writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);

        ddr3_leveling(base, regs);
}

#ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
#define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))

/*
 * Organization and refresh requirements for LPDDR2 devices of different
 * types and densities. Derived from JESD209-2 section 2.4
 */
const struct lpddr2_addressing addressing_table[] = {
        /* Banks tREFIx10     rowx32,rowx16      colx32,colx16  density */
        {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
        {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
        {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
        {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
        {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
        {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
        {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
        {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
        {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
        {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
};
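
/*
 * Example of reading the table above (illustrative): a 4Gb part uses 8
 * banks, needs an average refresh interval tREFI of 3.9 us and has 14 row
 * bits; the column width depends on the I/O width - 10 column bits for a
 * x32 device and 11 for a x16 device - so that rows + columns + banks
 * always address the full 4Gb.
 */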

static const u32 lpddr2_density_2_size_in_mbytes[] = {
        8,                      /* 64Mb */
        16,                     /* 128Mb */
        32,                     /* 256Mb */
        64,                     /* 512Mb */
        128,                    /* 1Gb   */
        256,                    /* 2Gb   */
        512,                    /* 4Gb   */
        1024,                   /* 8Gb   */
        2048,                   /* 16Gb  */
        4096                    /* 32Gb  */
};
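
/*
 * For illustration: the table above converts the MR8 density code to the
 * size of one die in megabytes, i.e. the density in megabits divided by 8.
 * A 4Gb die is 4096 Mb / 8 = 512 MB, matching the seventh entry; two such
 * dies (one per chip-select) give 1024 MB on that EMIF, which is how
 * get_emif_mem_size() uses this table.
 */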

/*
 * Calculate the period of DDR clock from frequency value and set the
 * denominator and numerator in global variables for easy access later
 */
static void set_ddr_clk_period(u32 freq)
{
        /*
         * period = 1/freq
         * period_in_ns = 10^9/freq
         */
        *T_num = 1000000000;
        *T_den = freq;
        cancel_out(T_num, T_den, 200);
}

/*
 * Convert time in nano seconds to number of cycles of DDR clock
 */
static inline u32 ns_2_cycles(u32 ns)
{
        return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
}

/*
 * Like ns_2_cycles(), except that the time passed in is 2 times the actual
 * value (to avoid fractions). The cycle count returned is for the original
 * value of the timing parameter.
 */
static inline u32 ns_x2_2_cycles(u32 ns)
{
        return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
}
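
/*
 * Worked example (illustrative): for freq = 400000000 Hz the period is
 * kept as the fraction T_num/T_den = 1000000000/400000000, which
 * cancel_out() reduces to 5/2, i.e. 2.5 ns per cycle. ns_2_cycles(18) then
 * computes (18 * 2 + 5 - 1) / 5 = 8 cycles - a ceiling division, so the
 * programmed timing is never shorter than the requested 18 ns. Similarly,
 * ns_x2_2_cycles(15) treats its argument as 2 x 7.5 ns and returns
 * (15 * 2 + 2 * 5 - 1) / (2 * 5) = 3 cycles.
 */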

/*
 * Find the addressing table index based on the device's type (S2 or S4)
 * and density
 */
s8 addressing_table_index(u8 type, u8 density, u8 width)
{
        u8 index;
        if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
                return -1;

        /*
         * Look at the way the ADDR_TABLE_INDEX* values have been defined
         * in emif.h compared to the LPDDR2_DENSITY_* values.
         * The table is laid out in increasing order of density (ignoring
         * type). The exceptions, 1GS2 and 2GS2, have been placed at the end.
         */
        if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
                index = ADDR_TABLE_INDEX1GS2;
        else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
                index = ADDR_TABLE_INDEX2GS2;
        else
                index = density;

        debug("emif: addressing table index %d\n", index);

        return index;
}

/*
 * Find the right timing table from the array of timing tables of the
 * device using the DDR clock frequency
 */
static const struct lpddr2_ac_timings *get_timings_table(const struct
                        lpddr2_ac_timings const *const *device_timings,
                        u32 freq)
{
        u32 i, temp, freq_nearest;
        const struct lpddr2_ac_timings *timings = 0;

        emif_assert(freq <= MAX_LPDDR2_FREQ);
        emif_assert(device_timings);

        /*
         * Start with the maximum allowed frequency - that is always safe
         */
        freq_nearest = MAX_LPDDR2_FREQ;
        /*
         * Find the timings table that has the max frequency value:
         *   i.  Above or equal to the DDR frequency - safe
         *   ii. The lowest that satisfies condition (i) - optimal
         */
        for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
                temp = device_timings[i]->max_freq;
                if ((temp >= freq) && (temp <= freq_nearest)) {
                        freq_nearest = temp;
                        timings = device_timings[i];
                }
        }
        debug("emif: timings table: %d\n", freq_nearest);
        return timings;
}
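
/*
 * Selection example (illustrative): if a device ships timing tables for
 * speed-bins of 333 MHz and 400 MHz and the DDR clock is 350 MHz, the
 * 333 MHz table fails condition (i) and is skipped, while the 400 MHz
 * table passes and becomes the new freq_nearest; the safest table that is
 * still at or above the operating frequency is therefore returned.
 */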

/*
 * Finds the value of emif_sdram_config_reg.
 * All parameters are programmed based on the device on CS0.
 * If there is a device on CS1, it will either be the same as the one on
 * CS0 or it will be NVM. We don't support NVM yet.
 * If the cs1_device pointer is NULL it is assumed that there is no device
 * on CS1.
 */
static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
                                const struct lpddr2_device_details *cs1_device,
                                const struct lpddr2_addressing *addressing,
                                u8 RL)
{
        u32 config_reg = 0;

        config_reg |=  (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
        config_reg |=  EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
                        EMIF_REG_IBANK_POS_SHIFT;

        config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;

        config_reg |= RL << EMIF_REG_CL_SHIFT;

        config_reg |= addressing->row_sz[cs0_device->io_width] <<
                        EMIF_REG_ROWSIZE_SHIFT;

        config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;

        config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
                        EMIF_REG_EBANK_SHIFT;

        config_reg |= addressing->col_sz[cs0_device->io_width] <<
                        EMIF_REG_PAGESIZE_SHIFT;

        return config_reg;
}

static u32 get_sdram_ref_ctrl(u32 freq,
                              const struct lpddr2_addressing *addressing)
{
        u32 ref_ctrl = 0, val = 0, freq_khz;
        freq_khz = freq / 1000;
        /*
         * The refresh rate to be programmed is 'tREFI * freq in MHz'.
         * Divide by 10000 to account for kHz and the x10 factor in
         * t_REFI_us_x10.
         */
        val = addressing->t_REFI_us_x10 * freq_khz / 10000;
        ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;

        return ref_ctrl;
}
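
/*
 * Worked example (illustrative): a 2Gb S4 device uses T_REFI_3_9 in the
 * addressing table, i.e. t_REFI_us_x10 = 39. At freq = 400 MHz,
 * freq_khz = 400000 and the refresh rate field becomes
 * 39 * 400000 / 10000 = 1560 DDR clock cycles between refreshes, which is
 * 1560 * 2.5 ns = 3.9 us as required.
 */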

static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck,
                               const struct lpddr2_addressing *addressing)
{
        u32 tim1 = 0, val = 0;
        val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
        tim1 |= val << EMIF_REG_T_WTR_SHIFT;

        if (addressing->num_banks == BANKS8)
                val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
                                                        (4 * (*T_num)) - 1;
        else
                val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;

        tim1 |= val << EMIF_REG_T_RRD_SHIFT;

        val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
        tim1 |= val << EMIF_REG_T_RC_SHIFT;

        val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
        tim1 |= val << EMIF_REG_T_RAS_SHIFT;

        val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
        tim1 |= val << EMIF_REG_T_WR_SHIFT;

        val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
        tim1 |= val << EMIF_REG_T_RCD_SHIFT;

        val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
        tim1 |= val << EMIF_REG_T_RP_SHIFT;

        return tim1;
}
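
/*
 * Worked example for the 8-bank branch above (illustrative): T_RRD must
 * also cover the four-activate window, so it is programmed as
 * ceil(tFAW / (4 * tCK)) - 1. Assuming tFAW = 50 ns and tCK = 2.5 ns
 * (T_num/T_den = 5/2), (50 * 2 + 4 * 5 - 1) / (4 * 5) - 1 = 119 / 20 - 1
 * = 5 - 1 = 4, i.e. 5 cycles between ACTIVATEs so that any four of them
 * span at least 50 ns.
 */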

static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck)
{
        u32 tim2 = 0, val = 0;
        val = max(min_tck->tCKE, timings->tCKE) - 1;
        tim2 |= val << EMIF_REG_T_CKE_SHIFT;

        val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
        tim2 |= val << EMIF_REG_T_RTP_SHIFT;

        /*
         * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
         * same value
         */
        val = ns_2_cycles(timings->tXSR) - 1;
        tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
        tim2 |= val << EMIF_REG_T_XSNR_SHIFT;

        val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
        tim2 |= val << EMIF_REG_T_XP_SHIFT;

        return tim2;
}

static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck,
                               const struct lpddr2_addressing *addressing)
{
        u32 tim3 = 0, val = 0;
        val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
        tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;

        val = ns_2_cycles(timings->tRFCab) - 1;
        tim3 |= val << EMIF_REG_T_RFC_SHIFT;

        val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
        tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;

        val = ns_2_cycles(timings->tZQCS) - 1;
        tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;

        val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
        tim3 |= val << EMIF_REG_T_CKESR_SHIFT;

        return tim3;
}
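
/*
 * Worked example (illustrative): the T_RAS_MAX field counts refresh
 * periods rather than cycles. Assuming the JESD209-2 value tRASmax = 70 us
 * and tREFI = 7.8 us (t_REFI_us_x10 = 78), 70 * 10 / 78 - 1 = 8 - 1 = 7,
 * well below the 0xF ceiling applied by the min() above.
 */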

static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
                             const struct lpddr2_addressing *addressing,
                             u8 volt_ramp)
{
        u32 zq = 0, val = 0;
        if (volt_ramp)
                val =
                    EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
                    addressing->t_REFI_us_x10;
        else
                val =
                    EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
                    addressing->t_REFI_us_x10;
        zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;

        zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;

        zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;

        zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;

        /*
         * Assume that the two chip-selects share a single calibration
         * resistor. If there are indeed two calibration resistors, this
         * flag should be enabled to take advantage of the dual calibration
         * feature. This data should ideally come from board files, but
         * considering that none of the boards today have calibration
         * resistors per CS, it would be an unnecessary overhead.
         */
        zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;

        zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;

        zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;

        return zq;
}

static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
                                 const struct lpddr2_addressing *addressing,
                                 u8 is_derated)
{
        u32 alert = 0, interval;
        interval =
            TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
        if (is_derated)
                interval *= 4;
        alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;

        alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;

        alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;

        alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;

        alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;

        alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;

        return alert;
}

static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
{
        u32 idle = 0, val = 0;
        if (volt_ramp)
                val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
        else
                /* Maximum value in normal conditions - suggested by hw team */
                val = 0x1FF;
        idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;

        idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;

        return idle;
}

static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
{
        u32 phy = 0, val = 0;

        phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;

        if (freq <= 100000000)
                val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
        else if (freq <= 200000000)
                val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
        else
                val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
        phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;

        /* Other fields are constant magic values. Hardcode them together */
        phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
                EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;

        return phy;
}

static u32 get_emif_mem_size(struct emif_device_details *devices)
{
        u32 size_mbytes = 0, temp;

        if (!devices)
                return 0;

        if (devices->cs0_device_details) {
                temp = devices->cs0_device_details->density;
                size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
        }

        if (devices->cs1_device_details) {
                temp = devices->cs1_device_details->density;
                size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
        }
        /* convert to bytes */
        return size_mbytes << 20;
}
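
/*
 * Sizing example (illustrative): with a 2Gb LPDDR2 die on each chip-select,
 * both lookups return 256 MB, size_mbytes becomes 512 and the function
 * returns 512 << 20 = 0x20000000 bytes for this EMIF. dmm_init() later uses
 * these per-EMIF totals to build the LISA section map.
 */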

/* Gets the encoding corresponding to a given DMM section size */
u32 get_dmm_section_size_map(u32 section_size)
{
        /*
         * Section size mapping:
         * 0x0: 16-MiB section
         * 0x1: 32-MiB section
         * 0x2: 64-MiB section
         * 0x3: 128-MiB section
         * 0x4: 256-MiB section
         * 0x5: 512-MiB section
         * 0x6: 1-GiB section
         * 0x7: 2-GiB section
         */
        section_size >>= 24; /* divide by 16 MiB */
        return log_2_n_round_down(section_size);
}
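
/*
 * Encoding example (illustrative): for a 512 MiB section,
 * section_size >> 24 gives 512 MiB / 16 MiB = 32 and
 * log_2_n_round_down(32) = 5, which is the 0x5 "512-MiB section" code from
 * the table above. A 1.5 GiB request would round down to the 1-GiB code
 * (0x6), so callers are expected to pass power-of-two section sizes.
 */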

static void emif_calculate_regs(
                const struct emif_device_details *emif_dev_details,
                u32 freq, struct emif_regs *regs)
{
        u32 temp, sys_freq;
        const struct lpddr2_addressing *addressing;
        const struct lpddr2_ac_timings *timings;
        const struct lpddr2_min_tck *min_tck;
        const struct lpddr2_device_details *cs0_dev_details =
                                        emif_dev_details->cs0_device_details;
        const struct lpddr2_device_details *cs1_dev_details =
                                        emif_dev_details->cs1_device_details;
        const struct lpddr2_device_timings *cs0_dev_timings =
                                        emif_dev_details->cs0_device_timings;

        emif_assert(emif_dev_details);
        emif_assert(regs);
        /*
         * You cannot have a device on CS1 without one on CS0, so configuring
         * the EMIF without a device on CS0 doesn't make sense.
         */
        emif_assert(cs0_dev_details);
        emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
        /*
         * If there is a device on CS1 it should be the same type as the one
         * on CS0 (or NVM, but NVM is not supported in this driver yet).
         */
        emif_assert((cs1_dev_details == NULL) ||
                    (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
                    (cs0_dev_details->type == cs1_dev_details->type));
        emif_assert(freq <= MAX_LPDDR2_FREQ);

        set_ddr_clk_period(freq);

        /*
         * The device on CS0 is used for all timing calculations.
         * There is only one set of timing registers per EMIF, so if the
         * second CS (CS1) has a device, it should have the same timings as
         * the device on CS0.
         */
        timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
        emif_assert(timings);
        min_tck = cs0_dev_timings->min_tck;

        temp = addressing_table_index(cs0_dev_details->type,
                                      cs0_dev_details->density,
                                      cs0_dev_details->io_width);

        emif_assert((temp >= 0));
        addressing = &(addressing_table[temp]);
        emif_assert(addressing);

        sys_freq = get_sys_clk_freq();

        regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
                                                        cs1_dev_details,
                                                        addressing, RL_BOOT);

        regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
                                                cs1_dev_details,
                                                addressing, RL_FINAL);

        regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);

        regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);

        regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);

        regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);

        regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);

        regs->temp_alert_config =
            get_temp_alert_config(cs1_dev_details, addressing, 0);

        regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
                                            LPDDR2_VOLTAGE_STABLE);

        regs->emif_ddr_phy_ctlr_1_init =
                        get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);

        regs->emif_ddr_phy_ctlr_1 =
                        get_ddr_phy_ctrl_1(freq, RL_FINAL);

        regs->freq = freq;

        print_timing_reg(regs->sdram_config_init);
        print_timing_reg(regs->sdram_config);
        print_timing_reg(regs->ref_ctrl);
        print_timing_reg(regs->sdram_tim1);
        print_timing_reg(regs->sdram_tim2);
        print_timing_reg(regs->sdram_tim3);
        print_timing_reg(regs->read_idle_ctrl);
        print_timing_reg(regs->temp_alert_config);
        print_timing_reg(regs->zq_config);
        print_timing_reg(regs->emif_ddr_phy_ctlr_1);
        print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
}
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

#ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
const char *get_lpddr2_type(u8 type_id)
{
        switch (type_id) {
        case LPDDR2_TYPE_S4:
                return "LPDDR2-S4";
        case LPDDR2_TYPE_S2:
                return "LPDDR2-S2";
        default:
                return NULL;
        }
}

const char *get_lpddr2_io_width(u8 width_id)
{
        switch (width_id) {
        case LPDDR2_IO_WIDTH_8:
                return "x8";
        case LPDDR2_IO_WIDTH_16:
                return "x16";
        case LPDDR2_IO_WIDTH_32:
                return "x32";
        default:
                return NULL;
        }
}

const char *get_lpddr2_manufacturer(u32 manufacturer)
{
        switch (manufacturer) {
        case LPDDR2_MANUFACTURER_SAMSUNG:
                return "Samsung";
        case LPDDR2_MANUFACTURER_QIMONDA:
                return "Qimonda";
        case LPDDR2_MANUFACTURER_ELPIDA:
                return "Elpida";
        case LPDDR2_MANUFACTURER_ETRON:
                return "Etron";
        case LPDDR2_MANUFACTURER_NANYA:
                return "Nanya";
        case LPDDR2_MANUFACTURER_HYNIX:
                return "Hynix";
        case LPDDR2_MANUFACTURER_MOSEL:
                return "Mosel";
        case LPDDR2_MANUFACTURER_WINBOND:
                return "Winbond";
        case LPDDR2_MANUFACTURER_ESMT:
                return "ESMT";
        case LPDDR2_MANUFACTURER_SPANSION:
                return "Spansion";
        case LPDDR2_MANUFACTURER_SST:
                return "SST";
        case LPDDR2_MANUFACTURER_ZMOS:
                return "ZMOS";
        case LPDDR2_MANUFACTURER_INTEL:
                return "Intel";
        case LPDDR2_MANUFACTURER_NUMONYX:
                return "Numonyx";
        case LPDDR2_MANUFACTURER_MICRON:
                return "Micron";
        default:
                return NULL;
        }
}

static void display_sdram_details(u32 emif_nr, u32 cs,
                                  struct lpddr2_device_details *device)
{
        const char *mfg_str;
        const char *type_str;
        char density_str[10];
        u32 density;

        debug("EMIF%d CS%d\t", emif_nr, cs);

        if (!device) {
                debug("None\n");
                return;
        }

        mfg_str = get_lpddr2_manufacturer(device->manufacturer);
        type_str = get_lpddr2_type(device->type);

        density = lpddr2_density_2_size_in_mbytes[device->density];
        if ((density / 1024 * 1024) == density) {
                density /= 1024;
                sprintf(density_str, "%d GB", density);
        } else
                sprintf(density_str, "%d MB", density);
        if (mfg_str && type_str)
                debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
}

static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
                                  struct lpddr2_device_details *lpddr2_device)
{
        u32 mr = 0, temp;

        mr = get_mr(base, cs, LPDDR2_MR0);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
        if (temp) {
                /* Not SDRAM */
                return 0;
        }
        temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;

        if (temp) {
                /* DNV supported - But DNV is only supported for NVM */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR4);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR5);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        if (!get_lpddr2_manufacturer(mr)) {
                /* Manufacturer not identified */
                return 0;
        }
        lpddr2_device->manufacturer = mr;

        mr = get_mr(base, cs, LPDDR2_MR6);
        if (mr >= 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR7);
        if (mr >= 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR8);
        if (mr >= 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
        if (!get_lpddr2_type(temp)) {
                /* Not SDRAM */
                return 0;
        }
        lpddr2_device->type = temp;

        temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
        if (temp > LPDDR2_DENSITY_32Gb) {
                /* Density not supported */
                return 0;
        }
        lpddr2_device->density = temp;

        temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
        if (!get_lpddr2_io_width(temp)) {
                /* IO width unsupported value */
                return 0;
        }
        lpddr2_device->io_width = temp;

        /*
         * If all the above tests pass we should
         * have a device on this chip-select
         */
        return 1;
}

struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
                        struct lpddr2_device_details *lpddr2_dev_details)
{
        u32 phy;
        u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;

        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (!lpddr2_dev_details)
                return NULL;

        /* Do the minimum init for mode register accesses */
        if (!(running_from_sdram() || warm_reset())) {
                phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
                writel(phy, &emif->emif_ddr_phy_ctrl_1);
        }

        if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
                return NULL;

        display_sdram_details(emif_num(base), cs, lpddr2_dev_details);

        return lpddr2_dev_details;
}
#endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */

static void do_sdram_init(u32 base)
{
        const struct emif_regs *regs;
        u32 in_sdram, emif_nr;

        debug(">>do_sdram_init() %x\n", base);

        in_sdram = running_from_sdram();
        emif_nr = (base == EMIF1_BASE) ? 1 : 2;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
        emif_get_reg_dump(emif_nr, &regs);
        if (!regs) {
                debug("EMIF: reg dump not provided\n");
                return;
        }
#else
        /*
         * The user has not provided the register values. We need to
         * calculate them based on the timings and the DDR frequency.
         */
        struct emif_device_details dev_details;
        struct emif_regs calculated_regs;

        /*
         * Get device details:
         * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
         * - Obtained from user otherwise
         */
        struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
        emif_reset_phy(base);
        dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
                                                &cs0_dev_details);
        dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
                                                &cs1_dev_details);
        emif_reset_phy(base);

        /* Return if no devices on this EMIF */
        if (!dev_details.cs0_device_details &&
            !dev_details.cs1_device_details) {
                emif_sizes[emif_nr - 1] = 0;
                return;
        }

        if (!in_sdram)
                emif_sizes[emif_nr - 1] = get_emif_mem_size(&dev_details);

        /*
         * Get device timings:
         * - Default timings specified by JESD209-2 if
         *   CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
         * - Obtained from user otherwise
         */
        emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
                                &dev_details.cs1_device_timings);

        /* Calculate the register values */
        emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
        regs = &calculated_regs;
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

        /*
         * Initializing the LPDDR2 device cannot happen from SDRAM.
         * Changing the timing registers in the EMIF can (e.g. when going
         * from one OPP to another).
         */
        if (!(in_sdram || warm_reset())) {
                if (omap_revision() != OMAP5432_ES1_0)
                        lpddr2_init(base, regs);
                else
                        ddr3_init(base, regs);
        }

        /* Write to the shadow registers */
        emif_update_timings(base, regs);

        debug("<<do_sdram_init() %x\n", base);
}

void emif_post_init_config(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 omap_rev = omap_revision();

        if (omap_rev == OMAP5430_ES1_0)
                return;

        /* reset phy on ES2.0 */
        if (omap_rev == OMAP4430_ES2_0)
                emif_reset_phy(base);

        /* Put EMIF back in smart idle on ES1.0 */
        if (omap_rev == OMAP4430_ES1_0)
                writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
}

void dmm_init(u32 base)
{
        const struct dmm_lisa_map_regs *lisa_map_regs;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
        emif_get_dmm_regs(&lisa_map_regs);
#else
        u32 emif1_size, emif2_size, mapped_size, section_map = 0;
        u32 section_cnt, sys_addr;
        struct dmm_lisa_map_regs lis_map_regs_calculated = {0};

        mapped_size = 0;
        section_cnt = 3;
        sys_addr = CONFIG_SYS_SDRAM_BASE;
        emif1_size = emif_sizes[0];
        emif2_size = emif_sizes[1];
        debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);

        if (!emif1_size && !emif2_size)
                return;

        /* symmetric interleaved section */
        if (emif1_size && emif2_size) {
                mapped_size = min(emif1_size, emif2_size);
                section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
                section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) <<
                                EMIF_SYS_ADDR_SHIFT;
                section_map |= get_dmm_section_size_map(mapped_size * 2)
                                << EMIF_SYS_SIZE_SHIFT;
                lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
                emif1_size -= mapped_size;
                emif2_size -= mapped_size;
                sys_addr += (mapped_size * 2);
                section_cnt--;
        }

        /*
         * Single-EMIF section (we can have at most one single-EMIF section:
         * either EMIF1 or EMIF2 or none, but not both)
         */
        if (emif1_size) {
                section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
                section_map |= get_dmm_section_size_map(emif1_size)
                                << EMIF_SYS_SIZE_SHIFT;
                /* only MSB */
                section_map |= (mapped_size >> 24) <<
                                EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
                section_cnt--;
        }
        if (emif2_size) {
                section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
                section_map |= get_dmm_section_size_map(emif2_size) <<
                                EMIF_SYS_SIZE_SHIFT;
                /* only MSB */
                section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
                section_cnt--;
        }

        if (section_cnt == 2) {
                /* Only 1 section - either symmetric or single EMIF */
                lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
                lis_map_regs_calculated.dmm_lisa_map_2 = 0;
                lis_map_regs_calculated.dmm_lisa_map_1 = 0;
        } else {
                /* 2 sections - 1 symmetric, 1 single EMIF */
                lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
                lis_map_regs_calculated.dmm_lisa_map_1 = 0;
        }

        /* TRAP for invalid TILER mappings in section 0 */
        lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;

        lisa_map_regs = &lis_map_regs_calculated;
#endif
        struct dmm_lisa_map_regs *hw_lisa_map_regs =
            (struct dmm_lisa_map_regs *)base;

        writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);

        writel(lisa_map_regs->dmm_lisa_map_3,
                &hw_lisa_map_regs->dmm_lisa_map_3);
        writel(lisa_map_regs->dmm_lisa_map_2,
                &hw_lisa_map_regs->dmm_lisa_map_2);
        writel(lisa_map_regs->dmm_lisa_map_1,
                &hw_lisa_map_regs->dmm_lisa_map_1);
        writel(lisa_map_regs->dmm_lisa_map_0,
                &hw_lisa_map_regs->dmm_lisa_map_0);

        if (omap_revision() >= OMAP4460_ES1_0) {
                hw_lisa_map_regs =
                    (struct dmm_lisa_map_regs *)MA_BASE;

                writel(lisa_map_regs->dmm_lisa_map_3,
                        &hw_lisa_map_regs->dmm_lisa_map_3);
                writel(lisa_map_regs->dmm_lisa_map_2,
                        &hw_lisa_map_regs->dmm_lisa_map_2);
                writel(lisa_map_regs->dmm_lisa_map_1,
                        &hw_lisa_map_regs->dmm_lisa_map_1);
                writel(lisa_map_regs->dmm_lisa_map_0,
                        &hw_lisa_map_regs->dmm_lisa_map_0);
        }
}
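
/*
 * Mapping example (illustrative): with emif_sizes[] = {512 MB, 512 MB} the
 * symmetric branch above maps a single 1 GB interleaved section at
 * CONFIG_SYS_SDRAM_BASE (0x80000000 on OMAP4/5, so sys_addr >> 24 = 0x80)
 * using the 1-GiB size code 0x6 from get_dmm_section_size_map(), and no
 * single-EMIF section is needed. With 1 GB on EMIF1 and 512 MB on EMIF2, a
 * 1 GB interleaved section is created first and the remaining 512 MB of
 * EMIF1 becomes a second, EMIF1-only section placed right after it.
 */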

/*
 * SDRAM initialization:
 * SDRAM initialization has two parts:
 * 1. Configuring the SDRAM device
 * 2. Updating the AC timing related parameters in the EMIF module
 * (1) should be done only once and should not be done while we are
 * running from SDRAM.
 * (2) can and should be done more than once if OPP changes.
 * In particular, this may be needed when we boot without SPL and
 * use the Configuration Header (CH). The ROM code supports only 50% OPP
 * at boot (low power boot), so U-Boot has to switch to OPP100 and update
 * the frequency. So,
 * Doing (1) and (2) makes sense - first time initialization
 * Doing (2) and not (1) makes sense - OPP change (when using CH)
 * Doing (1) and not (2) doesn't make sense
 * See do_sdram_init() for the details.
 */
void sdram_init(void)
{
        u32 in_sdram, size_prog, size_detect;
        u32 omap_rev = omap_revision();

        debug(">>sdram_init()\n");

        if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
                return;

        in_sdram = running_from_sdram();
        debug("in_sdram = %d\n", in_sdram);

        if (!(in_sdram || warm_reset())) {
                if (omap_rev != OMAP5432_ES1_0)
                        bypass_dpll(&prcm->cm_clkmode_dpll_core);
                else
                        writel(CM_DLL_CTRL_NO_OVERRIDE, &prcm->cm_dll_ctrl);
        }

        do_sdram_init(EMIF1_BASE);
        do_sdram_init(EMIF2_BASE);

        if (!in_sdram)
                dmm_init(DMM_BASE);

        if (!(in_sdram || warm_reset())) {
                emif_post_init_config(EMIF1_BASE);
                emif_post_init_config(EMIF2_BASE);
        }

        /* for the shadow registers to take effect */
        if (omap_rev != OMAP5432_ES1_0)
                freq_update_core();

        /* Do some testing after the init */
        if (!in_sdram) {
                size_prog = omap_sdram_size();
                size_prog = log_2_n_round_down(size_prog);
                size_prog = (1 << size_prog);

                size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
                                                size_prog);
                /* Compare with the size programmed */
                if (size_detect != size_prog) {
                        printf("SDRAM: identified size not same as expected"
                                " size identified: %x expected: %x\n",
                                size_detect,
                                size_prog);
                } else
                        debug("get_ram_size() successful");
        }

        debug("<<sdram_init()\n");
}