X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=cpu%2Fmpc83xx%2Fspd_sdram.c;h=647813f68d94e5ae020e60d5e749adbebe972f5b;hb=f64702b7fc8f8df39d31add770df6e372f9e9ce3;hp=63dcd664be2adf40c3bfc6c32bbc6b0a308338a4;hpb=cbf9c117282b8729bdb86071997b58fcab58c444;p=karo-tx-uboot.git diff --git a/cpu/mpc83xx/spd_sdram.c b/cpu/mpc83xx/spd_sdram.c index 63dcd664be..647813f68d 100644 --- a/cpu/mpc83xx/spd_sdram.c +++ b/cpu/mpc83xx/spd_sdram.c @@ -1,5 +1,10 @@ /* - * Copyright 2004 Freescale Semiconductor. + * (C) Copyright 2006 Freescale Semiconductor, Inc. + * + * (C) Copyright 2006 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * Copyright (C) 2004-2006 Freescale Semiconductor, Inc. * (C) Copyright 2003 Motorola Inc. * Xianghua Xiao (X.Xiao@motorola.com) * @@ -20,11 +25,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA - * - * Change log: - * - * 20050101: Eran Liberty (liberty@freescale.com) - * Initial file creating (porting from 85XX & 8260) */ #include @@ -36,7 +36,9 @@ #ifdef CONFIG_SPD_EEPROM -#if defined(CONFIG_DDR_ECC) +DECLARE_GLOBAL_DATA_PTR; + +#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRC) extern void dma_init(void); extern uint dma_check(void); extern int dma_xfer(void *dest, uint count, void *src); @@ -49,82 +51,192 @@ extern int dma_xfer(void *dest, uint count, void *src); /* * Convert picoseconds into clock cycles (rounding up if needed). */ - int picos_to_clk(int picos) { + unsigned int ddr_bus_clk; int clks; - clks = picos / (2000000000 / (get_bus_freq(0) / 1000)); - if (picos % (2000000000 / (get_bus_freq(0) / 1000)) != 0) { - clks++; - } + ddr_bus_clk = gd->ddr_clk >> 1; + clks = picos / (1000000000 / (ddr_bus_clk / 1000)); + if (picos % (1000000000 / (ddr_bus_clk / 1000)) != 0) + clks++; return clks; } -unsigned int -banksize(unsigned char row_dens) +unsigned int banksize(unsigned char row_dens) { return ((row_dens >> 2) | ((row_dens & 3) << 6)) << 24; } -long int spd_sdram(int(read_spd)(uint addr)) +int read_spd(uint addr) +{ + return ((int) addr); +} + +#undef SPD_DEBUG +#ifdef SPD_DEBUG +static void spd_debug(spd_eeprom_t *spd) { - volatile immap_t *immap = (immap_t *)CFG_IMMRBAR; - volatile ddr8349_t *ddr = &immap->ddr; - volatile law8349_t *ecm = &immap->sysconf.ddrlaw[0]; + printf ("\nDIMM type: %-18.18s\n", spd->mpart); + printf ("SPD size: %d\n", spd->info_size); + printf ("EEPROM size: %d\n", 1 << spd->chip_size); + printf ("Memory type: %d\n", spd->mem_type); + printf ("Row addr: %d\n", spd->nrow_addr); + printf ("Column addr: %d\n", spd->ncol_addr); + printf ("# of rows: %d\n", spd->nrows); + printf ("Row density: %d\n", spd->row_dens); + printf ("# of banks: %d\n", spd->nbanks); + printf ("Data width: %d\n", + 256 * spd->dataw_msb + spd->dataw_lsb); + printf ("Chip width: %d\n", spd->primw); + printf ("Refresh rate: %02X\n", spd->refresh); + printf ("CAS latencies: %02X\n", spd->cas_lat); + printf ("Write latencies: %02X\n", spd->write_lat); + printf ("tRP: %d\n", spd->trp); + printf ("tRCD: %d\n", spd->trcd); + printf ("\n"); +} +#endif /* SPD_DEBUG */ + +long int spd_sdram() +{ + volatile immap_t *immap = (immap_t *)CFG_IMMR; + volatile ddr83xx_t *ddr = &immap->ddr; + volatile law83xx_t *ecm = &immap->sysconf.ddrlaw[0]; spd_eeprom_t spd; - unsigned tmp, tmp1; + unsigned int n_ranks; + unsigned int odt_rd_cfg, odt_wr_cfg; + unsigned char twr_clk, twtr_clk; + unsigned char sdram_type; unsigned int memsize; unsigned int law_size; - 
unsigned char caslat; - unsigned int trfc, trfc_clk, trfc_low; + unsigned char caslat, caslat_ctrl; + unsigned int trfc, trfc_clk, trfc_low, trfc_high; + unsigned int trcd_clk, trtp_clk; + unsigned char cke_min_clk; + unsigned char add_lat, wr_lat; + unsigned char wr_data_delay; + unsigned char four_act; + unsigned char cpo; + unsigned char burstlen; + unsigned char odt_cfg, mode_odt_enable; + unsigned int max_bus_clk; + unsigned int max_data_rate, effective_data_rate; + unsigned int ddrc_clk; + unsigned int refresh_clk; + unsigned int sdram_cfg; + unsigned int ddrc_ecc_enable; + unsigned int pvr = get_pvr(); + + /* Read SPD parameters with I2C */ + CFG_READ_SPD(SPD_EEPROM_ADDRESS, 0, 1, (uchar *) & spd, sizeof (spd)); +#ifdef SPD_DEBUG + spd_debug(&spd); +#endif + /* Check the memory type */ + if (spd.mem_type != SPD_MEMTYPE_DDR && spd.mem_type != SPD_MEMTYPE_DDR2) { + printf("DDR: Module mem type is %02X\n", spd.mem_type); + return 0; + } -#warning Current spd_sdram does not fit its usage... adjust implementation or API... + /* Check the number of physical bank */ + if (spd.mem_type == SPD_MEMTYPE_DDR) { + n_ranks = spd.nrows; + } else { + n_ranks = (spd.nrows & 0x7) + 1; + } - CFG_READ_SPD(SPD_EEPROM_ADDRESS, 0, 1, (uchar *) & spd, sizeof (spd)); + if (n_ranks > 2) { + printf("DDR: The number of physical bank is %02X\n", n_ranks); + return 0; + } - if (spd.nrows > 2) { - puts("DDR:Only two chip selects are supported on ADS.\n"); + /* Check if the number of row of the module is in the range of DDRC */ + if (spd.nrow_addr < 12 || spd.nrow_addr > 15) { + printf("DDR: Row number is out of range of DDRC, row=%02X\n", + spd.nrow_addr); return 0; } - if (spd.nrow_addr < 12 - || spd.nrow_addr > 14 - || spd.ncol_addr < 8 - || spd.ncol_addr > 11) { - puts("DDR:Row or Col number unsupported.\n"); + /* Check if the number of col of the module is in the range of DDRC */ + if (spd.ncol_addr < 8 || spd.ncol_addr > 11) { + printf("DDR: Col number is out of range of DDRC, col=%02X\n", + spd.ncol_addr); return 0; } +#ifdef CFG_DDRCDR_VALUE + /* + * Adjust DDR II IO voltage biasing. It just makes it work. + */ + if(spd.mem_type == SPD_MEMTYPE_DDR2) { + immap->sysconf.ddrcdr = CFG_DDRCDR_VALUE; + } +#endif + + /* + * ODT configuration recommendation from DDR Controller Chapter. 
+ */ + odt_rd_cfg = 0; /* Never assert ODT */ + odt_wr_cfg = 0; /* Never assert ODT */ + if (spd.mem_type == SPD_MEMTYPE_DDR2) { + odt_wr_cfg = 1; /* Assert ODT on writes to CSn */ + } + + /* Setup DDR chip select register */ +#ifdef CFG_83XX_DDR_USES_CS0 + ddr->csbnds[0].csbnds = (banksize(spd.row_dens) >> 24) - 1; + ddr->cs_config[0] = ( 1 << 31 + | (odt_rd_cfg << 20) + | (odt_wr_cfg << 16) + | (spd.nrow_addr - 12) << 8 + | (spd.ncol_addr - 8) ); + debug("\n"); + debug("cs0_bnds = 0x%08x\n",ddr->csbnds[0].csbnds); + debug("cs0_config = 0x%08x\n",ddr->cs_config[0]); + + if (n_ranks == 2) { + ddr->csbnds[1].csbnds = ( (banksize(spd.row_dens) >> 8) + | ((banksize(spd.row_dens) >> 23) - 1) ); + ddr->cs_config[1] = ( 1<<31 + | (odt_rd_cfg << 20) + | (odt_wr_cfg << 16) + | (spd.nrow_addr-12) << 8 + | (spd.ncol_addr-8) ); + debug("cs1_bnds = 0x%08x\n",ddr->csbnds[1].csbnds); + debug("cs1_config = 0x%08x\n",ddr->cs_config[1]); + } + +#else ddr->csbnds[2].csbnds = (banksize(spd.row_dens) >> 24) - 1; ddr->cs_config[2] = ( 1 << 31 + | (odt_rd_cfg << 20) + | (odt_wr_cfg << 16) | (spd.nrow_addr - 12) << 8 | (spd.ncol_addr - 8) ); debug("\n"); debug("cs2_bnds = 0x%08x\n",ddr->csbnds[2].csbnds); debug("cs2_config = 0x%08x\n",ddr->cs_config[2]); - if (spd.nrows == 2) { + if (n_ranks == 2) { ddr->csbnds[3].csbnds = ( (banksize(spd.row_dens) >> 8) | ((banksize(spd.row_dens) >> 23) - 1) ); ddr->cs_config[3] = ( 1<<31 + | (odt_rd_cfg << 20) + | (odt_wr_cfg << 16) | (spd.nrow_addr-12) << 8 | (spd.ncol_addr-8) ); debug("cs3_bnds = 0x%08x\n",ddr->csbnds[3].csbnds); debug("cs3_config = 0x%08x\n",ddr->cs_config[3]); } - - if (spd.mem_type != 0x07) { - puts("No DDR module found!\n"); - return 0; - } +#endif /* * Figure out memory size in Megabytes. */ - memsize = spd.nrows * banksize(spd.row_dens) / 0x100000; + memsize = n_ranks * banksize(spd.row_dens) / 0x100000; /* * First supported LAW size is 16M, at LAWAR_SIZE_16M == 23. @@ -140,269 +252,630 @@ long int spd_sdram(int(read_spd)(uint addr)) debug("DDR:ar=0x%08x\n", ecm->ar); /* - * find the largest CAS + * Find the largest CAS by locating the highest 1 bit + * in the spd.cas_lat field. 
Translate it to a DDR + * controller field value: + * + * CAS Lat DDR I DDR II Ctrl + * Clocks SPD Bit SPD Bit Value + * ------- ------- ------- ----- + * 1.0 0 0001 + * 1.5 1 0010 + * 2.0 2 2 0011 + * 2.5 3 0100 + * 3.0 4 3 0101 + * 3.5 5 0110 + * 4.0 6 4 0111 + * 4.5 1000 + * 5.0 5 1001 */ - if(spd.cas_lat & 0x40) { - caslat = 7; - } else if (spd.cas_lat & 0x20) { - caslat = 6; - } else if (spd.cas_lat & 0x10) { - caslat = 5; - } else if (spd.cas_lat & 0x08) { - caslat = 4; - } else if (spd.cas_lat & 0x04) { - caslat = 3; - } else if (spd.cas_lat & 0x02) { - caslat = 2; - } else if (spd.cas_lat & 0x01) { - caslat = 1; - } else { - puts("DDR:no valid CAS Latency information.\n"); + caslat = __ilog2(spd.cas_lat); + if ((spd.mem_type == SPD_MEMTYPE_DDR) + && (caslat > 6)) { + printf("DDR I: Invalid SPD CAS Latency: 0x%x.\n", spd.cas_lat); + return 0; + } else if (spd.mem_type == SPD_MEMTYPE_DDR2 + && (caslat < 2 || caslat > 5)) { + printf("DDR II: Invalid SPD CAS Latency: 0x%x.\n", + spd.cas_lat); return 0; } + debug("DDR: caslat SPD bit is %d\n", caslat); + + max_bus_clk = 1000 *10 / (((spd.clk_cycle & 0xF0) >> 4) * 10 + + (spd.clk_cycle & 0x0f)); + max_data_rate = max_bus_clk * 2; + + debug("DDR:Module maximum data rate is: %dMhz\n", max_data_rate); + + ddrc_clk = gd->ddr_clk / 1000000; + effective_data_rate = 0; + + if (max_data_rate >= 390 && max_data_rate < 460) { /* it is DDR 400 */ + if (ddrc_clk <= 460 && ddrc_clk > 350) { + /* DDR controller clk at 350~460 */ + effective_data_rate = 400; /* 5ns */ + caslat = caslat; + } else if (ddrc_clk <= 350 && ddrc_clk > 280) { + /* DDR controller clk at 280~350 */ + effective_data_rate = 333; /* 6ns */ + if (spd.clk_cycle2 == 0x60) + caslat = caslat - 1; + else + caslat = caslat; + } else if (ddrc_clk <= 280 && ddrc_clk > 230) { + /* DDR controller clk at 230~280 */ + effective_data_rate = 266; /* 7.5ns */ + if (spd.clk_cycle3 == 0x75) + caslat = caslat - 2; + else if (spd.clk_cycle2 == 0x75) + caslat = caslat - 1; + else + caslat = caslat; + } else if (ddrc_clk <= 230 && ddrc_clk > 90) { + /* DDR controller clk at 90~230 */ + effective_data_rate = 200; /* 10ns */ + if (spd.clk_cycle3 == 0xa0) + caslat = caslat - 2; + else if (spd.clk_cycle2 == 0xa0) + caslat = caslat - 1; + else + caslat = caslat; + } + } else if (max_data_rate >= 323) { /* it is DDR 333 */ + if (ddrc_clk <= 350 && ddrc_clk > 280) { + /* DDR controller clk at 280~350 */ + effective_data_rate = 333; /* 6ns */ + caslat = caslat; + } else if (ddrc_clk <= 280 && ddrc_clk > 230) { + /* DDR controller clk at 230~280 */ + effective_data_rate = 266; /* 7.5ns */ + if (spd.clk_cycle2 == 0x75) + caslat = caslat - 1; + else + caslat = caslat; + } else if (ddrc_clk <= 230 && ddrc_clk > 90) { + /* DDR controller clk at 90~230 */ + effective_data_rate = 200; /* 10ns */ + if (spd.clk_cycle3 == 0xa0) + caslat = caslat - 2; + else if (spd.clk_cycle2 == 0xa0) + caslat = caslat - 1; + else + caslat = caslat; + } + } else if (max_data_rate >= 256) { /* it is DDR 266 */ + if (ddrc_clk <= 350 && ddrc_clk > 280) { + /* DDR controller clk at 280~350 */ + printf("DDR: DDR controller freq is more than " + "max data rate of the module\n"); + return 0; + } else if (ddrc_clk <= 280 && ddrc_clk > 230) { + /* DDR controller clk at 230~280 */ + effective_data_rate = 266; /* 7.5ns */ + caslat = caslat; + } else if (ddrc_clk <= 230 && ddrc_clk > 90) { + /* DDR controller clk at 90~230 */ + effective_data_rate = 200; /* 10ns */ + if (spd.clk_cycle2 == 0xa0) + caslat = caslat - 1; + } + } else if (max_data_rate 
>= 190) { /* it is DDR 200 */ + if (ddrc_clk <= 350 && ddrc_clk > 230) { + /* DDR controller clk at 230~350 */ + printf("DDR: DDR controller freq is more than " + "max data rate of the module\n"); + return 0; + } else if (ddrc_clk <= 230 && ddrc_clk > 90) { + /* DDR controller clk at 90~230 */ + effective_data_rate = 200; /* 10ns */ + caslat = caslat; + } + } - tmp = 20000 / (((spd.clk_cycle & 0xF0) >> 4) * 10 - + (spd.clk_cycle & 0x0f)); - debug("DDR:Module maximum data rate is: %dMhz\n", tmp); - - tmp1 = get_bus_freq(0) / 1000000; - if (tmp1 < 230 && tmp1 >= 90 && tmp >= 230) { - /* 90~230 range, treated as DDR 200 */ - if (spd.clk_cycle3 == 0xa0) - caslat -= 2; - else if(spd.clk_cycle2 == 0xa0) - caslat--; - } else if (tmp1 < 280 && tmp1 >= 230 && tmp >= 280) { - /* 230-280 range, treated as DDR 266 */ - if (spd.clk_cycle3 == 0x75) - caslat -= 2; - else if (spd.clk_cycle2 == 0x75) - caslat--; - } else if (tmp1 < 350 && tmp1 >= 280 && tmp >= 350) { - /* 280~350 range, treated as DDR 333 */ - if (spd.clk_cycle3 == 0x60) - caslat -= 2; - else if (spd.clk_cycle2 == 0x60) - caslat--; - } else if (tmp1 < 90 || tmp1 >= 350) { - /* DDR rate out-of-range */ - puts("DDR:platform frequency is not fit for DDR rate\n"); - return 0; + debug("DDR:Effective data rate is: %dMhz\n", effective_data_rate); + debug("DDR:The MSB 1 of CAS Latency is: %d\n", caslat); + + /* + * Errata DDR6 work around: input enable 2 cycles earlier. + * including MPC834x Rev1.0/1.1 and MPC8360 Rev1.1/1.2. + */ + if(PVR_MAJ(pvr) <= 1 && spd.mem_type == SPD_MEMTYPE_DDR){ + if (caslat == 2) + ddr->debug_reg = 0x201c0000; /* CL=2 */ + else if (caslat == 3) + ddr->debug_reg = 0x202c0000; /* CL=2.5 */ + else if (caslat == 4) + ddr->debug_reg = 0x202c0000; /* CL=3.0 */ + + __asm__ __volatile__ ("sync"); + + debug("Errata DDR6 (debug_reg=0x%08x)\n", ddr->debug_reg); } /* - * note: caslat must also be programmed into ddr->sdram_mode - * register. - * - * note: WRREC(Twr) and WRTORD(Twtr) are not in SPD, - * use conservative value here. + * Convert caslat clocks to DDR controller value. + * Force caslat_ctrl to be DDR Controller field-sized. */ - trfc = spd.trfc * 1000; /* up to ps */ + if (spd.mem_type == SPD_MEMTYPE_DDR) { + caslat_ctrl = (caslat + 1) & 0x07; + } else { + caslat_ctrl = (2 * caslat - 1) & 0x0f; + } + + debug("DDR: effective data rate is %d MHz\n", effective_data_rate); + debug("DDR: caslat SPD bit is %d, controller field is 0x%x\n", + caslat, caslat_ctrl); + + /* + * Timing Config 0. + * Avoid writing for DDR I. + */ + if (spd.mem_type == SPD_MEMTYPE_DDR2) { + unsigned char taxpd_clk = 8; /* By the book. */ + unsigned char tmrd_clk = 2; /* By the book. */ + unsigned char act_pd_exit = 2; /* Empirical? */ + unsigned char pre_pd_exit = 6; /* Empirical? */ + + ddr->timing_cfg_0 = (0 + | ((act_pd_exit & 0x7) << 20) /* ACT_PD_EXIT */ + | ((pre_pd_exit & 0x7) << 16) /* PRE_PD_EXIT */ + | ((taxpd_clk & 0xf) << 8) /* ODT_PD_EXIT */ + | ((tmrd_clk & 0xf) << 0) /* MRS_CYC */ + ); + debug("DDR: timing_cfg_0 = 0x%08x\n", ddr->timing_cfg_0); + } + + /* + * For DDR I, WRREC(Twr) and WRTORD(Twtr) are not in SPD, + * use conservative value. + * For DDR II, they are bytes 36 and 37, in quarter nanos. + */ + + if (spd.mem_type == SPD_MEMTYPE_DDR) { + twr_clk = 3; /* Clocks */ + twtr_clk = 1; /* Clocks */ + } else { + twr_clk = picos_to_clk(spd.twr * 250); + twtr_clk = picos_to_clk(spd.twtr * 250); + } + + /* + * Calculate Trfc, in picos. + * DDR I: Byte 42 straight up in ns. + * DDR II: Byte 40 and 42 swizzled some, in ns. 
+ */ + if (spd.mem_type == SPD_MEMTYPE_DDR) { + trfc = spd.trfc * 1000; /* up to ps */ + } else { + unsigned int byte40_table_ps[8] = { + 0, + 250, + 330, + 500, + 660, + 750, + 0, + 0 + }; + + trfc = (((spd.trctrfc_ext & 0x1) * 256) + spd.trfc) * 1000 + + byte40_table_ps[(spd.trctrfc_ext >> 1) & 0x7]; + } trfc_clk = picos_to_clk(trfc); + + /* + * Trcd, Byte 29, from quarter nanos to ps and clocks. + */ + trcd_clk = picos_to_clk(spd.trcd * 250) & 0x7; + + /* + * Convert trfc_clk to DDR controller fields. DDR I should + * fit in the REFREC field (16-19) of TIMING_CFG_1, but the + * 83xx controller has an extended REFREC field of three bits. + * The controller automatically adds 8 clocks to this value, + * so preadjust it down 8 first before splitting it up. + */ trfc_low = (trfc_clk - 8) & 0xf; + trfc_high = ((trfc_clk - 8) >> 4) & 0x3; ddr->timing_cfg_1 = - (((picos_to_clk(spd.trp * 250) & 0x07) << 28 ) | - ((picos_to_clk(spd.tras * 1000) & 0x0f ) << 24 ) | - ((picos_to_clk(spd.trcd * 250) & 0x07) << 20 ) | - ((caslat & 0x07) << 16 ) | - (trfc_low << 12 ) | - ( 0x300 ) | - ((picos_to_clk(spd.trrd * 250) & 0x07) << 4) | 1); + (((picos_to_clk(spd.trp * 250) & 0x07) << 28 ) | /* PRETOACT */ + ((picos_to_clk(spd.tras * 1000) & 0x0f ) << 24 ) | /* ACTTOPRE */ + (trcd_clk << 20 ) | /* ACTTORW */ + (caslat_ctrl << 16 ) | /* CASLAT */ + (trfc_low << 12 ) | /* REFEC */ + ((twr_clk & 0x07) << 8) | /* WRRREC */ + ((picos_to_clk(spd.trrd * 250) & 0x07) << 4) | /* ACTTOACT */ + ((twtr_clk & 0x07) << 0) /* WRTORD */ + ); + + /* + * Additive Latency + * For DDR I, 0. + * For DDR II, with ODT enabled, use "a value" less than ACTTORW, + * which comes from Trcd, and also note that: + * add_lat + caslat must be >= 4 + */ + add_lat = 0; + if (spd.mem_type == SPD_MEMTYPE_DDR2 + && (odt_wr_cfg || odt_rd_cfg) + && (caslat < 4)) { + add_lat = trcd_clk - 1; + if ((add_lat + caslat) < 4) { + add_lat = 0; + } + } - ddr->timing_cfg_2 = 0x00000800; + /* + * Write Data Delay + * Historically 0x2 == 4/8 clock delay. + * Empirically, 0x3 == 6/8 clock delay is suggested for DDR I 266. + */ + wr_data_delay = 2; + + /* + * Write Latency + * Read to Precharge + * Minimum CKE Pulse Width. + * Four Activate Window + */ + if (spd.mem_type == SPD_MEMTYPE_DDR) { + /* + * This is a lie. It should really be 1, but if it is + * set to 1, bits overlap into the old controller's + * otherwise unused ACSM field. If we leave it 0, then + * the HW will magically treat it as 1 for DDR 1. Oh Yea. + */ + wr_lat = 0; + + trtp_clk = 2; /* By the book. */ + cke_min_clk = 1; /* By the book. */ + four_act = 1; /* By the book. */ + + } else { + wr_lat = caslat - 1; + + /* Convert SPD value from quarter nanos to picos. */ + trtp_clk = picos_to_clk(spd.trtp * 250); + + cke_min_clk = 3; /* By the book. */ + four_act = picos_to_clk(37500); /* By the book. 1k pages? */ + } + + /* + * Empirically set ~MCAS-to-preamble override for DDR 2. + * Your milage will vary. 
+ */ + cpo = 0; + if (spd.mem_type == SPD_MEMTYPE_DDR2) { + if (effective_data_rate == 266 || effective_data_rate == 333) { + cpo = 0x7; /* READ_LAT + 5/4 */ + } else if (effective_data_rate == 400) { + cpo = 0x9; /* READ_LAT + 7/4 */ + } else { + /* Automatic calibration */ + cpo = 0x1f; + } + } + + ddr->timing_cfg_2 = (0 + | ((add_lat & 0x7) << 28) /* ADD_LAT */ + | ((cpo & 0x1f) << 23) /* CPO */ + | ((wr_lat & 0x7) << 19) /* WR_LAT */ + | ((trtp_clk & 0x7) << 13) /* RD_TO_PRE */ + | ((wr_data_delay & 0x7) << 10) /* WR_DATA_DELAY */ + | ((cke_min_clk & 0x7) << 6) /* CKE_PLS */ + | ((four_act & 0x1f) << 0) /* FOUR_ACT */ + ); debug("DDR:timing_cfg_1=0x%08x\n", ddr->timing_cfg_1); debug("DDR:timing_cfg_2=0x%08x\n", ddr->timing_cfg_2); - /* - * Only DDR I is supported - * DDR I and II have different mode-register-set definition + /* Check DIMM data bus width */ + if (spd.dataw_lsb == 0x20) { + burstlen = 0x03; /* 32 bit data bus, burst len is 8 */ + printf("\n DDR DIMM: data bus width is 32 bit"); + } else { + burstlen = 0x02; /* Others act as 64 bit bus, burst len is 4 */ + printf("\n DDR DIMM: data bus width is 64 bit"); + } + + /* Is this an ECC DDR chip? */ + if (spd.config == 0x02) + printf(" with ECC\n"); + else + printf(" without ECC\n"); + + /* Burst length is always 4 for 64 bit data bus, 8 for 32 bit data bus, + Burst type is sequential */ + if (spd.mem_type == SPD_MEMTYPE_DDR) { + switch (caslat) { + case 1: + ddr->sdram_mode = 0x50 | burstlen; /* CL=1.5 */ + break; + case 2: + ddr->sdram_mode = 0x20 | burstlen; /* CL=2.0 */ + break; + case 3: + ddr->sdram_mode = 0x60 | burstlen; /* CL=2.5 */ + break; + case 4: + ddr->sdram_mode = 0x30 | burstlen; /* CL=3.0 */ + break; + default: + printf("DDR:only CL 1.5, 2.0, 2.5, 3.0 is supported\n"); + return 0; + } + } else { + mode_odt_enable = 0x0; /* Default disabled */ + if (odt_wr_cfg || odt_rd_cfg) { + /* + * Bits 6 and 2 in Extended MRS(1) + * Bit 2 == 0x04 == 75 Ohm, with 2 DIMM modules. + * Bit 6 == 0x40 == 150 Ohm, with 1 DIMM module. + */ + mode_odt_enable = 0x40; /* 150 Ohm */ + } - /* burst length is always 4 */ - switch(caslat) { - case 2: - ddr->sdram_mode = 0x52; /* 1.5 */ - break; - case 3: - ddr->sdram_mode = 0x22; /* 2.0 */ - break; - case 4: - ddr->sdram_mode = 0x62; /* 2.5 */ - break; - case 5: - ddr->sdram_mode = 0x32; /* 3.0 */ - break; - default: - puts("DDR:only CAS Latency 1.5, 2.0, 2.5, 3.0 is supported.\n"); - return 0; + ddr->sdram_mode = + (0 + | (1 << (16 + 10)) /* DQS Differential disable */ + | (add_lat << (16 + 3)) /* Additive Latency in EMRS1 */ + | (mode_odt_enable << 16) /* ODT Enable in EMRS1 */ + | ((twr_clk - 1) << 9) /* Write Recovery Autopre */ + | (caslat << 4) /* caslat */ + | (burstlen << 0) /* Burst length */ + ); } debug("DDR:sdram_mode=0x%08x\n", ddr->sdram_mode); - switch(spd.refresh) { - case 0x00: - case 0x80: - tmp = picos_to_clk(15625000); - break; - case 0x01: - case 0x81: - tmp = picos_to_clk(3900000); - break; - case 0x02: - case 0x82: - tmp = picos_to_clk(7800000); - break; - case 0x03: - case 0x83: - tmp = picos_to_clk(31300000); - break; - case 0x04: - case 0x84: - tmp = picos_to_clk(62500000); - break; - case 0x05: - case 0x85: - tmp = picos_to_clk(125000000); - break; - default: - tmp = 0x512; - break; + /* + * Clear EMRS2 and EMRS3. 
+ */ + ddr->sdram_mode2 = 0; + debug("DDR: sdram_mode2 = 0x%08x\n", ddr->sdram_mode2); + + switch (spd.refresh) { + case 0x00: + case 0x80: + refresh_clk = picos_to_clk(15625000); + break; + case 0x01: + case 0x81: + refresh_clk = picos_to_clk(3900000); + break; + case 0x02: + case 0x82: + refresh_clk = picos_to_clk(7800000); + break; + case 0x03: + case 0x83: + refresh_clk = picos_to_clk(31300000); + break; + case 0x04: + case 0x84: + refresh_clk = picos_to_clk(62500000); + break; + case 0x05: + case 0x85: + refresh_clk = picos_to_clk(125000000); + break; + default: + refresh_clk = 0x512; + break; } /* * Set BSTOPRE to 0x100 for page mode * If auto-charge is used, set BSTOPRE = 0 */ - ddr->sdram_interval = ((tmp & 0x3fff) << 16) | 0x100; + ddr->sdram_interval = ((refresh_clk & 0x3fff) << 16) | 0x100; debug("DDR:sdram_interval=0x%08x\n", ddr->sdram_interval); /* - * Is this an ECC DDR chip? + * SDRAM Cfg 2 */ -#if defined(CONFIG_DDR_ECC) - if (spd.config == 0x02) { - ddr->err_disable = 0x0000000d; - ddr->err_sbe = 0x00ff0000; + odt_cfg = 0; + if (odt_rd_cfg | odt_wr_cfg) { + odt_cfg = 0x2; /* ODT to IOs during reads */ } - debug("DDR:err_disable=0x%08x\n", ddr->err_disable); - debug("DDR:err_sbe=0x%08x\n", ddr->err_sbe); + if (spd.mem_type == SPD_MEMTYPE_DDR2) { + ddr->sdram_cfg2 = (0 + | (0 << 26) /* True DQS */ + | (odt_cfg << 21) /* ODT only read */ + | (1 << 12) /* 1 refresh at a time */ + ); + + debug("DDR: sdram_cfg2 = 0x%08x\n", ddr->sdram_cfg2); + } + +#ifdef CFG_DDR_SDRAM_CLK_CNTL /* Optional platform specific value */ + ddr->sdram_clk_cntl = CFG_DDR_SDRAM_CLK_CNTL; #endif - asm("sync;isync"); + debug("DDR:sdram_clk_cntl=0x%08x\n", ddr->sdram_clk_cntl); - udelay(500); + asm("sync;isync"); - /* - * SS_EN=1, - * CLK_ADJST = 2-MCK/MCK_B, is lauched 1/2 of one SDRAM - * clock cycle after address/command - */ - ddr->sdram_clk_cntl = 0x82000000; + udelay(600); /* - * Figure out the settings for the sdram_cfg register. Build up - * the entire register in 'tmp' before writing since the write into + * Figure out the settings for the sdram_cfg register. Build up + * the value in 'sdram_cfg' before writing since the write into * the register will actually enable the memory controller, and all * settings must be done before enabling. 
* * sdram_cfg[0] = 1 (ddr sdram logic enable) * sdram_cfg[1] = 1 (self-refresh-enable) - * sdram_cfg[6:7] = 2 (SDRAM type = DDR SDRAM) + * sdram_cfg[5:7] = (SDRAM type = DDR SDRAM) + * 010 DDR 1 SDRAM + * 011 DDR 2 SDRAM + * sdram_cfg[12] = 0 (32_BE =0 , 64 bit bus mode) + * sdram_cfg[13] = 0 (8_BE =0, 4-beat bursts) */ - tmp = 0xc2000000; + if (spd.mem_type == SPD_MEMTYPE_DDR) + sdram_type = 2; + else + sdram_type = 3; - /* - * sdram_cfg[3] = RD_EN - registered DIMM enable - * A value of 0x26 indicates micron registered DIMMS (micron.com) - */ - if (spd.mod_attr == 0x26) { - tmp |= 0x10000000; - } + sdram_cfg = (0 + | (1 << 31) /* DDR enable */ + | (1 << 30) /* Self refresh */ + | (sdram_type << 24) /* SDRAM type */ + ); + + /* sdram_cfg[3] = RD_EN - registered DIMM enable */ + if (spd.mod_attr & 0x02) + sdram_cfg |= 0x10000000; + + /* The DIMM is 32bit width */ + if (spd.dataw_lsb == 0x20) + sdram_cfg |= 0x000C0000; + + ddrc_ecc_enable = 0; #if defined(CONFIG_DDR_ECC) - /* - * If the user wanted ECC (enabled via sdram_cfg[2]) - */ + /* Enable ECC with sdram_cfg[2] */ if (spd.config == 0x02) { - tmp |= 0x20000000; + sdram_cfg |= 0x20000000; + ddrc_ecc_enable = 1; + /* disable error detection */ + ddr->err_disable = ~ECC_ERROR_ENABLE; + /* set single bit error threshold to maximum value, + * reset counter to zero */ + ddr->err_sbe = (255 << ECC_ERROR_MAN_SBET_SHIFT) | + (0 << ECC_ERROR_MAN_SBEC_SHIFT); } + + debug("DDR:err_disable=0x%08x\n", ddr->err_disable); + debug("DDR:err_sbe=0x%08x\n", ddr->err_sbe); #endif + printf(" DDRC ECC mode: %s\n", ddrc_ecc_enable ? "ON":"OFF"); #if defined(CONFIG_DDR_2T_TIMING) /* * Enable 2T timing by setting sdram_cfg[16]. */ - tmp |= SDRAM_CFG_2T_EN; + sdram_cfg |= SDRAM_CFG_2T_EN; #endif - - ddr->sdram_cfg = tmp; + /* Enable controller, and GO! */ + ddr->sdram_cfg = sdram_cfg; asm("sync;isync"); udelay(500); debug("DDR:sdram_cfg=0x%08x\n", ddr->sdram_cfg); - - return memsize;/*in MBytes*/ + return memsize; /*in MBytes*/ } #endif /* CONFIG_SPD_EEPROM */ +#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRC) +/* + * Use timebase counter, get_timer() is not availabe + * at this point of initialization yet. + */ +static __inline__ unsigned long get_tbms (void) +{ + unsigned long tbl; + unsigned long tbu1, tbu2; + unsigned long ms; + unsigned long long tmp; + + ulong tbclk = get_tbclk(); + + /* get the timebase ticks */ + do { + asm volatile ("mftbu %0":"=r" (tbu1):); + asm volatile ("mftb %0":"=r" (tbl):); + asm volatile ("mftbu %0":"=r" (tbu2):); + } while (tbu1 != tbu2); + + /* convert ticks to ms */ + tmp = (unsigned long long)(tbu1); + tmp = (tmp << 32); + tmp += (unsigned long long)(tbl); + ms = tmp/(tbclk/1000); + + return ms; +} -#if defined(CONFIG_DDR_ECC) /* * Initialize all of memory for ECC, then enable errors. 
*/ - -void -ddr_enable_ecc(unsigned int dram_size) +/* #define CONFIG_DDR_ECC_INIT_VIA_DMA */ +void ddr_enable_ecc(unsigned int dram_size) { -#ifndef FIXME - uint *p = 0; - uint i = 0; - volatile immap_t *immap = (immap_t *)CFG_IMMRBAR; - volatile ccsr_ddr_t *ddr= &immap->im_ddr; - - dma_init(); - - for (*p = 0; p < (uint *)(8 * 1024); p++) { - if (((unsigned int)p & 0x1f) == 0) { - ppcDcbz((unsigned long) p); - } - *p = (unsigned int)0xdeadbeef; - if (((unsigned int)p & 0x1c) == 0x1c) { - ppcDcbf((unsigned long) p); - } + volatile immap_t *immap = (immap_t *)CFG_IMMR; + volatile ddr83xx_t *ddr= &immap->ddr; + unsigned long t_start, t_end; + register u64 *p; + register uint size; + unsigned int pattern[2]; +#if defined(CONFIG_DDR_ECC_INIT_VIA_DMA) + uint i; +#endif + icache_enable(); + t_start = get_tbms(); + pattern[0] = 0xdeadbeef; + pattern[1] = 0xdeadbeef; + +#if !defined(CONFIG_DDR_ECC_INIT_VIA_DMA) + debug("ddr init: CPU FP write method\n"); + size = dram_size; + for (p = 0; p < (u64*)(size); p++) { + ppcDWstore((u32*)p, pattern); + } + __asm__ __volatile__ ("sync"); +#else + debug("ddr init: DMA method\n"); + size = 0x2000; + for (p = 0; p < (u64*)(size); p++) { + ppcDWstore((u32*)p, pattern); } + __asm__ __volatile__ ("sync"); - /* 8K */ - dma_xfer((uint *)0x2000, 0x2000, (uint *)0); - /* 16K */ - dma_xfer((uint *)0x4000, 0x4000, (uint *)0); - /* 32K */ - dma_xfer((uint *)0x8000, 0x8000, (uint *)0); - /* 64K */ - dma_xfer((uint *)0x10000, 0x10000, (uint *)0); - /* 128k */ - dma_xfer((uint *)0x20000, 0x20000, (uint *)0); - /* 256k */ - dma_xfer((uint *)0x40000, 0x40000, (uint *)0); - /* 512k */ - dma_xfer((uint *)0x80000, 0x80000, (uint *)0); - /* 1M */ - dma_xfer((uint *)0x100000, 0x100000, (uint *)0); - /* 2M */ - dma_xfer((uint *)0x200000, 0x200000, (uint *)0); - /* 4M */ - dma_xfer((uint *)0x400000, 0x400000, (uint *)0); + /* Initialise DMA for direct transfer */ + dma_init(); + /* Start DMA to transfer */ + dma_xfer((uint *)0x2000, 0x2000, (uint *)0); /* 8K */ + dma_xfer((uint *)0x4000, 0x4000, (uint *)0); /* 16K */ + dma_xfer((uint *)0x8000, 0x8000, (uint *)0); /* 32K */ + dma_xfer((uint *)0x10000, 0x10000, (uint *)0); /* 64K */ + dma_xfer((uint *)0x20000, 0x20000, (uint *)0); /* 128K */ + dma_xfer((uint *)0x40000, 0x40000, (uint *)0); /* 256K */ + dma_xfer((uint *)0x80000, 0x80000, (uint *)0); /* 512K */ + dma_xfer((uint *)0x100000, 0x100000, (uint *)0); /* 1M */ + dma_xfer((uint *)0x200000, 0x200000, (uint *)0); /* 2M */ + dma_xfer((uint *)0x400000, 0x400000, (uint *)0); /* 4M */ for (i = 1; i < dram_size / 0x800000; i++) { dma_xfer((uint *)(0x800000*i), 0x800000, (uint *)0); } - - /* - * Enable errors for ECC. 
- */ - ddr->err_disable = 0x00000000; - asm("sync;isync"); #endif -} + t_end = get_tbms(); + icache_disable(); + + debug("\nREADY!!\n"); + debug("ddr init duration: %ld ms\n", t_end - t_start); + + /* Clear All ECC Errors */ + if ((ddr->err_detect & ECC_ERROR_DETECT_MME) == ECC_ERROR_DETECT_MME) + ddr->err_detect |= ECC_ERROR_DETECT_MME; + if ((ddr->err_detect & ECC_ERROR_DETECT_MBE) == ECC_ERROR_DETECT_MBE) + ddr->err_detect |= ECC_ERROR_DETECT_MBE; + if ((ddr->err_detect & ECC_ERROR_DETECT_SBE) == ECC_ERROR_DETECT_SBE) + ddr->err_detect |= ECC_ERROR_DETECT_SBE; + if ((ddr->err_detect & ECC_ERROR_DETECT_MSE) == ECC_ERROR_DETECT_MSE) + ddr->err_detect |= ECC_ERROR_DETECT_MSE; + + /* Disable ECC-Interrupts */ + ddr->err_int_en &= ECC_ERR_INT_DISABLE; + + /* Enable errors for ECC */ + ddr->err_disable &= ECC_ERROR_ENABLE; + + __asm__ __volatile__ ("sync"); + __asm__ __volatile__ ("isync"); +} #endif /* CONFIG_DDR_ECC */
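
The new picos_to_clk() derives the controller clock from gd->ddr_clk (half the DDR data rate) and rounds the result up, replacing the old get_bus_freq()-based calculation. Below is a minimal standalone sketch of the same conversion; ps_to_clocks() and its ddr_clk_hz parameter are illustrative names and are not part of the patch.

#include <stdio.h>

/*
 * Sketch of the rounding done by the patch's picos_to_clk():
 * convert a picosecond timing value into DDR controller clock
 * cycles, rounding up.  ddr_clk_hz stands in for gd->ddr_clk
 * (the DDR data rate in Hz); the controller clock is half of it.
 */
static unsigned int ps_to_clocks(unsigned int picos, unsigned int ddr_clk_hz)
{
	unsigned int ddr_bus_khz = (ddr_clk_hz / 2) / 1000;
	unsigned int clk_period_ps = 1000000000u / ddr_bus_khz;

	/* Round up so the programmed timing never undershoots the SPD value. */
	return (picos + clk_period_ps - 1) / clk_period_ps;
}

int main(void)
{
	/* e.g. tRP = 15 ns (SPD value 0x3c, quarter-ns units) at 533 MT/s */
	printf("tRP = %u clocks\n", ps_to_clocks(0x3c * 250, 533000000u));
	return 0;
}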
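
The CAS latency handling now takes the highest set bit of the SPD CAS-latency byte (via __ilog2()) and maps it to the controller's CASLAT field: caslat + 1 for DDR, 2 * caslat - 1 for DDR2, per the table in the patch. A small self-contained sketch of that decode, using a local highest_bit() helper in place of U-Boot's __ilog2():

#include <stdio.h>

/* Stand-in for U-Boot's __ilog2(): index of the highest set bit. */
static int highest_bit(unsigned int v)
{
	int i = -1;

	while (v) {
		v >>= 1;
		i++;
	}
	return i;
}

/* Map the SPD CAS-latency bit index to the 83xx CASLAT field value,
 * following the patch: caslat + 1 for DDR, 2 * caslat - 1 for DDR2. */
static unsigned int caslat_to_ctrl(int spd_bit, int is_ddr2)
{
	return is_ddr2 ? (2 * spd_bit - 1) & 0x0f : (spd_bit + 1) & 0x07;
}

int main(void)
{
	unsigned int cas_lat = 0x0c;	/* example SPD byte: CL 2.5 and CL 2.0 supported (DDR) */
	int bit = highest_bit(cas_lat);	/* 3, i.e. CL 2.5 per the table in the patch */

	printf("SPD bit %d -> CASLAT field 0x%x\n", bit, caslat_to_ctrl(bit, 0));
	return 0;
}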
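
The refresh setup translates the SPD refresh-rate code into a refresh period, converts it to controller clocks, and packs it into SDRAM_INTERVAL together with BSTOPRE = 0x100 for page mode. A rough standalone sketch under the same assumptions; the period table mirrors the patch's switch statement and the ~266 MHz controller clock is only an example value.

#include <stdio.h>

/* Refresh periods in picoseconds for SPD refresh codes 0x00-0x05,
 * mirroring the switch statement in the patch (the 0x80 variants
 * only add the self-refresh flag). */
static const unsigned long refresh_ps[6] = {
	15625000,	/* 0x00: 15.625 us */
	3900000,	/* 0x01:  3.9 us   */
	7800000,	/* 0x02:  7.8 us   */
	31300000,	/* 0x03: 31.3 us   */
	62500000,	/* 0x04: 62.5 us   */
	125000000,	/* 0x05: 125 us    */
};

int main(void)
{
	unsigned int ddr_bus_khz = 266500;	/* example: ~266 MHz controller clock */
	unsigned int clk_period_ps = 1000000000u / ddr_bus_khz;
	unsigned int code = 0x82 & 0x7f;	/* SPD refresh byte, self-refresh bit masked */
	unsigned long period = refresh_ps[code];

	/* Same rounding-up as picos_to_clk() in the patch. */
	unsigned int refresh_clk = (period + clk_period_ps - 1) / clk_period_ps;
	unsigned int sdram_interval = ((refresh_clk & 0x3fff) << 16) | 0x100; /* BSTOPRE = 0x100 */

	printf("REFINT = %u clocks, SDRAM_INTERVAL = 0x%08x\n",
	       refresh_clk, sdram_interval);
	return 0;
}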