ddr: altera: Clean up sdr_*_phase() part 6
diff --git a/drivers/ddr/altera/sequencer.c b/drivers/ddr/altera/sequencer.c
index 8020651f88f6f91a404a9846c49e19f6e5eb4efe..75a0699e849d6c40a0ff42b861b8ea483de25dec 100644
--- a/drivers/ddr/altera/sequencer.c
+++ b/drivers/ddr/altera/sequencer.c
@@ -7,6 +7,7 @@
 #include <common.h>
 #include <asm/io.h>
 #include <asm/arch/sdram.h>
+#include <errno.h>
 #include "sequencer.h"
 #include "sequencer_auto.h"
 #include "sequencer_auto_ac_init.h"
@@ -113,10 +114,17 @@ static void reg_file_set_sub_stage(u8 set_sub_stage)
        clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
 }
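For reference, clrsetbits_le32() used above is U-Boot's read-modify-write I/O accessor from asm/io.h. A minimal sketch of what the call does, assuming little-endian MMIO:

        u32 reg = readl(addr);  /* read the current register value */
        reg &= ~clear;          /* drop the bits covered by the clear mask */
        reg |= set;             /* merge in the new field */
        writel(reg, addr);      /* write the result back */

With clear = 0xff00 and set = set_sub_stage << 8, the call stores the new sub-stage in bits [15:8] of cur_stage.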
 
-static void initialize(void)
+/**
+ * phy_mgr_initialize() - Initialize PHY Manager
+ *
+ * Initialize PHY Manager.
+ */
+static void phy_mgr_initialize(void)
 {
+       u32 ratio;
+
        debug("%s:%d\n", __func__, __LINE__);
-       /* USER calibration has control over path to memory */
+       /* Calibration has control over path to memory */
        /*
         * In Hard PHY this is a 2-bit control:
         * 0: AFI Mux Select
@@ -132,49 +140,55 @@ static void initialize(void)
 
        writel(0, &phy_mgr_cfg->cal_debug_info);
 
-       if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
-               param->read_correct_mask_vg  = ((uint32_t)1 <<
-                       (RW_MGR_MEM_DQ_PER_READ_DQS /
-                       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
-               param->write_correct_mask_vg = ((uint32_t)1 <<
-                       (RW_MGR_MEM_DQ_PER_READ_DQS /
-                       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
-               param->read_correct_mask     = ((uint32_t)1 <<
-                       RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
-               param->write_correct_mask    = ((uint32_t)1 <<
-                       RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
-               param->dm_correct_mask       = ((uint32_t)1 <<
-                       (RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
-                       - 1;
-       }
+       /* Init params only if we do NOT skip calibration. */
+       if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
+               return;
+
+       ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
+               RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+       param->read_correct_mask_vg = (1 << ratio) - 1;
+       param->write_correct_mask_vg = (1 << ratio) - 1;
+       param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
+       param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
+       ratio = RW_MGR_MEM_DATA_WIDTH /
+               RW_MGR_MEM_DATA_MASK_WIDTH;
+       param->dm_correct_mask = (1 << ratio) - 1;
 }
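A worked example of the mask arithmetic above, with assumed widths (not values from this commit): if RW_MGR_MEM_DQ_PER_READ_DQS were 8 and RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS were 2, then ratio = 4 and read_correct_mask_vg = (1 << 4) - 1 = 0xf, one check bit per DQ pin in a virtual group, while read_correct_mask = (1 << 8) - 1 = 0xff covers the whole read DQS group.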
 
-static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
+/**
+ * set_rank_and_odt_mask() - Set Rank and ODT mask
+ * @rank:      Rank mask
+ * @odt_mode:  ODT mode, OFF or READ_WRITE
+ *
+ * Set Rank and ODT mask (On-Die Termination).
+ */
+static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
 {
-       uint32_t odt_mask_0 = 0;
-       uint32_t odt_mask_1 = 0;
-       uint32_t cs_and_odt_mask;
+       u32 odt_mask_0 = 0;
+       u32 odt_mask_1 = 0;
+       u32 cs_and_odt_mask;
 
-       if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
-               if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
-                       /*
-                        * 1 Rank
-                        * Read: ODT = 0
-                        * Write: ODT = 1
-                        */
+       if (odt_mode == RW_MGR_ODT_MODE_OFF) {
+               odt_mask_0 = 0x0;
+               odt_mask_1 = 0x0;
+       } else {        /* RW_MGR_ODT_MODE_READ_WRITE */
+               switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
+               case 1: /* 1 Rank */
+                       /* Read: ODT = 0; Write: ODT = 1 */
                        odt_mask_0 = 0x0;
                        odt_mask_1 = 0x1;
-               } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
-                       /* 2 Ranks */
+                       break;
+               case 2: /* 2 Ranks */
                        if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
-                               /* - Dual-Slot , Single-Rank
-                                * (1 chip-select per DIMM)
-                                * OR
-                                * - RDIMM, 4 total CS (2 CS per DIMM)
-                                * means 2 DIMM
-                                * Since MEM_NUMBER_OF_RANKS is 2 they are
-                                * both single rank
-                                * with 2 CS each (special for RDIMM)
+                               /*
+                                * - Dual-Slot, Single-Rank (1 CS per DIMM)
+                                *   OR
+                                * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
+                                *
+                                * Since MEM_NUMBER_OF_RANKS is 2, they
+                                * are both single rank with 2 CS each
+                                * (special for RDIMM).
+                                *
                                 * Read: Turn on ODT on the opposite rank
                                 * Write: Turn on ODT on all ranks
                                 */
@@ -182,19 +196,18 @@ static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
                                odt_mask_1 = 0x3;
                        } else {
                                /*
-                                * USER - Single-Slot , Dual-rank DIMMs
-                                * (2 chip-selects per DIMM)
-                                * USER Read: Turn on ODT off on all ranks
-                                * USER Write: Turn on ODT on active rank
+                                * - Single-Slot, Dual-Rank (2 CS per DIMM)
+                                *
+                                * Read: Turn off ODT on all ranks
+                                * Write: Turn on ODT on active rank
                                 */
                                odt_mask_0 = 0x0;
                                odt_mask_1 = 0x3 & (1 << rank);
                        }
-               } else {
-                       /* 4 Ranks
-                        * Read:
+                       break;
+               case 4: /* 4 Ranks */
+                       /* Read:
                         * ----------+-----------------------+
-                        *           |                       |
                         *           |         ODT           |
                         * Read From +-----------------------+
                         *   Rank    |  3  |  2  |  1  |  0  |
@@ -207,7 +220,6 @@ static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
                         *
                         * Write:
                         * ----------+-----------------------+
-                        *           |                       |
                         *           |         ODT           |
                         * Write To  +-----------------------+
                         *   Rank    |  3  |  2  |  1  |  0  |
@@ -236,16 +248,13 @@ static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
                                odt_mask_1 = 0xA;
                                break;
                        }
+                       break;
                }
-       } else {
-               odt_mask_0 = 0x0;
-               odt_mask_1 = 0x0;
        }
 
-       cs_and_odt_mask =
-               (0xFF & ~(1 << rank)) |
-               ((0xFF & odt_mask_0) << 8) |
-               ((0xFF & odt_mask_1) << 16);
+       cs_and_odt_mask = (0xFF & ~(1 << rank)) |
+                         ((0xFF & odt_mask_0) << 8) |
+                         ((0xFF & odt_mask_1) << 16);
        writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
 }
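The write above packs three byte-wide fields into a single word. A sketch of the layout, with a hypothetical helper name (the field positions come directly from the shifts in the code):

        /*
         * cs_and_odt_mask layout:
         *   bits [ 7: 0] - chip-select mask, active-low: ~(1 << rank)
         *   bits [15: 8] - odt_mask_0, ODT to assert on reads
         *   bits [23:16] - odt_mask_1, ODT to assert on writes
         */
        static u32 pack_cs_and_odt(u32 rank, u32 odt_rd, u32 odt_wr)
        {
                return (0xff & ~(1 << rank)) |
                       ((0xff & odt_rd) << 8) |
                       ((0xff & odt_wr) << 16);
        }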
@@ -482,20 +491,21 @@ static void scc_mgr_set_hhp_extras(void)
                   __func__, __LINE__);
 }
 
-/*
- * USER Zero all DQS config
- * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
+/**
+ * scc_mgr_zero_all() - Zero all DQS config
+ *
+ * Zero all DQS config.
  */
 static void scc_mgr_zero_all(void)
 {
-       uint32_t i, r;
+       int i, r;
 
        /*
         * USER Zero all DQS config settings, across all groups and all
         * shadow registers
         */
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
-            NUM_RANKS_PER_SHADOW_REG) {
+       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+            r += NUM_RANKS_PER_SHADOW_REG) {
                for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
                        /*
                         * The phases actually don't exist on a per-rank basis,
@@ -509,12 +519,12 @@ static void scc_mgr_zero_all(void)
 
                for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
                        scc_mgr_set_dqdqs_output_phase(i, 0);
-                       /* av/cv don't have out2 */
+                       /* Arria V/Cyclone V don't have out2. */
                        scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
                }
        }
 
-       /* multicast to all DQS group enables */
+       /* Multicast to all DQS group enables. */
        writel(0xff, &sdr_scc_mgr->dqs_ena);
        writel(0, &sdr_scc_mgr->update);
 }
@@ -564,43 +574,47 @@ static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
                writel(base + i, &sdr_scc_mgr->dqs_ena);
 }
 
-static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
-                              int32_t out_only)
+/**
+ * scc_mgr_zero_group() - Zero all configs for a group
+ * @write_group:       Write group
+ * @out_only:  Zero only the output-side settings
+ *
+ * Zero DQ, DM, DQS and OCT configs for a group.
+ */
+static void scc_mgr_zero_group(const u32 write_group, const int out_only)
 {
-       uint32_t i, r;
+       int i, r;
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
-               NUM_RANKS_PER_SHADOW_REG) {
-               /* Zero all DQ config settings */
+       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+            r += NUM_RANKS_PER_SHADOW_REG) {
+               /* Zero all DQ config settings. */
                for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
                        scc_mgr_set_dq_out1_delay(i, 0);
                        if (!out_only)
                                scc_mgr_set_dq_in_delay(i, 0);
                }
 
-               /* multicast to all DQ enables */
+               /* Multicast to all DQ enables. */
                writel(0xff, &sdr_scc_mgr->dq_ena);
 
-               /* Zero all DM config settings */
-               for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+               /* Zero all DM config settings. */
+               for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
                        scc_mgr_set_dm_out1_delay(i, 0);
-               }
 
-               /* multicast to all DM enables */
+               /* Multicast to all DM enables. */
                writel(0xff, &sdr_scc_mgr->dm_ena);
 
-               /* zero all DQS io settings */
+               /* Zero all DQS IO settings. */
                if (!out_only)
                        scc_mgr_set_dqs_io_in_delay(0);
-               /* av/cv don't have out2 */
+
+               /* Arria V/Cyclone V don't have out2. */
                scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
                scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
                scc_mgr_load_dqs_for_write_group(write_group);
 
-               /* multicast to all DQS IO enables (only 1) */
+               /* Multicast to all DQS IO enables (only 1 in total). */
                writel(0, &sdr_scc_mgr->dqs_io_ena);
 
-               /* hit update to zero everything */
+               /* Hit update to zero everything. */
                writel(0, &sdr_scc_mgr->update);
        }
 }
@@ -658,107 +672,87 @@ static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
        scc_mgr_load_dqs_for_write_group(write_group);
 }
 
-/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
-static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
-                                                 uint32_t group_bgn,
-                                                 uint32_t delay)
+/**
+ * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
+ * @write_group:       Write group
+ * @delay:             Delay value
+ *
+ * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
+ */
+static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
+                                                 const u32 delay)
 {
-       uint32_t i, p, new_delay;
-
-       /* dq shift */
-       for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
-               new_delay = READ_SCC_DQ_OUT2_DELAY;
-               new_delay += delay;
-
-               if (new_delay > IO_IO_OUT2_DELAY_MAX) {
-                       debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQ[%u,%u]:\
-                                  %u > %lu => %lu", __func__, __LINE__,
-                                  write_group, group_bgn, delay, i, p, new_delay,
-                                  (long unsigned int)IO_IO_OUT2_DELAY_MAX,
-                                  (long unsigned int)IO_IO_OUT2_DELAY_MAX);
-                       new_delay = IO_IO_OUT2_DELAY_MAX;
-               }
+       u32 i, new_delay;
 
+       /* DQ shift */
+       for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
                scc_mgr_load_dq(i);
-       }
-
-       /* dm shift */
-       for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
-               new_delay = READ_SCC_DM_IO_OUT2_DELAY;
-               new_delay += delay;
-
-               if (new_delay > IO_IO_OUT2_DELAY_MAX) {
-                       debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DM[%u]:\
-                                  %u > %lu => %lu\n",  __func__, __LINE__,
-                                  write_group, group_bgn, delay, i, new_delay,
-                                  (long unsigned int)IO_IO_OUT2_DELAY_MAX,
-                                  (long unsigned int)IO_IO_OUT2_DELAY_MAX);
-                       new_delay = IO_IO_OUT2_DELAY_MAX;
-               }
 
+       /* DM shift */
+       for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
                scc_mgr_load_dm(i);
-       }
-
-       /* dqs shift */
-       new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
-       new_delay += delay;
 
+       /* DQS shift */
+       new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
        if (new_delay > IO_IO_OUT2_DELAY_MAX) {
-               debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;"
-                          " adding %u to OUT1\n", __func__, __LINE__,
-                          write_group, group_bgn, delay, new_delay,
-                          IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
+               debug_cond(DLEVEL == 1,
+                          "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
+                          __func__, __LINE__, write_group, delay, new_delay,
+                          IO_IO_OUT2_DELAY_MAX,
                           new_delay - IO_IO_OUT2_DELAY_MAX);
-               scc_mgr_set_dqs_out1_delay(new_delay -
-                                          IO_IO_OUT2_DELAY_MAX);
-               new_delay = IO_IO_OUT2_DELAY_MAX;
+               new_delay -= IO_IO_OUT2_DELAY_MAX;
+               scc_mgr_set_dqs_out1_delay(new_delay);
        }
 
        scc_mgr_load_dqs_io();
 
-       /* oct shift */
-       new_delay = READ_SCC_OCT_OUT2_DELAY;
-       new_delay += delay;
-
+       /* OCT shift */
+       new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
        if (new_delay > IO_IO_OUT2_DELAY_MAX) {
-               debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;"
-                          " adding %u to OUT1\n", __func__, __LINE__,
-                          write_group, group_bgn, delay, new_delay,
-                          IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
+               debug_cond(DLEVEL == 1,
+                          "%s:%d (%u, %u) OCT: %u > %d; adding %u to OUT1\n",
+                          __func__, __LINE__, write_group, delay,
+                          new_delay, IO_IO_OUT2_DELAY_MAX,
                           new_delay - IO_IO_OUT2_DELAY_MAX);
-               scc_mgr_set_oct_out1_delay(write_group, new_delay -
-                                          IO_IO_OUT2_DELAY_MAX);
-               new_delay = IO_IO_OUT2_DELAY_MAX;
+               new_delay -= IO_IO_OUT2_DELAY_MAX;
+               scc_mgr_set_oct_out1_delay(write_group, new_delay);
        }
 
        scc_mgr_load_dqs_for_write_group(write_group);
 }
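A quick worked example of the OUT2 overflow handling above, with assumed tap limits: if IO_IO_OUT2_DELAY_MAX were 31 and READ_SCC_DQS_IO_OUT2_DELAY + delay came to 35, the excess 35 - 31 = 4 taps would be moved into the OUT1 delay chain, preserving the total output delay across the two chains.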
 
-/*
- * USER apply a delay to the entire output side (DQ, DM, DQS, OCT)
- * and to all ranks
+/**
+ * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
+ * @write_group:       Write group
+ * @delay:             Delay value
+ *
+ * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
  */
-static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
-       uint32_t write_group, uint32_t group_bgn, uint32_t delay)
+static void
+scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
+                                               const u32 delay)
 {
-       uint32_t r;
+       int r;
 
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
-               r += NUM_RANKS_PER_SHADOW_REG) {
-               scc_mgr_apply_group_all_out_delay_add(write_group,
-                                                     group_bgn, delay);
+            r += NUM_RANKS_PER_SHADOW_REG) {
+               scc_mgr_apply_group_all_out_delay_add(write_group, delay);
                writel(0, &sdr_scc_mgr->update);
        }
 }
 
-/* optimization used to recover some slots in ddr3 inst_rom */
-/* could be applied to other protocols if we wanted to */
+/**
+ * set_jump_as_return() - Return instruction optimization
+ *
+ * Optimization used to recover some slots in ddr3 inst_rom; could be
+ * applied to other protocols if we wanted to.
+ */
 static void set_jump_as_return(void)
 {
        /*
-        * to save space, we replace return with jump to special shared
+        * To save space, we replace return with jump to special shared
         * RETURN instruction, so we set the counter to a large value so that
-        * we always jump
+        * we always jump.
         */
        writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
        writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
@@ -859,101 +853,51 @@ static void delay_for_n_mem_clocks(const uint32_t clocks)
        debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
 }
 
-static void rw_mgr_mem_initialize(void)
+/**
+ * rw_mgr_mem_init_load_regs() - Load instruction registers
+ * @cntr0:     Counter 0 value
+ * @cntr1:     Counter 1 value
+ * @cntr2:     Counter 2 value
+ * @jump:      Jump instruction value
+ *
+ * Load instruction registers.
+ */
+static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
 {
-       uint32_t r;
        uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
                           RW_MGR_RUN_SINGLE_GROUP_OFFSET;
 
-       debug("%s:%d\n", __func__, __LINE__);
-
-       /* The reset / cke part of initialization is broadcasted to all ranks */
-       writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
-                               RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
-
-       /*
-        * Here's how you load register for a loop
-        * Counters are located @ 0x800
-        * Jump address are located @ 0xC00
-        * For both, registers 0 to 3 are selected using bits 3 and 2, like
-        * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
-        * I know this ain't pretty, but Avalon bus throws away the 2 least
-        * significant bits
-        */
-
-       /* start with memory RESET activated */
-
-       /* tINIT = 200us */
-
-       /*
-        * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
-        * If a and b are the number of iteration in 2 nested loops
-        * it takes the following number of cycles to complete the operation:
-        * number_of_cycles = ((2 + n) * a + 2) * b
-        * where n is the number of instruction in the inner loop
-        * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
-        * b = 6A
-        */
-
        /* Load counters */
-       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
+       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
               &sdr_rw_load_mgr_regs->load_cntr0);
-       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
+       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
               &sdr_rw_load_mgr_regs->load_cntr1);
-       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
+       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
               &sdr_rw_load_mgr_regs->load_cntr2);
 
        /* Load jump address */
-       writel(RW_MGR_INIT_RESET_0_CKE_0,
-               &sdr_rw_load_jump_mgr_regs->load_jump_add0);
-       writel(RW_MGR_INIT_RESET_0_CKE_0,
-               &sdr_rw_load_jump_mgr_regs->load_jump_add1);
-       writel(RW_MGR_INIT_RESET_0_CKE_0,
-               &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+       writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+       writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+       writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
 
        /* Execute count instruction */
-       writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);
-
-       /* indicate that memory is stable */
-       writel(1, &phy_mgr_cfg->reset_mem_stbl);
-
-       /*
-        * transition the RESET to high
-        * Wait for 500us
-        */
-
-       /*
-        * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
-        * If a and b are the number of iteration in 2 nested loops
-        * it takes the following number of cycles to complete the operation
-        * number_of_cycles = ((2 + n) * a + 2) * b
-        * where n is the number of instruction in the inner loop
-        * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
-        * b = FF
-        */
-
-       /* Load counters */
-       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
-              &sdr_rw_load_mgr_regs->load_cntr0);
-       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
-              &sdr_rw_load_mgr_regs->load_cntr1);
-       writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
-              &sdr_rw_load_mgr_regs->load_cntr2);
-
-       /* Load jump address */
-       writel(RW_MGR_INIT_RESET_1_CKE_0,
-               &sdr_rw_load_jump_mgr_regs->load_jump_add0);
-       writel(RW_MGR_INIT_RESET_1_CKE_0,
-               &sdr_rw_load_jump_mgr_regs->load_jump_add1);
-       writel(RW_MGR_INIT_RESET_1_CKE_0,
-               &sdr_rw_load_jump_mgr_regs->load_jump_add2);
-
-       writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);
-
-       /* bring up clock enable */
+       writel(jump, grpaddr);
+}
 
-       /* tXRP < 250 ck cycles */
-       delay_for_n_mem_clocks(250);
+/**
+ * rw_mgr_mem_load_user() - Load user calibration values
+ * @fin1:      Final instruction 1
+ * @fin2:      Final instruction 2
+ * @precharge: If 1, precharge the banks before loading the MR settings
+ *
+ * Load user calibration values and optionally precharge the banks.
+ */
+static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
+                                const int precharge)
+{
+       u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+                     RW_MGR_RUN_SINGLE_GROUP_OFFSET;
+       u32 r;
 
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
                if (param->skip_ranks[r]) {
@@ -964,6 +908,10 @@ static void rw_mgr_mem_initialize(void)
                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
 
+               /* precharge all banks ... */
+               if (precharge)
+                       writel(RW_MGR_PRECHARGE_ALL, grpaddr);
+
                /*
                 * Use mirrored commands for odd ranks if address
                 * mirroring is on.
@@ -979,7 +927,7 @@ static void rw_mgr_mem_initialize(void)
                        writel(RW_MGR_MRS1_MIRR, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
-                       writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
+                       writel(fin1, grpaddr);
                } else {
                        set_jump_as_return();
                        writel(RW_MGR_MRS2, grpaddr);
@@ -990,8 +938,12 @@ static void rw_mgr_mem_initialize(void)
                        set_jump_as_return();
                        writel(RW_MGR_MRS1, grpaddr);
                        set_jump_as_return();
-                       writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
+                       writel(fin2, grpaddr);
                }
+
+               if (precharge)
+                       continue;
+
                set_jump_as_return();
                writel(RW_MGR_ZQCL, grpaddr);
 
@@ -1000,91 +952,126 @@ static void rw_mgr_mem_initialize(void)
        }
 }
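A small worked example of the mirroring test above, with an assumed mask: RW_MGR_MEM_ADDRESS_MIRRORING is a per-rank bitmask, so a value of 0xa (binary 1010) would send ranks 1 and 3 down the mirrored branch (the RW_MGR_MRS*_MIRR instructions) and ranks 0 and 2 down the plain one.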
 
-/*
- * At the end of calibration we have to program the user settings in, and
- * USER  hand off the memory to the user.
+/**
+ * rw_mgr_mem_initialize() - Initialize RW Manager
+ *
+ * Initialize RW Manager.
  */
-static void rw_mgr_mem_handoff(void)
+static void rw_mgr_mem_initialize(void)
 {
-       uint32_t r;
-       uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
-                          RW_MGR_RUN_SINGLE_GROUP_OFFSET;
-
        debug("%s:%d\n", __func__, __LINE__);
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
-               if (param->skip_ranks[r])
-                       /* request to skip the rank */
-                       continue;
-               /* set rank */
-               set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
 
-               /* precharge all banks ... */
-               writel(RW_MGR_PRECHARGE_ALL, grpaddr);
+       /* The reset / cke part of initialization is broadcasted to all ranks */
+       writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+                               RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
 
-               /* load up MR settings specified by user */
+       /*
+        * Here's how you load register for a loop
+        * Counters are located @ 0x800
+        * Jump address are located @ 0xC00
+        * For both, registers 0 to 3 are selected using bits 3 and 2, like
+        * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
+        * I know this ain't pretty, but Avalon bus throws away the 2 least
+        * significant bits
+        */
 
-               /*
-                * Use Mirror-ed commands for odd ranks if address
-                * mirrorring is on
-                */
-               if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS2_MIRR, grpaddr);
-                       delay_for_n_mem_clocks(4);
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS3_MIRR, grpaddr);
-                       delay_for_n_mem_clocks(4);
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS1_MIRR, grpaddr);
-                       delay_for_n_mem_clocks(4);
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
-               } else {
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS2, grpaddr);
-                       delay_for_n_mem_clocks(4);
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS3, grpaddr);
-                       delay_for_n_mem_clocks(4);
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS1, grpaddr);
-                       delay_for_n_mem_clocks(4);
-                       set_jump_as_return();
-                       writel(RW_MGR_MRS0_USER, grpaddr);
-               }
-               /*
-                * USER  need to wait tMOD (12CK or 15ns) time before issuing
-                * other commands, but we will have plenty of NIOS cycles before
-                * actual handoff so its okay.
-                */
-       }
+       /* Start with memory RESET activated */
+
+       /* tINIT = 200us */
+
+       /*
+        * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
+        * If a and b are the number of iteration in 2 nested loops
+        * it takes the following number of cycles to complete the operation:
+        * number_of_cycles = ((2 + n) * a + 2) * b
+        * where n is the number of instruction in the inner loop
+        * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
+        * b = 6A
+        */
+       rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
+                                 SEQ_TINIT_CNTR2_VAL,
+                                 RW_MGR_INIT_RESET_0_CKE_0);
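Plugging the quoted solution (n = 0, a = 256, b = 106) into the cycle formula as a sanity check: ((2 + 0) * 256 + 2) * 106 = 54484 cycles; at 3.75 ns per cycle that is about 204 us, comfortably above the 200 us tINIT minimum.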
+
+       /* Indicate that memory is stable. */
+       writel(1, &phy_mgr_cfg->reset_mem_stbl);
+
+       /*
+        * transition the RESET to high
+        * Wait for 500us
+        */
+
+       /*
+        * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
+        * If a and b are the number of iteration in 2 nested loops
+        * it takes the following number of cycles to complete the operation
+        * number_of_cycles = ((2 + n) * a + 2) * b
+        * where n is the number of instruction in the inner loop
+        * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
+        * b = FF
+        */
+       rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
+                                 SEQ_TRESET_CNTR2_VAL,
+                                 RW_MGR_INIT_RESET_1_CKE_0);
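The same check for the reset delay (n = 2, a = 131, b = 256): ((2 + 2) * 131 + 2) * 256 = 134656 cycles, about 505 us at 3.75 ns per cycle, just above the 500 us target.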
+
+       /* Bring up clock enable. */
+
+       /* tXRP < 250 ck cycles */
+       delay_for_n_mem_clocks(250);
+
+       rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
+                            0);
 }
 
 /*
- * performs a guaranteed read on the patterns we are going to use during a
- * read test to ensure memory works
+ * At the end of calibration we have to program the user settings in, and
+ * hand off the memory to the user.
  */
-static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
-       uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
-       uint32_t all_ranks)
+static void rw_mgr_mem_handoff(void)
 {
-       uint32_t r, vg;
-       uint32_t correct_mask_vg;
-       uint32_t tmp_bit_chk;
-       uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
-               (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
-       uint32_t addr;
-       uint32_t base_rw_mgr;
+       rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
+       /*
+        * We need to wait tMOD (12CK or 15ns) time before issuing
+        * other commands, but we will have plenty of NIOS cycles before
+        * actual handoff, so it's okay.
+        */
+}
 
-       *bit_chk = param->read_correct_mask;
-       correct_mask_vg = param->read_correct_mask_vg;
+/**
+ * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
+ * @rank_bgn:  Rank number
+ * @group:     Read/Write Group
+ * @all_ranks: Test all ranks
+ *
+ * Performs a guaranteed read on the patterns we are going to use during a
+ * read test to ensure memory works.
+ */
+static int
+rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
+                                       const u32 all_ranks)
+{
+       const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+                        RW_MGR_RUN_SINGLE_GROUP_OFFSET;
+       const u32 addr_offset =
+                        (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
+       const u32 rank_end = all_ranks ?
+                               RW_MGR_MEM_NUMBER_OF_RANKS :
+                               (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+       const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
+                               RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+       const u32 correct_mask_vg = param->read_correct_mask_vg;
+
+       u32 tmp_bit_chk, base_rw_mgr, bit_chk;
+       int vg, r;
+       int ret = 0;
+
+       bit_chk = param->read_correct_mask;
 
        for (r = rank_bgn; r < rank_end; r++) {
+               /* Request to skip the rank */
                if (param->skip_ranks[r])
-                       /* request to skip the rank */
                        continue;
 
-               /* set rank */
+               /* Set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
 
                /* Load up a constant bursts of read commands */
@@ -1097,56 +1084,55 @@ static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
                tmp_bit_chk = 0;
-               for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
-                       /* reset the fifos to get pointers to known state */
-
+               for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
+                    vg >= 0; vg--) {
+                       /* Reset the FIFOs to get pointers to known state. */
                        writel(0, &phy_mgr_cmd->fifo_reset);
                        writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                  RW_MGR_RESET_READ_DATAPATH_OFFSET);
-
-                       tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
-                               / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
-
-                       addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
-                       writel(RW_MGR_GUARANTEED_READ, addr +
-                              ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
-                               vg) << 2));
+                       writel(RW_MGR_GUARANTEED_READ,
+                              addr + addr_offset + (vg << 2));
 
                        base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
-                       tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr));
-
-                       if (vg == 0)
-                               break;
+                       tmp_bit_chk <<= shift_ratio;
+                       tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
                }
-               *bit_chk &= tmp_bit_chk;
+
+               bit_chk &= tmp_bit_chk;
        }
 
-       addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
        writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
 
        set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
-       debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\
-                  %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
-                  (long unsigned int)(*bit_chk == param->read_correct_mask));
-       return *bit_chk == param->read_correct_mask;
-}
 
-static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
-       (uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
-{
-       return rw_mgr_mem_calibrate_read_test_patterns(0, group,
-               num_tries, bit_chk, 1);
+       if (bit_chk != param->read_correct_mask)
+               ret = -EIO;
+
+       debug_cond(DLEVEL == 1,
+                  "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
+                  __func__, __LINE__, group, bit_chk,
+                  param->read_correct_mask, ret);
+
+       return ret;
 }
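A worked example of the tmp_bit_chk accumulation above, with assumed widths: with 2 virtual groups per read DQS and shift_ratio = 4, the loop visits vg = 1 and then vg = 0, so the result ends up as (vg1_bits << 4) | vg0_bits, one pass bit per DQ pin with the highest virtual group in the upper nibble. Only if every tested rank matches the full read_correct_mask does the function return 0 rather than -EIO.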
 
-/* load up the patterns we are going to use during a read test */
-static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
-       uint32_t all_ranks)
+/**
+ * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
+ * @rank_bgn:  Rank number
+ * @all_ranks: Test all ranks
+ *
+ * Load up the patterns we are going to use during a read test.
+ */
+static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
+                                                   const int all_ranks)
 {
-       uint32_t r;
-       uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
-               (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+       const u32 rank_end = all_ranks ?
+                       RW_MGR_MEM_NUMBER_OF_RANKS :
+                       (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+       u32 r;
 
        debug("%s:%d\n", __func__, __LINE__);
+
        for (r = rank_bgn; r < rank_end; r++) {
                if (param->skip_ranks[r])
                        /* request to skip the rank */
@@ -1346,92 +1332,105 @@ static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
        }
 }
 
-static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
-                             uint32_t dtaps_per_ptap, uint32_t *work_bgn,
-                             uint32_t *v, uint32_t *d, uint32_t *p,
-                             uint32_t *i, uint32_t *max_working_cnt)
+/**
+ * sdr_find_phase() - Find DQS enable phase
+ * @working:   If 1, look for working phase; if 0, look for non-working phase
+ * @grp:       Read/Write group
+ * @v:         VFIFO value
+ * @work:      Working window position
+ * @i:         Iterator
+ * @p:         DQS Phase Iterator
+ *
+ * Find working or non-working DQS enable phase setting.
+ */
+static int sdr_find_phase(int working, const u32 grp, u32 *v, u32 *work,
+                         u32 *i, u32 *p)
 {
-       uint32_t found_begin = 0;
-       uint32_t tmp_delay = 0;
-       uint32_t test_status;
-
-       for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
-               IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
-               *work_bgn = tmp_delay;
-               scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
+       u32 ret, bit_chk;
+       const u32 end = VFIFO_SIZE + (working ? 0 : 1);
 
-               for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
-                       for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
-                               IO_DELAY_PER_OPA_TAP) {
-                               scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
+       for (; *i < end; (*i)++) {
+               if (working)
+                       *p = 0;
 
-                               test_status =
-                               rw_mgr_mem_calibrate_read_test_all_ranks
-                               (*grp, 1, PASS_ONE_BIT, bit_chk, 0);
+               for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++) {
+                       scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
 
-                               if (test_status) {
-                                       *max_working_cnt = 1;
-                                       found_begin = 1;
-                                       break;
-                               }
-                       }
+                       ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+                                               PASS_ONE_BIT, &bit_chk, 0);
+                       if (!working)
+                               ret = !ret;
 
-                       if (found_begin)
-                               break;
+                       if (ret)
+                               return 0;
 
-                       if (*p > IO_DQS_EN_PHASE_MAX)
-                               /* fiddle with FIFO */
-                               rw_mgr_incr_vfifo(*grp, v);
+                       *work += IO_DELAY_PER_OPA_TAP;
                }
 
-               if (found_begin)
-                       break;
+               if (*p > IO_DQS_EN_PHASE_MAX) {
+                       /* Fiddle with FIFO. */
+                       rw_mgr_incr_vfifo(grp, v);
+                       if (!working)
+                               *p = 0;
+               }
        }
 
-       if (*i >= VFIFO_SIZE) {
-               /* cannot find working solution */
-               debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
-                          ptap/dtap\n", __func__, __LINE__);
-               return 0;
-       } else {
-               return 1;
+       return -EINVAL;
+}
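Both edge searches now funnel through this one helper; a condensed sketch of the two modes (mirroring the call sites in sdr_working_phase() and sdr_nonworking_phase() below):

        /* Scan for the first passing phase... */
        ret = sdr_find_phase(1, grp, &v, &work_bgn, &i, &p);

        /* ...or, continuing from a passing phase, for the first failing one. */
        ret = sdr_find_phase(0, grp, &v, &work_end, &i, &p);

When looking for the failing edge (working == 0), the pass/fail test is inverted (ret = !ret) and the scan runs one extra VFIFO step, so a single loop body serves both edges of the window.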
+
+static int sdr_working_phase(uint32_t grp,
+                             uint32_t dtaps_per_ptap, uint32_t *work_bgn,
+                             uint32_t *v, uint32_t *d, uint32_t *p,
+                             uint32_t *i)
+{
+       int ret;
+
+       *work_bgn = 0;
+
+       for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
+               *i = 0;
+               scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
+               ret = sdr_find_phase(1, grp, v, work_bgn, i, p);
+               if (!ret)
+                       return 0;
+               *work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
        }
+
+       /* Cannot find working solution */
+       debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
+                  __func__, __LINE__);
+       return -EINVAL;
 }
 
-static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
+static void sdr_backup_phase(uint32_t grp,
                             uint32_t *work_bgn, uint32_t *v, uint32_t *d,
-                            uint32_t *p, uint32_t *max_working_cnt)
+                            uint32_t *p)
 {
-       uint32_t found_begin = 0;
        uint32_t tmp_delay;
+       u32 bit_chk;
 
        /* Special case code for backing up a phase */
        if (*p == 0) {
                *p = IO_DQS_EN_PHASE_MAX;
-               rw_mgr_decr_vfifo(*grp, v);
+               rw_mgr_decr_vfifo(grp, v);
        } else {
                (*p)--;
        }
        tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
-       scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
+       scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
 
        for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
                (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
-               scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
+               scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
 
-               if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
+               if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
                                                             PASS_ONE_BIT,
-                                                            bit_chk, 0)) {
-                       found_begin = 1;
+                                                            &bit_chk, 0)) {
                        *work_bgn = tmp_delay;
                        break;
                }
        }
 
-       /* We have found a working dtap before the ptap found above */
-       if (found_begin == 1)
-               (*max_working_cnt)++;
-
        /*
         * Restore VFIFO to old state before we decremented it
         * (if needed).
@@ -1439,95 +1438,80 @@ static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
        (*p)++;
        if (*p > IO_DQS_EN_PHASE_MAX) {
                *p = 0;
-               rw_mgr_incr_vfifo(*grp, v);
+               rw_mgr_incr_vfifo(grp, v);
        }
 
-       scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
+       scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
 }
 
-static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
+static int sdr_nonworking_phase(uint32_t grp,
                             uint32_t *work_bgn, uint32_t *v, uint32_t *d,
-                            uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
+                            uint32_t *p, uint32_t *i,
                             uint32_t *work_end)
 {
-       uint32_t found_end = 0;
+       int ret;
 
        (*p)++;
        *work_end += IO_DELAY_PER_OPA_TAP;
        if (*p > IO_DQS_EN_PHASE_MAX) {
-               /* fiddle with FIFO */
+               /* Fiddle with FIFO. */
                *p = 0;
-               rw_mgr_incr_vfifo(*grp, v);
+               rw_mgr_incr_vfifo(grp, v);
        }
 
-       for (; *i < VFIFO_SIZE + 1; (*i)++) {
-               for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
-                       += IO_DELAY_PER_OPA_TAP) {
-                       scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
-
-                       if (!rw_mgr_mem_calibrate_read_test_all_ranks
-                               (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
-                               found_end = 1;
-                               break;
-                       } else {
-                               (*max_working_cnt)++;
-                       }
-               }
-
-               if (found_end)
-                       break;
-
-               if (*p > IO_DQS_EN_PHASE_MAX) {
-                       /* fiddle with FIFO */
-                       rw_mgr_incr_vfifo(*grp, v);
-                       *p = 0;
-               }
+       ret = sdr_find_phase(0, grp, v, work_end, i, p);
+       if (ret) {
+               /* Cannot see edge of failing read. */
+               debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
+                          __func__, __LINE__);
        }
 
-       if (*i >= VFIFO_SIZE + 1) {
-               /* cannot see edge of failing read */
-               debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\
-                          failed\n", __func__, __LINE__);
-               return 0;
-       } else {
-               return 1;
-       }
+       return ret;
 }
 
-static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
-                                 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
-                                 uint32_t *p, uint32_t *work_mid,
-                                 uint32_t *work_end)
+/**
+ * sdr_find_window_center() - Find center of the working DQS window.
+ * @grp:       Read/Write group
+ * @work_bgn:  First working settings
+ * @work_end:  Last working settings
+ * @val:       VFIFO value
+ *
+ * Find center of the working DQS enable window.
+ */
+static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
+                                 const u32 work_end, const u32 val)
 {
-       int i;
+       u32 bit_chk, work_mid, v = val;
        int tmp_delay = 0;
+       int i, p, d;
 
-       *work_mid = (*work_bgn + *work_end) / 2;
+       work_mid = (work_bgn + work_end) / 2;
 
        debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
-                  *work_bgn, *work_end, *work_mid);
+                  work_bgn, work_end, work_mid);
        /* Get the middle delay to be less than a VFIFO delay */
-       for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
-               (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
-               ;
+       tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;
+
        debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
-       while (*work_mid > tmp_delay)
-               *work_mid -= tmp_delay;
-       debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);
+       work_mid %= tmp_delay;
+       debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
 
-       tmp_delay = 0;
-       for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
-               (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
-               ;
-       tmp_delay -= IO_DELAY_PER_OPA_TAP;
-       debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
-       for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
-               tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
-               ;
-       debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);
+       tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
+       if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
+               tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
+       p = tmp_delay / IO_DELAY_PER_OPA_TAP;
+
+       debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
 
-       scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
-       scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
+       d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
+       if (d > IO_DQS_EN_DELAY_MAX)
+               d = IO_DQS_EN_DELAY_MAX;
+       tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+
+       debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
+
+       scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+       scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
 
        /*
         * push vfifo until we can successfully calibrate. We can do this
@@ -1535,34 +1519,32 @@ static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
         */
        for (i = 0; i < VFIFO_SIZE; i++) {
                debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
-                          *v);
-               if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
+                          v);
+               if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
                                                             PASS_ONE_BIT,
-                                                            bit_chk, 0)) {
-                       break;
+                                                            &bit_chk, 0)) {
+                       debug_cond(DLEVEL == 2,
+                                  "%s:%d center: found: vfifo=%u ptap=%u dtap=%u\n",
+                                  __func__, __LINE__, v, p, d);
+                       return 0;
                }
 
-               /* fiddle with FIFO */
-               rw_mgr_incr_vfifo(*grp, v);
+               /* Fiddle with FIFO. */
+               rw_mgr_incr_vfifo(grp, &v);
        }
 
-       if (i >= VFIFO_SIZE) {
-               debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
-                          failed\n", __func__, __LINE__);
-               return 0;
-       } else {
-               return 1;
-       }
+       debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
+                  __func__, __LINE__);
+       return -EINVAL;
 }
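A worked example of the phase/delay split above, with assumed tap values (IO_DELAY_PER_OPA_TAP = 25, IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 5, IO_DQS_EN_PHASE_MAX = 7): for work_mid = 163, rounddown(163, 25) = 150 selects p = 6, and the remainder is made up with delay taps, d = DIV_ROUND_UP(163 - 150, 5) = 3, landing at 150 + 15 = 165, slightly past the exact midpoint, as close as the rounded-up tap granularity allows.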
 
 /* find a good dqs enable to use */
 static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
 {
        uint32_t v, d, p, i;
-       uint32_t max_working_cnt;
        uint32_t bit_chk;
        uint32_t dtaps_per_ptap;
-       uint32_t work_bgn, work_mid, work_end;
+       uint32_t work_bgn, work_end;
        uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
 
        debug("%s:%d %u\n", __func__, __LINE__, grp);
@@ -1580,13 +1562,10 @@ static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
        /* * Step 1 : First push vfifo until we get a failing read * */
        v = find_vfifo_read(grp, &bit_chk);
 
-       max_working_cnt = 0;
-
        /* ******************************************************** */
        /* * step 2: find first working phase, increment in ptaps * */
        work_bgn = 0;
-       if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
-                               &p, &i, &max_working_cnt) == 0)
+       if (sdr_working_phase(grp, dtaps_per_ptap, &work_bgn, &v, &d, &p, &i))
                return 0;
 
        work_end = work_bgn;
@@ -1601,14 +1580,13 @@ static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
                /* * step 3a: if we have room, back off by one and
                increment in dtaps * */
 
-               sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
-                                &max_working_cnt);
+               sdr_backup_phase(grp, &work_bgn, &v, &d, &p);
 
                /* ********************************************************* */
                /* * step 4a: go forward from working phase to non working
                phase, increment in ptaps * */
-               if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
-                                        &i, &max_working_cnt, &work_end) == 0)
+               if (sdr_nonworking_phase(grp, &work_bgn, &v, &d, &p,
+                                        &i, &work_end))
                        return 0;
 
                /* ********************************************************* */
@@ -1641,13 +1619,6 @@ static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
                           v, p, d, work_bgn);
 
                work_end = work_bgn;
-
-               /* * The actual increment of dtaps is done outside of the
-               if/else loop to share code */
-
-               /* Only here to counterbalance a subtract later on which is
-               not needed if this branch of the algorithm is taken */
-               max_working_cnt++;
        }
 
        /* The dtap increment to find the failing edge is done here */
@@ -1770,71 +1741,10 @@ static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
 
        /* ******************************************** */
        /* * step 6:  Find the centre of the window   * */
-       if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
-                                  &work_mid, &work_end) == 0)
-               return 0;
-
-       debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
-                  vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
-                  v, p-1, d);
-       return 1;
-}
-
-/*
- * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
- * dq_in_delay values
- */
-static uint32_t
-rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
-(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
-{
-       uint32_t found;
-       uint32_t i;
-       uint32_t p;
-       uint32_t d;
-       uint32_t r;
-
-       const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
-               (RW_MGR_MEM_DQ_PER_READ_DQS-1);
-               /* we start at zero, so have one less dq to devide among */
-
-       debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
-             test_bgn);
-
-       /* try different dq_in_delays since the dq path is shorter than dqs */
-
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
-            r += NUM_RANKS_PER_SHADOW_REG) {
-               for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++, d += delay_step) {
-                       debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
-                                  vfifo_find_dqs_", __func__, __LINE__);
-                       debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
-                              write_group, read_group);
-                       debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i , p, d);
-                       scc_mgr_set_dq_in_delay(p, d);
-                       scc_mgr_load_dq(p);
-               }
-               writel(0, &sdr_scc_mgr->update);
-       }
-
-       found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
-
-       debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
-                  en_phase_sweep_dq", __func__, __LINE__);
-       debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Reseting delay \
-                  chain to zero\n", write_group, read_group, found);
-
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
-            r += NUM_RANKS_PER_SHADOW_REG) {
-               for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
-                       i++, p++) {
-                       scc_mgr_set_dq_in_delay(p, 0);
-                       scc_mgr_load_dq(p);
-               }
-               writel(0, &sdr_scc_mgr->update);
-       }
+       if (sdr_find_window_center(grp, work_bgn, work_end, v))
+               return 0; /* FIXME: Old code, return 0 means failure :-( */
 
-       return found;
+       return 1;
 }
 
 /* per-bit deskew DQ and center */
@@ -2211,139 +2121,251 @@ static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
        return (dq_margin >= 0) && (dqs_margin >= 0);
 }
 
-/*
- * calibrate the read valid prediction FIFO.
+/**
+ * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
+ * @rw_group:  Read/Write Group
+ * @phase:     DQ/DQS phase
  *
- *  - read valid prediction will consist of finding a good DQS enable phase,
- * DQS enable delay, DQS input phase, and DQS input delay.
- *  - we also do a per-bit deskew on the DQ lines.
+ * Because initially no communication can be reliably performed with the memory
+ * device, the sequencer uses a guaranteed write mechanism to write data into
+ * the memory device.
  */
-static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
-                                          uint32_t test_bgn)
+static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
+                                                const u32 phase)
 {
-       uint32_t p, d, rank_bgn, sr;
-       uint32_t dtaps_per_ptap;
-       uint32_t tmp_delay;
-       uint32_t bit_chk;
-       uint32_t grp_calibrated;
-       uint32_t write_group, write_test_bgn;
-       uint32_t failed_substage;
+       int ret;
 
-       debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);
+       /* Set a particular DQ/DQS phase. */
+       scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
 
-       /* update info for sims */
-       reg_file_set_stage(CAL_STAGE_VFIFO);
+       debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
+                  __func__, __LINE__, rw_group, phase);
 
-       write_group = read_group;
-       write_test_bgn = test_bgn;
+       /*
+        * Altera EMI_RM 2015.05.04 :: Figure 1-25
+        * Load up the patterns used by read calibration using the
+        * current DQDQS phase.
+        */
+       rw_mgr_mem_calibrate_read_load_patterns(0, 1);
+
+       if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
+               return 0;
+
+       /*
+        * Altera EMI_RM 2015.05.04 :: Figure 1-26
+        * Back-to-Back reads of the patterns used for calibration.
+        */
+       ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
+       if (ret)
+               debug_cond(DLEVEL == 1,
+                          "%s:%d Guaranteed read test failed: g=%u p=%u\n",
+                          __func__, __LINE__, rw_group, phase);
+       return ret;
+}
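+
+/*
+ * Illustrative usage (a sketch, mirroring the phase loop in
+ * rw_mgr_mem_calibrate_vfifo() below): a caller sweeps the DQDQS output
+ * phase until the guaranteed write/read pair succeeds:
+ *
+ *     for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++)
+ *             if (!rw_mgr_mem_calibrate_guaranteed_write(rw_group, p))
+ *                     break;
+ */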
+
+/**
+ * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
+ * @rw_group:  Read/Write Group
+ * @test_bgn:  Rank at which the test begins
+ *
+ * DQS enable calibration ensures reliable capture of the DQ signal without
+ * glitches on the DQS line.
+ */
+static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
+                                                      const u32 test_bgn)
+{
+       /*
+        * Altera EMI_RM 2015.05.04 :: Figure 1-27
+        * DQS and DQS Enable Signal Relationships.
+        */
+
+       /* We start at zero, so have one less DQ to divide among. */
+       const u32 delay_step = IO_IO_IN_DELAY_MAX /
+                              (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
+       int found;
+       u32 i, p, d, r;
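+
+       /*
+        * Worked example (illustrative, board-generated values): with
+        * IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8,
+        * delay_step = 31 / 7 = 4, so the DQ input delays set below
+        * step through 0, 4, 8, ..., 28 taps.
+        */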
+
+       debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
+
+       /* Try different dq_in_delays since the DQ path is shorter than DQS. */
+       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+            r += NUM_RANKS_PER_SHADOW_REG) {
+               for (i = 0, p = test_bgn, d = 0;
+                    i < RW_MGR_MEM_DQ_PER_READ_DQS;
+                    i++, p++, d += delay_step) {
+                       debug_cond(DLEVEL == 1,
+                                  "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
+                                  __func__, __LINE__, rw_group, r, i, p, d);
+
+                       scc_mgr_set_dq_in_delay(p, d);
+                       scc_mgr_load_dq(p);
+               }
 
-       /* USER Determine number of delay taps for each phase tap */
-       dtaps_per_ptap = 0;
-       tmp_delay = 0;
-       while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
-               dtaps_per_ptap++;
-               tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+               writel(0, &sdr_scc_mgr->update);
        }
-       dtaps_per_ptap--;
-       tmp_delay = 0;
 
-       /* update info for sims */
-       reg_file_set_group(read_group);
+       /*
+        * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
+        * dq_in_delay values.
+        */
+       found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
+
+       debug_cond(DLEVEL == 1,
+                  "%s:%d: g=%u found=%u; Reseting delay chain to zero\n",
+                  __func__, __LINE__, rw_group, found);
+
+       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+            r += NUM_RANKS_PER_SHADOW_REG) {
+               scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
+               writel(0, &sdr_scc_mgr->update);
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
+ * @rw_group:          Read/Write Group
+ * @test_bgn:          Rank at which the test begins
+ * @use_read_test:     Perform a read test
+ * @update_fom:                Update FOM
+ *
+ * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
+ * within a group.
+ */
+static int
+rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
+                                     const int use_read_test,
+                                     const int update_fom)
+{
+       int ret, grp_calibrated;
+       u32 rank_bgn, sr;
+
+       /*
+        * Altera EMI_RM 2015.05.04 :: Figure 1-28
+        * Read per-bit deskew can be done on a per shadow register basis.
+        */
+       grp_calibrated = 1;
+       for (rank_bgn = 0, sr = 0;
+            rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+            rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
+               /* Check if this set of ranks should be skipped entirely. */
+               if (param->skip_shadow_regs[sr])
+                       continue;
+
+               ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
+                                                       rw_group, test_bgn,
+                                                       use_read_test,
+                                                       update_fom);
+               if (ret)
+                       continue;
+
+               grp_calibrated = 0;
+       }
+
+       if (!grp_calibrated)
+               return -EIO;
+
+       return 0;
+}
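+
+/*
+ * Worked example (illustrative values; both constants are generated per
+ * board): with RW_MGR_MEM_NUMBER_OF_RANKS = 4 and
+ * NUM_RANKS_PER_SHADOW_REG = 2, the loop above runs twice: shadow
+ * register set sr = 0 covers ranks 0..1 and sr = 1 covers ranks 2..3.
+ */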
+
+/**
+ * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
+ * @rw_group:          Read/Write Group
+ * @test_bgn:          Rank at which the test begins
+ *
+ * Stage 1: Calibrate the read valid prediction FIFO.
+ *
+ * This function implements UniPHY calibration Stage 1, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ *
+ * - Read valid prediction will consist of finding:
+ *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
+ *   - DQS input phase and DQS input delay (DQ/DQS Centering)
+ * - We also do a per-bit deskew on the DQ lines.
+ */
+static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
+{
+       uint32_t p, d;
+       uint32_t dtaps_per_ptap;
+       uint32_t failed_substage;
 
-       grp_calibrated = 0;
+       int ret;
 
+       debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
+
+       /* Update info for sims */
+       reg_file_set_group(rw_group);
+       reg_file_set_stage(CAL_STAGE_VFIFO);
        reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
+
        failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
 
-       for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
+       /* Determine the number of delay taps for each phase tap. */
+       dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
+                                     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
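+       /*
+        * Worked example (illustrative, generated values): with
+        * IO_DELAY_PER_OPA_TAP = 416 ps and
+        * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps, this gives
+        * DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16 dtaps per ptap.
+        */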
+
+       for (d = 0; d <= dtaps_per_ptap; d += 2) {
                /*
                 * In RLDRAMX we may be messing the delay of pins in
-                * the same write group but outside of the current read
-                * the group, but that's ok because we haven't
-                * calibrated output side yet.
+                * the same write rw_group but outside of the current read
+                * rw_group; that's OK because we haven't calibrated the
+                * output side yet.
                 */
                if (d > 0) {
-                       scc_mgr_apply_group_all_out_delay_add_all_ranks
-                       (write_group, write_test_bgn, d);
+                       scc_mgr_apply_group_all_out_delay_add_all_ranks(
+                                                               rw_group, d);
                }
 
-               for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
-                       p++) {
-                       /* set a particular dqdqs phase */
-                       scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
+               for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
+                       /* 1) Guaranteed Write */
+                       ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
+                       if (ret)
+                               break;
 
-                       debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
-                                  p=%u d=%u\n", __func__, __LINE__,
-                                  read_group, p, d);
+                       /* 2) DQS Enable Calibration */
+                       ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
+                                                                         test_bgn);
+                       if (ret) {
+                               failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
+                               continue;
+                       }
 
+                       /* 3) Centering DQ/DQS */
                        /*
-                        * Load up the patterns used by read calibration
-                        * using current DQDQS phase.
+                        * If doing read-after-write calibration, do not update
+                        * the FOM now; do it later.
                         */
-                       rw_mgr_mem_calibrate_read_load_patterns(0, 1);
-                       if (!(gbl->phy_debug_mode_flags &
-                               PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
-                               if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
-                                   (read_group, 1, &bit_chk)) {
-                                       debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
-                                                  __func__, __LINE__);
-                                       debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
-                                                  read_group, p, d);
-                                       break;
-                               }
+                       ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
+                                                               test_bgn, 1, 0);
+                       if (ret) {
+                               failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
+                               continue;
                        }
 
-/* case:56390 */
-                       grp_calibrated = 1;
-               if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
-                   (write_group, read_group, test_bgn)) {
-                               /*
-                                * USER Read per-bit deskew can be done on a
-                                * per shadow register basis.
-                                */
-                               for (rank_bgn = 0, sr = 0;
-                                       rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
-                                       rank_bgn += NUM_RANKS_PER_SHADOW_REG,
-                                       ++sr) {
-                                       /*
-                                        * Determine if this set of ranks
-                                        * should be skipped entirely.
-                                        */
-                                       if (!param->skip_shadow_regs[sr]) {
-                                               /*
-                                                * If doing read after write
-                                                * calibration, do not update
-                                                * FOM, now - do it then.
-                                                */
-                                       if (!rw_mgr_mem_calibrate_vfifo_center
-                                               (rank_bgn, write_group,
-                                               read_group, test_bgn, 1, 0)) {
-                                                       grp_calibrated = 0;
-                                                       failed_substage =
-                                               CAL_SUBSTAGE_VFIFO_CENTER;
-                                               }
-                                       }
-                               }
-                       } else {
-                               grp_calibrated = 0;
-                               failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
-                       }
+                       /* All done. */
+                       goto cal_done_ok;
                }
        }
 
-       if (grp_calibrated == 0) {
-               set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
-                                       failed_substage);
-               return 0;
-       }
+       /* Calibration Stage 1 failed. */
+       set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
+       return 0;
 
+       /* Calibration Stage 1 completed OK. */
+cal_done_ok:
        /*
         * Reset the delay chains back to zero if they have moved > 1
         * (check for > 1 because loop will increase d even when pass in
         * first case).
         */
        if (d > 2)
-               scc_mgr_zero_group(write_group, write_test_bgn, 1);
+               scc_mgr_zero_group(rw_group, 1);
 
        return 1;
 }
@@ -3131,21 +3153,24 @@ static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
        return 1;
 }
 
-/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
+/**
+ * mem_precharge_and_activate() - Precharge all banks and activate
+ *
+ * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
+ */
 static void mem_precharge_and_activate(void)
 {
-       uint32_t r;
+       int r;
 
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
-               if (param->skip_ranks[r]) {
-                       /* request to skip the rank */
+               /* Test if the rank should be skipped. */
+               if (param->skip_ranks[r])
                        continue;
-               }
 
-               /* set rank */
+               /* Set rank. */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
 
-               /* precharge all banks ... */
+               /* Precharge all banks. */
                writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                             RW_MGR_RUN_SINGLE_GROUP_OFFSET);
 
@@ -3157,66 +3182,57 @@ static void mem_precharge_and_activate(void)
                writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
-               /* activate rows */
+               /* Activate rows. */
                writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                                RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        }
 }
 
-/* Configure various memory related parameters. */
-static void mem_config(void)
+/**
+ * mem_init_latency() - Configure memory RLAT and WLAT settings
+ *
+ * Configure memory RLAT and WLAT parameters.
+ */
+static void mem_init_latency(void)
 {
-       uint32_t rlat, wlat;
-       uint32_t rw_wl_nop_cycles;
-       uint32_t max_latency;
-
-       debug("%s:%d\n", __func__, __LINE__);
-       /* read in write and read latency */
-       wlat = readl(&data_mgr->t_wl_add);
-       wlat += readl(&data_mgr->mem_t_add);
-
-       /* WL for hard phy does not include additive latency */
-
        /*
-        * add addtional write latency to offset the address/command extra
-        * clock cycle. We change the AC mux setting causing AC to be delayed
-        * by one mem clock cycle. Only do this for DDR3
+        * For AV/CV, LFIFO is hardened and always runs at full rate
+        * so max latency in AFI clocks, used here, is correspondingly
+        * smaller.
         */
-       wlat = wlat + 1;
-
-       rlat = readl(&data_mgr->t_rl_add);
+       const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
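+       /*
+        * E.g. with MAX_LATENCY_COUNT_WIDTH = 5 (an illustrative,
+        * generated value), max_latency = (1 << 5) - 1 = 31 AFI clocks.
+        */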
+       u32 rlat, wlat;
 
-       rw_wl_nop_cycles = wlat - 2;
-       gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
+       debug("%s:%d\n", __func__, __LINE__);
 
        /*
-        * For AV/CV, lfifo is hardened and always runs at full rate so
-        * max latency in AFI clocks, used here, is correspondingly smaller.
+        * Read in write latency.
+        * WL for Hard PHY does not include additive latency.
         */
-       max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
-       /* configure for a burst length of 8 */
+       wlat = readl(&data_mgr->t_wl_add);
+       wlat += readl(&data_mgr->mem_t_add);
 
-       /* write latency */
-       /* Adjust Write Latency for Hard PHY */
-       wlat = wlat + 1;
+       gbl->rw_wl_nop_cycles = wlat - 1;
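+       /*
+        * Note: equivalent to the old sequence of wlat = wlat + 1 (Hard
+        * PHY adjustment) followed by rw_wl_nop_cycles = wlat - 2.
+        */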
 
-       /* set a pretty high read latency initially */
-       gbl->curr_read_lat = rlat + 16;
+       /* Read in read latency. */
+       rlat = readl(&data_mgr->t_rl_add);
 
+       /* Set a pretty high read latency initially. */
+       gbl->curr_read_lat = rlat + 16;
        if (gbl->curr_read_lat > max_latency)
                gbl->curr_read_lat = max_latency;
 
        writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
 
-       /* advertise write latency */
-       gbl->curr_write_lat = wlat;
-       writel(wlat - 2, &phy_mgr_cfg->afi_wlat);
-
-       /* initialize bit slips */
-       mem_precharge_and_activate();
+       /* Advertise write latency. */
+       writel(wlat, &phy_mgr_cfg->afi_wlat);
 }
 
-/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
+/**
+ * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
+ *
+ * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
+ */
 static void mem_skip_calibrate(void)
 {
        uint32_t vfifo_offset;
@@ -3225,7 +3241,7 @@ static void mem_skip_calibrate(void)
        debug("%s:%d\n", __func__, __LINE__);
        /* Need to update every shadow register set used by the interface */
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
-               r += NUM_RANKS_PER_SHADOW_REG) {
+            r += NUM_RANKS_PER_SHADOW_REG) {
                /*
                 * Set output phase alignment settings appropriate for
                 * skip calibration.
@@ -3262,8 +3278,8 @@ static void mem_skip_calibrate(void)
                         *
                         *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
                         */
-                       scc_mgr_set_dqdqs_output_phase(i, (1.25 *
-                               IO_DLL_CHAIN_LENGTH - 2));
+                       scc_mgr_set_dqdqs_output_phase(i,
+                                       1.25 * IO_DLL_CHAIN_LENGTH - 2);
                }
                writel(0xff, &sdr_scc_mgr->dqs_ena);
                writel(0xff, &sdr_scc_mgr->dqs_io_ena);
@@ -3289,20 +3305,23 @@ static void mem_skip_calibrate(void)
         * in sequencer.
         */
        vfifo_offset = CALIB_VFIFO_OFFSET;
-       for (j = 0; j < vfifo_offset; j++) {
+       for (j = 0; j < vfifo_offset; j++)
                writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
-       }
        writel(0, &phy_mgr_cmd->fifo_reset);
 
        /*
-        * For ACV with hard lfifo, we get the skip-cal setting from
-        * generation-time constant.
+        * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
+        * setting from generation-time constant.
         */
        gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
        writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
 }
 
-/* Memory calibration entry point */
+/**
+ * mem_calibrate() - Memory calibration entry point.
+ *
+ * Perform memory calibration.
+ */
 static uint32_t mem_calibrate(void)
 {
        uint32_t i;
@@ -3312,18 +3331,24 @@ static uint32_t mem_calibrate(void)
        uint32_t run_groups, current_run;
        uint32_t failing_groups = 0;
        uint32_t group_failed = 0;
-       uint32_t sr_failed = 0;
+
+       const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
+                               RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
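+       /*
+        * E.g. 8 read DQS groups over 4 write DQS groups (illustrative
+        * widths) gives rwdqs_ratio = 2: two read groups are calibrated
+        * per write group in the loops below.
+        */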
 
        debug("%s:%d\n", __func__, __LINE__);
-       /* Initialize the data settings */
 
+       /* Initialize the data settings */
        gbl->error_substage = CAL_SUBSTAGE_NIL;
        gbl->error_stage = CAL_STAGE_NIL;
        gbl->error_group = 0xff;
        gbl->fom_in = 0;
        gbl->fom_out = 0;
 
-       mem_config();
+       /* Initialize WLAT and RLAT. */
+       mem_init_latency();
+
+       /* Initialize bit slips. */
+       mem_precharge_and_activate();
 
        for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
                writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
@@ -3335,155 +3360,149 @@ static uint32_t mem_calibrate(void)
                scc_set_bypass_mode(i);
        }
 
+       /* Calibration is skipped. */
        if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
                /*
                 * Set VFIFO and LFIFO to instant-on settings in skip
                 * calibration mode.
                 */
                mem_skip_calibrate();
-       } else {
-               for (i = 0; i < NUM_CALIB_REPEAT; i++) {
-                       /*
-                        * Zero all delay chain/phase settings for all
-                        * groups and all shadow register sets.
-                        */
-                       scc_mgr_zero_all();
 
-                       run_groups = ~param->skip_groups;
+               /*
+                * Do not remove this line as it makes sure all of our
+                * decisions have been applied.
+                */
+               writel(0, &sdr_scc_mgr->update);
+               return 1;
+       }
+
+       /* Calibration is not skipped. */
+       for (i = 0; i < NUM_CALIB_REPEAT; i++) {
+               /*
+                * Zero all delay chain/phase settings for all
+                * groups and all shadow register sets.
+                */
+               scc_mgr_zero_all();
+
+               run_groups = ~param->skip_groups;
 
-                       for (write_group = 0, write_test_bgn = 0; write_group
-                               < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
-                               write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
-                               /* Initialized the group failure */
-                               group_failed = 0;
+               for (write_group = 0, write_test_bgn = 0; write_group
+                       < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
+                       write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
 
-                               current_run = run_groups & ((1 <<
-                                       RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
-                               run_groups = run_groups >>
-                                       RW_MGR_NUM_DQS_PER_WRITE_GROUP;
+                       /* Initialize the group failure */
+                       group_failed = 0;
 
-                               if (current_run == 0)
-                                       continue;
+                       current_run = run_groups & ((1 <<
+                               RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
+                       run_groups = run_groups >>
+                               RW_MGR_NUM_DQS_PER_WRITE_GROUP;
 
-                               writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
-                                                   SCC_MGR_GROUP_COUNTER_OFFSET);
-                               scc_mgr_zero_group(write_group, write_test_bgn,
-                                                  0);
+                       if (current_run == 0)
+                               continue;
 
-                               for (read_group = write_group *
-                                       RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                                       RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
-                                       read_test_bgn = 0;
-                                       read_group < (write_group + 1) *
-                                       RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                                       RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
-                                       group_failed == 0;
-                                       read_group++, read_test_bgn +=
-                                       RW_MGR_MEM_DQ_PER_READ_DQS) {
-                                       /* Calibrate the VFIFO */
-                                       if (!((STATIC_CALIB_STEPS) &
-                                               CALIB_SKIP_VFIFO)) {
-                                               if (!rw_mgr_mem_calibrate_vfifo
-                                                       (read_group,
-                                                       read_test_bgn)) {
-                                                       group_failed = 1;
-
-                                                       if (!(gbl->
-                                                       phy_debug_mode_flags &
-                                               PHY_DEBUG_SWEEP_ALL_GROUPS)) {
-                                                               return 0;
-                                                       }
-                                               }
-                                       }
-                               }
+                       writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
+                                           SCC_MGR_GROUP_COUNTER_OFFSET);
+                       scc_mgr_zero_group(write_group, 0);
 
-                               /* Calibrate the output side */
-                               if (group_failed == 0)  {
-                                       for (rank_bgn = 0, sr = 0; rank_bgn
-                                               < RW_MGR_MEM_NUMBER_OF_RANKS;
-                                               rank_bgn +=
-                                               NUM_RANKS_PER_SHADOW_REG,
-                                               ++sr) {
-                                               sr_failed = 0;
-                                               if (!((STATIC_CALIB_STEPS) &
-                                               CALIB_SKIP_WRITES)) {
-                                                       if ((STATIC_CALIB_STEPS)
-                                               & CALIB_SKIP_DELAY_SWEEPS) {
-                                               /* not needed in quick mode! */
-                                                       } else {
-                                               /*
-                                                * Determine if this set of
-                                                * ranks should be skipped
-                                                * entirely.
-                                                */
-                                       if (!param->skip_shadow_regs[sr]) {
-                                               if (!rw_mgr_mem_calibrate_writes
-                                               (rank_bgn, write_group,
-                                               write_test_bgn)) {
-                                                       sr_failed = 1;
-                                                       if (!(gbl->
-                                                       phy_debug_mode_flags &
-                                               PHY_DEBUG_SWEEP_ALL_GROUPS)) {
-                                                               return 0;
-                                                                       }
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                               if (sr_failed != 0)
-                                                       group_failed = 1;
-                                       }
-                               }
+                       for (read_group = write_group * rwdqs_ratio,
+                            read_test_bgn = 0;
+                            read_group < (write_group + 1) * rwdqs_ratio;
+                            read_group++,
+                            read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+                               if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
+                                       continue;
 
-                               if (group_failed == 0) {
-                                       for (read_group = write_group *
-                                       RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                                       RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
-                                       read_test_bgn = 0;
-                                               read_group < (write_group + 1)
-                                               * RW_MGR_MEM_IF_READ_DQS_WIDTH
-                                               / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
-                                               group_failed == 0;
-                                               read_group++, read_test_bgn +=
-                                               RW_MGR_MEM_DQ_PER_READ_DQS) {
-                                               if (!((STATIC_CALIB_STEPS) &
-                                                       CALIB_SKIP_WRITES)) {
-                                       if (!rw_mgr_mem_calibrate_vfifo_end
-                                               (read_group, read_test_bgn)) {
-                                                       group_failed = 1;
-
-                                               if (!(gbl->phy_debug_mode_flags
-                                               & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
-                                                               return 0;
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
+                               /* Calibrate the VFIFO */
+                               if (rw_mgr_mem_calibrate_vfifo(read_group,
+                                                              read_test_bgn))
+                                       continue;
+
+                               if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+                                       return 0;
 
-                               if (group_failed != 0)
-                                       failing_groups++;
+                               /* The group failed, we're done. */
+                               goto grp_failed;
                        }
 
-                       /*
-                        * USER If there are any failing groups then report
-                        * the failure.
-                        */
-                       if (failing_groups != 0)
-                               return 0;
+                       /* Calibrate the output side */
+                       for (rank_bgn = 0, sr = 0;
+                            rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+                            rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
+                               if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
+                                       continue;
+
+                               /* Not needed in quick mode! */
+                               if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
+                                       continue;
 
-                       /* Calibrate the LFIFO */
-                       if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
                                /*
-                                * If we're skipping groups as part of debug,
-                                * don't calibrate LFIFO.
+                                * Determine if this set of ranks
+                                * should be skipped entirely.
                                 */
-                               if (param->skip_groups == 0) {
-                                       if (!rw_mgr_mem_calibrate_lfifo())
-                                               return 0;
-                               }
+                               if (param->skip_shadow_regs[sr])
+                                       continue;
+
+                               /* Calibrate WRITEs */
+                               if (rw_mgr_mem_calibrate_writes(rank_bgn,
+                                               write_group, write_test_bgn))
+                                       continue;
+
+                               group_failed = 1;
+                               if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+                                       return 0;
+                       }
+
+                       /* Some group failed, we're done. */
+                       if (group_failed)
+                               goto grp_failed;
+
+                       for (read_group = write_group * rwdqs_ratio,
+                            read_test_bgn = 0;
+                            read_group < (write_group + 1) * rwdqs_ratio;
+                            read_group++,
+                            read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+                               if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
+                                       continue;
+
+                               if (rw_mgr_mem_calibrate_vfifo_end(read_group,
+                                                               read_test_bgn))
+                                       continue;
+
+                               if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+                                       return 0;
+
+                               /* The group failed, we're done. */
+                               goto grp_failed;
                        }
+
+                       /* No group failed, continue as usual. */
+                       continue;
+
+grp_failed:            /* A group failed, increment the counter. */
+                       failing_groups++;
                }
+
+               /*
+                * If there are any failing groups then report
+                * the failure.
+                */
+               if (failing_groups != 0)
+                       return 0;
+
+               if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
+                       continue;
+
+               /*
+                * If we're skipping groups as part of debug,
+                * don't calibrate LFIFO.
+                */
+               if (param->skip_groups != 0)
+                       continue;
+
+               /* Calibrate the LFIFO */
+               if (!rw_mgr_mem_calibrate_lfifo())
+                       return 0;
        }
 
        /*
@@ -3494,44 +3513,57 @@ static uint32_t mem_calibrate(void)
        return 1;
 }
 
-static uint32_t run_mem_calibrate(void)
+/**
+ * run_mem_calibrate() - Perform memory calibration
+ *
+ * This function triggers the entire memory calibration procedure.
+ */
+static int run_mem_calibrate(void)
 {
-       uint32_t pass;
-       uint32_t debug_info;
+       int pass;
 
        debug("%s:%d\n", __func__, __LINE__);
 
        /* Reset pass/fail status shown on afi_cal_success/fail */
        writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
 
-       /* stop tracking manger */
-       uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);
-
-       writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
+       /* Stop tracking manager. */
+       clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
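+       /*
+        * Bit 22 of ctrl_cfg gates the tracking manager; the old code
+        * cleared it with the 0xFFBFFFFF mask. It is set again once
+        * calibration finishes, see below.
+        */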
 
-       initialize();
+       phy_mgr_initialize();
        rw_mgr_mem_initialize();
 
+       /* Perform the actual memory calibration. */
        pass = mem_calibrate();
 
        mem_precharge_and_activate();
        writel(0, &phy_mgr_cmd->fifo_reset);
 
+       /* Handoff. */
+       rw_mgr_mem_handoff();
        /*
-        * Handoff:
-        * Don't return control of the PHY back to AFI when in debug mode.
+        * In Hard PHY this is a 2-bit control:
+        * 0: AFI Mux Select
+        * 1: DDIO Mux Select
         */
-       if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
-               rw_mgr_mem_handoff();
-               /*
-                * In Hard PHY this is a 2-bit control:
-                * 0: AFI Mux Select
-                * 1: DDIO Mux Select
-                */
-               writel(0x2, &phy_mgr_cfg->mux_sel);
-       }
+       writel(0x2, &phy_mgr_cfg->mux_sel);
+
+       /* Start tracking manager. */
+       setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
+
+       return pass;
+}
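+
+/*
+ * Typical call sequence (as in sdram_calibration_full() at the end of
+ * this file):
+ *
+ *     pass = run_mem_calibrate();
+ *     debug_mem_calibrate(pass);
+ */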
 
-       writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);
+/**
+ * debug_mem_calibrate() - Report result of memory calibration
+ * @pass:      Value indicating whether calibration passed or failed
+ *
+ * This function reports the results of the memory calibration
+ * and writes debug information into the register file.
+ */
+static void debug_mem_calibrate(int pass)
+{
+       uint32_t debug_info;
 
        if (pass) {
                printf("%s: CALIBRATION PASSED\n", __FILE__);
@@ -3570,7 +3602,7 @@ static uint32_t run_mem_calibrate(void)
                writel(debug_info, &sdr_reg_file->failing_stage);
        }
 
-       return pass;
+       printf("%s: Calibration complete\n", __FILE__);
 }
 
 /**
@@ -3658,65 +3690,47 @@ static void initialize_hps_phy(void)
        writel(reg, &sdr_ctrl->phy_ctrl2);
 }
 
+/**
+ * initialize_tracking() - Initialize tracking
+ *
+ * Initialize the register file with usable initial data.
+ */
 static void initialize_tracking(void)
 {
-       uint32_t concatenated_longidle = 0x0;
-       uint32_t concatenated_delays = 0x0;
-       uint32_t concatenated_rw_addr = 0x0;
-       uint32_t concatenated_refresh = 0x0;
-       uint32_t trk_sample_count = 7500;
-       uint32_t dtaps_per_ptap;
-       uint32_t tmp_delay;
+       /*
+        * Initialize the register file with the correct data.
+        * Compute a usable version of the value in case we skip the
+        * full computation later.
+        */
+       writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
+              &sdr_reg_file->dtaps_per_ptap);
+
+       /* trk_sample_count */
+       writel(7500, &sdr_reg_file->trk_sample_count);
+
+       /* longidle outer loop [31:16], longidle sample count [15:0] */
+       writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
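+       /* i.e. 0x000A0064: outer loop count 10, sample count 100. */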
 
        /*
-        * compute usable version of value in case we skip full
-        * computation later
+        * trfc, worst case of 933 MHz 4Gb [31:24]
+        * trcd, worst case [23:16]
+        * vfifo wait [15:8]
+        * mux delay [7:0]
         */
-       dtaps_per_ptap = 0;
-       tmp_delay = 0;
-       while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
-               dtaps_per_ptap++;
-               tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
-       }
-       dtaps_per_ptap--;
-
-       concatenated_longidle = concatenated_longidle ^ 10;
-               /*longidle outer loop */
-       concatenated_longidle = concatenated_longidle << 16;
-       concatenated_longidle = concatenated_longidle ^ 100;
-               /*longidle sample count */
-       concatenated_delays = concatenated_delays ^ 243;
-               /* trfc, worst case of 933Mhz 4Gb */
-       concatenated_delays = concatenated_delays << 8;
-       concatenated_delays = concatenated_delays ^ 14;
-               /* trcd, worst case */
-       concatenated_delays = concatenated_delays << 8;
-       concatenated_delays = concatenated_delays ^ 10;
-               /* vfifo wait */
-       concatenated_delays = concatenated_delays << 8;
-       concatenated_delays = concatenated_delays ^ 4;
-               /* mux delay */
-
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
-       concatenated_rw_addr = concatenated_rw_addr << 8;
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
-       concatenated_rw_addr = concatenated_rw_addr << 8;
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
-       concatenated_rw_addr = concatenated_rw_addr << 8;
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;
-
-       concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
-       concatenated_refresh = concatenated_refresh << 24;
-       concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
+       writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
+              &sdr_reg_file->delays);
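+       /* i.e. 0xF30E0A04: 243, 14, 10 and 4 packed MSB-first. */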
 
-       /* Initialize the register file with the correct data */
-       writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
-       writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
-       writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
-       writel(concatenated_delays, &sdr_reg_file->delays);
-       writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
-       writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
-       writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
+       /* Tracking manager RW manager command addresses. */
+       writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
+              (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
+              &sdr_reg_file->trk_rw_mgr_addr);
+
+       writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
+              &sdr_reg_file->trk_read_dqs_width);
+
+       /* refresh all [31:24], trefi [23:0] */
+       writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
+              &sdr_reg_file->trk_rfsh);
 }
 
 int sdram_calibration_full(void)
@@ -3724,13 +3738,13 @@ int sdram_calibration_full(void)
        struct param_type my_param;
        struct gbl_type my_gbl;
        uint32_t pass;
-       uint32_t i;
+
+       memset(&my_param, 0, sizeof(my_param));
+       memset(&my_gbl, 0, sizeof(my_gbl));
 
        param = &my_param;
        gbl = &my_gbl;
 
-       /* Initialize the debug mode flags */
-       gbl->phy_debug_mode_flags = 0;
        /* Set the calibration enabled by default */
        gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
        /*
@@ -3750,13 +3764,6 @@ int sdram_calibration_full(void)
 
        initialize_tracking();
 
-       /* USER Enable all ranks, groups */
-       for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
-               param->skip_ranks[i] = 0;
-       for (i = 0; i < NUM_SHADOW_REGS; ++i)
-               param->skip_shadow_regs[i] = 0;
-       param->skip_groups = 0;
-
        printf("%s: Preparing to start memory calibration\n", __FILE__);
 
        debug("%s:%d\n", __func__, __LINE__);
@@ -3803,7 +3810,6 @@ int sdram_calibration_full(void)
                skip_delay_mask = 0x0;
 
        pass = run_mem_calibrate();
-
-       printf("%s: Calibration complete\n", __FILE__);
+       debug_mem_calibrate(pass);
        return pass;
 }