]> git.kernelconcepts.de Git - karo-tx-uboot.git/blobdiff - drivers/ddr/altera/sequencer.c
ddr: altera: sequencer: Clean data types
[karo-tx-uboot.git] / drivers / ddr / altera / sequencer.c
index dd1ce8535f2a194bd0fc2c33379a4cacb8063212..f2d164a0358207bf1c948b86a27150a19e40a351 100644 (file)
@@ -9,10 +9,6 @@
 #include <asm/arch/sdram.h>
 #include <errno.h>
 #include "sequencer.h"
-#include "sequencer_auto.h"
-#include "sequencer_auto_ac_init.h"
-#include "sequencer_auto_inst_init.h"
-#include "sequencer_defines.h"
 
 static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
        (struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
@@ -38,6 +34,10 @@ static struct socfpga_data_mgr *data_mgr =
 static struct socfpga_sdr_ctrl *sdr_ctrl =
        (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
 
+const struct socfpga_sdram_rw_mgr_config *rwcfg;
+const struct socfpga_sdram_io_config *iocfg;
+const struct socfpga_sdram_misc_config *misccfg;
+
 #define DELTA_D                1
 
 /*
@@ -60,7 +60,7 @@ static struct socfpga_sdr_ctrl *sdr_ctrl =
        STATIC_SKIP_DELAY_LOOPS)
 
 /* calibration steps requested by the rtl */
-uint16_t dyn_calib_steps;
+u16 dyn_calib_steps;
 
 /*
  * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
@@ -71,17 +71,16 @@ uint16_t dyn_calib_steps;
  * zero when skipping
  */
 
-uint16_t skip_delay_mask;      /* mask off bits when skipping/not-skipping */
+u16 skip_delay_mask;   /* mask off bits when skipping/not-skipping */
 
 #define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
        ((non_skip_value) & skip_delay_mask)
 
 struct gbl_type *gbl;
 struct param_type *param;
-uint32_t curr_shadow_reg;
 
-static void set_failing_group_stage(uint32_t group, uint32_t stage,
-       uint32_t substage)
+static void set_failing_group_stage(u32 group, u32 stage,
+       u32 substage)
 {
        /*
         * Only set the global stage if there was not been any other
@@ -140,15 +139,12 @@ static void phy_mgr_initialize(void)
        if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
                return;
 
-       ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
-               RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+       ratio = rwcfg->mem_dq_per_read_dqs /
+               rwcfg->mem_virtual_groups_per_read_dqs;
        param->read_correct_mask_vg = (1 << ratio) - 1;
        param->write_correct_mask_vg = (1 << ratio) - 1;
-       param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
-       param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
-       ratio = RW_MGR_MEM_DATA_WIDTH /
-               RW_MGR_MEM_DATA_MASK_WIDTH;
-       param->dm_correct_mask = (1 << ratio) - 1;
+       param->read_correct_mask = (1 << rwcfg->mem_dq_per_read_dqs) - 1;
+       param->write_correct_mask = (1 << rwcfg->mem_dq_per_write_dqs) - 1;
 }
 
 /**
@@ -168,14 +164,14 @@ static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
                odt_mask_0 = 0x0;
                odt_mask_1 = 0x0;
        } else {        /* RW_MGR_ODT_MODE_READ_WRITE */
-               switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
+               switch (rwcfg->mem_number_of_ranks) {
                case 1: /* 1 Rank */
                        /* Read: ODT = 0 ; Write: ODT = 1 */
                        odt_mask_0 = 0x0;
                        odt_mask_1 = 0x1;
                        break;
                case 2: /* 2 Ranks */
-                       if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
+                       if (rwcfg->mem_number_of_cs_per_dimm == 1) {
                                /*
                                 * - Dual-Slot , Single-Rank (1 CS per DIMM)
                                 *   OR
@@ -290,57 +286,57 @@ static void scc_mgr_initialize(void)
        }
 }
 
-static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
+static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
 {
        scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
 }
 
-static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
+static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
 {
        scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
 }
 
-static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
+static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
 {
        scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
 }
 
-static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
+static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
 {
        scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
 }
 
-static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
+static void scc_mgr_set_dqs_io_in_delay(u32 delay)
 {
-       scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
+       scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
                    delay);
 }
 
-static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
+static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
 {
        scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
 }
 
-static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
+static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
 {
        scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
 }
 
-static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
+static void scc_mgr_set_dqs_out1_delay(u32 delay)
 {
-       scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
+       scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
                    delay);
 }
 
-static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
+static void scc_mgr_set_dm_out1_delay(u32 dm, u32 delay)
 {
        scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
-                   RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
+                   rwcfg->mem_dq_per_write_dqs + 1 + dm,
                    delay);
 }
 
 /* load up dqs config settings */
-static void scc_mgr_load_dqs(uint32_t dqs)
+static void scc_mgr_load_dqs(u32 dqs)
 {
        writel(dqs, &sdr_scc_mgr->dqs_ena);
 }
@@ -352,13 +348,13 @@ static void scc_mgr_load_dqs_io(void)
 }
 
 /* load up dq config settings */
-static void scc_mgr_load_dq(uint32_t dq_in_group)
+static void scc_mgr_load_dq(u32 dq_in_group)
 {
        writel(dq_in_group, &sdr_scc_mgr->dq_ena);
 }
 
 /* load up dm config settings */
-static void scc_mgr_load_dm(uint32_t dm)
+static void scc_mgr_load_dm(u32 dm)
 {
        writel(dm, &sdr_scc_mgr->dm_ena);
 }
@@ -378,7 +374,7 @@ static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
 {
        u32 r;
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
                scc_mgr_set(off, grp, val);
 
@@ -403,8 +399,8 @@ static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
                              read_group, phase, 0);
 }
 
-static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
-                                                    uint32_t phase)
+static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group,
+                                                    u32 phase)
 {
        /*
         * USER although the h/w doesn't support different phases per
@@ -418,8 +414,8 @@ static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
                              write_group, phase, 0);
 }
 
-static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
-                                              uint32_t delay)
+static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group,
+                                              u32 delay)
 {
        /*
         * In shadow register mode, the T11 settings are stored in
@@ -443,8 +439,8 @@ static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
  */
 static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
 {
-       const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                         RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+       const int ratio = rwcfg->mem_if_read_dqs_width /
+                         rwcfg->mem_if_write_dqs_width;
        const int base = write_group * ratio;
        int i;
        /*
@@ -500,23 +496,23 @@ static void scc_mgr_zero_all(void)
         * USER Zero all DQS config settings, across all groups and all
         * shadow registers
         */
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
-               for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+               for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
                        /*
                         * The phases actually don't exist on a per-rank basis,
                         * but there's no harm updating them several times, so
                         * let's keep the code simple.
                         */
-                       scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
+                       scc_mgr_set_dqs_bus_in_delay(i, iocfg->dqs_in_reserve);
                        scc_mgr_set_dqs_en_phase(i, 0);
                        scc_mgr_set_dqs_en_delay(i, 0);
                }
 
-               for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
+               for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
                        scc_mgr_set_dqdqs_output_phase(i, 0);
                        /* Arria V/Cyclone V don't have out2. */
-                       scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
+                       scc_mgr_set_oct_out1_delay(i, iocfg->dqs_out_reserve);
                }
        }
 
@@ -555,8 +551,8 @@ static void scc_set_bypass_mode(const u32 write_group)
  */
 static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
 {
-       const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                         RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+       const int ratio = rwcfg->mem_if_read_dqs_width /
+                         rwcfg->mem_if_write_dqs_width;
        const int base = write_group * ratio;
        int i;
        /*
@@ -579,10 +575,10 @@ static void scc_mgr_zero_group(const u32 write_group, const int out_only)
 {
        int i, r;
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
                /* Zero all DQ config settings. */
-               for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+               for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
                        scc_mgr_set_dq_out1_delay(i, 0);
                        if (!out_only)
                                scc_mgr_set_dq_in_delay(i, 0);
@@ -603,8 +599,8 @@ static void scc_mgr_zero_group(const u32 write_group, const int out_only)
                        scc_mgr_set_dqs_io_in_delay(0);
 
                /* Arria V/Cyclone V don't have out2. */
-               scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
-               scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
+               scc_mgr_set_dqs_out1_delay(iocfg->dqs_out_reserve);
+               scc_mgr_set_oct_out1_delay(write_group, iocfg->dqs_out_reserve);
                scc_mgr_load_dqs_for_write_group(write_group);
 
                /* Multicast to all DQS IO enables (only 1 in total). */
@@ -619,11 +615,11 @@ static void scc_mgr_zero_group(const u32 write_group, const int out_only)
  * apply and load a particular input delay for the DQ pins in a group
  * group_bgn is the index of the first dq pin (in the write group)
  */
-static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
+static void scc_mgr_apply_group_dq_in_delay(u32 group_bgn, u32 delay)
 {
-       uint32_t i, p;
+       u32 i, p;
 
-       for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
+       for (i = 0, p = group_bgn; i < rwcfg->mem_dq_per_read_dqs; i++, p++) {
                scc_mgr_set_dq_in_delay(p, delay);
                scc_mgr_load_dq(p);
        }
@@ -639,16 +635,16 @@ static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
 {
        int i;
 
-       for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+       for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
                scc_mgr_set_dq_out1_delay(i, delay);
                scc_mgr_load_dq(i);
        }
 }
 
 /* apply and load a particular output delay for the DM pins in a group */
-static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
+static void scc_mgr_apply_group_dm_out1_delay(u32 delay1)
 {
-       uint32_t i;
+       u32 i;
 
        for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
                scc_mgr_set_dm_out1_delay(i, delay1);
@@ -658,8 +654,8 @@ static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
 
 
 /* apply and load delay on both DQS and OCT out1 */
-static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
-                                                   uint32_t delay)
+static void scc_mgr_apply_group_dqs_io_and_oct_out1(u32 write_group,
+                                                   u32 delay)
 {
        scc_mgr_set_dqs_out1_delay(delay);
        scc_mgr_load_dqs_io();
@@ -681,7 +677,7 @@ static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
        u32 i, new_delay;
 
        /* DQ shift */
-       for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
+       for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++)
                scc_mgr_load_dq(i);
 
        /* DM shift */
@@ -690,13 +686,13 @@ static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
 
        /* DQS shift */
        new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
-       if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+       if (new_delay > iocfg->io_out2_delay_max) {
                debug_cond(DLEVEL == 1,
                           "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
                           __func__, __LINE__, write_group, delay, new_delay,
-                          IO_IO_OUT2_DELAY_MAX,
-                          new_delay - IO_IO_OUT2_DELAY_MAX);
-               new_delay -= IO_IO_OUT2_DELAY_MAX;
+                          iocfg->io_out2_delay_max,
+                          new_delay - iocfg->io_out2_delay_max);
+               new_delay -= iocfg->io_out2_delay_max;
                scc_mgr_set_dqs_out1_delay(new_delay);
        }
 
@@ -704,13 +700,13 @@ static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
 
        /* OCT shift */
        new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
-       if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+       if (new_delay > iocfg->io_out2_delay_max) {
                debug_cond(DLEVEL == 1,
                           "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
                           __func__, __LINE__, write_group, delay,
-                          new_delay, IO_IO_OUT2_DELAY_MAX,
-                          new_delay - IO_IO_OUT2_DELAY_MAX);
-               new_delay -= IO_IO_OUT2_DELAY_MAX;
+                          new_delay, iocfg->io_out2_delay_max,
+                          new_delay - iocfg->io_out2_delay_max);
+               new_delay -= iocfg->io_out2_delay_max;
                scc_mgr_set_oct_out1_delay(write_group, new_delay);
        }
 
@@ -730,7 +726,7 @@ scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
 {
        int r;
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
                scc_mgr_apply_group_all_out_delay_add(write_group, delay);
                writel(0, &sdr_scc_mgr->update);
@@ -751,48 +747,39 @@ static void set_jump_as_return(void)
         * we always jump.
         */
        writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
-       writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+       writel(rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
 }
 
-/*
- * should always use constants as argument to ensure all computations are
- * performed at compile time
+/**
+ * delay_for_n_mem_clocks() - Delay for N memory clocks
+ * @clocks:    Length of the delay
+ *
+ * Delay for N memory clocks.
  */
-static void delay_for_n_mem_clocks(const uint32_t clocks)
+static void delay_for_n_mem_clocks(const u32 clocks)
 {
-       uint32_t afi_clocks;
-       uint8_t inner = 0;
-       uint8_t outer = 0;
-       uint16_t c_loop = 0;
+       u32 afi_clocks;
+       u16 c_loop;
+       u8 inner;
+       u8 outer;
 
        debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
 
-
-       afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO;
-       /* scale (rounding up) to get afi clocks */
+       /* Scale (rounding up) to get afi clocks. */
+       afi_clocks = DIV_ROUND_UP(clocks, misccfg->afi_rate_ratio);
+       if (afi_clocks) /* Temporary underflow protection */
+               afi_clocks--;
 
        /*
-        * Note, we don't bother accounting for being off a little bit
-        * because of a few extra instructions in outer loops
-        * Note, the loops have a test at the end, and do the test before
-        * the decrement, and so always perform the loop
+        * Note, we don't bother accounting for being off a little
+        * bit because of a few extra instructions in outer loops.
+        * Note, the loops have a test at the end, and do the test
+        * before the decrement, and so always perform the loop
         * 1 time more than the counter value
         */
-       if (afi_clocks == 0) {
-               ;
-       } else if (afi_clocks <= 0x100) {
-               inner = afi_clocks-1;
-               outer = 0;
-               c_loop = 0;
-       } else if (afi_clocks <= 0x10000) {
-               inner = 0xff;
-               outer = (afi_clocks-1) >> 8;
-               c_loop = 0;
-       } else {
-               inner = 0xff;
-               outer = 0xff;
-               c_loop = (afi_clocks-1) >> 16;
-       }
+       c_loop = afi_clocks >> 16;
+       outer = c_loop ? 0xff : (afi_clocks >> 8);
+       inner = outer ? 0xff : afi_clocks;
 
        /*
         * rom instructions are structured as follows:
@@ -811,14 +798,14 @@ static void delay_for_n_mem_clocks(const uint32_t clocks)
         * and sequencer rom and keeps the delays more accurate and reduces
         * overhead
         */
-       if (afi_clocks <= 0x100) {
+       if (afi_clocks < 0x100) {
                writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
                        &sdr_rw_load_mgr_regs->load_cntr1);
 
-               writel(RW_MGR_IDLE_LOOP1,
+               writel(rwcfg->idle_loop1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
-               writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+               writel(rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                          RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        } else {
                writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
@@ -827,24 +814,17 @@ static void delay_for_n_mem_clocks(const uint32_t clocks)
                writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
                        &sdr_rw_load_mgr_regs->load_cntr1);
 
-               writel(RW_MGR_IDLE_LOOP2,
+               writel(rwcfg->idle_loop2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);
 
-               writel(RW_MGR_IDLE_LOOP2,
+               writel(rwcfg->idle_loop2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
-               /* hack to get around compiler not being smart enough */
-               if (afi_clocks <= 0x10000) {
-                       /* only need to run once */
-                       writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
-                                                 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
-               } else {
-                       do {
-                               writel(RW_MGR_IDLE_LOOP2,
-                                       SDR_PHYGRP_RWMGRGRP_ADDRESS |
-                                       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
-                       } while (c_loop-- != 0);
-               }
+               do {
+                       writel(rwcfg->idle_loop2,
+                               SDR_PHYGRP_RWMGRGRP_ADDRESS |
+                               RW_MGR_RUN_SINGLE_GROUP_OFFSET);
+               } while (c_loop-- != 0);
        }
        debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
 }
@@ -860,7 +840,7 @@ static void delay_for_n_mem_clocks(const uint32_t clocks)
  */
 static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
 {
-       uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+       u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
                           RW_MGR_RUN_SINGLE_GROUP_OFFSET;
 
        /* Load counters */
@@ -895,44 +875,39 @@ static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
                      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
        u32 r;
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
-               if (param->skip_ranks[r]) {
-                       /* request to skip the rank */
-                       continue;
-               }
-
+       for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
 
                /* precharge all banks ... */
                if (precharge)
-                       writel(RW_MGR_PRECHARGE_ALL, grpaddr);
+                       writel(rwcfg->precharge_all, grpaddr);
 
                /*
                 * USER Use Mirror-ed commands for odd ranks if address
                 * mirrorring is on
                 */
-               if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
+               if ((rwcfg->mem_address_mirroring >> r) & 0x1) {
                        set_jump_as_return();
-                       writel(RW_MGR_MRS2_MIRR, grpaddr);
+                       writel(rwcfg->mrs2_mirr, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
-                       writel(RW_MGR_MRS3_MIRR, grpaddr);
+                       writel(rwcfg->mrs3_mirr, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
-                       writel(RW_MGR_MRS1_MIRR, grpaddr);
+                       writel(rwcfg->mrs1_mirr, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
                        writel(fin1, grpaddr);
                } else {
                        set_jump_as_return();
-                       writel(RW_MGR_MRS2, grpaddr);
+                       writel(rwcfg->mrs2, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
-                       writel(RW_MGR_MRS3, grpaddr);
+                       writel(rwcfg->mrs3, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
-                       writel(RW_MGR_MRS1, grpaddr);
+                       writel(rwcfg->mrs1, grpaddr);
                        set_jump_as_return();
                        writel(fin2, grpaddr);
                }
@@ -941,7 +916,7 @@ static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
                        continue;
 
                set_jump_as_return();
-               writel(RW_MGR_ZQCL, grpaddr);
+               writel(rwcfg->zqcl, grpaddr);
 
                /* tZQinit = tDLLK = 512 ck cycles */
                delay_for_n_mem_clocks(512);
@@ -984,9 +959,9 @@ static void rw_mgr_mem_initialize(void)
         * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
         * b = 6A
         */
-       rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
-                                 SEQ_TINIT_CNTR2_VAL,
-                                 RW_MGR_INIT_RESET_0_CKE_0);
+       rw_mgr_mem_init_load_regs(misccfg->tinit_cntr0_val, misccfg->tinit_cntr1_val,
+                                 misccfg->tinit_cntr2_val,
+                                 rwcfg->init_reset_0_cke_0);
 
        /* Indicate that memory is stable. */
        writel(1, &phy_mgr_cfg->reset_mem_stbl);
@@ -1005,46 +980,51 @@ static void rw_mgr_mem_initialize(void)
         * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
         * b = FF
         */
-       rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
-                                 SEQ_TRESET_CNTR2_VAL,
-                                 RW_MGR_INIT_RESET_1_CKE_0);
+       rw_mgr_mem_init_load_regs(misccfg->treset_cntr0_val, misccfg->treset_cntr1_val,
+                                 misccfg->treset_cntr2_val,
+                                 rwcfg->init_reset_1_cke_0);
 
        /* Bring up clock enable. */
 
        /* tXRP < 250 ck cycles */
        delay_for_n_mem_clocks(250);
 
-       rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
+       rw_mgr_mem_load_user(rwcfg->mrs0_dll_reset_mirr, rwcfg->mrs0_dll_reset,
                             0);
 }
 
-/*
- * At the end of calibration we have to program the user settings in, and
- * USER  hand off the memory to the user.
+/**
+ * rw_mgr_mem_handoff() - Hand off the memory to user
+ *
+ * At the end of calibration we have to program the user settings in
+ * and hand off the memory to the user.
  */
 static void rw_mgr_mem_handoff(void)
 {
-       rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
+       rw_mgr_mem_load_user(rwcfg->mrs0_user_mirr, rwcfg->mrs0_user, 1);
        /*
-        * USER  need to wait tMOD (12CK or 15ns) time before issuing
-        * other commands, but we will have plenty of NIOS cycles before
-        * actual handoff so its okay.
+        * Need to wait tMOD (12CK or 15ns) time before issuing other
+        * commands, but we will have plenty of NIOS cycles before actual
+        * handoff so it's okay.
         */
 }
 
-/*
- * issue write test command.
- * two variants are provided. one that just tests a write pattern and
- * another that tests datamask functionality.
+/**
+ * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
+ * @group:     Write Group
+ * @test_dm:   Arm the data mask
+ *
+ * Issue write test command. Two variants are provided, one that just tests
+ * a write pattern and another that tests datamask functionality.
  */
-static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
-                                                 uint32_t test_dm)
+static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
+                                                 u32 test_dm)
 {
-       uint32_t mcc_instruction;
-       uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
-               ENABLE_SUPER_QUICK_CALIBRATION);
-       uint32_t rw_wl_nop_cycles;
-       uint32_t addr;
+       const u32 quick_write_mode =
+               (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
+               misccfg->enable_super_quick_calibration;
+       u32 mcc_instruction;
+       u32 rw_wl_nop_cycles;
 
        /*
         * Set counter and jump addresses for the right
@@ -1085,16 +1065,16 @@ static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
 
                /* CNTR 3 - Not used */
                if (test_dm) {
-                       mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
-                       writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
+                       mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
+                       writel(rwcfg->lfsr_wr_rd_dm_bank_0_data,
                               &sdr_rw_load_jump_mgr_regs->load_jump_add2);
-                       writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
+                       writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
                               &sdr_rw_load_jump_mgr_regs->load_jump_add3);
                } else {
-                       mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
-                       writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
+                       mcc_instruction = rwcfg->lfsr_wr_rd_bank_0_wl_1;
+                       writel(rwcfg->lfsr_wr_rd_bank_0_data,
                                &sdr_rw_load_jump_mgr_regs->load_jump_add2);
-                       writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
+                       writel(rwcfg->lfsr_wr_rd_bank_0_nop,
                                &sdr_rw_load_jump_mgr_regs->load_jump_add3);
                }
        } else if (rw_wl_nop_cycles == 0) {
@@ -1107,12 +1087,12 @@ static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
 
                /* CNTR 3 - Not used */
                if (test_dm) {
-                       mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
-                       writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
+                       mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
+                       writel(rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
                               &sdr_rw_load_jump_mgr_regs->load_jump_add2);
                } else {
-                       mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
-                       writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
+                       mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
+                       writel(rwcfg->lfsr_wr_rd_bank_0_dqs,
                                &sdr_rw_load_jump_mgr_regs->load_jump_add2);
                }
        } else {
@@ -1130,12 +1110,12 @@ static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
                 */
                writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
                if (test_dm) {
-                       mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
-                       writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
+                       mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
+                       writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
                                &sdr_rw_load_jump_mgr_regs->load_jump_add3);
                } else {
-                       mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
-                       writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
+                       mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
+                       writel(rwcfg->lfsr_wr_rd_bank_0_nop,
                                &sdr_rw_load_jump_mgr_regs->load_jump_add3);
                }
        }
@@ -1157,28 +1137,39 @@ static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
        writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
 
        if (test_dm) {
-               writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
+               writel(rwcfg->lfsr_wr_rd_dm_bank_0_wait,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
        } else {
-               writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
+               writel(rwcfg->lfsr_wr_rd_bank_0_wait,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
        }
 
-       addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
-       writel(mcc_instruction, addr + (group << 2));
+       writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
+                               RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
+                               (group << 2));
 }
 
-/* Test writes, can check for a single bit pass or multiple bit pass */
+/**
+ * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
+ * @rank_bgn:          Rank number
+ * @write_group:       Write Group
+ * @use_dm:            Use DM
+ * @all_correct:       All bits must be correct in the mask
+ * @bit_chk:           Resulting bit mask after the test
+ * @all_ranks:         Test all ranks
+ *
+ * Test writes, can check for a single bit pass or multiple bit pass.
+ */
 static int
 rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
                                const u32 use_dm, const u32 all_correct,
                                u32 *bit_chk, const u32 all_ranks)
 {
        const u32 rank_end = all_ranks ?
-                               RW_MGR_MEM_NUMBER_OF_RANKS :
+                               rwcfg->mem_number_of_ranks :
                                (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
-       const u32 shift_ratio = RW_MGR_MEM_DQ_PER_WRITE_DQS /
-                               RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS;
+       const u32 shift_ratio = rwcfg->mem_dq_per_write_dqs /
+                               rwcfg->mem_virtual_groups_per_write_dqs;
        const u32 correct_mask_vg = param->write_correct_mask_vg;
 
        u32 tmp_bit_chk, base_rw_mgr;
@@ -1187,22 +1178,18 @@ rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
        *bit_chk = param->write_correct_mask;
 
        for (r = rank_bgn; r < rank_end; r++) {
-               /* Request to skip the rank */
-               if (param->skip_ranks[r])
-                       continue;
-
                /* Set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
 
                tmp_bit_chk = 0;
-               for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;
+               for (vg = rwcfg->mem_virtual_groups_per_write_dqs - 1;
                     vg >= 0; vg--) {
                        /* Reset the FIFOs to get pointers to known state. */
                        writel(0, &phy_mgr_cmd->fifo_reset);
 
                        rw_mgr_mem_calibrate_write_test_issue(
                                write_group *
-                               RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
+                               rwcfg->mem_virtual_groups_per_write_dqs + vg,
                                use_dm);
 
                        base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
@@ -1246,12 +1233,12 @@ rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
        const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
                         RW_MGR_RUN_SINGLE_GROUP_OFFSET;
        const u32 addr_offset =
-                        (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
+                        (group * rwcfg->mem_virtual_groups_per_read_dqs) << 2;
        const u32 rank_end = all_ranks ?
-                               RW_MGR_MEM_NUMBER_OF_RANKS :
+                               rwcfg->mem_number_of_ranks :
                                (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
-       const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
-                               RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+       const u32 shift_ratio = rwcfg->mem_dq_per_read_dqs /
+                               rwcfg->mem_virtual_groups_per_read_dqs;
        const u32 correct_mask_vg = param->read_correct_mask_vg;
 
        u32 tmp_bit_chk, base_rw_mgr, bit_chk;
@@ -1261,30 +1248,26 @@ rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
        bit_chk = param->read_correct_mask;
 
        for (r = rank_bgn; r < rank_end; r++) {
-               /* Request to skip the rank */
-               if (param->skip_ranks[r])
-                       continue;
-
                /* Set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
 
                /* Load up a constant bursts of read commands */
                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
-               writel(RW_MGR_GUARANTEED_READ,
+               writel(rwcfg->guaranteed_read,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);
 
                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
-               writel(RW_MGR_GUARANTEED_READ_CONT,
+               writel(rwcfg->guaranteed_read_cont,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
                tmp_bit_chk = 0;
-               for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
+               for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1;
                     vg >= 0; vg--) {
                        /* Reset the FIFOs to get pointers to known state. */
                        writel(0, &phy_mgr_cmd->fifo_reset);
                        writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                  RW_MGR_RESET_READ_DATAPATH_OFFSET);
-                       writel(RW_MGR_GUARANTEED_READ,
+                       writel(rwcfg->guaranteed_read,
                               addr + addr_offset + (vg << 2));
 
                        base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
@@ -1295,7 +1278,7 @@ rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
                bit_chk &= tmp_bit_chk;
        }
 
-       writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
+       writel(rwcfg->clear_dqs_enable, addr + (group << 2));
 
        set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
 
@@ -1321,42 +1304,38 @@ static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
                                                    const int all_ranks)
 {
        const u32 rank_end = all_ranks ?
-                       RW_MGR_MEM_NUMBER_OF_RANKS :
+                       rwcfg->mem_number_of_ranks :
                        (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
        u32 r;
 
        debug("%s:%d\n", __func__, __LINE__);
 
        for (r = rank_bgn; r < rank_end; r++) {
-               if (param->skip_ranks[r])
-                       /* request to skip the rank */
-                       continue;
-
                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
 
                /* Load up a constant bursts */
                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
 
-               writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
+               writel(rwcfg->guaranteed_write_wait0,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);
 
                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
 
-               writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
+               writel(rwcfg->guaranteed_write_wait1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
                writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
 
-               writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
+               writel(rwcfg->guaranteed_write_wait2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add2);
 
                writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
 
-               writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
+               writel(rwcfg->guaranteed_write_wait3,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add3);
 
-               writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+               writel(rwcfg->guaranteed_write, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                                RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        }
 
@@ -1383,11 +1362,11 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
                               u32 *bit_chk,
                               const u32 all_groups, const u32 all_ranks)
 {
-       const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+       const u32 rank_end = all_ranks ? rwcfg->mem_number_of_ranks :
                (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
        const u32 quick_read_mode =
                ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
-                ENABLE_SUPER_QUICK_CALIBRATION);
+                misccfg->enable_super_quick_calibration);
        u32 correct_mask_vg = param->read_correct_mask_vg;
        u32 tmp_bit_chk;
        u32 base_rw_mgr;
@@ -1398,20 +1377,16 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
        *bit_chk = param->read_correct_mask;
 
        for (r = rank_bgn; r < rank_end; r++) {
-               if (param->skip_ranks[r])
-                       /* request to skip the rank */
-                       continue;
-
                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
 
                writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
 
-               writel(RW_MGR_READ_B2B_WAIT1,
+               writel(rwcfg->read_b2b_wait1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
                writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
-               writel(RW_MGR_READ_B2B_WAIT2,
+               writel(rwcfg->read_b2b_wait2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add2);
 
                if (quick_read_mode)
@@ -1422,20 +1397,20 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
                else
                        writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
 
-               writel(RW_MGR_READ_B2B,
+               writel(rwcfg->read_b2b,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);
                if (all_groups)
-                       writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
-                              RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
+                       writel(rwcfg->mem_if_read_dqs_width *
+                              rwcfg->mem_virtual_groups_per_read_dqs - 1,
                               &sdr_rw_load_mgr_regs->load_cntr3);
                else
                        writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
 
-               writel(RW_MGR_READ_B2B,
+               writel(rwcfg->read_b2b,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add3);
 
                tmp_bit_chk = 0;
-               for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
+               for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0;
                     vg--) {
                        /* Reset the FIFOs to get pointers to known state. */
                        writel(0, &phy_mgr_cmd->fifo_reset);
@@ -1450,13 +1425,13 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
                                       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
                        }
 
-                       writel(RW_MGR_READ_B2B, addr +
-                              ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
+                       writel(rwcfg->read_b2b, addr +
+                              ((group * rwcfg->mem_virtual_groups_per_read_dqs +
                               vg) << 2));
 
                        base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
-                       tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
-                                       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+                       tmp_bit_chk <<= rwcfg->mem_dq_per_read_dqs /
+                                       rwcfg->mem_virtual_groups_per_read_dqs;
                        tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
                }
 
@@ -1464,7 +1439,7 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
        }
 
        addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
-       writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
+       writel(rwcfg->clear_dqs_enable, addr + (group << 2));
 
        set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
 
@@ -1525,7 +1500,7 @@ static void rw_mgr_decr_vfifo(const u32 grp)
 {
        u32 i;
 
-       for (i = 0; i < VFIFO_SIZE - 1; i++)
+       for (i = 0; i < misccfg->read_valid_fifo_size - 1; i++)
                rw_mgr_incr_vfifo(grp);
 }
 
@@ -1539,7 +1514,7 @@ static int find_vfifo_failing_read(const u32 grp)
 {
        u32 v, ret, fail_cnt = 0;
 
-       for (v = 0; v < VFIFO_SIZE; v++) {
+       for (v = 0; v < misccfg->read_valid_fifo_size; v++) {
                debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
                           __func__, __LINE__, v);
                ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
@@ -1574,7 +1549,7 @@ static int find_vfifo_failing_read(const u32 grp)
 static int sdr_find_phase_delay(int working, int delay, const u32 grp,
                                u32 *work, const u32 work_inc, u32 *pd)
 {
-       const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
+       const u32 max = delay ? iocfg->dqs_en_delay_max : iocfg->dqs_en_phase_max;
        u32 ret;
 
        for (; *pd <= max; (*pd)++) {
@@ -1610,7 +1585,7 @@ static int sdr_find_phase_delay(int working, int delay, const u32 grp,
 static int sdr_find_phase(int working, const u32 grp, u32 *work,
                          u32 *i, u32 *p)
 {
-       const u32 end = VFIFO_SIZE + (working ? 0 : 1);
+       const u32 end = misccfg->read_valid_fifo_size + (working ? 0 : 1);
        int ret;
 
        for (; *i < end; (*i)++) {
@@ -1618,11 +1593,11 @@ static int sdr_find_phase(int working, const u32 grp, u32 *work,
                        *p = 0;
 
                ret = sdr_find_phase_delay(working, 0, grp, work,
-                                          IO_DELAY_PER_OPA_TAP, p);
+                                          iocfg->delay_per_opa_tap, p);
                if (!ret)
                        return 0;
 
-               if (*p > IO_DQS_EN_PHASE_MAX) {
+               if (*p > iocfg->dqs_en_phase_max) {
                        /* Fiddle with FIFO. */
                        rw_mgr_incr_vfifo(grp);
                        if (!working)
@@ -1646,8 +1621,8 @@ static int sdr_find_phase(int working, const u32 grp, u32 *work,
 static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
                             u32 *p, u32 *i)
 {
-       const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
-                                  IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+       const u32 dtaps_per_ptap = iocfg->delay_per_opa_tap /
+                                  iocfg->delay_per_dqs_en_dchain_tap;
        int ret;
 
        *work_bgn = 0;
@@ -1658,7 +1633,7 @@ static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
                ret = sdr_find_phase(1, grp, work_bgn, i, p);
                if (!ret)
                        return 0;
-               *work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+               *work_bgn += iocfg->delay_per_dqs_en_dchain_tap;
        }
 
        /* Cannot find working solution */
@@ -1682,15 +1657,15 @@ static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
 
        /* Special case code for backing up a phase */
        if (*p == 0) {
-               *p = IO_DQS_EN_PHASE_MAX;
+               *p = iocfg->dqs_en_phase_max;
                rw_mgr_decr_vfifo(grp);
        } else {
                (*p)--;
        }
-       tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
+       tmp_delay = *work_bgn - iocfg->delay_per_opa_tap;
        scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
 
-       for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
+       for (d = 0; d <= iocfg->dqs_en_delay_max && tmp_delay < *work_bgn; d++) {
                scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
 
                ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
@@ -1700,12 +1675,12 @@ static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
                        break;
                }
 
-               tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+               tmp_delay += iocfg->delay_per_dqs_en_dchain_tap;
        }
 
        /* Restore VFIFO to old state before we decremented it (if needed). */
        (*p)++;
-       if (*p > IO_DQS_EN_PHASE_MAX) {
+       if (*p > iocfg->dqs_en_phase_max) {
                *p = 0;
                rw_mgr_incr_vfifo(grp);
        }
@@ -1727,8 +1702,8 @@ static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
        int ret;
 
        (*p)++;
-       *work_end += IO_DELAY_PER_OPA_TAP;
-       if (*p > IO_DQS_EN_PHASE_MAX) {
+       *work_end += iocfg->delay_per_opa_tap;
+       if (*p > iocfg->dqs_en_phase_max) {
                /* Fiddle with FIFO. */
                *p = 0;
                rw_mgr_incr_vfifo(grp);
@@ -1764,23 +1739,23 @@ static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
        debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
                   work_bgn, work_end, work_mid);
        /* Get the middle delay to be less than a VFIFO delay */
-       tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;
+       tmp_delay = (iocfg->dqs_en_phase_max + 1) * iocfg->delay_per_opa_tap;
 
        debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
        work_mid %= tmp_delay;
        debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
 
-       tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
-       if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
-               tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
-       p = tmp_delay / IO_DELAY_PER_OPA_TAP;
+       tmp_delay = rounddown(work_mid, iocfg->delay_per_opa_tap);
+       if (tmp_delay > iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap)
+               tmp_delay = iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap;
+       p = tmp_delay / iocfg->delay_per_opa_tap;
 
        debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
 
-       d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
-       if (d > IO_DQS_EN_DELAY_MAX)
-               d = IO_DQS_EN_DELAY_MAX;
-       tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+       d = DIV_ROUND_UP(work_mid - tmp_delay, iocfg->delay_per_dqs_en_dchain_tap);
+       if (d > iocfg->dqs_en_delay_max)
+               d = iocfg->dqs_en_delay_max;
+       tmp_delay += d * iocfg->delay_per_dqs_en_dchain_tap;
 
        debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
 
@@ -1791,7 +1766,7 @@ static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
         * push vfifo until we can successfully calibrate. We can do this
         * because the largest possible margin in 1 VFIFO cycle.
         */
-       for (i = 0; i < VFIFO_SIZE; i++) {
+       for (i = 0; i < misccfg->read_valid_fifo_size; i++) {
                debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
                if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
                                                             PASS_ONE_BIT,
@@ -1833,7 +1808,7 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
        scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
 
        /* Step 0: Determine number of delay taps for each phase tap. */
-       dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+       dtaps_per_ptap = iocfg->delay_per_opa_tap / iocfg->delay_per_dqs_en_dchain_tap;
 
        /* Step 1: First push vfifo until we get a failing read. */
        find_vfifo_failing_read(grp);
@@ -1870,13 +1845,13 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
 
                /* Special case code for backing up a phase */
                if (p == 0) {
-                       p = IO_DQS_EN_PHASE_MAX;
+                       p = iocfg->dqs_en_phase_max;
                        rw_mgr_decr_vfifo(grp);
                } else {
                        p = p - 1;
                }
 
-               work_end -= IO_DELAY_PER_OPA_TAP;
+               work_end -= iocfg->delay_per_opa_tap;
                scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
 
                d = 0;
@@ -1887,11 +1862,11 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
 
        /* The dtap increment to find the failing edge is done here. */
        sdr_find_phase_delay(0, 1, grp, &work_end,
-                            IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);
+                            iocfg->delay_per_dqs_en_dchain_tap, &d);
 
        /* Go back to working dtap */
        if (d != 0)
-               work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+               work_end -= iocfg->delay_per_dqs_en_dchain_tap;
 
        debug_cond(DLEVEL == 2,
                   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
@@ -1917,7 +1892,7 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
 
        /* Special case code for backing up a phase */
        if (p == 0) {
-               p = IO_DQS_EN_PHASE_MAX;
+               p = iocfg->dqs_en_phase_max;
                rw_mgr_decr_vfifo(grp);
                debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
                           __func__, __LINE__, p);
@@ -1958,7 +1933,7 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
        /*
         * The dynamically calculated dtaps_per_ptap is only valid if we
         * found a passing/failing read. If we didn't, it means d hit the max
-        * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
+        * (iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its
         * statically calculated value.
         */
        if (found_passing_read && found_failing_read)
@@ -1992,12 +1967,12 @@ static u32 search_stop_check(const int write, const int d, const int rank_bgn,
                             u32 *bit_chk, u32 *sticky_bit_chk,
                             const u32 use_read_test)
 {
-       const u32 ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                         RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+       const u32 ratio = rwcfg->mem_if_read_dqs_width /
+                         rwcfg->mem_if_write_dqs_width;
        const u32 correct_mask = write ? param->write_correct_mask :
                                         param->read_correct_mask;
-       const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
-                                   RW_MGR_MEM_DQ_PER_READ_DQS;
+       const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
+                                   rwcfg->mem_dq_per_read_dqs;
        u32 ret;
        /*
         * Stop searching when the read test doesn't pass AND when
@@ -2047,10 +2022,10 @@ static void search_left_edge(const int write, const int rank_bgn,
        u32 *sticky_bit_chk,
        int *left_edge, int *right_edge, const u32 use_read_test)
 {
-       const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
-       const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
-       const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
-                                   RW_MGR_MEM_DQ_PER_READ_DQS;
+       const u32 delay_max = write ? iocfg->io_out1_delay_max : iocfg->io_in_delay_max;
+       const u32 dqs_max = write ? iocfg->io_out1_delay_max : iocfg->dqs_in_delay_max;
+       const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
+                                   rwcfg->mem_dq_per_read_dqs;
        u32 stop, bit_chk;
        int i, d;
 
@@ -2158,10 +2133,10 @@ static int search_right_edge(const int write, const int rank_bgn,
        u32 *sticky_bit_chk,
        int *left_edge, int *right_edge, const u32 use_read_test)
 {
-       const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
-       const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
-       const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
-                                   RW_MGR_MEM_DQ_PER_READ_DQS;
+       const u32 delay_max = write ? iocfg->io_out1_delay_max : iocfg->io_in_delay_max;
+       const u32 dqs_max = write ? iocfg->io_out1_delay_max : iocfg->dqs_in_delay_max;
+       const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
+                                   rwcfg->mem_dq_per_read_dqs;
        u32 stop, bit_chk;
        int i, d;
 
@@ -2171,10 +2146,10 @@ static int search_right_edge(const int write, const int rank_bgn,
                                                                d + start_dqs);
                } else {        /* READ-ONLY */
                        scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
-                       if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
-                               uint32_t delay = d + start_dqs_en;
-                               if (delay > IO_DQS_EN_DELAY_MAX)
-                                       delay = IO_DQS_EN_DELAY_MAX;
+                       if (iocfg->shift_dqs_en_when_shift_dqs) {
+                               u32 delay = d + start_dqs_en;
+                               if (delay > iocfg->dqs_en_delay_max)
+                                       delay = iocfg->dqs_en_delay_max;
                                scc_mgr_set_dqs_en_delay(read_group, delay);
                        }
                        scc_mgr_load_dqs(read_group);
@@ -2187,7 +2162,7 @@ static int search_right_edge(const int write, const int rank_bgn,
                                         use_read_test);
                if (stop == 1) {
                        if (write && (d == 0)) {        /* WRITE-ONLY */
-                               for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+                               for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
                                        /*
                                         * d = 0 failed, but it passed when
                                         * testing the left edge, so it must be
@@ -2277,8 +2252,8 @@ static int search_right_edge(const int write, const int rank_bgn,
 static int get_window_mid_index(const int write, int *left_edge,
                                int *right_edge, int *mid_min)
 {
-       const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
-                                   RW_MGR_MEM_DQ_PER_READ_DQS;
+       const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
+                                   rwcfg->mem_dq_per_read_dqs;
        int i, mid, min_index;
 
        /* Find middle of window for each DQ bit */
@@ -2326,9 +2301,9 @@ static void center_dq_windows(const int write, int *left_edge, int *right_edge,
                              const int min_index, const int test_bgn,
                              int *dq_margin, int *dqs_margin)
 {
-       const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
-       const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
-                                   RW_MGR_MEM_DQ_PER_READ_DQS;
+       const u32 delay_max = write ? iocfg->io_out1_delay_max : iocfg->io_in_delay_max;
+       const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
+                                   rwcfg->mem_dq_per_read_dqs;
        const u32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
                                      SCC_MGR_IO_IN_DELAY_OFFSET;
        const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
@@ -2406,11 +2381,11 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
         * Store these as signed since there are comparisons with
         * signed numbers.
         */
-       uint32_t sticky_bit_chk;
-       int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
-       int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+       u32 sticky_bit_chk;
+       int32_t left_edge[rwcfg->mem_dq_per_read_dqs];
+       int32_t right_edge[rwcfg->mem_dq_per_read_dqs];
        int32_t orig_mid_min, mid_min;
-       int32_t new_dqs, start_dqs, start_dqs_en, final_dqs_en;
+       int32_t new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
        int32_t dq_margin, dqs_margin;
        int i, min_index;
        int ret;
@@ -2418,15 +2393,15 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
        debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
 
        start_dqs = readl(addr);
-       if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
-               start_dqs_en = readl(addr - IO_DQS_EN_DELAY_OFFSET);
+       if (iocfg->shift_dqs_en_when_shift_dqs)
+               start_dqs_en = readl(addr - iocfg->dqs_en_delay_offset);
 
        /* set the left and right edge of each bit to an illegal value */
-       /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
+       /* use (iocfg->io_in_delay_max + 1) as an illegal value */
        sticky_bit_chk = 0;
-       for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
-               left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
-               right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+       for (i = 0; i < rwcfg->mem_dq_per_read_dqs; i++) {
+               left_edge[i]  = iocfg->io_in_delay_max + 1;
+               right_edge[i] = iocfg->io_in_delay_max + 1;
        }
 
        /* Search for the left edge of the window for each bit */
@@ -2447,7 +2422,7 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
                 * dqs/ck relationships.
                 */
                scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
-               if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+               if (iocfg->shift_dqs_en_when_shift_dqs)
                        scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
 
                scc_mgr_load_dqs(rw_group);
@@ -2458,12 +2433,12 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
                           __func__, __LINE__, i, left_edge[i], right_edge[i]);
                if (use_read_test) {
                        set_failing_group_stage(rw_group *
-                               RW_MGR_MEM_DQ_PER_READ_DQS + i,
+                               rwcfg->mem_dq_per_read_dqs + i,
                                CAL_STAGE_VFIFO,
                                CAL_SUBSTAGE_VFIFO_CENTER);
                } else {
                        set_failing_group_stage(rw_group *
-                               RW_MGR_MEM_DQ_PER_READ_DQS + i,
+                               rwcfg->mem_dq_per_read_dqs + i,
                                CAL_STAGE_VFIFO_AFTER_WRITES,
                                CAL_SUBSTAGE_VFIFO_CENTER);
                }
@@ -2475,8 +2450,8 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
        /* Determine the amount we can change DQS (which is -mid_min) */
        orig_mid_min = mid_min;
        new_dqs = start_dqs - mid_min;
-       if (new_dqs > IO_DQS_IN_DELAY_MAX)
-               new_dqs = IO_DQS_IN_DELAY_MAX;
+       if (new_dqs > iocfg->dqs_in_delay_max)
+               new_dqs = iocfg->dqs_in_delay_max;
        else if (new_dqs < 0)
                new_dqs = 0;
 
@@ -2484,9 +2459,9 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
        debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
                   mid_min, new_dqs);
 
-       if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
-               if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
-                       mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
+       if (iocfg->shift_dqs_en_when_shift_dqs) {
+               if (start_dqs_en - mid_min > iocfg->dqs_en_delay_max)
+                       mid_min += start_dqs_en - mid_min - iocfg->dqs_en_delay_max;
                else if (start_dqs_en - mid_min < 0)
                        mid_min += start_dqs_en - mid_min;
        }
@@ -2495,7 +2470,7 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
        debug_cond(DLEVEL == 1,
                   "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
                   start_dqs,
-                  IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
+                  iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
                   new_dqs, mid_min);
 
        /* Add delay to bring centre of all DQ windows to the same "level". */
@@ -2503,7 +2478,7 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
                          min_index, test_bgn, &dq_margin, &dqs_margin);
 
        /* Move DQS-en */
-       if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+       if (iocfg->shift_dqs_en_when_shift_dqs) {
                final_dqs_en = start_dqs_en - mid_min;
                scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
                scc_mgr_load_dqs(rw_group);
@@ -2587,18 +2562,18 @@ static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
         */
 
        /* We start at zero, so have one less dq to devide among */
-       const u32 delay_step = IO_IO_IN_DELAY_MAX /
-                              (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
+       const u32 delay_step = iocfg->io_in_delay_max /
+                              (rwcfg->mem_dq_per_read_dqs - 1);
        int ret;
        u32 i, p, d, r;
 
        debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
 
        /* Try different dq_in_delays since the DQ path is shorter than DQS. */
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
                for (i = 0, p = test_bgn, d = 0;
-                    i < RW_MGR_MEM_DQ_PER_READ_DQS;
+                    i < rwcfg->mem_dq_per_read_dqs;
                     i++, p++, d += delay_step) {
                        debug_cond(DLEVEL == 1,
                                   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
@@ -2621,7 +2596,7 @@ static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
                   "%s:%d: g=%u found=%u; Reseting delay chain to zero\n",
                   __func__, __LINE__, rw_group, !ret);
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
                scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
                writel(0, &sdr_scc_mgr->update);
@@ -2655,12 +2630,8 @@ rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
         */
        grp_calibrated = 1;
        for (rank_bgn = 0, sr = 0;
-            rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+            rank_bgn < rwcfg->mem_number_of_ranks;
             rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
-               /* Check if this set of ranks should be skipped entirely. */
-               if (param->skip_shadow_regs[sr])
-                       continue;
-
                ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
                                                        test_bgn,
                                                        use_read_test,
@@ -2694,9 +2665,9 @@ rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
  */
 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
 {
-       uint32_t p, d;
-       uint32_t dtaps_per_ptap;
-       uint32_t failed_substage;
+       u32 p, d;
+       u32 dtaps_per_ptap;
+       u32 failed_substage;
 
        int ret;
 
@@ -2710,8 +2681,8 @@ static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
        failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
 
        /* USER Determine number of delay taps for each phase tap. */
-       dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
-                                     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
+       dtaps_per_ptap = DIV_ROUND_UP(iocfg->delay_per_opa_tap,
+                                     iocfg->delay_per_dqs_en_dchain_tap) - 1;
 
        for (d = 0; d <= dtaps_per_ptap; d += 2) {
                /*
@@ -2725,7 +2696,7 @@ static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
                                                                rw_group, d);
                }
 
-               for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
+               for (p = 0; p <= iocfg->dqdqs_out_phase_max; p++) {
                        /* 1) Guaranteed Write */
                        ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
                        if (ret)
@@ -2773,107 +2744,95 @@ cal_done_ok:
        return 1;
 }
 
-/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
-static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
-                                              uint32_t test_bgn)
+/**
+ * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
+ * @rw_group:          Read/Write Group
+ * @test_bgn:          Rank at which the test begins
+ *
+ * Stage 3: DQ/DQS Centering.
+ *
+ * This function implements UniPHY calibration Stage 3, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ */
+static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
+                                         const u32 test_bgn)
 {
-       uint32_t rank_bgn, sr;
-       uint32_t grp_calibrated;
-       uint32_t write_group;
-
-       debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
+       int ret;
 
-       /* update info for sims */
+       debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
 
+       /* Update info for sims. */
+       reg_file_set_group(rw_group);
        reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
        reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
 
-       write_group = read_group;
-
-       /* update info for sims */
-       reg_file_set_group(read_group);
-
-       grp_calibrated = 1;
-       /* Read per-bit deskew can be done on a per shadow register basis */
-       for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
-               rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
-               /* Determine if this set of ranks should be skipped entirely */
-               if (!param->skip_shadow_regs[sr]) {
-               /* This is the last calibration round, update FOM here */
-                       if (rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
-                                                               read_group,
-                                                               test_bgn, 0,
-                                                               1)) {
-                               grp_calibrated = 0;
-                       }
-               }
-       }
-
-
-       if (grp_calibrated == 0) {
-               set_failing_group_stage(write_group,
+       ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
+       if (ret)
+               set_failing_group_stage(rw_group,
                                        CAL_STAGE_VFIFO_AFTER_WRITES,
                                        CAL_SUBSTAGE_VFIFO_CENTER);
-               return 0;
-       }
-
-       return 1;
+       return ret;
 }
 
-/* Calibrate LFIFO to find smallest read latency */
-static uint32_t rw_mgr_mem_calibrate_lfifo(void)
+/**
+ * rw_mgr_mem_calibrate_lfifo() - Minimize latency
+ *
+ * Stage 4: Minimize latency.
+ *
+ * This function implements UniPHY calibration Stage 4, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ * Calibrate LFIFO to find smallest read latency.
+ */
+static u32 rw_mgr_mem_calibrate_lfifo(void)
 {
-       uint32_t found_one;
+       int found_one = 0;
 
        debug("%s:%d\n", __func__, __LINE__);
 
-       /* update info for sims */
+       /* Update info for sims. */
        reg_file_set_stage(CAL_STAGE_LFIFO);
        reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
 
        /* Load up the patterns used by read calibration for all ranks */
        rw_mgr_mem_calibrate_read_load_patterns(0, 1);
-       found_one = 0;
 
        do {
                writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
                debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
                           __func__, __LINE__, gbl->curr_read_lat);
 
-               if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
-                                                             NUM_READ_TESTS,
-                                                             PASS_ALL_BITS,
-                                                             1)) {
+               if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
+                                                             PASS_ALL_BITS, 1))
                        break;
-               }
 
                found_one = 1;
-               /* reduce read latency and see if things are working */
-               /* correctly */
+               /*
+                * Reduce read latency and see if things are
+                * working correctly.
+                */
                gbl->curr_read_lat--;
        } while (gbl->curr_read_lat > 0);
 
-       /* reset the fifos to get pointers to known state */
-
+       /* Reset the fifos to get pointers to known state. */
        writel(0, &phy_mgr_cmd->fifo_reset);
 
        if (found_one) {
-               /* add a fudge factor to the read latency that was determined */
+               /* Add a fudge factor to the read latency that was determined */
                gbl->curr_read_lat += 2;
                writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
-               debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
-                          read_lat=%u\n", __func__, __LINE__,
-                          gbl->curr_read_lat);
-               return 1;
+               debug_cond(DLEVEL == 2,
+                          "%s:%d lfifo: success: using read_lat=%u\n",
+                          __func__, __LINE__, gbl->curr_read_lat);
        } else {
                set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
                                        CAL_SUBSTAGE_READ_LATENCY);
 
-               debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
-                          read_lat=%u\n", __func__, __LINE__,
-                          gbl->curr_read_lat);
-               return 0;
+               debug_cond(DLEVEL == 2,
+                          "%s:%d lfifo: failed at initial read_lat=%u\n",
+                          __func__, __LINE__, gbl->curr_read_lat);
        }
+
+       return found_one;
 }
 
 /**
@@ -2896,7 +2855,7 @@ static void search_window(const int search_dm,
                          int *end_best, int *win_best, int new_dqs)
 {
        u32 bit_chk;
-       const int max = IO_IO_OUT1_DELAY_MAX - new_dqs;
+       const int max = iocfg->io_out1_delay_max - new_dqs;
        int d, di;
 
        /* Search for the/part of the window with DM/DQS shift. */
@@ -2927,7 +2886,7 @@ static void search_window(const int search_dm,
                         * If a starting edge of our window has not been seen
                         * this is our current start of the DM window.
                         */
-                       if (*bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
+                       if (*bgn_curr == iocfg->io_out1_delay_max + 1)
                                *bgn_curr = search_dm ? -d : d;
 
                        /*
@@ -2941,8 +2900,8 @@ static void search_window(const int search_dm,
                        }
                } else {
                        /* We just saw a failing test. Reset temp edge. */
-                       *bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
-                       *end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+                       *bgn_curr = iocfg->io_out1_delay_max + 1;
+                       *end_curr = iocfg->io_out1_delay_max + 1;
 
                        /* Early exit is only applicable to DQS. */
                        if (search_dm)
@@ -2953,7 +2912,7 @@ static void search_window(const int search_dm,
                         * chain space is less than already seen largest
                         * window we can exit.
                         */
-                       if (*win_best - 1 > IO_IO_OUT1_DELAY_MAX - new_dqs - d)
+                       if (*win_best - 1 > iocfg->io_out1_delay_max - new_dqs - d)
                                break;
                }
        }
@@ -2975,16 +2934,16 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
        int i;
        u32 sticky_bit_chk;
        u32 min_index;
-       int left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
-       int right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+       int left_edge[rwcfg->mem_dq_per_write_dqs];
+       int right_edge[rwcfg->mem_dq_per_write_dqs];
        int mid;
        int mid_min, orig_mid_min;
        int new_dqs, start_dqs;
        int dq_margin, dqs_margin, dm_margin;
-       int bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
-       int end_curr = IO_IO_OUT1_DELAY_MAX + 1;
-       int bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
-       int end_best = IO_IO_OUT1_DELAY_MAX + 1;
+       int bgn_curr = iocfg->io_out1_delay_max + 1;
+       int end_curr = iocfg->io_out1_delay_max + 1;
+       int bgn_best = iocfg->io_out1_delay_max + 1;
+       int end_best = iocfg->io_out1_delay_max + 1;
        int win_best = 0;
 
        int ret;
@@ -2995,18 +2954,18 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
 
        start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
                          SCC_MGR_IO_OUT1_DELAY_OFFSET) +
-                         (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
+                         (rwcfg->mem_dq_per_write_dqs << 2));
 
        /* Per-bit deskew. */
 
        /*
         * Set the left and right edge of each bit to an illegal value.
-        * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
+        * Use (iocfg->io_out1_delay_max + 1) as an illegal value.
         */
        sticky_bit_chk = 0;
-       for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
-               left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
-               right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
+       for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
+               left_edge[i]  = iocfg->io_out1_delay_max + 1;
+               right_edge[i] = iocfg->io_out1_delay_max + 1;
        }
 
        /* Search for the left edge of the window for each bit. */
@@ -3048,10 +3007,10 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
 
        /*
         * Set the left and right edge of each bit to an illegal value.
-        * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
+        * Use (iocfg->io_out1_delay_max + 1) as an illegal value.
         */
-       left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
-       right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
+       left_edge[0]  = iocfg->io_out1_delay_max + 1;
+       right_edge[0] = iocfg->io_out1_delay_max + 1;
 
        /* Search for the/part of the window with DM shift. */
        search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr,
@@ -3066,8 +3025,8 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
         * search begins as a new search.
         */
        if (end_curr != 0) {
-               bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
-               end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+               bgn_curr = iocfg->io_out1_delay_max + 1;
+               end_curr = iocfg->io_out1_delay_max + 1;
        }
 
        /* Search for the/part of the window with DQS shifts. */
@@ -3165,28 +3124,24 @@ static void mem_precharge_and_activate(void)
 {
        int r;
 
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
-               /* Test if the rank should be skipped. */
-               if (param->skip_ranks[r])
-                       continue;
-
+       for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
                /* Set rank. */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
 
                /* Precharge all banks. */
-               writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+               writel(rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                             RW_MGR_RUN_SINGLE_GROUP_OFFSET);
 
                writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
-               writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
+               writel(rwcfg->activate_0_and_1_wait1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);
 
                writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
-               writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
+               writel(rwcfg->activate_0_and_1_wait2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
                /* Activate rows. */
-               writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+               writel(rwcfg->activate_0_and_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                                RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        }
 }
@@ -3203,7 +3158,7 @@ static void mem_init_latency(void)
         * so max latency in AFI clocks, used here, is correspondingly
         * smaller.
         */
-       const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
+       const u32 max_latency = (1 << misccfg->max_latency_count_width) - 1;
        u32 rlat, wlat;
 
        debug("%s:%d\n", __func__, __LINE__);
@@ -3238,24 +3193,23 @@ static void mem_init_latency(void)
  */
 static void mem_skip_calibrate(void)
 {
-       uint32_t vfifo_offset;
-       uint32_t i, j, r;
+       u32 vfifo_offset;
+       u32 i, j, r;
 
        debug("%s:%d\n", __func__, __LINE__);
        /* Need to update every shadow register set used by the interface */
-       for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+       for (r = 0; r < rwcfg->mem_number_of_ranks;
             r += NUM_RANKS_PER_SHADOW_REG) {
                /*
                 * Set output phase alignment settings appropriate for
                 * skip calibration.
                 */
-               for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+               for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
                        scc_mgr_set_dqs_en_phase(i, 0);
-#if IO_DLL_CHAIN_LENGTH == 6
-                       scc_mgr_set_dqdqs_output_phase(i, 6);
-#else
-                       scc_mgr_set_dqdqs_output_phase(i, 7);
-#endif
+                       if (iocfg->dll_chain_length == 6)
+                               scc_mgr_set_dqdqs_output_phase(i, 6);
+                       else
+                               scc_mgr_set_dqdqs_output_phase(i, 7);
                        /*
                         * Case:33398
                         *
@@ -3274,20 +3228,20 @@ static void mem_skip_calibrate(void)
                         *
                         * Hence, to make DQS aligned to CK, we need to delay
                         * DQS by:
-                        *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
+                        *    (720 - 90 - 180 - 2 * (360 / iocfg->dll_chain_length))
                         *
-                        * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
+                        * Dividing the above by (360 / iocfg->dll_chain_length)
                         * gives us the number of ptaps, which simplies to:
                         *
-                        *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
+                        *    (1.25 * iocfg->dll_chain_length - 2)
                         */
                        scc_mgr_set_dqdqs_output_phase(i,
-                                       1.25 * IO_DLL_CHAIN_LENGTH - 2);
+                                       1.25 * iocfg->dll_chain_length - 2);
                }
                writel(0xff, &sdr_scc_mgr->dqs_ena);
                writel(0xff, &sdr_scc_mgr->dqs_io_ena);
 
-               for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
+               for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
                        writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
                                  SCC_MGR_GROUP_COUNTER_OFFSET);
                }
@@ -3297,7 +3251,7 @@ static void mem_skip_calibrate(void)
        }
 
        /* Compensate for simulation model behaviour */
-       for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+       for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
                scc_mgr_set_dqs_bus_in_delay(i, 10);
                scc_mgr_load_dqs(i);
        }
@@ -3307,7 +3261,7 @@ static void mem_skip_calibrate(void)
         * ArriaV has hard FIFOs that can only be initialized by incrementing
         * in sequencer.
         */
-       vfifo_offset = CALIB_VFIFO_OFFSET;
+       vfifo_offset = misccfg->calib_vfifo_offset;
        for (j = 0; j < vfifo_offset; j++)
                writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
        writel(0, &phy_mgr_cmd->fifo_reset);
@@ -3316,7 +3270,7 @@ static void mem_skip_calibrate(void)
         * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
         * setting from generation-time constant.
         */
-       gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
+       gbl->curr_read_lat = misccfg->calib_lfifo_offset;
        writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
 }
 
@@ -3325,18 +3279,18 @@ static void mem_skip_calibrate(void)
  *
  * Perform memory calibration.
  */
-static uint32_t mem_calibrate(void)
+static u32 mem_calibrate(void)
 {
-       uint32_t i;
-       uint32_t rank_bgn, sr;
-       uint32_t write_group, write_test_bgn;
-       uint32_t read_group, read_test_bgn;
-       uint32_t run_groups, current_run;
-       uint32_t failing_groups = 0;
-       uint32_t group_failed = 0;
+       u32 i;
+       u32 rank_bgn, sr;
+       u32 write_group, write_test_bgn;
+       u32 read_group, read_test_bgn;
+       u32 run_groups, current_run;
+       u32 failing_groups = 0;
+       u32 group_failed = 0;
 
-       const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                               RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+       const u32 rwdqs_ratio = rwcfg->mem_if_read_dqs_width /
+                               rwcfg->mem_if_write_dqs_width;
 
        debug("%s:%d\n", __func__, __LINE__);
 
@@ -3353,7 +3307,7 @@ static uint32_t mem_calibrate(void)
        /* Initialize bit slips. */
        mem_precharge_and_activate();
 
-       for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+       for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
                writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
                          SCC_MGR_GROUP_COUNTER_OFFSET);
                /* Only needed once to set all groups, pins, DQ, DQS, DM. */
@@ -3387,11 +3341,11 @@ static uint32_t mem_calibrate(void)
                 */
                scc_mgr_zero_all();
 
-               run_groups = ~param->skip_groups;
+               run_groups = ~0;
 
                for (write_group = 0, write_test_bgn = 0; write_group
-                       < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
-                       write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
+                       < rwcfg->mem_if_write_dqs_width; write_group++,
+                       write_test_bgn += rwcfg->mem_dq_per_write_dqs) {
 
                        /* Initialize the group failure */
                        group_failed = 0;
@@ -3412,7 +3366,7 @@ static uint32_t mem_calibrate(void)
                             read_test_bgn = 0;
                             read_group < (write_group + 1) * rwdqs_ratio;
                             read_group++,
-                            read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+                            read_test_bgn += rwcfg->mem_dq_per_read_dqs) {
                                if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
                                        continue;
 
@@ -3430,7 +3384,7 @@ static uint32_t mem_calibrate(void)
 
                        /* Calibrate the output side */
                        for (rank_bgn = 0, sr = 0;
-                            rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+                            rank_bgn < rwcfg->mem_number_of_ranks;
                             rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
                                if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
                                        continue;
@@ -3439,13 +3393,6 @@ static uint32_t mem_calibrate(void)
                                if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
                                        continue;
 
-                               /*
-                                * Determine if this set of ranks
-                                * should be skipped entirely.
-                                */
-                               if (param->skip_shadow_regs[sr])
-                                       continue;
-
                                /* Calibrate WRITEs */
                                if (!rw_mgr_mem_calibrate_writes(rank_bgn,
                                                write_group, write_test_bgn))
@@ -3464,11 +3411,11 @@ static uint32_t mem_calibrate(void)
                             read_test_bgn = 0;
                             read_group < (write_group + 1) * rwdqs_ratio;
                             read_group++,
-                            read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+                            read_test_bgn += rwcfg->mem_dq_per_read_dqs) {
                                if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
                                        continue;
 
-                               if (rw_mgr_mem_calibrate_vfifo_end(read_group,
+                               if (!rw_mgr_mem_calibrate_vfifo_end(read_group,
                                                                read_test_bgn))
                                        continue;
 
@@ -3496,13 +3443,6 @@ grp_failed:              /* A group failed, increment the counter. */
                if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
                        continue;
 
-               /*
-                * If we're skipping groups as part of debug,
-                * don't calibrate LFIFO.
-                */
-               if (param->skip_groups != 0)
-                       continue;
-
                /* Calibrate the LFIFO */
                if (!rw_mgr_mem_calibrate_lfifo())
                        return 0;
@@ -3566,7 +3506,7 @@ static int run_mem_calibrate(void)
  */
 static void debug_mem_calibrate(int pass)
 {
-       uint32_t debug_info;
+       u32 debug_info;
 
        if (pass) {
                printf("%s: CALIBRATION PASSED\n", __FILE__);
@@ -3615,15 +3555,19 @@ static void debug_mem_calibrate(int pass)
  */
 static void hc_initialize_rom_data(void)
 {
+       unsigned int nelem = 0;
+       const u32 *rom_init;
        u32 i, addr;
 
+       socfpga_get_seq_inst_init(&rom_init, &nelem);
        addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
-       for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
-               writel(inst_rom_init[i], addr + (i << 2));
+       for (i = 0; i < nelem; i++)
+               writel(rom_init[i], addr + (i << 2));
 
+       socfpga_get_seq_ac_init(&rom_init, &nelem);
        addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
-       for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
-               writel(ac_rom_init[i], addr + (i << 2));
+       for (i = 0; i < nelem; i++)
+               writel(rom_init[i], addr + (i << 2));
 }
 
 /**
@@ -3634,7 +3578,7 @@ static void hc_initialize_rom_data(void)
 static void initialize_reg_file(void)
 {
        /* Initialize the register file with the correct data */
-       writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
+       writel(misccfg->reg_file_init_seq_signature, &sdr_reg_file->signature);
        writel(0, &sdr_reg_file->debug_data_addr);
        writel(0, &sdr_reg_file->cur_stage);
        writel(0, &sdr_reg_file->fom);
@@ -3650,13 +3594,13 @@ static void initialize_reg_file(void)
  */
 static void initialize_hps_phy(void)
 {
-       uint32_t reg;
+       u32 reg;
        /*
         * Tracking also gets configured here because it's in the
         * same register.
         */
-       uint32_t trk_sample_count = 7500;
-       uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
+       u32 trk_sample_count = 7500;
+       u32 trk_long_idle_sample_count = (10 << 16) | 100;
        /*
         * Format is number of outer loops in the 16 MSB, sample
         * count in 16 LSB.
@@ -3705,7 +3649,7 @@ static void initialize_tracking(void)
         * Compute usable version of value in case we skip full
         * computation later.
         */
-       writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
+       writel(DIV_ROUND_UP(iocfg->delay_per_opa_tap, iocfg->delay_per_dchain_tap) - 1,
               &sdr_reg_file->dtaps_per_ptap);
 
        /* trk_sample_count */
@@ -3724,15 +3668,15 @@ static void initialize_tracking(void)
               &sdr_reg_file->delays);
 
        /* mux delay */
-       writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
-              (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
+       writel((rwcfg->idle << 24) | (rwcfg->activate_1 << 16) |
+              (rwcfg->sgle_read << 8) | (rwcfg->precharge_all << 0),
               &sdr_reg_file->trk_rw_mgr_addr);
 
-       writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
+       writel(rwcfg->mem_if_read_dqs_width,
               &sdr_reg_file->trk_read_dqs_width);
 
        /* trefi [7:0] */
-       writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
+       writel((rwcfg->refresh_all << 24) | (1000 << 0),
               &sdr_reg_file->trk_rfsh);
 }
 
@@ -3740,7 +3684,7 @@ int sdram_calibration_full(void)
 {
        struct param_type my_param;
        struct gbl_type my_gbl;
-       uint32_t pass;
+       u32 pass;
 
        memset(&my_param, 0, sizeof(my_param));
        memset(&my_gbl, 0, sizeof(my_gbl));
@@ -3748,6 +3692,10 @@ int sdram_calibration_full(void)
        param = &my_param;
        gbl = &my_gbl;
 
+       rwcfg = socfpga_get_sdram_rwmgr_config();
+       iocfg = socfpga_get_sdram_io_config();
+       misccfg = socfpga_get_sdram_misc_config();
+
        /* Set the calibration enabled by default */
        gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
        /*
@@ -3772,25 +3720,25 @@ int sdram_calibration_full(void)
        debug("%s:%d\n", __func__, __LINE__);
        debug_cond(DLEVEL == 1,
                   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
-                  RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
-                  RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
-                  RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
-                  RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
+                  rwcfg->mem_number_of_ranks, rwcfg->mem_number_of_cs_per_dimm,
+                  rwcfg->mem_dq_per_read_dqs, rwcfg->mem_dq_per_write_dqs,
+                  rwcfg->mem_virtual_groups_per_read_dqs,
+                  rwcfg->mem_virtual_groups_per_write_dqs);
        debug_cond(DLEVEL == 1,
                   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
-                  RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
-                  RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
-                  IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
+                  rwcfg->mem_if_read_dqs_width, rwcfg->mem_if_write_dqs_width,
+                  rwcfg->mem_data_width, rwcfg->mem_data_mask_width,
+                  iocfg->delay_per_opa_tap, iocfg->delay_per_dchain_tap);
        debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
-                  IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
+                  iocfg->delay_per_dqs_en_dchain_tap, iocfg->dll_chain_length);
        debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
-                  IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
-                  IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
+                  iocfg->dqs_en_phase_max, iocfg->dqdqs_out_phase_max,
+                  iocfg->dqs_en_delay_max, iocfg->dqs_in_delay_max);
        debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
-                  IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
-                  IO_IO_OUT2_DELAY_MAX);
+                  iocfg->io_in_delay_max, iocfg->io_out1_delay_max,
+                  iocfg->io_out2_delay_max);
        debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
-                  IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
+                  iocfg->dqs_in_reserve, iocfg->dqs_out_reserve);
 
        hc_initialize_rom_data();