/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier:    BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
        (struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
        (struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
        (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
        (struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
        (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
        (struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
        (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
        (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D         1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic.  In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
        STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping
 */

uint16_t skip_delay_mask;       /* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
        ((non_skip_value) & skip_delay_mask)
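
/*
 * Example (illustrative): with skipping disabled, skip_delay_mask is
 * all-ones, so SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff) == 0xff; with
 * skipping enabled the mask is zero and the same expression collapses
 * to 0, turning the delay loops into no-ops.
 */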

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
        uint32_t write_group, uint32_t use_dm,
        uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
        uint32_t substage)
{
        /*
         * Only set the global stage if there has not been any other
         * failing group
         */
        if (gbl->error_stage == CAL_STAGE_NIL) {
                gbl->error_substage = substage;
                gbl->error_stage = stage;
                gbl->error_group = group;
        }
}

static void reg_file_set_group(u16 set_group)
{
        clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
        clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
        set_sub_stage &= 0xff;
        clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
        u32 ratio;

        debug("%s:%d\n", __func__, __LINE__);
        /* Calibration has control over path to memory */
        /*
         * In Hard PHY this is a 2-bit control:
         * 0: AFI Mux Select
         * 1: DDIO Mux Select
         */
        writel(0x3, &phy_mgr_cfg->mux_sel);
        /* USER memory clock is not stable, we begin initialization */
        writel(0, &phy_mgr_cfg->reset_mem_stbl);

        /* USER calibration status all set to zero */
        writel(0, &phy_mgr_cfg->cal_status);

        writel(0, &phy_mgr_cfg->cal_debug_info);

        /* Init params only if we do NOT skip calibration. */
        if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
                return;

        ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
                RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
        param->read_correct_mask_vg = (1 << ratio) - 1;
        param->write_correct_mask_vg = (1 << ratio) - 1;
        param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
        param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
        ratio = RW_MGR_MEM_DATA_WIDTH /
                RW_MGR_MEM_DATA_MASK_WIDTH;
        param->dm_correct_mask = (1 << ratio) - 1;
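
        /*
         * Worked example (hypothetical widths): with 8 DQ pins per read
         * DQS and 2 virtual groups per DQS, ratio == 4 above, giving
         * read_correct_mask_vg == 0xf and read_correct_mask == 0xff,
         * i.e. one mask bit per DQ pin.
         */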
}

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:       Rank mask
 * @odt_mode:   ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
        u32 odt_mask_0 = 0;
        u32 odt_mask_1 = 0;
        u32 cs_and_odt_mask;

        if (odt_mode == RW_MGR_ODT_MODE_OFF) {
                odt_mask_0 = 0x0;
                odt_mask_1 = 0x0;
        } else {        /* RW_MGR_ODT_MODE_READ_WRITE */
                switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
                case 1: /* 1 Rank */
                        /* Read: ODT = 0 ; Write: ODT = 1 */
                        odt_mask_0 = 0x0;
                        odt_mask_1 = 0x1;
                        break;
                case 2: /* 2 Ranks */
                        if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
                                /*
                                 * - Dual-Slot , Single-Rank (1 CS per DIMM)
                                 *   OR
                                 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
                                 *
                                 * Since MEM_NUMBER_OF_RANKS is 2, they
                                 * are both single rank with 2 CS each
                                 * (special for RDIMM).
                                 *
                                 * Read: Turn on ODT on the opposite rank
                                 * Write: Turn on ODT on all ranks
                                 */
                                odt_mask_0 = 0x3 & ~(1 << rank);
                                odt_mask_1 = 0x3;
                        } else {
                                /*
                                 * - Single-Slot , Dual-Rank (2 CS per DIMM)
                                 *
                                 * Read: Turn off ODT on all ranks
                                 * Write: Turn on ODT on active rank
                                 */
                                odt_mask_0 = 0x0;
                                odt_mask_1 = 0x3 & (1 << rank);
                        }
                        break;
                case 4: /* 4 Ranks */
                        /* Read:
                         * ----------+-----------------------+
                         *           |         ODT           |
                         * Read From +-----------------------+
                         *   Rank    |  3  |  2  |  1  |  0  |
                         * ----------+-----+-----+-----+-----+
                         *     0     |  0  |  1  |  0  |  0  |
                         *     1     |  1  |  0  |  0  |  0  |
                         *     2     |  0  |  0  |  0  |  1  |
                         *     3     |  0  |  0  |  1  |  0  |
                         * ----------+-----+-----+-----+-----+
                         *
                         * Write:
                         * ----------+-----------------------+
                         *           |         ODT           |
                         * Write To  +-----------------------+
                         *   Rank    |  3  |  2  |  1  |  0  |
                         * ----------+-----+-----+-----+-----+
                         *     0     |  0  |  1  |  0  |  1  |
                         *     1     |  1  |  0  |  1  |  0  |
                         *     2     |  0  |  1  |  0  |  1  |
                         *     3     |  1  |  0  |  1  |  0  |
                         * ----------+-----+-----+-----+-----+
                         */
                        switch (rank) {
                        case 0:
                                odt_mask_0 = 0x4;
                                odt_mask_1 = 0x5;
                                break;
                        case 1:
                                odt_mask_0 = 0x8;
                                odt_mask_1 = 0xA;
                                break;
                        case 2:
                                odt_mask_0 = 0x1;
                                odt_mask_1 = 0x5;
                                break;
                        case 3:
                                odt_mask_0 = 0x2;
                                odt_mask_1 = 0xA;
                                break;
                        }
                        break;
                }
        }

        cs_and_odt_mask = (0xFF & ~(1 << rank)) |
                          ((0xFF & odt_mask_0) << 8) |
                          ((0xFF & odt_mask_1) << 16);
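
        /*
         * Worked example (4-rank case, rank == 0): the CS field is
         * 0xFF & ~1 == 0xfe, odt_mask_0 == 0x4 and odt_mask_1 == 0x5,
         * so cs_and_odt_mask == 0x0504fe.
         */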
        writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:        Base offset in SCC Manager space
 * @grp:        Read/Write group
 * @val:        Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
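        /*
         * Each group register is one 32-bit word wide, so the group index
         * is scaled by 4 (grp << 2) to form the byte offset.
         */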
        writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
        /*
         * Clear register file for HPS. 16 (2^4) is the size of the
         * full register file in the scc mgr:
         *      RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
         *                             MEM_IF_READ_DQS_WIDTH - 1);
         */
        int i;

        for (i = 0; i < 16; i++) {
                debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
                           __func__, __LINE__, i);
                scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
        }
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
        scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
        scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
        scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
        scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
        scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
                    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
        scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
        scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
        scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
                    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
        scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
                    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
                    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
        writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
        writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
        writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
        writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:        Base offset in SCC Manager space
 * @grp:        Read/Write group
 * @val:        Value to be set
 * @update:     If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
                                  const int update)
{
        u32 r;

        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
             r += NUM_RANKS_PER_SHADOW_REG) {
                scc_mgr_set(off, grp, val);

                if (update || (r == 0)) {
                        writel(grp, &sdr_scc_mgr->dqs_ena);
                        writel(0, &sdr_scc_mgr->update);
                }
        }
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
        /*
         * USER although the h/w doesn't support different phases per
         * shadow register, for simplicity our scc manager modeling
         * keeps different phase settings per shadow reg, and it's
         * important for us to keep them in sync to match h/w.
         * for efficiency, the scan chain update should occur only
         * once to sr0.
         */
        scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
                              read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
                                                     uint32_t phase)
{
        /*
         * USER although the h/w doesn't support different phases per
         * shadow register, for simplicity our scc manager modeling
         * keeps different phase settings per shadow reg, and it's
         * important for us to keep them in sync to match h/w.
         * for efficiency, the scan chain update should occur only
         * once to sr0.
         */
        scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
                              write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
                                               uint32_t delay)
{
        /*
         * In shadow register mode, the T11 settings are stored in
         * registers in the core, which are updated by the DQS_ENA
         * signals. Not issuing the SCC_MGR_UPD command allows us to
         * save lots of rank switching overhead, by calling
         * select_shadow_regs_for_update with update_scan_chains
         * set to 0.
         */
        scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
                              read_group, delay, 1);
        writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:        Write group
 * @delay:              Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
        const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
                          RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
        const int base = write_group * ratio;
        int i;
        /*
         * Load the setting in the SCC manager
         * Although OCT affects only write data, the OCT delay is controlled
         * by the DQS logic block which is instantiated once per read group.
         * For protocols where a write group consists of multiple read groups,
         * the setting must be set multiple times.
         */
        for (i = 0; i < ratio; i++)
                scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
        /*
         * Load the fixed setting in the SCC manager
         * bits: 0:0 = 1'b1     - DQS bypass
         * bits: 1:1 = 1'b1     - DQ bypass
         * bits: 4:2 = 3'b001   - rfifo_mode
         * bits: 6:5 = 2'b01    - rfifo clock_select
         * bits: 7:7 = 1'b0     - separate gating from ungating setting
         * bits: 8:8 = 1'b0     - separate OE from Output delay setting
         */
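        /* With the encoding above, the bits assemble to value == 0x27. */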
        const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
                          (1 << 2) | (1 << 1) | (1 << 0);
        const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
                         SCC_MGR_HHP_GLOBALS_OFFSET |
                         SCC_MGR_HHP_EXTRAS_OFFSET;

        debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
                   __func__, __LINE__);
        writel(value, addr);
        debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
                   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
        int i, r;

        /*
         * USER Zero all DQS config settings, across all groups and all
         * shadow registers
         */
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
             r += NUM_RANKS_PER_SHADOW_REG) {
                for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
                        /*
                         * The phases actually don't exist on a per-rank basis,
                         * but there's no harm updating them several times, so
                         * let's keep the code simple.
                         */
                        scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
                        scc_mgr_set_dqs_en_phase(i, 0);
                        scc_mgr_set_dqs_en_delay(i, 0);
                }

                for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
                        scc_mgr_set_dqdqs_output_phase(i, 0);
                        /* Arria V/Cyclone V don't have out2. */
                        scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
                }
        }

        /* Multicast to all DQS group enables. */
        writel(0xff, &sdr_scc_mgr->dqs_ena);
        writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:        Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
        /* Multicast to all DQ enables. */
        writel(0xff, &sdr_scc_mgr->dq_ena);
        writel(0xff, &sdr_scc_mgr->dm_ena);

        /* Update current DQS IO enable. */
        writel(0, &sdr_scc_mgr->dqs_io_ena);

        /* Update the DQS logic. */
        writel(write_group, &sdr_scc_mgr->dqs_ena);

        /* Hit update. */
        writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:        Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
        const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
                          RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
        const int base = write_group * ratio;
        int i;
        /*
         * Load the setting in the SCC manager
         * Although OCT affects only write data, the OCT delay is controlled
         * by the DQS logic block which is instantiated once per read group.
         * For protocols where a write group consists of multiple read groups,
         * the setting must be set multiple times.
         */
        for (i = 0; i < ratio; i++)
                writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
        int i, r;

        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
             r += NUM_RANKS_PER_SHADOW_REG) {
                /* Zero all DQ config settings. */
                for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
                        scc_mgr_set_dq_out1_delay(i, 0);
                        if (!out_only)
                                scc_mgr_set_dq_in_delay(i, 0);
                }

                /* Multicast to all DQ enables. */
                writel(0xff, &sdr_scc_mgr->dq_ena);

                /* Zero all DM config settings. */
                for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
                        scc_mgr_set_dm_out1_delay(i, 0);

                /* Multicast to all DM enables. */
                writel(0xff, &sdr_scc_mgr->dm_ena);

                /* Zero all DQS IO settings. */
                if (!out_only)
                        scc_mgr_set_dqs_io_in_delay(0);

                /* Arria V/Cyclone V don't have out2. */
                scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
                scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
                scc_mgr_load_dqs_for_write_group(write_group);

                /* Multicast to all DQS IO enables (only 1 in total). */
                writel(0, &sdr_scc_mgr->dqs_io_ena);

                /* Hit update to zero everything. */
                writel(0, &sdr_scc_mgr->update);
        }
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
        uint32_t i, p;

        for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
                scc_mgr_set_dq_in_delay(p, delay);
                scc_mgr_load_dq(p);
        }
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:              Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
        int i;

        for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
                scc_mgr_set_dq_out1_delay(i, delay);
                scc_mgr_load_dq(i);
        }
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
        uint32_t i;

        for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
                scc_mgr_set_dm_out1_delay(i, delay1);
                scc_mgr_load_dm(i);
        }
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
                                                    uint32_t delay)
{
        scc_mgr_set_dqs_out1_delay(delay);
        scc_mgr_load_dqs_io();

        scc_mgr_set_oct_out1_delay(write_group, delay);
        scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:        Write group
 * @delay:              Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
                                                  const u32 delay)
{
        u32 i, new_delay;

        /* DQ shift */
        for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
                scc_mgr_load_dq(i);

        /* DM shift */
        for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
                scc_mgr_load_dm(i);

        /* DQS shift */
        new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
        if (new_delay > IO_IO_OUT2_DELAY_MAX) {
                debug_cond(DLEVEL == 1,
                           "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
                           __func__, __LINE__, write_group, delay, new_delay,
                           IO_IO_OUT2_DELAY_MAX,
                           new_delay - IO_IO_OUT2_DELAY_MAX);
                new_delay -= IO_IO_OUT2_DELAY_MAX;
                scc_mgr_set_dqs_out1_delay(new_delay);
        }

        scc_mgr_load_dqs_io();

        /* OCT shift */
        new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
        if (new_delay > IO_IO_OUT2_DELAY_MAX) {
                debug_cond(DLEVEL == 1,
                           "%s:%d (%u, %u) OCT: %u > %d; adding %u to OUT1\n",
                           __func__, __LINE__, write_group, delay,
                           new_delay, IO_IO_OUT2_DELAY_MAX,
                           new_delay - IO_IO_OUT2_DELAY_MAX);
                new_delay -= IO_IO_OUT2_DELAY_MAX;
                scc_mgr_set_oct_out1_delay(write_group, new_delay);
        }

        scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:        Write group
 * @delay:              Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
                                                const u32 delay)
{
        int r;

        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
             r += NUM_RANKS_PER_SHADOW_REG) {
                scc_mgr_apply_group_all_out_delay_add(write_group, delay);
                writel(0, &sdr_scc_mgr->update);
        }
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom; could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
        /*
         * To save space, we replace return with a jump to a special shared
         * RETURN instruction, and set the counter to a large value so that
         * we always jump.
         */
        writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
        writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
        uint32_t afi_clocks;
        uint8_t inner = 0;
        uint8_t outer = 0;
        uint16_t c_loop = 0;

        debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

        /* Scale (rounding up) to get afi clocks. */
        afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

        /*
         * Note: we don't bother accounting for being off a little bit
         * because of a few extra instructions in outer loops.
         * Note: the loops have a test at the end, and do the test before
         * the decrement, so they always execute the loop one more time
         * than the counter value.
         */
        if (afi_clocks == 0) {
                ;
        } else if (afi_clocks <= 0x100) {
                inner = afi_clocks - 1;
                outer = 0;
                c_loop = 0;
        } else if (afi_clocks <= 0x10000) {
                inner = 0xff;
                outer = (afi_clocks - 1) >> 8;
                c_loop = 0;
        } else {
                inner = 0xff;
                outer = 0xff;
                c_loop = (afi_clocks - 1) >> 16;
        }
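
        /*
         * Worked example (assuming AFI_RATE_RATIO == 2): clocks == 600
         * yields afi_clocks == 300; since 0x100 < 300 <= 0x10000, the
         * middle branch applies and inner == 0xff,
         * outer == (300 - 1) >> 8 == 1, c_loop == 0.
         */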

        /*
         * rom instructions are structured as follows:
         *
         *    IDLE_LOOP2: jnz cntr0, TARGET_A
         *    IDLE_LOOP1: jnz cntr1, TARGET_B
         *                return
         *
         * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
         * TARGET_B is set to IDLE_LOOP2 as well
         *
         * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
         * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
         *
         * a little confusing, but it helps save precious space in the inst_rom
         * and sequencer rom and keeps the delays more accurate and reduces
         * overhead
         */
        if (afi_clocks <= 0x100) {
                writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
                        &sdr_rw_load_mgr_regs->load_cntr1);

                writel(RW_MGR_IDLE_LOOP1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);

                writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                          RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        } else {
                writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
                        &sdr_rw_load_mgr_regs->load_cntr0);

                writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
                        &sdr_rw_load_mgr_regs->load_cntr1);

                writel(RW_MGR_IDLE_LOOP2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);

                writel(RW_MGR_IDLE_LOOP2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);

                /* hack to get around compiler not being smart enough */
                if (afi_clocks <= 0x10000) {
                        /* only need to run once */
                        writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                                  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
                } else {
                        do {
                                writel(RW_MGR_IDLE_LOOP2,
                                        SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                        RW_MGR_RUN_SINGLE_GROUP_OFFSET);
                        } while (c_loop-- != 0);
                }
        }
        debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:      Counter 0 value
 * @cntr1:      Counter 1 value
 * @cntr2:      Counter 2 value
 * @jump:       Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
        uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
                           RW_MGR_RUN_SINGLE_GROUP_OFFSET;

        /* Load counters */
        writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
               &sdr_rw_load_mgr_regs->load_cntr0);
        writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
               &sdr_rw_load_mgr_regs->load_cntr1);
        writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
               &sdr_rw_load_mgr_regs->load_cntr2);

        /* Load jump address */
        writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
        writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
        writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

        /* Execute count instruction */
        writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:       Final instruction 1
 * @fin2:       Final instruction 2
 * @precharge:  If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
                                 const int precharge)
{
        u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
                      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
        u32 r;

        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
                if (param->skip_ranks[r]) {
                        /* request to skip the rank */
                        continue;
                }

                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

                /* precharge all banks ... */
                if (precharge)
                        writel(RW_MGR_PRECHARGE_ALL, grpaddr);

                /*
                 * USER Use mirrored commands for odd ranks if address
                 * mirroring is on
                 */
                if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
                        set_jump_as_return();
                        writel(RW_MGR_MRS2_MIRR, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
                        writel(RW_MGR_MRS3_MIRR, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
                        writel(RW_MGR_MRS1_MIRR, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
                        writel(fin1, grpaddr);
                } else {
                        set_jump_as_return();
                        writel(RW_MGR_MRS2, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
                        writel(RW_MGR_MRS3, grpaddr);
                        delay_for_n_mem_clocks(4);
                        set_jump_as_return();
                        writel(RW_MGR_MRS1, grpaddr);
                        set_jump_as_return();
                        writel(fin2, grpaddr);
                }

                if (precharge)
                        continue;

                set_jump_as_return();
                writel(RW_MGR_ZQCL, grpaddr);

                /* tZQinit = tDLLK = 512 ck cycles */
                delay_for_n_mem_clocks(512);
        }
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
        debug("%s:%d\n", __func__, __LINE__);

        /* The reset / cke part of initialization is broadcasted to all ranks */
        writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

        /*
         * Here's how you load register for a loop
         * Counters are located @ 0x800
         * Jump addresses are located @ 0xC00
         * For both, registers 0 to 3 are selected using bits 3 and 2, like
         * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
         * I know this ain't pretty, but Avalon bus throws away the 2 least
         * significant bits
         */

        /* Start with memory RESET activated */

        /* tINIT = 200us */

        /*
         * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
         * If a and b are the numbers of iterations in the 2 nested loops,
         * it takes the following number of cycles to complete the operation:
         * number_of_cycles = ((2 + n) * a + 2) * b
         * where n is the number of instructions in the inner loop
         * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
         * b = 6A
         */
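        /*
         * Sanity check on the figures above: ((2 + 0) * 256 + 2) * 106
         * == 54484 cycles, comfortably above the ~54000 cycles needed
         * for 200us at 3.75ns per cycle.
         */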
        rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
                                  SEQ_TINIT_CNTR2_VAL,
                                  RW_MGR_INIT_RESET_0_CKE_0);

        /* Indicate that memory is stable. */
        writel(1, &phy_mgr_cfg->reset_mem_stbl);

        /*
         * transition the RESET to high
         * Wait for 500us
         */

        /*
         * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
         * If a and b are the numbers of iterations in the 2 nested loops,
         * it takes the following number of cycles to complete the operation:
         * number_of_cycles = ((2 + n) * a + 2) * b
         * where n is the number of instructions in the inner loop
         * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
         * b = FF
         */
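        /*
         * Sanity check on the figures above: ((2 + 2) * 131 + 2) * 256
         * == 134656 cycles, comfortably above the ~134000 cycles needed
         * for 500us at 3.75ns per cycle.
         */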
        rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
                                  SEQ_TRESET_CNTR2_VAL,
                                  RW_MGR_INIT_RESET_1_CKE_0);

        /* Bring up clock enable. */

        /* tXRP < 250 ck cycles */
        delay_for_n_mem_clocks(250);

        rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
                             0);
}

/*
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
        rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
        /*
         * USER need to wait tMOD (12CK or 15ns) time before issuing
         * other commands, but we will have plenty of NIOS cycles before
         * actual handoff, so it's okay.
         */
}

/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
        uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
        uint32_t all_ranks)
{
        uint32_t r, vg;
        uint32_t correct_mask_vg;
        uint32_t tmp_bit_chk;
        uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
                (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
        uint32_t addr;
        uint32_t base_rw_mgr;

        *bit_chk = param->read_correct_mask;
        correct_mask_vg = param->read_correct_mask_vg;

        for (r = rank_bgn; r < rank_end; r++) {
                if (param->skip_ranks[r])
                        /* request to skip the rank */
                        continue;

                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

                /* Load up a constant burst of read commands */
                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
                writel(RW_MGR_GUARANTEED_READ,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);

                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
                writel(RW_MGR_GUARANTEED_READ_CONT,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);

                tmp_bit_chk = 0;
                for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
                        /* reset the fifos to get pointers to known state */
                        writel(0, &phy_mgr_cmd->fifo_reset);
                        writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                  RW_MGR_RESET_READ_DATAPATH_OFFSET);

                        tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
                                / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
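
                        /*
                         * Each virtual group contributes DQ_PER_READ_DQS /
                         * VIRTUAL_GROUPS_PER_READ_DQS result bits, so the
                         * shift above makes room for this pass; with the
                         * hypothetical widths from above (8 DQ, 2 virtual
                         * groups), that is 4 bits per iteration.
                         */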

                        addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
                        writel(RW_MGR_GUARANTEED_READ, addr +
                               ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
                                vg) << 2));

                        base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
                        tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr));

                        if (vg == 0)
                                break;
                }
                *bit_chk &= tmp_bit_chk;
        }

        addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
        writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

        set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
        debug_cond(DLEVEL == 1,
                   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
                   __func__, __LINE__, group, *bit_chk,
                   param->read_correct_mask,
                   (long unsigned int)(*bit_chk == param->read_correct_mask));
        return *bit_chk == param->read_correct_mask;
}

static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
        (uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
        return rw_mgr_mem_calibrate_read_test_patterns(0, group,
                num_tries, bit_chk, 1);
}

/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
        uint32_t all_ranks)
{
        uint32_t r;
        uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
                (rank_bgn + NUM_RANKS_PER_SHADOW_REG);

        debug("%s:%d\n", __func__, __LINE__);
        for (r = rank_bgn; r < rank_end; r++) {
                if (param->skip_ranks[r])
                        /* request to skip the rank */
                        continue;

                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

                /* Load up a constant burst */
                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

                writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);

                writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

                writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);

                writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

                writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add2);

                writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

                writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add3);

                writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                                RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        }

        set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
        uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
        uint32_t all_groups, uint32_t all_ranks)
{
        uint32_t r, vg;
        uint32_t correct_mask_vg;
        uint32_t tmp_bit_chk;
        uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
                (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
        uint32_t addr;
        uint32_t base_rw_mgr;

        *bit_chk = param->read_correct_mask;
        correct_mask_vg = param->read_correct_mask_vg;

        uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
                CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

        for (r = rank_bgn; r < rank_end; r++) {
                if (param->skip_ranks[r])
                        /* request to skip the rank */
                        continue;

                /* set rank */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

                writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

                writel(RW_MGR_READ_B2B_WAIT1,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);

                writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
                writel(RW_MGR_READ_B2B_WAIT2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add2);

                if (quick_read_mode)
                        /* need at least two (1+1) reads to capture failures */
                        writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
                else if (all_groups)
                        writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
                else
                        writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

                writel(RW_MGR_READ_B2B,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add0);
                if (all_groups)
                        writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
                               RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
                               &sdr_rw_load_mgr_regs->load_cntr3);
                else
                        writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

                writel(RW_MGR_READ_B2B,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add3);

                tmp_bit_chk = 0;
                for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
                        /* reset the fifos to get pointers to known state */
                        writel(0, &phy_mgr_cmd->fifo_reset);
                        writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                  RW_MGR_RESET_READ_DATAPATH_OFFSET);

                        tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
                                / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

                        if (all_groups)
                                addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_ALL_GROUPS_OFFSET;
                        else
                                addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;

                        writel(RW_MGR_READ_B2B, addr +
                               ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
                               vg) << 2));

                        base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
                        tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));

                        if (vg == 0)
                                break;
                }
                *bit_chk &= tmp_bit_chk;
        }

        addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
        writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

        if (all_correct) {
                set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
                debug_cond(DLEVEL == 2,
                           "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu\n",
                           __func__, __LINE__, group, all_groups, *bit_chk,
                           param->read_correct_mask,
                           (long unsigned int)(*bit_chk ==
                           param->read_correct_mask));
                return *bit_chk == param->read_correct_mask;
        } else {
                set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
                debug_cond(DLEVEL == 2,
                           "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
                           __func__, __LINE__, group, all_groups, *bit_chk,
                           (long unsigned int)0,
                           (long unsigned int)(*bit_chk != 0x00));
                return *bit_chk != 0x00;
        }
}

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
        uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
        uint32_t all_groups)
{
        return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
                                              bit_chk, all_groups, 1);
}

static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
        writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
        (*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
        uint32_t i;

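        /*
         * The VFIFO pointer can only be advanced, so "decrementing" by one
         * is done by incrementing VFIFO_SIZE - 1 times and letting the
         * pointer wrap around.
         */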
        for (i = 0; i < VFIFO_SIZE - 1; i++)
                rw_mgr_incr_vfifo(grp, v);
}

static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
        uint32_t v;
        uint32_t fail_cnt = 0;
        uint32_t test_status;

        for (v = 0; v < VFIFO_SIZE; ) {
                debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
                           __func__, __LINE__, v);
                test_status = rw_mgr_mem_calibrate_read_test_all_ranks
                        (grp, 1, PASS_ONE_BIT, bit_chk, 0);
                if (!test_status) {
                        fail_cnt++;

                        if (fail_cnt == 2)
                                break;
                }

                /* fiddle with FIFO */
                rw_mgr_incr_vfifo(grp, &v);
        }

        if (v >= VFIFO_SIZE) {
                /* no failing read found!! Something must have gone wrong */
                debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
                           __func__, __LINE__);
                return 0;
        } else {
                return v;
        }
}

static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
                              uint32_t dtaps_per_ptap, uint32_t *work_bgn,
                              uint32_t *v, uint32_t *d, uint32_t *p,
                              uint32_t *i, uint32_t *max_working_cnt)
{
        uint32_t found_begin = 0;
        uint32_t tmp_delay = 0;
        uint32_t test_status;

        for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
                IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
                *work_bgn = tmp_delay;
                scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

                for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
                        for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
                                IO_DELAY_PER_OPA_TAP) {
                                scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

                                test_status =
                                rw_mgr_mem_calibrate_read_test_all_ranks
                                (*grp, 1, PASS_ONE_BIT, bit_chk, 0);

                                if (test_status) {
                                        *max_working_cnt = 1;
                                        found_begin = 1;
                                        break;
                                }
                        }

                        if (found_begin)
                                break;

                        if (*p > IO_DQS_EN_PHASE_MAX)
                                /* fiddle with FIFO */
                                rw_mgr_incr_vfifo(*grp, v);
                }

                if (found_begin)
                        break;
        }

        if (*i >= VFIFO_SIZE) {
                /* cannot find working solution */
                debug_cond(DLEVEL == 2,
                           "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
                           __func__, __LINE__);
                return 0;
        } else {
                return 1;
        }
}

static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
                             uint32_t *work_bgn, uint32_t *v, uint32_t *d,
                             uint32_t *p, uint32_t *max_working_cnt)
{
        uint32_t found_begin = 0;
        uint32_t tmp_delay;

        /* Special case code for backing up a phase */
        if (*p == 0) {
                *p = IO_DQS_EN_PHASE_MAX;
                rw_mgr_decr_vfifo(*grp, v);
        } else {
                (*p)--;
        }
        tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
        scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

        for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
                (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
                scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

                if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
                                                             PASS_ONE_BIT,
                                                             bit_chk, 0)) {
                        found_begin = 1;
                        *work_bgn = tmp_delay;
                        break;
                }
        }

        /* We have found a working dtap before the ptap found above */
        if (found_begin == 1)
                (*max_working_cnt)++;

        /*
         * Restore VFIFO to old state before we decremented it
         * (if needed).
         */
        (*p)++;
        if (*p > IO_DQS_EN_PHASE_MAX) {
                *p = 0;
                rw_mgr_incr_vfifo(*grp, v);
        }

        scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}

static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
                             uint32_t *work_bgn, uint32_t *v, uint32_t *d,
                             uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
                             uint32_t *work_end)
{
        uint32_t found_end = 0;

        (*p)++;
        *work_end += IO_DELAY_PER_OPA_TAP;
        if (*p > IO_DQS_EN_PHASE_MAX) {
                /* fiddle with FIFO */
                *p = 0;
                rw_mgr_incr_vfifo(*grp, v);
        }

        for (; *i < VFIFO_SIZE + 1; (*i)++) {
                for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
                        += IO_DELAY_PER_OPA_TAP) {
                        scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

                        if (!rw_mgr_mem_calibrate_read_test_all_ranks
                                (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
                                found_end = 1;
                                break;
                        } else {
                                (*max_working_cnt)++;
                        }
                }

                if (found_end)
                        break;

                if (*p > IO_DQS_EN_PHASE_MAX) {
                        /* fiddle with FIFO */
                        rw_mgr_incr_vfifo(*grp, v);
                        *p = 0;
                }
        }

        if (*i >= VFIFO_SIZE + 1) {
                /* cannot see edge of failing read */
1464                 debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end: failed\n",
1465                            __func__, __LINE__);
1466                 return 0;
1467         } else {
1468                 return 1;
1469         }
1470 }
1471
1472 static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
1473                                   uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1474                                   uint32_t *p, uint32_t *work_mid,
1475                                   uint32_t *work_end)
1476 {
1477         int i;
1478         int tmp_delay = 0;
1479
1480         *work_mid = (*work_bgn + *work_end) / 2;
1481
1482         debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
1483                    *work_bgn, *work_end, *work_mid);
1484         /* Get the middle delay to be less than a VFIFO delay */
1485         for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
1486                 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
1487                 ;
1488         debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
1489         while (*work_mid > tmp_delay)
1490                 *work_mid -= tmp_delay;
1491         debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);
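        /*
         * Example of the folding above (hypothetical values): with
         * IO_DQS_EN_PHASE_MAX = 7 and IO_DELAY_PER_OPA_TAP = 416 ps, one
         * VFIFO cycle spans 8 * 416 = 3328 ps, so work_mid = 4000 would
         * be reduced to 4000 - 3328 = 672 ps before being decomposed
         * into ptaps and dtaps below.
         */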
1492
1493         tmp_delay = 0;
1494         for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
1495                 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
1496                 ;
1497         tmp_delay -= IO_DELAY_PER_OPA_TAP;
1498         debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
1499         for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
1500                 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
1501                 ;
1502         debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);
1503
1504         scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
1505         scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
1506
1507         /*
1508          * Push the vfifo until we can successfully calibrate. We can do
1509          * this because the largest possible margin is 1 VFIFO cycle.
1510          */
1511         for (i = 0; i < VFIFO_SIZE; i++) {
1512                 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
1513                            *v);
1514                 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
1515                                                              PASS_ONE_BIT,
1516                                                              bit_chk, 0)) {
1517                         break;
1518                 }
1519
1520                 /* fiddle with FIFO */
1521                 rw_mgr_incr_vfifo(*grp, v);
1522         }
1523
1524         if (i >= VFIFO_SIZE) {
1525                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: failed\n",
1526                            __func__, __LINE__);
1527                 return 0;
1528         } else {
1529                 return 1;
1530         }
1531 }
1532
1533 /* find a good dqs enable to use */
1534 static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
1535 {
1536         uint32_t v, d, p, i;
1537         uint32_t max_working_cnt;
1538         uint32_t bit_chk;
1539         uint32_t dtaps_per_ptap;
1540         uint32_t work_bgn, work_mid, work_end;
1541         uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
1542
1543         debug("%s:%d %u\n", __func__, __LINE__, grp);
1544
1545         reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1546
1547         scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1548         scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1549
1550         /* ************************************************************** */
1551         /* * Step 0 : Determine number of delay taps for each phase tap * */
1552         dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
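        /*
         * Worked example (hypothetical tap delays, not taken from any
         * particular configuration): with IO_DELAY_PER_OPA_TAP = 416 ps
         * and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps, the integer
         * division gives dtaps_per_ptap = 416 / 25 = 16, i.e. roughly
         * 16 delay-chain taps span one phase tap.
         */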
1553
1554         /* ********************************************************* */
1555         /* * Step 1 : First push vfifo until we get a failing read * */
1556         v = find_vfifo_read(grp, &bit_chk);
1557
1558         max_working_cnt = 0;
1559
1560         /* ******************************************************** */
1561         /* * step 2: find first working phase, increment in ptaps * */
1562         work_bgn = 0;
1563         if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
1564                                 &p, &i, &max_working_cnt) == 0)
1565                 return 0;
1566
1567         work_end = work_bgn;
1568
1569         /*
1570          * If d is 0, the working window covers a phase tap and we can
1571          * follow the old procedure; otherwise, we've found the beginning
1572          * and we need to increment the dtaps until we find the end.
1573          */
1574         if (d == 0) {
1575                 /* ********************************************************* */
1576                 /* * step 3a: if we have room, back off by one and
1577                 increment in dtaps * */
1578
1579                 sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1580                                  &max_working_cnt);
1581
1582                 /* ********************************************************* */
1583                 /* * step 4a: go forward from working phase to non working
1584                 phase, increment in ptaps * */
1585                 if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1586                                          &i, &max_working_cnt, &work_end) == 0)
1587                         return 0;
1588
1589                 /* ********************************************************* */
1590                 /* * step 5a:  back off one from last, increment in dtaps  * */
1591
1592                 /* Special case code for backing up a phase */
1593                 if (p == 0) {
1594                         p = IO_DQS_EN_PHASE_MAX;
1595                         rw_mgr_decr_vfifo(grp, &v);
1596                 } else {
1597                         p = p - 1;
1598                 }
1599
1600                 work_end -= IO_DELAY_PER_OPA_TAP;
1601                 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1602
1603                 /* * The actual increment of dtaps is done outside of
1604                 the if/else loop to share code */
1605                 d = 0;
1606
1607                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: vfifo=%u ptap=%u\n",
1608                            __func__, __LINE__, v, p);
1610         } else {
1611                 /* ******************************************************* */
1612                 /* * step 3-5b:  Find the right edge of the window using
1613                 delay taps   * */
1614                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo=%u ptap=%u dtap=%u bgn=%u\n",
1615                            __func__, __LINE__, v, p, d, work_bgn);
1617
1618                 work_end = work_bgn;
1619
1620                 /* * The actual increment of dtaps is done outside of the
1621                 if/else loop to share code */
1622
1623                 /* Only here to counterbalance a subtract later on which is
1624                 not needed if this branch of the algorithm is taken */
1625                 max_working_cnt++;
1626         }
1627
1628         /* The dtap increment to find the failing edge is done here */
1629         for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
1630                 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1631                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: dtap=%u\n",
1632                            __func__, __LINE__, d);
1633                 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1634
1635                 if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1636                                                               PASS_ONE_BIT,
1637                                                               &bit_chk, 0))
1638                         break;
1639         }
1641
1642         /* Go back to working dtap */
1643         if (d != 0)
1644                 work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1645
1646         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u ptap=%u dtap=%u end=%u\n",
1647                    __func__, __LINE__, v, p, d - 1, work_end);
1649
1650         if (work_end < work_bgn) {
1651                 /* nil range */
1652                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: failed\n",
1653                            __func__, __LINE__);
1654                 return 0;
1655         }
1656
1657         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
1658                    __func__, __LINE__, work_bgn, work_end);
1659
1660         /* *************************************************************** */
1661         /*
1662          * We need to calculate the number of dtaps that equal a ptap.
1663          * To do that, we'll back up a ptap and re-find the edge of the
1664          * window using dtaps.
1665          */
1666
1667         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap for tracking\n",
1668                    __func__, __LINE__);
1669
1670         /* Special case code for backing up a phase */
1671         if (p == 0) {
1672                 p = IO_DQS_EN_PHASE_MAX;
1673                 rw_mgr_decr_vfifo(grp, &v);
1674                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup cycle/phase: v=%u p=%u\n",
1675                            __func__, __LINE__, v, p);
1677         } else {
1678                 p = p - 1;
1679                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup phase only: v=%u p=%u\n",
1680                            __func__, __LINE__, v, p);
1682         }
1683
1684         scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1685
1686         /*
1687          * Increase dtap until we first see a passing read (in case the
1688          * window is smaller than a ptap),
1689          * and then a failing read to mark the edge of the window again
1690          */
1691
1692         /* Find a passing read */
1693         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
1694                    __func__, __LINE__);
1695         found_passing_read = 0;
1696         found_failing_read = 0;
1697         initial_failing_dtap = d;
1698         for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
1699                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
1700                            __func__, __LINE__, d);
1701                 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1702
1703                 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1704                                                              PASS_ONE_BIT,
1705                                                              &bit_chk, 0)) {
1706                         found_passing_read = 1;
1707                         break;
1708                 }
1709         }
1710
1711         if (found_passing_read) {
1712                 /* Find a failing read */
1713                 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing read\n",
1714                            __func__, __LINE__);
1715                 for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
1716                         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
1717                                    __func__, __LINE__, d);
1718                         scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1719
1720                         if (!rw_mgr_mem_calibrate_read_test_all_ranks
1721                                 (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
1722                                 found_failing_read = 1;
1723                                 break;
1724                         }
1725                 }
1726         } else {
1727                 debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to calculate dtaps per ptap. Fall back on static value\n",
1728                            __func__, __LINE__);
1730         }
1731
1732         /*
1733          * The dynamically calculated dtaps_per_ptap is only valid if we
1734          * found both a passing and a failing read. If we didn't, it means
1735          * d hit the max (IO_DQS_EN_DELAY_MAX) and dtaps_per_ptap retains
1736          * its statically calculated value.
1737          */
1738         if (found_passing_read && found_failing_read)
1739                 dtaps_per_ptap = d - initial_failing_dtap;
1740
1741         writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
1742         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u - %u = %u\n",
1743                    __func__, __LINE__, d, initial_failing_dtap,
1744                    dtaps_per_ptap);
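        /*
         * E.g. (hypothetical sweep): if the re-found failing edge sits at
         * d = 19 and the first sweep failed at initial_failing_dtap = 3,
         * the tracking logic is handed dtaps_per_ptap = 19 - 3 = 16.
         */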
1745
1746         /* ******************************************** */
1747         /* * step 6:  Find the centre of the window   * */
1748         if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1749                                    &work_mid, &work_end) == 0)
1750                 return 0;
1751
1752         debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: vfifo=%u ptap=%u dtap=%u\n",
1753                    __func__, __LINE__, v, p - 1, d);
1755         return 1;
1756 }
1757
1758 /*
1759  * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
1760  * dq_in_delay values
1761  */
1762 static uint32_t
1763 rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
1764 (uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
1765 {
1766         uint32_t found;
1767         uint32_t i;
1768         uint32_t p;
1769         uint32_t d;
1770         uint32_t r;
1771
1772         /* We start at zero, so there is one less dq to divide among. */
1773         const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
1774                 (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
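        /*
         * Example (hypothetical limits): with IO_IO_IN_DELAY_MAX = 31 and
         * RW_MGR_MEM_DQ_PER_READ_DQS = 8, delay_step = 31 / 7 = 4, so the
         * eight DQ pins get input delays 0, 4, 8, ..., 28 in the loop
         * below.
         */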
1775
1776         debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
1777               test_bgn);
1778
1779         /* try different dq_in_delays since the dq path is shorter than dqs */
1780
1781         for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
1782              r += NUM_RANKS_PER_SHADOW_REG) {
1783                 for (i = 0, p = test_bgn, d = 0;
1784                      i < RW_MGR_MEM_DQ_PER_READ_DQS;
1785                      i++, p++, d += delay_step) {
1786                         debug_cond(DLEVEL == 1,
1787                                    "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%u/%u r=%u i=%u p=%u d=%u\n",
1788                                    __func__, __LINE__, write_group, read_group, r, i, p, d);
1789                         scc_mgr_set_dq_in_delay(p, d);
1790                         scc_mgr_load_dq(p);
1791                 }
1792                 writel(0, &sdr_scc_mgr->update);
1793         }
1794
1795         found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
1796
1797         debug_cond(DLEVEL == 1,
1798                    "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%u/%u found=%u; Resetting delay chain to zero\n",
1799                    __func__, __LINE__, write_group, read_group, found);
1801
1802         for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
1803              r += NUM_RANKS_PER_SHADOW_REG) {
1804                 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
1805                         i++, p++) {
1806                         scc_mgr_set_dq_in_delay(p, 0);
1807                         scc_mgr_load_dq(p);
1808                 }
1809                 writel(0, &sdr_scc_mgr->update);
1810         }
1811
1812         return found;
1813 }
1814
1815 /* per-bit deskew DQ and center */
1816 static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
1817         uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
1818         uint32_t use_read_test, uint32_t update_fom)
1819 {
1820         uint32_t i, p, d, min_index;
1821         uint32_t bit_chk;
1822         uint32_t sticky_bit_chk;
1823         /*
1824          * Store the edges as signed, since there are comparisons with
1825          * signed numbers.
1826          */
1827         int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1828         int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1829         int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
1830         int32_t mid;
1831         int32_t orig_mid_min, mid_min;
1832         int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
1833                 final_dqs_en;
1834         int32_t dq_margin, dqs_margin;
1835         uint32_t stop;
1836         uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
1837         uint32_t addr;
1838
1839         debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
1840
1841         addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
1842         start_dqs = readl(addr + (read_group << 2));
1843         if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
1844                 start_dqs_en = readl(addr + ((read_group << 2)
1845                                      - IO_DQS_EN_DELAY_OFFSET));
1846
1847         /* set the left and right edge of each bit to an illegal value */
1848         /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
1849         sticky_bit_chk = 0;
1850         for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1851                 left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
1852                 right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1853         }
1854
1855         /* Search for the left edge of the window for each bit */
1856         for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
1857                 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);
1858
1859                 writel(0, &sdr_scc_mgr->update);
1860
1861                 /*
1862                  * Stop searching when the read test doesn't pass AND when
1863                  * we've seen a passing read on every bit.
1864                  */
1865                 if (use_read_test) {
1866                         stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1867                                 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1868                                 &bit_chk, 0, 0);
1869                 } else {
1870                         rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1871                                                         0, PASS_ONE_BIT,
1872                                                         &bit_chk, 0);
1873                         bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1874                                 (read_group - (write_group *
1875                                         RW_MGR_MEM_IF_READ_DQS_WIDTH /
1876                                         RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1877                         stop = (bit_chk == 0);
1878                 }
1879                 sticky_bit_chk = sticky_bit_chk | bit_chk;
1880                 stop = stop && (sticky_bit_chk == param->read_correct_mask);
1881                 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u && %u\n",
1882                            __func__, __LINE__, d, sticky_bit_chk,
1883                            param->read_correct_mask, stop);
1885
1886                 if (stop == 1) {
1887                         break;
1888                 } else {
1889                         for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1890                                 if (bit_chk & 1) {
1891                                         /* Remember a passing test as the
1892                                         left_edge */
1893                                         left_edge[i] = d;
1894                                 } else {
1895                                         /* If a left edge has not been seen yet,
1896                                         then a future passing test will mark
1897                                         this edge as the right edge */
1898                                         if (left_edge[i] ==
1899                                                 IO_IO_IN_DELAY_MAX + 1) {
1900                                                 right_edge[i] = -(d + 1);
1901                                         }
1902                                 }
1903                                 bit_chk = bit_chk >> 1;
1904                         }
1905                 }
1906         }
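        /*
         * Edge bookkeeping sketch for the loop above (hypothetical bit):
         * each pass records the current dtap in left_edge[i] (so it ends
         * up holding the last passing tap), while a fail seen before any
         * pass stores -(d + 1) in right_edge[i] as a provisional marker.
         * E.g. if bit i fails at d = 0..2 and passes from d = 3 on, the
         * fails leave right_edge[i] = -3 and left_edge[i] tracks upward
         * from 3.
         */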
1907
1908         /* Reset DQ delay chains to 0 */
1909         scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
1910         sticky_bit_chk = 0;
1911         for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
1912                 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
1913                            __func__, __LINE__, i, left_edge[i], i,
1914                            right_edge[i]);
1915
1916                 /*
1917                  * Check for cases where we haven't found the left edge,
1918                  * which makes our assignment of the right edge invalid.
1919                  * Reset it to the illegal value.
1920                  */
1921                 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
1922                         right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1923                         right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1924                         debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
1925                                    __func__, __LINE__, i, right_edge[i]);
1927                 }
1928
1929                 /*
1930                  * Reset sticky bit (except for bits where we have seen
1931                  * both the left and right edge).
1932                  */
1933                 sticky_bit_chk = sticky_bit_chk << 1;
1934                 if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
1935                     (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1936                         sticky_bit_chk = sticky_bit_chk | 1;
1937                 }
1938
1939                 if (i == 0)
1940                         break;
1941         }
1942
1943         /* Search for the right edge of the window for each bit */
1944         for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
1945                 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
1946                 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
1947                         uint32_t delay = d + start_dqs_en;
1948                         if (delay > IO_DQS_EN_DELAY_MAX)
1949                                 delay = IO_DQS_EN_DELAY_MAX;
1950                         scc_mgr_set_dqs_en_delay(read_group, delay);
1951                 }
1952                 scc_mgr_load_dqs(read_group);
1953
1954                 writel(0, &sdr_scc_mgr->update);
1955
1956                 /*
1957                  * Stop searching when the read test doesn't pass AND when
1958                  * we've seen a passing read on every bit.
1959                  */
1960                 if (use_read_test) {
1961                         stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1962                                 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1963                                 &bit_chk, 0, 0);
1964                 } else {
1965                         rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1966                                                         0, PASS_ONE_BIT,
1967                                                         &bit_chk, 0);
1968                         bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1969                                 (read_group - (write_group *
1970                                         RW_MGR_MEM_IF_READ_DQS_WIDTH /
1971                                         RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1972                         stop = (bit_chk == 0);
1973                 }
1974                 sticky_bit_chk = sticky_bit_chk | bit_chk;
1975                 stop = stop && (sticky_bit_chk == param->read_correct_mask);
1976
1977                 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == %u && %u\n",
1978                            __func__, __LINE__, d, sticky_bit_chk,
1979                            param->read_correct_mask, stop);
1980
1981                 if (stop == 1) {
1982                         break;
1983                 } else {
1984                         for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1985                                 if (bit_chk & 1) {
1986                                         /* Remember a passing test as
1987                                         the right_edge */
1988                                         right_edge[i] = d;
1989                                 } else {
1990                                         if (d != 0) {
1991                                                 /* If a right edge has not been
1992                                                 seen yet, then a future passing
1993                                                 test will mark this edge as the
1994                                                 left edge */
1995                                                 if (right_edge[i] ==
1996                                                 IO_IO_IN_DELAY_MAX + 1) {
1997                                                         left_edge[i] = -(d + 1);
1998                                                 }
1999                                         } else {
2000                                                 /* d = 0 failed, but it passed
2001                                                 when testing the left edge,
2002                                                 so it must be marginal,
2003                                                 set it to -1 */
2004                                                 if (right_edge[i] ==
2005                                                         IO_IO_IN_DELAY_MAX + 1 &&
2006                                                         left_edge[i] !=
2007                                                         IO_IO_IN_DELAY_MAX
2008                                                         + 1) {
2009                                                         right_edge[i] = -1;
2010                                                 }
2011                                                 /* If a right edge has not been
2012                                                 seen yet, then a future passing
2013                                                 test will mark this edge as the
2014                                                 left edge */
2015                                                 else if (right_edge[i] ==
2016                                                         IO_IO_IN_DELAY_MAX +
2017                                                         1) {
2018                                                         left_edge[i] = -(d + 1);
2019                                                 }
2020                                         }
2021                                 }
2022
2023                                 debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,d=%u]: ",
2024                                            __func__, __LINE__, d);
2025                                 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
2026                                            (int)(bit_chk & 1), i, left_edge[i]);
2027                                 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2028                                            right_edge[i]);
2029                                 bit_chk = bit_chk >> 1;
2030                         }
2031                 }
2032         }
2033
2034         /* Check that all bits have a window */
2035         for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2036                 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2037                            __func__, __LINE__, i, left_edge[i], i,
2038                            right_edge[i]);
2039                 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) ||
2040                     (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)) {
2041                         /*
2042                          * Restore delay chain settings before letting the loop
2043                          * in rw_mgr_mem_calibrate_vfifo to retry different
2044                          * dqs/ck relationships.
2045                          */
2046                         scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
2047                         if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2048                                 scc_mgr_set_dqs_en_delay(read_group,
2049                                                          start_dqs_en);
2050                         }
2051                         scc_mgr_load_dqs(read_group);
2052                         writel(0, &sdr_scc_mgr->update);
2053
2054                         debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to find edge [%u]: %d %d\n",
2055                                    __func__, __LINE__, i, left_edge[i],
2056                                    right_edge[i]);
2057                         if (use_read_test) {
2058                                 set_failing_group_stage(read_group *
2059                                         RW_MGR_MEM_DQ_PER_READ_DQS + i,
2060                                         CAL_STAGE_VFIFO,
2061                                         CAL_SUBSTAGE_VFIFO_CENTER);
2062                         } else {
2063                                 set_failing_group_stage(read_group *
2064                                         RW_MGR_MEM_DQ_PER_READ_DQS + i,
2065                                         CAL_STAGE_VFIFO_AFTER_WRITES,
2066                                         CAL_SUBSTAGE_VFIFO_CENTER);
2067                         }
2068                         return 0;
2069                 }
2070         }
2071
2072         /* Find middle of window for each DQ bit */
2073         mid_min = left_edge[0] - right_edge[0];
2074         min_index = 0;
2075         for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2076                 mid = left_edge[i] - right_edge[i];
2077                 if (mid < mid_min) {
2078                         mid_min = mid;
2079                         min_index = i;
2080                 }
2081         }
2082
2083         /*
2084          * -mid_min/2 represents the amount that we need to move DQS.
2085          * If mid_min is odd and positive we'll need to add one to
2086          * make sure the rounding in further calculations is correct
2087          * (always bias to the right), so just add 1 for all positive values.
2088          */
2089         if (mid_min > 0)
2090                 mid_min++;
2091
2092         mid_min = mid_min / 2;
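        /*
         * Rounding example: mid_min = 5 becomes (5 + 1) / 2 = 3, while
         * mid_min = -5 stays -5 / 2 = -2 (C division truncates toward
         * zero), so the centre always lands on or to the right of the
         * exact midpoint.
         */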
2093
2094         debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2095                    __func__, __LINE__, mid_min, min_index);
2096
2097         /* Determine the amount we can change DQS (which is -mid_min) */
2098         orig_mid_min = mid_min;
2099         new_dqs = start_dqs - mid_min;
2100         if (new_dqs > IO_DQS_IN_DELAY_MAX)
2101                 new_dqs = IO_DQS_IN_DELAY_MAX;
2102         else if (new_dqs < 0)
2103                 new_dqs = 0;
2104
2105         mid_min = start_dqs - new_dqs;
2106         debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2107                    mid_min, new_dqs);
2108
2109         if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2110                 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2111                         mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2112                 else if (start_dqs_en - mid_min < 0)
2113                         mid_min += start_dqs_en - mid_min;
2114         }
2115         new_dqs = start_dqs - mid_min;
2116
2117         debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2118                    start_dqs,
2119                    IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2120                    new_dqs, mid_min);
2121
2122         /* Initialize data for export structures */
2123         dqs_margin = IO_IO_IN_DELAY_MAX + 1;
2124         dq_margin  = IO_IO_IN_DELAY_MAX + 1;
2125
2126         /* add delay to bring centre of all DQ windows to the same "level" */
2127         for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
2128                 /* Use values before divide by 2 to reduce round off error */
2129                 shift_dq = (left_edge[i] - right_edge[i] -
2130                         (left_edge[min_index] - right_edge[min_index]))/2  +
2131                         (orig_mid_min - mid_min);
2132
2133                 debug_cond(DLEVEL == 2, "vfifo_center: before: shift_dq[%u]=%d\n",
2134                            i, shift_dq);
2135
2136                 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
2137                 temp_dq_in_delay1 = readl(addr + (p << 2));
2138                 temp_dq_in_delay2 = readl(addr + (i << 2));
2139
2140                 if (shift_dq + (int32_t)temp_dq_in_delay1 >
2141                         (int32_t)IO_IO_IN_DELAY_MAX) {
2142                         shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
2143                 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
2144                         shift_dq = -(int32_t)temp_dq_in_delay1;
2145                 }
2146                 debug_cond(DLEVEL == 2, "vfifo_center: after: shift_dq[%u]=%d\n",
2147                            i, shift_dq);
2148                 final_dq[i] = temp_dq_in_delay1 + shift_dq;
2149                 scc_mgr_set_dq_in_delay(p, final_dq[i]);
2150                 scc_mgr_load_dq(p);
2151
2152                 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
2153                            left_edge[i] - shift_dq + (-mid_min),
2154                            right_edge[i] + shift_dq - (-mid_min));
2155                 /* To determine values for export structures */
2156                 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2157                         dq_margin = left_edge[i] - shift_dq + (-mid_min);
2158
2159                 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2160                         dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2161         }
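        /*
         * Worked example of the per-bit shift above (hypothetical
         * windows): if bit i has a window of 10 taps while the narrowest
         * bit (min_index) has 6, and DQS itself was not clamped
         * (orig_mid_min == mid_min), then shift_dq = (10 - 6) / 2 = 2,
         * centring the wider window on the same point as the narrowest.
         */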
2162
2163         final_dqs = new_dqs;
2164         if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2165                 final_dqs_en = start_dqs_en - mid_min;
2166
2167         /* Move DQS-en */
2168         if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2169                 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
2170                 scc_mgr_load_dqs(read_group);
2171         }
2172
2173         /* Move DQS */
2174         scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
2175         scc_mgr_load_dqs(read_group);
2176         debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d\n",
2177                    __func__, __LINE__, dq_margin, dqs_margin);
2179
2180         /*
2181          * Do not remove this line as it makes sure all of our decisions
2182          * have been applied. Apply the update bit.
2183          */
2184         writel(0, &sdr_scc_mgr->update);
2185
2186         return (dq_margin >= 0) && (dqs_margin >= 0);
2187 }
2188
2189 /**
2190  * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2191  * @rw_group:           Read/Write Group
2192  * @test_bgn:           Rank at which the test begins
2193  *
2194  * Stage 1: Calibrate the read valid prediction FIFO.
2195  *
2196  * This function implements UniPHY calibration Stage 1, as explained in
2197  * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2198  *
2199  * - read valid prediction will consist of finding:
2200  *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2201  *   - DQS input phase  and DQS input delay (DQ/DQS Centering)
2202  * - we also do a per-bit deskew on the DQ lines.
2203  */
2204 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
2205 {
2206         uint32_t p, d, rank_bgn, sr;
2207         uint32_t dtaps_per_ptap;
2208         uint32_t bit_chk;
2209         uint32_t grp_calibrated;
2210         uint32_t failed_substage;
2211
2212         debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2213
2214         /* update info for sims */
2215         reg_file_set_stage(CAL_STAGE_VFIFO);
2216
2217         /* USER Determine number of delay taps for each phase tap */
2218         dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
2219                                       IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
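        /*
         * With the same hypothetical tap delays as earlier (416 ps per
         * phase tap, 25 ps per delay-chain tap) this would give
         * DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16 taps.
         */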
2220
2221         /* update info for sims */
2222         reg_file_set_group(rw_group);
2223
2224         reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2225         failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2226
2227         for (d = 0; d <= dtaps_per_ptap; d += 2) {
2228                 /*
2229                  * In RLDRAMX we may be messing with the delay of pins in
2230                  * the same write rw_group but outside of the current read
2231                  * rw_group, but that's ok because we haven't calibrated
2232                  * the output side yet.
2233                  */
2234                 if (d > 0) {
2235                         scc_mgr_apply_group_all_out_delay_add_all_ranks(
2236                                                                 rw_group, d);
2237                 }
2238
2239                 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
2240                         /* set a particular dqdqs phase */
2241                         scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, p);
2242
2243                         debug_cond(DLEVEL == 1,
2244                                    "%s:%d calibrate_vfifo: g=%u p=%u d=%u\n",
2245                                    __func__, __LINE__, rw_group, p, d);
2246
2247                         /*
2248                          * Load up the patterns used by read calibration
2249                          * using current DQDQS phase.
2250                          */
2251                         rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2252                         if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
2253                                 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
2254                                                                 (rw_group, 1, &bit_chk)) {
2255                                         debug_cond(DLEVEL == 1,
2256                                                    "%s:%d Guaranteed read test failed: g=%u p=%u d=%u\n",
2257                                                    __func__, __LINE__, rw_group, p, d);
2258                                         break;
2259                                 }
2260                         }
2261
2262                         /* case:56390 */
2263                         if (!rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
2264                             (rw_group, rw_group, test_bgn)) {
2265                                 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2266                                 continue;
2267                         }
2268
2269                         /*
2270                          * USER Read per-bit deskew can be done on a
2271                          * per shadow register basis.
2272                          */
2273                         grp_calibrated = 1;
2274                         for (rank_bgn = 0, sr = 0;
2275                              rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2276                              rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
2277                                 /*
2278                                  * Determine if this set of ranks
2279                                  * should be skipped entirely.
2280                                  */
2281                                 if (param->skip_shadow_regs[sr])
2282                                         continue;
2283                                 /*
2284                                  * If doing read after write
2285                                  * calibration, do not update
2286                                  * FOM now - do it then.
2287                                  */
2288                                 if (rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2289                                                         rw_group, rw_group,
2290                                                         test_bgn, 1, 0))
2291                                         continue;
2292
2293                                 grp_calibrated = 0;
2294                                 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
2295                         }
2296
2297                         if (grp_calibrated)
2298                                 goto cal_done_ok;
2299                 }
2300         }
2301
2302         /* Calibration Stage 1 failed. */
2303         set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
2304         return 0;
2305
2306         /* Calibration Stage 1 completed OK. */
2307 cal_done_ok:
2308         /*
2309          * Reset the delay chains back to zero if they have moved > 1
2310          * (check for > 1 because the loop will increase d even when it
2311          * passes in the first case).
2312          */
2313         if (d > 2)
2314                 scc_mgr_zero_group(rw_group, 1);
2315
2316         return 1;
2317 }
2318
2319 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */
2320 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2321                                                uint32_t test_bgn)
2322 {
2323         uint32_t rank_bgn, sr;
2324         uint32_t grp_calibrated;
2325         uint32_t write_group;
2326
2327         debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
2328
2329         /* update info for sims */
2330         reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2332         reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2333
2334         write_group = read_group;
2335
2336         /* update info for sims */
2337         reg_file_set_group(read_group);
2338
2339         grp_calibrated = 1;
2340         /* Read per-bit deskew can be done on a per shadow register basis */
2341         for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2342                 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2343                 /* Determine if this set of ranks should be skipped entirely */
2344                 if (!param->skip_shadow_regs[sr]) {
2345                         /* This is the last calibration round, update FOM here */
2346                         if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2347                                                                 write_group,
2348                                                                 read_group,
2349                                                                 test_bgn, 0,
2350                                                                 1)) {
2351                                 grp_calibrated = 0;
2352                         }
2353                 }
2354         }
2355
2357         if (grp_calibrated == 0) {
2358                 set_failing_group_stage(write_group,
2359                                         CAL_STAGE_VFIFO_AFTER_WRITES,
2360                                         CAL_SUBSTAGE_VFIFO_CENTER);
2361                 return 0;
2362         }
2363
2364         return 1;
2365 }
2366
2367 /* Calibrate LFIFO to find smallest read latency */
2368 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2369 {
2370         uint32_t found_one;
2371         uint32_t bit_chk;
2372
2373         debug("%s:%d\n", __func__, __LINE__);
2374
2375         /* update info for sims */
2376         reg_file_set_stage(CAL_STAGE_LFIFO);
2377         reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2378
2379         /* Load up the patterns used by read calibration for all ranks */
2380         rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2381         found_one = 0;
2382
2383         do {
2384                 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2385                 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u\n",
2386                            __func__, __LINE__, gbl->curr_read_lat);
2387
2388                 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
2389                                                               NUM_READ_TESTS,
2390                                                               PASS_ALL_BITS,
2391                                                               &bit_chk, 1)) {
2392                         break;
2393                 }
2394
2395                 found_one = 1;
2396                 /* Reduce read latency and see if things are working correctly */
2398                 gbl->curr_read_lat--;
2399         } while (gbl->curr_read_lat > 0);
2400
2401         /* reset the fifos to get pointers to known state */
2402         writel(0, &phy_mgr_cmd->fifo_reset);
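        /*
         * E.g. (hypothetical run): starting from curr_read_lat = 8,
         * reads keep passing down to 3; the decrement to 2 then fails,
         * the loop above exits with curr_read_lat = 2, and the fudge
         * factor below restores a safe 2 + 2 = 4.
         */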
2404
2405         if (found_one) {
2406                 /* add a fudge factor to the read latency that was determined */
2407                 gbl->curr_read_lat += 2;
2408                 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2409                 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using read_lat=%u\n",
2410                            __func__, __LINE__, gbl->curr_read_lat);
2412                 return 1;
2413         } else {
2414                 set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2415                                         CAL_SUBSTAGE_READ_LATENCY);
2416
2417                 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial read_lat=%u\n",
2418                            __func__, __LINE__, gbl->curr_read_lat);
2420                 return 0;
2421         }
2422 }
2423
2424 /*
2425  * Issue a write test command. Two variants are provided: one that just
2426  * tests a write pattern and another that tests datamask functionality.
2427  */
2429 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
2430                                                   uint32_t test_dm)
2431 {
2432         uint32_t mcc_instruction;
2433         uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
2434                 ENABLE_SUPER_QUICK_CALIBRATION);
2435         uint32_t rw_wl_nop_cycles;
2436         uint32_t addr;
2437
2438         /*
2439          * Set counter and jump addresses for the right
2440          * number of NOP cycles.
2441          * The number of supported NOP cycles can range from -1 to infinity.
2442          * Three different cases are handled:
2443          *
2444          * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
2445          *    mechanism will be used to insert the right number of NOPs.
2446          *
2447          * 2. For a number of NOP cycles equal to 0, the micro-instruction
2448          *    issuing the write command will jump straight to the
2449          *    micro-instruction that turns on DQS (for DDRx), or outputs write
2450          *    data (for RLD), skipping the NOP micro-instruction altogether.
2451          *
2452          * 3. A number of NOP cycles equal to -1 indicates that DQS must be
2453          *    turned on in the same micro-instruction that issues the write
2454          *    command. Then we need to directly jump to the micro-instruction
2455          *    that sends out the data.
2456          *
2457          * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
2458          *       (2 and 3). One jump-counter (0) is used to perform multiple
2459          *       write-read operations, leaving one counter to issue this
2460          *       command in "multiple-group" mode.
2461          */
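        /*
         * Example of case 1 (hypothetical value): for rw_wl_nop_cycles = 3
         * the code below programs load_cntr2 = 0 (fall through, no jump)
         * and load_cntr3 = 3 - 1 = 2, so the NOP micro-instruction at
         * load_jump_add3 supplies the remaining cycles.
         */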
2463
2464         rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
2465
2466         if (rw_wl_nop_cycles == -1) {
2467                 /*
2468                  * CNTR 2 - We want to execute the special write operation that
2469                  * turns on DQS right away and then skip directly to the
2470                  * instruction that sends out the data. We set the counter to a
2471                  * large number so that the jump is always taken.
2472                  */
2473                 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2474
2475                 /* CNTR 3 - Not used */
2476                 if (test_dm) {
2477                         mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
2478                         writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
2479                                &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2480                         writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2481                                &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2482                 } else {
2483                         mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
2484                         writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
2485                                 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2486                         writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2487                                 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2488                 }
2489         } else if (rw_wl_nop_cycles == 0) {
2490                 /*
2491                  * CNTR 2 - We want to skip the NOP operation and go straight
2492                  * to the DQS enable instruction. We set the counter to a large
2493                  * number so that the jump is always taken.
2494                  */
2495                 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2496
2497                 /* CNTR 3 - Not used */
2498                 if (test_dm) {
2499                         mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2500                         writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
2501                                &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2502                 } else {
2503                         mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2504                         writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
2505                                 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2506                 }
2507         } else {
2508                 /*
2509                  * CNTR 2 - In this case we want to execute the next instruction
2510                  * and NOT take the jump. So we set the counter to 0. The jump
2511                  * address doesn't count.
2512                  */
2513                 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
2514                 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2515
2516                 /*
2517                  * CNTR 3 - Set the nop counter to the number of cycles we
2518                  * need to loop for, minus 1.
2519                  */
2520                 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
2521                 if (test_dm) {
2522                         mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2523                         writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2524                                 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2525                 } else {
2526                         mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2527                         writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2528                                 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2529                 }
2530         }
2531
2532         writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
2533                   RW_MGR_RESET_READ_DATAPATH_OFFSET);
2534
2535         if (quick_write_mode)
2536                 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
2537         else
2538                 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
2539
2540         writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
2541
2542         /*
2543          * CNTR 1 - This is used to ensure enough time elapses
2544          * for read data to come back.
2545          */
2546         writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
2547
2548         if (test_dm) {
2549                 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
2550                         &sdr_rw_load_jump_mgr_regs->load_jump_add1);
2551         } else {
2552                 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
2553                         &sdr_rw_load_jump_mgr_regs->load_jump_add1);
2554         }
2555
2556         addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
2557         writel(mcc_instruction, addr + (group << 2));
2558 }
2559
2560 /* Test writes, can check for a single bit pass or multiple bit pass */
2561 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
2562         uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
2563         uint32_t *bit_chk, uint32_t all_ranks)
2564 {
2565         uint32_t r;
2566         uint32_t correct_mask_vg;
2567         uint32_t tmp_bit_chk;
2568         uint32_t vg;
2569         uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
2570                 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
2571         uint32_t addr_rw_mgr;
2572         uint32_t base_rw_mgr;
2573
2574         *bit_chk = param->write_correct_mask;
2575         correct_mask_vg = param->write_correct_mask_vg;
2576
2577         for (r = rank_bgn; r < rank_end; r++) {
2578                 if (param->skip_ranks[r]) {
2579                         /* request to skip the rank */
2580                         continue;
2581                 }
2582
2583                 /* set rank */
2584                 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
2585
2586                 tmp_bit_chk = 0;
2587                 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
2588                 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
2589                         /* Reset the FIFOs to get pointers to a known state. */
2590                         writel(0, &phy_mgr_cmd->fifo_reset);
2591
2592                         tmp_bit_chk = tmp_bit_chk <<
2593                                 (RW_MGR_MEM_DQ_PER_WRITE_DQS /
2594                                 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
2595                         rw_mgr_mem_calibrate_write_test_issue(write_group *
2596                                 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
2597                                 use_dm);
2598
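                        /*
                         * The RW manager status reads back as per-bit fail
                         * flags here; inverting it and masking with
                         * correct_mask_vg yields per-bit pass flags for
                         * this virtual group.
                         */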
2599                         base_rw_mgr = readl(addr_rw_mgr);
2600                         tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
2601                         if (vg == 0)
2602                                 break;
2603                 }
2604                 *bit_chk &= tmp_bit_chk;
2605         }
2606
2607         if (all_correct) {
2608                 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2609                 debug_cond(DLEVEL == 2,
2610                            "write_test(%u,%u,ALL) : %u == %u => %lu\n",
2611                            write_group, use_dm,
2612                            *bit_chk, param->write_correct_mask,
2613                            (long unsigned int)(*bit_chk ==
2614                            param->write_correct_mask));
2615                 return *bit_chk == param->write_correct_mask;
2616         } else {
2617                 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2618                 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != %lu => %lu\n",
2619                            write_group, use_dm, *bit_chk, (long unsigned int)0,
2620                            (long unsigned int)(*bit_chk != 0));
2621                 return *bit_chk != 0x00;
2622         }
2623 }
2624
2625 /*
2626  * Center all windows. Do per-bit deskew to possibly increase the size of
2627  * certain windows.
2628  */
2629 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
2630         uint32_t write_group, uint32_t test_bgn)
2631 {
2632         uint32_t i, p, min_index;
2633         int32_t d;
2634         uint32_t bit_chk;
2635         uint32_t sticky_bit_chk;
2636         /*
2637          * Store the edges as signed, since they are compared
2638          * against signed numbers.
2639          */
2640         int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2641         int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2642         int32_t mid;
2643         int32_t mid_min, orig_mid_min;
2644         int32_t new_dqs, start_dqs, shift_dq;
2645         int32_t dq_margin, dqs_margin, dm_margin;
2646         uint32_t stop;
2647         uint32_t temp_dq_out1_delay;
2648         uint32_t addr;
2649
2650         debug("%s:%d %u %u\n", __func__, __LINE__, write_group, test_bgn);
2651
2652         dm_margin = 0;
2653
2654         addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2655         start_dqs = readl(addr +
2656                           (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2657
2658         /* per-bit deskew */
2659
2660         /*
2661          * Set the left and right edge of each bit to an illegal value,
2662          * using (IO_IO_OUT1_DELAY_MAX + 1) as the illegal value.
2663          */
2664         sticky_bit_chk = 0;
2665         for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2666                 left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
2667                 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2668         }
2669
2670         /* Search for the left edge of the window for each bit */
2671         for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
2672                 scc_mgr_apply_group_dq_out1_delay(write_group, d);
2673
2674                 writel(0, &sdr_scc_mgr->update);
2675
2676                 /*
2677                  * Stop searching when the write test doesn't pass AND
2678                  * every bit has already passed at least once.
2679                  */
2680                 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2681                         0, PASS_ONE_BIT, &bit_chk, 0);
2682                 sticky_bit_chk = sticky_bit_chk | bit_chk;
2683                 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2684                 debug_cond(DLEVEL == 2,
2685                            "write_center(left): dtap=%d => %u == %u && %u [bit_chk=%u]\n",
2686                            d, sticky_bit_chk, param->write_correct_mask,
2687                            stop, bit_chk);
2688
2689                 if (stop == 1) {
2690                         break;
2691                 } else {
2692                         for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2693                                 if (bit_chk & 1) {
2694                                         /*
2695                                          * Remember a passing test as the
2696                                          * left_edge.
2697                                          */
2698                                         left_edge[i] = d;
2699                                 } else {
2700                                         /*
2701                                          * If a left edge has not been seen
2702                                          * yet, then a future passing test will
2703                                          * mark this edge as the right edge.
2704                                          */
2705                                         if (left_edge[i] ==
2706                                                 IO_IO_OUT1_DELAY_MAX + 1) {
2707                                                 right_edge[i] = -(d + 1);
2708                                         }
2709                                 }
2710                                 debug_cond(DLEVEL == 2, "write_center(l,d=%d):", d);
2711                                 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2712                                            (int)(bit_chk & 1), i, left_edge[i]);
2713                                 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2714                                        right_edge[i]);
2715                                 bit_chk = bit_chk >> 1;
2716                         }
2717                 }
2718         }
2719
2720         /* Reset DQ delay chains to 0 */
2721         scc_mgr_apply_group_dq_out1_delay(0);
2722         sticky_bit_chk = 0;
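        /*
         * Re-seed sticky_bit_chk from the left-edge results, so that the
         * right-edge search's stop condition only waits on bits that have
         * already shown a passing window.
         */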
2723         for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
2724                 debug_cond(DLEVEL == 2,
2725                            "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2726                            __func__, __LINE__, i, left_edge[i], i, right_edge[i]);
2727
2728                 /*
2729                  * Check for cases where we haven't found the left edge,
2730                  * which makes our assignment of the right edge invalid.
2731                  * Reset it to the illegal value.
2732                  */
2733                 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
2734                     (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
2735                         right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2736                         debug_cond(DLEVEL == 2,
2737                                    "%s:%d write_center: reset right_edge[%u]: %d\n",
2738                                    __func__, __LINE__, i, right_edge[i]);
2739                 }
2740
2741                 /*
2742                  * Reset sticky bit (except for bits where we have
2743                  * seen the left edge).
2744                  */
2745                 sticky_bit_chk = sticky_bit_chk << 1;
2746                 if (left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)
2747                         sticky_bit_chk = sticky_bit_chk | 1;
2748
2749                 if (i == 0)
2750                         break;
2751         }
2752
2753         /* Search for the right edge of the window for each bit */
2754         for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
2755                 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2756                                                         d + start_dqs);
2757
2758                 writel(0, &sdr_scc_mgr->update);
2759
2760                 /*
2761                  * Stop searching when the write test doesn't pass AND
2762                  * every bit has already passed at least once.
2763                  */
2764                 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2765                         0, PASS_ONE_BIT, &bit_chk, 0);
2766
2767                 sticky_bit_chk = sticky_bit_chk | bit_chk;
2768                 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2769
2770                 debug_cond(DLEVEL == 2,
2771                            "write_center (right): dtap=%u => %u == %u && %u\n",
2772                            d, sticky_bit_chk, param->write_correct_mask, stop);
2773
2774                 if (stop == 1) {
2775                         if (d == 0) {
2776                                 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
2777                                         i++) {
2778                                         /* d = 0 failed, but it passed when
2779                                          * testing the left edge, so it must
2780                                          * be marginal; set it to -1. */
2781                                         if (right_edge[i] ==
2782                                                 IO_IO_OUT1_DELAY_MAX + 1 &&
2783                                                 left_edge[i] !=
2784                                                 IO_IO_OUT1_DELAY_MAX + 1) {
2785                                                 right_edge[i] = -1;
2786                                         }
2787                                 }
2788                         }
2789                         break;
2790                 } else {
2791                         for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2792                                 if (bit_chk & 1) {
2793                                         /*
2794                                          * Remember a passing test as
2795                                          * the right_edge.
2796                                          */
2797                                         right_edge[i] = d;
2798                                 } else {
2799                                         if (d != 0) {
2800                                                 /*
2801                                                  * If a right edge has not
2802                                                  * been seen yet, then a future
2803                                                  * passing test will mark this
2804                                                  * edge as the left edge.
2805                                                  */
2806                                                 if (right_edge[i] ==
2807                                                     IO_IO_OUT1_DELAY_MAX + 1)
2808                                                         left_edge[i] = -(d + 1);
2809                                         } else {
2810                                                 /*
2811                                                  * d = 0 failed, but it passed
2812                                                  * when testing the left edge,
2813                                                  * so it must be marginal, set
2814                                                  * it to -1.
2815                                                  */
2816                                                 if (right_edge[i] ==
2817                                                     IO_IO_OUT1_DELAY_MAX + 1 &&
2818                                                     left_edge[i] !=
2819                                                     IO_IO_OUT1_DELAY_MAX + 1)
2820                                                         right_edge[i] = -1;
2821                                                 /*
2822                                                  * If a right edge has not been
2823                                                  * seen yet, then a future
2824                                                  * passing test will mark this
2825                                                  * edge as the left edge.
2826                                                  */
2827                                                 else if (right_edge[i] ==
2828                                                         IO_IO_OUT1_DELAY_MAX +
2829                                                         1)
2830                                                         left_edge[i] = -(d + 1);
2831                                         }
2832                                 }
2833                                 debug_cond(DLEVEL == 2, "write_center(r,d=%d):", d);
2834                                 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2835                                            (int)(bit_chk & 1), i, left_edge[i]);
2836                                 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2837                                            right_edge[i]);
2838                                 bit_chk = bit_chk >> 1;
2839                         }
2840                 }
2841         }
2842
2843         /* Check that all bits have a window */
2844         for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2845                 debug_cond(DLEVEL == 2,
2846                            "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2847                            __func__, __LINE__, i, left_edge[i], i, right_edge[i]);
2848                 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
2849                     (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
2850                         set_failing_group_stage(test_bgn + i,
2851                                                 CAL_STAGE_WRITES,
2852                                                 CAL_SUBSTAGE_WRITES_CENTER);
2853                         return 0;
2854                 }
2855         }
2856
2857         /* Find middle of window for each DQ bit */
2858         mid_min = left_edge[0] - right_edge[0];
2859         min_index = 0;
2860         for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2861                 mid = left_edge[i] - right_edge[i];
2862                 if (mid < mid_min) {
2863                         mid_min = mid;
2864                         min_index = i;
2865                 }
2866         }
2867
2868         /*
2869          * -mid_min/2 represents the amount that we need to move DQS.
2870          * If mid_min is odd and positive we'll need to add one to
2871          * make sure the rounding in further calculations is correct
2872          * (always bias to the right), so just add 1 for all positive values.
2873          */
2874         if (mid_min > 0)
2875                 mid_min++;
2876         mid_min = mid_min / 2;
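        /*
         * Worked example (illustrative values): mid_min = 5 becomes
         * (5 + 1) / 2 = 3, while mid_min = -5 truncates to -2, so the
         * rounding is biased toward the right in both cases.
         */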
2877         debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
2878                    __LINE__, mid_min);
2879
2880         /* Determine the amount we can change DQS (which is -mid_min) */
2881         orig_mid_min = mid_min;
2882         new_dqs = start_dqs;
2883         mid_min = 0;
2884         debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
2885                    __func__, __LINE__, start_dqs, new_dqs, mid_min);
2886         /* Initialize data for export structures */
2887         dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
2888         dq_margin  = IO_IO_OUT1_DELAY_MAX + 1;
2889
2890         /* add delay to bring centre of all DQ windows to the same "level" */
2891         for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
2892                 /* Use values before divide by 2 to reduce round off error */
2893                 shift_dq = (left_edge[i] - right_edge[i] -
2894                         (left_edge[min_index] - right_edge[min_index]))/2  +
2895                 (orig_mid_min - mid_min);
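                /*
                 * Illustrative numbers (not from hardware): if this DQ has
                 * left_edge - right_edge = 4 while the min_index DQ has 0,
                 * and orig_mid_min == mid_min, then shift_dq = (4 - 0) / 2 = 2
                 * extra output-delay taps for this DQ.
                 */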
2896
2897                 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq[%u]=%d\n",
2898                            __func__, __LINE__, i, shift_dq);
2899
2900                 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2901                 temp_dq_out1_delay = readl(addr + (i << 2));
2902                 if (shift_dq + (int32_t)temp_dq_out1_delay >
2903                         (int32_t)IO_IO_OUT1_DELAY_MAX) {
2904                         shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
2905                 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
2906                         shift_dq = -(int32_t)temp_dq_out1_delay;
2907                 }
2908                 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
2909                            i, shift_dq);
2910                 scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
2911                 scc_mgr_load_dq(i);
2912
2913                 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
2914                            left_edge[i] - shift_dq + (-mid_min),
2915                            right_edge[i] + shift_dq - (-mid_min));
2916                 /* To determine values for export structures */
2917                 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2918                         dq_margin = left_edge[i] - shift_dq + (-mid_min);
2919
2920                 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2921                         dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2922         }
2923
2924         /* Move DQS */
2925         scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
2926         writel(0, &sdr_scc_mgr->update);
2927
2928         /* Centre DM */
2929         debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
2930
2931         /*
2932          * Set the left and right edge of each bit to an illegal value,
2933          * using (IO_IO_OUT1_DELAY_MAX + 1) as the illegal value.
2934          */
2935         left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
2936         right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2937         int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2938         int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2939         int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2940         int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
2941         int32_t win_best = 0;
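        /*
         * Sketch of the search below: the DM window is probed in two
         * sweeps. First the DM out1 delay is swept from max down to 0
         * (positions recorded as negative), then the DQS/OCT out1 delay
         * is swept up from new_dqs (positions recorded as positive); the
         * largest contiguous passing run across both sweeps is kept in
         * bgn_best/end_best/win_best.
         */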
2942
2943         /* Search for the window (or part of it) using DM delay shifts. */
2944         for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
2945                 scc_mgr_apply_group_dm_out1_delay(d);
2946                 writel(0, &sdr_scc_mgr->update);
2947
2948                 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2949                                                     PASS_ALL_BITS, &bit_chk,
2950                                                     0)) {
2951                         /* Set the current end of the window. */
2952                         end_curr = -d;
2953                         /*
2954                          * If a starting edge of our window has not been
2955                          * seen yet, this is the current start of the DM window.
2956                          */
2957                         if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2958                                 bgn_curr = -d;
2959
2960                         /*
2961                          * If the current window is bigger than the best
2962                          * seen so far, record it as the best.
2963                          */
2964                         if ((end_curr - bgn_curr + 1) > win_best) {
2965                                 win_best = end_curr - bgn_curr + 1;
2966                                 bgn_best = bgn_curr;
2967                                 end_best = end_curr;
2968                         }
2969                 } else {
2970                         /* We just saw a failing test. Reset temp edges. */
2971                         bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2972                         end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2973                 }
2974         }
2975
2977         /* Reset DM delay chains to 0 */
2978         scc_mgr_apply_group_dm_out1_delay(0);
2979
2980         /*
2981          * Check if the current window nudges up against 0 delay. If so,
2982          * continue the search by shifting DQS; otherwise start a new search.
2983          */
2984         if (end_curr != 0) {
2985                 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2986                 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2987         }
2988
2989         /* Search for the window (or part of it) using DQS delay shifts. */
2990         for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
2991                 /*
2992                  * Note: this only shifts DQS, so we may be limiting
2993                  * ourselves to the width of DQ unnecessarily.
2994                  */
2995                 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2996                                                         d + new_dqs);
2997
2998                 writel(0, &sdr_scc_mgr->update);
2999                 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
3000                                                     PASS_ALL_BITS, &bit_chk,
3001                                                     0)) {
3002                         /* Set the current end of the window. */
3003                         end_curr = d;
3004                         /*
3005                          * If a beginning edge of our window has not been
3006                          * seen yet, this is the current beginning of the DM window.
3007                          */
3008                         if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
3009                                 bgn_curr = d;
3010
3011                         /*
3012                          * If the current window is bigger than the best
3013                          * seen so far, record it as the best.
3014                          */
3015                         if ((end_curr - bgn_curr + 1) > win_best) {
3016                                 win_best = end_curr - bgn_curr + 1;
3017                                 bgn_best = bgn_curr;
3018                                 end_best = end_curr;
3019                         }
3020                 } else {
3021                         /* We just saw a failing test. Reset temp edges. */
3022                         bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3023                         end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3024
3025                         /*
3026                          * Early exit optimization: if the remaining delay chain
3027                          * space is less than the largest window seen, exit.
3028                          */
3029                         if ((win_best - 1) >
3030                             (IO_IO_OUT1_DELAY_MAX - new_dqs - d))
3031                                 break;
3032                 }
3033         }
3034
3035         /* Assign left and right edge for calibration and reporting. */
3036         left_edge[0] = -1 * bgn_best;
3037         right_edge[0] = end_best;
3038
3039         debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
3040                    __LINE__, left_edge[0], right_edge[0]);
3041
3042         /* Move DQS (back to orig) */
3043         scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3044
3045         /* Move DM */
3046
3047         /* Find middle of window for the DM bit */
3048         mid = (left_edge[0] - right_edge[0]) / 2;
3049
3050         /* only move right, since we are not moving DQS/DQ */
3051         if (mid < 0)
3052                 mid = 0;
3053
3054         /* dm_margin should fail if we never find a window. */
3055         if (win_best == 0)
3056                 dm_margin = -1;
3057         else
3058                 dm_margin = left_edge[0] - mid;
3059
3060         scc_mgr_apply_group_dm_out1_delay(mid);
3061         writel(0, &sdr_scc_mgr->update);
3062
3063         debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3064                    __func__, __LINE__, left_edge[0],
3065                    right_edge[0], mid, dm_margin);
3066         /* Export values */
3067         gbl->fom_out += dq_margin + dqs_margin;
3068
3069         debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3070                    __func__, __LINE__,
3071                    dq_margin, dqs_margin, dm_margin);
3072
3073         /*
3074          * Do not remove this line as it makes sure all of our
3075          * decisions have been applied.
3076          */
3077         writel(0, &sdr_scc_mgr->update);
3078         return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
3079 }
3080
3081 /* calibrate the write operations */
3082 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
3083         uint32_t test_bgn)
3084 {
3085         /* update info for sims */
3086         debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
3087
3088         reg_file_set_stage(CAL_STAGE_WRITES);
3089         reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3090
3091         reg_file_set_group(g);
3092
3093         if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
3094                 set_failing_group_stage(g, CAL_STAGE_WRITES,
3095                                         CAL_SUBSTAGE_WRITES_CENTER);
3096                 return 0;
3097         }
3098
3099         return 1;
3100 }
3101
3102 /**
3103  * mem_precharge_and_activate() - Precharge all banks and activate
3104  *
3105  * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3106  */
3107 static void mem_precharge_and_activate(void)
3108 {
3109         int r;
3110
3111         for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3112                 /* Test if the rank should be skipped. */
3113                 if (param->skip_ranks[r])
3114                         continue;
3115
3116                 /* Set rank. */
3117                 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3118
3119                 /* Precharge all banks. */
3120                 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3121                                              RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3122
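                /*
                 * Counter values are presumably N - 1, following the NOP
                 * counter convention above, so 0x0F runs each
                 * activate-wait sequence 16 times.
                 */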
3123                 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3124                 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3125                         &sdr_rw_load_jump_mgr_regs->load_jump_add0);
3126
3127                 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3128                 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3129                         &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3130
3131                 /* Activate rows. */
3132                 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3133                                                 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3134         }
3135 }
3136
3137 /**
3138  * mem_init_latency() - Configure memory RLAT and WLAT settings
3139  *
3140  * Configure memory RLAT and WLAT parameters.
3141  */
3142 static void mem_init_latency(void)
3143 {
3144         /*
3145          * For AV/CV, LFIFO is hardened and always runs at full rate
3146          * so max latency in AFI clocks, used here, is correspondingly
3147          * smaller.
3148          */
3149         const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
3150         u32 rlat, wlat;
3151
3152         debug("%s:%d\n", __func__, __LINE__);
3153
3154         /*
3155          * Read in write latency.
3156          * WL for Hard PHY does not include additive latency.
3157          */
3158         wlat = readl(&data_mgr->t_wl_add);
3159         wlat += readl(&data_mgr->mem_t_add);
3160
3161         gbl->rw_wl_nop_cycles = wlat - 1;
3162
3163         /* Read in read latency. */
3164         rlat = readl(&data_mgr->t_rl_add);
3165
3166         /* Set a pretty high read latency initially. */
3167         gbl->curr_read_lat = rlat + 16;
3168         if (gbl->curr_read_lat > max_latency)
3169                 gbl->curr_read_lat = max_latency;
3170
3171         writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3172
3173         /* Advertise write latency. */
3174         writel(wlat, &phy_mgr_cfg->afi_wlat);
3175 }
3176
3177 /**
3178  * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3179  *
3180  * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3181  */
3182 static void mem_skip_calibrate(void)
3183 {
3184         uint32_t vfifo_offset;
3185         uint32_t i, j, r;
3186
3187         debug("%s:%d\n", __func__, __LINE__);
3188         /* Need to update every shadow register set used by the interface */
3189         for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3190              r += NUM_RANKS_PER_SHADOW_REG) {
3191                 /*
3192                  * Set output phase alignment settings appropriate for
3193                  * skip calibration.
3194                  */
3195                 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3196                         scc_mgr_set_dqs_en_phase(i, 0);
3197 #if IO_DLL_CHAIN_LENGTH == 6
3198                         scc_mgr_set_dqdqs_output_phase(i, 6);
3199 #else
3200                         scc_mgr_set_dqdqs_output_phase(i, 7);
3201 #endif
3202                         /*
3203                          * Case:33398
3204                          *
3205                          * Write data arrives to the I/O two cycles before write
3206                          * latency is reached (720 deg).
3207                          *   -> due to bit-slip in a/c bus
3208                          *   -> to allow board skew where dqs is longer than ck
3209                          *      -> how often can this happen!?
3210                          *      -> can claim back some ptaps for high freq
3211                          *       support if we can relax this, but i digress...
3212                          *
3213                          * The write_clk leads mem_ck by 90 deg
3214                          * The minimum ptap of the OPA is 180 deg
3215                          * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
3216                          * The write_clk is always delayed by 2 ptaps
3217                          *
3218                          * Hence, to make DQS aligned to CK, we need to delay
3219                          * DQS by:
3220                          *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3221                          *
3222                          * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
3223                          * gives us the number of ptaps, which simplifies to:
3224                          *
3225                          *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3226                          */
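                        /*
                         * Worked example with an assumed
                         * IO_DLL_CHAIN_LENGTH of 8: each ptap is
                         * 360 / 8 = 45 deg, so the required delay is
                         * (720 - 90 - 180 - 2 * 45) / 45 = 8 ptaps,
                         * matching 1.25 * 8 - 2 = 8.
                         */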
3227                         scc_mgr_set_dqdqs_output_phase(i,
3228                                         1.25 * IO_DLL_CHAIN_LENGTH - 2);
3229                 }
3230                 writel(0xff, &sdr_scc_mgr->dqs_ena);
3231                 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3232
3233                 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3234                         writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3235                                   SCC_MGR_GROUP_COUNTER_OFFSET);
3236                 }
3237                 writel(0xff, &sdr_scc_mgr->dq_ena);
3238                 writel(0xff, &sdr_scc_mgr->dm_ena);
3239                 writel(0, &sdr_scc_mgr->update);
3240         }
3241
3242         /* Compensate for simulation model behaviour */
3243         for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3244                 scc_mgr_set_dqs_bus_in_delay(i, 10);
3245                 scc_mgr_load_dqs(i);
3246         }
3247         writel(0, &sdr_scc_mgr->update);
3248
3249         /*
3250          * Arria V has hard FIFOs that can only be initialized by
3251          * incrementing them in the sequencer.
3252          */
3253         vfifo_offset = CALIB_VFIFO_OFFSET;
3254         for (j = 0; j < vfifo_offset; j++)
3255                 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3256         writel(0, &phy_mgr_cmd->fifo_reset);
3257
3258         /*
3259          * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3260          * setting from generation-time constant.
3261          */
3262         gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3263         writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3264 }
3265
3266 /**
3267  * mem_calibrate() - Memory calibration entry point.
3268  *
3269  * Perform memory calibration.
3270  */
3271 static uint32_t mem_calibrate(void)
3272 {
3273         uint32_t i;
3274         uint32_t rank_bgn, sr;
3275         uint32_t write_group, write_test_bgn;
3276         uint32_t read_group, read_test_bgn;
3277         uint32_t run_groups, current_run;
3278         uint32_t failing_groups = 0;
3279         uint32_t group_failed = 0;
3280
3281         const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
3282                                 RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
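        /*
         * Example (illustrative widths): a read DQS width of 8 with a
         * write DQS width of 2 gives rwdqs_ratio = 4, i.e. four read
         * groups are calibrated per write group below.
         */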
3283
3284         debug("%s:%d\n", __func__, __LINE__);
3285
3286         /* Initialize the data settings */
3287         gbl->error_substage = CAL_SUBSTAGE_NIL;
3288         gbl->error_stage = CAL_STAGE_NIL;
3289         gbl->error_group = 0xff;
3290         gbl->fom_in = 0;
3291         gbl->fom_out = 0;
3292
3293         /* Initialize WLAT and RLAT. */
3294         mem_init_latency();
3295
3296         /* Initialize bit slips. */
3297         mem_precharge_and_activate();
3298
3299         for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3300                 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3301                           SCC_MGR_GROUP_COUNTER_OFFSET);
3302                 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3303                 if (i == 0)
3304                         scc_mgr_set_hhp_extras();
3305
3306                 scc_set_bypass_mode(i);
3307         }
3308
3309         /* Calibration is skipped. */
3310         if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3311                 /*
3312                  * Set VFIFO and LFIFO to instant-on settings in skip
3313                  * calibration mode.
3314                  */
3315                 mem_skip_calibrate();
3316
3317                 /*
3318                  * Do not remove this line as it makes sure all of our
3319                  * decisions have been applied.
3320                  */
3321                 writel(0, &sdr_scc_mgr->update);
3322                 return 1;
3323         }
3324
3325         /* Calibration is not skipped. */
3326         for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3327                 /*
3328                  * Zero all delay chain/phase settings for all
3329                  * groups and all shadow register sets.
3330                  */
3331                 scc_mgr_zero_all();
3332
3333                 run_groups = ~param->skip_groups;
3334
3335                 for (write_group = 0, write_test_bgn = 0; write_group
3336                         < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3337                         write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3338
3339                         /* Initialize the group failure */
3340                         group_failed = 0;
3341
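                        /*
                         * Peel this write group's DQS bits off the run
                         * mask; an all-zero slice means the whole group
                         * was requested to be skipped.
                         */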
3342                         current_run = run_groups & ((1 <<
3343                                 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3344                         run_groups = run_groups >>
3345                                 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3346
3347                         if (current_run == 0)
3348                                 continue;
3349
3350                         writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3351                                             SCC_MGR_GROUP_COUNTER_OFFSET);
3352                         scc_mgr_zero_group(write_group, 0);
3353
3354                         for (read_group = write_group * rwdqs_ratio,
3355                              read_test_bgn = 0;
3356                              read_group < (write_group + 1) * rwdqs_ratio;
3357                              read_group++,
3358                              read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3359                                 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3360                                         continue;
3361
3362                                 /* Calibrate the VFIFO */
3363                                 if (rw_mgr_mem_calibrate_vfifo(read_group,
3364                                                                read_test_bgn))
3365                                         continue;
3366
3367                                 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3368                                         return 0;
3369
3370                                 /* The group failed, we're done. */
3371                                 goto grp_failed;
3372                         }
3373
3374                         /* Calibrate the output side */
3375                         for (rank_bgn = 0, sr = 0;
3376                              rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
3377                              rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3378                                 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3379                                         continue;
3380
3381                                 /* Not needed in quick mode! */
3382                                 if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
3383                                         continue;
3384
3385                                 /*
3386                                  * Determine if this set of ranks
3387                                  * should be skipped entirely.
3388                                  */
3389                                 if (param->skip_shadow_regs[sr])
3390                                         continue;
3391
3392                                 /* Calibrate WRITEs */
3393                                 if (rw_mgr_mem_calibrate_writes(rank_bgn,
3394                                                 write_group, write_test_bgn))
3395                                         continue;
3396
3397                                 group_failed = 1;
3398                                 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3399                                         return 0;
3400                         }
3401
3402                         /* Some group failed, we're done. */
3403                         if (group_failed)
3404                                 goto grp_failed;
3405
3406                         for (read_group = write_group * rwdqs_ratio,
3407                              read_test_bgn = 0;
3408                              read_group < (write_group + 1) * rwdqs_ratio;
3409                              read_group++,
3410                              read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
3411                                 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3412                                         continue;
3413
3414                                 if (rw_mgr_mem_calibrate_vfifo_end(read_group,
3415                                                                 read_test_bgn))
3416                                         continue;
3417
3418                                 if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
3419                                         return 0;
3420
3421                                 /* The group failed, we're done. */
3422                                 goto grp_failed;
3423                         }
3424
3425                         /* No group failed, continue as usual. */
3426                         continue;
3427
3428 grp_failed:             /* A group failed, increment the counter. */
3429                         failing_groups++;
3430                 }
3431
3432                 /*
3433                  * If there are any failing groups, then report
3434                  * the failure.
3435                  */
3436                 if (failing_groups != 0)
3437                         return 0;
3438
3439                 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3440                         continue;
3441
3442                 /*
3443                  * If we're skipping groups as part of debug,
3444                  * don't calibrate LFIFO.
3445                  */
3446                 if (param->skip_groups != 0)
3447                         continue;
3448
3449                 /* Calibrate the LFIFO */
3450                 if (!rw_mgr_mem_calibrate_lfifo())
3451                         return 0;
3452         }
3453
3454         /*
3455          * Do not remove this line as it makes sure all of our decisions
3456          * have been applied.
3457          */
3458         writel(0, &sdr_scc_mgr->update);
3459         return 1;
3460 }
3461
3462 /**
3463  * run_mem_calibrate() - Perform memory calibration
3464  *
3465  * This function triggers the entire memory calibration procedure.
3466  */
3467 static int run_mem_calibrate(void)
3468 {
3469         int pass;
3470
3471         debug("%s:%d\n", __func__, __LINE__);
3472
3473         /* Reset pass/fail status shown on afi_cal_success/fail */
3474         writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3475
3476         /* Stop tracking manager. */
3477         clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3478
3479         phy_mgr_initialize();
3480         rw_mgr_mem_initialize();
3481
3482         /* Perform the actual memory calibration. */
3483         pass = mem_calibrate();
3484
3485         mem_precharge_and_activate();
3486         writel(0, &phy_mgr_cmd->fifo_reset);
3487
3488         /* Handoff. */
3489         rw_mgr_mem_handoff();
3490         /*
3491          * In Hard PHY this is a 2-bit control:
3492          * 0: AFI Mux Select
3493          * 1: DDIO Mux Select
3494          */
3495         writel(0x2, &phy_mgr_cfg->mux_sel);
3496
3497         /* Start tracking manager. */
3498         setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
3499
3500         return pass;
3501 }
3502
3503 /**
3504  * debug_mem_calibrate() - Report result of memory calibration
3505  * @pass:       Value indicating whether calibration passed or failed
3506  *
3507  * This function reports the results of the memory calibration
3508  * and writes debug information into the register file.
3509  */
3510 static void debug_mem_calibrate(int pass)
3511 {
3512         uint32_t debug_info;
3513
3514         if (pass) {
3515                 printf("%s: CALIBRATION PASSED\n", __FILE__);
3516
3517                 gbl->fom_in /= 2;
3518                 gbl->fom_out /= 2;
3519
3520                 if (gbl->fom_in > 0xff)
3521                         gbl->fom_in = 0xff;
3522
3523                 if (gbl->fom_out > 0xff)
3524                         gbl->fom_out = 0xff;
3525
3526                 /* Update the FOM in the register file */
3527                 debug_info = gbl->fom_in;
3528                 debug_info |= gbl->fom_out << 8;
3529                 writel(debug_info, &sdr_reg_file->fom);
3530
3531                 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3532                 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3533         } else {
3534                 printf("%s: CALIBRATION FAILED\n", __FILE__);
3535
3536                 /* Update the failing group/stage in the register file. */
3537                 debug_info = gbl->error_stage;
3538                 debug_info |= gbl->error_substage << 8;
3539                 debug_info |= gbl->error_group << 16;
3540
3541                 writel(debug_info, &sdr_reg_file->failing_stage);
3542                 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3543                 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3549         }
3550
3551         printf("%s: Calibration complete\n", __FILE__);
3552 }
3553
3554 /**
3555  * hc_initialize_rom_data() - Initialize ROM data
3556  *
3557  * Initialize ROM data.
3558  */
3559 static void hc_initialize_rom_data(void)
3560 {
3561         u32 i, addr;
3562
3563         addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3564         for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3565                 writel(inst_rom_init[i], addr + (i << 2));
3566
3567         addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3568         for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3569                 writel(ac_rom_init[i], addr + (i << 2));
3570 }
3571
3572 /**
3573  * initialize_reg_file() - Initialize SDR register file
3574  *
3575  * Initialize SDR register file.
3576  */
3577 static void initialize_reg_file(void)
3578 {
3579         /* Initialize the register file with the correct data */
3580         writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3581         writel(0, &sdr_reg_file->debug_data_addr);
3582         writel(0, &sdr_reg_file->cur_stage);
3583         writel(0, &sdr_reg_file->fom);
3584         writel(0, &sdr_reg_file->failing_stage);
3585         writel(0, &sdr_reg_file->debug1);
3586         writel(0, &sdr_reg_file->debug2);
3587 }
3588
3589 /**
3590  * initialize_hps_phy() - Initialize HPS PHY
3591  *
3592  * Initialize HPS PHY.
3593  */
3594 static void initialize_hps_phy(void)
3595 {
3596         uint32_t reg;
3597         /*
3598          * Tracking also gets configured here because it's in the
3599          * same register.
3600          */
3601         uint32_t trk_sample_count = 7500;
3602         /*
3603          * Format is the number of outer loops in the 16 MSB and the
3604          * sample count in the 16 LSB: here, 10 outer loops and 100 samples.
3605          */
3606         uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
3607
3608         reg = 0;
3609         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3610         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3611         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3612         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3613         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3614         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3615         /*
3616          * This field selects the intrinsic latency to RDATA_EN/FULL path.
3617          * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3618          */
3619         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3620         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3621                 trk_sample_count);
3622         writel(reg, &sdr_ctrl->phy_ctrl0);
3623
3624         reg = 0;
3625         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3626                 trk_sample_count >>
3627                 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3628         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3629                 trk_long_idle_sample_count);
3630         writel(reg, &sdr_ctrl->phy_ctrl1);
3631
3632         reg = 0;
3633         reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3634                 trk_long_idle_sample_count >>
3635                 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3636         writel(reg, &sdr_ctrl->phy_ctrl2);
3637 }
3638
3639 /**
3640  * initialize_tracking() - Initialize tracking
3641  *
3642  * Initialize the register file with usable initial data.
3643  */
3644 static void initialize_tracking(void)
3645 {
3646         /*
3647          * Initialize the register file with the correct data.
3648          * Compute usable version of value in case we skip full
3649          * computation later.
3650          */
3651         writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
3652                &sdr_reg_file->dtaps_per_ptap);
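        /*
         * Illustrative arithmetic (delay values assumed, not real
         * parameters): with IO_DELAY_PER_OPA_TAP = 416 and
         * IO_DELAY_PER_DCHAIN_TAP = 25, DIV_ROUND_UP(416, 25) - 1 =
         * 17 - 1 = 16 dtaps per ptap would be stored.
         */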
3653
3654         /* trk_sample_count */
3655         writel(7500, &sdr_reg_file->trk_sample_count);
3656
3657         /* longidle outer loop [15:0] */
3658         writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3659
3660         /*
3661          * longidle sample count [31:24]
3662          * trfc, worst case of 933 MHz 4Gb [23:16]
3663          * trcd, worst case [15:8]
3664          * vfifo wait [7:0]
3665          */
3666         writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3667                &sdr_reg_file->delays);
3668
3669         /* mux delay */
3670         writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
3671                (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
3672                &sdr_reg_file->trk_rw_mgr_addr);
3673
3674         writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
3675                &sdr_reg_file->trk_read_dqs_width);
3676
3677         /* trefi [7:0] */
3678         writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
3679                &sdr_reg_file->trk_rfsh);
3680 }
3681
3682 int sdram_calibration_full(void)
3683 {
3684         struct param_type my_param;
3685         struct gbl_type my_gbl;
3686         uint32_t pass;
3687
3688         memset(&my_param, 0, sizeof(my_param));
3689         memset(&my_gbl, 0, sizeof(my_gbl));
3690
3691         param = &my_param;
3692         gbl = &my_gbl;
3693
3694         /* Enable the calibration report by default. */
3695         gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3696         /*
3697          * Only sweep all groups (regardless of fail state) by default.
3698          * Set the enabled read test by default.
3699          */
3700 #if DISABLE_GUARANTEED_READ
3701         gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3702 #endif
3703         /* Initialize the register file */
3704         initialize_reg_file();
3705
3706         /* Initialize any PHY CSR */
3707         initialize_hps_phy();
3708
3709         scc_mgr_initialize();
3710
3711         initialize_tracking();
3712
3713         printf("%s: Preparing to start memory calibration\n", __FILE__);
3714
3715         debug("%s:%d\n", __func__, __LINE__);
3716         debug_cond(DLEVEL == 1,
3717                    "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3718                    RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3719                    RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3720                    RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3721                    RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3722         debug_cond(DLEVEL == 1,
3723                    "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3724                    RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3725                    RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3726                    IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
3727         debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
3728                    IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3729         debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3730                    IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3731                    IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3732         debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3733                    IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3734                    IO_IO_OUT2_DELAY_MAX);
3735         debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3736                    IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3737
3738         hc_initialize_rom_data();
3739
3740         /* update info for sims */
3741         reg_file_set_stage(CAL_STAGE_NIL);
3742         reg_file_set_group(0);
3743
3744         /*
3745          * Load global needed for those actions that require
3746          * some dynamic calibration support.
3747          */
3748         dyn_calib_steps = STATIC_CALIB_STEPS;
3749         /*
3750          * Load global to allow dynamic selection of delay loop settings
3751          * based on calibration mode.
3752          */
3753         if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3754                 skip_delay_mask = 0xff;
3755         else
3756                 skip_delay_mask = 0x0;
3757
3758         pass = run_mem_calibrate();
3759         debug_mem_calibrate(pass);
3760         return pass;
3761 }