/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)
		(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)
		(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

const struct socfpga_sdram_rw_mgr_config *rwcfg;
const struct socfpga_sdram_io_config *iocfg;
const struct socfpga_sdram_misc_config *misccfg;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
u16 dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping
 */

u16 skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
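
/*
 * Example (follows from the comment above): when not skipping,
 * skip_delay_mask is all-ones and SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a)
 * yields 0x6a; when skipping, the mask is 0 and the macro yields 0.
 */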

struct gbl_type *gbl;
struct param_type *param;

static void set_failing_group_stage(u32 group, u32 stage,
				    u32 substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable, we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = rwcfg->mem_dq_per_read_dqs /
		rwcfg->mem_virtual_groups_per_read_dqs;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << rwcfg->mem_dq_per_read_dqs) - 1;
	param->write_correct_mask = (1 << rwcfg->mem_dq_per_write_dqs) - 1;
}
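
/*
 * Illustrative sizing (assumed values, not taken from this file): with
 * mem_dq_per_read_dqs = 8 and mem_virtual_groups_per_read_dqs = 1,
 * ratio = 8 and the masks above all evaluate to (1 << 8) - 1 = 0xff.
 */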

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (rwcfg->mem_number_of_ranks) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (rwcfg->mem_number_of_cs_per_dimm == 1) {
				/*
				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
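
/*
 * Layout example (follows directly from the code above): with a single
 * rank, rank = 0 and READ_WRITE mode, odt_mask_0 = 0x0 and odt_mask_1 = 0x1,
 * so cs_and_odt_mask = 0xfe | (0x0 << 8) | (0x1 << 16) = 0x0100fe.
 */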

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
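
/*
 * Illustrative address decode (from the expression above): each group
 * occupies one 32-bit word, so e.g. off = SCC_MGR_DQS_IN_DELAY_OFFSET with
 * grp = 3 writes to SDR_PHYGRP_SCCGRP_ADDRESS | off | 0xc.
 */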

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}

static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
		    delay);
}

static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(u32 dm, u32 delay)
{
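	/*
	 * The OUT1 delay register file indexes the DQ delays at
	 * 0 .. mem_dq_per_write_dqs - 1 and the DQS delay at
	 * mem_dq_per_write_dqs (see scc_mgr_set_dqs_out1_delay() above),
	 * so the DM entries start right after it, hence "+ 1 + dm" below.
	 */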
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(u32 dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(u32 dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(u32 dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group,
						     u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group,
					       u32 delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:	Delay value
 *
 * This function sets the OCT output delay in the SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = rwcfg->mem_if_read_dqs_width /
			  rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
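
/*
 * Illustrative example (widths assumed, not from this file): with
 * mem_if_read_dqs_width = 8 and mem_if_write_dqs_width = 4, ratio = 2,
 * so write group 3 programs the OCT delay into read groups 6 and 7.
 */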

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
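	/* The bit fields above evaluate to 0x27. */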
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, iocfg->dqs_in_reserve);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, iocfg->dqs_out_reserve);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = rwcfg->mem_if_read_dqs_width /
			  rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 * @write_group:	Write group
 * @out_only:	If non-zero, zero only the output-side settings
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(iocfg->dqs_out_reserve);
		scc_mgr_set_oct_out1_delay(write_group, iocfg->dqs_out_reserve);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(u32 group_bgn, u32 delay)
{
	u32 i, p;

	for (i = 0, p = group_bgn; i < rwcfg->mem_dq_per_read_dqs; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(u32 delay1)
{
	u32 i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}


/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(u32 write_group,
						    u32 delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   iocfg->io_out2_delay_max,
			   new_delay - iocfg->io_out2_delay_max);
		new_delay -= iocfg->io_out2_delay_max;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, iocfg->io_out2_delay_max,
			   new_delay - iocfg->io_out2_delay_max);
		new_delay -= iocfg->io_out2_delay_max;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom; could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with jump to special shared
	 * RETURN instruction, so we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/**
 * delay_for_n_mem_clocks() - Delay for N memory clocks
 * @clocks:	Length of the delay
 *
 * Delay for N memory clocks.
 */
static void delay_for_n_mem_clocks(const u32 clocks)
{
	u32 afi_clocks;
	u16 c_loop;
	u8 inner;
	u8 outer;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = DIV_ROUND_UP(clocks, misccfg->afi_rate_ratio);
	if (afi_clocks)	/* Temporary underflow protection */
		afi_clocks--;

	/*
	 * Note, we don't bother accounting for being off a little
	 * bit because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test
	 * before the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	c_loop = afi_clocks >> 16;
	outer = c_loop ? 0xff : (afi_clocks >> 8);
	inner = outer ? 0xff : afi_clocks;
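	/*
	 * Example (follows from the decomposition above): afi_clocks =
	 * 0x12345 gives c_loop = 1, outer = 0xff, inner = 0xff, while
	 * afi_clocks = 0x80 gives c_loop = 0, outer = 0, inner = 0x80.
	 */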

	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *		return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(rwcfg->idle_loop1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(rwcfg->idle_loop2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(rwcfg->precharge_all, grpaddr);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((rwcfg->mem_address_mirroring >> r) & 0x1) {
			set_jump_as_return();
			writel(rwcfg->mrs2_mirr, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs3_mirr, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs1_mirr, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(rwcfg->mrs2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(rwcfg->mrs1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(rwcfg->zqcl, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iteration in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instruction in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
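	 * (check: ((2 + 0) * 256 + 2) * 106 = 54484 >= 54000 cycles)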
	 */
	rw_mgr_mem_init_load_regs(misccfg->tinit_cntr0_val,
				  misccfg->tinit_cntr1_val,
				  misccfg->tinit_cntr2_val,
				  rwcfg->init_reset_0_cke_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iteration in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instruction in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
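	 * (check: ((2 + 2) * 131 + 2) * 256 = 134656 >= 134000 cycles)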
	 */
	rw_mgr_mem_init_load_regs(misccfg->treset_cntr0_val,
				  misccfg->treset_cntr1_val,
				  misccfg->treset_cntr2_val,
				  rwcfg->init_reset_1_cke_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(rwcfg->mrs0_dll_reset_mirr, rwcfg->mrs0_dll_reset,
			     0);
}

/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(rwcfg->mrs0_user_mirr, rwcfg->mrs0_user, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
 * @group:	Write Group
 * @test_dm:	Use DM
 *
 * Issue write test command. Two variants are provided, one that just tests
 * a write pattern and another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
						  u32 test_dm)
{
	const u32 quick_write_mode =
		(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
		misccfg->enable_super_quick_calibration;
	u32 mcc_instruction;
	u32 rw_wl_nop_cycles;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs
	 *
	 * 2. For a number of NOP cycles equals to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping
	 *    the NOP micro-instruction all together
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need
	 *    to directly jump to the micro-instruction that sends out the data
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations.
	 *       one counter left to issue this command in "multiple-group" mode
	 */

	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
			writel(rwcfg->lfsr_wr_rd_dm_bank_0_data,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = rwcfg->lfsr_wr_rd_bank_0_wl_1;
			writel(rwcfg->lfsr_wr_rd_bank_0_data,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(rwcfg->lfsr_wr_rd_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
			writel(rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
			writel(rwcfg->lfsr_wr_rd_bank_0_dqs,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
			writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
			writel(rwcfg->lfsr_wr_rd_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(rwcfg->lfsr_wr_rd_dm_bank_0_wait,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(rwcfg->lfsr_wr_rd_bank_0_wait,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
				 RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
				(group << 2));
}
1149
Marek Vasut4a82854b2015-07-21 05:57:11 +02001150/**
1151 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
1152 * @rank_bgn: Rank number
1153 * @write_group: Write Group
1154 * @use_dm: Use DM
1155 * @all_correct: All bits must be correct in the mask
1156 * @bit_chk: Resulting bit mask after the test
1157 * @all_ranks: Test all ranks
1158 *
1159 * Test writes, can check for a single bit pass or multiple bit pass.
1160 */
Marek Vasutb9452ea2015-07-21 05:54:39 +02001161static int
1162rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
1163 const u32 use_dm, const u32 all_correct,
1164 u32 *bit_chk, const u32 all_ranks)
Marek Vasutad64769c2015-07-21 05:43:37 +02001165{
Marek Vasutb9452ea2015-07-21 05:54:39 +02001166 const u32 rank_end = all_ranks ?
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001167 rwcfg->mem_number_of_ranks :
Marek Vasutb9452ea2015-07-21 05:54:39 +02001168 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001169 const u32 shift_ratio = rwcfg->mem_dq_per_write_dqs /
1170 rwcfg->mem_virtual_groups_per_write_dqs;
Marek Vasutb9452ea2015-07-21 05:54:39 +02001171 const u32 correct_mask_vg = param->write_correct_mask_vg;
1172
1173 u32 tmp_bit_chk, base_rw_mgr;
1174 int vg, r;
Marek Vasutad64769c2015-07-21 05:43:37 +02001175
1176 *bit_chk = param->write_correct_mask;
Marek Vasutad64769c2015-07-21 05:43:37 +02001177
1178 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasutb9452ea2015-07-21 05:54:39 +02001179 /* Set rank */
Marek Vasutad64769c2015-07-21 05:43:37 +02001180 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1181
1182 tmp_bit_chk = 0;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001183 for (vg = rwcfg->mem_virtual_groups_per_write_dqs - 1;
Marek Vasutb9452ea2015-07-21 05:54:39 +02001184 vg >= 0; vg--) {
1185 /* Reset the FIFOs to get pointers to known state. */
Marek Vasutad64769c2015-07-21 05:43:37 +02001186 writel(0, &phy_mgr_cmd->fifo_reset);
1187
Marek Vasutb9452ea2015-07-21 05:54:39 +02001188 rw_mgr_mem_calibrate_write_test_issue(
1189 write_group *
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001190 rwcfg->mem_virtual_groups_per_write_dqs + vg,
Marek Vasutad64769c2015-07-21 05:43:37 +02001191 use_dm);
1192
Marek Vasutb9452ea2015-07-21 05:54:39 +02001193 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1194 tmp_bit_chk <<= shift_ratio;
1195 tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
Marek Vasutad64769c2015-07-21 05:43:37 +02001196 }
Marek Vasutb9452ea2015-07-21 05:54:39 +02001197
Marek Vasutad64769c2015-07-21 05:43:37 +02001198 *bit_chk &= tmp_bit_chk;
1199 }
1200
Marek Vasutb9452ea2015-07-21 05:54:39 +02001201 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
Marek Vasutad64769c2015-07-21 05:43:37 +02001202 if (all_correct) {
Marek Vasutb9452ea2015-07-21 05:54:39 +02001203 debug_cond(DLEVEL == 2,
1204 "write_test(%u,%u,ALL) : %u == %u => %i\n",
1205 write_group, use_dm, *bit_chk,
1206 param->write_correct_mask,
1207 *bit_chk == param->write_correct_mask);
Marek Vasutad64769c2015-07-21 05:43:37 +02001208 return *bit_chk == param->write_correct_mask;
1209 } else {
Marek Vasutb9452ea2015-07-21 05:54:39 +02001210 debug_cond(DLEVEL == 2,
1211 "write_test(%u,%u,ONE) : %u != %i => %i\n",
1212 write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
Marek Vasutad64769c2015-07-21 05:43:37 +02001213 return *bit_chk != 0x00;
1214 }
1215}
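
/*
 * Editorial note: a minimal sketch (not part of the original driver) of
 * the bit-mask accumulation used in rw_mgr_mem_calibrate_write_test()
 * above. Each virtual group contributes shift_ratio result bits; the
 * per-group pass bits (correct_mask_vg & ~base_rw_mgr) are shifted in
 * from the LSB side, most significant group first. The helper name and
 * the vg_fail_bits array are hypothetical.
 */
static u32 accumulate_vg_results(const u32 *vg_fail_bits, const int num_vg,
				 const u32 shift_ratio,
				 const u32 correct_mask_vg)
{
	u32 tmp_bit_chk = 0;
	int vg;

	/* Walk the groups from highest to lowest, like the loop above. */
	for (vg = num_vg - 1; vg >= 0; vg--) {
		tmp_bit_chk <<= shift_ratio;
		/* A cleared fail bit in the RW manager means a pass. */
		tmp_bit_chk |= correct_mask_vg & ~vg_fail_bits[vg];
	}

	return tmp_bit_chk;
}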
1216
Marek Vasutd844c7d2015-07-18 03:55:07 +02001217/**
1218 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
1219 * @rank_bgn: Rank number
1220 * @group: Read/Write Group
1221 * @all_ranks: Test all ranks
1222 *
1223 * Performs a guaranteed read on the patterns we are going to use during a
1224 * read test to ensure memory works.
Dinh Nguyen3da42852015-06-02 22:52:49 -05001225 */
Marek Vasutd844c7d2015-07-18 03:55:07 +02001226static int
1227rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
1228 const u32 all_ranks)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001229{
Marek Vasutd844c7d2015-07-18 03:55:07 +02001230 const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1231 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1232 const u32 addr_offset =
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001233 (group * rwcfg->mem_virtual_groups_per_read_dqs) << 2;
Marek Vasutd844c7d2015-07-18 03:55:07 +02001234 const u32 rank_end = all_ranks ?
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001235 rwcfg->mem_number_of_ranks :
Marek Vasutd844c7d2015-07-18 03:55:07 +02001236 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001237 const u32 shift_ratio = rwcfg->mem_dq_per_read_dqs /
1238 rwcfg->mem_virtual_groups_per_read_dqs;
Marek Vasutd844c7d2015-07-18 03:55:07 +02001239 const u32 correct_mask_vg = param->read_correct_mask_vg;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001240
Marek Vasutd844c7d2015-07-18 03:55:07 +02001241 u32 tmp_bit_chk, base_rw_mgr, bit_chk;
1242 int vg, r;
1243 int ret = 0;
1244
1245 bit_chk = param->read_correct_mask;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001246
1247 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasutd844c7d2015-07-18 03:55:07 +02001248 /* Set rank */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001249 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1250
1251		/* Load up a constant burst of read commands */
Marek Vasut1273dd92015-07-12 21:05:08 +02001252 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001253 writel(rwcfg->guaranteed_read,
Marek Vasut139823e2015-08-02 19:47:01 +02001254 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001255
Marek Vasut1273dd92015-07-12 21:05:08 +02001256 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001257 writel(rwcfg->guaranteed_read_cont,
Marek Vasut139823e2015-08-02 19:47:01 +02001258 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001259
1260 tmp_bit_chk = 0;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001261 for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1;
Marek Vasutd844c7d2015-07-18 03:55:07 +02001262 vg >= 0; vg--) {
1263 /* Reset the FIFOs to get pointers to known state. */
Marek Vasut1273dd92015-07-12 21:05:08 +02001264 writel(0, &phy_mgr_cmd->fifo_reset);
1265 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1266 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001267 writel(rwcfg->guaranteed_read,
Marek Vasutd844c7d2015-07-18 03:55:07 +02001268 addr + addr_offset + (vg << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001269
Marek Vasut1273dd92015-07-12 21:05:08 +02001270 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Marek Vasutd844c7d2015-07-18 03:55:07 +02001271 tmp_bit_chk <<= shift_ratio;
1272 tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001273 }
Marek Vasutd844c7d2015-07-18 03:55:07 +02001274
1275 bit_chk &= tmp_bit_chk;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001276 }
1277
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001278 writel(rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001279
1280 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
Marek Vasutd844c7d2015-07-18 03:55:07 +02001281
1282 if (bit_chk != param->read_correct_mask)
1283 ret = -EIO;
1284
1285 debug_cond(DLEVEL == 1,
1286 "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
1287 __func__, __LINE__, group, bit_chk,
1288 param->read_correct_mask, ret);
1289
1290 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001291}
1292
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001293/**
1294 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
1295 * @rank_bgn: Rank number
1296 * @all_ranks: Test all ranks
1297 *
1298 * Load up the patterns we are going to use during a read test.
1299 */
1300static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
1301 const int all_ranks)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001302{
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001303 const u32 rank_end = all_ranks ?
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001304 rwcfg->mem_number_of_ranks :
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001305 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1306 u32 r;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001307
1308 debug("%s:%d\n", __func__, __LINE__);
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001309
Dinh Nguyen3da42852015-06-02 22:52:49 -05001310 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001311 /* set rank */
1312 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1313
1314		/* Load up a constant burst */
Marek Vasut1273dd92015-07-12 21:05:08 +02001315 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001316
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001317 writel(rwcfg->guaranteed_write_wait0,
Marek Vasut139823e2015-08-02 19:47:01 +02001318 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001319
Marek Vasut1273dd92015-07-12 21:05:08 +02001320 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001321
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001322 writel(rwcfg->guaranteed_write_wait1,
Marek Vasut139823e2015-08-02 19:47:01 +02001323 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001324
Marek Vasut1273dd92015-07-12 21:05:08 +02001325 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001326
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001327 writel(rwcfg->guaranteed_write_wait2,
Marek Vasut139823e2015-08-02 19:47:01 +02001328 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001329
Marek Vasut1273dd92015-07-12 21:05:08 +02001330 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001331
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001332 writel(rwcfg->guaranteed_write_wait3,
Marek Vasut139823e2015-08-02 19:47:01 +02001333 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001334
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001335 writel(rwcfg->guaranteed_write, SDR_PHYGRP_RWMGRGRP_ADDRESS |
Marek Vasut1273dd92015-07-12 21:05:08 +02001336 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001337 }
1338
1339 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1340}
1341
Marek Vasut783fcf52015-07-20 03:26:05 +02001342/**
1343 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1344 * @rank_bgn: Rank number
1345 * @group: Read/Write group
1346 * @num_tries: Number of retries of the test
1347 * @all_correct: All bits must be correct in the mask
1348 * @bit_chk: Resulting bit mask after the test
1349 * @all_groups: Test all R/W groups
1350 * @all_ranks: Test all ranks
1351 *
1352 * Try a read and see if it returns correct data back. Test has dummy reads
1353 * inserted into the mix used to align DQS enable. Test has more thorough
1354 * checks than the regular read test.
Dinh Nguyen3da42852015-06-02 22:52:49 -05001355 */
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001356static int
1357rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
1358 const u32 num_tries, const u32 all_correct,
1359 u32 *bit_chk,
1360 const u32 all_groups, const u32 all_ranks)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001361{
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001362 const u32 rank_end = all_ranks ? rwcfg->mem_number_of_ranks :
Dinh Nguyen3da42852015-06-02 22:52:49 -05001363 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001364 const u32 quick_read_mode =
1365 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
Marek Vasut96fd4362015-08-02 19:26:55 +02001366 misccfg->enable_super_quick_calibration);
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001367 u32 correct_mask_vg = param->read_correct_mask_vg;
1368 u32 tmp_bit_chk;
1369 u32 base_rw_mgr;
1370 u32 addr;
1371
1372 int r, vg, ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001373
1374 *bit_chk = param->read_correct_mask;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001375
1376 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001377 /* set rank */
1378 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1379
Marek Vasut1273dd92015-07-12 21:05:08 +02001380 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001381
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001382 writel(rwcfg->read_b2b_wait1,
Marek Vasut139823e2015-08-02 19:47:01 +02001383 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001384
Marek Vasut1273dd92015-07-12 21:05:08 +02001385 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001386 writel(rwcfg->read_b2b_wait2,
Marek Vasut139823e2015-08-02 19:47:01 +02001387 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001388
Dinh Nguyen3da42852015-06-02 22:52:49 -05001389 if (quick_read_mode)
Marek Vasut1273dd92015-07-12 21:05:08 +02001390 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001391 /* need at least two (1+1) reads to capture failures */
1392 else if (all_groups)
Marek Vasut1273dd92015-07-12 21:05:08 +02001393 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001394 else
Marek Vasut1273dd92015-07-12 21:05:08 +02001395 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001396
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001397 writel(rwcfg->read_b2b,
Marek Vasut139823e2015-08-02 19:47:01 +02001398 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001399 if (all_groups)
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001400 writel(rwcfg->mem_if_read_dqs_width *
1401 rwcfg->mem_virtual_groups_per_read_dqs - 1,
Marek Vasut1273dd92015-07-12 21:05:08 +02001402 &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001403 else
Marek Vasut1273dd92015-07-12 21:05:08 +02001404 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001405
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001406 writel(rwcfg->read_b2b,
Marek Vasut139823e2015-08-02 19:47:01 +02001407 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001408
1409 tmp_bit_chk = 0;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001410 for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0;
Marek Vasut7ce23bb2015-07-19 07:51:17 +02001411 vg--) {
Marek Vasutba522c72015-07-19 07:57:28 +02001412 /* Reset the FIFOs to get pointers to known state. */
Marek Vasut1273dd92015-07-12 21:05:08 +02001413 writel(0, &phy_mgr_cmd->fifo_reset);
1414 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1415 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001416
Marek Vasutba522c72015-07-19 07:57:28 +02001417 if (all_groups) {
1418 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1419 RW_MGR_RUN_ALL_GROUPS_OFFSET;
1420 } else {
1421 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1422 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1423 }
Marek Vasutc4815f72015-07-12 19:03:33 +02001424
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001425 writel(rwcfg->read_b2b, addr +
Marek Vasut139823e2015-08-02 19:47:01 +02001426 ((group *
1427 rwcfg->mem_virtual_groups_per_read_dqs +
1428 vg) << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001429
Marek Vasut1273dd92015-07-12 21:05:08 +02001430 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001431 tmp_bit_chk <<= rwcfg->mem_dq_per_read_dqs /
1432 rwcfg->mem_virtual_groups_per_read_dqs;
Marek Vasutba522c72015-07-19 07:57:28 +02001433 tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001434 }
Marek Vasut7ce23bb2015-07-19 07:51:17 +02001435
Dinh Nguyen3da42852015-06-02 22:52:49 -05001436 *bit_chk &= tmp_bit_chk;
1437 }
1438
Marek Vasutc4815f72015-07-12 19:03:33 +02001439 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001440 writel(rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001441
Marek Vasut3853d652015-07-19 07:44:21 +02001442 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1443
Dinh Nguyen3da42852015-06-02 22:52:49 -05001444 if (all_correct) {
Marek Vasut3853d652015-07-19 07:44:21 +02001445 ret = (*bit_chk == param->read_correct_mask);
1446 debug_cond(DLEVEL == 2,
1447 "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
1448 __func__, __LINE__, group, all_groups, *bit_chk,
1449 param->read_correct_mask, ret);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001450 } else {
Marek Vasut3853d652015-07-19 07:44:21 +02001451 ret = (*bit_chk != 0x00);
1452 debug_cond(DLEVEL == 2,
1453 "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
1454 __func__, __LINE__, group, all_groups, *bit_chk,
1455 0, ret);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001456 }
Marek Vasut3853d652015-07-19 07:44:21 +02001457
1458 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001459}
1460
Marek Vasut96df6032015-07-19 07:35:36 +02001461/**
1462 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
1463 * @grp: Read/Write group
1464 * @num_tries: Number of retries of the test
1465 * @all_correct: All bits must be correct in the mask
1466 * @all_groups: Test all R/W groups
1467 *
1468 * Perform a READ test across all memory ranks.
1469 */
1470static int
1471rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
1472 const u32 all_correct,
1473 const u32 all_groups)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001474{
Marek Vasut96df6032015-07-19 07:35:36 +02001475 u32 bit_chk;
1476 return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
1477 &bit_chk, all_groups, 1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001478}
1479
Marek Vasut60bb8a82015-07-19 06:25:27 +02001480/**
1481 * rw_mgr_incr_vfifo() - Increase VFIFO value
1482 * @grp: Read/Write group
Marek Vasut60bb8a82015-07-19 06:25:27 +02001483 *
1484 * Increase VFIFO value.
1485 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001486static void rw_mgr_incr_vfifo(const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001487{
Marek Vasut1273dd92015-07-12 21:05:08 +02001488 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001489}
1490
Marek Vasut60bb8a82015-07-19 06:25:27 +02001491/**
1492 * rw_mgr_decr_vfifo() - Decrease VFIFO value
1493 * @grp: Read/Write group
Marek Vasut60bb8a82015-07-19 06:25:27 +02001494 *
1495 * Decrease VFIFO value.
1496 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001497static void rw_mgr_decr_vfifo(const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001498{
Marek Vasut60bb8a82015-07-19 06:25:27 +02001499 u32 i;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001500
Marek Vasut96fd4362015-08-02 19:26:55 +02001501 for (i = 0; i < misccfg->read_valid_fifo_size - 1; i++)
Marek Vasut8c887b62015-07-19 06:37:51 +02001502 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001503}
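
/*
 * Editorial note: the VFIFO pointer can only be advanced by the PHY
 * manager command above, so rw_mgr_decr_vfifo() emulates one step back
 * with (read_valid_fifo_size - 1) steps forward, relying on the pointer
 * wrapping around. A hypothetical model of that arithmetic:
 */
static u32 vfifo_step_back(const u32 pos, const u32 fifo_size)
{
	/* (pos - 1) mod fifo_size, using forward steps only. */
	return (pos + fifo_size - 1) % fifo_size;
}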
1504
Marek Vasutd145ca92015-07-19 06:45:43 +02001505/**
1506 * find_vfifo_failing_read() - Push VFIFO to get a failing read
1507 * @grp: Read/Write group
1508 *
1509 * Push VFIFO until a failing read happens.
1510 */
1511static int find_vfifo_failing_read(const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001512{
Marek Vasut96df6032015-07-19 07:35:36 +02001513 u32 v, ret, fail_cnt = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001514
Marek Vasut96fd4362015-08-02 19:26:55 +02001515 for (v = 0; v < misccfg->read_valid_fifo_size; v++) {
Marek Vasutd145ca92015-07-19 06:45:43 +02001516 debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05001517 __func__, __LINE__, v);
Marek Vasutd145ca92015-07-19 06:45:43 +02001518 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
Marek Vasut96df6032015-07-19 07:35:36 +02001519 PASS_ONE_BIT, 0);
Marek Vasutd145ca92015-07-19 06:45:43 +02001520 if (!ret) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001521 fail_cnt++;
1522
1523 if (fail_cnt == 2)
Marek Vasutd145ca92015-07-19 06:45:43 +02001524 return v;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001525 }
1526
Marek Vasutd145ca92015-07-19 06:45:43 +02001527 /* Fiddle with FIFO. */
Marek Vasut8c887b62015-07-19 06:37:51 +02001528 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001529 }
1530
Marek Vasutd145ca92015-07-19 06:45:43 +02001531 /* No failing read found! Something must have gone wrong. */
1532 debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
1533 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001534}
1535
Marek Vasut192d6f92015-07-19 05:26:49 +02001536/**
Marek Vasut52e8f212015-07-19 07:27:06 +02001537 * sdr_find_phase_delay() - Find DQS enable phase or delay
1538 * @working: If 1, look for working phase/delay, if 0, look for non-working
1539 * @delay: If 1, look for delay, if 0, look for phase
1540 * @grp: Read/Write group
1541 * @work: Working window position
1542 * @work_inc: Working window increment
1543 * @pd: DQS Phase/Delay Iterator
1544 *
1545 * Find working or non-working DQS enable phase or delay setting.
1546 */
1547static int sdr_find_phase_delay(int working, int delay, const u32 grp,
1548 u32 *work, const u32 work_inc, u32 *pd)
1549{
Marek Vasut139823e2015-08-02 19:47:01 +02001550 const u32 max = delay ? iocfg->dqs_en_delay_max :
1551 iocfg->dqs_en_phase_max;
Marek Vasut96df6032015-07-19 07:35:36 +02001552 u32 ret;
Marek Vasut52e8f212015-07-19 07:27:06 +02001553
1554 for (; *pd <= max; (*pd)++) {
1555 if (delay)
1556 scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
1557 else
1558 scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);
1559
1560 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
Marek Vasut96df6032015-07-19 07:35:36 +02001561 PASS_ONE_BIT, 0);
Marek Vasut52e8f212015-07-19 07:27:06 +02001562 if (!working)
1563 ret = !ret;
1564
1565 if (ret)
1566 return 0;
1567
1568 if (work)
1569 *work += work_inc;
1570 }
1571
1572 return -EINVAL;
1573}
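
/*
 * Editorial note: sdr_find_phase_delay() is a generic linear search over
 * either the phase or the delay axis, stopping at the first working
 * (working = 1) or first failing (working = 0) setting and optionally
 * accumulating work_inc per tested tap. A hypothetical caller that
 * measures a window width in delay taps could look like this; the
 * function name and the width accumulator are assumptions, not part of
 * the original sequencer:
 */
static int measure_dtap_window(const u32 grp, u32 *width)
{
	u32 d = 0;

	*width = 0;

	/* Find the first working dtap; accumulate nothing yet. */
	if (sdr_find_phase_delay(1, 1, grp, NULL, 0, &d))
		return -EINVAL;

	/* From there, count one tap per passing setting until failure. */
	return sdr_find_phase_delay(0, 1, grp, width, 1, &d);
}
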
1574/**
Marek Vasut192d6f92015-07-19 05:26:49 +02001575 * sdr_find_phase() - Find DQS enable phase
1576 * @working: If 1, look for working phase, if 0, look for non-working phase
1577 * @grp: Read/Write group
Marek Vasut192d6f92015-07-19 05:26:49 +02001578 * @work: Working window position
1579 * @i: Iterator
1580 * @p: DQS Phase Iterator
Marek Vasut192d6f92015-07-19 05:26:49 +02001581 *
1582 * Find working or non-working DQS enable phase setting.
1583 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001584static int sdr_find_phase(int working, const u32 grp, u32 *work,
Marek Vasut86a39dc2015-07-19 05:35:40 +02001585 u32 *i, u32 *p)
Marek Vasut192d6f92015-07-19 05:26:49 +02001586{
Marek Vasut96fd4362015-08-02 19:26:55 +02001587 const u32 end = misccfg->read_valid_fifo_size + (working ? 0 : 1);
Marek Vasut52e8f212015-07-19 07:27:06 +02001588 int ret;
Marek Vasut192d6f92015-07-19 05:26:49 +02001589
1590 for (; *i < end; (*i)++) {
1591 if (working)
1592 *p = 0;
1593
Marek Vasut52e8f212015-07-19 07:27:06 +02001594 ret = sdr_find_phase_delay(working, 0, grp, work,
Marek Vasut160695d2015-08-02 19:10:58 +02001595 iocfg->delay_per_opa_tap, p);
Marek Vasut52e8f212015-07-19 07:27:06 +02001596 if (!ret)
1597 return 0;
Marek Vasut192d6f92015-07-19 05:26:49 +02001598
Marek Vasut160695d2015-08-02 19:10:58 +02001599 if (*p > iocfg->dqs_en_phase_max) {
Marek Vasut192d6f92015-07-19 05:26:49 +02001600 /* Fiddle with FIFO. */
Marek Vasut8c887b62015-07-19 06:37:51 +02001601 rw_mgr_incr_vfifo(grp);
Marek Vasut192d6f92015-07-19 05:26:49 +02001602 if (!working)
1603 *p = 0;
1604 }
1605 }
1606
1607 return -EINVAL;
1608}
1609
Marek Vasut4c5e5842015-07-19 06:04:00 +02001610/**
1611 * sdr_working_phase() - Find working DQS enable phase
1612 * @grp: Read/Write group
1613 * @work_bgn: Working window start position
Marek Vasut4c5e5842015-07-19 06:04:00 +02001614 * @d: dtaps output value
1615 * @p: DQS Phase Iterator
1616 * @i: Iterator
1617 *
1618 * Find working DQS enable phase setting.
1619 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001620static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
Marek Vasut4c5e5842015-07-19 06:04:00 +02001621 u32 *p, u32 *i)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001622{
Marek Vasut160695d2015-08-02 19:10:58 +02001623 const u32 dtaps_per_ptap = iocfg->delay_per_opa_tap /
1624 iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasut192d6f92015-07-19 05:26:49 +02001625 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001626
Marek Vasut192d6f92015-07-19 05:26:49 +02001627 *work_bgn = 0;
1628
1629 for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
1630 *i = 0;
Marek Vasut521fe392015-07-19 04:34:12 +02001631 scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
Marek Vasut8c887b62015-07-19 06:37:51 +02001632 ret = sdr_find_phase(1, grp, work_bgn, i, p);
Marek Vasut192d6f92015-07-19 05:26:49 +02001633 if (!ret)
1634 return 0;
Marek Vasut160695d2015-08-02 19:10:58 +02001635 *work_bgn += iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001636 }
1637
Marek Vasut38ed6922015-07-19 05:01:12 +02001638 /* Cannot find working solution */
Marek Vasut192d6f92015-07-19 05:26:49 +02001639 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
1640 __func__, __LINE__);
1641 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001642}
1643
Marek Vasut4c5e5842015-07-19 06:04:00 +02001644/**
1645 * sdr_backup_phase() - Find DQS enable backup phase
1646 * @grp: Read/Write group
1647 * @work_bgn: Working window start position
Marek Vasut4c5e5842015-07-19 06:04:00 +02001648 * @p: DQS Phase Iterator
1649 *
1650 * Find DQS enable backup phase setting.
1651 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001652static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001653{
Marek Vasut96df6032015-07-19 07:35:36 +02001654 u32 tmp_delay, d;
Marek Vasut4c5e5842015-07-19 06:04:00 +02001655 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001656
1657 /* Special case code for backing up a phase */
1658 if (*p == 0) {
Marek Vasut160695d2015-08-02 19:10:58 +02001659 *p = iocfg->dqs_en_phase_max;
Marek Vasut8c887b62015-07-19 06:37:51 +02001660 rw_mgr_decr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001661 } else {
1662 (*p)--;
1663 }
Marek Vasut160695d2015-08-02 19:10:58 +02001664 tmp_delay = *work_bgn - iocfg->delay_per_opa_tap;
Marek Vasut521fe392015-07-19 04:34:12 +02001665 scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001666
Marek Vasut139823e2015-08-02 19:47:01 +02001667 for (d = 0; d <= iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
1668 d++) {
Marek Vasut49891df62015-07-19 05:48:30 +02001669 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001670
Marek Vasut4c5e5842015-07-19 06:04:00 +02001671 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
Marek Vasut96df6032015-07-19 07:35:36 +02001672 PASS_ONE_BIT, 0);
Marek Vasut4c5e5842015-07-19 06:04:00 +02001673 if (ret) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001674 *work_bgn = tmp_delay;
1675 break;
1676 }
Marek Vasut49891df62015-07-19 05:48:30 +02001677
Marek Vasut160695d2015-08-02 19:10:58 +02001678 tmp_delay += iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001679 }
1680
Marek Vasut4c5e5842015-07-19 06:04:00 +02001681 /* Restore VFIFO to old state before we decremented it (if needed). */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001682 (*p)++;
Marek Vasut160695d2015-08-02 19:10:58 +02001683 if (*p > iocfg->dqs_en_phase_max) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001684 *p = 0;
Marek Vasut8c887b62015-07-19 06:37:51 +02001685 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001686 }
1687
Marek Vasut521fe392015-07-19 04:34:12 +02001688 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001689}
1690
Marek Vasut4c5e5842015-07-19 06:04:00 +02001691/**
1692 * sdr_nonworking_phase() - Find non-working DQS enable phase
1693 * @grp: Read/Write group
1694 * @work_end: Working window end position
Marek Vasut4c5e5842015-07-19 06:04:00 +02001695 * @p: DQS Phase Iterator
1696 * @i: Iterator
1697 *
1698 * Find non-working DQS enable phase setting.
1699 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001700static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001701{
Marek Vasut192d6f92015-07-19 05:26:49 +02001702 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001703
1704 (*p)++;
Marek Vasut160695d2015-08-02 19:10:58 +02001705 *work_end += iocfg->delay_per_opa_tap;
1706 if (*p > iocfg->dqs_en_phase_max) {
Marek Vasut192d6f92015-07-19 05:26:49 +02001707 /* Fiddle with FIFO. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001708 *p = 0;
Marek Vasut8c887b62015-07-19 06:37:51 +02001709 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001710 }
1711
Marek Vasut8c887b62015-07-19 06:37:51 +02001712 ret = sdr_find_phase(0, grp, work_end, i, p);
Marek Vasut192d6f92015-07-19 05:26:49 +02001713 if (ret) {
1714 /* Cannot see edge of failing read. */
1715 debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
1716 __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001717 }
1718
Marek Vasut192d6f92015-07-19 05:26:49 +02001719 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001720}
1721
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001722/**
1723 * sdr_find_window_center() - Find center of the working DQS window.
1724 * @grp: Read/Write group
1725 * @work_bgn: First working settings
1726 * @work_end: Last working settings
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001727 *
1728 * Find center of the working DQS enable window.
1729 */
1730static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
Marek Vasut8c887b62015-07-19 06:37:51 +02001731 const u32 work_end)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001732{
Marek Vasut96df6032015-07-19 07:35:36 +02001733 u32 work_mid;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001734 int tmp_delay = 0;
Marek Vasut28fd2422015-07-19 02:56:59 +02001735 int i, p, d;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001736
Marek Vasut28fd2422015-07-19 02:56:59 +02001737 work_mid = (work_bgn + work_end) / 2;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001738
1739 debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
Marek Vasut28fd2422015-07-19 02:56:59 +02001740 work_bgn, work_end, work_mid);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001741 /* Get the middle delay to be less than a VFIFO delay */
Marek Vasut160695d2015-08-02 19:10:58 +02001742 tmp_delay = (iocfg->dqs_en_phase_max + 1) * iocfg->delay_per_opa_tap;
Marek Vasut28fd2422015-07-19 02:56:59 +02001743
Dinh Nguyen3da42852015-06-02 22:52:49 -05001744 debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001745 work_mid %= tmp_delay;
Marek Vasut28fd2422015-07-19 02:56:59 +02001746 debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001747
Marek Vasut160695d2015-08-02 19:10:58 +02001748 tmp_delay = rounddown(work_mid, iocfg->delay_per_opa_tap);
1749 if (tmp_delay > iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap)
1750 tmp_delay = iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap;
1751 p = tmp_delay / iocfg->delay_per_opa_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001752
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001753 debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
1754
Marek Vasut139823e2015-08-02 19:47:01 +02001755 d = DIV_ROUND_UP(work_mid - tmp_delay,
1756 iocfg->delay_per_dqs_en_dchain_tap);
Marek Vasut160695d2015-08-02 19:10:58 +02001757 if (d > iocfg->dqs_en_delay_max)
1758 d = iocfg->dqs_en_delay_max;
1759 tmp_delay += d * iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001760
Marek Vasut28fd2422015-07-19 02:56:59 +02001761 debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
1762
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001763 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
Marek Vasut28fd2422015-07-19 02:56:59 +02001764 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001765
1766 /*
1767 * Push VFIFO until we can successfully calibrate. We can do this
1768 * because the largest possible margin is 1 VFIFO cycle.
1769 */
Marek Vasut96fd4362015-08-02 19:26:55 +02001770 for (i = 0; i < misccfg->read_valid_fifo_size; i++) {
Marek Vasut8c887b62015-07-19 06:37:51 +02001771 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
Marek Vasut28fd2422015-07-19 02:56:59 +02001772 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
Dinh Nguyen3da42852015-06-02 22:52:49 -05001773 PASS_ONE_BIT,
Marek Vasut96df6032015-07-19 07:35:36 +02001774 0)) {
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001775 debug_cond(DLEVEL == 2,
Marek Vasut8c887b62015-07-19 06:37:51 +02001776 "%s:%d center: found: ptap=%u dtap=%u\n",
1777 __func__, __LINE__, p, d);
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001778 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001779 }
1780
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001781 /* Fiddle with FIFO. */
Marek Vasut8c887b62015-07-19 06:37:51 +02001782 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001783 }
1784
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001785 debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
1786 __func__, __LINE__);
1787 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001788}
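
/*
 * Editorial note: a worked example of the centering arithmetic in
 * sdr_find_window_center(), using purely illustrative constants (these
 * are not the values of any real device): delay_per_opa_tap = 400 ps,
 * delay_per_dqs_en_dchain_tap = 50 ps, dqs_en_phase_max = 7,
 * work_bgn = 1300, work_end = 3100.
 *
 *   work_mid = (1300 + 3100) / 2       = 2200
 *   VFIFO cycle = (7 + 1) * 400        = 3200, so work_mid %= 3200 = 2200
 *   p = rounddown(2200, 400) / 400     = 5 ptaps (2000 ps)
 *   d = DIV_ROUND_UP(2200 - 2000, 50)  = 4 dtaps
 *
 * The mid-point is thus approximated as 5 ptaps + 4 dtaps, and the
 * VFIFO is then pushed until a read at that setting passes. A
 * hypothetical restatement of the tap split (the driver additionally
 * clamps d to dqs_en_delay_max):
 */
static void window_center_taps(const u32 work_mid, const u32 opa_tap,
			       const u32 dchain_tap, const u32 phase_max,
			       u32 *p, u32 *d)
{
	/* Coarse part: whole phase taps, clamped to phase_max. */
	*p = work_mid / opa_tap;
	if (*p > phase_max)
		*p = phase_max;

	/* Fine part: remaining delay in delay-chain taps, rounded up. */
	*d = DIV_ROUND_UP(work_mid - *p * opa_tap, dchain_tap);
}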
1789
Marek Vasut33756892015-07-20 09:11:09 +02001790/**
1791 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
1792 * @grp: Read/Write Group
1793 *
1794 * Find a good DQS enable to use.
1795 */
Marek Vasut914546e2015-07-20 09:20:42 +02001796static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001797{
Marek Vasut57355402015-07-20 09:20:20 +02001798 u32 d, p, i;
1799 u32 dtaps_per_ptap;
1800 u32 work_bgn, work_end;
Marek Vasut35e47b72015-08-10 23:01:43 +02001801 u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
Marek Vasut57355402015-07-20 09:20:20 +02001802 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001803
1804 debug("%s:%d %u\n", __func__, __LINE__, grp);
1805
1806 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1807
1808 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1809 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1810
Marek Vasut2f3589c2015-07-19 02:42:21 +02001811 /* Step 0: Determine number of delay taps for each phase tap. */
Marek Vasut139823e2015-08-02 19:47:01 +02001812 dtaps_per_ptap = iocfg->delay_per_opa_tap /
1813 iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001814
Marek Vasut2f3589c2015-07-19 02:42:21 +02001815 /* Step 1: First push vfifo until we get a failing read. */
Marek Vasutd145ca92015-07-19 06:45:43 +02001816 find_vfifo_failing_read(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001817
Marek Vasut2f3589c2015-07-19 02:42:21 +02001818 /* Step 2: Find first working phase, increment in ptaps. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001819 work_bgn = 0;
Marek Vasut914546e2015-07-20 09:20:42 +02001820 ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
1821 if (ret)
1822 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001823
1824 work_end = work_bgn;
1825
1826 /*
Marek Vasut2f3589c2015-07-19 02:42:21 +02001827 * If d is 0 then the working window covers a phase tap and we can
1828 * follow the old procedure. Otherwise, we've found the beginning
Dinh Nguyen3da42852015-06-02 22:52:49 -05001829 * and we need to increment the dtaps until we find the end.
1830 */
1831 if (d == 0) {
Marek Vasut2f3589c2015-07-19 02:42:21 +02001832 /*
1833 * Step 3a: If we have room, back off by one and
1834 * increment in dtaps.
1835 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001836 sdr_backup_phase(grp, &work_bgn, &p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001837
Marek Vasut2f3589c2015-07-19 02:42:21 +02001838 /*
1839 * Step 4a: go forward from working phase to non working
1840 * phase, increment in ptaps.
1841 */
Marek Vasut914546e2015-07-20 09:20:42 +02001842 ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
1843 if (ret)
1844 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001845
Marek Vasut2f3589c2015-07-19 02:42:21 +02001846 /* Step 5a: Back off one from last, increment in dtaps. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001847
1848 /* Special case code for backing up a phase */
1849 if (p == 0) {
Marek Vasut160695d2015-08-02 19:10:58 +02001850 p = iocfg->dqs_en_phase_max;
Marek Vasut8c887b62015-07-19 06:37:51 +02001851 rw_mgr_decr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001852 } else {
1853 p = p - 1;
1854 }
1855
Marek Vasut160695d2015-08-02 19:10:58 +02001856 work_end -= iocfg->delay_per_opa_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001857 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1858
Dinh Nguyen3da42852015-06-02 22:52:49 -05001859 d = 0;
1860
Marek Vasut2f3589c2015-07-19 02:42:21 +02001861 debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
1862 __func__, __LINE__, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001863 }
1864
Marek Vasut2f3589c2015-07-19 02:42:21 +02001865 /* The dtap increment to find the failing edge is done here. */
Marek Vasut52e8f212015-07-19 07:27:06 +02001866 sdr_find_phase_delay(0, 1, grp, &work_end,
Marek Vasut160695d2015-08-02 19:10:58 +02001867 iocfg->delay_per_dqs_en_dchain_tap, &d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001868
1869 /* Go back to working dtap */
1870 if (d != 0)
Marek Vasut160695d2015-08-02 19:10:58 +02001871 work_end -= iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001872
Marek Vasut2f3589c2015-07-19 02:42:21 +02001873 debug_cond(DLEVEL == 2,
1874 "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
1875 __func__, __LINE__, p, d - 1, work_end);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001876
1877 if (work_end < work_bgn) {
1878 /* nil range */
Marek Vasut2f3589c2015-07-19 02:42:21 +02001879 debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
1880 __func__, __LINE__);
Marek Vasut914546e2015-07-20 09:20:42 +02001881 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001882 }
1883
Marek Vasut2f3589c2015-07-19 02:42:21 +02001884 debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05001885 __func__, __LINE__, work_bgn, work_end);
1886
Dinh Nguyen3da42852015-06-02 22:52:49 -05001887 /*
Marek Vasut2f3589c2015-07-19 02:42:21 +02001888 * We need to calculate the number of dtaps that equal a ptap.
1889 * To do that we'll back up a ptap and re-find the edge of the
1890 * window using dtaps
Dinh Nguyen3da42852015-06-02 22:52:49 -05001891 */
Marek Vasut2f3589c2015-07-19 02:42:21 +02001892 debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
1893 __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001894
1895 /* Special case code for backing up a phase */
1896 if (p == 0) {
Marek Vasut160695d2015-08-02 19:10:58 +02001897 p = iocfg->dqs_en_phase_max;
Marek Vasut8c887b62015-07-19 06:37:51 +02001898 rw_mgr_decr_vfifo(grp);
Marek Vasut2f3589c2015-07-19 02:42:21 +02001899 debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
1900 __func__, __LINE__, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001901 } else {
1902 p = p - 1;
Marek Vasut2f3589c2015-07-19 02:42:21 +02001903 debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u\n",
1904 __func__, __LINE__, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001905 }
1906
1907 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1908
1909 /*
1910 * Increase dtap until we first see a passing read (in case the
Marek Vasut2f3589c2015-07-19 02:42:21 +02001911 * window is smaller than a ptap), and then a failing read to
1912 * mark the edge of the window again.
Dinh Nguyen3da42852015-06-02 22:52:49 -05001913 */
1914
Marek Vasut2f3589c2015-07-19 02:42:21 +02001915 /* Find a passing read. */
1916 debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05001917 __func__, __LINE__);
Marek Vasut52e8f212015-07-19 07:27:06 +02001918
Dinh Nguyen3da42852015-06-02 22:52:49 -05001919 initial_failing_dtap = d;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001920
Marek Vasut52e8f212015-07-19 07:27:06 +02001921 found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001922 if (found_passing_read) {
Marek Vasut2f3589c2015-07-19 02:42:21 +02001923 /* Find a failing read. */
1924 debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
1925 __func__, __LINE__);
Marek Vasut52e8f212015-07-19 07:27:06 +02001926 d++;
1927 found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
1928 &d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001929 } else {
Marek Vasut2f3589c2015-07-19 02:42:21 +02001930 debug_cond(DLEVEL == 1,
1931 "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
1932 __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001933 }
1934
1935 /*
1936 * The dynamically calculated dtaps_per_ptap is only valid if we
1937 * found both a passing and a failing read. If we didn't, it means d hit
Marek Vasut160695d2015-08-02 19:10:58 +02001938 * the max (iocfg->dqs_en_delay_max), and dtaps_per_ptap retains its
Dinh Nguyen3da42852015-06-02 22:52:49 -05001939 * statically calculated value.
1940 */
1941 if (found_passing_read && found_failing_read)
1942 dtaps_per_ptap = d - initial_failing_dtap;
1943
Marek Vasut1273dd92015-07-12 21:05:08 +02001944 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
Marek Vasut2f3589c2015-07-19 02:42:21 +02001945 debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u\n",
1946 __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001947
Marek Vasut2f3589c2015-07-19 02:42:21 +02001948 /* Step 6: Find the centre of the window. */
Marek Vasut914546e2015-07-20 09:20:42 +02001949 ret = sdr_find_window_center(grp, work_bgn, work_end);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001950
Marek Vasut914546e2015-07-20 09:20:42 +02001951 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001952}
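
/*
 * Editorial note: steps 3-5 above measure dtaps_per_ptap for the
 * tracking logic: after backing up one ptap, the dtap sweep finds the
 * failing edge again, so one ptap expressed in dtaps is simply the
 * difference of the two failing-edge positions. A hypothetical
 * restatement, including the static fallback described above:
 */
static u32 calc_dtaps_per_ptap(const int found_both_edges,
			       const u32 initial_failing_dtap,
			       const u32 failing_dtap,
			       const u32 static_dtaps_per_ptap)
{
	/* Without both edges, keep the statically computed ratio. */
	if (!found_both_edges)
		return static_dtaps_per_ptap;

	return failing_dtap - initial_failing_dtap;
}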
1953
Marek Vasutc4907892015-07-13 02:11:02 +02001954/**
Marek Vasut901dc362015-07-13 02:48:34 +02001955 * search_stop_check() - Check if the detected edge is valid
1956 * @write: Perform read (Stage 2) or write (Stage 3) calibration
1957 * @d: DQS delay
1958 * @rank_bgn: Rank number
1959 * @write_group: Write Group
1960 * @read_group: Read Group
1961 * @bit_chk: Resulting bit mask after the test
1962 * @sticky_bit_chk: Resulting sticky bit mask after the test
1963 * @use_read_test: Perform read test
1964 *
1965 * Test if the found edge is valid.
1966 */
1967static u32 search_stop_check(const int write, const int d, const int rank_bgn,
1968 const u32 write_group, const u32 read_group,
1969 u32 *bit_chk, u32 *sticky_bit_chk,
1970 const u32 use_read_test)
1971{
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001972 const u32 ratio = rwcfg->mem_if_read_dqs_width /
1973 rwcfg->mem_if_write_dqs_width;
Marek Vasut901dc362015-07-13 02:48:34 +02001974 const u32 correct_mask = write ? param->write_correct_mask :
1975 param->read_correct_mask;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02001976 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
1977 rwcfg->mem_dq_per_read_dqs;
Marek Vasut901dc362015-07-13 02:48:34 +02001978 u32 ret;
1979 /*
1980 * Stop searching when the read test doesn't pass AND when
1981 * we've seen a passing read on every bit.
1982 */
1983 if (write) { /* WRITE-ONLY */
1984 ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1985 0, PASS_ONE_BIT,
1986 bit_chk, 0);
1987 } else if (use_read_test) { /* READ-ONLY */
1988 ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
1989 NUM_READ_PB_TESTS,
1990 PASS_ONE_BIT, bit_chk,
1991 0, 0);
1992 } else { /* READ-ONLY */
1993 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
1994 PASS_ONE_BIT, bit_chk, 0);
1995 *bit_chk = *bit_chk >> (per_dqs *
1996 (read_group - (write_group * ratio)));
1997 ret = (*bit_chk == 0);
1998 }
1999 *sticky_bit_chk = *sticky_bit_chk | *bit_chk;
2000 ret = ret && (*sticky_bit_chk == correct_mask);
2001 debug_cond(DLEVEL == 2,
2002 "%s:%d center(left): dtap=%u => %u == %u && %u",
2003 __func__, __LINE__, d,
2004 *sticky_bit_chk, correct_mask, ret);
2005 return ret;
2006}
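
/*
 * Editorial note: a minimal model (hypothetical helper name) of the
 * stop condition computed above. The edge search ends once the current
 * test fails while the sticky mask has already accumulated a pass for
 * every bit of the correct mask:
 */
static u32 edge_search_done(const u32 test_passed, const u32 bit_chk,
			    u32 *sticky_bit_chk, const u32 correct_mask)
{
	*sticky_bit_chk |= bit_chk;

	return !test_passed && (*sticky_bit_chk == correct_mask);
}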
2007
2008/**
Marek Vasut71120772015-07-13 02:38:15 +02002009 * search_left_edge() - Find left edge of DQ/DQS working phase
2010 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2011 * @rank_bgn: Rank number
2012 * @write_group: Write Group
2013 * @read_group: Read Group
2014 * @test_bgn: Rank number to begin the test
Marek Vasut71120772015-07-13 02:38:15 +02002015 * @sticky_bit_chk: Resulting sticky bit mask after the test
2016 * @left_edge: Left edge of the DQ/DQS phase
2017 * @right_edge: Right edge of the DQ/DQS phase
2018 * @use_read_test: Perform read test
2019 *
2020 * Find left edge of DQ/DQS working phase.
2021 */
2022static void search_left_edge(const int write, const int rank_bgn,
2023 const u32 write_group, const u32 read_group, const u32 test_bgn,
Marek Vasut0c4be192015-07-18 20:34:00 +02002024 u32 *sticky_bit_chk,
Marek Vasut71120772015-07-13 02:38:15 +02002025 int *left_edge, int *right_edge, const u32 use_read_test)
2026{
Marek Vasut139823e2015-08-02 19:47:01 +02002027 const u32 delay_max = write ? iocfg->io_out1_delay_max :
2028 iocfg->io_in_delay_max;
2029 const u32 dqs_max = write ? iocfg->io_out1_delay_max :
2030 iocfg->dqs_in_delay_max;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02002031 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2032 rwcfg->mem_dq_per_read_dqs;
Marek Vasut0c4be192015-07-18 20:34:00 +02002033 u32 stop, bit_chk;
Marek Vasut71120772015-07-13 02:38:15 +02002034 int i, d;
2035
2036 for (d = 0; d <= dqs_max; d++) {
2037 if (write)
2038 scc_mgr_apply_group_dq_out1_delay(d);
2039 else
2040 scc_mgr_apply_group_dq_in_delay(test_bgn, d);
2041
2042 writel(0, &sdr_scc_mgr->update);
2043
Marek Vasut901dc362015-07-13 02:48:34 +02002044 stop = search_stop_check(write, d, rank_bgn, write_group,
Marek Vasut0c4be192015-07-18 20:34:00 +02002045 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut901dc362015-07-13 02:48:34 +02002046 use_read_test);
Marek Vasut71120772015-07-13 02:38:15 +02002047 if (stop == 1)
2048 break;
2049
2050 /* stop != 1 */
2051 for (i = 0; i < per_dqs; i++) {
Marek Vasut0c4be192015-07-18 20:34:00 +02002052 if (bit_chk & 1) {
Marek Vasut71120772015-07-13 02:38:15 +02002053 /*
2054 * Remember a passing test as
2055 * the left_edge.
2056 */
2057 left_edge[i] = d;
2058 } else {
2059 /*
2060 * If a left edge has not been seen
2061 * yet, then a future passing test
2062 * will mark this edge as the right
2063 * edge.
2064 */
2065 if (left_edge[i] == delay_max + 1)
2066 right_edge[i] = -(d + 1);
2067 }
Marek Vasut0c4be192015-07-18 20:34:00 +02002068 bit_chk >>= 1;
Marek Vasut71120772015-07-13 02:38:15 +02002069 }
2070 }
2071
2072 /* Reset DQ delay chains to 0 */
2073 if (write)
2074 scc_mgr_apply_group_dq_out1_delay(0);
2075 else
2076 scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2077
2078 *sticky_bit_chk = 0;
2079 for (i = per_dqs - 1; i >= 0; i--) {
2080 debug_cond(DLEVEL == 2,
2081 "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2082 __func__, __LINE__, i, left_edge[i],
2083 i, right_edge[i]);
2084
2085 /*
2086 * Check for cases where we haven't found the left edge,
2087 * which makes our assignment of the right edge invalid.
2088 * Reset it to the illegal value.
2089 */
2090 if ((left_edge[i] == delay_max + 1) &&
2091 (right_edge[i] != delay_max + 1)) {
2092 right_edge[i] = delay_max + 1;
2093 debug_cond(DLEVEL == 2,
2094 "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
2095 __func__, __LINE__, i, right_edge[i]);
2096 }
2097
2098 /*
2099 * Reset sticky bit
2100 * READ: except for bits where we have seen both
2101 * the left and right edge.
2102 * WRITE: except for bits where we have seen the
2103 * left edge.
2104 */
2105 *sticky_bit_chk <<= 1;
2106 if (write) {
2107 if (left_edge[i] != delay_max + 1)
2108 *sticky_bit_chk |= 1;
2109 } else {
2110 if ((left_edge[i] != delay_max + 1) &&
2111 (right_edge[i] != delay_max + 1))
2112 *sticky_bit_chk |= 1;
2113 }
2114 }
Marek Vasut71120772015-07-13 02:38:15 +02002115}
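
/*
 * Editorial note: the -(d + 1) encoding in search_left_edge() deserves
 * a short example. Suppose delay_max = 31 and bit 3 fails at d = 2
 * with no pass seen yet: right_edge[3] is provisionally set to
 * -(2 + 1) = -3, recording "still failing at delay 2". If bit 3 then
 * passes at d = 5, left_edge[3] = 5. If no pass is ever seen,
 * left_edge[3] stays at the illegal marker delay_max + 1 = 32 and the
 * provisional right edge is reset to 32 by the cleanup loop above.
 * A hypothetical per-bit restatement of the scan rule:
 */
static void note_left_scan(const int passed, const int d,
			   const int delay_max,
			   int *left_edge, int *right_edge)
{
	if (passed) {
		/* The latest passing delay becomes the left edge. */
		*left_edge = d;
	} else if (*left_edge == delay_max + 1) {
		/* Failing before any pass: provisional right edge. */
		*right_edge = -(d + 1);
	}
}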
2116
2117/**
Marek Vasutc4907892015-07-13 02:11:02 +02002118 * search_right_edge() - Find right edge of DQ/DQS working phase
2119 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2120 * @rank_bgn: Rank number
2121 * @write_group: Write Group
2122 * @read_group: Read Group
2123 * @start_dqs: DQS start phase
2124 * @start_dqs_en: DQS enable start phase
Marek Vasutc4907892015-07-13 02:11:02 +02002125 * @sticky_bit_chk: Resulting sticky bit mask after the test
2126 * @left_edge: Left edge of the DQ/DQS phase
2127 * @right_edge: Right edge of the DQ/DQS phase
2128 * @use_read_test: Perform read test
2129 *
2130 * Find right edge of DQ/DQS working phase.
2131 */
2132static int search_right_edge(const int write, const int rank_bgn,
2133 const u32 write_group, const u32 read_group,
2134 const int start_dqs, const int start_dqs_en,
Marek Vasut0c4be192015-07-18 20:34:00 +02002135 u32 *sticky_bit_chk,
Marek Vasutc4907892015-07-13 02:11:02 +02002136 int *left_edge, int *right_edge, const u32 use_read_test)
2137{
Marek Vasut139823e2015-08-02 19:47:01 +02002138 const u32 delay_max = write ? iocfg->io_out1_delay_max :
2139 iocfg->io_in_delay_max;
2140 const u32 dqs_max = write ? iocfg->io_out1_delay_max :
2141 iocfg->dqs_in_delay_max;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02002142 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2143 rwcfg->mem_dq_per_read_dqs;
Marek Vasut0c4be192015-07-18 20:34:00 +02002144 u32 stop, bit_chk;
Marek Vasutc4907892015-07-13 02:11:02 +02002145 int i, d;
2146
2147 for (d = 0; d <= dqs_max - start_dqs; d++) {
2148 if (write) { /* WRITE-ONLY */
2149 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2150 d + start_dqs);
2151 } else { /* READ-ONLY */
2152 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
Marek Vasut160695d2015-08-02 19:10:58 +02002153 if (iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasut5ded7322015-08-02 19:42:26 +02002154 u32 delay = d + start_dqs_en;
Marek Vasut160695d2015-08-02 19:10:58 +02002155 if (delay > iocfg->dqs_en_delay_max)
2156 delay = iocfg->dqs_en_delay_max;
Marek Vasutc4907892015-07-13 02:11:02 +02002157 scc_mgr_set_dqs_en_delay(read_group, delay);
2158 }
2159 scc_mgr_load_dqs(read_group);
2160 }
2161
2162 writel(0, &sdr_scc_mgr->update);
2163
Marek Vasut901dc362015-07-13 02:48:34 +02002164 stop = search_stop_check(write, d, rank_bgn, write_group,
Marek Vasut0c4be192015-07-18 20:34:00 +02002165 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut901dc362015-07-13 02:48:34 +02002166 use_read_test);
Marek Vasutc4907892015-07-13 02:11:02 +02002167 if (stop == 1) {
2168 if (write && (d == 0)) { /* WRITE-ONLY */
Marek Vasut139823e2015-08-02 19:47:01 +02002169 for (i = 0; i < rwcfg->mem_dq_per_write_dqs;
2170 i++) {
Marek Vasutc4907892015-07-13 02:11:02 +02002171 /*
2172 * d = 0 failed, but it passed when
2173 * testing the left edge, so it must be
2174 * marginal, set it to -1
2175 */
2176 if (right_edge[i] == delay_max + 1 &&
2177 left_edge[i] != delay_max + 1)
2178 right_edge[i] = -1;
2179 }
2180 }
2181 break;
2182 }
2183
2184 /* stop != 1 */
2185 for (i = 0; i < per_dqs; i++) {
Marek Vasut0c4be192015-07-18 20:34:00 +02002186 if (bit_chk & 1) {
Marek Vasutc4907892015-07-13 02:11:02 +02002187 /*
2188 * Remember a passing test as
2189 * the right_edge.
2190 */
2191 right_edge[i] = d;
2192 } else {
2193 if (d != 0) {
2194 /*
2195 * If a right edge has not
2196 * been seen yet, then a future
2197 * passing test will mark this
2198 * edge as the left edge.
2199 */
2200 if (right_edge[i] == delay_max + 1)
2201 left_edge[i] = -(d + 1);
2202 } else {
2203 /*
2204 * d = 0 failed, but it passed
2205 * when testing the left edge,
2206 * so it must be marginal, set
2207 * it to -1
2208 */
2209 if (right_edge[i] == delay_max + 1 &&
2210 left_edge[i] != delay_max + 1)
2211 right_edge[i] = -1;
2212 /*
2213 * If a right edge has not been
2214 * seen yet, then a future
2215 * passing test will mark this
2216 * edge as the left edge.
2217 */
2218 else if (right_edge[i] == delay_max + 1)
2219 left_edge[i] = -(d + 1);
2220 }
2221 }
2222
2223 debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ",
2224 __func__, __LINE__, d);
2225 debug_cond(DLEVEL == 2,
2226 "bit_chk_test=%i left_edge[%u]: %d ",
Marek Vasut0c4be192015-07-18 20:34:00 +02002227 bit_chk & 1, i, left_edge[i]);
Marek Vasutc4907892015-07-13 02:11:02 +02002228 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2229 right_edge[i]);
Marek Vasut0c4be192015-07-18 20:34:00 +02002230 bit_chk >>= 1;
Marek Vasutc4907892015-07-13 02:11:02 +02002231 }
2232 }
2233
2234 /* Check that all bits have a window */
2235 for (i = 0; i < per_dqs; i++) {
2236 debug_cond(DLEVEL == 2,
2237 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2238 __func__, __LINE__, i, left_edge[i],
2239 i, right_edge[i]);
2240 if ((left_edge[i] == dqs_max + 1) ||
2241 (right_edge[i] == dqs_max + 1))
2242 return i + 1; /* FIXME: If we fail, retval > 0 */
2243 }
2244
2245 return 0;
2246}
2247
Marek Vasutafb3eb82015-07-18 19:18:06 +02002248/**
2249 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2250 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2251 * @left_edge: Left edge of the DQ/DQS phase
2252 * @right_edge: Right edge of the DQ/DQS phase
2253 * @mid_min: Best DQ/DQS phase middle setting
2254 *
2255 * Find index and value of the middle of the DQ/DQS working phase.
2256 */
2257static int get_window_mid_index(const int write, int *left_edge,
2258 int *right_edge, int *mid_min)
2259{
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02002260 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2261 rwcfg->mem_dq_per_read_dqs;
Marek Vasutafb3eb82015-07-18 19:18:06 +02002262 int i, mid, min_index;
2263
2264 /* Find middle of window for each DQ bit */
2265 *mid_min = left_edge[0] - right_edge[0];
2266 min_index = 0;
2267 for (i = 1; i < per_dqs; i++) {
2268 mid = left_edge[i] - right_edge[i];
2269 if (mid < *mid_min) {
2270 *mid_min = mid;
2271 min_index = i;
2272 }
2273 }
2274
2275 /*
2276 * -mid_min/2 represents the amount that we need to move DQS.
2277 * If mid_min is odd and positive we'll need to add one to make
2278 * sure the rounding in further calculations is correct (always
2279 * bias to the right), so just add 1 for all positive values.
2280 */
2281 if (*mid_min > 0)
2282 (*mid_min)++;
2283 *mid_min = *mid_min / 2;
2284
2285 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
2286 __func__, __LINE__, *mid_min, min_index);
2287 return min_index;
2288}
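
/*
 * Editorial note: a worked example of the bias in
 * get_window_mid_index(). With left_edge = 7 and right_edge = 2 for
 * the most constrained bit, mid_min = 5; being positive it is bumped
 * to 6 and then halved to 3, so the DQS shift always rounds toward
 * the right edge. A hypothetical restatement:
 */
static int bias_mid_min(int mid_min)
{
	/* Bias positive values right before dividing. */
	if (mid_min > 0)
		mid_min++;

	return mid_min / 2;
}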
2289
Marek Vasutffb8b662015-07-18 19:46:26 +02002290/**
2291 * center_dq_windows() - Center the DQ/DQS windows
2292 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2293 * @left_edge: Left edge of the DQ/DQS phase
2294 * @right_edge: Right edge of the DQ/DQS phase
2295 * @mid_min: Adjusted DQ/DQS phase middle setting
2296 * @orig_mid_min: Original DQ/DQS phase middle setting
2297 * @min_index: DQ/DQS phase middle setting index
2298 * @test_bgn: Rank number to begin the test
2299 * @dq_margin: Amount of shift for the DQ
2300 * @dqs_margin: Amount of shift for the DQS
2301 *
2302 * Align the DQ/DQS windows in each group.
2303 */
2304static void center_dq_windows(const int write, int *left_edge, int *right_edge,
2305 const int mid_min, const int orig_mid_min,
2306 const int min_index, const int test_bgn,
2307 int *dq_margin, int *dqs_margin)
2308{
Marek Vasut139823e2015-08-02 19:47:01 +02002309 const u32 delay_max = write ? iocfg->io_out1_delay_max :
2310 iocfg->io_in_delay_max;
Marek Vasut1fa0c8c2015-08-02 18:44:06 +02002311 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2312 rwcfg->mem_dq_per_read_dqs;
Marek Vasutffb8b662015-07-18 19:46:26 +02002313 const u32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
2314 SCC_MGR_IO_IN_DELAY_OFFSET;
2315 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
2316
2317 u32 temp_dq_io_delay1, temp_dq_io_delay2;
2318 int shift_dq, i, p;
2319
2320 /* Initialize data for export structures */
2321 *dqs_margin = delay_max + 1;
2322 *dq_margin = delay_max + 1;
2323
2324 /* add delay to bring centre of all DQ windows to the same "level" */
2325 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2326 /* Use values before divide by 2 to reduce round off error */
2327 shift_dq = (left_edge[i] - right_edge[i] -
2328 (left_edge[min_index] - right_edge[min_index]))/2 +
2329 (orig_mid_min - mid_min);
2330
2331 debug_cond(DLEVEL == 2,
2332 "vfifo_center: before: shift_dq[%u]=%d\n",
2333 i, shift_dq);
2334
2335 temp_dq_io_delay1 = readl(addr + (p << 2));
2336 temp_dq_io_delay2 = readl(addr + (i << 2));
2337
2338 if (shift_dq + temp_dq_io_delay1 > delay_max)
2339 shift_dq = delay_max - temp_dq_io_delay2;
2340 else if (shift_dq + temp_dq_io_delay1 < 0)
2341 shift_dq = -temp_dq_io_delay1;
2342
2343 debug_cond(DLEVEL == 2,
2344 "vfifo_center: after: shift_dq[%u]=%d\n",
2345 i, shift_dq);
2346
2347 if (write)
Marek Vasut139823e2015-08-02 19:47:01 +02002348 scc_mgr_set_dq_out1_delay(i,
2349 temp_dq_io_delay1 + shift_dq);
Marek Vasutffb8b662015-07-18 19:46:26 +02002350 else
Marek Vasut139823e2015-08-02 19:47:01 +02002351 scc_mgr_set_dq_in_delay(p,
2352 temp_dq_io_delay1 + shift_dq);
Marek Vasutffb8b662015-07-18 19:46:26 +02002353
2354 scc_mgr_load_dq(p);
2355
2356 debug_cond(DLEVEL == 2,
2357 "vfifo_center: margin[%u]=[%d,%d]\n", i,
2358 left_edge[i] - shift_dq + (-mid_min),
2359 right_edge[i] + shift_dq - (-mid_min));
2360
2361 /* To determine values for export structures */
2362 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2363 *dq_margin = left_edge[i] - shift_dq + (-mid_min);
2364
2365 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2366 *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2367 }
Marek Vasutffb8b662015-07-18 19:46:26 +02002368}
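
/*
 * Editorial note: the per-bit shift in center_dq_windows() is clamped
 * so the resulting delay-chain setting stays within [0, delay_max].
 * A hypothetical helper capturing just that clamp, assuming a single
 * current delay value (the loop above reads two registers):
 */
static int clamp_shift_dq(int shift_dq, const int cur_delay,
			  const int delay_max)
{
	if (shift_dq + cur_delay > delay_max)
		shift_dq = delay_max - cur_delay;
	else if (shift_dq + cur_delay < 0)
		shift_dq = -cur_delay;

	return shift_dq;
}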

/**
 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
 * @rank_bgn:		Rank number
 * @rw_group:		Read/Write Group
 * @test_bgn:		Rank at which the test begins
 * @use_read_test:	Perform a read test
 * @update_fom:		Update FOM
 *
 * Per-bit deskew DQ and centering.
 */
static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
			const u32 rw_group, const u32 test_bgn,
			const int use_read_test, const int update_fom)
{
	const u32 addr =
		SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
		(rw_group << 2);
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	u32 sticky_bit_chk;
	int32_t left_edge[rwcfg->mem_dq_per_read_dqs];
	int32_t right_edge[rwcfg->mem_dq_per_read_dqs];
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
	int32_t dq_margin, dqs_margin;
	int i, min_index;
	int ret;

	debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);

	start_dqs = readl(addr);
	if (iocfg->shift_dqs_en_when_shift_dqs)
		start_dqs_en = readl(addr - iocfg->dqs_en_delay_offset);

	/*
	 * Set the left and right edge of each bit to an illegal value;
	 * use (iocfg->io_in_delay_max + 1) as the illegal value.
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < rwcfg->mem_dq_per_read_dqs; i++) {
		left_edge[i] = iocfg->io_in_delay_max + 1;
		right_edge[i] = iocfg->io_in_delay_max + 1;
	}

	/* Search for the left edge of the window for each bit. */
	search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
			 &sticky_bit_chk,
			 left_edge, right_edge, use_read_test);

	/* Search for the right edge of the window for each bit. */
	ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
				start_dqs, start_dqs_en,
				&sticky_bit_chk,
				left_edge, right_edge, use_read_test);
	if (ret) {
		/*
		 * Restore delay chain settings before letting the loop
		 * in rw_mgr_mem_calibrate_vfifo retry different
		 * dqs/ck relationships.
		 */
		scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
		if (iocfg->shift_dqs_en_when_shift_dqs)
			scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);

		scc_mgr_load_dqs(rw_group);
		writel(0, &sdr_scc_mgr->update);

		debug_cond(DLEVEL == 1,
			   "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
			   __func__, __LINE__, i, left_edge[i], right_edge[i]);
		if (use_read_test) {
			set_failing_group_stage(rw_group *
				rwcfg->mem_dq_per_read_dqs + i,
				CAL_STAGE_VFIFO,
				CAL_SUBSTAGE_VFIFO_CENTER);
		} else {
			set_failing_group_stage(rw_group *
				rwcfg->mem_dq_per_read_dqs + i,
				CAL_STAGE_VFIFO_AFTER_WRITES,
				CAL_SUBSTAGE_VFIFO_CENTER);
		}
		return -EIO;
	}

	min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);

	/* Determine the amount we can change DQS (which is -mid_min). */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > iocfg->dqs_in_delay_max)
		new_dqs = iocfg->dqs_in_delay_max;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (iocfg->shift_dqs_en_when_shift_dqs) {
		if (start_dqs_en - mid_min > iocfg->dqs_en_delay_max)
			mid_min += start_dqs_en - mid_min -
				   iocfg->dqs_en_delay_max;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;
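
	/*
	 * Illustrative reading of the above (hypothetical numbers): if
	 * start_dqs = 20 and the window centres sit 5 taps late
	 * (mid_min = 5), then new_dqs = 15, i.e. DQS moves by -mid_min
	 * while center_dq_windows() below absorbs the per-bit residue,
	 * so each DQ/DQS window ends up centred. The clamps above only
	 * cap the move to what the DQS delay chains can absorb.
	 */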

	debug_cond(DLEVEL == 1,
		   "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
		   start_dqs,
		   iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
		   new_dqs, mid_min);

	/* Add delay to bring centre of all DQ windows to the same "level". */
	center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
			  min_index, test_bgn, &dq_margin, &dqs_margin);

	/* Move DQS-en */
	if (iocfg->shift_dqs_en_when_shift_dqs) {
		final_dqs_en = start_dqs_en - mid_min;
		scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
		scc_mgr_load_dqs(rw_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
	scc_mgr_load_dqs(rw_group);
	debug_cond(DLEVEL == 2,
		   "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
		   __func__, __LINE__, dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	if ((dq_margin < 0) || (dqs_margin < 0))
		return -EINVAL;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
 * @rw_group:	Read/Write Group
 * @phase:	DQ/DQS phase
 *
 * Because initially no communication can be reliably performed with the
 * memory device, the sequencer uses a guaranteed write mechanism to write
 * data into the memory device.
 */
static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
						 const u32 phase)
{
	int ret;

	/* Set a particular DQ/DQS phase. */
	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);

	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
		   __func__, __LINE__, rw_group, phase);

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
	 * Load up the patterns used by read calibration using the
	 * current DQDQS phase.
	 */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);

	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
		return 0;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
	 * Back-to-back reads of the patterns used for calibration.
	 */
	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
	if (ret)
		debug_cond(DLEVEL == 1,
			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
			   __func__, __LINE__, rw_group, phase);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * DQS enable calibration ensures reliable capture of the DQ signal without
 * glitches on the DQS line.
 */
static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
						       const u32 test_bgn)
{
	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
	 * DQS and DQS Enable Signal Relationships.
	 */

	/* We start at zero, so have one less dq to divide among. */
	const u32 delay_step = iocfg->io_in_delay_max /
			       (rwcfg->mem_dq_per_read_dqs - 1);
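	/*
	 * Illustrative example (hypothetical limits): with
	 * io_in_delay_max = 31 and 8 DQ bits per read DQS, integer
	 * division gives delay_step = 31 / 7 = 4, so the loop below
	 * fans the per-bit input delays out as d = 0, 4, 8, ...
	 */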
	int ret;
	u32 i, p, d, r;

	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);

	/* Try different dq_in_delays since the DQ path is shorter than DQS. */
	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0;
		     i < rwcfg->mem_dq_per_read_dqs;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1,
				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
				   __func__, __LINE__, rw_group, r, i, p, d);

			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}

		writel(0, &sdr_scc_mgr->update);
	}

	/*
	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
	 * dq_in_delay values.
	 */
	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);

	debug_cond(DLEVEL == 1,
		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
		   __func__, __LINE__, rw_group, !ret);

	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
		writel(0, &sdr_scc_mgr->update);
	}

	return ret;
}

/**
 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
 * @rw_group:		Read/Write Group
 * @test_bgn:		Rank at which the test begins
 * @use_read_test:	Perform a read test
 * @update_fom:		Update FOM
 *
 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
 * within a group.
 */
static int
rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
				      const int use_read_test,
				      const int update_fom)
{
	int ret, grp_calibrated;
	u32 rank_bgn, sr;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
	 * Read per-bit deskew can be done on a per shadow register basis.
	 */
	grp_calibrated = 1;
	for (rank_bgn = 0, sr = 0;
	     rank_bgn < rwcfg->mem_number_of_ranks;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
							test_bgn,
							use_read_test,
							update_fom);
		if (!ret)
			continue;

		grp_calibrated = 0;
	}

	if (!grp_calibrated)
		return -EIO;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 1: Calibrate the read valid prediction FIFO.
 *
 * This function implements UniPHY calibration Stage 1, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 *
 * - read valid prediction will consist of finding:
 *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
 *   - DQS input phase and DQS input delay (DQ/DQS Centering)
 * - we also do a per-bit deskew on the DQ lines.
 */
static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
{
	u32 p, d;
	u32 dtaps_per_ptap;
	u32 failed_substage;

	int ret;

	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims. */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);

	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	/* Determine the number of delay taps for each phase tap. */
	dtaps_per_ptap = DIV_ROUND_UP(iocfg->delay_per_opa_tap,
				      iocfg->delay_per_dqs_en_dchain_tap) - 1;
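	/*
	 * Illustrative example (hypothetical tap delays): with
	 * delay_per_opa_tap = 2500 ps and
	 * delay_per_dqs_en_dchain_tap = 300 ps, this yields
	 * DIV_ROUND_UP(2500, 300) - 1 = 9 - 1 = 8, i.e. roughly eight
	 * delay-chain taps span one phase tap.
	 */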

	for (d = 0; d <= dtaps_per_ptap; d += 2) {
		/*
		 * In RLDRAMX we may be messing the delay of pins in
		 * the same write group but outside of the current read
		 * group, but that's ok because we haven't calibrated the
		 * output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks(
					rw_group, d);
		}

		for (p = 0; p <= iocfg->dqdqs_out_phase_max; p++) {
			/* 1) Guaranteed Write */
			ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
			if (ret)
				break;

			/* 2) DQS Enable Calibration */
			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
									  test_bgn);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
				continue;
			}

			/* 3) Centering DQ/DQS */
			/*
			 * If doing read after write calibration, do not update
			 * FOM now. Do it then.
			 */
			ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
								    test_bgn, 1, 0);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
				continue;
			}

			/* All done. */
			goto cal_done_ok;
		}
	}

	/* Calibration Stage 1 failed. */
	set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
	return 0;

	/* Calibration Stage 1 completed OK. */
cal_done_ok:
	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because the loop increases d even on a pass in
	 * the first case).
	 */
	if (d > 2)
		scc_mgr_zero_group(rw_group, 1);

	return 1;
}

/**
 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 3: DQ/DQS Centering.
 *
 * This function implements UniPHY calibration Stage 3, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 */
static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
					  const u32 test_bgn)
{
	int ret;

	debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims. */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
	if (ret)
		set_failing_group_stage(rw_group,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
 *
 * Stage 4: Minimize latency.
 *
 * This function implements UniPHY calibration Stage 4, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 * Calibrate LFIFO to find the smallest read latency.
 */
static u32 rw_mgr_mem_calibrate_lfifo(void)
{
	int found_one = 0;

	debug("%s:%d\n", __func__, __LINE__);

	/* Update info for sims. */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks. */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);

	do {
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, gbl->curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
							      PASS_ALL_BITS, 1))
			break;

		found_one = 1;
		/*
		 * Reduce read latency and see if things are
		 * working correctly.
		 */
		gbl->curr_read_lat--;
	} while (gbl->curr_read_lat > 0);

	/* Reset the FIFOs to get pointers to known state. */
	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* Add a fudge factor to the read latency that was determined. */
		gbl->curr_read_lat += 2;
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2,
			   "%s:%d lfifo: success: using read_lat=%u\n",
			   __func__, __LINE__, gbl->curr_read_lat);
	} else {
		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL == 2,
			   "%s:%d lfifo: failed at initial read_lat=%u\n",
			   __func__, __LINE__, gbl->curr_read_lat);
	}

	return found_one;
}

/**
 * search_window() - Search for the window (or part of it) with DM/DQS shift
 * @search_dm:		If 1, search for the DM shift, if 0, search for DQS shift
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @bgn_curr:		Current window begin
 * @end_curr:		Current window end
 * @bgn_best:		Current best window begin
 * @end_best:		Current best window end
 * @win_best:		Size of the best window
 * @new_dqs:		New DQS value (only applicable if search_dm = 0).
 *
 * Search for the window (or part of it) with DM/DQS shift.
 */
static void search_window(const int search_dm,
			  const u32 rank_bgn, const u32 write_group,
			  int *bgn_curr, int *end_curr, int *bgn_best,
			  int *end_best, int *win_best, int new_dqs)
{
	u32 bit_chk;
	const int max = iocfg->io_out1_delay_max - new_dqs;
	int d, di;

	/* Search for the window (or part of it) with DM/DQS shift. */
	for (di = max; di >= 0; di -= DELTA_D) {
		if (search_dm) {
			d = di;
			scc_mgr_apply_group_dm_out1_delay(d);
		} else {
			/* For DQS, we go from 0...max. */
			d = max - di;
			/*
			 * Note: this only shifts DQS, so we may be
			 * unnecessarily limiting ourselves to the width
			 * of DQ.
			 */
			scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
								d + new_dqs);
		}

		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window. */
			*end_curr = search_dm ? -d : d;

			/*
			 * If a starting edge of our window has not been seen,
			 * this is our current start of the DM window.
			 */
			if (*bgn_curr == iocfg->io_out1_delay_max + 1)
				*bgn_curr = search_dm ? -d : d;

			/*
			 * If the current window is bigger than the best seen,
			 * set the best seen to be the current window.
			 */
			if ((*end_curr - *bgn_curr + 1) > *win_best) {
				*win_best = *end_curr - *bgn_curr + 1;
				*bgn_best = *bgn_curr;
				*end_best = *end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge. */
			*bgn_curr = iocfg->io_out1_delay_max + 1;
			*end_curr = iocfg->io_out1_delay_max + 1;

			/* Early exit is only applicable to DQS. */
			if (search_dm)
				continue;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window
			 * already seen, we can exit.
			 */
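			/*
			 * Illustrative check (hypothetical numbers): with
			 * io_out1_delay_max = 31, new_dqs = 4 and
			 * win_best = 12, the condition below fires once
			 * d > 16, because at most 27 - d further taps
			 * remain, so no window larger than the current
			 * best can still be found.
			 */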
			if (*win_best - 1 > iocfg->io_out1_delay_max - new_dqs - d)
				break;
		}
	}
}

/**
 * rw_mgr_mem_calibrate_writes_center() - Center all windows
 * @rank_bgn:		Rank number
 * @write_group:	Write group
 * @test_bgn:		Rank at which the test begins
 *
 * Center all windows. Do per-bit-deskew to possibly increase size of
 * certain windows.
 */
static int
rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
				   const u32 test_bgn)
{
	int i;
	u32 sticky_bit_chk;
	u32 min_index;
	int left_edge[rwcfg->mem_dq_per_write_dqs];
	int right_edge[rwcfg->mem_dq_per_write_dqs];
	int mid;
	int mid_min, orig_mid_min;
	int new_dqs, start_dqs;
	int dq_margin, dqs_margin, dm_margin;
	int bgn_curr = iocfg->io_out1_delay_max + 1;
	int end_curr = iocfg->io_out1_delay_max + 1;
	int bgn_best = iocfg->io_out1_delay_max + 1;
	int end_best = iocfg->io_out1_delay_max + 1;
	int win_best = 0;

	int ret;

	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);

	dm_margin = 0;

	start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
			   SCC_MGR_IO_OUT1_DELAY_OFFSET) +
			  (rwcfg->mem_dq_per_write_dqs << 2));

	/* Per-bit deskew. */

	/*
	 * Set the left and right edge of each bit to an illegal value.
	 * Use (iocfg->io_out1_delay_max + 1) as an illegal value.
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
		left_edge[i] = iocfg->io_out1_delay_max + 1;
		right_edge[i] = iocfg->io_out1_delay_max + 1;
	}

	/* Search for the left edge of the window for each bit. */
	search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
			 &sticky_bit_chk,
			 left_edge, right_edge, 0);

	/* Search for the right edge of the window for each bit. */
	ret = search_right_edge(1, rank_bgn, write_group, 0,
				start_dqs, 0,
				&sticky_bit_chk,
				left_edge, right_edge, 0);
	if (ret) {
		set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return -EINVAL;
	}

	min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min);

	/* Determine the amount we can change DQS (which is -mid_min). */
	orig_mid_min = mid_min;
	new_dqs = start_dqs;
	mid_min = 0;
	debug_cond(DLEVEL == 1,
		   "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
		   __func__, __LINE__, start_dqs, new_dqs, mid_min);

	/* Add delay to bring centre of all DQ windows to the same "level". */
	center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min,
			  min_index, 0, &dq_margin, &dqs_margin);

	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);

	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * Set the left and right edge of each bit to an illegal value.
	 * Use (iocfg->io_out1_delay_max + 1) as an illegal value.
	 */
	left_edge[0] = iocfg->io_out1_delay_max + 1;
	right_edge[0] = iocfg->io_out1_delay_max + 1;

	/* Search for the window (or part of it) with DM shift. */
	search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr,
		      &bgn_best, &end_best, &win_best, 0);

	/* Reset DM delay chains to 0. */
	scc_mgr_apply_group_dm_out1_delay(0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so we need to continue the search by shifting DQS, otherwise
	 * the DQS search begins as a new search.
	 */
	if (end_curr != 0) {
		bgn_curr = iocfg->io_out1_delay_max + 1;
		end_curr = iocfg->io_out1_delay_max + 1;
	}

	/* Search for the window (or part of it) with DQS shifts. */
	search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr,
		      &bgn_best, &end_best, &win_best, new_dqs);

	/* Assign left and right edge for cal and reporting. */
	left_edge[0] = -1 * bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n",
		   __func__, __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig). */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit. */
	mid = (left_edge[0] - right_edge[0]) / 2;
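
	/*
	 * Illustrative arithmetic (hypothetical edges): if the best DM
	 * window came out as bgn_best = -10 and end_best = -2, then
	 * left_edge[0] = 10, right_edge[0] = -2, and
	 * mid = (10 - (-2)) / 2 = 6, which yields dm_margin = 10 - 6 = 4
	 * below.
	 */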

	/* Only move right, since we are not moving DQS/DQ. */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window. */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2,
		   "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
		   __func__, __LINE__, left_edge[0], right_edge[0],
		   mid, dm_margin);
	/* Export values. */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2,
		   "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
		   __func__, __LINE__, dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);

	if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
		return -EINVAL;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 2: Write Calibration Part One.
 *
 * This function implements UniPHY calibration Stage 2, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 */
static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
				       const u32 test_bgn)
{
	int ret;

	/* Update info for sims. */
	debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);

	reg_file_set_group(group);
	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
	if (ret)
		set_failing_group_stage(group, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);

	return ret;
}

/**
 * mem_precharge_and_activate() - Precharge all banks and activate
 *
 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
 */
static void mem_precharge_and_activate(void)
{
	int r;

	for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
		/* Set rank. */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(rwcfg->activate_0_and_1_wait1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(rwcfg->activate_0_and_1_wait2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Activate rows. */
		writel(rwcfg->activate_0_and_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}

/**
 * mem_init_latency() - Configure memory RLAT and WLAT settings
 *
 * Configure memory RLAT and WLAT parameters.
 */
static void mem_init_latency(void)
{
	/*
	 * For AV/CV, LFIFO is hardened and always runs at full rate
	 * so max latency in AFI clocks, used here, is correspondingly
	 * smaller.
	 */
	const u32 max_latency = (1 << misccfg->max_latency_count_width) - 1;
	u32 rlat, wlat;

	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Read in write latency.
	 * WL for Hard PHY does not include additive latency.
	 */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	gbl->rw_wl_nop_cycles = wlat - 1;
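
	/*
	 * Illustrative example (hypothetical register values): if
	 * t_wl_add reads 5 AFI clocks and mem_t_add reads 2, then
	 * wlat = 7 and rw_wl_nop_cycles = 6.
	 */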

	/* Read in read latency. */
	rlat = readl(&data_mgr->t_rl_add);

	/* Set a pretty high read latency initially. */
	gbl->curr_read_lat = rlat + 16;
	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	writel(wlat, &phy_mgr_cfg->afi_wlat);
}

/**
 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
 *
 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
 */
static void mem_skip_calibrate(void)
{
	u32 vfifo_offset;
	u32 i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface. */
	for (r = 0; r < rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
			if (iocfg->dll_chain_length == 6)
				scc_mgr_set_dqdqs_output_phase(i, 6);
			else
				scc_mgr_set_dqdqs_output_phase(i, 7);
			/*
			 * Case:33398
			 *
			 * Write data arrives to the I/O two cycles before write
			 * latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg.
			 * The minimum ptap of the OPA is 180 deg.
			 * Each ptap has (360 / iocfg->dll_chain_length) deg of
			 * delay. The write_clk is always delayed by 2 ptaps.
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *	(720 - 90 - 180 - 2 * (360 / iocfg->dll_chain_length))
			 *
			 * Dividing the above by (360 / iocfg->dll_chain_length)
			 * gives us the number of ptaps, which simplifies to:
			 *
			 *	(1.25 * iocfg->dll_chain_length - 2)
			 */
			scc_mgr_set_dqdqs_output_phase(i,
				((125 * iocfg->dll_chain_length) / 100) - 2);
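			/*
			 * Worked example: for a hypothetical
			 * iocfg->dll_chain_length of 8, the integer
			 * arithmetic gives (125 * 8) / 100 - 2 = 8 ptaps,
			 * matching 1.25 * 8 - 2 = 8 from the derivation
			 * above.
			 */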
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour. */
	for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = misccfg->calib_vfifo_offset;
	for (j = 0; j < vfifo_offset; j++)
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
	 * setting from the generation-time constant.
	 */
	gbl->curr_read_lat = misccfg->calib_lfifo_offset;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}

/**
 * mem_calibrate() - Memory calibration entry point.
 *
 * Perform memory calibration.
 */
static u32 mem_calibrate(void)
{
	u32 i;
	u32 rank_bgn, sr;
	u32 write_group, write_test_bgn;
	u32 read_group, read_test_bgn;
	u32 run_groups, current_run;
	u32 failing_groups = 0;
	u32 group_failed = 0;

	const u32 rwdqs_ratio = rwcfg->mem_if_read_dqs_width /
				rwcfg->mem_if_write_dqs_width;
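	/*
	 * Illustrative example (hypothetical widths): with a read DQS
	 * width of 8 and a write DQS width of 4, rwdqs_ratio = 2, so
	 * each write group below iterates over two read groups.
	 */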

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings. */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;

	/* Initialize WLAT and RLAT. */
	mem_init_latency();

	/* Initialize bit slips. */
	mem_precharge_and_activate();

	for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
		if (i == 0)
			scc_mgr_set_hhp_extras();

		scc_set_bypass_mode(i);
	}

	/* Calibration is skipped. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();

		/*
		 * Do not remove this line as it makes sure all of our
		 * decisions have been applied.
		 */
		writel(0, &sdr_scc_mgr->update);
		return 1;
	}

	/* Calibration is not skipped. */
	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
		/*
		 * Zero all delay chain/phase settings for all
		 * groups and all shadow register sets.
		 */
		scc_mgr_zero_all();

		run_groups = ~0;

		for (write_group = 0, write_test_bgn = 0; write_group
		     < rwcfg->mem_if_write_dqs_width; write_group++,
		     write_test_bgn += rwcfg->mem_dq_per_write_dqs) {
			/* Initialize the group failure. */
			group_failed = 0;

			current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
			run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;

			if (current_run == 0)
				continue;

			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
					    SCC_MGR_GROUP_COUNTER_OFFSET);
			scc_mgr_zero_group(write_group, 0);

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += rwcfg->mem_dq_per_read_dqs) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
					continue;

				/* Calibrate the VFIFO. */
				if (rw_mgr_mem_calibrate_vfifo(read_group,
							       read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags &
				      PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* Calibrate the output side. */
			for (rank_bgn = 0, sr = 0;
			     rank_bgn < rwcfg->mem_number_of_ranks;
			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				/* Not needed in quick mode! */
				if (STATIC_CALIB_STEPS &
				    CALIB_SKIP_DELAY_SWEEPS)
					continue;

				/* Calibrate WRITEs. */
				if (!rw_mgr_mem_calibrate_writes(rank_bgn,
								 write_group,
								 write_test_bgn))
					continue;

				group_failed = 1;
				if (!(gbl->phy_debug_mode_flags &
				      PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;
			}

			/* Some group failed, we're done. */
			if (group_failed)
				goto grp_failed;

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += rwcfg->mem_dq_per_read_dqs) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				if (!rw_mgr_mem_calibrate_vfifo_end(read_group,
								    read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags &
				      PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* No group failed, continue as usual. */
			continue;

grp_failed:		/* A group failed, increment the counter. */
			failing_groups++;
		}

		/*
		 * If there are any failing groups then report
		 * the failure.
		 */
		if (failing_groups != 0)
			return 0;

		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
			continue;

		/* Calibrate the LFIFO. */
		if (!rw_mgr_mem_calibrate_lfifo())
			return 0;
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}

/**
 * run_mem_calibrate() - Perform memory calibration
 *
 * This function triggers the entire memory calibration procedure.
 */
static int run_mem_calibrate(void)
{
	int pass;
	u32 ctrl_cfg;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail. */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop tracking manager. */
	ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg);
	writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
	       &sdr_ctrl->ctrl_cfg);

	phy_mgr_initialize();
	rw_mgr_mem_initialize();

	/* Perform the actual memory calibration. */
	pass = mem_calibrate();

	mem_precharge_and_activate();
	writel(0, &phy_mgr_cmd->fifo_reset);

	/* Handoff. */
	rw_mgr_mem_handoff();
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x2, &phy_mgr_cfg->mux_sel);

	/* Start tracking manager. */
	writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg);

	return pass;
}
3515
3516/**
3517 * debug_mem_calibrate() - Report result of memory calibration
3518 * @pass: Value indicating whether calibration passed or failed
3519 *
3520 * This function reports the results of the memory calibration
3521 * and writes debug information into the register file.
3522 */
3523static void debug_mem_calibrate(int pass)
3524{
Marek Vasut5ded7322015-08-02 19:42:26 +02003525 u32 debug_info;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003526
3527 if (pass) {
3528 printf("%s: CALIBRATION PASSED\n", __FILE__);
3529
		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

		/* Update the failing group/stage in the register file. */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
	}

	printf("%s: Calibration complete\n", __FILE__);
}

/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Load the instruction and AC (address/command) ROM images into the
 * RW manager.
 */
static void hc_initialize_rom_data(void)
{
	unsigned int nelem = 0;
	const u32 *rom_init;
	u32 i, addr;

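	/*
	 * Each ROM image is an array of 32-bit words; (i << 2) turns
	 * the word index into a byte offset.
	 */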
	socfpga_get_seq_inst_init(&rom_init, &nelem);
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < nelem; i++)
		writel(rom_init[i], addr + (i << 2));

	socfpga_get_seq_ac_init(&rom_init, &nelem);
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < nelem; i++)
		writel(rom_init[i], addr + (i << 2));
}

/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Write the initialization signature into the SDR register file and
 * clear its debug and status fields.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data */
	writel(misccfg->reg_file_init_seq_signature, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}

/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Program the HPS PHY control registers, including the DQS tracking
 * configuration.
 */
static void initialize_hps_phy(void)
{
	u32 reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	u32 trk_sample_count = 7500;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in the 16 LSB.
	 */
	u32 trk_long_idle_sample_count = (10 << 16) | 100;

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01-add 5 cycles, 10-add 10 cycles, 11-add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

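	/*
	 * The sample counts straddle register boundaries: the upper
	 * bits of trk_sample_count continue in phy_ctrl1, and the
	 * upper bits of trk_long_idle_sample_count in phy_ctrl2.
	 */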
	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}

/**
 * initialize_tracking() - Initialize tracking
 *
 * Initialize the register file with usable initial data.
 */
static void initialize_tracking(void)
{
	/*
	 * Initialize the register file with the correct data.
	 * Compute usable version of value in case we skip full
	 * computation later.
	 */
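	/*
	 * dtaps_per_ptap: number of delay-chain taps that span one
	 * phase tap, i.e. the ceiling ratio of the two per-tap
	 * delays, minus one.
	 */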
	writel(DIV_ROUND_UP(iocfg->delay_per_opa_tap,
			    iocfg->delay_per_dchain_tap) - 1,
	       &sdr_reg_file->dtaps_per_ptap);

	/* trk_sample_count */
	writel(7500, &sdr_reg_file->trk_sample_count);

	/* longidle outer loop [15:0] */
	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);

	/*
	 * longidle sample count [31:24]
	 * tRFC, worst case of 933 MHz 4Gb [23:16]
	 * tRCD, worst case [15:8]
	 * vfifo wait [7:0]
	 */
	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
	       &sdr_reg_file->delays);

	/* mux delay */
	writel((rwcfg->idle << 24) | (rwcfg->activate_1 << 16) |
	       (rwcfg->sgle_read << 8) | (rwcfg->precharge_all << 0),
	       &sdr_reg_file->trk_rw_mgr_addr);

	writel(rwcfg->mem_if_read_dqs_width,
	       &sdr_reg_file->trk_read_dqs_width);

	/* tREFI [7:0] */
	writel((rwcfg->refresh_all << 24) | (1000 << 0),
	       &sdr_reg_file->trk_rfsh);
}

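/**
 * sdram_calibration_full() - Execute the full SDRAM calibration
 *
 * Set up the calibration parameters and run the complete memory
 * calibration. Returns 1 if calibration passed, 0 otherwise.
 *
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	if (!sdram_calibration_full())
 *		puts("SDRAM calibration failed.\n");
 */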
int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	u32 pass;

	memset(&my_param, 0, sizeof(my_param));
	memset(&my_gbl, 0, sizeof(my_gbl));

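	/*
	 * The file-scope param/gbl pointers reference these stack
	 * locals; they are only valid for the duration of this call.
	 */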
	param = &my_param;
	gbl = &my_gbl;

	rwcfg = socfpga_get_sdram_rwmgr_config();
	iocfg = socfpga_get_sdram_io_config();
	misccfg = socfpga_get_sdram_misc_config();

	/* Set the calibration enabled by default */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default.
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file */
	initialize_reg_file();

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking();

	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   rwcfg->mem_number_of_ranks, rwcfg->mem_number_of_cs_per_dimm,
		   rwcfg->mem_dq_per_read_dqs, rwcfg->mem_dq_per_write_dqs,
		   rwcfg->mem_virtual_groups_per_read_dqs,
		   rwcfg->mem_virtual_groups_per_write_dqs);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   rwcfg->mem_if_read_dqs_width, rwcfg->mem_if_write_dqs_width,
		   rwcfg->mem_data_width, rwcfg->mem_data_mask_width,
		   iocfg->delay_per_opa_tap, iocfg->delay_per_dchain_tap);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   iocfg->delay_per_dqs_en_dchain_tap, iocfg->dll_chain_length);
	debug_cond(DLEVEL == 1,
		   "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   iocfg->dqs_en_phase_max, iocfg->dqdqs_out_phase_max,
		   iocfg->dqs_en_delay_max, iocfg->dqs_in_delay_max);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   iocfg->io_in_delay_max, iocfg->io_out1_delay_max,
		   iocfg->io_out2_delay_max);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   iocfg->dqs_in_reserve, iocfg->dqs_out_reserve);

	hc_initialize_rom_data();

	/* Update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
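	/* 0xff keeps the configured delays; 0x0 skips the delay loops. */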
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();
	debug_mem_calibrate(pass);
	return pass;
}