// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Helper utilities for qlm.
 */

#include <log.h>
#include <time.h>
#include <asm/global_data.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-mio-defs.h>
#include <mach/cvmx-pciercx-defs.h>
#include <mach/cvmx-pemx-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-rst-defs.h>
#include <mach/cvmx-sata-defs.h>
#include <mach/cvmx-sli-defs.h>
#include <mach/cvmx-sriomaintx-defs.h>
#include <mach/cvmx-sriox-defs.h>

#include <mach/cvmx-helper-jtag.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * There is a copy of this table in the bootloader QLM configuration;
 * keep both copies in sync until they are consolidated.
 */
#define R_25G_REFCLK100 0x0
#define R_5G_REFCLK100 0x1
#define R_8G_REFCLK100 0x2
#define R_125G_REFCLK15625_KX 0x3
#define R_3125G_REFCLK15625_XAUI 0x4
#define R_103125G_REFCLK15625_KR 0x5
#define R_125G_REFCLK15625_SGMII 0x6
#define R_5G_REFCLK15625_QSGMII 0x7
#define R_625G_REFCLK15625_RXAUI 0x8
#define R_25G_REFCLK125 0x9
#define R_5G_REFCLK125 0xa
#define R_8G_REFCLK125 0xb

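/*
 * Nominal reference-clock rates in Hz. These are the values that
 * cvmx_qlm_measure_clock_cn7xxx() below reports for the corresponding
 * GSER lane modes.
 */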
static const int REF_100MHZ = 100000000;
static const int REF_125MHZ = 125000000;
static const int REF_156MHZ = 156250000;

static qlm_jtag_uint32_t *__cvmx_qlm_jtag_xor_ref;

/**
 * Return the number of QLMs supported by the chip
 *
 * @return Number of QLMs
 */
int cvmx_qlm_get_num(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
		return 3;
	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		return 3;
	else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
		return 3;
	else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return 2;
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 7;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 9;
	return 0;
}
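
/*
 * Illustrative usage sketch (not called anywhere in this file): walk all
 * QLMs on the chip and report what this helper layer knows about them.
 * cvmx_qlm_get_mode() and cvmx_qlm_get_gbaud_mhz() are defined further
 * below.
 *
 *	int qlm;
 *
 *	for (qlm = 0; qlm < cvmx_qlm_get_num(); qlm++)
 *		printf("QLM%d: mode %d, %d lanes, %d Mbaud\n", qlm,
 *		       cvmx_qlm_get_mode(qlm), cvmx_qlm_get_lanes(qlm),
 *		       cvmx_qlm_get_gbaud_mhz(qlm));
 */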

/**
 * Return the QLM number for a given interface
 *
 * @param xiface interface to look up
 *
 * @return the QLM number for the xiface
 */
int cvmx_qlm_interface(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
		return (xi.interface == 0) ? 2 : 0;
	} else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		return 2 - xi.interface;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		if (xi.interface == 0)
			return 0;

		debug("Warning: %s: Invalid interface %d\n",
		      __func__, xi.interface);
	} else if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
		debug("Warning: not supported\n");
		return -1;
	}

	/* Must be CN68XX */
	switch (xi.interface) {
	case 1:
		return 0;
	default:
		return xi.interface;
	}

	return -1;
}

/**
 * Return the QLM number for a port in the interface
 *
 * @param xiface interface to look up
 * @param index index in an interface
 *
 * @return the QLM number based on the xiface
 */
int cvmx_qlm_lmac(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		cvmx_bgxx_cmr_global_config_t gconfig;
		cvmx_gserx_phy_ctl_t phy_ctl;
		cvmx_gserx_cfg_t gserx_cfg;
		int qlm;

		if (xi.interface < 6) {
			if (xi.interface < 2) {
				gconfig.u64 =
					csr_rd_node(xi.node,
						    CVMX_BGXX_CMR_GLOBAL_CONFIG(xi.interface));
				if (gconfig.s.pmux_sds_sel)
					qlm = xi.interface + 2; /* QLM 2 or 3 */
				else
					qlm = xi.interface; /* QLM 0 or 1 */
			} else {
				qlm = xi.interface + 2; /* QLM 4-7 */
			}

			/* make sure the QLM is powered up and out of reset */
			phy_ctl.u64 = csr_rd_node(xi.node, CVMX_GSERX_PHY_CTL(qlm));
			if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
				return -1;
			gserx_cfg.u64 = csr_rd_node(xi.node, CVMX_GSERX_CFG(qlm));
			if (gserx_cfg.s.bgx)
				return qlm;
			else
				return -1;
		} else if (xi.interface <= 7) { /* ILK */
			int qlm;

			for (qlm = 4; qlm < 8; qlm++) {
				/* Make sure the QLM is powered and out of reset */
				phy_ctl.u64 = csr_rd_node(xi.node, CVMX_GSERX_PHY_CTL(qlm));
				if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
					continue;
				/* Make sure the QLM is in ILK mode */
				gserx_cfg.u64 = csr_rd_node(xi.node, CVMX_GSERX_CFG(qlm));
				if (gserx_cfg.s.ila)
					return qlm;
			}
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		cvmx_gserx_phy_ctl_t phy_ctl;
		cvmx_gserx_cfg_t gserx_cfg;
		int qlm;

		/* (interface)0->QLM2, 1->QLM3, 2->DLM5/3->DLM6 */
		if (xi.interface < 2) {
			qlm = xi.interface + 2; /* (0,1)->ret(2,3) */

			phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
			if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
				return -1;

			gserx_cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
			if (gserx_cfg.s.bgx)
				return qlm;
			else
				return -1;
		} else if (xi.interface == 2) {
			cvmx_gserx_cfg_t g1, g2;

			g1.u64 = csr_rd(CVMX_GSERX_CFG(5));
			g2.u64 = csr_rd(CVMX_GSERX_CFG(6));
			/* Check if both QLM5 & QLM6 are BGX2 */
			if (g2.s.bgx) {
				if (g1.s.bgx) {
					cvmx_gserx_phy_ctl_t phy_ctl1;

					phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(5));
					phy_ctl1.u64 = csr_rd(CVMX_GSERX_PHY_CTL(6));
					if ((phy_ctl.s.phy_pd || phy_ctl.s.phy_reset) &&
					    (phy_ctl1.s.phy_pd || phy_ctl1.s.phy_reset))
						return -1;
					if (index >= 2)
						return 6;
					return 5;
				} else { /* QLM6 is BGX2 */
					phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(6));
					if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
						return -1;
					return 6;
				}
			} else if (g1.s.bgx) {
				phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(5));
				if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
					return -1;
				return 5;
			}
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_gserx_phy_ctl_t phy_ctl;
		cvmx_gserx_cfg_t gserx_cfg;
		int qlm;

		if (xi.interface == 0) {
			cvmx_gserx_cfg_t g1, g2;

			g1.u64 = csr_rd(CVMX_GSERX_CFG(4));
			g2.u64 = csr_rd(CVMX_GSERX_CFG(5));
			/* Check if both QLM4 & QLM5 are BGX0 */
			if (g2.s.bgx) {
				if (g1.s.bgx) {
					cvmx_gserx_phy_ctl_t phy_ctl1;

					phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(4));
					phy_ctl1.u64 = csr_rd(CVMX_GSERX_PHY_CTL(5));
					if ((phy_ctl.s.phy_pd || phy_ctl.s.phy_reset) &&
					    (phy_ctl1.s.phy_pd || phy_ctl1.s.phy_reset))
						return -1;
					if (index >= 2)
						return 5;
					return 4;
				}

				/* QLM5 is BGX0 */
				phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(5));
				if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
					return -1;
				return 5;
			} else if (g1.s.bgx) {
				phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(4));
				if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
					return -1;
				return 4;
			}
		} else if (xi.interface < 2) {
			qlm = (xi.interface == 1) ? 2 : 3;
			gserx_cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
			if (gserx_cfg.s.srio)
				return qlm;
		}
		return -1;
	}
	return -1;
}

/**
 * Return which DLM(s) a BGX uses: DLM5 only, DLM6 only, or both
 *
 * @param bgx BGX to search for.
 *
 * @return muxes used: 0 = DLM5+DLM6, 1 = DLM5, 2 = DLM6.
 */
int cvmx_qlm_mux_interface(int bgx)
{
	int mux = 0;
	cvmx_gserx_cfg_t gser1, gser2;
	int qlm1, qlm2;

	if (OCTEON_IS_MODEL(OCTEON_CN73XX) && bgx != 2)
		return -1;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX) && bgx != 0)
		return -1;

	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		qlm1 = 5;
		qlm2 = 6;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		qlm1 = 4;
		qlm2 = 5;
	} else {
		return -1;
	}

	gser1.u64 = csr_rd(CVMX_GSERX_CFG(qlm1));
	gser2.u64 = csr_rd(CVMX_GSERX_CFG(qlm2));

	if (gser1.s.bgx && gser2.s.bgx)
		mux = 0;
	else if (gser1.s.bgx)
		mux = 1; // BGX2 is using DLM5 only
	else if (gser2.s.bgx)
		mux = 2; // BGX2 is using DLM6 only

	return mux;
}

/**
 * Return number of lanes for a given qlm
 *
 * @param qlm QLM to examine
 *
 * @return Number of lanes
 */
int cvmx_qlm_get_lanes(int qlm)
{
	if (OCTEON_IS_MODEL(OCTEON_CN61XX) && qlm == 1)
		return 2;
	else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return 2;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return (qlm < 4) ? 4 /*QLM0,1,2,3*/ : 2 /*DLM4,5,6*/;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return (qlm == 2 || qlm == 3) ? 4 /*QLM2,3*/ : 2 /*DLM0,1,4,5*/;
	return 4;
}
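
/*
 * Illustrative sketch: the lane count feeds the JTAG chain arithmetic used
 * by the getters/setters below, e.g. the total number of bits in a QLM's
 * chain is the per-lane chain length times the number of lanes:
 *
 *	int total_bits = cvmx_qlm_jtag_get_length() * cvmx_qlm_get_lanes(qlm);
 */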

/**
 * Get the QLM JTAG field descriptions for the supported Octeon models.
 *
 * @return qlm_jtag_field_t structure
 */
const __cvmx_qlm_jtag_field_t *cvmx_qlm_jtag_get_field(void)
{
	/* Figure out which JTAG chain description we're using */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		return __cvmx_qlm_jtag_field_cn68xx;
	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) ||
		   OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		return __cvmx_qlm_jtag_field_cn66xx;
	} else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		return __cvmx_qlm_jtag_field_cn63xx;
	}

	return NULL;
}

/**
 * Get the QLM JTAG chain length by walking the qlm_jtag_field table for
 * the current Octeon model.
 *
 * @return the chain length in bits.
 */
int cvmx_qlm_jtag_get_length(void)
{
	const __cvmx_qlm_jtag_field_t *qlm_ptr = cvmx_qlm_jtag_get_field();
	int length = 0;

	/* Figure out how many bits are in the JTAG chain */
	while (qlm_ptr && qlm_ptr->name) {
		if (qlm_ptr->stop_bit > length)
			length = qlm_ptr->stop_bit + 1;
		qlm_ptr++;
	}
	return length;
}

/**
 * Initialize the QLM layer
 */
void cvmx_qlm_init(void)
{
	if (OCTEON_IS_OCTEON3())
		return;

	/* ToDo: No support for non-Octeon 3 yet */
	printf("Please add support for unsupported Octeon SoC\n");
}

/**
 * Lookup the bit information for a JTAG field name
 *
 * @param name Name to lookup
 *
 * @return Field info, or NULL on failure
 */
static const __cvmx_qlm_jtag_field_t *__cvmx_qlm_lookup_field(const char *name)
{
	const __cvmx_qlm_jtag_field_t *ptr = cvmx_qlm_jtag_get_field();

	while (ptr && ptr->name) {
		if (strcmp(name, ptr->name) == 0)
			return ptr;
		ptr++;
	}

	debug("%s: Illegal field name %s\n", __func__, name);
	return NULL;
}

/**
 * Get a field in a QLM JTAG chain
 *
 * @param qlm QLM to get
 * @param lane Lane in QLM to get
 * @param name String name of field
 *
 * @return JTAG field value
 */
uint64_t cvmx_qlm_jtag_get(int qlm, int lane, const char *name)
{
	const __cvmx_qlm_jtag_field_t *field = __cvmx_qlm_lookup_field(name);
	int qlm_jtag_length = cvmx_qlm_jtag_get_length();
	int num_lanes = cvmx_qlm_get_lanes(qlm);

	if (!field)
		return 0;

	/* Capture the current settings */
	cvmx_helper_qlm_jtag_capture(qlm);
	/*
	 * Shift past lanes we don't care about. CN6XXX/7XXX shifts lane 0
	 * first, CN3XXX/5XXX shifts lane 3 first
	 */
	/* Shift to the start of the field */
	cvmx_helper_qlm_jtag_shift_zeros(qlm,
					 qlm_jtag_length * (num_lanes - 1 - lane));
	cvmx_helper_qlm_jtag_shift_zeros(qlm, field->start_bit);
	/* Shift out the value and return it */
	return cvmx_helper_qlm_jtag_shift(qlm, field->stop_bit - field->start_bit + 1, 0);
}
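
/*
 * Illustrative usage sketch (assumes a CN68XX-style chain that contains an
 * "ir50dac" field, as used by __cvmx_qlm_speed_tweak() below): read the
 * current DAC setting of QLM2, lane 0.
 *
 *	u64 ir50dac = cvmx_qlm_jtag_get(2, 0, "ir50dac");
 */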

/**
 * Set a field in a QLM JTAG chain
 *
 * @param qlm QLM to set
 * @param lane Lane in QLM to set, or -1 for all lanes
 * @param name String name of field
 * @param value Value of the field
 */
void cvmx_qlm_jtag_set(int qlm, int lane, const char *name, uint64_t value)
{
	int i, l;
	u32 shift_values[CVMX_QLM_JTAG_UINT32];
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	const __cvmx_qlm_jtag_field_t *field = __cvmx_qlm_lookup_field(name);
	int qlm_jtag_length = cvmx_qlm_jtag_get_length();
	int total_length = qlm_jtag_length * num_lanes;
	int bits = 0;

	if (!field)
		return;

	/* Get the current state */
	cvmx_helper_qlm_jtag_capture(qlm);
	for (i = 0; i < CVMX_QLM_JTAG_UINT32; i++)
		shift_values[i] = cvmx_helper_qlm_jtag_shift(qlm, 32, 0);

	/* Put new data in our local array */
	for (l = 0; l < num_lanes; l++) {
		u64 new_value = value;
		int bits;
		int adj_lanes;

		if (l != lane && lane != -1)
			continue;

		adj_lanes = (num_lanes - 1 - l) * qlm_jtag_length;

		for (bits = field->start_bit + adj_lanes; bits <= field->stop_bit + adj_lanes;
		     bits++) {
			if (new_value & 1)
				shift_values[bits / 32] |= 1 << (bits & 31);
			else
				shift_values[bits / 32] &= ~(1 << (bits & 31));
			new_value >>= 1;
		}
	}

	/* Shift out data and xor with reference */
	while (bits < total_length) {
		u32 shift = shift_values[bits / 32] ^ __cvmx_qlm_jtag_xor_ref[qlm][bits / 32];
		int width = total_length - bits;

		if (width > 32)
			width = 32;
		cvmx_helper_qlm_jtag_shift(qlm, width, shift);
		bits += 32;
	}

	/* Update the new data */
	cvmx_helper_qlm_jtag_update(qlm);

	/*
	 * Always give the QLM 1ms to settle after every update. This may not
	 * always be needed, but some of the options make significant
	 * electrical changes
	 */
	udelay(1000);
}
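
/*
 * Illustrative usage sketch: program a field on every lane of a QLM by
 * passing lane == -1, mirroring what __cvmx_qlm_pcie_idle_dac_tweak()
 * below does:
 *
 *	cvmx_qlm_jtag_set(1, -1, "idle_dac", 0x2);
 */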

/**
 * Errata G-16094: QLM Gen2 Equalizer Default Setting Change.
 * CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the
 * JTAG settings for the QLMs so they run better at 5 GHz and 6.25 GHz.
 */
void __cvmx_qlm_speed_tweak(void)
{
	cvmx_mio_qlmx_cfg_t qlm_cfg;
	int num_qlms = cvmx_qlm_get_num();
	int qlm;

	/* Workaround for Errata (G-16467) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)) {
		for (qlm = 0; qlm < num_qlms; qlm++) {
			int ir50dac;

			/*
			 * This workaround only applies to QLMs running at
			 * 6.25 GHz
			 */
			if (cvmx_qlm_get_gbaud_mhz(qlm) == 6250) {
#ifdef CVMX_QLM_DUMP_STATE
				debug("%s:%d: QLM%d: Applying workaround for Errata G-16467\n",
				      __func__, __LINE__, qlm);
				cvmx_qlm_display_registers(qlm);
				debug("\n");
#endif
				cvmx_qlm_jtag_set(qlm, -1, "cfg_cdr_trunc", 0);
				/* Hold the QLM in reset */
				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_set", 0);
				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 1);
				/* Force TX to be idle */
				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_clr", 0);
				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 1);
				if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_0)) {
					ir50dac = cvmx_qlm_jtag_get(qlm, 0, "ir50dac");
					while (++ir50dac <= 31)
						cvmx_qlm_jtag_set(qlm, -1, "ir50dac", ir50dac);
				}
				cvmx_qlm_jtag_set(qlm, -1, "div4_byp", 0);
				cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 16);
				cvmx_qlm_jtag_set(qlm, -1, "serdes_pll_byp", 1);
				cvmx_qlm_jtag_set(qlm, -1, "spdsel_byp", 1);
#ifdef CVMX_QLM_DUMP_STATE
				debug("%s:%d: QLM%d: Done applying workaround for Errata G-16467\n",
				      __func__, __LINE__, qlm);
				cvmx_qlm_display_registers(qlm);
				debug("\n\n");
#endif
				/*
				 * The QLM will be taken out of reset later
				 * when ILK/XAUI are initialized.
				 */
			}
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) ||
		   OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)) {
		/* Loop through the QLMs */
		for (qlm = 0; qlm < num_qlms; qlm++) {
			/* Read the QLM speed */
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));

			/* If the QLM is at 6.25 GHz or 5 GHz then program JTAG */
			if (qlm_cfg.s.qlm_spd == 5 || qlm_cfg.s.qlm_spd == 12 ||
			    qlm_cfg.s.qlm_spd == 0 || qlm_cfg.s.qlm_spd == 6 ||
			    qlm_cfg.s.qlm_spd == 11) {
				cvmx_qlm_jtag_set(qlm, -1, "rx_cap_gen2", 0x1);
				cvmx_qlm_jtag_set(qlm, -1, "rx_eq_gen2", 0x8);
			}
		}
	}
}

/**
 * Errata G-16174: QLM Gen2 PCIe IDLE DAC change.
 * CN68XX pass 1.x, CN66XX pass 1.x and CN63XX pass 1.0-2.2 QLM tweak.
 * This function tweaks the JTAG settings for the QLMs so PCIe runs better.
 */
void __cvmx_qlm_pcie_idle_dac_tweak(void)
{
	int num_qlms = 0;
	int qlm;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X))
		num_qlms = 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X))
		num_qlms = 3;
	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		num_qlms = 3;
	else
		return;

	/* Loop through the QLMs */
	for (qlm = 0; qlm < num_qlms; qlm++)
		cvmx_qlm_jtag_set(qlm, -1, "idle_dac", 0x2);
}

void __cvmx_qlm_pcie_cfg_rxd_set_tweak(int qlm, int lane)
{
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
		cvmx_qlm_jtag_set(qlm, lane, "cfg_rxd_set", 0x1);
}

/**
 * Get the speed (baud rate) of the QLM in Mbaud for a given node.
 *
 * @param node node of the QLM
 * @param qlm QLM to examine
 *
 * @return Speed in Mbaud
 */
int cvmx_qlm_get_gbaud_mhz_node(int node, int qlm)
{
	cvmx_gserx_lane_mode_t lane_mode;
	cvmx_gserx_cfg_t cfg;

	if (!octeon_has_feature(OCTEON_FEATURE_MULTINODE))
		return 0;

	if (qlm >= 8)
		return -1; /* FIXME for OCI */
	/* Check if QLM is configured */
	cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
	if (cfg.u64 == 0)
		return -1;
	if (cfg.s.pcie) {
		int pem = 0;
		cvmx_pemx_cfg_t pemx_cfg;

		switch (qlm) {
		case 0: /* Either PEM0 x4 or PEM0 x8 */
			pem = 0;
			break;
		case 1: /* Either PEM0 x8 or PEM1 x4 */
			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
			if (pemx_cfg.cn78xx.lanes8)
				pem = 0;
			else
				pem = 1;
			break;
		case 2: /* Either PEM2 x4 or PEM2 x8 */
			pem = 2;
			break;
		case 3: /* Either PEM2 x8 or PEM3 x4 or x8 */
			/* Can be last 4 lanes of PEM2 */
			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				pem = 2;
			} else {
				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
				if (pemx_cfg.cn78xx.lanes8)
					pem = 3;
				else
					pem = 2;
			}
			break;
		case 4: /* Either PEM3 x8 or PEM3 x4 */
			pem = 3;
			break;
		default:
			debug("QLM%d: Should be in PCIe mode\n", qlm);
			break;
		}
		pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
		switch (pemx_cfg.s.md) {
		case 0: /* Gen1 */
			return 2500;
		case 1: /* Gen2 */
			return 5000;
		case 2: /* Gen3 */
			return 8000;
		default:
			return 0;
		}
	} else {
		lane_mode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
		switch (lane_mode.s.lmode) {
		case R_25G_REFCLK100:
			return 2500;
		case R_5G_REFCLK100:
			return 5000;
		case R_8G_REFCLK100:
			return 8000;
		case R_125G_REFCLK15625_KX:
			return 1250;
		case R_3125G_REFCLK15625_XAUI:
			return 3125;
		case R_103125G_REFCLK15625_KR:
			return 10312;
		case R_125G_REFCLK15625_SGMII:
			return 1250;
		case R_5G_REFCLK15625_QSGMII:
			return 5000;
		case R_625G_REFCLK15625_RXAUI:
			return 6250;
		case R_25G_REFCLK125:
			return 2500;
		case R_5G_REFCLK125:
			return 5000;
		case R_8G_REFCLK125:
			return 8000;
		default:
			return 0;
		}
	}
}

/**
 * Get the speed (baud rate) of the QLM in Mbaud.
 *
 * @param qlm QLM to examine
 *
 * @return Speed in Mbaud
 */
int cvmx_qlm_get_gbaud_mhz(int qlm)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		if (qlm == 2) {
			cvmx_gmxx_inf_mode_t inf_mode;

			inf_mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
			switch (inf_mode.s.speed) {
			case 0:
				return 5000; /* 5 Gbaud */
			case 1:
				return 2500; /* 2.5 Gbaud */
			case 2:
				return 2500; /* 2.5 Gbaud */
			case 3:
				return 1250; /* 1.25 Gbaud */
			case 4:
				return 1250; /* 1.25 Gbaud */
			case 5:
				return 6250; /* 6.25 Gbaud */
			case 6:
				return 5000; /* 5 Gbaud */
			case 7:
				return 2500; /* 2.5 Gbaud */
			case 8:
				return 3125; /* 3.125 Gbaud */
			case 9:
				return 2500; /* 2.5 Gbaud */
			case 10:
				return 1250; /* 1.25 Gbaud */
			case 11:
				return 5000; /* 5 Gbaud */
			case 12:
				return 6250; /* 6.25 Gbaud */
			case 13:
				return 3750; /* 3.75 Gbaud */
			case 14:
				return 3125; /* 3.125 Gbaud */
			default:
				return 0; /* Disabled */
			}
		} else {
			cvmx_sriox_status_reg_t status_reg;

			status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(qlm));
			if (status_reg.s.srio) {
				cvmx_sriomaintx_port_0_ctl2_t sriomaintx_port_0_ctl2;

				sriomaintx_port_0_ctl2.u32 =
					csr_rd(CVMX_SRIOMAINTX_PORT_0_CTL2(qlm));
				switch (sriomaintx_port_0_ctl2.s.sel_baud) {
				case 1:
					return 1250; /* 1.25 Gbaud */
				case 2:
					return 2500; /* 2.5 Gbaud */
				case 3:
					return 3125; /* 3.125 Gbaud */
				case 4:
					return 5000; /* 5 Gbaud */
				case 5:
					return 6250; /* 6.250 Gbaud */
				default:
					return 0; /* Disabled */
				}
			} else {
				cvmx_pciercx_cfg032_t pciercx_cfg032;

				pciercx_cfg032.u32 = csr_rd(CVMX_PCIERCX_CFG032(qlm));
				switch (pciercx_cfg032.s.ls) {
				case 1:
					return 2500;
				case 2:
					return 5000;
				case 4:
					return 8000;
				default: {
					cvmx_mio_rst_boot_t mio_rst_boot;

					mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);
					if (qlm == 0 && mio_rst_boot.s.qlm0_spd == 0xf)
						return 0;

					if (qlm == 1 && mio_rst_boot.s.qlm1_spd == 0xf)
						return 0;

					/* Best guess I can make */
					return 5000;
				}
				}
			}
		}
	} else if (OCTEON_IS_OCTEON2()) {
		cvmx_mio_qlmx_cfg_t qlm_cfg;

		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
		switch (qlm_cfg.s.qlm_spd) {
		case 0:
			return 5000; /* 5 Gbaud */
		case 1:
			return 2500; /* 2.5 Gbaud */
		case 2:
			return 2500; /* 2.5 Gbaud */
		case 3:
			return 1250; /* 1.25 Gbaud */
		case 4:
			return 1250; /* 1.25 Gbaud */
		case 5:
			return 6250; /* 6.25 Gbaud */
		case 6:
			return 5000; /* 5 Gbaud */
		case 7:
			return 2500; /* 2.5 Gbaud */
		case 8:
			return 3125; /* 3.125 Gbaud */
		case 9:
			return 2500; /* 2.5 Gbaud */
		case 10:
			return 1250; /* 1.25 Gbaud */
		case 11:
			return 5000; /* 5 Gbaud */
		case 12:
			return 6250; /* 6.25 Gbaud */
		case 13:
			return 3750; /* 3.75 Gbaud */
		case 14:
			return 3125; /* 3.125 Gbaud */
		default:
			return 0; /* Disabled */
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
		u64 meas_refclock;
		u64 freq;

		/* Measure the reference clock */
		meas_refclock = cvmx_qlm_measure_clock(qlm);
		/* Multiply to get the final frequency */
		mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
		freq = meas_refclock * mpll_multiplier.s.mpll_multiplier;
		freq = (freq + 500000) / 1000000;

		return freq;
	} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		return cvmx_qlm_get_gbaud_mhz_node(cvmx_get_node_num(), qlm);
	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_gserx_lane_mode_t lane_mode;

		lane_mode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
		switch (lane_mode.s.lmode) {
		case R_25G_REFCLK100:
			return 2500;
		case R_5G_REFCLK100:
			return 5000;
		case R_8G_REFCLK100:
			return 8000;
		case R_125G_REFCLK15625_KX:
			return 1250;
		case R_3125G_REFCLK15625_XAUI:
			return 3125;
		case R_103125G_REFCLK15625_KR:
			return 10312;
		case R_125G_REFCLK15625_SGMII:
			return 1250;
		case R_5G_REFCLK15625_QSGMII:
			return 5000;
		case R_625G_REFCLK15625_RXAUI:
			return 6250;
		case R_25G_REFCLK125:
			return 2500;
		case R_5G_REFCLK125:
			return 5000;
		case R_8G_REFCLK125:
			return 8000;
		default:
			return 0;
		}
	}
	return 0;
}

static enum cvmx_qlm_mode __cvmx_qlm_get_mode_cn70xx(int qlm)
{
	switch (qlm) {
	case 0: /* DLM0/DLM1 - SGMII/QSGMII/RXAUI */
	{
		union cvmx_gmxx_inf_mode inf_mode0, inf_mode1;

		inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
		inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

		/* SGMII0 SGMII1 */
		switch (inf_mode0.s.mode) {
		case CVMX_GMX_INF_MODE_SGMII:
			switch (inf_mode1.s.mode) {
			case CVMX_GMX_INF_MODE_SGMII:
				return CVMX_QLM_MODE_SGMII_SGMII;
			case CVMX_GMX_INF_MODE_QSGMII:
				return CVMX_QLM_MODE_SGMII_QSGMII;
			default:
				return CVMX_QLM_MODE_SGMII_DISABLED;
			}
		case CVMX_GMX_INF_MODE_QSGMII:
			switch (inf_mode1.s.mode) {
			case CVMX_GMX_INF_MODE_SGMII:
				return CVMX_QLM_MODE_QSGMII_SGMII;
			case CVMX_GMX_INF_MODE_QSGMII:
				return CVMX_QLM_MODE_QSGMII_QSGMII;
			default:
				return CVMX_QLM_MODE_QSGMII_DISABLED;
			}
		case CVMX_GMX_INF_MODE_RXAUI:
			return CVMX_QLM_MODE_RXAUI_1X2;
		default:
			switch (inf_mode1.s.mode) {
			case CVMX_GMX_INF_MODE_SGMII:
				return CVMX_QLM_MODE_DISABLED_SGMII;
			case CVMX_GMX_INF_MODE_QSGMII:
				return CVMX_QLM_MODE_DISABLED_QSGMII;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
		}
	}
	case 1: /* SATA / PEM0 */
	{
		union cvmx_gserx_sata_cfg sata_cfg;
		union cvmx_pemx_cfg pem0_cfg;

		sata_cfg.u64 = csr_rd(CVMX_GSERX_SATA_CFG(0));
		pem0_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));

		switch (pem0_cfg.cn70xx.md) {
		case CVMX_PEM_MD_GEN2_2LANE:
		case CVMX_PEM_MD_GEN1_2LANE:
			return CVMX_QLM_MODE_PCIE_1X2;
		case CVMX_PEM_MD_GEN2_1LANE:
		case CVMX_PEM_MD_GEN1_1LANE:
			if (sata_cfg.s.sata_en)
				/* Both PEM0 and PEM1 */
				return CVMX_QLM_MODE_PCIE_2X1;

			/* Only PEM0 */
			return CVMX_QLM_MODE_PCIE_1X1;
		case CVMX_PEM_MD_GEN2_4LANE:
		case CVMX_PEM_MD_GEN1_4LANE:
			return CVMX_QLM_MODE_PCIE;
		default:
			return CVMX_QLM_MODE_DISABLED;
		}
	}
	case 2: {
		union cvmx_gserx_sata_cfg sata_cfg;
		union cvmx_pemx_cfg pem0_cfg, pem1_cfg, pem2_cfg;

		sata_cfg.u64 = csr_rd(CVMX_GSERX_SATA_CFG(0));
		pem0_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
		pem1_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
		pem2_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));

		if (sata_cfg.s.sata_en)
			return CVMX_QLM_MODE_SATA_2X1;
		if (pem0_cfg.cn70xx.md == CVMX_PEM_MD_GEN2_4LANE ||
		    pem0_cfg.cn70xx.md == CVMX_PEM_MD_GEN1_4LANE)
			return CVMX_QLM_MODE_PCIE;
		if (pem1_cfg.cn70xx.md == CVMX_PEM_MD_GEN2_2LANE ||
		    pem1_cfg.cn70xx.md == CVMX_PEM_MD_GEN1_2LANE) {
			return CVMX_QLM_MODE_PCIE_1X2;
		}
		if (pem1_cfg.cn70xx.md == CVMX_PEM_MD_GEN2_1LANE ||
		    pem1_cfg.cn70xx.md == CVMX_PEM_MD_GEN1_1LANE) {
			if (pem2_cfg.cn70xx.md == CVMX_PEM_MD_GEN2_1LANE ||
			    pem2_cfg.cn70xx.md == CVMX_PEM_MD_GEN1_1LANE) {
				return CVMX_QLM_MODE_PCIE_2X1;
			} else {
				return CVMX_QLM_MODE_PCIE_1X1;
			}
		}
		if (pem2_cfg.cn70xx.md == CVMX_PEM_MD_GEN2_1LANE ||
		    pem2_cfg.cn70xx.md == CVMX_PEM_MD_GEN1_1LANE)
			return CVMX_QLM_MODE_PCIE_2X1;
		return CVMX_QLM_MODE_DISABLED;
	}
	default:
		return CVMX_QLM_MODE_DISABLED;
	}

	return CVMX_QLM_MODE_DISABLED;
}

/*
 * Get the DLM mode for the interface based on the interface type.
 *
 * @param interface_type 0 - SGMII/QSGMII/RXAUI interface
 *                       1 - PCIe
 *                       2 - SATA
 * @param interface interface to use
 * @return the qlm mode the interface is
 */
enum cvmx_qlm_mode cvmx_qlm_get_dlm_mode(int interface_type, int interface)
{
	switch (interface_type) {
	case 0: /* SGMII/QSGMII/RXAUI */
	{
		enum cvmx_qlm_mode qlm_mode = __cvmx_qlm_get_mode_cn70xx(0);

		switch (interface) {
		case 0:
			switch (qlm_mode) {
			case CVMX_QLM_MODE_SGMII_SGMII:
			case CVMX_QLM_MODE_SGMII_DISABLED:
			case CVMX_QLM_MODE_SGMII_QSGMII:
				return CVMX_QLM_MODE_SGMII;
			case CVMX_QLM_MODE_QSGMII_QSGMII:
			case CVMX_QLM_MODE_QSGMII_DISABLED:
			case CVMX_QLM_MODE_QSGMII_SGMII:
				return CVMX_QLM_MODE_QSGMII;
			case CVMX_QLM_MODE_RXAUI_1X2:
				return CVMX_QLM_MODE_RXAUI;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
		case 1:
			switch (qlm_mode) {
			case CVMX_QLM_MODE_SGMII_SGMII:
			case CVMX_QLM_MODE_DISABLED_SGMII:
			case CVMX_QLM_MODE_QSGMII_SGMII:
				return CVMX_QLM_MODE_SGMII;
			case CVMX_QLM_MODE_QSGMII_QSGMII:
			case CVMX_QLM_MODE_DISABLED_QSGMII:
			case CVMX_QLM_MODE_SGMII_QSGMII:
				return CVMX_QLM_MODE_QSGMII;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
		default:
			return qlm_mode;
		}
	}
	case 1: /* PCIe */
	{
		enum cvmx_qlm_mode qlm_mode1 = __cvmx_qlm_get_mode_cn70xx(1);
		enum cvmx_qlm_mode qlm_mode2 = __cvmx_qlm_get_mode_cn70xx(2);

		switch (interface) {
		case 0: /* PCIe0 can be DLM1 with 1, 2 or 4 lanes */
			return qlm_mode1;
		case 1:
			/*
			 * PCIe1 can be in DLM1 1 lane(1), DLM2 1 lane(0)
			 * or 2 lanes(0-1)
			 */
			if (qlm_mode1 == CVMX_QLM_MODE_PCIE_2X1)
				return CVMX_QLM_MODE_PCIE_2X1;
			else if (qlm_mode2 == CVMX_QLM_MODE_PCIE_1X2 ||
				 qlm_mode2 == CVMX_QLM_MODE_PCIE_2X1)
				return qlm_mode2;
			else
				return CVMX_QLM_MODE_DISABLED;
		case 2: /* PCIe2 can be DLM2, 1 lane (lane 1) */
			if (qlm_mode2 == CVMX_QLM_MODE_PCIE_2X1)
				return qlm_mode2;
			else
				return CVMX_QLM_MODE_DISABLED;
		default:
			return CVMX_QLM_MODE_DISABLED;
		}
	}
	case 2: /* SATA */
	{
		enum cvmx_qlm_mode qlm_mode = __cvmx_qlm_get_mode_cn70xx(2);

		if (qlm_mode == CVMX_QLM_MODE_SATA_2X1)
			return CVMX_QLM_MODE_SATA_2X1;
		else
			return CVMX_QLM_MODE_DISABLED;
	}
	default:
		return CVMX_QLM_MODE_DISABLED;
	}
}
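
/*
 * Illustrative usage sketch (CN70XX only, hypothetical caller): check how
 * PCIe port 1 is mapped onto the DLMs before bringing the link up.
 *
 *	enum cvmx_qlm_mode mode = cvmx_qlm_get_dlm_mode(1, 1);
 *
 *	if (mode == CVMX_QLM_MODE_PCIE_1X2)
 *		printf("PCIe1 uses two DLM lanes\n");
 */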

static enum cvmx_qlm_mode __cvmx_qlm_get_mode_cn6xxx(int qlm)
{
	cvmx_mio_qlmx_cfg_t qlmx_cfg;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlmx_cfg.s.qlm_cfg) {
		case 0: /* PCIE */
			return CVMX_QLM_MODE_PCIE;
		case 1: /* ILK */
			return CVMX_QLM_MODE_ILK;
		case 2: /* SGMII */
			return CVMX_QLM_MODE_SGMII;
		case 3: /* XAUI */
			return CVMX_QLM_MODE_XAUI;
		case 7: /* RXAUI */
			return CVMX_QLM_MODE_RXAUI;
		default:
			return CVMX_QLM_MODE_DISABLED;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlmx_cfg.s.qlm_cfg) {
		case 0x9: /* SGMII */
			return CVMX_QLM_MODE_SGMII;
		case 0xb: /* XAUI */
			return CVMX_QLM_MODE_XAUI;
		case 0x0: /* PCIE gen2 */
		case 0x8: /* PCIE gen2 (alias) */
		case 0x2: /* PCIE gen1 */
		case 0xa: /* PCIE gen1 (alias) */
			return CVMX_QLM_MODE_PCIE;
		case 0x1: /* SRIO 1x4 short */
		case 0x3: /* SRIO 1x4 long */
			return CVMX_QLM_MODE_SRIO_1X4;
		case 0x4: /* SRIO 2x2 short */
		case 0x6: /* SRIO 2x2 long */
			return CVMX_QLM_MODE_SRIO_2X2;
		case 0x5: /* SRIO 4x1 short */
		case 0x7: /* SRIO 4x1 long */
			if (!OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
				return CVMX_QLM_MODE_SRIO_4X1;
			fallthrough;
		default:
			return CVMX_QLM_MODE_DISABLED;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		cvmx_sriox_status_reg_t status_reg;
		/* For now skip qlm2 */
		if (qlm == 2) {
			cvmx_gmxx_inf_mode_t inf_mode;

			inf_mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
			if (inf_mode.s.speed == 15)
				return CVMX_QLM_MODE_DISABLED;
			else if (inf_mode.s.mode == 0)
				return CVMX_QLM_MODE_SGMII;
			else
				return CVMX_QLM_MODE_XAUI;
		}
		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(qlm));
		if (status_reg.s.srio)
			return CVMX_QLM_MODE_SRIO_1X4;
		else
			return CVMX_QLM_MODE_PCIE;
	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
		qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlm) {
		case 0:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 0: /* PCIe 1x4 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE;
			case 2: /* SGMII */
				return CVMX_QLM_MODE_SGMII;
			case 3: /* XAUI */
				return CVMX_QLM_MODE_XAUI;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 1:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 0: /* PCIe 1x2 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_1X2;
			case 1: /* PCIe 2x1 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_2X1;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 2:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 2: /* SGMII */
				return CVMX_QLM_MODE_SGMII;
			case 3: /* XAUI */
				return CVMX_QLM_MODE_XAUI;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlmx_cfg.s.qlm_spd == 15)
			return CVMX_QLM_MODE_DISABLED;

		switch (qlm) {
		case 0:
			if (qlmx_cfg.s.qlm_cfg == 2) /* SGMII */
				return CVMX_QLM_MODE_SGMII;
			break;
		case 1:
			switch (qlmx_cfg.s.qlm_cfg) {
			case 0: /* PCIe 1x2 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_1X2;
			case 1: /* PCIe 2x1 gen2 / gen1 */
				return CVMX_QLM_MODE_PCIE_2X1;
			default:
				return CVMX_QLM_MODE_DISABLED;
			}
			break;
		}
	}
	return CVMX_QLM_MODE_DISABLED;
}

/**
 * @INTERNAL
 * Decrement the MPLL multiplier for the DLM as per Errata G-20669
 *
 * @param qlm DLM to configure
 * @param baud_mhz Speed the DLM is configured at
 * @param old_multiplier MPLL_MULTIPLIER value to decrement
 */
void __cvmx_qlm_set_mult(int qlm, int baud_mhz, int old_multiplier)
{
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	cvmx_gserx_dlmx_ref_clkdiv2_t clkdiv;
	u64 meas_refclock, mult;

	if (!OCTEON_IS_MODEL(OCTEON_CN70XX))
		return;

	if (qlm == -1)
		return;

	meas_refclock = cvmx_qlm_measure_clock(qlm);
	if (meas_refclock == 0) {
		printf("DLM%d: Reference clock not running\n", qlm);
		return;
	}

	/*
	 * The baud rate multiplier needs to be adjusted on the CN70XX if
	 * the reference clock is > 100MHz.
	 */
	if (qlm == 0) {
		clkdiv.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
		if (clkdiv.s.ref_clkdiv2)
			baud_mhz *= 2;
	}
	mult = (uint64_t)baud_mhz * 1000000 + (meas_refclock / 2);
	mult /= meas_refclock;

	/*
	 * 6. Decrease MPLL_MULTIPLIER by one continually until it reaches
	 * the desired long-term setting, ensuring that each MPLL_MULTIPLIER
	 * value is constant for at least 1 msec before changing to the next
	 * value. The desired long-term setting is as indicated in HRM tables
	 * 21-1, 21-2, and 21-3. This is not required with the HRM
	 * sequence.
	 */
	do {
		mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
		mpll_multiplier.s.mpll_multiplier = --old_multiplier;
		csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);
		/* Wait for 1 ms */
		udelay(1000);
	} while (old_multiplier > (int)mult);
}
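
/*
 * Worked example (illustrative): with a measured 100 MHz reference clock
 * and a DLM configured for 5000 Mbaud, the target multiplier computed
 * above is (5000 * 1000000 + 50000000) / 100000000 = 50, so
 * MPLL_MULTIPLIER is stepped down once per millisecond until it reaches 50.
 */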

enum cvmx_qlm_mode cvmx_qlm_get_mode_cn78xx(int node, int qlm)
{
	cvmx_gserx_cfg_t gserx_cfg;
	static int qlm_mode[2][9] = { { -1, -1, -1, -1, -1, -1, -1, -1, -1 },
				      { -1, -1, -1, -1, -1, -1, -1, -1, -1 } };

	if (qlm >= 8)
		return CVMX_QLM_MODE_OCI;

	if (qlm_mode[node][qlm] != -1)
		return qlm_mode[node][qlm];

	gserx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
	if (gserx_cfg.s.pcie) {
		switch (qlm) {
		case 0: /* Either PEM0 x4 or PEM0 x8 */
		case 1: /* Either PEM0 x8 or PEM1 x4 */
		{
			cvmx_pemx_cfg_t pemx_cfg;

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM0 x8 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM0 x4 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		case 2: /* Either PEM2 x4 or PEM2 x8 */
		{
			cvmx_pemx_cfg_t pemx_cfg;

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM2 x8 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM2 x4 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		case 3: /* Either PEM2 x8 or PEM3 x4 or PEM3 x8 */
		{
			cvmx_pemx_cfg_t pemx_cfg;

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM2 x8 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE_1X8;
			}

			/* Can be first 4 lanes of PEM3 */
			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM3 x8 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM3 x4 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		case 4: /* Either PEM3 x8 or PEM3 x4 */
		{
			cvmx_pemx_cfg_t pemx_cfg;

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM3 x8 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM3 x4 */
				qlm_mode[node][qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		default:
			qlm_mode[node][qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		}
	} else if (gserx_cfg.s.ila) {
		qlm_mode[node][qlm] = CVMX_QLM_MODE_ILK;
	} else if (gserx_cfg.s.bgx) {
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_spux_br_pmd_control_t pmd_control;
		int bgx = (qlm < 2) ? qlm : qlm - 2;

		cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(0, bgx));
		pmd_control.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(0, bgx));

		switch (cmr_config.s.lmac_type) {
		case 0:
			qlm_mode[node][qlm] = CVMX_QLM_MODE_SGMII;
			break;
		case 1:
			qlm_mode[node][qlm] = CVMX_QLM_MODE_XAUI;
			break;
		case 2:
			qlm_mode[node][qlm] = CVMX_QLM_MODE_RXAUI;
			break;
		case 3:
			/*
			 * Use training to determine if we're in 10GBASE-KR
			 * or XFI
			 */
			if (pmd_control.s.train_en)
				qlm_mode[node][qlm] = CVMX_QLM_MODE_10G_KR;
			else
				qlm_mode[node][qlm] = CVMX_QLM_MODE_XFI;
			break;
		case 4:
			/*
			 * Use training to determine if we're in 40GBASE-KR
			 * or XLAUI
			 */
			if (pmd_control.s.train_en)
				qlm_mode[node][qlm] = CVMX_QLM_MODE_40G_KR4;
			else
				qlm_mode[node][qlm] = CVMX_QLM_MODE_XLAUI;
			break;
		default:
			qlm_mode[node][qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		}
	} else {
		qlm_mode[node][qlm] = CVMX_QLM_MODE_DISABLED;
	}

	return qlm_mode[node][qlm];
}

enum cvmx_qlm_mode __cvmx_qlm_get_mode_cn73xx(int qlm)
{
	cvmx_gserx_cfg_t gserx_cfg;
	static int qlm_mode[7] = { -1, -1, -1, -1, -1, -1, -1 };

	if (qlm > 6) {
		debug("Invalid QLM(%d) passed\n", qlm);
		return -1;
	}

	if (qlm_mode[qlm] != -1)
		return qlm_mode[qlm];

	gserx_cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	if (gserx_cfg.s.pcie) {
		cvmx_pemx_cfg_t pemx_cfg;

		switch (qlm) {
		case 0: /* Either PEM0 x4 or PEM0 x8 */
		case 1: /* Either PEM0 x8 or PEM1 x4 */
		{
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM0 x8 */
				qlm_mode[qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM0/PEM1 x4 */
				qlm_mode[qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		case 2: /* Either PEM2 x4 or PEM2 x8 */
		{
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM2 x8 */
				qlm_mode[qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM2 x4 */
				qlm_mode[qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		case 5:
		case 6: /* PEM3 x2 */
			qlm_mode[qlm] = CVMX_QLM_MODE_PCIE_1X2; /* PEM3 x2 */
			break;
		case 3: /* Either PEM2 x8 or PEM3 x4 */
		{
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				/* PEM2 x8 */
				qlm_mode[qlm] = CVMX_QLM_MODE_PCIE_1X8;
			} else {
				/* PEM3 x4 */
				qlm_mode[qlm] = CVMX_QLM_MODE_PCIE;
			}
			break;
		}
		default:
			qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		}
	} else if (gserx_cfg.s.bgx) {
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_cmr_rx_lmacs_t bgx_cmr_rx_lmacs;
		cvmx_bgxx_spux_br_pmd_control_t pmd_control;
		int bgx = 0;
		int start = 0, end = 4, index;
		int lane_mask = 0, train_mask = 0;
		int mux = 0; // 0:BGX2 (DLM5/DLM6), 1:BGX2(DLM5), 2:BGX2(DLM6)

		if (qlm < 4) {
			bgx = qlm - 2;
		} else if (qlm == 5 || qlm == 6) {
			bgx = 2;
			mux = cvmx_qlm_mux_interface(bgx);
			if (mux == 0) {
				start = 0;
				end = 4;
			} else if (mux == 1) {
				start = 0;
				end = 2;
			} else if (mux == 2) {
				start = 2;
				end = 4;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
				return qlm_mode[qlm];
			}
		}

		for (index = start; index < end; index++) {
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			pmd_control.u64 = csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
			lane_mask |= (cmr_config.s.lmac_type << (index * 4));
			train_mask |= (pmd_control.s.train_en << (index * 4));
		}
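
		/*
		 * Each LMAC's type now occupies one nibble of lane_mask
		 * (0 = SGMII, 1 = XAUI, 2 = RXAUI, 3 = 10G_R, 4 = 40G_R,
		 * 5 = RGMII), so e.g. 0x3333 below means four XFI/10G-KR
		 * LMACs and 0x0005 means RGMII plus SGMII.
		 */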

		/* Need to include the DLM5 lmacs when only DLM6 is used */
		if (mux == 2)
			bgx_cmr_rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(2));
		switch (lane_mask) {
		case 0:
			if (mux == 1) {
				qlm_mode[qlm] = CVMX_QLM_MODE_SGMII_2X1;
			} else if (mux == 2) {
				qlm_mode[qlm] = CVMX_QLM_MODE_SGMII_2X1;
				bgx_cmr_rx_lmacs.s.lmacs = 4;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_SGMII;
			}
			break;
		case 0x1:
			qlm_mode[qlm] = CVMX_QLM_MODE_XAUI;
			break;
		case 0x2:
			if (mux == 1) {
				// NONE+RXAUI
				qlm_mode[qlm] = CVMX_QLM_MODE_RXAUI_1X2;
			} else if (mux == 0) {
				// RXAUI+SGMII
				qlm_mode[qlm] = CVMX_QLM_MODE_MIXED;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 0x202:
			if (mux == 2) {
				// RXAUI+RXAUI
				qlm_mode[qlm] = CVMX_QLM_MODE_RXAUI_1X2;
				bgx_cmr_rx_lmacs.s.lmacs = 4;
			} else if (mux == 1) {
				// RXAUI+RXAUI
				qlm_mode[qlm] = CVMX_QLM_MODE_RXAUI_1X2;
			} else if (mux == 0) {
				qlm_mode[qlm] = CVMX_QLM_MODE_RXAUI;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 0x22:
			qlm_mode[qlm] = CVMX_QLM_MODE_RXAUI;
			break;
		case 0x3333:
			/*
			 * Use training to determine if we're in 10GBASE-KR
			 * or XFI
			 */
			if (train_mask)
				qlm_mode[qlm] = CVMX_QLM_MODE_10G_KR;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_XFI;
			break;
		case 0x4:
			/*
			 * Use training to determine if we're in 40GBASE-KR
			 * or XLAUI
			 */
			if (train_mask)
				qlm_mode[qlm] = CVMX_QLM_MODE_40G_KR4;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_XLAUI;
			break;
		case 0x0005:
			qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_SGMII;
			break;
		case 0x3335:
			if (train_mask)
				qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_10G_KR;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_XFI;
			break;
		case 0x45:
			if (train_mask)
				qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_40G_KR4;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_XLAUI;
			break;
		case 0x225:
			qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_RXAUI;
			break;
		case 0x15:
			qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_XAUI;
			break;

		case 0x200:
			if (mux == 2) {
				qlm_mode[qlm] = CVMX_QLM_MODE_RXAUI_1X2;
				bgx_cmr_rx_lmacs.s.lmacs = 4;
			} else
		case 0x205:
		case 0x233:
		case 0x3302:
		case 0x3305:
			if (mux == 0)
				qlm_mode[qlm] = CVMX_QLM_MODE_MIXED;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		case 0x3300:
			if (mux == 0) {
				qlm_mode[qlm] = CVMX_QLM_MODE_MIXED;
			} else if (mux == 2) {
				if (train_mask)
					qlm_mode[qlm] = CVMX_QLM_MODE_10G_KR_1X2;
				else
					qlm_mode[qlm] = CVMX_QLM_MODE_XFI_1X2;
				bgx_cmr_rx_lmacs.s.lmacs = 4;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 0x33:
			if (mux == 1 || mux == 2) {
				if (train_mask)
					qlm_mode[qlm] = CVMX_QLM_MODE_10G_KR_1X2;
				else
					qlm_mode[qlm] = CVMX_QLM_MODE_XFI_1X2;
				if (mux == 2)
					bgx_cmr_rx_lmacs.s.lmacs = 4;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			}
			break;
		case 0x0035:
			if (mux == 0)
				qlm_mode[qlm] = CVMX_QLM_MODE_MIXED;
			else if (train_mask)
				qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_10G_KR_1X1;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_RGMII_XFI_1X1;
			break;
		case 0x235:
			if (mux == 0)
				qlm_mode[qlm] = CVMX_QLM_MODE_MIXED;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		default:
			qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		}
		if (mux == 2) {
			csr_wr(CVMX_BGXX_CMR_RX_LMACS(2), bgx_cmr_rx_lmacs.u64);
			csr_wr(CVMX_BGXX_CMR_TX_LMACS(2), bgx_cmr_rx_lmacs.u64);
		}
	} else if (gserx_cfg.s.sata) {
		qlm_mode[qlm] = CVMX_QLM_MODE_SATA_2X1;
	} else {
		qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
	}

	return qlm_mode[qlm];
}

enum cvmx_qlm_mode __cvmx_qlm_get_mode_cnf75xx(int qlm)
{
	cvmx_gserx_cfg_t gserx_cfg;
	static int qlm_mode[9] = { -1, -1, -1, -1, -1, -1, -1, -1, -1 };

	if (qlm > 8) {
		debug("Invalid QLM(%d) passed\n", qlm);
		return -1;
	}

	if (qlm_mode[qlm] != -1)
		return qlm_mode[qlm];

	if ((qlm == 2 || qlm == 3) && (OCTEON_IS_MODEL(OCTEON_CNF75XX))) {
		cvmx_sriox_status_reg_t status_reg;
		int port = (qlm == 2) ? 0 : 1;

		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		/* FIXME add different width */
		if (status_reg.s.srio)
			qlm_mode[qlm] = CVMX_QLM_MODE_SRIO_1X4;
		else
			qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
		return qlm_mode[qlm];
	}

	gserx_cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	if (gserx_cfg.s.pcie) {
		switch (qlm) {
		case 0: /* Either PEM0 x2 or PEM0 x4 */
		case 1: /* Either PEM1 x2 or PEM0 x4 */
		{
			/* FIXME later */
			qlm_mode[qlm] = CVMX_QLM_MODE_PCIE;
			break;
		}
		default:
			qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		}
	} else if (gserx_cfg.s.bgx) {
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_spux_br_pmd_control_t pmd_control;
		int bgx = 0;
		int start = 0, end = 4, index;
		int lane_mask = 0, train_mask = 0;
		int mux = 0; // 0:BGX0 (DLM4/DLM5), 1:BGX0(DLM4), 2:BGX0(DLM5)
		cvmx_gserx_cfg_t gser1, gser2;

		gser1.u64 = csr_rd(CVMX_GSERX_CFG(4));
		gser2.u64 = csr_rd(CVMX_GSERX_CFG(5));
		if (gser1.s.bgx && gser2.s.bgx) {
			start = 0;
			end = 4;
		} else if (gser1.s.bgx) {
			start = 0;
			end = 2;
			mux = 1;
		} else if (gser2.s.bgx) {
			start = 2;
			end = 4;
			mux = 2;
		} else {
			qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			return qlm_mode[qlm];
		}

		for (index = start; index < end; index++) {
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			pmd_control.u64 = csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
			lane_mask |= (cmr_config.s.lmac_type << (index * 4));
			train_mask |= (pmd_control.s.train_en << (index * 4));
		}

		switch (lane_mask) {
		case 0:
			if (mux == 1 || mux == 2)
				qlm_mode[qlm] = CVMX_QLM_MODE_SGMII_2X1;
			else
				qlm_mode[qlm] = CVMX_QLM_MODE_SGMII;
			break;
		case 0x3300:
			if (mux == 0) {
				qlm_mode[qlm] = CVMX_QLM_MODE_MIXED;
			} else if (mux == 2) {
				if (train_mask)
					qlm_mode[qlm] = CVMX_QLM_MODE_10G_KR_1X2;
				else
					qlm_mode[qlm] = CVMX_QLM_MODE_XFI_1X2;
			} else {
				qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			}
			break;
		default:
			qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
			break;
		}
	} else {
		qlm_mode[qlm] = CVMX_QLM_MODE_DISABLED;
	}

	return qlm_mode[qlm];
}

/*
 * Read QLM and return mode.
 */
enum cvmx_qlm_mode cvmx_qlm_get_mode(int qlm)
{
	if (OCTEON_IS_OCTEON2())
		return __cvmx_qlm_get_mode_cn6xxx(qlm);
	else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return __cvmx_qlm_get_mode_cn70xx(qlm);
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return cvmx_qlm_get_mode_cn78xx(cvmx_get_node_num(), qlm);
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return __cvmx_qlm_get_mode_cn73xx(qlm);
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return __cvmx_qlm_get_mode_cnf75xx(qlm);

	return CVMX_QLM_MODE_DISABLED;
}

int cvmx_qlm_measure_clock_cn7xxx(int node, int qlm)
{
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	cvmx_gserx_lane_mode_t lane_mode;

	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		if (node != 0 || qlm >= 7)
			return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		if (qlm >= 8 || node > 1)
			return -1; /* FIXME for OCI */
	} else {
		debug("%s: Unsupported OCTEON model\n", __func__);
		return -1;
	}

	cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));

	if (cfg.s.pcie) {
		refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
		if (refclk_sel.s.pcie_refclk125)
			return REF_125MHZ; /* Ref 125 MHz */
		else
			return REF_100MHZ; /* Ref 100 MHz */
	}

	lane_mode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
	switch (lane_mode.s.lmode) {
	case R_25G_REFCLK100:
		return REF_100MHZ;
	case R_5G_REFCLK100:
		return REF_100MHZ;
	case R_8G_REFCLK100:
		return REF_100MHZ;
	case R_125G_REFCLK15625_KX:
		return REF_156MHZ;
	case R_3125G_REFCLK15625_XAUI:
		return REF_156MHZ;
	case R_103125G_REFCLK15625_KR:
		return REF_156MHZ;
	case R_125G_REFCLK15625_SGMII:
		return REF_156MHZ;
	case R_5G_REFCLK15625_QSGMII:
		return REF_156MHZ;
	case R_625G_REFCLK15625_RXAUI:
		return REF_156MHZ;
	case R_25G_REFCLK125:
		return REF_125MHZ;
	case R_5G_REFCLK125:
		return REF_125MHZ;
	case R_8G_REFCLK125:
		return REF_125MHZ;
	default:
		return 0;
	}
}

/**
 * Measure the reference clock of a QLM on a multi-node setup
 *
 * @param node node to measure
 * @param qlm QLM to measure
 *
 * @return Clock rate in Hz
 */
int cvmx_qlm_measure_clock_node(int node, int qlm)
{
	if (octeon_has_feature(OCTEON_FEATURE_MULTINODE))
		return cvmx_qlm_measure_clock_cn7xxx(node, qlm);
	else
		return cvmx_qlm_measure_clock(qlm);
}

/**
 * Measure the reference clock of a QLM
 *
 * @param qlm QLM to measure
 *
 * @return Clock rate in Hz
 */
int cvmx_qlm_measure_clock(int qlm)
{
	cvmx_mio_ptp_clock_cfg_t ptp_clock;
	u64 count;
	u64 start_cycle, stop_cycle;
	int evcnt_offset = 0x10;
	int incr_count = 1;
	static int ref_clock[16] = { 0 };

	if (ref_clock[qlm])
		return ref_clock[qlm];

	if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))
		return cvmx_qlm_measure_clock_cn7xxx(cvmx_get_node_num(), qlm);

	if (OCTEON_IS_MODEL(OCTEON_CN70XX) && qlm == 0) {
		cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;

		ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
		if (ref_clkdiv2.s.ref_clkdiv2)
			incr_count = 2;
	}

	/* Fix reference clock for OCI QLMs */

	/* Disable the PTP event counter while we configure it */
	ptp_clock.u64 = csr_rd(CVMX_MIO_PTP_CLOCK_CFG); /* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 0;
	csr_wr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);

	/* Count on rising edge, choose which QLM to count */
	ptp_clock.u64 = csr_rd(CVMX_MIO_PTP_CLOCK_CFG); /* For CN63XXp1 errata */
	ptp_clock.s.evcnt_edge = 0;
	ptp_clock.s.evcnt_in = evcnt_offset + qlm;
	csr_wr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);

	/* Clear MIO_PTP_EVT_CNT */
	csr_rd(CVMX_MIO_PTP_EVT_CNT); /* For CN63XXp1 errata */
	count = csr_rd(CVMX_MIO_PTP_EVT_CNT);
	csr_wr(CVMX_MIO_PTP_EVT_CNT, -count);

	/* Set MIO_PTP_EVT_CNT to 1 billion */
	csr_wr(CVMX_MIO_PTP_EVT_CNT, 1000000000);

	/* Enable the PTP event counter */
	ptp_clock.u64 = csr_rd(CVMX_MIO_PTP_CLOCK_CFG); /* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 1;
	csr_wr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);

	start_cycle = get_ticks();
	/* Wait for 50ms */
	mdelay(50);

	/* Read the counter */
	csr_rd(CVMX_MIO_PTP_EVT_CNT); /* For CN63XXp1 errata */
	count = csr_rd(CVMX_MIO_PTP_EVT_CNT);
	stop_cycle = get_ticks();

	/* Disable the PTP event counter */
	ptp_clock.u64 = csr_rd(CVMX_MIO_PTP_CLOCK_CFG); /* For CN63XXp1 errata */
	ptp_clock.s.evcnt_en = 0;
	csr_wr(CVMX_MIO_PTP_CLOCK_CFG, ptp_clock.u64);

	/* Clock counted down, so reverse it */
	count = 1000000000 - count;
	count *= incr_count;

	/* Return the rate */
	ref_clock[qlm] = count * gd->cpu_clk / (stop_cycle - start_cycle);

	return ref_clock[qlm];
}
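
/*
 * Worked example (illustrative): a 100 MHz reference produces about
 * 5,000,000 rising edges during the 50 ms window. With a 1 GHz core clock
 * the window spans roughly 50,000,000 ticks, so the computed rate is
 * 5000000 * 1000000000 / 50000000 = 100000000 Hz.
 */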
1967
1968/*
1969 * Perform RX equalization on a QLM
1970 *
1971 * @param node Node the QLM is on
1972 * @param qlm QLM to perform RX equalization on
1973 * @param lane Lane to use, or -1 for all lanes
1974 *
1975 * @return Zero on success, negative if any lane failed RX equalization
1976 */
1977int __cvmx_qlm_rx_equalization(int node, int qlm, int lane)
1978{
1979 cvmx_gserx_phy_ctl_t phy_ctl;
1980 cvmx_gserx_br_rxx_ctl_t rxx_ctl;
1981 cvmx_gserx_br_rxx_eer_t rxx_eer;
1982 cvmx_gserx_rx_eie_detsts_t eie_detsts;
1983 int fail, gbaud, l, lane_mask;
1984 enum cvmx_qlm_mode mode;
1985 int max_lanes = cvmx_qlm_get_lanes(qlm);
1986 cvmx_gserx_lane_mode_t lmode;
1987 cvmx_gserx_lane_px_mode_1_t pmode_1;
1988 int pending = 0;
1989 u64 timeout;
1990
1991 /* Don't touch QLMs if it is reset or powered down */
1992 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
1993 if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
1994 return -1;
1995
1996 /*
1997 * Check whether GSER PRBS pattern matcher is enabled on any of the
1998 * applicable lanes. Can't complete RX Equalization while pattern
1999 * matcher is enabled because it causes errors
2000 */
2001 for (l = 0; l < max_lanes; l++) {
2002 cvmx_gserx_lanex_lbert_cfg_t lbert_cfg;
2003
2004 if (lane != -1 && lane != l)
2005 continue;
2006
2007 lbert_cfg.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_LBERT_CFG(l, qlm));
2008 if (lbert_cfg.s.lbert_pm_en == 1)
2009 return -1;
2010 }
2011
2012 /* Get Lane Mode */
2013 lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
2014
2015 /*
2016 * Check to see if in VMA manual mode is set. If in VMA manual mode
2017 * don't complete rx equalization
2018 */
2019 pmode_1.u64 = csr_rd_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm));
2020 if (pmode_1.s.vma_mm == 1) {
2021#ifdef DEBUG_QLM
2022 debug("N%d:QLM%d: VMA Manual (manual DFE) selected. Not completing Rx equalization\n",
2023 node, qlm);
2024#endif
2025 return 0;
2026 }
2027
2028 if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
2029 gbaud = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
2030 mode = cvmx_qlm_get_mode_cn78xx(node, qlm);
2031 } else {
2032 gbaud = cvmx_qlm_get_gbaud_mhz(qlm);
2033 mode = cvmx_qlm_get_mode(qlm);
2034 }
2035
2036 /* Apply RX Equalization for speed >= 8G */
2037 if (qlm < 8) {
2038 if (gbaud < 6250)
2039 return 0;
2040 }
2041
2042 /* Don't run on PCIe Links */
2043 if (mode == CVMX_QLM_MODE_PCIE || mode == CVMX_QLM_MODE_PCIE_1X8 ||
2044 mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1)
2045 return -1;
2046
2047 fail = 0;
2048
	/*
	 * Before completing RX equalization, wait for
	 * GSERx_RX_EIE_DETSTS[CDRLOCK] to be set.
	 * This ensures the RX data is valid.
	 */
	if (lane == -1) {
		/*
		 * With lane == -1, check every lane for CDR lock
		 * (cdrlock = 0b1111 on a 4-lane QLM)
		 */
		if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_RX_EIE_DETSTS(qlm),
					       cvmx_gserx_rx_eie_detsts_t, cdrlock, ==,
					       (1 << max_lanes) - 1, 500)) {
#ifdef DEBUG_QLM
			eie_detsts.u64 = csr_rd_node(node, CVMX_GSERX_RX_EIE_DETSTS(qlm));
			debug("ERROR: %d:QLM%d: CDR Lock not detected for all lanes. CDR_LOCK(0x%x)\n",
			      node, qlm, eie_detsts.s.cdrlock);
#endif
			return -1;
		}
	} else {
		if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_RX_EIE_DETSTS(qlm),
					       cvmx_gserx_rx_eie_detsts_t, cdrlock, &, (1 << lane),
					       500)) {
#ifdef DEBUG_QLM
			eie_detsts.u64 = csr_rd_node(node, CVMX_GSERX_RX_EIE_DETSTS(qlm));
			debug("ERROR: %d:QLM%d: CDR Lock not detected for Lane%d CDR_LOCK(0x%x)\n",
			      node, qlm, lane, eie_detsts.s.cdrlock);
#endif
			return -1;
		}
	}

	/*
	 * Errata (GSER-20075): GSER(0..13)_BR_RX3_EER[RXT_ERR] aliases
	 * GSER(0..13)_BR_RX2_EER[RXT_ERR]. Since lanes 2 and 3 trigger at
	 * the same time, set up lane 3 before looping through the lanes.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && (lane == -1 || lane == 3)) {
		/* Enable software control */
		rxx_ctl.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_CTL(3, qlm));
		rxx_ctl.s.rxt_swm = 1;
		csr_wr_node(node, CVMX_GSERX_BR_RXX_CTL(3, qlm), rxx_ctl.u64);

		/* Clear the completion flag */
		rxx_eer.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_EER(3, qlm));
		rxx_eer.s.rxt_esv = 0;
		csr_wr_node(node, CVMX_GSERX_BR_RXX_EER(3, qlm), rxx_eer.u64);
		/* Initiate a new request on lane 2 */
		if (lane == 3) {
			rxx_eer.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_EER(2, qlm));
			rxx_eer.s.rxt_eer = 1;
			csr_wr_node(node, CVMX_GSERX_BR_RXX_EER(2, qlm), rxx_eer.u64);
		}
	}

	for (l = 0; l < max_lanes; l++) {
		if (lane != -1 && lane != l)
			continue;

		/*
		 * Skip lane 3 on 78p1.x due to errata (GSER-20075).
		 * Handled above.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && l == 3) {
			/*
			 * Need to add lane 3 to the pending list for 78xx
			 * pass 1.x
			 */
			pending |= 1 << 3;
			continue;
		}
		/* Enable software control */
		rxx_ctl.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_CTL(l, qlm));
		rxx_ctl.s.rxt_swm = 1;
		csr_wr_node(node, CVMX_GSERX_BR_RXX_CTL(l, qlm), rxx_ctl.u64);

		/* Clear the completion flag and initiate a new request */
		rxx_eer.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_EER(l, qlm));
		rxx_eer.s.rxt_esv = 0;
		rxx_eer.s.rxt_eer = 1;
		csr_wr_node(node, CVMX_GSERX_BR_RXX_EER(l, qlm), rxx_eer.u64);
		pending |= 1 << l;
	}

	/*
	 * Wait up to 250ms, approx 10x the measured time: XFI/XLAUI can
	 * take 21-23ms, other interfaces 2-3ms.
	 */
	timeout = get_timer(0);

	lane_mask = 0;
	while (pending) {
		/* Wait for RX equalization to complete */
		for (l = 0; l < max_lanes; l++) {
			lane_mask = 1 << l;
			/* Only check lanes that are pending */
			if (!(pending & lane_mask))
				continue;

			/*
			 * Read the registers for checking Electrical Idle/CDR
			 * lock and the status of the RX equalization
			 */
			eie_detsts.u64 = csr_rd_node(node, CVMX_GSERX_RX_EIE_DETSTS(qlm));
			rxx_eer.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_EER(l, qlm));

			/*
			 * Mark failure if the lane entered Electrical Idle or
			 * lost CDR lock. The bit for the lane will have
			 * cleared in either EIESTS or CDRLOCK.
			 */
			if (!(eie_detsts.s.eiests & eie_detsts.s.cdrlock & lane_mask)) {
				fail |= lane_mask;
				pending &= ~lane_mask;
			} else if (rxx_eer.s.rxt_esv) {
				pending &= ~lane_mask;
			}
		}

		/* Break out of the loop on timeout */
		if (get_timer(timeout) > 250)
			break;
	}

	lane_mask = 0;
	/* Clean up and report status */
	for (l = 0; l < max_lanes; l++) {
		if (lane != -1 && lane != l)
			continue;

		lane_mask = 1 << l;
		rxx_eer.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_EER(l, qlm));
		/* Switch back to hardware control */
		rxx_ctl.u64 = csr_rd_node(node, CVMX_GSERX_BR_RXX_CTL(l, qlm));
		rxx_ctl.s.rxt_swm = 0;
		csr_wr_node(node, CVMX_GSERX_BR_RXX_CTL(l, qlm), rxx_ctl.u64);

		/* Report status */
		if (fail & lane_mask) {
#ifdef DEBUG_QLM
			debug("%d:QLM%d: Lane%d RX equalization lost CDR Lock or entered Electrical Idle\n",
			      node, qlm, l);
#endif
		} else if ((pending & lane_mask) || !rxx_eer.s.rxt_esv) {
#ifdef DEBUG_QLM
			debug("%d:QLM%d: Lane %d RX equalization timeout\n", node, qlm, l);
#endif
			fail |= 1 << l;
		} else {
#ifdef DEBUG_QLM
			const char *dir_label[4] = { "Hold", "Inc", "Dec", "Hold" };
#ifdef DEBUG_QLM_RX
			cvmx_gserx_lanex_rx_aeq_out_0_t rx_aeq_out_0;
			cvmx_gserx_lanex_rx_aeq_out_1_t rx_aeq_out_1;
			cvmx_gserx_lanex_rx_aeq_out_2_t rx_aeq_out_2;
			cvmx_gserx_lanex_rx_vma_status_0_t rx_vma_status_0;
#endif
			debug("%d:QLM%d: Lane%d: RX equalization completed.\n", node, qlm, l);
			debug(" Tx Direction Hints TXPRE: %s, TXMAIN: %s, TXPOST: %s, Figure of Merit: %d\n",
			      dir_label[(rxx_eer.s.rxt_esm) & 0x3],
			      dir_label[((rxx_eer.s.rxt_esm) >> 2) & 0x3],
			      dir_label[((rxx_eer.s.rxt_esm) >> 4) & 0x3],
			      (int)(rxx_eer.s.rxt_esm >> 6));

#ifdef DEBUG_QLM_RX
			rx_aeq_out_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_AEQ_OUT_0(l, qlm));
			rx_aeq_out_1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_AEQ_OUT_1(l, qlm));
			rx_aeq_out_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_AEQ_OUT_2(l, qlm));
			rx_vma_status_0.u64 =
				csr_rd_node(node, CVMX_GSERX_LANEX_RX_VMA_STATUS_0(l, qlm));
			debug(" DFE Tap1:%lu, Tap2:%ld, Tap3:%ld, Tap4:%ld, Tap5:%ld\n",
			      (unsigned long)cvmx_bit_extract(rx_aeq_out_1.u64, 0, 5),
			      (long)cvmx_bit_extract_smag(rx_aeq_out_1.u64, 5, 9),
			      (long)cvmx_bit_extract_smag(rx_aeq_out_1.u64, 10, 14),
			      (long)cvmx_bit_extract_smag(rx_aeq_out_0.u64, 0, 4),
			      (long)cvmx_bit_extract_smag(rx_aeq_out_0.u64, 5, 9));
			debug(" Pre-CTLE Gain:%lu, Post-CTLE Gain:%lu, CTLE Peak:%lu, CTLE Pole:%lu\n",
			      (unsigned long)cvmx_bit_extract(rx_aeq_out_2.u64, 4, 4),
			      (unsigned long)cvmx_bit_extract(rx_aeq_out_2.u64, 0, 4),
			      (unsigned long)cvmx_bit_extract(rx_vma_status_0.u64, 2, 4),
			      (unsigned long)cvmx_bit_extract(rx_vma_status_0.u64, 0, 2));
#endif
#endif
		}
	}

	return (fail) ? -1 : 0;
}
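
/*
 * Example usage (illustrative only; the node/QLM values are arbitrary):
 *
 *	if (__cvmx_qlm_rx_equalization(0, 2, -1) != 0)
 *		debug("QLM2: at least one lane failed RX equalization\n");
 */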

/**
 * Errata GSER-27882 - GSER 10GBASE-KR Transmit Equalizer Training may
 * not update the PHY Tx taps. This function is not static so we can
 * share it with BGX KR.
 *
 * @param node Node to apply errata workaround
 * @param qlm  QLM to apply errata workaround
 * @param lane Lane to apply the workaround to
 *
 * @return Zero on success (or if the chip is not affected), -1 if CDR
 *	   lock is not detected on the lane
 */
int cvmx_qlm_gser_errata_27882(int node, int qlm, int lane)
{
	cvmx_gserx_lanex_pcs_ctlifc_0_t clifc0;
	cvmx_gserx_lanex_pcs_ctlifc_2_t clifc2;

	if (!(OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_0) || OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_1) ||
	      OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_2) || OCTEON_IS_MODEL(OCTEON_CNF75XX_PASS1_0) ||
	      OCTEON_IS_MODEL(OCTEON_CN78XX)))
		return 0;

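	/* Wait for the lane to achieve CDR lock before updating the Tx taps */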
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_RX_EIE_DETSTS(qlm),
				       cvmx_gserx_rx_eie_detsts_t, cdrlock, &,
				       (1 << lane), 200))
		return -1;

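	/*
	 * Workaround sequence: load the Tx coefficient request override
	 * value, then toggle the override enable around two control
	 * interface override requests so the taps are applied and control
	 * is handed back to the hardware.
	 */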
	clifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm));
	clifc0.s.cfg_tx_coeff_req_ovrrd_val = 1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm), clifc0.u64);
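
	/* Enable the Tx coefficient request override */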
	clifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	clifc2.s.cfg_tx_coeff_req_ovrrd_en = 1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), clifc2.u64);
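
	/* Issue a control interface override request to update the Tx taps */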
	clifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	clifc2.s.ctlifc_ovrrd_req = 1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), clifc2.u64);
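
	/* Disable the Tx coefficient request override */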
	clifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	clifc2.s.cfg_tx_coeff_req_ovrrd_en = 0;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), clifc2.u64);
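
	/* Issue a second override request to return control to the hardware */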
	clifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
	clifc2.s.ctlifc_ovrrd_req = 1;
	csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), clifc2.u64);

	return 0;
}

/**
 * Updates the RX EQ default settings (CTLE bias) to support longer
 * SERDES channels
 *
 * @INTERNAL
 *
 * @param node Node number to configure
 * @param qlm  QLM number to configure
 */
void cvmx_qlm_gser_errata_25992(int node, int qlm)
{
	int lane;
	int num_lanes = cvmx_qlm_get_lanes(qlm);

	if (!(OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_0) || OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_1) ||
	      OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_2) || OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)))
		return;

	for (lane = 0; lane < num_lanes; lane++) {
		cvmx_gserx_lanex_rx_ctle_ctrl_t rx_ctle_ctrl;
		cvmx_gserx_lanex_rx_cfg_4_t rx_cfg_4;

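		/* Raise the CTLE bias control to support longer channels */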
		rx_ctle_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(lane, qlm));
		rx_ctle_ctrl.s.pcs_sds_rx_ctle_bias_ctrl = 3;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(lane, qlm), rx_ctle_ctrl.u64);

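		/* Update the RX error-detect control as part of this workaround */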
		rx_cfg_4.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_4(lane, qlm));
		rx_cfg_4.s.cfg_rx_errdet_ctrl = 0xcd6f;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_4(lane, qlm), rx_cfg_4.u64);
	}
}

void cvmx_qlm_display_registers(int qlm)
{
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	int lane;
	const __cvmx_qlm_jtag_field_t *ptr = cvmx_qlm_jtag_get_field();

	debug("%29s", "Field[<stop bit>:<start bit>]");
	for (lane = 0; lane < num_lanes; lane++)
		debug("\t Lane %d", lane);
	debug("\n");

	while (ptr && ptr->name) {
		debug("%20s[%3d:%3d]", ptr->name, ptr->stop_bit, ptr->start_bit);
		for (lane = 0; lane < num_lanes; lane++) {
			u64 val;
			int tx_byp = 0;

			/*
			 * Make sure serdes_tx_byp is set when displaying
			 * the TX amplitude and TX de-emphasis field values.
			 */
			if (strncmp(ptr->name, "biasdrv_", 8) == 0 ||
			    strncmp(ptr->name, "tcoeff_", 7) == 0) {
				tx_byp = cvmx_qlm_jtag_get(qlm, lane, "serdes_tx_byp");
				if (tx_byp == 0) {
					debug("\t \t");
					continue;
				}
			}
			val = cvmx_qlm_jtag_get(qlm, lane, ptr->name);
			debug("\t%4llu (0x%04llx)", (unsigned long long)val,
			      (unsigned long long)val);
		}
		debug("\n");
		ptr++;
	}
}

/* ToDo: CVMX_DUMP_GSER removed for now (unused!) */