blob: 5ffcc5313d2dc3f8823f5f59d0decf96de64ba0d [file] [log] [blame]
Weijie Gao23f17162018-12-20 16:12:53 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070010#include <cpu_func.h>
Weijie Gao23f17162018-12-20 16:12:53 +080011#include <dm.h>
12#include <malloc.h>
13#include <miiphy.h>
14#include <regmap.h>
15#include <reset.h>
16#include <syscon.h>
17#include <wait_bit.h>
18#include <asm/gpio.h>
19#include <asm/io.h>
Simon Glass336d4612020-02-03 07:36:16 -070020#include <dm/device_compat.h>
Weijie Gao23f17162018-12-20 16:12:53 +080021#include <linux/err.h>
22#include <linux/ioport.h>
23#include <linux/mdio.h>
24#include <linux/mii.h>
25
26#include "mtk_eth.h"
27
/*
 * DMA ring sizes and the packet buffer pool carved out of
 * priv->pkt_pool (one PKTSIZE_ALIGN-sized buffer per descriptor).
 */
#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

/*
 * MT7530/MT7531 switch family: 5 internal PHYs, 7 ports, and a 5-bit
 * SMI (MDIO) address space with 31 as the default switch address.
 */
#define MT753X_NUM_PHYS		5
#define MT753X_NUM_PORTS	7
#define MT753X_DFL_SMI_ADDR	31
#define MT753X_SMI_ADDR_MASK	0x1f

/* Map an internal PHY index onto the 5-bit MDIO address space */
#define MT753X_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

/*
 * GDMA forwarding config: IP/TCP/UDP checksum offload + CRC strip,
 * with all traffic classes (my-MAC/broadcast/multicast/unknown)
 * forwarded to the PDMA (i.e. up to the CPU).
 */
#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

/* Same offload/strip bits, but every traffic class is discarded */
#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))
63
/*
 * PDMA RX descriptor: four 32-bit hardware words.
 * NOTE(review): field comments below are inferred from the names and
 * the bitfield layout assumes LSB-first allocation on this target -
 * confirm against the SoC datasheet before relying on them.
 */
struct pdma_rxd_info1 {
	u32 PDP0;		/* packet data pointer 0 (buffer address) */
};

struct pdma_rxd_info2 {
	u32 PLEN1 : 14;		/* length received into buffer 1 */
	u32 LS1 : 1;		/* last segment flag for buffer 1 */
	u32 UN_USED : 1;
	u32 PLEN0 : 14;		/* length received into buffer 0 */
	u32 LS0 : 1;		/* last segment flag for buffer 0 */
	u32 DDONE : 1;		/* descriptor done / ownership bit */
};

struct pdma_rxd_info3 {
	u32 PDP1;		/* packet data pointer 1 */
};

struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;	/* flow offload engine entry index */
	u32 CRSN : 5;
	u32 SP : 3;		/* source port */
	u32 L4F : 1;
	u32 L4VLD : 1;		/* L4 checksum valid */
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

/* One RX descriptor is the concatenation of the four info words */
struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};
100
/*
 * PDMA TX descriptor: four 32-bit hardware words.
 * NOTE(review): field comments are inferred from names; bitfield
 * layout assumes LSB-first allocation - confirm with the datasheet.
 */
struct pdma_txd_info1 {
	u32 SDP0;		/* segment data pointer 0 (buffer address) */
};

struct pdma_txd_info2 {
	u32 SDL1 : 14;		/* segment data length 1 */
	u32 LS1 : 1;		/* last segment flag for buffer 1 */
	u32 BURST : 1;
	u32 SDL0 : 14;		/* segment data length 0 */
	u32 LS0 : 1;		/* last segment flag for buffer 0 */
	u32 DDONE : 1;		/* descriptor done / ownership bit */
};

struct pdma_txd_info3 {
	u32 SDP1;		/* segment data pointer 1 */
};

struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;	/* tag to insert when INS is set */
	u32 INS : 1;		/* insert VLAN tag */
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;		/* forward port */
	u32 TSO : 1;
	u32 TUI_CO : 3;		/* checksum offload control */
};

/* One TX descriptor is the concatenation of the four info words */
struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};
134
/* Kind of ethernet switch attached to the GMAC (if any) */
enum mtk_switch {
	SW_NONE,
	SW_MT7530,
	SW_MT7531
};

/* SoC families supported by this driver */
enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629,
	SOC_MT7622
};
146
/* Per-device driver state */
struct mtk_eth_priv {
	/* DMA-able packet buffer pool backing both descriptor rings */
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	/* Descriptor rings ("noc" presumably = non-cached mapping) */
	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	/* Current ring positions */
	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	/* Register windows: frame engine, GMAC, ethsys, SGMII */
	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *ethsys_base;
	void __iomem *sgmii_base;

	/* MDIO bus and the access ops selected in mtk_mdio_register() */
	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;		/* which GMAC this device drives */
	int force_mode;		/* fixed-link: force speed/duplex on MAC */
	int speed;
	int duplex;

	/* Attached PHY (when not using a switch's forced link) */
	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	/* Attached switch model and its init hook */
	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt753x_smi_addr;	/* switch SMI (MDIO) address */
	u32 mt753x_phy_base;	/* MDIO base of switch-internal PHYs */

	/* Switch reset: GPIO for external, reset ctl for MCM package */
	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};
189
190static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
191{
192 writel(val, priv->fe_base + PDMA_BASE + reg);
193}
194
195static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
196 u32 set)
197{
198 clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
199}
200
201static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
202 u32 val)
203{
204 u32 gdma_base;
205
206 if (no == 1)
207 gdma_base = GDMA2_BASE;
208 else
209 gdma_base = GDMA1_BASE;
210
211 writel(val, priv->fe_base + gdma_base + reg);
212}
213
214static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
215{
216 return readl(priv->gmac_base + reg);
217}
218
219static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
220{
221 writel(val, priv->gmac_base + reg);
222}
223
224static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
225{
226 clrsetbits_le32(priv->gmac_base + reg, clr, set);
227}
228
229static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
230 u32 set)
231{
232 clrsetbits_le32(priv->ethsys_base + reg, clr, set);
233}
234
235/* Direct MDIO clause 22/45 access via SoC */
236static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
237 u32 cmd, u32 st)
238{
239 int ret;
240 u32 val;
241
242 val = (st << MDIO_ST_S) |
243 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
244 (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
245 (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
246
247 if (cmd == MDIO_CMD_WRITE)
248 val |= data & MDIO_RW_DATA_M;
249
250 mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
251
252 ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
253 PHY_ACS_ST, 0, 5000, 0);
254 if (ret) {
255 pr_warn("MDIO access timeout\n");
256 return ret;
257 }
258
259 if (cmd == MDIO_CMD_READ) {
260 val = mtk_gmac_read(priv, GMAC_PIAC_REG);
261 return val & MDIO_RW_DATA_M;
262 }
263
264 return 0;
265}
266
/* Direct MDIO clause 22 read via SoC; returns value or negative error */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}
272
/* Direct MDIO clause 22 write via SoC; returns 0 or negative error */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}
278
279/* Direct MDIO clause 45 read via SoC */
280static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
281{
282 int ret;
283
284 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
285 if (ret)
286 return ret;
287
288 return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
289 MDIO_ST_C45);
290}
291
292/* Direct MDIO clause 45 write via SoC */
293static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
294 u16 reg, u16 val)
295{
296 int ret;
297
298 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
299 if (ret)
300 return ret;
301
302 return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
303 MDIO_ST_C45);
304}
305
306/* Indirect MDIO clause 45 read via MII registers */
307static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
308 u16 reg)
309{
310 int ret;
311
312 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
313 (MMD_ADDR << MMD_CMD_S) |
314 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
315 if (ret)
316 return ret;
317
318 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
319 if (ret)
320 return ret;
321
322 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
323 (MMD_DATA << MMD_CMD_S) |
324 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
325 if (ret)
326 return ret;
327
328 return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
329}
330
331/* Indirect MDIO clause 45 write via MII registers */
332static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
333 u16 reg, u16 val)
334{
335 int ret;
336
337 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
338 (MMD_ADDR << MMD_CMD_S) |
339 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
340 if (ret)
341 return ret;
342
343 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
344 if (ret)
345 return ret;
346
347 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
348 (MMD_DATA << MMD_CMD_S) |
349 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
350 if (ret)
351 return ret;
352
353 return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
354}
355
Landen Chao532de8d2020-02-18 16:49:37 +0800356/*
357 * MT7530 Internal Register Address Bits
358 * -------------------------------------------------------------------
359 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
360 * |----------------------------------------|---------------|--------|
361 * | Page Address | Reg Address | Unused |
362 * -------------------------------------------------------------------
363 */
364
365static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
366{
367 int ret, low_word, high_word;
368
369 /* Write page address */
370 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
371 if (ret)
372 return ret;
373
374 /* Read low word */
375 low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
376 if (low_word < 0)
377 return low_word;
378
379 /* Read high word */
380 high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
381 if (high_word < 0)
382 return high_word;
383
384 if (data)
385 *data = ((u32)high_word << 16) | (low_word & 0xffff);
386
387 return 0;
388}
389
390static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
391{
392 int ret;
393
394 /* Write page address */
395 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
396 if (ret)
397 return ret;
398
399 /* Write low word */
400 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
401 data & 0xffff);
402 if (ret)
403 return ret;
404
405 /* Write high word */
406 return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
407}
408
409static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
410 u32 set)
411{
412 u32 val;
413
414 mt753x_reg_read(priv, reg, &val);
415 val &= ~clr;
416 val |= set;
417 mt753x_reg_write(priv, reg, val);
418}
419
420/* Indirect MDIO clause 22/45 access */
421static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
422 u32 cmd, u32 st)
423{
424 ulong timeout;
425 u32 val, timeout_ms;
426 int ret = 0;
427
428 val = (st << MDIO_ST_S) |
429 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
430 ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
431 ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
432
433 if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
434 val |= data & MDIO_RW_DATA_M;
435
436 mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);
437
438 timeout_ms = 100;
439 timeout = get_timer(0);
440 while (1) {
441 mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
442
443 if ((val & PHY_ACS_ST) == 0)
444 break;
445
446 if (get_timer(timeout) > timeout_ms)
447 return -ETIMEDOUT;
448 }
449
450 if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
451 mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
452 ret = val & MDIO_RW_DATA_M;
453 }
454
455 return ret;
456}
457
458static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
459{
460 u8 phy_addr;
461
462 if (phy >= MT753X_NUM_PHYS)
463 return -EINVAL;
464
465 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
466
467 return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
468 MDIO_ST_C22);
469}
470
471static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
472 u16 val)
473{
474 u8 phy_addr;
475
476 if (phy >= MT753X_NUM_PHYS)
477 return -EINVAL;
478
479 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
480
481 return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
482 MDIO_ST_C22);
483}
484
485int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
486{
487 u8 phy_addr;
488 int ret;
489
490 if (addr >= MT753X_NUM_PHYS)
491 return -EINVAL;
492
493 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
494
495 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
496 MDIO_ST_C45);
497 if (ret)
498 return ret;
499
500 return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
501 MDIO_ST_C45);
502}
503
504static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
505 u16 reg, u16 val)
506{
507 u8 phy_addr;
508 int ret;
509
510 if (addr >= MT753X_NUM_PHYS)
511 return 0;
512
513 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
514
515 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
516 MDIO_ST_C45);
517 if (ret)
518 return ret;
519
520 return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
521 MDIO_ST_C45);
522}
523
Weijie Gao23f17162018-12-20 16:12:53 +0800524static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
525{
526 struct mtk_eth_priv *priv = bus->priv;
527
528 if (devad < 0)
529 return priv->mii_read(priv, addr, reg);
530 else
531 return priv->mmd_read(priv, addr, devad, reg);
532}
533
534static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
535 u16 val)
536{
537 struct mtk_eth_priv *priv = bus->priv;
538
539 if (devad < 0)
540 return priv->mii_write(priv, addr, reg, val);
541 else
542 return priv->mmd_write(priv, addr, devad, reg, val);
543}
544
545static int mtk_mdio_register(struct udevice *dev)
546{
547 struct mtk_eth_priv *priv = dev_get_priv(dev);
548 struct mii_dev *mdio_bus = mdio_alloc();
549 int ret;
550
551 if (!mdio_bus)
552 return -ENOMEM;
553
554 /* Assign MDIO access APIs according to the switch/phy */
555 switch (priv->sw) {
556 case SW_MT7530:
557 priv->mii_read = mtk_mii_read;
558 priv->mii_write = mtk_mii_write;
559 priv->mmd_read = mtk_mmd_ind_read;
560 priv->mmd_write = mtk_mmd_ind_write;
561 break;
Landen Chao532de8d2020-02-18 16:49:37 +0800562 case SW_MT7531:
563 priv->mii_read = mt7531_mii_ind_read;
564 priv->mii_write = mt7531_mii_ind_write;
565 priv->mmd_read = mt7531_mmd_ind_read;
566 priv->mmd_write = mt7531_mmd_ind_write;
567 break;
Weijie Gao23f17162018-12-20 16:12:53 +0800568 default:
569 priv->mii_read = mtk_mii_read;
570 priv->mii_write = mtk_mii_write;
571 priv->mmd_read = mtk_mmd_read;
572 priv->mmd_write = mtk_mmd_write;
573 }
574
575 mdio_bus->read = mtk_mdio_read;
576 mdio_bus->write = mtk_mdio_write;
577 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
578
579 mdio_bus->priv = (void *)priv;
580
581 ret = mdio_register(mdio_bus);
582
583 if (ret)
584 return ret;
585
586 priv->mdio_bus = mdio_bus;
587
588 return 0;
589}
590
Landen Chao532de8d2020-02-18 16:49:37 +0800591static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
Weijie Gao23f17162018-12-20 16:12:53 +0800592{
Landen Chao532de8d2020-02-18 16:49:37 +0800593 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
Weijie Gao23f17162018-12-20 16:12:53 +0800594
Landen Chao532de8d2020-02-18 16:49:37 +0800595 return priv->mmd_read(priv, phy_addr, 0x1f, reg);
Weijie Gao23f17162018-12-20 16:12:53 +0800596}
597
Landen Chao532de8d2020-02-18 16:49:37 +0800598static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
Weijie Gao23f17162018-12-20 16:12:53 +0800599{
Landen Chao532de8d2020-02-18 16:49:37 +0800600 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
Weijie Gao23f17162018-12-20 16:12:53 +0800601
Landen Chao532de8d2020-02-18 16:49:37 +0800602 priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
Weijie Gao23f17162018-12-20 16:12:53 +0800603}
604
/*
 * Configure the MT7530 core and TRGMII TX PLLs for the given xMII
 * mode. Only RGMII is supported; ncpo1/ssc_delta are the vendor
 * values for that mode. The write sequence is order-sensitive
 * (clock gate off -> PLL program -> PLL on -> clock gate on).
 * Returns 0 on success, -EINVAL for unsupported modes.
 */
static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	/* Let the PLL settle before re-enabling the clock gate */
	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}
664
/*
 * One-time MT7530 switch bring-up: discover the internal PHY base
 * address from the hardware trap, reset the switch, force port 6
 * (CPU port, RGMII) to 1000M/FD, set up the switch PLLs and TRGMII
 * drive strength. The register sequence is order-sensitive.
 */
static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	int i;

	/* Select 250MHz clk for RGMII mode */
	mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
		       ETHSYS_TRGMII_CLK_SEL362_5, 0);

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address from the trapped SMI address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs while reconfiguring the switch */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset (system + registers) */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt753x_reg_write(priv, PMCR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (8 << TD_DM_DRVP_S) | (8 << TD_DM_DRVN_S));

	/* Set TRGMII RX delay taps */
	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs again */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}
746
/*
 * Bring up the MT7531 core PLL (COREPLL) at 500MHz from a 25MHz XTAL
 * and enable the SGMII (325M) and RGMII (250M SSC) analog clocks.
 * The numbered steps below are order-sensitive.
 * NOTE(review): @mcm is currently unused here.
 */
static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1 : Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, support XTAL 25Mhz only */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* add enable 325M clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* add enable 250SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}
792
/*
 * Configure MT7531 port 5 or 6 for forced-mode 2.5G SGMII (HSGMII).
 * The write order matters: speed and AN config must be set before the
 * PHYA power-down is released.
 * Returns 0 on success, -EINVAL for any other port.
 */
static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed(2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}
818
819static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
820{
821 u32 val;
822
823 if (port != 5) {
824 printf("error: RGMII mode is not available for port %d\n",
825 port);
826 return -EINVAL;
827 }
828
829 mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
830 val |= GP_CLK_EN;
831 val &= ~GP_MODE_M;
832 val |= GP_MODE_RGMII << GP_MODE_S;
833 val |= TXCLK_NO_REVERSE;
834 val |= RXCLK_NO_DELAY;
835 val &= ~CLK_SKEW_IN_M;
836 val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
837 val &= ~CLK_SKEW_OUT_M;
838 val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
839 mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
840
841 return 0;
842}
843
/*
 * Per-PHY tuning for the MT7531 internal PHYs: HW auto downshift,
 * link-down power saving, and TX power-saving mode.
 * Note: PHYs are addressed by index here; the mt7531 mii/mmd ops
 * translate the index to the real MDIO address.
 */
static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift (0x1f presumably selects the
		 * extension page - confirm with the PHY datasheet)
		 */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		/* Select TX power-saving mode in MMD device 0x1e */
		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}
867
/*
 * One-time MT7531 switch bring-up: reset, core PLL setup, per-port
 * SGMII/RGMII configuration, forced 1000M/FD CPU-port MACs, PHY
 * tuning, and finally enabling the internal PHYs. Order-sensitive.
 */
static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	/* Internal PHYs sit right after the switch's own SMI address */
	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs while reconfiguring the switch */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	/* Bond option: is port 5 strapped as SGMII? */
	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* port5 support either RGMII or SGMII, port6 only support SGMII. */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	/* CPU ports: forced 1000M/FD with TX/RX flow control */
	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
	mt753x_reg_write(priv, PMCR_REG(6), pmcr);

	/* Turn on PHYs again */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs via the core PLL group register */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}
948
/*
 * Reset the switch (via reset controller for MCM packages, GPIO
 * otherwise), run the model-specific init hook, then isolate the
 * ports: user ports may only forward to port 6 (CPU, matrix 0x40),
 * while port 6 may forward to all user ports (matrix 0x3f).
 * Returns 0 on success or the error from switch_init().
 */
int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}
989
990static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
991{
992 u16 lcl_adv = 0, rmt_adv = 0;
993 u8 flowctrl;
994 u32 mcr;
995
Landen Chao532de8d2020-02-18 16:49:37 +0800996 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
Weijie Gao23f17162018-12-20 16:12:53 +0800997 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
998 MAC_MODE | FORCE_MODE |
999 MAC_TX_EN | MAC_RX_EN |
1000 BKOFF_EN | BACKPR_EN;
1001
1002 switch (priv->phydev->speed) {
1003 case SPEED_10:
1004 mcr |= (SPEED_10M << FORCE_SPD_S);
1005 break;
1006 case SPEED_100:
1007 mcr |= (SPEED_100M << FORCE_SPD_S);
1008 break;
1009 case SPEED_1000:
1010 mcr |= (SPEED_1000M << FORCE_SPD_S);
1011 break;
1012 };
1013
1014 if (priv->phydev->link)
1015 mcr |= FORCE_LINK;
1016
1017 if (priv->phydev->duplex) {
1018 mcr |= FORCE_DPX;
1019
1020 if (priv->phydev->pause)
1021 rmt_adv = LPA_PAUSE_CAP;
1022 if (priv->phydev->asym_pause)
1023 rmt_adv |= LPA_PAUSE_ASYM;
1024
1025 if (priv->phydev->advertising & ADVERTISED_Pause)
1026 lcl_adv |= ADVERTISE_PAUSE_CAP;
1027 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1028 lcl_adv |= ADVERTISE_PAUSE_ASYM;
1029
1030 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1031
1032 if (flowctrl & FLOW_CTRL_TX)
1033 mcr |= FORCE_TX_FC;
1034 if (flowctrl & FLOW_CTRL_RX)
1035 mcr |= FORCE_RX_FC;
1036
1037 debug("rx pause %s, tx pause %s\n",
1038 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1039 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1040 }
1041
1042 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1043}
1044
/*
 * Start the attached PHY and, if link came up, program the MAC to
 * match the negotiated parameters.
 * Returns 0 on success or with link down; propagates phy_startup()
 * errors otherwise.
 */
static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		/* Not an error: caller may retry later */
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	mtk_phy_link_adjust(priv);

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}
1070
/*
 * Connect to the PHY at priv->phy_addr on our MDIO bus, restrict the
 * advertised abilities to gigabit features, and run phy_config().
 * Returns 0 on success, -ENODEV if no PHY answers.
 */
static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	/* Advertise at most what the hardware supports (gigabit) */
	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}
1089
/*
 * Configure the SoC-side SGMII block for forced-mode 2.5G operation.
 * Same sequence as mt7531_port_sgmii_init() but on the SoC's own
 * sgmii register window; order matters (PHYA released last).
 */
static void mtk_sgmii_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed(2.5G) */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_GEN2_SPEED,
			SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}
1107
/*
 * mtk_mac_init() - configure the GMAC path for the selected PHY interface
 *
 * Maps the device-tree phy-mode onto the ethsys GE_MODE field, applies
 * forced MAC settings when a fixed-link is used, and performs MT7623
 * TRGMII drive-strength/clock tuning.
 */
static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		/* SGMII keeps GE_MODE_RGMII; the SGMII path is selected
		 * separately via the SYSCFG0 SGMII mux below.
		 */
		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		mtk_sgmii_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		/* Unsupported mode: leave ge_mode at 0 */
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		/* fixed-link: force speed/duplex/link-up in the MAC control
		 * register instead of tracking a PHY
		 */
		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (priv->soc == SOC_MT7623) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		/* Pulse RX_RST while selecting the RX clock edge */
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}
1178
/*
 * mtk_eth_fifo_init() - reset PDMA and build the TX/RX descriptor rings
 *
 * Clears the PDMA configuration, zeroes both descriptor rings and the
 * packet buffer pool, pre-assigns one PKTSIZE_ALIGN buffer from the pool
 * to every descriptor, and programs the ring base/size/index registers.
 * TX descriptors start with DDONE=1 (free for CPU); RX descriptors start
 * with DDONE=0 (owned by hardware).
 */
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	/* Clear the upper half of PDMA_GLO_CFG before reprogramming */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	/* Push the zeroed buffer pool out to memory before DMA touches it */
	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		/* DDONE=1: descriptor is free for the CPU to fill */
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		/* Forward port: GDMA1/GDMA2 depending on which GMAC we use */
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	/* CPU RX index trails one behind so hardware owns the whole ring */
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}
1224
1225static int mtk_eth_start(struct udevice *dev)
1226{
1227 struct mtk_eth_priv *priv = dev_get_priv(dev);
1228 int ret;
1229
1230 /* Reset FE */
1231 reset_assert(&priv->rst_fe);
1232 udelay(1000);
1233 reset_deassert(&priv->rst_fe);
1234 mdelay(10);
1235
1236 /* Packets forward to PDMA */
1237 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
1238
1239 if (priv->gmac_id == 0)
1240 mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1241 else
1242 mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1243
1244 udelay(500);
1245
1246 mtk_eth_fifo_init(priv);
1247
1248 /* Start PHY */
1249 if (priv->sw == SW_NONE) {
1250 ret = mtk_phy_start(priv);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1256 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1257 udelay(500);
1258
1259 return 0;
1260}
1261
1262static void mtk_eth_stop(struct udevice *dev)
1263{
1264 struct mtk_eth_priv *priv = dev_get_priv(dev);
1265
1266 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1267 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1268 udelay(500);
1269
1270 wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
1271 RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1272}
1273
1274static int mtk_eth_write_hwaddr(struct udevice *dev)
1275{
1276 struct eth_pdata *pdata = dev_get_platdata(dev);
1277 struct mtk_eth_priv *priv = dev_get_priv(dev);
1278 unsigned char *mac = pdata->enetaddr;
1279 u32 macaddr_lsb, macaddr_msb;
1280
1281 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1282 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1283 ((u32)mac[4] << 8) | (u32)mac[5];
1284
1285 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1286 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1287
1288 return 0;
1289}
1290
1291static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1292{
1293 struct mtk_eth_priv *priv = dev_get_priv(dev);
1294 u32 idx = priv->tx_cpu_owner_idx0;
1295 void *pkt_base;
1296
1297 if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
1298 debug("mtk-eth: TX DMA descriptor ring is full\n");
1299 return -EPERM;
1300 }
1301
1302 pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
1303 memcpy(pkt_base, packet, length);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001304 flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
Weijie Gao23f17162018-12-20 16:12:53 +08001305 roundup(length, ARCH_DMA_MINALIGN));
1306
1307 priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
1308 priv->tx_ring_noc[idx].txd_info2.DDONE = 0;
1309
1310 priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1311 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1312
1313 return 0;
1314}
1315
1316static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1317{
1318 struct mtk_eth_priv *priv = dev_get_priv(dev);
1319 u32 idx = priv->rx_dma_owner_idx0;
1320 uchar *pkt_base;
1321 u32 length;
1322
1323 if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
1324 debug("mtk-eth: RX DMA descriptor ring is empty\n");
1325 return -EAGAIN;
1326 }
1327
1328 length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
1329 pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001330 invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
Weijie Gao23f17162018-12-20 16:12:53 +08001331 roundup(length, ARCH_DMA_MINALIGN));
1332
1333 if (packetp)
1334 *packetp = pkt_base;
1335
1336 return length;
1337}
1338
1339static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1340{
1341 struct mtk_eth_priv *priv = dev_get_priv(dev);
1342 u32 idx = priv->rx_dma_owner_idx0;
1343
1344 priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
1345 priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
1346 priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
1347
1348 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1349 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1350
1351 return 0;
1352}
1353
/*
 * mtk_eth_probe() - driver probe hook
 *
 * Maps the frame-engine and GMAC register bases from pdata->iobase,
 * registers the MDIO bus, allocates the TX/RX descriptor rings from
 * non-cached memory (the rings are shared with the DMA engine), applies
 * the MAC mode, and finally initializes either the attached PHY or the
 * MT753x switch depending on the device tree.
 */
static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	ulong iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare for tx/rx rings */
	priv->tx_ring_noc = (struct pdma_txdesc *)
		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (struct pdma_rxdesc *)
		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return mt753x_switch_init(priv);
}
1390
/*
 * mtk_eth_remove() - driver removal hook
 *
 * Unregisters and frees the MDIO bus created in probe, then stops any
 * DMA that mtk_eth_start() may have left running.
 */
static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}
1404
1405static int mtk_eth_ofdata_to_platdata(struct udevice *dev)
1406{
1407 struct eth_pdata *pdata = dev_get_platdata(dev);
1408 struct mtk_eth_priv *priv = dev_get_priv(dev);
1409 struct ofnode_phandle_args args;
1410 struct regmap *regmap;
1411 const char *str;
1412 ofnode subnode;
1413 int ret;
1414
1415 priv->soc = dev_get_driver_data(dev);
1416
1417 pdata->iobase = devfdt_get_addr(dev);
1418
1419 /* get corresponding ethsys phandle */
1420 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1421 &args);
1422 if (ret)
1423 return ret;
1424
1425 regmap = syscon_node_to_regmap(args.node);
1426 if (IS_ERR(regmap))
1427 return PTR_ERR(regmap);
1428
1429 priv->ethsys_base = regmap_get_range(regmap, 0);
1430 if (!priv->ethsys_base) {
1431 dev_err(dev, "Unable to find ethsys\n");
1432 return -ENODEV;
1433 }
1434
1435 /* Reset controllers */
1436 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1437 if (ret) {
1438 printf("error: Unable to get reset ctrl for frame engine\n");
1439 return ret;
1440 }
1441
1442 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1443
1444 /* Interface mode is required */
1445 str = dev_read_string(dev, "phy-mode");
1446 if (str) {
1447 pdata->phy_interface = phy_get_interface_by_name(str);
1448 priv->phy_interface = pdata->phy_interface;
1449 } else {
1450 printf("error: phy-mode is not set\n");
1451 return -EINVAL;
1452 }
1453
1454 /* Force mode or autoneg */
1455 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
1456 if (ofnode_valid(subnode)) {
1457 priv->force_mode = 1;
1458 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
1459 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
1460
1461 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
1462 priv->speed != SPEED_1000) {
1463 printf("error: no valid speed set in fixed-link\n");
1464 return -EINVAL;
1465 }
1466 }
1467
MarkLeeb4ef49a2020-01-21 19:31:57 +08001468 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1469 /* get corresponding sgmii phandle */
1470 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
1471 NULL, 0, 0, &args);
1472 if (ret)
1473 return ret;
1474
1475 regmap = syscon_node_to_regmap(args.node);
1476
1477 if (IS_ERR(regmap))
1478 return PTR_ERR(regmap);
1479
1480 priv->sgmii_base = regmap_get_range(regmap, 0);
1481
1482 if (!priv->sgmii_base) {
1483 dev_err(dev, "Unable to find sgmii\n");
1484 return -ENODEV;
1485 }
1486 }
1487
Weijie Gao23f17162018-12-20 16:12:53 +08001488 /* check for switch first, otherwise phy will be used */
1489 priv->sw = SW_NONE;
1490 priv->switch_init = NULL;
1491 str = dev_read_string(dev, "mediatek,switch");
1492
1493 if (str) {
1494 if (!strcmp(str, "mt7530")) {
1495 priv->sw = SW_MT7530;
1496 priv->switch_init = mt7530_setup;
Landen Chao532de8d2020-02-18 16:49:37 +08001497 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
1498 } else if (!strcmp(str, "mt7531")) {
1499 priv->sw = SW_MT7531;
1500 priv->switch_init = mt7531_setup;
1501 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
Weijie Gao23f17162018-12-20 16:12:53 +08001502 } else {
1503 printf("error: unsupported switch\n");
1504 return -EINVAL;
1505 }
1506
1507 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
1508 if (priv->mcm) {
1509 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
1510 if (ret) {
1511 printf("error: no reset ctrl for mcm\n");
1512 return ret;
1513 }
1514 } else {
1515 gpio_request_by_name(dev, "reset-gpios", 0,
1516 &priv->rst_gpio, GPIOD_IS_OUT);
1517 }
1518 } else {
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001519 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
1520 0, &args);
1521 if (ret) {
Weijie Gao23f17162018-12-20 16:12:53 +08001522 printf("error: phy-handle is not specified\n");
1523 return ret;
1524 }
1525
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001526 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
Weijie Gao23f17162018-12-20 16:12:53 +08001527 if (priv->phy_addr < 0) {
1528 printf("error: phy address is not specified\n");
1529 return ret;
1530 }
1531 }
1532
1533 return 0;
1534}
1535
/* SoC match table; .data carries the SOC_* id stored in priv->soc and
 * used for SoC-specific handling (e.g. MT7623 TRGMII tuning).
 */
static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{ .compatible = "mediatek,mt7622-eth", .data = SOC_MT7622 },
	{}
};
1542
/* DM_ETH operations implemented by this driver */
static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};
1551
/* Driver-model registration for the MediaTek frame-engine ethernet.
 * DM_FLAG_ALLOC_PRIV_DMA requests DMA-capable alignment for priv data.
 */
U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.ofdata_to_platdata = mtk_eth_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};