blob: edfa5d1ce8934911b493029c81165010563c024e [file] [log] [blame]
Weijie Gao23f17162018-12-20 16:12:53 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070010#include <cpu_func.h>
Weijie Gao23f17162018-12-20 16:12:53 +080011#include <dm.h>
12#include <malloc.h>
13#include <miiphy.h>
14#include <regmap.h>
15#include <reset.h>
16#include <syscon.h>
17#include <wait_bit.h>
18#include <asm/gpio.h>
19#include <asm/io.h>
20#include <linux/err.h>
21#include <linux/ioport.h>
22#include <linux/mdio.h>
23#include <linux/mii.h>
24
25#include "mtk_eth.h"
26
/* Number of descriptors in each DMA ring; one PKTSIZE_ALIGN buffer each */
#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

/* The MT7530 switch exposes 5 internal PHYs; 31 is its default SMI address */
#define MT7530_NUM_PHYS		5
#define MT7530_DFL_SMI_ADDR	31

/* Internal PHY addresses wrap within the 5-bit MDIO address space */
#define MT7530_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

/*
 * GDMA ingress control: enable IP/TCP/UDP checksum offload, strip CRC,
 * and forward all traffic classes to the PDMA (CPU path).
 * NOTE(review): 0x20000000 is an undocumented magic bit here — presumably
 * a reserved/must-set field in GDMA_IG_CTRL; confirm against the datasheet.
 */
#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

/* Same checksum/CRC setup, but discard every forwarding class */
#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))
/*
 * PDMA hardware descriptor layout.
 *
 * These bitfield structs mirror the in-memory RX/TX DMA descriptors
 * consumed by the frame engine. Field order/packing must not be changed:
 * the hardware reads these words directly (descriptors are placed in
 * non-cached memory by mtk_eth_probe()).
 *
 * NOTE(review): bitfield layout is compiler/ABI dependent; this relies on
 * the little-endian LSB-first allocation used by the supported toolchains.
 */

/* RX word 0: physical address of receive buffer 0 */
struct pdma_rxd_info1 {
	u32 PDP0;
};

/* RX word 1: lengths/flags; DDONE is set by hardware when the
 * descriptor holds a completed packet.
 */
struct pdma_rxd_info2 {
	u32 PLEN1 : 14;
	u32 LS1 : 1;
	u32 UN_USED : 1;
	u32 PLEN0 : 14;	/* received byte count of buffer 0 */
	u32 LS0 : 1;	/* last segment of the packet */
	u32 DDONE : 1;	/* descriptor done (hardware-owned -> CPU-owned) */
};

/* RX word 2: physical address of receive buffer 1 (unused by this driver) */
struct pdma_rxd_info3 {
	u32 PDP1;
};

/* RX word 3: parse results (FOE entry, checksum/protocol flags, etc.) */
struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;
	u32 CRSN : 5;
	u32 SP : 3;
	u32 L4F : 1;
	u32 L4VLD : 1;
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};

/* TX word 0: physical address of segment 0 */
struct pdma_txd_info1 {
	u32 SDP0;
};

/* TX word 1: segment lengths/flags; CPU clears DDONE to hand the
 * descriptor to hardware, hardware sets it when transmission completes.
 */
struct pdma_txd_info2 {
	u32 SDL1 : 14;
	u32 LS1 : 1;
	u32 BURST : 1;
	u32 SDL0 : 14;	/* byte count of segment 0 */
	u32 LS0 : 1;	/* last segment */
	u32 DDONE : 1;	/* descriptor done */
};

/* TX word 2: physical address of segment 1 (unused by this driver) */
struct pdma_txd_info3 {
	u32 SDP1;
};

/* TX word 3: VLAN insertion, forward port and offload controls */
struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;
	u32 INS : 1;
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;	/* egress GDMA port (gmac_id + 1) */
	u32 TSO : 1;
	u32 TUI_CO : 3;
};

struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};
131
/* Attached switch type (from the "mediatek,switch" DT property) */
enum mtk_switch {
	SW_NONE,
	SW_MT7530
};

/* SoC variant, taken from the compatible-string driver data */
enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629,
	SOC_MT7622
};
142
/* Per-device driver state */
struct mtk_eth_priv {
	/* Packet buffer pool shared by both rings; DMA-aligned because the
	 * hardware reads/writes it directly (flushed in mtk_eth_fifo_init).
	 */
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	/* Descriptor rings, allocated from non-cached memory in probe */
	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	/* Software ring cursors */
	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	/* Register bases: frame engine, GMAC, ethsys syscon, SGMII syscon */
	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *ethsys_base;
	void __iomem *sgmii_base;

	/* MDIO bus and the access ops selected in mtk_mdio_register() */
	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;		/* which GMAC this device drives (0 or 1) */
	int force_mode;		/* non-zero when DT has a fixed-link node */
	int speed;		/* forced speed (SPEED_10/100/1000) */
	int duplex;		/* forced duplex (non-zero = full) */

	/* PHY mode (SW_NONE) */
	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	/* Switch mode (SW_MT7530) */
	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt7530_smi_addr;
	u32 mt7530_phy_base;	/* derived from HWTRAP in mt7530_setup() */

	/* Switch reset: either a GPIO or, for MCM parts, a reset line */
	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};
185
/* Write a PDMA register (offset relative to PDMA_BASE) */
static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + PDMA_BASE + reg);
}
190
/* Read-modify-write a PDMA register: clear 'clr' bits, then set 'set' bits */
static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
}
196
197static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
198 u32 val)
199{
200 u32 gdma_base;
201
202 if (no == 1)
203 gdma_base = GDMA2_BASE;
204 else
205 gdma_base = GDMA1_BASE;
206
207 writel(val, priv->fe_base + gdma_base + reg);
208}
209
/* Read a GMAC register */
static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}
214
/* Write a GMAC register */
static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}
219
/* Read-modify-write a GMAC register: clear 'clr' bits, then set 'set' bits */
static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}
224
/* Read-modify-write an ethsys (syscon) register */
static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	clrsetbits_le32(priv->ethsys_base + reg, clr, set);
}
230
/*
 * Direct MDIO clause 22/45 access via SoC.
 *
 * Builds a PHY Indirect Access Control (PIAC) command word from the
 * start-of-frame code @st, opcode @cmd, PHY address and register address,
 * kicks the transaction by setting PHY_ACS_ST, and busy-waits (up to 5 ms)
 * for the hardware to clear it again.
 *
 * Returns: read data (0..0xffff) for MDIO_CMD_READ, 0 on successful
 * write/address cycles, or a negative error on timeout.
 */
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	/* Only write cycles carry payload data */
	if (cmd == MDIO_CMD_WRITE)
		val |= data & MDIO_RW_DATA_M;

	/* PHY_ACS_ST starts the transaction; hardware clears it when done */
	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	/* For reads, the result is latched in the same register */
	if (cmd == MDIO_CMD_READ) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}
262
/* Direct MDIO clause 22 read via SoC; returns value or negative error */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}
268
/* Direct MDIO clause 22 write via SoC; returns 0 or negative error */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}
274
/*
 * Direct MDIO clause 45 read via SoC.
 * Clause 45 is two cycles: an address cycle carrying @reg in the data
 * field (the register field carries @devad), then the actual read.
 */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}
287
/*
 * Direct MDIO clause 45 write via SoC.
 * Address cycle first (register number in the data field), then the
 * write cycle carrying @val.
 */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}
301
/*
 * Indirect MDIO clause 45 read via the clause 22 MMD access registers
 * (MII regs 13/14), for PHYs/switches without direct C45 support:
 * select devad+address function, write the register number, switch to
 * data function, then read the data register.
 */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}
326
/*
 * Indirect MDIO clause 45 write via the clause 22 MMD access registers.
 * Same sequence as mtk_mmd_ind_read(), but the final cycle writes @val.
 */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}
351
352static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
353{
354 struct mtk_eth_priv *priv = bus->priv;
355
356 if (devad < 0)
357 return priv->mii_read(priv, addr, reg);
358 else
359 return priv->mmd_read(priv, addr, devad, reg);
360}
361
362static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
363 u16 val)
364{
365 struct mtk_eth_priv *priv = bus->priv;
366
367 if (devad < 0)
368 return priv->mii_write(priv, addr, reg, val);
369 else
370 return priv->mmd_write(priv, addr, devad, reg, val);
371}
372
373static int mtk_mdio_register(struct udevice *dev)
374{
375 struct mtk_eth_priv *priv = dev_get_priv(dev);
376 struct mii_dev *mdio_bus = mdio_alloc();
377 int ret;
378
379 if (!mdio_bus)
380 return -ENOMEM;
381
382 /* Assign MDIO access APIs according to the switch/phy */
383 switch (priv->sw) {
384 case SW_MT7530:
385 priv->mii_read = mtk_mii_read;
386 priv->mii_write = mtk_mii_write;
387 priv->mmd_read = mtk_mmd_ind_read;
388 priv->mmd_write = mtk_mmd_ind_write;
389 break;
390 default:
391 priv->mii_read = mtk_mii_read;
392 priv->mii_write = mtk_mii_write;
393 priv->mmd_read = mtk_mmd_read;
394 priv->mmd_write = mtk_mmd_write;
395 }
396
397 mdio_bus->read = mtk_mdio_read;
398 mdio_bus->write = mtk_mdio_write;
399 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
400
401 mdio_bus->priv = (void *)priv;
402
403 ret = mdio_register(mdio_bus);
404
405 if (ret)
406 return ret;
407
408 priv->mdio_bus = mdio_bus;
409
410 return 0;
411}
412
413/*
414 * MT7530 Internal Register Address Bits
415 * -------------------------------------------------------------------
416 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
417 * |----------------------------------------|---------------|--------|
418 * | Page Address | Reg Address | Unused |
419 * -------------------------------------------------------------------
420 */
421
/*
 * Read a 32-bit MT7530 switch register through the SMI (MDIO) interface.
 * Per the address layout above: the page (reg >> 6) goes to MII reg 0x1f,
 * the low half-word lives at MII reg (reg >> 2) & 0xf, the high half-word
 * at MII reg 0x10.
 *
 * @data may be NULL if the caller only wants the access side effect.
 * Returns 0 on success or a negative error.
 */
static int mt7530_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt7530_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}
446
/*
 * Write a 32-bit MT7530 switch register through the SMI interface.
 * Mirror of mt7530_reg_read(): page select, low half-word, high half-word.
 * Returns 0 on success or a negative error.
 */
static int mt7530_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt7530_smi_addr, 0x10, data >> 16);
}
465
466static void mt7530_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
467 u32 set)
468{
469 u32 val;
470
471 mt7530_reg_read(priv, reg, &val);
472 val &= ~clr;
473 val |= set;
474 mt7530_reg_write(priv, reg, val);
475}
476
/*
 * Write an MT7530 core (PLL/clock) register. These live behind MMD
 * device 0x1f of the switch's first internal PHY, hence the indirect
 * clause 45 access.
 */
static void mt7530_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, 0);

	mtk_mmd_ind_write(priv, phy_addr, 0x1f, reg, val);
}
483
/*
 * Configure the MT7530 core PLL and (T)RGMII TX clock for the given
 * xMII @mode. Only RGMII is supported here; anything else returns
 * -EINVAL. The write ordering below (clock gate off -> PLL program ->
 * PLL enable -> clock gate on) follows the vendor bring-up sequence
 * and must not be rearranged.
 */
static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	/* Let the PLL settle before re-enabling the clock */
	udelay(20);

	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt7530_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt7530_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt7530_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}
543
/*
 * Bring up the MT7530 switch: hardware reset, port/MAC configuration,
 * PLL setup and port isolation. Registered as priv->switch_init and
 * called from mtk_eth_probe(). Returns 0 (errors from individual
 * register writes are not propagated).
 *
 * The sequence is order-dependent (reset -> HWTRAP -> PHY power-down ->
 * switch reset -> MAC/interface config -> PLL -> PHY power-up).
 */
static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	int i;

	/* Select 250MHz clk for RGMII mode */
	mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
		       ETHSYS_TRGMII_CLK_SEL362_5, 0);

	/* Global reset switch */
	if (priv->mcm) {
		/* MCM (multi-chip module) parts use a reset line ... */
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		/* ... discrete parts use a reset GPIO (active low) */
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address from the trapped SMI address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt7530_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);
	mt7530_reg_write(priv, PCMR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt7530_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (1 << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled (CPU-facing port) */
	mt7530_reg_write(priv, PCMR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt7530_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt7530_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (8 << TD_DM_DRVP_S) | (8 << TD_DM_DRVN_S));

	/* RX delay tap tuning for each TRGMII lane */
	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt7530_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Set port isolation */
	for (i = 0; i < 8; i++) {
		/* Set port matrix mode: user ports may only reach the CPU
		 * port (6); the CPU port may reach all others.
		 */
		if (i != 6)
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt7530_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}
654
655static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
656{
657 u16 lcl_adv = 0, rmt_adv = 0;
658 u8 flowctrl;
659 u32 mcr;
660
661 mcr = (1 << IPG_CFG_S) |
662 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
663 MAC_MODE | FORCE_MODE |
664 MAC_TX_EN | MAC_RX_EN |
665 BKOFF_EN | BACKPR_EN;
666
667 switch (priv->phydev->speed) {
668 case SPEED_10:
669 mcr |= (SPEED_10M << FORCE_SPD_S);
670 break;
671 case SPEED_100:
672 mcr |= (SPEED_100M << FORCE_SPD_S);
673 break;
674 case SPEED_1000:
675 mcr |= (SPEED_1000M << FORCE_SPD_S);
676 break;
677 };
678
679 if (priv->phydev->link)
680 mcr |= FORCE_LINK;
681
682 if (priv->phydev->duplex) {
683 mcr |= FORCE_DPX;
684
685 if (priv->phydev->pause)
686 rmt_adv = LPA_PAUSE_CAP;
687 if (priv->phydev->asym_pause)
688 rmt_adv |= LPA_PAUSE_ASYM;
689
690 if (priv->phydev->advertising & ADVERTISED_Pause)
691 lcl_adv |= ADVERTISE_PAUSE_CAP;
692 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
693 lcl_adv |= ADVERTISE_PAUSE_ASYM;
694
695 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
696
697 if (flowctrl & FLOW_CTRL_TX)
698 mcr |= FORCE_TX_FC;
699 if (flowctrl & FLOW_CTRL_RX)
700 mcr |= FORCE_RX_FC;
701
702 debug("rx pause %s, tx pause %s\n",
703 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
704 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
705 }
706
707 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
708}
709
/*
 * Start the attached PHY and, if a link came up, program the GMAC
 * accordingly. Returns 0 on success (including link-down, which is
 * reported but not treated as an error) or the phy_startup() error.
 */
static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	mtk_phy_link_adjust(priv);

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}
735
/*
 * Connect to the external PHY on our MDIO bus (phy-handle from DT),
 * limit advertisement to gigabit features and run phy_config().
 * Returns 0 on success or -ENODEV if the PHY cannot be connected.
 */
static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}
754
MarkLeeb4ef49a2020-01-21 19:31:57 +0800755static void mtk_sgmii_init(struct mtk_eth_priv *priv)
756{
757 /* Set SGMII GEN2 speed(2.5G) */
758 clrsetbits_le32(priv->sgmii_base + SGMSYS_GEN2_SPEED,
759 SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);
760
761 /* Disable SGMII AN */
762 clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
763 SGMII_AN_ENABLE, 0);
764
765 /* SGMII force mode setting */
766 writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
767
768 /* Release PHYA power down state */
769 clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
770 SGMII_PHYA_PWD, 0);
771}
772
Weijie Gao23f17162018-12-20 16:12:53 +0800773static void mtk_mac_init(struct mtk_eth_priv *priv)
774{
775 int i, ge_mode = 0;
776 u32 mcr;
777
778 switch (priv->phy_interface) {
779 case PHY_INTERFACE_MODE_RGMII_RXID:
780 case PHY_INTERFACE_MODE_RGMII:
MarkLeeb4ef49a2020-01-21 19:31:57 +0800781 ge_mode = GE_MODE_RGMII;
782 break;
Weijie Gao23f17162018-12-20 16:12:53 +0800783 case PHY_INTERFACE_MODE_SGMII:
784 ge_mode = GE_MODE_RGMII;
MarkLeeb4ef49a2020-01-21 19:31:57 +0800785 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
786 SYSCFG0_SGMII_SEL(priv->gmac_id));
787 mtk_sgmii_init(priv);
Weijie Gao23f17162018-12-20 16:12:53 +0800788 break;
789 case PHY_INTERFACE_MODE_MII:
790 case PHY_INTERFACE_MODE_GMII:
791 ge_mode = GE_MODE_MII;
792 break;
793 case PHY_INTERFACE_MODE_RMII:
794 ge_mode = GE_MODE_RMII;
795 break;
796 default:
797 break;
798 }
799
800 /* set the gmac to the right mode */
801 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
802 SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
803 ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));
804
805 if (priv->force_mode) {
806 mcr = (1 << IPG_CFG_S) |
807 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
808 MAC_MODE | FORCE_MODE |
809 MAC_TX_EN | MAC_RX_EN |
810 BKOFF_EN | BACKPR_EN |
811 FORCE_LINK;
812
813 switch (priv->speed) {
814 case SPEED_10:
815 mcr |= SPEED_10M << FORCE_SPD_S;
816 break;
817 case SPEED_100:
818 mcr |= SPEED_100M << FORCE_SPD_S;
819 break;
820 case SPEED_1000:
821 mcr |= SPEED_1000M << FORCE_SPD_S;
822 break;
823 }
824
825 if (priv->duplex)
826 mcr |= FORCE_DPX;
827
828 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
829 }
830
831 if (priv->soc == SOC_MT7623) {
832 /* Lower Tx Driving for TRGMII path */
833 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
834 mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
835 (8 << TD_DM_DRVP_S) |
836 (8 << TD_DM_DRVN_S));
837
838 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
839 RX_RST | RXC_DQSISEL);
840 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
841 }
842}
843
/*
 * Initialize the PDMA TX/RX descriptor rings and the packet buffer
 * pool: stop DMA, zero rings and buffers, attach one PKTSIZE_ALIGN
 * buffer to each descriptor, then program ring base/size/index
 * registers and reset the hardware ring pointers.
 */
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	/* Clear DMA control bits (upper half of GLO_CFG) before touching
	 * the rings.
	 */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	/* Buffers are in cached memory; make the zeroed state visible to
	 * the DMA engine.
	 */
	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	/* TX descriptors start CPU-owned (DDONE set) */
	for (i = 0; i < NUM_TX_DESC; i++) {
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	/* RX descriptors start hardware-owned (DDONE clear after memset) */
	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	/* CPU RX index trails the DMA index by one full ring */
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}
889
/*
 * eth_ops.start: reset the frame engine, route our GDMA port to the
 * PDMA (and discard traffic on the other port), set up the rings,
 * start the PHY when no switch is attached, and finally enable DMA.
 * Returns 0 or a negative error from PHY startup.
 */
static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	/* Drop everything arriving on the unused GDMA port */
	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}
926
/*
 * eth_ops.stop: disable TX/RX DMA and wait (up to 5 ms, best effort)
 * for in-flight DMA to drain.
 */
static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}
938
939static int mtk_eth_write_hwaddr(struct udevice *dev)
940{
941 struct eth_pdata *pdata = dev_get_platdata(dev);
942 struct mtk_eth_priv *priv = dev_get_priv(dev);
943 unsigned char *mac = pdata->enetaddr;
944 u32 macaddr_lsb, macaddr_msb;
945
946 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
947 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
948 ((u32)mac[4] << 8) | (u32)mac[5];
949
950 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
951 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
952
953 return 0;
954}
955
/*
 * eth_ops.send: copy @packet into the buffer of the current TX
 * descriptor, flush it to memory, clear DDONE to hand the descriptor
 * to hardware, and advance the CPU index register to kick transmission.
 * Returns 0, or -EPERM if the ring is full (descriptor still
 * hardware-owned).
 */
static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	void *pkt_base;

	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
	memcpy(pkt_base, packet, length);
	/* Make the frame visible to the DMA engine before kicking it */
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}
980
/*
 * eth_ops.recv: if the current RX descriptor is completed (DDONE set),
 * invalidate the cached buffer and hand its pointer back to the
 * network stack. The descriptor is recycled later in
 * mtk_eth_free_pkt(). Returns the packet length, or -EAGAIN when
 * nothing is pending.
 */
static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	uchar *pkt_base;
	u32 length;

	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
	pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
	/* Discard stale cache lines so we read what DMA wrote */
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}
1003
/*
 * eth_ops.free_pkt: recycle the RX descriptor consumed by the last
 * recv — reset its length/flags, give it back to hardware via the CPU
 * RX index register, and advance the software cursor. Always returns 0.
 */
static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;

	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;

	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}
1018
/*
 * Driver probe: map register bases, register the MDIO bus, allocate
 * non-cached descriptor rings, set the MAC mode, and initialize
 * either the external PHY or the attached switch.
 * Returns 0 or a negative error.
 */
static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	ulong iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Rings must be uncached: hardware and CPU share the descriptors */
	priv->tx_ring_noc = (struct pdma_txdesc *)
		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (struct pdma_rxdesc *)
		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return priv->switch_init(priv);
}
1055
/*
 * Driver remove: tear down the MDIO bus and stop any running DMA so
 * the hardware is quiescent before handoff. Always returns 0.
 */
static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}
1069
/*
 * Parse the device tree: register base, ethsys/sgmiisys syscon
 * phandles, reset controls, gmac id, phy-mode, optional fixed-link,
 * and either a switch ("mediatek,switch") or a phy-handle.
 * Returns 0 or a negative error with a diagnostic printed.
 */
static int mtk_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	/* SoC variant comes from the matched compatible's driver data */
	priv->soc = dev_get_driver_data(dev);

	pdata->iobase = devfdt_get_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	priv->ethsys_base = regmap_get_range(regmap, 0);
	if (!priv->ethsys_base) {
		dev_err(dev, "Unable to find ethsys\n");
		return -ENODEV;
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	/* Interface mode is required */
	str = dev_read_string(dev, "phy-mode");
	if (str) {
		pdata->phy_interface = phy_get_interface_by_name(str);
		priv->phy_interface = pdata->phy_interface;
	} else {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg: a fixed-link subnode forces speed/duplex */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

	/* SGMII mode additionally needs the sgmiisys register range */
	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* get corresponding sgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		regmap = syscon_node_to_regmap(args.node);

		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		priv->sgmii_base = regmap_get_range(regmap, 0);

		if (!priv->sgmii_base) {
			dev_err(dev, "Unable to find sgmii\n");
			return -ENODEV;
		}
	}

	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->mt7530_smi_addr = MT7530_DFL_SMI_ADDR;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		/* MCM parts are reset via a reset line, others via GPIO */
		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return ret;
		}
	}

	return 0;
}
1196
/* Compatible strings; .data carries the enum mtk_soc variant */
static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{ .compatible = "mediatek,mt7622-eth", .data = SOC_MT7622 },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.ofdata_to_platdata = mtk_eth_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mtk_eth_priv),
	/* priv contains DMA buffers; request DMA-capable allocation */
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};