blob: d77da26d50a3da82b26ce1c61be593818e7e3b91 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers
 * copyrights here, so I can't add them here.
 */
15
16#include <common.h>
17#include <dm.h>
18#include <malloc.h>
19#include <miiphy.h>
20#include <net.h>
Weijie Gao2734fde2019-09-25 17:45:32 +080021#include <reset.h>
Stefan Roesec895ef42018-10-26 14:53:27 +020022#include <wait_bit.h>
23#include <asm/io.h>
24#include <linux/bitfield.h>
25#include <linux/err.h>
26
/* Ethernet frame engine register offsets (relative to priv->base) */
#define PDMA_RELATED		0x0800

/* TX ring 0: base address, ring size, CPU write index, DMA read index */
#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

/* RX ring 0: base address, ring size, CPU index */
#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC Address MSB */

/* PDMA_RST_IDX bits: reset the TX/RX ring 0 index pointers */
#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

/* PDMA_GLO_CFG bits */
#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch register offsets (relative to priv->eth_sw_base) */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 (RX descriptor word 2) bits */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)

/* RX DMA descriptor layout, shared with the hardware */
struct fe_rx_dma {
	unsigned int rxd1;	/* receive buffer physical address */
	unsigned int rxd2;	/* DONE/LSO flags and packet length */
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

/* txd2 (TX descriptor word 2) bits */
#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

/* txd4 (TX descriptor word 4) bits */
#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

/* TX DMA descriptor layout, shared with the hardware */
struct fe_tx_dma {
	unsigned int txd1;	/* transmit buffer physical address */
	unsigned int txd2;	/* DONE/LS flags and packet length */
	unsigned int txd3;
	unsigned int txd4;	/* PN field written at ring init */
} __packed __aligned(4);

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

/* Frames shorter than this are zero-padded before transmission */
#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

/* Timeouts passed to wait_for_bit_le32() */
#define CONFIG_MDIO_TIMEOUT	100
#define CONFIG_DMA_STOP_TIMEOUT	100
#define CONFIG_TX_DMA_TIMEOUT	100
/* Per-device driver private data */
struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;		/* MDIO bus for the embedded PHYs */

	struct fe_tx_dma *tx_ring;	/* TX descriptor ring (KSEG1, uncached) */
	struct fe_rx_dma *rx_ring;	/* RX descriptor ring (KSEG1, uncached) */

	u8 *rx_buf[NUM_RX_DESC];	/* one receive buffer per RX descriptor */

	/* Point to the next RXD DMA wants to use in RXD Ring0 */
	int rx_dma_idx;
	/* Point to the next TXD in TXD Ring0 CPU wants to use */
	int tx_dma_idx;

	struct reset_ctl rst_ephy;	/* reset line for the embedded PHYs */

	struct phy_device *phy;		/* optional link-poll PHY (may be NULL) */
};

/* Forward declaration: recv error path recycles descriptors via free_pkt */
static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);
146
Stefan Roesec895ef42018-10-26 14:53:27 +0200147static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
148{
149 void __iomem *base = priv->eth_sw_base;
150 int ret;
151
152 ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
153 CONFIG_MDIO_TIMEOUT, false);
154 if (ret) {
155 printf("MDIO operation timeout!\n");
156 return -ETIMEDOUT;
157 }
158
159 return 0;
160}
161
162static int mii_mgr_read(struct mt7628_eth_dev *priv,
163 u32 phy_addr, u32 phy_register, u32 *read_data)
164{
165 void __iomem *base = priv->eth_sw_base;
166 u32 status = 0;
167 u32 ret;
168
169 *read_data = 0xffff;
170 /* Make sure previous read operation is complete */
171 ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
172 if (ret)
173 return ret;
174
175 writel(PCR0_RD_PHY_CMD |
176 FIELD_PREP(PCR0_PHY_REG, phy_register) |
177 FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
178 base + MT7628_SWITCH_PCR0);
179
180 /* Make sure previous read operation is complete */
181 ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
182 if (ret)
183 return ret;
184
185 status = readl(base + MT7628_SWITCH_PCR1);
186 *read_data = FIELD_GET(PCR1_RD_DATA, status);
187
188 return 0;
189}
190
191static int mii_mgr_write(struct mt7628_eth_dev *priv,
192 u32 phy_addr, u32 phy_register, u32 write_data)
193{
194 void __iomem *base = priv->eth_sw_base;
195 u32 data;
196 int ret;
197
198 /* Make sure previous write operation is complete */
199 ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
200 if (ret)
201 return ret;
202
203 data = FIELD_PREP(PCR0_WT_DATA, write_data) |
204 FIELD_PREP(PCR0_PHY_REG, phy_register) |
205 FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
206 PCR0_WT_PHY_CMD;
207 writel(data, base + MT7628_SWITCH_PCR0);
208
209 return mdio_wait_read(priv, PCR1_WT_DONE, true);
210}
211
212static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
213{
214 u32 val;
215 int ret;
216
217 ret = mii_mgr_read(bus->priv, addr, reg, &val);
218 if (ret)
219 return ret;
220
221 return val;
222}
223
224static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
225 u16 value)
226{
227 return mii_mgr_write(bus->priv, addr, reg, value);
228}
229
230static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
231{
232 int i;
233
234 mii_mgr_write(priv, 0, 31, 0x2000); /* change G2 page */
235 mii_mgr_write(priv, 0, 26, 0x0000);
236
237 for (i = 0; i < 5; i++) {
238 mii_mgr_write(priv, i, 31, 0x8000); /* change L0 page */
239 mii_mgr_write(priv, i, 0, 0x3100);
240
241 /* EEE disable */
242 mii_mgr_write(priv, i, 30, 0xa000);
243 mii_mgr_write(priv, i, 31, 0xa000); /* change L2 page */
244 mii_mgr_write(priv, i, 16, 0x0606);
245 mii_mgr_write(priv, i, 23, 0x0f0e);
246 mii_mgr_write(priv, i, 24, 0x1610);
247 mii_mgr_write(priv, i, 30, 0x1f15);
248 mii_mgr_write(priv, i, 28, 0x6111);
249 }
250
251 /* 100Base AOI setting */
252 mii_mgr_write(priv, 0, 31, 0x5000); /* change G5 page */
253 mii_mgr_write(priv, 0, 19, 0x004a);
254 mii_mgr_write(priv, 0, 20, 0x015a);
255 mii_mgr_write(priv, 0, 21, 0x00ee);
256 mii_mgr_write(priv, 0, 22, 0x0033);
257 mii_mgr_write(priv, 0, 23, 0x020a);
258 mii_mgr_write(priv, 0, 24, 0x0000);
259 mii_mgr_write(priv, 0, 25, 0x024a);
260 mii_mgr_write(priv, 0, 26, 0x035a);
261 mii_mgr_write(priv, 0, 27, 0x02ee);
262 mii_mgr_write(priv, 0, 28, 0x0233);
263 mii_mgr_write(priv, 0, 29, 0x000a);
264 mii_mgr_write(priv, 0, 30, 0x0000);
265
266 /* Fix EPHY idle state abnormal behavior */
267 mii_mgr_write(priv, 0, 31, 0x4000); /* change G4 page */
268 mii_mgr_write(priv, 0, 29, 0x000d);
269 mii_mgr_write(priv, 0, 30, 0x0500);
270}
271
/*
 * rt305x_esw_init() - one-time setup of the integrated ethernet switch
 *
 * Programs flow-control thresholds, disables VLAN handling, configures
 * hashing/aging, then resets the embedded PHYs and runs their magic
 * init sequence. Values come from the MediaTek reference code.
 */
static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125Mhz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* Reset PHY, then allow 10ms for it to settle before programming */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}
303
304static void eth_dma_start(struct mt7628_eth_dev *priv)
305{
306 void __iomem *base = priv->base;
307
308 setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
309}
310
311static void eth_dma_stop(struct mt7628_eth_dev *priv)
312{
313 void __iomem *base = priv->base;
314 int ret;
315
316 clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
317
318 /* Wait for DMA to stop */
319 ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
320 RX_DMA_BUSY | TX_DMA_BUSY, false,
321 CONFIG_DMA_STOP_TIMEOUT, false);
322 if (ret)
323 printf("DMA stop timeout error!\n");
324}
325
326static int mt7628_eth_write_hwaddr(struct udevice *dev)
327{
328 struct mt7628_eth_dev *priv = dev_get_priv(dev);
329 void __iomem *base = priv->base;
330 u8 *addr = ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr;
331 u32 val;
332
333 /* Set MAC address. */
334 val = addr[0];
335 val = (val << 8) | addr[1];
336 writel(val, base + SDM_MAC_ADRH);
337
338 val = addr[2];
339 val = (val << 8) | addr[3];
340 val = (val << 8) | addr[4];
341 val = (val << 8) | addr[5];
342 writel(val, base + SDM_MAC_ADRL);
343
344 return 0;
345}
346
/*
 * mt7628_eth_send() - queue one frame on TX ring 0 (eth_ops.send)
 *
 * @dev:	ethernet udevice
 * @packet:	frame buffer (cached memory; flushed here before DMA)
 * @length:	frame length in bytes
 *
 * Return: 0 on success, negative error if the next descriptor does not
 * become free within the TX timeout.
 */
static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad message to a minimum length (writes into the caller's buffer) */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if buffer is ready for next TX DMA (DONE bit set by HW) */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				CONFIG_TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

	/* Flush the frame to memory so the DMA engine sees current data */
	flush_dcache_range((u32)packet, (u32)packet + length);

	/* Fill the descriptor: buffer address, length, then clear DONE */
	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/*
	 * Make sure the writes executed at this place before the CPU
	 * index register kicks off the transfer
	 */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}
391
/*
 * mt7628_eth_recv() - poll RX ring 0 for a completed frame (eth_ops.recv)
 *
 * Return: packet length with *packetp pointing at the receive buffer;
 * -EAGAIN when no packet is pending; -EIO on an invalid descriptor
 * length (the descriptor is recycled in that case).
 */
static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	/* Hardware sets RX_DMA_DONE in rxd2 once the descriptor is filled */
	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		/* Recycle the bad descriptor so the ring keeps advancing */
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
	/* Drop stale cache lines before the CPU reads the DMA'ed data */
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	/* Re-arm the descriptor for the next reception */
	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the writes executed at this place */
	wmb();

	return length;
}
423
424static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
425{
426 struct mt7628_eth_dev *priv = dev_get_priv(dev);
427 void __iomem *base = priv->base;
428 int idx;
429
430 idx = priv->rx_dma_idx;
431
432 /* Move point to next RXD which wants to alloc */
433 writel(idx, base + RX_CALC_IDX0);
434
435 /* Update to Next packet point that was received */
436 idx = (idx + 1) % NUM_RX_DESC;
437
438 priv->rx_dma_idx = idx;
439
440 return 0;
441}
442
/*
 * mt7628_eth_start() - (re)initialize rings and start DMA (eth_ops.start)
 *
 * Resets both descriptor rings, programs their addresses and sizes into
 * the frame engine, enables DMA, optionally waits for PHY link, and
 * finally drains any frames the switch queued while DMA was stopped.
 *
 * Return: 0 on success, -EAGAIN if the polled PHY has no link, or a
 * negative error from phy_startup().
 */
static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	/* Hand every RX descriptor (with its buffer) to the DMA engine */
	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	/* Mark every TX descriptor DONE, i.e. available to the CPU */
	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the writes executed at this place */
	wmb();

	/* disable delay interrupt */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located. */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	/* Reset the hardware ring indices to match the cleared rings */
	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the writes executed at this place */
	wmb();
	eth_dma_start(priv);

	/* Only configured when "mediatek,poll-link-phy" is set in the DT */
	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received ethernet
	 * packets in some FIFO. Lets read the already queued packets
	 * out by using the receive routine, so that these old messages
	 * are dropped before the new xfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}
513
/* eth_ops.stop callback: halt both DMA engines */
static void mt7628_eth_stop(struct udevice *dev)
{
	eth_dma_stop(dev_get_priv(dev));
}
520
521static int mt7628_eth_probe(struct udevice *dev)
522{
523 struct mt7628_eth_dev *priv = dev_get_priv(dev);
Stefan Roesec895ef42018-10-26 14:53:27 +0200524 struct mii_dev *bus;
Weijie Gaof0793212019-09-25 17:45:33 +0800525 int poll_link_phy;
Stefan Roesec895ef42018-10-26 14:53:27 +0200526 int ret;
527 int i;
528
529 /* Save frame-engine base address for later use */
530 priv->base = dev_remap_addr_index(dev, 0);
531 if (IS_ERR(priv->base))
532 return PTR_ERR(priv->base);
533
534 /* Save switch base address for later use */
535 priv->eth_sw_base = dev_remap_addr_index(dev, 1);
536 if (IS_ERR(priv->eth_sw_base))
537 return PTR_ERR(priv->eth_sw_base);
538
Weijie Gao2734fde2019-09-25 17:45:32 +0800539 /* Reset controller */
540 ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
Stefan Roesec895ef42018-10-26 14:53:27 +0200541 if (ret) {
Weijie Gao2734fde2019-09-25 17:45:32 +0800542 pr_err("unable to find reset controller for ethernet PHYs\n");
Stefan Roesec895ef42018-10-26 14:53:27 +0200543 return ret;
544 }
545
Stefan Roesec895ef42018-10-26 14:53:27 +0200546 /* Put rx and tx rings into KSEG1 area (uncached) */
547 priv->tx_ring = (struct fe_tx_dma *)
548 KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
549 sizeof(*priv->tx_ring) * NUM_TX_DESC));
550 priv->rx_ring = (struct fe_rx_dma *)
551 KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
552 sizeof(*priv->rx_ring) * NUM_RX_DESC));
553
554 for (i = 0; i < NUM_RX_DESC; i++)
555 priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);
556
557 bus = mdio_alloc();
558 if (!bus) {
559 printf("Failed to allocate MDIO bus\n");
560 return -ENOMEM;
561 }
562
563 bus->read = mt7628_mdio_read;
564 bus->write = mt7628_mdio_write;
565 snprintf(bus->name, sizeof(bus->name), dev->name);
566 bus->priv = (void *)priv;
567
568 ret = mdio_register(bus);
569 if (ret)
570 return ret;
571
Weijie Gaof0793212019-09-25 17:45:33 +0800572 poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
573 if (poll_link_phy >= 0) {
574 if (poll_link_phy >= NUM_PHYS) {
575 pr_err("invalid phy %d for poll-link-phy\n",
576 poll_link_phy);
577 return ret;
578 }
579
580 priv->phy = phy_connect(bus, poll_link_phy, dev,
581 PHY_INTERFACE_MODE_MII);
582 if (!priv->phy) {
583 pr_err("failed to probe phy %d\n", poll_link_phy);
584 return -ENODEV;
585 }
586
587 priv->phy->advertising = priv->phy->supported;
588 phy_config(priv->phy);
589 }
590
Stefan Roesec895ef42018-10-26 14:53:27 +0200591 /* Switch configuration */
592 rt305x_esw_init(priv);
593
594 return 0;
595}
596
/* U-Boot ethernet uclass operations */
static const struct eth_ops mt7628_eth_ops = {
	.start = mt7628_eth_start,
	.send = mt7628_eth_send,
	.recv = mt7628_eth_recv,
	.free_pkt = mt7628_eth_free_pkt,
	.stop = mt7628_eth_stop,
	.write_hwaddr = mt7628_eth_write_hwaddr,
};
605
/* Device-tree match table */
static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};

U_BOOT_DRIVER(mt7628_eth) = {
	.name = "mt7628_eth",
	.id = UCLASS_ETH,
	.of_match = mt7628_eth_ids,
	.probe = mt7628_eth_probe,
	.ops = &mt7628_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mt7628_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};