// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers
 * copyrights here, so I can't add them here.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Ethernet frame engine registers */
#define PDMA_RELATED		0x0800

#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC address MSB */

#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch registers */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_PVIDC0	0x0040
#define MT7628_SWITCH_PVIDC1	0x0044
#define MT7628_SWITCH_PVIDC2	0x0048
#define MT7628_SWITCH_PVIDC3	0x004c
#define MT7628_SWITCH_VMSC0	0x0070
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)

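/*
 * RX DMA descriptor as seen by the PDMA engine: rxd1 holds the physical
 * address of the receive buffer, rxd2 carries the DONE flag and the
 * received packet length (RX_DMA_PLEN0). The descriptor rings are shared
 * with the hardware, which is why the probe routine below places them in
 * uncached (KSEG1) memory.
 */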
struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

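/*
 * TX DMA descriptor: txd1 holds the physical address of the frame to
 * send, txd2 carries the frame length (TX_DMA_PLEN0) plus the LS0 and
 * DONE flags, and txd4 selects the forward port via TX_DMA_PN. The
 * driver always uses port 1 here, which appears to be the GDMA port
 * feeding the integrated switch.
 */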
struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

/*
 * Local timeouts in ms; the CONFIG_ prefix is reserved for Kconfig
 * symbols in U-Boot, so plain names are used here.
 */
#define MDIO_TIMEOUT		100
#define DMA_STOP_TIMEOUT	100
#define TX_DMA_TIMEOUT		100

struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;

	struct fe_tx_dma *tx_ring;
	struct fe_rx_dma *rx_ring;

	u8 *rx_buf[NUM_RX_DESC];

	/* Index of the next RXD the DMA engine will use in RXD ring 0 */
	int rx_dma_idx;
	/* Index of the next TXD in TXD ring 0 the CPU will use */
	int tx_dma_idx;

	struct reset_ctl rst_ephy;

	struct phy_device *phy;

	int wan_port;
};

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);

static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
{
	void __iomem *base = priv->eth_sw_base;
	int ret;

	ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
				MDIO_TIMEOUT, false);
	if (ret) {
		printf("MDIO operation timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

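/*
 * PHY registers are accessed indirectly through the switch: a command
 * (PHY address, register number and, for writes, the data) is written
 * to PCR0, and PCR1 is polled until the hardware signals completion
 * (PCR1_RD_RDY for reads, PCR1_WT_DONE for writes). Read data is then
 * returned in the upper half of PCR1.
 */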
static int mii_mgr_read(struct mt7628_eth_dev *priv,
			u32 phy_addr, u32 phy_register, u32 *read_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 status = 0;
	int ret;

	*read_data = 0xffff;
	/* Make sure previous read operation is complete */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
	if (ret)
		return ret;

	writel(PCR0_RD_PHY_CMD |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
	       base + MT7628_SWITCH_PCR0);

	/* Wait for the current read operation to complete */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
	if (ret)
		return ret;

	status = readl(base + MT7628_SWITCH_PCR1);
	*read_data = FIELD_GET(PCR1_RD_DATA, status);

	return 0;
}

static int mii_mgr_write(struct mt7628_eth_dev *priv,
			 u32 phy_addr, u32 phy_register, u32 write_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 data;
	int ret;

	/* Make sure previous write operation is complete */
	ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
	if (ret)
		return ret;

	data = FIELD_PREP(PCR0_WT_DATA, write_data) |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
	       PCR0_WT_PHY_CMD;
	writel(data, base + MT7628_SWITCH_PCR0);

	return mdio_wait_read(priv, PCR1_WT_DONE, true);
}

static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u32 val;
	int ret;

	ret = mii_mgr_read(bus->priv, addr, reg, &val);
	if (ret)
		return ret;

	return val;
}

static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	return mii_mgr_write(bus->priv, addr, reg, value);
}

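/*
 * The page/register/value pairs below are undocumented magic taken from
 * the MediaTek reference code mentioned in the file header; the inline
 * comments are all the documentation that exists for them.
 */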
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i, 0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}

static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;
	void __iomem *reg;
	u32 val = 0, pvid;
	int i;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125MHz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* LAN/WAN partition; the WAN port is unusable for U-Boot networking */
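	/*
	 * Each PVIDC register packs the 12-bit port VLAN IDs of two
	 * consecutive ports (even port in bits 11:0, odd port in bits
	 * 23:12). LAN ports get PVID 1, the WAN port PVID 2. VMSC0 then
	 * appears to hold one member-port bitmap per VLAN entry: the LAN
	 * VLAN keeps every port except the WAN port, while the WAN VLAN
	 * is reduced to the WAN port plus the CPU port (bit 6), which
	 * isolates WAN traffic from the LAN side.
	 */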
	if (priv->wan_port >= 0 && priv->wan_port < 6) {
		for (i = 0; i < 8; i++) {
			pvid = i == priv->wan_port ? 2 : 1;
			reg = base + MT7628_SWITCH_PVIDC0 + (i / 2) * 4;
			if (i % 2 == 0) {
				val = pvid;
			} else {
				val |= (pvid << 12);
				writel(val, reg);
			}
		}

		val = 0xffff407f;
		val |= 1 << (8 + priv->wan_port);
		val &= ~(1 << priv->wan_port);
		writel(val, base + MT7628_SWITCH_VMSC0);
	}

	/* Reset PHY */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}

static void eth_dma_start(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;

	setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
}

static void eth_dma_stop(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;
	int ret;

	clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);

	/* Wait for DMA to stop */
	ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
				RX_DMA_BUSY | TX_DMA_BUSY, false,
				DMA_STOP_TIMEOUT, false);
	if (ret)
		printf("DMA stop timeout error!\n");
}

static int mt7628_eth_write_hwaddr(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	u8 *addr = ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr;
	u32 val;

	/* Set MAC address. */
	val = addr[0];
	val = (val << 8) | addr[1];
	writel(val, base + SDM_MAC_ADRH);

	val = addr[2];
	val = (val << 8) | addr[3];
	val = (val << 8) | addr[4];
	val = (val << 8) | addr[5];
	writel(val, base + SDM_MAC_ADRL);

	return 0;
}

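/*
 * Transmit one frame: pad it to the 60-byte minimum Ethernet payload,
 * wait until the current TX descriptor has been released by the
 * hardware (DONE bit set), flush the frame from the D-cache, fill in
 * the descriptor and finally advance TX_CTX_IDX0 to kick off the DMA.
 */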
static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad message to a minimum length */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if buffer is ready for next TX DMA */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

	flush_dcache_range((u32)packet, (u32)packet + length);

	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/* Make sure the descriptor writes complete before kicking the DMA */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}

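/*
 * Receive path: check the DONE bit of the current RX descriptor, read
 * the packet length from rxd2, invalidate the D-cache range for the
 * buffer and hand the buffer to the caller. The descriptor itself is
 * recycled later in mt7628_eth_free_pkt().
 */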
static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the descriptor writes complete before the DMA reuses it */
	wmb();

	return length;
}

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int idx;

	idx = priv->rx_dma_idx;

	/* Return the just-processed RXD to the DMA engine */
	writel(idx, base + RX_CALC_IDX0);

	/* Advance to the next RXD to be received */
	idx = (idx + 1) % NUM_RX_DESC;

	priv->rx_dma_idx = idx;

	return 0;
}

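/*
 * Initialize both descriptor rings, reset the DMA index registers,
 * enable DMA and bring up the PHY link (if one is polled). Finally,
 * drain any frames the switch has already queued so the new transfer
 * starts from a clean state.
 */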
static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the ring setup is complete before enabling DMA */
	wmb();

	/* Disable delay interrupt */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR(&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the register writes complete before starting DMA */
	wmb();
	eth_dma_start(priv);

	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received ethernet
	 * packets in some FIFO. Let's read the already queued packets
	 * out by using the receive routine, so that these old messages
	 * are dropped before the new transfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}

static void mt7628_eth_stop(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);

	eth_dma_stop(priv);
}

static int mt7628_eth_probe(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	int poll_link_phy;
	int ret;
	int i;

	/* Save frame-engine base address for later use */
	priv->base = dev_remap_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Save switch base address for later use */
	priv->eth_sw_base = dev_remap_addr_index(dev, 1);
	if (IS_ERR(priv->eth_sw_base))
		return PTR_ERR(priv->eth_sw_base);

	/* Reset controller */
	ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
	if (ret) {
		pr_err("unable to find reset controller for ethernet PHYs\n");
		return ret;
	}

	/* The WAN port will be isolated from the LAN ports */
	priv->wan_port = dev_read_u32_default(dev, "mediatek,wan-port", -1);

	/* Put rx and tx rings into KSEG1 area (uncached) */
	priv->tx_ring = (struct fe_tx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->tx_ring) * NUM_TX_DESC));
	priv->rx_ring = (struct fe_rx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->rx_ring) * NUM_RX_DESC));

	for (i = 0; i < NUM_RX_DESC; i++)
		priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mt7628_mdio_read;
	bus->write = mt7628_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
	if (poll_link_phy >= 0) {
		if (poll_link_phy >= NUM_PHYS) {
			pr_err("invalid phy %d for poll-link-phy\n",
			       poll_link_phy);
			return -EINVAL;
		}

		priv->phy = phy_connect(bus, poll_link_phy, dev,
					PHY_INTERFACE_MODE_MII);
		if (!priv->phy) {
			pr_err("failed to probe phy %d\n", poll_link_phy);
			return -ENODEV;
		}

		priv->phy->advertising = priv->phy->supported;
		phy_config(priv->phy);
	}

	/* Switch configuration */
	rt305x_esw_init(priv);

	return 0;
}

static const struct eth_ops mt7628_eth_ops = {
	.start = mt7628_eth_start,
	.send = mt7628_eth_send,
	.recv = mt7628_eth_recv,
	.free_pkt = mt7628_eth_free_pkt,
	.stop = mt7628_eth_stop,
	.write_hwaddr = mt7628_eth_write_hwaddr,
};

static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};

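/*
 * Illustrative device-tree node for this driver; the unit address,
 * register ranges and reset index are placeholders, not taken from a
 * real board file. The first reg range is the frame engine, the second
 * the embedded switch:
 *
 *	ethernet@10100000 {
 *		compatible = "mediatek,mt7628-eth";
 *		reg = <0x10100000 0x10000>, <0x10110000 0x8000>;
 *		resets = <&rstctrl 24>;
 *		reset-names = "ephy";
 *		mediatek,poll-link-phy = <0>;
 *		mediatek,wan-port = <4>;
 *	};
 */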
647U_BOOT_DRIVER(mt7628_eth) = {
648 .name = "mt7628_eth",
649 .id = UCLASS_ETH,
650 .of_match = mt7628_eth_ids,
651 .probe = mt7628_eth_probe,
652 .ops = &mt7628_eth_ops,
653 .priv_auto_alloc_size = sizeof(struct mt7628_eth_dev),
654 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
655};