// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/net/ravb.c
 * This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <asm/gpio.h>

/* Registers */
#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define APSR_TDM		BIT(14)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC	16
#define RAVB_NUM_TX_DESC	8
#define RAVB_NUM_RX_DESC	8

#define RAVB_TX_QUEUE_OFFSET	0
#define RAVB_RX_QUEUE_OFFSET	4

#define RAVB_DESC_DT(n)		((n) << 28)
#define RAVB_DESC_DT_FSINGLE	RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX	RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS	RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY	RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY	RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK	RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)		(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK	0xfff

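/*
 * Each descriptor packs its state and buffer length into one 32-bit ctrl
 * word: the descriptor type (DT) occupies the top four bits and the data
 * size (DS) the low twelve bits, as the macros above show.
 */
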
#define RAVB_RX_DESC_MSC_MC	BIT(23)
#define RAVB_RX_DESC_MSC_CEEF	BIT(22)
#define RAVB_RX_DESC_MSC_CRL	BIT(21)
#define RAVB_RX_DESC_MSC_FRE	BIT(20)
#define RAVB_RX_DESC_MSC_RTLF	BIT(19)
#define RAVB_RX_DESC_MSC_RTSF	BIT(18)
#define RAVB_RX_DESC_MSC_RFE	BIT(17)
#define RAVB_RX_DESC_MSC_CRC	BIT(16)
#define RAVB_RX_DESC_MSC_MASK	(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS	1000

struct ravb_desc {
	u32 ctrl;
	u32 dptr;
};

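/*
 * One RX ring entry bundles the data descriptor, the link descriptor used
 * to chain entries, and the packet buffer itself. The 48-byte pad places
 * the packet buffer 64 bytes into the entry, which should keep descriptor
 * words and packet data on separate cache lines (assuming a line size of
 * 64 bytes or less and a cache-aligned allocation, which the
 * DM_FLAG_ALLOC_PRIV_DMA flag at the bottom of this file provides).
 */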
struct ravb_rxdesc {
	struct ravb_desc data;
	struct ravb_desc link;
	u8 __pad[48];
	u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];
	u32 rx_desc_idx;
	u32 tx_desc_idx;

	struct phy_device *phydev;
	struct mii_dev *bus;
	void __iomem *iobase;
	struct clk clk;
	struct gpio_desc reset_gpio;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}

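/*
 * Transmit path: fill in one single-frame (FSINGLE) descriptor, kick the
 * transmit request bit (TCCR.TSRQ0) and poll until the DMAC rewrites the
 * descriptor type, which signals completion. Only RAVB_NUM_TX_DESC - 1
 * slots carry frames because the last slot holds the LINKFIX descriptor
 * that wraps the ring back to its head (see ravb_tx_desc_init).
 */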
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}

static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the rx descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check that the operating mode has changed to config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
}

static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct phy_device *phydev;
	int mask = 0xffffffff, reg;

	if (dm_gpio_is_valid(&eth->reset_gpio)) {
		dm_gpio_set_value(&eth->reset_gpio, 1);
		mdelay(20);
		dm_gpio_set_value(&eth->reset_gpio, 0);
		mdelay(1);
	}

	phydev = phy_find_by_mask(eth->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	eth->phydev = phydev;

	phydev->supported &= SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			     SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;

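	/*
	 * When the link is capped below 1000 Mbit/s, also clear the
	 * 1000BASE-T full/half duplex advertisement bits (bits 9 and 8 of
	 * MII_CTRL1000) so the PHY does not negotiate a gigabit link.
	 */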
	if (pdata->max_speed != 1000) {
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC interrupts */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Set the receive frame length limit */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* AVB rx set */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
	if ((rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77990) ||
	    (rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77995))
		return 0;

	if ((pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
	    (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID))
		writel(APSR_TDM, eth->iobase + RAVB_REG_APSR);

	return 0;
}

static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC register */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

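	/*
	 * Program an extended PHY register: MMD device 2, register 8, which
	 * on the Micrel KSZ9031 fitted to R-Car Gen3 reference boards is the
	 * RGMII clock pad skew register. The Micrel mapping is an assumption
	 * inferred from those register numbers, not something this file
	 * states; the write is only meaningful for PHY drivers that
	 * implement writeext.
	 */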
	phy->drv->writeext(phy, -1, 0x02, 0x08, (0x0f << 5) | 0x19);

	return 0;
}

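/*
 * Bring-up sequence: switch the AVB-DMAC to CONFIG mode, build the base,
 * TX and RX descriptor tables, program the DMAC and E-MAC and start the
 * PHY, then write CCC_OPC_OPERATION to move the DMAC to OPERATION mode.
 */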
static int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = ravb_reset(dev);
	if (ret)
		return ret;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		return ret;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;
}

static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
	ravb_reset(dev);
}

static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_by_index(dev, 0, &eth->clk);
	if (ret < 0)
		goto err_mdio_alloc;

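	/*
	 * Look for the PHY "reset-gpios" first in the node referenced by
	 * "phy-handle"; if none is found there, fall back to a "reset-gpios"
	 * property in the Ethernet controller node itself.
	 */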
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &phandle_args);
	if (!ret) {
		gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
					   &eth->reset_gpio, GPIOD_IS_OUT);
	}

	if (!dm_gpio_is_valid(&eth->reset_gpio)) {
		gpio_request_by_name(dev, "reset-gpios", 0, &eth->reset_gpio,
				     GPIOD_IS_OUT);
	}

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	/* Bring up PHY */
	ret = clk_enable(&eth->clk);
	if (ret)
		goto err_mdio_register;

	ret = ravb_reset(dev);
	if (ret)
		goto err_mdio_reset;

	ret = ravb_phy_config(dev);
	if (ret)
		goto err_mdio_reset;

	return 0;

err_mdio_reset:
	clk_disable(&eth->clk);
err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_disable(&eth->clk);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	if (dm_gpio_is_valid(&eth->reset_gpio))
		dm_gpio_free(dev, &eth->reset_gpio);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}

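/*
 * Bit-banged MDIO: the MAC exposes the management interface through the PIR
 * register, so the generic bb_miiphy framework drives the bus by toggling
 * PIR_MMD (direction), PIR_MDO (data out) and PIR_MDC (clock) and sampling
 * PIR_MDI (data in) through the callbacks below.
 */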
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "ravb",
		.init		= ravb_bb_init,
		.mdio_active	= ravb_bb_mdio_active,
		.mdio_tristate	= ravb_bb_mdio_tristate,
		.set_mdio	= ravb_bb_set_mdio,
		.get_mdio	= ravb_bb_get_mdio,
		.set_mdc	= ravb_bb_set_mdc,
		.delay		= ravb_bb_delay,
	},
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start			= ravb_start,
	.send			= ravb_send,
	.recv			= ravb_recv,
	.free_pkt		= ravb_free_pkt,
	.stop			= ravb_stop,
	.write_hwaddr		= ravb_write_hwaddr,
};

int ravb_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return ret;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-r8a77965" },
	{ .compatible = "renesas,etheravb-r8a77970" },
	{ .compatible = "renesas,etheravb-r8a77990" },
	{ .compatible = "renesas,etheravb-r8a77995" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name		= "ravb",
	.id		= UCLASS_ETH,
	.of_match	= ravb_ids,
	.ofdata_to_platdata = ravb_ofdata_to_platdata,
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.ops		= &ravb_ops,
	.priv_auto_alloc_size = sizeof(struct ravb_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};