/*
 * drivers/net/ravb.c
 * This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>

DECLARE_GLOBAL_DATA_PTR;

/* Registers */
#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC	16
#define RAVB_NUM_TX_DESC	8
#define RAVB_NUM_RX_DESC	8

#define RAVB_TX_QUEUE_OFFSET	0
#define RAVB_RX_QUEUE_OFFSET	4

#define RAVB_DESC_DT(n)		((n) << 28)
#define RAVB_DESC_DT_FSINGLE	RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX	RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS	RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY	RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY	RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK	RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)		(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK	0xfff

#define RAVB_RX_DESC_MSC_MC	BIT(23)
#define RAVB_RX_DESC_MSC_CEEF	BIT(22)
#define RAVB_RX_DESC_MSC_CRL	BIT(21)
#define RAVB_RX_DESC_MSC_FRE	BIT(20)
#define RAVB_RX_DESC_MSC_RTLF	BIT(19)
#define RAVB_RX_DESC_MSC_RTSF	BIT(18)
#define RAVB_RX_DESC_MSC_RFE	BIT(17)
#define RAVB_RX_DESC_MSC_CRC	BIT(16)
#define RAVB_RX_DESC_MSC_MASK	(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS	1000

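/*
 * Each hardware descriptor is two 32-bit words: 'ctrl' holds the descriptor
 * type in bits [31:28] and the data size in bits [11:0], 'dptr' holds the
 * buffer address.  An RX ring entry pairs a data descriptor with a LINKFIX
 * descriptor and embeds its packet buffer; the padding keeps the 16-byte
 * descriptor pair in its own 64-byte chunk so the packet buffer that follows
 * starts cache-line aligned (assuming a 64-byte ARCH_DMA_MINALIGN).
 */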
struct ravb_desc {
	u32 ctrl;
	u32 dptr;
};

struct ravb_rxdesc {
	struct ravb_desc data;
	struct ravb_desc link;
	u8 __pad[48];
	u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];
	u32 rx_desc_idx;
	u32 tx_desc_idx;

	struct phy_device *phydev;
	struct mii_dev *bus;
	void __iomem *iobase;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}

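/*
 * Transmit one packet: flush the packet buffer to memory, describe it with a
 * single FSINGLE descriptor, kick transmission via TCCR.TSRQ0 if the
 * transmitter is idle, then poll the descriptor until the DMAC rewrites its
 * type field to signal completion (or the timeout expires).
 */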
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	/* The last TX descriptor is the LINKFIX one, so skip it when wrapping */
	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}

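/*
 * Receive one packet: if the current RX descriptor is still FEMPTY there is
 * nothing pending; descriptors flagged with an error in the MSC bits are
 * skipped.  Otherwise return a pointer into the descriptor's packet buffer;
 * the caller hands it back through ravb_free_pkt().
 */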
static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the rx descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

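/*
 * Return the just-consumed RX descriptor to the hardware by resetting it to
 * FEMPTY with a full-size buffer, then advance to the next ring entry.
 */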
static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

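/*
 * Put the AVB-DMAC into CONFIG mode by writing CCC.OPC and wait for CSR.OPS
 * to confirm the mode switch.  Both ravb_start() and ravb_stop() go through
 * this before (re)programming the controller.
 */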
static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check that the operating mode has changed to CONFIG mode. */
	return wait_for_bit(dev->name, (void *)eth->iobase + RAVB_REG_CSR,
			    CSR_OPS_CONFIG, true, 100, true);
}

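/*
 * The descriptor base address table (DBAT) holds one link descriptor per
 * hardware queue.  Every entry is initialised as an EOS descriptor; the TX
 * and RX entries (offsets 0 and 4) are later repointed at the TX and RX
 * rings with LINKFIX descriptors by ravb_tx_desc_init()/ravb_rx_desc_init().
 */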
static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	/*
	 * RX data descriptors must be FEMPTY so the DMAC will store received
	 * frames into them; ravb_recv() treats FEMPTY as "no packet yet" and
	 * ravb_free_pkt() resets consumed descriptors back to FEMPTY.
	 */
	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_FEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the RX descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

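/*
 * Connect to the PHY through the bit-banged MDIO bus and trim the advertised
 * link modes: the EtherAVB MAC only supports 100 and 1000 Mbit/s, so
 * 10BASE-T is always masked out, and when the device tree limits max-speed
 * below 1000 the gigabit modes (and the PHY's 1000BASE-T advertisement bits
 * in MII_CTRL1000) are cleared as well.
 */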
static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct phy_device *phydev;
	int reg;

	phydev = phy_connect(eth->bus, pdata->phy_interface,
			     dev, PHY_INTERFACE_MODE_RGMII_ID);
	if (!phydev)
		return -ENODEV;

	eth->phydev = phydev;

	/* 10BASE is not supported for Ethernet AVB MAC */
	phydev->supported &= ~(SUPPORTED_10baseT_Full
			       | SUPPORTED_10baseT_Half);
	if (pdata->max_speed != 1000) {
		phydev->supported &= ~(SUPPORTED_1000baseT_Half
				       | SUPPORTED_1000baseT_Full);
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC interrupts */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Set the receive frame length limit */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* AVB rx set */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns */
	if (pdata->max_speed == 1000)
		writel(BIT(14), eth->iobase + RAVB_REG_APSR);

	return 0;
}

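/*
 * Full controller bring-up: initialise the AVB-DMAC and E-MAC, program the
 * MAC address, connect and start the PHY, then set the line speed in GECMR
 * and the duplex/enable bits in ECMR according to the negotiated link.
 */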
static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC registers */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	/* Configure PHY */
	ret = ravb_phy_config(dev);
	if (ret)
		return ret;

	phy = eth->phydev;

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Set full duplex if it was negotiated by the PHY */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

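	/*
	 * Write extended register 8 of MMD device 2 on the PHY.  This matches
	 * the RGMII clock pad skew register of the Micrel KSZ9031, so this is
	 * most likely board-specific TX/RX clock skew tuning for that PHY.
	 */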
	phy->drv->writeext(phy, -1, 0x02, 0x08, (0x0f << 5) | 0x19);

	return 0;
}

int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = ravb_reset(dev);
	if (ret)
		return ret;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		return ret;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;
}

static void ravb_stop(struct udevice *dev)
{
	ravb_reset(dev);
}

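/*
 * Probe: map the controller registers and register a bit-banged MDIO bus
 * (the accessors below toggle the PIR register) that the PHY layer will use
 * to talk to the attached PHY.
 */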
static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	return 0;

err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}

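/*
 * Bit-banged MDIO callbacks.  MDIO is driven through the PIR register:
 * PIR_MMD selects output (active) vs. input (tristate) on the data line,
 * PIR_MDO drives the data bit, PIR_MDC toggles the clock and PIR_MDI reads
 * the data line back.
 */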
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "ravb",
		.init = ravb_bb_init,
		.mdio_active = ravb_bb_mdio_active,
		.mdio_tristate = ravb_bb_mdio_tristate,
		.set_mdio = ravb_bb_set_mdio,
		.get_mdio = ravb_bb_get_mdio,
		.set_mdc = ravb_bb_set_mdc,
		.delay = ravb_bb_delay,
	},
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start = ravb_start,
	.send = ravb_send,
	.recv = ravb_recv,
	.free_pkt = ravb_free_pkt,
	.stop = ravb_stop,
	.write_hwaddr = ravb_write_hwaddr,
};

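/*
 * Pull the controller configuration out of the device tree: the register
 * base from "reg", the PHY interface from "phy-mode" and an optional
 * "max-speed" cap (default 1000 Mbit/s).  The bit-bang bus entry is renamed
 * after the device so the bit-bang MDIO accessors can be matched to the
 * MDIO device registered in ravb_probe().
 */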
int ravb_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, "%s", dev->name);

	return ret;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name = "ravb",
	.id = UCLASS_ETH,
	.of_match = ravb_ids,
	.ofdata_to_platdata = ravb_ofdata_to_platdata,
	.probe = ravb_probe,
	.remove = ravb_remove,
	.ops = &ravb_ops,
	.priv_auto_alloc_size = sizeof(struct ravb_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};