// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Multirate Ethernet MAC (MRMAC) driver
 *
 * Author(s): Ashok Reddy Soma <ashok.reddy.soma@xilinx.com>
 *            Michal Simek <michal.simek@amd.com>
 *
 * Copyright (C) 2021 Xilinx, Inc. All rights reserved.
 */

#include <config.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include "xilinx_axi_mrmac.h"

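/**
 * axi_mrmac_dma_write - Write a buffer descriptor address to an MCDMA register
 * @bd: buffer descriptor to be programmed
 * @desc: MCDMA current/tail descriptor register
 *
 * Use a 64-bit write on platforms with 64-bit physical addresses so that both
 * halves of the descriptor pointer are programmed, otherwise a 32-bit write.
 */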
static void axi_mrmac_dma_write(struct mcdma_bd *bd, u32 *desc)
{
	if (IS_ENABLED(CONFIG_PHYS_64BIT))
		writeq((unsigned long)bd, desc);
	else
		writel((uintptr_t)bd, desc);
}

/**
 * axi_mrmac_ethernet_init - MRMAC init function
 * @priv: MRMAC private structure
 *
 * Return: 0 on success, negative value on errors
 *
 * This function is called to reset and initialize the MRMAC core. This is
 * typically called during initialization. It resets the MRMAC Rx/Tx channels
 * and the Rx/Tx SERDES, and configures the MRMAC speed based on mrmac_rate,
 * which is read from the DT. It then waits for the block lock bit to be set;
 * if the bit is not set within 100 ms, a timeout error is returned.
 */
static int axi_mrmac_ethernet_init(struct axi_mrmac_priv *priv)
{
	struct mrmac_regs *regs = priv->iobase;
	u32 reg;
	int ret;

	/* Perform all the required resets */
	setbits_le32(&regs->reset, MRMAC_RX_SERDES_RST_MASK | MRMAC_RX_RST_MASK
		     | MRMAC_TX_SERDES_RST_MASK | MRMAC_TX_RST_MASK);

	mdelay(MRMAC_RESET_DELAY);

	/* Configure Mode register */
	reg = readl(&regs->mode);

	log_debug("Configuring MRMAC speed to %d\n", priv->mrmac_rate);

	if (priv->mrmac_rate == SPEED_25000) {
		reg &= ~MRMAC_CTL_RATE_CFG_MASK;
		reg |= MRMAC_CTL_DATA_RATE_25G;
		reg |= (MRMAC_CTL_AXIS_CFG_25G_IND << MRMAC_CTL_AXIS_CFG_SHIFT);
		reg |= (MRMAC_CTL_SERDES_WIDTH_25G <<
			MRMAC_CTL_SERDES_WIDTH_SHIFT);
	} else {
		reg &= ~MRMAC_CTL_RATE_CFG_MASK;
		reg |= MRMAC_CTL_DATA_RATE_10G;
		reg |= (MRMAC_CTL_AXIS_CFG_10G_IND << MRMAC_CTL_AXIS_CFG_SHIFT);
		reg |= (MRMAC_CTL_SERDES_WIDTH_10G <<
			MRMAC_CTL_SERDES_WIDTH_SHIFT);
	}

	/* For tick reg */
	reg |= MRMAC_CTL_PM_TICK_MASK;
	writel(reg, &regs->mode);

	clrbits_le32(&regs->reset, MRMAC_RX_SERDES_RST_MASK | MRMAC_RX_RST_MASK
		     | MRMAC_TX_SERDES_RST_MASK | MRMAC_TX_RST_MASK);

	mdelay(MRMAC_RESET_DELAY);

	/* Setup MRMAC hardware options */
	setbits_le32(&regs->rx_config, MRMAC_RX_DEL_FCS_MASK);
	setbits_le32(&regs->tx_config, MRMAC_TX_INS_FCS_MASK);
	setbits_le32(&regs->tx_config, MRMAC_TX_EN_MASK);
	setbits_le32(&regs->rx_config, MRMAC_RX_EN_MASK);

	/* Check for the block lock bit to be set. This ensures that the
	 * MRMAC ethernet IP is functioning normally.
	 */
	writel(MRMAC_STS_ALL_MASK, (phys_addr_t)priv->iobase +
	       MRMAC_TX_STS_OFFSET);
	writel(MRMAC_STS_ALL_MASK, (phys_addr_t)priv->iobase +
	       MRMAC_RX_STS_OFFSET);
	writel(MRMAC_STS_ALL_MASK, (phys_addr_t)priv->iobase +
	       MRMAC_STATRX_BLKLCK_OFFSET);

	ret = wait_for_bit_le32((u32 *)((phys_addr_t)priv->iobase +
					MRMAC_STATRX_BLKLCK_OFFSET),
				MRMAC_RX_BLKLCK_MASK, true,
				MRMAC_BLKLCK_TIMEOUT, true);
	if (ret) {
		log_warning("Error: MRMAC block lock not complete!\n");
		return -EIO;
	}

	writel(MRMAC_TICK_TRIGGER, &regs->tick_reg);

	return 0;
}

/**
 * axi_mcdma_init - Reset MCDMA engine
 * @priv: MRMAC private structure
 *
 * Return: 0 on success, negative value on timeouts
 *
 * This function is called to reset and initialize the MCDMA engine.
 */
static int axi_mcdma_init(struct axi_mrmac_priv *priv)
{
	int ret;

	/* Reset the engine so the hardware starts from a known state */
	writel(XMCDMA_CR_RESET, &priv->mm2s_cmn->control);
	writel(XMCDMA_CR_RESET, &priv->s2mm_cmn->control);

	/* Check Tx/Rx MCDMA.RST. Reset is done when the reset bit is low */
	ret = wait_for_bit_le32(&priv->mm2s_cmn->control, XMCDMA_CR_RESET,
				false, MRMAC_DMARST_TIMEOUT, true);
	if (ret) {
		log_warning("Tx MCDMA reset timeout\n");
		return -ETIMEDOUT;
	}

	ret = wait_for_bit_le32(&priv->s2mm_cmn->control, XMCDMA_CR_RESET,
				false, MRMAC_DMARST_TIMEOUT, true);
	if (ret) {
		log_warning("Rx MCDMA reset timeout\n");
		return -ETIMEDOUT;
	}

	/* Enable channel 1 for Tx and Rx */
	writel(XMCDMA_CHANNEL_1, &priv->mm2s_cmn->chen);
	writel(XMCDMA_CHANNEL_1, &priv->s2mm_cmn->chen);

	return 0;
}

/**
 * axi_mrmac_start - MRMAC start
 * @dev: udevice structure
 *
 * Return: 0 on success, negative value on errors
 *
 * This is the initialization function of MRMAC. It calls the MCDMA
 * initialization function, sets up Rx buffer descriptors to start reception
 * of packets, enables the Tx and Rx channels and triggers the Rx channel
 * fetch.
 */
static int axi_mrmac_start(struct udevice *dev)
{
	struct axi_mrmac_priv *priv = dev_get_priv(dev);
	struct mrmac_regs *regs = priv->iobase;

	/*
	 * Initialize the MCDMA engine. The MCDMA engine must be initialized
	 * before MRMAC. During MCDMA engine initialization, the MCDMA hardware
	 * is reset, and since the MCDMA reset line is connected to MRMAC, this
	 * also ensures a reset of MRMAC.
	 */
	axi_mcdma_init(priv);

	/* Initialize MRMAC hardware */
	if (axi_mrmac_ethernet_init(priv))
		return -EIO;

	/* Disable all Rx interrupts before RxBD space setup */
	clrbits_le32(&priv->mcdma_rx->control, XMCDMA_IRQ_ALL_MASK);

	/* Update current descriptor */
	axi_mrmac_dma_write(priv->rx_bd[0], &priv->mcdma_rx->current);

	/* Set up Rx BDs. MRMAC needs at least two descriptors */
	memset(priv->rx_bd[0], 0, RX_BD_TOTAL_SIZE);

	priv->rx_bd[0]->next_desc = lower_32_bits((u64)priv->rx_bd[1]);
	priv->rx_bd[0]->buf_addr = lower_32_bits((u64)net_rx_packets[0]);

	priv->rx_bd[1]->next_desc = lower_32_bits((u64)priv->rx_bd[0]);
	priv->rx_bd[1]->buf_addr = lower_32_bits((u64)net_rx_packets[1]);

	if (IS_ENABLED(CONFIG_PHYS_64BIT)) {
		priv->rx_bd[0]->next_desc_msb = upper_32_bits((u64)priv->rx_bd[1]);
		priv->rx_bd[0]->buf_addr_msb = upper_32_bits((u64)net_rx_packets[0]);

		priv->rx_bd[1]->next_desc_msb = upper_32_bits((u64)priv->rx_bd[0]);
		priv->rx_bd[1]->buf_addr_msb = upper_32_bits((u64)net_rx_packets[1]);
	}

	priv->rx_bd[0]->cntrl = PKTSIZE_ALIGN;
	priv->rx_bd[1]->cntrl = PKTSIZE_ALIGN;

	/* Flush the Rx BDs so the DMA core can see the updates */
	flush_cache((phys_addr_t)priv->rx_bd[0], RX_BD_TOTAL_SIZE);

	/* Flush the Rx buffers as well; otherwise the cache may still hold
	 * uninitialized data
	 */
	flush_cache((phys_addr_t)priv->rx_bd[0]->buf_addr, RX_BUFF_TOTAL_SIZE);

	/* Start the hardware */
	setbits_le32(&priv->s2mm_cmn->control, XMCDMA_CR_RUNSTOP_MASK);
	setbits_le32(&priv->mm2s_cmn->control, XMCDMA_CR_RUNSTOP_MASK);
	setbits_le32(&priv->mcdma_rx->control, XMCDMA_IRQ_ALL_MASK);

	/* Channel fetch */
	setbits_le32(&priv->mcdma_rx->control, XMCDMA_CR_RUNSTOP_MASK);

	/* Update the tail descriptor. Now it's ready to receive data */
	axi_mrmac_dma_write(priv->rx_bd[1], &priv->mcdma_rx->tail);

	/* Enable Tx */
	setbits_le32(&regs->tx_config, MRMAC_TX_EN_MASK);

	/* Enable Rx */
	setbits_le32(&regs->rx_config, MRMAC_RX_EN_MASK);

	return 0;
}

/**
 * axi_mrmac_send - MRMAC Tx function
 * @dev: udevice structure
 * @ptr: pointer to Tx buffer
 * @len: transfer length
 *
 * Return: 0 on success, negative value on errors
 *
 * This is the Tx send function of MRMAC. It sets up the Tx buffer
 * descriptors, triggers the transfer and waits until the data has been
 * transferred.
 */
static int axi_mrmac_send(struct udevice *dev, void *ptr, int len)
{
	struct axi_mrmac_priv *priv = dev_get_priv(dev);
	int ret;

#ifdef DEBUG
	print_buffer(ptr, ptr, 1, len, 16);
#endif
	if (len > PKTSIZE_ALIGN)
		len = PKTSIZE_ALIGN;

	/* If the size is less than the minimum packet size, pad to it */
	if (len < MIN_PKT_SIZE) {
		memset(priv->txminframe, 0, MIN_PKT_SIZE);
		memcpy(priv->txminframe, ptr, len);
		len = MIN_PKT_SIZE;
		ptr = priv->txminframe;
	}

	writel(XMCDMA_IRQ_ALL_MASK, &priv->mcdma_tx->status);

	clrbits_le32(&priv->mcdma_tx->control, XMCDMA_CR_RUNSTOP_MASK);

	/* Flush the packet to main memory so it can be transferred by DMA */
	flush_cache((phys_addr_t)ptr, len);

	/* Set up Tx BDs. MRMAC needs at least two descriptors */
	memset(priv->tx_bd[0], 0, TX_BD_TOTAL_SIZE);

	priv->tx_bd[0]->next_desc = lower_32_bits((u64)priv->tx_bd[1]);
	priv->tx_bd[0]->buf_addr = lower_32_bits((u64)ptr);

	/* At the end of the ring, link the last BD back to the top */
	priv->tx_bd[1]->next_desc = lower_32_bits((u64)priv->tx_bd[0]);
	priv->tx_bd[1]->buf_addr = lower_32_bits((u64)ptr + len / 2);

	if (IS_ENABLED(CONFIG_PHYS_64BIT)) {
		priv->tx_bd[0]->next_desc_msb = upper_32_bits((u64)priv->tx_bd[1]);
		priv->tx_bd[0]->buf_addr_msb = upper_32_bits((u64)ptr);

		priv->tx_bd[1]->next_desc_msb = upper_32_bits((u64)priv->tx_bd[0]);
		priv->tx_bd[1]->buf_addr_msb = upper_32_bits((u64)ptr + len / 2);
	}

	/* Split the Tx data into two halves and send them in two descriptors */
	priv->tx_bd[0]->cntrl = (len / 2) | XMCDMA_BD_CTRL_TXSOF_MASK;
	priv->tx_bd[1]->cntrl = (len - len / 2) | XMCDMA_BD_CTRL_TXEOF_MASK;

	/* Flush the Tx BDs so the DMA core can see the updates */
	flush_cache((phys_addr_t)priv->tx_bd[0], TX_BD_TOTAL_SIZE);

	if (readl(&priv->mcdma_tx->status) & XMCDMA_CH_IDLE) {
		axi_mrmac_dma_write(priv->tx_bd[0], &priv->mcdma_tx->current);
		/* Channel fetch */
		setbits_le32(&priv->mcdma_tx->control, XMCDMA_CR_RUNSTOP_MASK);
	} else {
		log_warning("Error: current desc is not updated\n");
		return -EIO;
	}

	setbits_le32(&priv->mcdma_tx->control, XMCDMA_IRQ_ALL_MASK);

	/* Start the transfer */
	axi_mrmac_dma_write(priv->tx_bd[1], &priv->mcdma_tx->tail);

	/* Wait for the transmission to complete */
	ret = wait_for_bit_le32(&priv->mcdma_tx->status, XMCDMA_IRQ_IOC_MASK,
				true, 1, true);
	if (ret) {
		log_warning("%s: Timeout\n", __func__);
		return -ETIMEDOUT;
	}

	/* Clear status */
	priv->tx_bd[0]->sband_stats = 0;
	priv->tx_bd[1]->sband_stats = 0;

	log_debug("Sending complete\n");

	return 0;
}

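/**
 * isrxready - Check whether a received packet is available
 * @priv: MRMAC private structure
 *
 * Read and acknowledge the pending MCDMA Rx channel interrupts.
 *
 * Return: true if a reception-complete or delay interrupt is pending,
 * false otherwise
 */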
static bool isrxready(struct axi_mrmac_priv *priv)
{
	u32 status;

	/* Read pending interrupts */
	status = readl(&priv->mcdma_rx->status);

	/* Acknowledge pending interrupts */
	writel(status & XMCDMA_IRQ_ALL_MASK, &priv->mcdma_rx->status);

	/*
	 * If the reception-complete or delay interrupt is asserted, a packet
	 * is ready to be picked up from the Rx BDs.
	 */
	if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK))
		return true;

	return false;
}

/**
 * axi_mrmac_recv - MRMAC Rx function
 * @dev: udevice structure
 * @flags: flags from network stack
 * @packetp: pointer to received data
 *
 * Return: received data length on success, negative value on errors
 *
 * This is the Rx function of MRMAC. It checks whether any data has been
 * received on MCDMA, copies the buffer pointer to @packetp and returns the
 * received data length.
 */
static int axi_mrmac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct axi_mrmac_priv *priv = dev_get_priv(dev);
	phys_addr_t rx_bd_end;
	u32 length;

	/* Wait for an incoming packet */
	if (!isrxready(priv))
		return -EAGAIN;

	/* Clear all interrupts */
	writel(XMCDMA_IRQ_ALL_MASK, &priv->mcdma_rx->status);

	/* Disable the IRQs for a moment until the packet is handled */
	clrbits_le32(&priv->mcdma_rx->control, XMCDMA_IRQ_ALL_MASK);

	/* Disable channel fetch */
	clrbits_le32(&priv->mcdma_rx->control, XMCDMA_CR_RUNSTOP_MASK);

	rx_bd_end = (ulong)priv->rx_bd[0] + roundup(RX_BD_TOTAL_SIZE,
						    ARCH_DMA_MINALIGN);
	/* Invalidate the Rx descriptors to read the proper Rx length */
	invalidate_dcache_range((phys_addr_t)priv->rx_bd[0], rx_bd_end);

	length = priv->rx_bd[0]->status & XMCDMA_BD_STS_ACTUAL_LEN_MASK;
	*packetp = (uchar *)(ulong)priv->rx_bd[0]->buf_addr;

	if (!length) {
		length = priv->rx_bd[1]->status & XMCDMA_BD_STS_ACTUAL_LEN_MASK;
		*packetp = (uchar *)(ulong)priv->rx_bd[1]->buf_addr;
	}

#ifdef DEBUG
	print_buffer(*packetp, *packetp, 1, length, 16);
#endif
	/* Clear status */
	priv->rx_bd[0]->status = 0;
	priv->rx_bd[1]->status = 0;

	return length;
}

/**
 * axi_mrmac_free_pkt - MRMAC free packet function
 * @dev: udevice structure
 * @packet: receive buffer pointer
 * @length: received data length
 *
 * Return: 0 on success, negative value on errors
 *
 * This is the Rx free packet function of MRMAC. It prepares MRMAC for the
 * reception of data again: the Rx buffers are flushed to drop the previous
 * packet, the Rx buffer descriptors are set up and reception is triggered by
 * updating the tail descriptor.
 */
static int axi_mrmac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct axi_mrmac_priv *priv = dev_get_priv(dev);

#ifdef DEBUG
	/* Clear the buffers to make sure stale data is not reused */
	memset((void *)(phys_addr_t)priv->rx_bd[0]->buf_addr, 0,
	       RX_BUFF_TOTAL_SIZE);
#endif
	/* Disable all Rx interrupts before RxBD space setup */
	clrbits_le32(&priv->mcdma_rx->control, XMCDMA_IRQ_ALL_MASK);

	/* Disable channel fetch */
	clrbits_le32(&priv->mcdma_rx->control, XMCDMA_CR_RUNSTOP_MASK);

	/* Update current descriptor */
	axi_mrmac_dma_write(priv->rx_bd[0], &priv->mcdma_rx->current);

	/* Write the BDs to HW */
	flush_cache((phys_addr_t)priv->rx_bd[0], RX_BD_TOTAL_SIZE);

	/* Flush the Rx buffers as well; otherwise the cache will still
	 * contain the previous packet
	 */
	flush_cache((phys_addr_t)priv->rx_bd[0]->buf_addr, RX_BUFF_TOTAL_SIZE);

	/* Enable all IRQs */
	setbits_le32(&priv->mcdma_rx->control, XMCDMA_IRQ_ALL_MASK);

	/* Channel fetch */
	setbits_le32(&priv->mcdma_rx->control, XMCDMA_CR_RUNSTOP_MASK);

	/* Update the tail descriptor. Now it's ready to receive data */
	axi_mrmac_dma_write(priv->rx_bd[1], &priv->mcdma_rx->tail);

	log_debug("Rx completed, framelength = %x\n", length);

	return 0;
}

/**
 * axi_mrmac_stop - Stop MCDMA transfers
 * @dev: udevice structure
 *
 * Stop the MCDMA engine for both Tx and Rx transfers.
 */
static void axi_mrmac_stop(struct udevice *dev)
{
	struct axi_mrmac_priv *priv = dev_get_priv(dev);

	/* Stop the hardware */
	clrbits_le32(&priv->mcdma_tx->control, XMCDMA_CR_RUNSTOP_MASK);
	clrbits_le32(&priv->mcdma_rx->control, XMCDMA_CR_RUNSTOP_MASK);

	log_debug("Halted\n");
}

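/**
 * axi_mrmac_probe - Probe function of MRMAC
 * @dev: udevice structure
 *
 * Map the MRMAC and MCDMA register spaces from platform data and allocate
 * DMA-aligned Tx/Rx buffer descriptors plus a minimum-size Tx frame buffer.
 *
 * Return: 0 on success
 */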
static int axi_mrmac_probe(struct udevice *dev)
{
	struct axi_mrmac_plat *plat = dev_get_plat(dev);
	struct eth_pdata *pdata = &plat->eth_pdata;
	struct axi_mrmac_priv *priv = dev_get_priv(dev);

	priv->iobase = (struct mrmac_regs *)pdata->iobase;

	priv->mm2s_cmn = plat->mm2s_cmn;
	priv->mcdma_tx = (struct mcdma_chan_reg *)((phys_addr_t)priv->mm2s_cmn
						   + XMCDMA_CHAN_OFFSET);
	priv->s2mm_cmn = (struct mcdma_common_regs *)((phys_addr_t)priv->mm2s_cmn
						      + XMCDMA_RX_OFFSET);
	priv->mcdma_rx = (struct mcdma_chan_reg *)((phys_addr_t)priv->s2mm_cmn
						   + XMCDMA_CHAN_OFFSET);
	priv->mrmac_rate = plat->mrmac_rate;

	/* Align buffers to ARCH_DMA_MINALIGN */
	priv->tx_bd[0] = memalign(ARCH_DMA_MINALIGN, TX_BD_TOTAL_SIZE);
	priv->tx_bd[1] = (struct mcdma_bd *)((ulong)priv->tx_bd[0] +
					     sizeof(struct mcdma_bd));

	priv->rx_bd[0] = memalign(ARCH_DMA_MINALIGN, RX_BD_TOTAL_SIZE);
	priv->rx_bd[1] = (struct mcdma_bd *)((ulong)priv->rx_bd[0] +
					     sizeof(struct mcdma_bd));

	priv->txminframe = memalign(ARCH_DMA_MINALIGN, MIN_PKT_SIZE);

	return 0;
}

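/**
 * axi_mrmac_remove - Remove function of MRMAC
 * @dev: udevice structure
 *
 * Free the buffer descriptors and the minimum-size Tx frame buffer that were
 * allocated at probe time.
 *
 * Return: 0 on success
 */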
static int axi_mrmac_remove(struct udevice *dev)
{
	struct axi_mrmac_priv *priv = dev_get_priv(dev);

	/* Free buffer descriptors */
	free(priv->tx_bd[0]);
	free(priv->rx_bd[0]);
	free(priv->txminframe);

	return 0;
}

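/**
 * axi_mrmac_of_to_plat - Parse MRMAC device tree data
 * @dev: udevice structure
 *
 * Read the MRMAC register base, the "axistream-connected" phandle pointing
 * at the MCDMA register space and the optional "xlnx,mrmac-rate" property
 * (defaulting to 10000) into platform data.
 *
 * A matching device tree fragment could look roughly like this (node names
 * and addresses below are only illustrative):
 *
 *	mrmac0: ethernet@80000000 {
 *		compatible = "xlnx,mrmac-ethernet-1.0";
 *		reg = <0x0 0x80000000 0x0 0x1000>;
 *		axistream-connected = <&mcdma0>;
 *		xlnx,mrmac-rate = <25000>;
 *	};
 *
 * Return: 0 on success, negative value on errors
 */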
static int axi_mrmac_of_to_plat(struct udevice *dev)
{
	struct axi_mrmac_plat *plat = dev_get_plat(dev);
	struct eth_pdata *pdata = &plat->eth_pdata;
	struct ofnode_phandle_args phandle_args;
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);

	ret = dev_read_phandle_with_args(dev, "axistream-connected", NULL, 0, 0,
					 &phandle_args);
	if (ret) {
		log_debug("axistream not found\n");
		return -EINVAL;
	}

	plat->mm2s_cmn = (struct mcdma_common_regs *)ofnode_read_u64_default
						(phandle_args.node, "reg", 0);
	if (!plat->mm2s_cmn) {
		log_warning("MRMAC dma register space not found\n");
		return -EINVAL;
	}

	/* Set default MRMAC rate to 10000 */
	plat->mrmac_rate = dev_read_u32_default(dev, "xlnx,mrmac-rate", 10000);

	return 0;
}

static const struct eth_ops axi_mrmac_ops = {
	.start = axi_mrmac_start,
	.send = axi_mrmac_send,
	.recv = axi_mrmac_recv,
	.free_pkt = axi_mrmac_free_pkt,
	.stop = axi_mrmac_stop,
};

static const struct udevice_id axi_mrmac_ids[] = {
	{ .compatible = "xlnx,mrmac-ethernet-1.0" },
	{ }
};

U_BOOT_DRIVER(axi_mrmac) = {
	.name = "axi_mrmac",
	.id = UCLASS_ETH,
	.of_match = axi_mrmac_ids,
	.of_to_plat = axi_mrmac_of_to_plat,
	.probe = axi_mrmac_probe,
	.remove = axi_mrmac_remove,
	.ops = &axi_mrmac_ops,
	.priv_auto = sizeof(struct axi_mrmac_priv),
	.plat_auto = sizeof(struct axi_mrmac_plat),
};