/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#include "designware.h"

#if !defined(CONFIG_PHYLIB)
# error "DesignWare Ether MAC requires PHYLIB - missing CONFIG_PHYLIB"
#endif

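/*
 * Read a 16-bit register from the PHY at "addr" through the MAC's MDIO
 * interface. Polls the MII busy bit and returns the register value, or -1
 * if the access does not complete within CONFIG_MDIO_TIMEOUT. The "devad"
 * argument is ignored.
 */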
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -1;
}

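/*
 * Write "val" to PHY register "reg" at PHY address "addr" over MDIO.
 * Returns 0 on success or -1 if the MII busy bit does not clear within
 * CONFIG_MDIO_TIMEOUT.
 */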
static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			u16 val)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int ret = -1, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

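/*
 * Allocate and register an MDIO bus named after the MAC, backed by the
 * MAC register block passed in "mac_regs_p".
 */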
static int dw_mdio_init(char *name, struct eth_mac_regs *mac_regs_p)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -1;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);

	bus->priv = (void *)mac_regs_p;

	return mdio_register(bus);
}

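/*
 * Build the Tx descriptor ring: point each descriptor at its buffer, chain
 * it to the next one (the last wraps back to the first), clear the
 * status/control bits and hand the ring base address to the DMA engine.
 */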
static void tx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS | \
				DESC_TXSTS_TXCHECKINSCTRL | \
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->tx_mac_descrtable,
			   (unsigned int)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

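/*
 * Build the Rx descriptor ring analogously to the Tx ring, but mark every
 * descriptor as owned by the DMA engine so it can start receiving into the
 * attached buffers immediately.
 */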
static void rx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC, make sure the zeros written
	 * right after the "priv" structure allocation have actually been
	 * flushed to RAM. Otherwise some of them may get flushed later,
	 * while the GMAC is already writing received data to the same
	 * buffers via DMA, corrupting the incoming frames.
	 */
	flush_dcache_range((unsigned int)rxbuffs, (unsigned int)rxbuffs +
			   RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) | \
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->rx_mac_descrtable,
			   (unsigned int)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

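/* Program the station MAC address into the MAC address 0 registers. */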
static int dw_write_hwaddr(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;
	u8 *mac_id = &dev->enetaddr[0];

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

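/*
 * Propagate the negotiated PHY link parameters (speed, duplex) into the
 * MAC configuration register and print the resulting link mode.
 */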
static void dw_adjust_link(struct eth_mac_regs *mac_p,
			   struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
}

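/* Stop the MAC transmitter/receiver, the DMA engine and the PHY. */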
static void dw_eth_halt(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

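/*
 * Bring the interface up: soft-reset the DMA engine, reprogram the MAC
 * address, rebuild both descriptor rings, configure the DMA/FIFO modes and
 * start the PHY before enabling the MAC transmitter and receiver.
 */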
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -1;
		}

		mdelay(100);
	}

	/*
	 * The soft reset above clears the HW address registers,
	 * so they have to be programmed here once again.
	 */
	dw_write_hwaddr(dev);

	rx_descs_init(dev);
	tx_descs_init(dev);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	if (phy_startup(priv->phydev)) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return -1;
	}

	dw_adjust_link(mac_p, priv->phydev);

	if (!priv->phydev->link)
		return -1;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

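/*
 * Transmit one frame: copy the packet into the current Tx descriptor's
 * buffer, hand the descriptor to the DMA engine and kick the Tx poll
 * demand register. Cache maintenance keeps the descriptor and the packet
 * buffer coherent with the DMA engine.
 */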
static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end = data_start +
		roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire
	 * descriptor, rounded up to ARCH_DMA_MINALIGN. This is safe
	 * because the individual descriptors in the array are each
	 * aligned to ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -1;
	}

	memcpy(desc_p->dmamac_addr, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST | \
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

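/*
 * Poll the current Rx descriptor; if the DMA engine has released it, pass
 * the received frame to the network stack, return the descriptor to the
 * DMA engine and advance to the next one. Returns the frame length, or 0
 * if nothing was received.
 */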
static int dw_eth_recv(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = 0;
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >> \
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);

		net_process_received_packet(desc_p->dmamac_addr, length);

		/*
		 * Make the current descriptor valid again and go to
		 * the next one
		 */
		desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

		/* Flush only status field - others weren't changed */
		flush_dcache_range(desc_start, desc_end);

		/* Test the wrap-around condition. */
		if (++desc_num >= CONFIG_RX_DESCR_NUM)
			desc_num = 0;
	}

	priv->rx_currdescnum = desc_num;

	return length;
}

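/*
 * Find the PHY on the MDIO bus (restricted to CONFIG_PHY_ADDR if that is
 * defined), attach it to this device, limit it to gigabit features and run
 * the PHY configuration.
 */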
static int dw_phy_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct phy_device *phydev;
	int mask = 0xffffffff;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
	if (!phydev)
		return -1;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

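/*
 * Driver entry point: allocate the eth_device and the DMA-aligned private
 * state, register the device and its MDIO bus with the network core, then
 * probe the PHY. Board code would typically call this from its ethernet
 * init hook, e.g. (the base address below is only an example):
 *
 *	designware_initialize(0xff702000, PHY_INTERFACE_MODE_RGMII);
 */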
int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a
	 * strict buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(dev);
}