/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <common.h>

/*
 * The u-boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>

#include "macb.h"

#define CONFIG_SYS_MACB_RX_BUFFER_SIZE		4096
#define CONFIG_SYS_MACB_RX_RING_SIZE		(CONFIG_SYS_MACB_RX_BUFFER_SIZE / 128)
#define CONFIG_SYS_MACB_TX_RING_SIZE		16
#define CONFIG_SYS_MACB_TX_TIMEOUT		1000
#define CONFIG_SYS_MACB_AUTONEG_TIMEOUT	5000000

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

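/*
 * The GEM (gigabit) and MACB (10/100) variants share most of this driver;
 * the module ID field of the MID register tells them apart.
 */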
static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

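/*
 * MDIO access: enable the management port (MPE), shift one IEEE 802.3
 * clause 22 frame out through the MAN register, then poll NSR.IDLE until
 * the shift register is free again.
 */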
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

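/*
 * Weak stub: boards that route the MDIO lines through external logic can
 * override this to claim the bus before an MDIO transfer for "name".
 */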
void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#if defined(CONFIG_CMD_NET)

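/*
 * Transmit path: map the packet for DMA, hand it to the controller through
 * a single descriptor, kick TSTART and then poll the USED bit (with a
 * timeout) so the buffer isn't reused while the MAC is still reading it.
 */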
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (CONFIG_SYS_MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else
		macb->tx_head++;

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= CONFIG_SYS_MACB_TX_TIMEOUT; i++) {
		barrier();
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= CONFIG_SYS_MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares anyway */
	return 0;
}

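/*
 * Hand the receive buffers in the range [rx_tail, new_tail) back to the
 * controller by clearing their USED bits, taking a possible wrap of the
 * ring into account.
 */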
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= CONFIG_SYS_MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb->rx_tail = new_tail;
}

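/*
 * Receive path: walk the 128-byte receive buffers until a complete frame
 * (FRAME_START..FRAME_END) is found.  A frame that wraps around the end of
 * the ring is copied into NetRxPackets[0] so the stack sees one contiguous
 * buffer.
 */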
static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (CONFIG_SYS_MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)NetRxPackets[0],
				       buffer, headlen);
				memcpy((void *)NetRxPackets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)NetRxPackets[0];
			}

			NetReceive(buffer, length);
			if (++rx_tail >= CONFIG_SYS_MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= CONFIG_SYS_MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}

static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < CONFIG_SYS_MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

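/*
 * Bring up the PHY: optionally search for it, restart autonegotiation if
 * there is no link yet, then program NCFGR with the negotiated speed and
 * duplex.  Returns 1 on link up, 0 otherwise.
 */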
static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(netdev->name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* Attach via phylib so phy_config() gets an initialised phy_device */
	phydev = phy_connect(macb->bus, macb->phy_addr, netdev,
			     PHY_INTERFACE_MODE_MII);
	if (!phydev) {
		printf("%s: phy_connect failed\n", netdev->name);
		return 0;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < CONFIG_SYS_MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	}

	/* First check for GMAC */
	if (macb_is_gem(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		/* Bit 11: partner can do 1000BASE-T full duplex, bit 10: half */
		if (lpa & (1 << 11)) {
			speed = 1000;
			duplex = 1;
		} else if (lpa & (1 << 10)) {
			speed = 1000;
			duplex = 0;
		} else {
			speed = 0;
		}

		if (speed == 1000) {
			printf("%s: link up, %dMbps %s-duplex (lpa: 0x%04x)\n",
			       netdev->name,
			       speed,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD));
			if (speed)
				ncfgr |= GEM_BIT(GBE);
			if (duplex)
				ncfgr |= MACB_BIT(FD);
			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* fall back for EMAC checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       netdev->name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

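/*
 * (Re)build the RX and TX descriptor rings, select the PHY interface mode
 * (MII/RMII/RGMII depending on the board configuration), bring up the PHY
 * and finally enable the transmitter and receiver.
 */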
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < CONFIG_SYS_MACB_RX_RING_SIZE; i++) {
		if (i == (CONFIG_SYS_MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	for (i = 0; i < CONFIG_SYS_MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (CONFIG_SYS_MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb->rx_tail = macb->tx_head = macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
#ifdef CONFIG_RGMII
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

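/*
 * Stop the controller gracefully: halt transmission and wait for the
 * "transmit go" bit to clear before disabling TX/RX and clearing the
 * statistics registers.
 */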
static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int macb_write_hwaddr(struct eth_device *dev)
{
	struct macb_device *macb = to_macb(dev);
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = dev->enetaddr[0] | dev->enetaddr[1] << 8 |
			dev->enetaddr[2] << 16 | dev->enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = dev->enetaddr[4] | dev->enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

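/*
 * Pick an MDC clock divider from the peripheral clock rate so that the
 * MDIO clock stays at or below the 2.5 MHz allowed by 802.3 (e.g. a pclk
 * just under 80 MHz divided by 32 gives roughly 2.5 MHz).
 */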
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

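/*
 * Register one MACB/GEM port with the networking core.  A board would
 * typically call this from its eth init hook; the controller base address
 * and PHY address below are board-specific and only illustrative:
 *
 *	macb_eth_initialize(0, (void *)ATMEL_BASE_EMAC0, 0x00);
 */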
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(CONFIG_SYS_MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(CONFIG_SYS_MACB_RX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(CONFIG_SYS_MACB_TX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->tx_ring_dma);

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= GEM_BF(DBW, 1);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif