// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 */
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include <linux/bitops.h>
#include <linux/delay.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV		1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300		5
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, therefore descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

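/*
 * Generic DMA descriptor, as this driver uses it: on TX, des0 carries the
 * buffer address, des2 the buffer length, and des3 the control flags (see
 * the EQOS_DESC3_* bits below); on RX, the hardware returns status in des3,
 * including the received packet length in its low bits. des1 is unused here.
 */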
struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

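/*
 * Configuration-specific hooks. The generic core calls these through
 * eqos->config->ops, so that cache maintenance, clock, reset, and pad
 * calibration details can differ per integration (tegra186, stm32, imx).
 */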
struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	int phyaddr;
	u32 max_speed;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_inval_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_flush_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

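/*
 * MDIO transactions share a single address/data register pair: the GB (busy)
 * bit must be clear before a transaction is started, and it clears again
 * once the transfer has completed.
 */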
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}
	debug("%s: OK\n", __func__);

	return 0;
}

static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

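/*
 * Tegra186 pad auto-calibration: power up the pad input stage, kick the
 * auto-calibration state machine, then wait for it to report active and
 * subsequently idle again before powering the input stage back down.
 */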
static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

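/*
 * The speed helpers below drive the PS (port select) and FES (fast Ethernet
 * speed) bits of MAC_Configuration: both clear selects the gigabit (G)MII
 * interface, both set selects 100 Mbps MII, and PS alone selects 10 Mbps.
 */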
static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
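	/* address0_high holds bytes 4-5; address0_low holds bytes 0-3 */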
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr);
#endif
	return !is_valid_ethaddr(pdata->enetaddr);
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

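	/*
	 * Program the one-microsecond tick reference: the number of CSR
	 * clock cycles per microsecond, minus one.
	 */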
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001206 val = (rate / 1000000) - 1;
1207 writel(val, &eqos->mac_regs->us_tic_counter);
1208
Christophe Roullierac2d4ef2019-05-17 15:08:44 +02001209 /*
1210 * if PHY was already connected and configured,
1211 * don't need to reconnect/reconfigure again
1212 */
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001213 if (!eqos->phy) {
Ye Li6a895d02020-05-03 22:41:15 +08001214 int addr = -1;
1215#ifdef CONFIG_DM_ETH_PHY
1216 addr = eth_phy_get_addr(dev);
1217#endif
1218#ifdef DWC_NET_PHYADDR
1219 addr = DWC_NET_PHYADDR;
1220#endif
1221 eqos->phy = phy_connect(eqos->mii, addr, dev,
Christophe Roullierac2d4ef2019-05-17 15:08:44 +02001222 eqos->config->interface(dev));
1223 if (!eqos->phy) {
1224 pr_err("phy_connect() failed");
1225 goto err_stop_resets;
1226 }
Patrick Delaunay4f60a512020-03-18 10:50:16 +01001227
1228 if (eqos->max_speed) {
1229 ret = phy_set_supported(eqos->phy, eqos->max_speed);
1230 if (ret) {
1231 pr_err("phy_set_supported() failed: %d", ret);
1232 goto err_shutdown_phy;
1233 }
1234 }
1235
Christophe Roullierac2d4ef2019-05-17 15:08:44 +02001236 ret = phy_config(eqos->phy);
1237 if (ret < 0) {
1238 pr_err("phy_config() failed: %d", ret);
1239 goto err_shutdown_phy;
1240 }
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001241 }
Christophe Roullierac2d4ef2019-05-17 15:08:44 +02001242
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001243 ret = phy_startup(eqos->phy);
1244 if (ret < 0) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001245 pr_err("phy_startup() failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001246 goto err_shutdown_phy;
1247 }
1248
1249 if (!eqos->phy->link) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001250 pr_err("No link");
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001251 goto err_shutdown_phy;
1252 }
1253
1254 ret = eqos_adjust_link(dev);
1255 if (ret < 0) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001256 pr_err("eqos_adjust_link() failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001257 goto err_shutdown_phy;
1258 }
1259
1260 /* Configure MTL */
Fugang Duan3a97da12020-05-03 22:41:17 +08001261 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001262
1263 /* Enable Store and Forward mode for TX */
1264 /* Program Tx operating mode */
1265 setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
1266 EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
1267 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
1268 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));
1269
1270 /* Transmit Queue weight */
1271 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);
1272
1273 /* Enable Store and Forward mode for RX, since no jumbo frame */
1274 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
Fugang Duan3a97da12020-05-03 22:41:17 +08001275 EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
1276 EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
1277 EQOS_MTL_RXQ0_OPERATION_MODE_FUP);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001278
1279 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */
1280 val = readl(&eqos->mac_regs->hw_feature1);
1281 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
1282 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
1283 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
1284 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;
1285
1286 /*
1287 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
1288 * r/tqs is encoded as (n / 256) - 1.
1289 */
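	/*
	 * e.g. a 16 KiB FIFO is advertised as log2(16384 / 128) = 7, and the
	 * matching TQS/RQS value is (128 << 7) / 256 - 1 = 63.
	 */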
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control to space for
		 * min 2 frames, i.e. (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to space for
		 * min 1 frame (frame size 1500 bytes) in the receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit, therefore overflow may occur in spite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

1360 setbits_le32(&eqos->mac_regs->unused_0a4,
1361 0x00100000);
1362 /* enable promise mode */
1363 setbits_le32(&eqos->mac_regs->unused_004[1],
1364 0x1);
1365
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001366 /* Set TX flow control parameters */
1367 /* Set Pause Time */
1368 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
1369 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
1370 /* Assign priority for TX flow control */
1371 clrbits_le32(&eqos->mac_regs->txq_prty_map0,
1372 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
1373 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
1374 /* Assign priority for RX flow control */
1375 clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
1376 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
1377 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
1378 /* Enable flow control */
1379 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
1380 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
1381 setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
1382 EQOS_MAC_RX_FLOW_CTRL_RFE);
1383
1384 clrsetbits_le32(&eqos->mac_regs->configuration,
1385 EQOS_MAC_CONFIGURATION_GPSLCE |
1386 EQOS_MAC_CONFIGURATION_WD |
1387 EQOS_MAC_CONFIGURATION_JD |
1388 EQOS_MAC_CONFIGURATION_JE,
1389 EQOS_MAC_CONFIGURATION_CST |
1390 EQOS_MAC_CONFIGURATION_ACS);
1391
1392 eqos_write_hwaddr(dev);
1393
	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
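	/*
	 * e.g. with tqs = 63 (16 KiB FIFO), pbl is clamped to 32 below,
	 * giving 32 * 8 * 16 = 4 KiB bursts against the 8 KiB half-FIFO limit.
	 */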
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
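	/*
	 * Hand every RX descriptor and its buffer to the hardware: point
	 * des0 at the buffer and set OWN so the DMA engine may write to it.
	 */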
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);
		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

Patrick Delaunay50d86e52019-08-01 11:29:02 +02001494static void eqos_stop(struct udevice *dev)
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001495{
1496 struct eqos_priv *eqos = dev_get_priv(dev);
1497 int i;
1498
1499 debug("%s(dev=%p):\n", __func__, dev);
1500
1501 if (!eqos->started)
1502 return;
1503 eqos->started = false;
1504 eqos->reg_access_ok = false;
1505
1506 /* Disable TX DMA */
1507 clrbits_le32(&eqos->dma_regs->ch0_tx_control,
1508 EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}
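
#if 0
/*
 * Illustrative sketch only (not part of this driver): the MTL drain waits
 * in eqos_stop() above could instead be bounded by wall-clock time rather
 * than a fixed iteration count. This assumes a 10ms budget is acceptable
 * and uses U-Boot's get_timer()/udelay().
 */
static void eqos_wait_mtl_txq_drained(struct eqos_priv *eqos)
{
	ulong start = get_timer(0);

	while (get_timer(start) < 10) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;

		/* Read/control path idle and queue empty: fully drained */
		if (trcsts != 1 && !(val & EQOS_MTL_TXQ0_DEBUG_TXQSTS))
			return;
		udelay(1);
	}
}
#endif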

static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);
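
	/*
	 * Writing the tail pointer is what actually kicks the DMA engine:
	 * it fetches and processes descriptors up to (but not including)
	 * the tail, i.e. the descriptor just prepared above.
	 */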
	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
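	/* The low 15 bits of the written-back des3 hold the packet length,
	 * hence the 0x7fff mask.
	 */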
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}
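
/*
 * The eth uclass hands each packet returned by eqos_recv() to the caller
 * and then invokes free_pkt(), at which point this driver recycles the
 * corresponding RX descriptor back to the DMA engine.
 */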
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
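
	/*
	 * Clear des0 and publish the cleared descriptor first, so the DMA
	 * engine cannot latch a stale buffer address while the descriptor
	 * is being rewritten below.
	 */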
	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);
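
	/*
	 * Invalidate the whole RX buffer up front so no dirty cache lines
	 * can later be evicted on top of data the DMA has written.
	 */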
	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(eqos) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* Board-specific Ethernet interface initialization; boards may override. */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}
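
#if 0
/*
 * Illustrative board-side override (a sketch, not part of this driver):
 * a board can provide its own board_interface_eth_init() to program SoC
 * glue logic, e.g. a syscon register selecting MII/RMII/RGMII, before the
 * MAC is brought up. The body below is purely hypothetical.
 */
int board_interface_eth_init(struct udevice *dev,
			     phy_interface_t interface_type)
{
	switch (interface_type) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_RGMII:
		/* Program the board's PHY-interface mux here. */
		return 0;
	default:
		return -EINVAL;
	}
}
#endif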

static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(stmmaceth) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(mac-clk-rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(mac-clk-tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);
	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* Look up optional "reset-gpios" in the PHY node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("gpio_request_by_name(reset-gpios) failed: %d",
				ret);

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};

static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_tegra186,
	.eqos_flush_desc = eqos_flush_desc_tegra186,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

struct eqos_config eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{
		.compatible = "st,stm32mp1-dwmac",
		.data = (ulong)&eqos_stm32_config
	},
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
	{ }
};
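
/*
 * Illustrative device-tree fragment (an assumption, not a complete board
 * binding) for the stm32 entry above; the clock and property names follow
 * what eqos_probe_resources_stm32() reads, and the unit address is made up:
 *
 *	ethernet@5800a000 {
 *		compatible = "st,stm32mp1-dwmac";
 *		clock-names = "stmmaceth", "mac-clk-rx", "mac-clk-tx", "eth-ck";
 *		phy-mode = "rgmii";
 *		max-speed = <1000>;
 *		phy-handle = <&ethphy0>;
 *	};
 */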

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
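
/*
 * Once this driver is bound, the standard U-Boot network commands ("dhcp",
 * "ping", "tftpboot", ...) exercise it through the eth uclass, which calls
 * the eqos_ops hooks above: start, then send/recv/free_pkt, then stop.
 */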