net: cortina_ni: Add eth support for Cortina Access CAxxxx SoCs
Add Cortina Access Ethernet device driver for CAxxxx SoCs.
This driver is written for the DM_ETH driver model.
Signed-off-by: Aaron Tseng <aaron.tseng@cortina-access.com>
Signed-off-by: Alex Nemirovsky <alex.nemirovsky@cortina-access.com>
Signed-off-by: Abbie Chang <abbie.chang@cortina-access.com>
CC: Joe Hershberger <joe.hershberger@ni.com>
CC: Abbie Chang <abbie.chang@Cortina-Access.com>
CC: Tom Rini <trini@konsulko.com>
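
The driver is enabled with CONFIG_CORTINA_NI_ENET=y. For reference, below is
a minimal sketch of a device tree node as consumed by ca_ni_of_to_plat();
the compatible string, property names and reg index order are taken from the
driver, while the addresses and values are placeholders only, not settings
for a real CAxxxx board:

	eth0: ni@0 {
		compatible = "eth_cortina";
		/* index 0: global, index 1: per_mdio, index 2: ni_hv */
		reg = <0x0 0x0>, <0x0 0x0>, <0x0 0x0>;
		valid-port-map = <0x18>;
		valid-port-num = <2>;
		/* <phy-addr port> pairs */
		valid-ports = <4 3 5 4>;
		inter-gphy-num = <0>;
		def-active-port = <4>;
		init-rgmii = <1>;
		ni-xram-base = <0x0>;
	};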
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d1a52c7..971a572 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,6 +149,13 @@
help
This driver supports the BCMGENET Ethernet MAC.
+config CORTINA_NI_ENET
+ bool "Cortina-Access Ethernet driver"
+ depends on DM_ETH && CORTINA_PLATFORM
+ help
+ This driver supports the Cortina-Access Ethernet MAC for
+ all supported CAxxxx SoCs.
+
config DWC_ETH_QOS
bool "Synopsys DWC Ethernet QOS device support"
depends on DM_ETH
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f2a0df5..6712b74 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@
obj-$(CONFIG_BCM_SF2_ETH) += bcm-sf2-eth.o
obj-$(CONFIG_BCM_SF2_ETH_GMAC) += bcm-sf2-eth-gmac.o
obj-$(CONFIG_CALXEDA_XGMAC) += calxedaxgmac.o
+obj-$(CONFIG_CORTINA_NI_ENET) += cortina_ni.o
obj-$(CONFIG_CS8900) += cs8900.o
obj-$(CONFIG_TULIP) += dc2114x.o
obj-$(CONFIG_ETH_DESIGNWARE) += designware.o
diff --git a/drivers/net/cortina_ni.c b/drivers/net/cortina_ni.c
new file mode 100644
index 0000000..ee424d9
--- /dev/null
+++ b/drivers/net/cortina_ni.c
@@ -0,0 +1,1103 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2020 Cortina Access Inc.
+ * Author: Aaron Tseng <aaron.tseng@cortina-access.com>
+ *
+ * Ethernet MAC Driver for all supported CAxxxx SoCs
+ */
+
+#include <common.h>
+#include <command.h>
+#include <malloc.h>
+#include <net.h>
+#include <miiphy.h>
+#include <env.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <u-boot/crc.h>
+#include <led.h>
+
+#include "cortina_ni.h"
+
+#define HEADER_A_SIZE 8
+
+enum ca_led_state_t {
+ CA_LED_OFF = 0,
+ CA_LED_ON = 1,
+};
+
+enum ca_port_t {
+ NI_PORT_0 = 0,
+ NI_PORT_1,
+ NI_PORT_2,
+ NI_PORT_3,
+ NI_PORT_4,
+ NI_PORT_5,
+ NI_PORT_MAX,
+};
+
+static struct udevice *curr_dev;
+
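+/*
+ * Advance an XRAM pointer by one 32-bit word, wrapping back to the start
+ * of the window once the end is reached.
+ */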
+static u32 *ca_rdwrptr_adv_one(u32 *x, unsigned long base, unsigned long max)
+{
+ if (x + 1 >= (u32 *)max)
+ return (u32 *)base;
+ else
+ return (x + 1);
+}
+
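+/*
+ * Register access helpers: the register layouts are modelled by the 32-bit
+ * bit-field structs in cortina_ni.h. Callers pass a pointer to such a
+ * struct (or to a plain u32) and the helpers copy it from/to the register
+ * at base + offset, translated through KSEG1_ATU_XLAT().
+ */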
+static void ca_reg_read(void *reg, u64 base, u64 offset)
+{
+ u32 *val = (u32 *)reg;
+
+ *val = readl(KSEG1_ATU_XLAT(base + offset));
+}
+
+static void ca_reg_write(void *reg, u64 base, u64 offset)
+{
+ u32 val = *(u32 *)reg;
+
+ writel(val, KSEG1_ATU_XLAT(base + offset));
+}
+
+static int ca_mdio_write_rgmii(u32 addr, u32 offset, u16 data)
+{
+ /* poll up to __MDIO_ACCESS_TIMEOUT times */
+ u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
+ struct PER_MDIO_ADDR_t mdio_addr;
+ struct PER_MDIO_CTRL_t mdio_ctrl;
+ struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
+
+ memset(&mdio_addr, 0, sizeof(mdio_addr));
+ mdio_addr.mdio_addr = addr;
+ mdio_addr.mdio_offset = offset;
+ mdio_addr.mdio_rd_wr = __MDIO_WR_FLAG;
+ ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_ADDR_OFFSET);
+ ca_reg_write(&data, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_WRDATA_OFFSET);
+
+ memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
+ mdio_ctrl.mdiostart = 1;
+ ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CTRL_OFFSET);
+
+ debug("%s: phy_addr=%d, offset=%d, data=0x%x\n",
+ __func__, addr, offset, data);
+
+ do {
+ ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CTRL_OFFSET);
+ if (mdio_ctrl.mdiodone) {
+ ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CTRL_OFFSET);
+ return 0;
+ }
+ } while (--loop_wait);
+
+ printf("CA NI %s: PHY write timeout!!!\n", __func__);
+ return -ETIMEDOUT;
+}
+
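+/*
+ * MDIO access is split into two paths: PHY addresses below 5 (typically the
+ * internal GPHYs) are reached through a direct register access, where the
+ * fields of struct NI_MDIO_OPER_T form the register address itself, while
+ * addresses 5 and above go through the peripheral MDIO controller via
+ * ca_mdio_write_rgmii()/ca_mdio_read_rgmii().
+ */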
+int ca_mdio_write(u32 addr, u32 offset, u16 data)
+{
+ u32 reg_addr, reg_val;
+ struct NI_MDIO_OPER_T mdio_oper;
+
+ /* supported address range: 1~31 */
+ if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
+ return -EINVAL;
+
+ /* PHY addresses from 5 up (the RGMII PHY) go through the MDIO controller */
+ if (addr >= 5)
+ return ca_mdio_write_rgmii(addr, offset, data);
+
+ memset(&mdio_oper, 0, sizeof(mdio_oper));
+ mdio_oper.reg_off = offset;
+ mdio_oper.phy_addr = addr;
+ mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
+ reg_val = data;
+ memcpy(&reg_addr, &mdio_oper, sizeof(reg_addr));
+ ca_reg_write(&reg_val, (u64)reg_addr, 0);
+
+ return 0;
+}
+
+static int ca_mdio_read_rgmii(u32 addr, u32 offset, u16 *data)
+{
+ u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
+ struct PER_MDIO_ADDR_t mdio_addr;
+ struct PER_MDIO_CTRL_t mdio_ctrl;
+ struct PER_MDIO_RDDATA_t read_data;
+ struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
+
+ memset(&mdio_addr, 0, sizeof(mdio_addr));
+ mdio_addr.mdio_addr = addr;
+ mdio_addr.mdio_offset = offset;
+ mdio_addr.mdio_rd_wr = __MDIO_RD_FLAG;
+ ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_ADDR_OFFSET);
+
+ memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
+ mdio_ctrl.mdiostart = 1;
+ ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CTRL_OFFSET);
+
+ do {
+ ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CTRL_OFFSET);
+ if (mdio_ctrl.mdiodone) {
+ ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CTRL_OFFSET);
+ ca_reg_read(&read_data, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_RDDATA_OFFSET);
+ *data = read_data.mdio_rddata;
+ return 0;
+ }
+ } while (--loop_wait);
+
+ printf("CA NI %s: TIMEOUT!!\n", __func__);
+ return -ETIMEDOUT;
+}
+
+int ca_mdio_read(u32 addr, u32 offset, u16 *data)
+{
+ u32 reg_addr, reg_val;
+ struct NI_MDIO_OPER_T mdio_oper;
+
+ if (!data)
+ return -EINVAL;
+
+ /* supported address range: 1~31 */
+ if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
+ return -EINVAL;
+
+ /* PHY addresses from 5 up (the RGMII PHY) go through the MDIO controller */
+ if (addr >= 5)
+ return ca_mdio_read_rgmii(addr, offset, data);
+
+ memset(&mdio_oper, 0, sizeof(mdio_oper));
+ mdio_oper.reg_off = offset;
+ mdio_oper.phy_addr = addr;
+ mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
+ reg_val = *data;
+ memcpy(&reg_addr, &mdio_oper, sizeof(reg_addr));
+ ca_reg_read(&reg_val, (u64)reg_addr, 0);
+ *data = reg_val;
+ return 0;
+}
+
+int ca_miiphy_read(const char *devname, u8 addr, u8 reg, u16 *value)
+{
+ return ca_mdio_read(addr, reg, value);
+}
+
+int ca_miiphy_write(const char *devname, u8 addr, u8 reg, u16 value)
+{
+ return ca_mdio_write(addr, reg, value);
+}
+
+static int cortina_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
+{
+ u16 data;
+
+ ca_mdio_read(addr, reg, &data);
+ return data;
+}
+
+static int cortina_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
+ u16 val)
+{
+ return ca_mdio_write(addr, reg, val);
+}
+
+static void ca_ni_setup_mac_addr(void)
+{
+ u8 mac[6];
+ struct NI_HV_GLB_MAC_ADDR_CFG0_t mac_addr_cfg0;
+ struct NI_HV_GLB_MAC_ADDR_CFG1_t mac_addr_cfg1;
+ struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
+ struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
+ struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
+
+ /* parsing ethaddr and set to NI registers. */
+ if (eth_env_get_enetaddr("ethaddr", mac)) {
+ /* The complete MAC address consists of
+ * {MAC_ADDR0_mac_addr0[0-3], MAC_ADDR1_mac_addr1[4],
+ * PT_PORT_STATIC_CFG_mac_addr6[5]}.
+ */
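+ /* For example, ethaddr 00:11:22:33:44:55 ends up as
+ * mac_addr0 = 0x00112233, mac_addr1 = 0x44 and mac_addr6 = 0x55.
+ */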
+ mac_addr_cfg0.mac_addr0 = (mac[0] << 24) + (mac[1] << 16) +
+ (mac[2] << 8) + mac[3];
+ ca_reg_write(&mac_addr_cfg0, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_MAC_ADDR_CFG0_OFFSET);
+
+ memset(&mac_addr_cfg1, 0, sizeof(mac_addr_cfg1));
+ mac_addr_cfg1.mac_addr1 = mac[4];
+ ca_reg_write(&mac_addr_cfg1, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_MAC_ADDR_CFG1_OFFSET);
+
+ ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_PORT_STATIC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+
+ port_static_cfg.mac_addr6 = mac[5];
+ ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_PORT_STATIC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+
+ /* received only Broadcast and Address matched packets */
+ ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+ cpuxram_cfg.xram_mgmt_promisc_mode = 0;
+ cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
+ cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
+ ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+ } else {
+ /* received all packets(promiscuous mode) */
+ ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+ cpuxram_cfg.xram_mgmt_promisc_mode = 3;
+ cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
+ cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
+ ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+ }
+}
+
+static void ca_ni_enable_tx_rx(void)
+{
+ struct NI_HV_PT_RXMAC_CFG_t rxmac_cfg;
+ struct NI_HV_PT_TXMAC_CFG_t txmac_cfg;
+ struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
+
+ /* Enable TX and RX functions */
+ ca_reg_read(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_RXMAC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+ rxmac_cfg.rx_en = 1;
+ ca_reg_write(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_RXMAC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+
+ ca_reg_read(&txmac_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_TXMAC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+ txmac_cfg.tx_en = 1;
+ ca_reg_write(&txmac_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_TXMAC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+}
+
+#define AUTO_SCAN_TIMEOUT 3000 /* 3 seconds */
+static int ca_ni_auto_scan_active_port(struct cortina_ni_priv *priv)
+{
+ u8 i;
+ u16 data;
+ u32 start_time;
+
+ start_time = get_timer(0);
+ while (get_timer(start_time) < AUTO_SCAN_TIMEOUT) {
+ for (i = 0; i < priv->valid_port_num; i++) {
+ if (!priv->port_map[i].phy_addr)
+ continue;
+
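+ /* MII BMSR (register 1); bit 2 reports link status */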
+ ca_mdio_read(priv->port_map[i].phy_addr, 1, &data);
+ if (data & 0x04) {
+ priv->active_port = priv->port_map[i].port;
+ return 0;
+ }
+ }
+ }
+
+ printf("CA NI %s: auto scan active_port timeout.\n", __func__);
+ return -1;
+}
+
+static void ca_ni_led(int port, int status)
+{
+ char label[10];
+ struct udevice *led_dev;
+
+ if (IS_ENABLED(CONFIG_LED_CORTINA)) {
+ snprintf(label, sizeof(label), "led%d", port);
+ debug("%s: set port %d led %s.\n",
+ __func__, port, status ? "on" : "off");
+ if (!led_get_by_label(label, &led_dev))
+ led_set_state(led_dev, status);
+ }
+}
+
+static void ca_ni_reset(void)
+{
+ int i;
+ struct NI_HV_GLB_INIT_DONE_t init_done;
+ struct NI_HV_GLB_INTF_RST_CONFIG_t intf_rst_config;
+ struct NI_HV_GLB_STATIC_CFG_t static_cfg;
+ struct GLOBAL_BLOCK_RESET_t glb_blk_reset;
+ struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
+
+ /* NI global resets */
+ ca_reg_read(&glb_blk_reset, (u64)priv->glb_base_addr,
+ GLOBAL_BLOCK_RESET_OFFSET);
+ glb_blk_reset.reset_ni = 1;
+ ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
+ GLOBAL_BLOCK_RESET_OFFSET);
+ /* Remove resets */
+ glb_blk_reset.reset_ni = 0;
+ ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
+ GLOBAL_BLOCK_RESET_OFFSET);
+
+ /* check the ready bit of NI module */
+ for (i = 0; i < NI_READ_POLL_COUNT; i++) {
+ ca_reg_read(&init_done, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_INIT_DONE_OFFSET);
+ if (init_done.ni_init_done)
+ break;
+ }
+ if (i == NI_READ_POLL_COUNT) {
+ printf("CA NI %s: NI init done not ready, init_done=0x%x!!!\n",
+ __func__, init_done.ni_init_done);
+ }
+
+ ca_reg_read(&intf_rst_config, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_INTF_RST_CONFIG_OFFSET);
+ switch (priv->active_port) {
+ case NI_PORT_0:
+ intf_rst_config.intf_rst_p0 = 0;
+ intf_rst_config.mac_rx_rst_p0 = 0;
+ intf_rst_config.mac_tx_rst_p0 = 0;
+ break;
+ case NI_PORT_1:
+ intf_rst_config.intf_rst_p1 = 0;
+ intf_rst_config.mac_rx_rst_p1 = 0;
+ intf_rst_config.mac_tx_rst_p1 = 0;
+ break;
+ case NI_PORT_2:
+ intf_rst_config.intf_rst_p2 = 0;
+ intf_rst_config.mac_rx_rst_p2 = 0;
+ intf_rst_config.mac_tx_rst_p2 = 0;
+ break;
+ case NI_PORT_3:
+ intf_rst_config.intf_rst_p3 = 0;
+ intf_rst_config.mac_tx_rst_p3 = 0;
+ intf_rst_config.mac_rx_rst_p3 = 0;
+ break;
+ case NI_PORT_4:
+ intf_rst_config.intf_rst_p4 = 0;
+ intf_rst_config.mac_tx_rst_p4 = 0;
+ intf_rst_config.mac_rx_rst_p4 = 0;
+ break;
+ }
+
+ ca_reg_write(&intf_rst_config, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_INTF_RST_CONFIG_OFFSET);
+
+ /* Only one GMAC can connect to CPU */
+ ca_reg_read(&static_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_STATIC_CFG_OFFSET);
+ static_cfg.port_to_cpu = priv->active_port;
+ static_cfg.txmib_mode = 1;
+ static_cfg.rxmib_mode = 1;
+
+ ca_reg_write(&static_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_GLB_STATIC_CFG_OFFSET);
+}
+
+static void ca_internal_gphy_cal(struct cortina_ni_priv *priv)
+{
+ int i, port, num;
+ u32 reg_off, value;
+
+ num = priv->gphy_num;
+ for (port = 0; port < 4; port++) {
+ for (i = 0; i < num; i++) {
+ reg_off = priv->gphy_values[i].reg_off + (port * 0x80);
+ value = priv->gphy_values[i].value;
+ ca_reg_write(&value, reg_off, 0);
+ mdelay(50);
+ }
+ }
+}
+
+static int ca_mdio_register(struct udevice *dev)
+{
+ int ret;
+ struct cortina_ni_priv *priv = dev_get_priv(dev);
+ struct mii_dev *mdio_bus = mdio_alloc();
+
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->read = cortina_mdio_read;
+ mdio_bus->write = cortina_mdio_write;
+ snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);
+
+ mdio_bus->priv = (void *)priv;
+
+ ret = mdio_register(mdio_bus);
+ if (ret)
+ return ret;
+
+ priv->mdio_bus = mdio_bus;
+ return 0;
+}
+
+static void ca_rgmii_init(struct cortina_ni_priv *priv)
+{
+ struct GLOBAL_GLOBAL_CONFIG_t glb_config;
+ struct GLOBAL_IO_DRIVE_CONTROL_t io_drive_control;
+
+ /* Generate the 25 MHz reference clock for the switch */
+ ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
+ GLOBAL_GLOBAL_CONFIG_OFFSET);
+ glb_config.refclk_sel = 0x01;
+ glb_config.ext_reset = 0x01;
+ ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
+ GLOBAL_GLOBAL_CONFIG_OFFSET);
+
+ mdelay(20);
+
+ /* Do external reset */
+ ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
+ GLOBAL_GLOBAL_CONFIG_OFFSET);
+ glb_config.ext_reset = 0x0;
+ ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
+ GLOBAL_GLOBAL_CONFIG_OFFSET);
+
+ ca_reg_read(&io_drive_control, (u64)priv->glb_base_addr,
+ GLOBAL_IO_DRIVE_CONTROL_OFFSET);
+ io_drive_control.gmac_mode = 2;
+ io_drive_control.gmac_dn = 1;
+ io_drive_control.gmac_dp = 1;
+ ca_reg_write(&io_drive_control, (u64)priv->glb_base_addr,
+ GLOBAL_IO_DRIVE_CONTROL_OFFSET);
+}
+
+static int ca_phy_probe(struct udevice *dev)
+{
+ int auto_scan_active_port = 0, tmp_port;
+ char *buf;
+ struct cortina_ni_priv *priv = dev_get_priv(dev);
+ struct phy_device *int_phydev, *ext_phydev;
+
+ /* Initialize internal phy device */
+ int_phydev = phy_connect(priv->mdio_bus,
+ priv->port_map[NI_PORT_3].phy_addr,
+ dev, priv->phy_interface);
+ if (int_phydev) {
+ int_phydev->supported &= PHY_GBIT_FEATURES;
+ int_phydev->advertising = int_phydev->supported;
+ phy_config(int_phydev);
+ } else {
+ printf("CA NI %s: There is no internal phy device\n", __func__);
+ }
+
+ /* Initialize external phy device */
+ ext_phydev = phy_connect(priv->mdio_bus,
+ priv->port_map[NI_PORT_4].phy_addr,
+ dev, priv->phy_interface);
+ if (ext_phydev) {
+ ext_phydev->supported &= PHY_GBIT_FEATURES;
+ ext_phydev->advertising = ext_phydev->supported;
+ phy_config(ext_phydev);
+ } else {
+ printf("CA NI %s: There is no external phy device\n", __func__);
+ }
+
+ /* auto scan the first link up port as active_port */
+ buf = env_get("auto_scan_active_port");
+ if (buf != 0) {
+ auto_scan_active_port = simple_strtoul(buf, NULL, 0);
+ printf("CA NI %s: auto_scan_active_port=%d\n", __func__,
+ auto_scan_active_port);
+ }
+
+ if (auto_scan_active_port) {
+ ca_ni_auto_scan_active_port(priv);
+ } else {
+ buf = env_get("active_port");
+ if (buf != 0) {
+ tmp_port = simple_strtoul(buf, NULL, 0);
+ if (tmp_port < 0 ||
+ !(priv->valid_port_map & BIT(tmp_port))) {
+ printf("CA NI ERROR: port not supported.\n");
+ return -EINVAL;
+ }
+
+ priv->active_port = tmp_port;
+ }
+ }
+
+ printf("CA NI %s: active_port=%d\n", __func__, priv->active_port);
+ if (priv->active_port == NI_PORT_4)
+ priv->phydev = ext_phydev;
+ else
+ priv->phydev = int_phydev;
+
+ return 0;
+}
+
+static int cortina_eth_start(struct udevice *dev)
+{
+ int ret;
+ struct NI_HV_XRAM_CPUXRAM_ADRCFG_RX_t cpuxram_adrcfg_rx;
+ struct NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_t cpuxram_adrcfg_tx;
+ struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
+ struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
+ struct NI_HV_PT_PORT_GLB_CFG_t port_glb_cfg;
+ struct cortina_ni_priv *priv = dev_get_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ ret = phy_startup(priv->phydev);
+ if (ret) {
+ ca_ni_led(priv->active_port, CA_LED_OFF);
+ printf("CA NI Could not initialize PHY %s, active_port=%d\n",
+ priv->phydev->dev->name, priv->active_port);
+ return ret;
+ }
+
+ if (!priv->phydev->link) {
+ printf("CA NI %s: link down.\n", priv->phydev->dev->name);
+ return 0;
+ }
+
+ ca_ni_led(priv->active_port, CA_LED_ON);
+ printf("CA NI PHY ID 0x%08X %dMbps %s duplex\n",
+ phydev->phy_id, phydev->speed,
+ phydev->duplex == DUPLEX_HALF ? "half" : "full");
+
+ /* RX XRAM ADDRESS CONFIG (start and end address) */
+ memset(&cpuxram_adrcfg_rx, 0, sizeof(cpuxram_adrcfg_rx));
+ cpuxram_adrcfg_rx.rx_top_addr = RX_TOP_ADDR;
+ cpuxram_adrcfg_rx.rx_base_addr = RX_BASE_ADDR;
+ ca_reg_write(&cpuxram_adrcfg_rx, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_ADRCFG_RX_OFFSET);
+
+ /* TX XRAM ADDRESS CONFIG (start and end address) */
+ memset(&cpuxram_adrcfg_tx, 0, sizeof(cpuxram_adrcfg_tx));
+ cpuxram_adrcfg_tx.tx_top_addr = TX_TOP_ADDR;
+ cpuxram_adrcfg_tx.tx_base_addr = TX_BASE_ADDR;
+ ca_reg_write(&cpuxram_adrcfg_tx, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_OFFSET);
+
+ /*
+ * Configuration for Management Ethernet Interface:
+ * - RGMII 1000 mode or RGMII 100 mode
+ * - MAC mode
+ */
+ ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_PORT_STATIC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+ if (phydev->speed == SPEED_1000) {
+ /* port 4 connects to RGMII PHY */
+ if (phydev->addr == 5)
+ port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_1000;
+ else
+ port_static_cfg.int_cfg = GE_MAC_INTF_GMII;
+ } else {
+ /* port 4 connects to RGMII PHY */
+ if (phydev->addr == 5)
+ port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_100;
+ else
+ port_static_cfg.int_cfg = GE_MAC_INTF_MII;
+ }
+
+ ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_PORT_STATIC_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+
+ ca_reg_read(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_PORT_GLB_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+ port_glb_cfg.speed = phydev->speed == SPEED_10 ? 1 : 0;
+ port_glb_cfg.duplex = phydev->duplex == DUPLEX_HALF ? 1 : 0;
+ ca_reg_write(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_PT_PORT_GLB_CFG_OFFSET +
+ (APB0_NI_HV_PT_STRIDE * priv->active_port));
+
+ /* Need to toggle the tx and rx cpu_pkt_dis bit */
+ /* after changing Address config register. */
+ ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+ cpuxram_cfg.rx_0_cpu_pkt_dis = 1;
+ cpuxram_cfg.tx_0_cpu_pkt_dis = 1;
+ ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+
+ ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+ cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
+ cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
+ ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
+
+ ca_ni_enable_tx_rx();
+
+ return 0;
+}
+
+/*********************************************
+ * Packet receive routine from Management FE
+ * Expects a previously allocated buffer and
+ * fills the length
+ * Returns 0 on success, -1 on failure
+ *******************************************/
+static int cortina_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ u8 *ptr;
+ u32 next_link, pktlen = 0;
+ u32 sw_rx_rd_ptr, hw_rx_wr_ptr, *rx_xram_ptr, *data_ptr;
+ int loop, index = 0, blk_num;
+ struct cortina_ni_priv *priv = dev_get_priv(dev);
+ struct NI_HEADER_X_T header_x;
+ struct NI_PACKET_STATUS packet_status;
+ struct NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_t cpuxram_cpu_sta_rx;
+ struct NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_t cpuxram_cpu_cfg_rx;
+
+ /* get the hw write pointer */
+ memset(&cpuxram_cpu_sta_rx, 0, sizeof(cpuxram_cpu_sta_rx));
+ ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
+ hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;
+
+ /* get the sw read pointer */
+ memset(&cpuxram_cpu_cfg_rx, 0, sizeof(cpuxram_cpu_cfg_rx));
+ ca_reg_read(&cpuxram_cpu_cfg_rx, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ sw_rx_rd_ptr = cpuxram_cpu_cfg_rx.pkt_rd_ptr;
+
+ debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0 = 0x%p, ", __func__,
+ priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
+ debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0 = 0x%p\n",
+ priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ debug("%s : RX hw_wr_ptr = %d, sw_rd_ptr = %d\n",
+ __func__, hw_rx_wr_ptr, sw_rx_rd_ptr);
+
+ while (sw_rx_rd_ptr != hw_rx_wr_ptr) {
+ /* Point to the absolute memory address of XRAM
+ * where read pointer is
+ */
+ rx_xram_ptr = (u32 *)
+ ((unsigned long)priv->ni_xram_base
+ + sw_rx_rd_ptr * 8);
+
+ /* Wrap around if required */
+ if (rx_xram_ptr >= (u32 *)(unsigned long)priv->rx_xram_end_adr)
+ rx_xram_ptr = (u32 *)
+ (unsigned long)priv->rx_xram_base_adr;
+
+ /* Checking header XR. Do not update the read pointer yet */
+ /* skip unused 32-bit in Header XR */
+ rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
+ priv->rx_xram_base_adr,
+ priv->rx_xram_end_adr);
+
+ memcpy(&header_x, rx_xram_ptr, sizeof(header_x));
+ next_link = header_x.next_link;
+ /* Header XR [31:0] */
+
+ if (*rx_xram_ptr == 0xffffffff)
+ printf("CA NI %s: XRAM Error !\n", __func__);
+
+ debug("%s : RX next link 0x%x\n", __func__, next_link);
+ debug("%s : bytes_valid %x\n", __func__, header_x.bytes_valid);
+
+ if (header_x.ownership == 0) {
+ /* point to Packet status [31:0] */
+ rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
+ priv->rx_xram_base_adr,
+ priv->rx_xram_end_adr);
+
+ memcpy(&packet_status, rx_xram_ptr,
+ sizeof(packet_status));
+ if (packet_status.valid == 0) {
+ debug("%s: Invalid Packet !!, ", __func__);
+ debug("next_link=%d\n", next_link);
+
+ /* Update the software read pointer */
+ ca_reg_write(&next_link,
+ (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ return 0;
+ }
+
+ if (packet_status.drop ||
+ packet_status.runt ||
+ packet_status.oversize ||
+ packet_status.jabber ||
+ packet_status.crc_error ||
+ packet_status.jumbo) {
+ debug("%s: Error Packet!!, ", __func__);
+ debug("next_link=%d\n", next_link);
+
+ /* Update the software read pointer */
+ ca_reg_write(&next_link,
+ (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ return 0;
+ }
+
+ /* check whether packet size is larger than 1518 (1514 + 4-byte FCS) */
+ if (packet_status.packet_size > 1518) {
+ debug("%s: Error Packet !! Packet size=%d, ",
+ __func__, packet_status.packet_size);
+ debug("larger than 1518, next_link=%d\n",
+ next_link);
+
+ /* Update the software read pointer */
+ ca_reg_write(&next_link,
+ (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ return 0;
+ }
+
+ rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
+ priv->rx_xram_base_adr,
+ priv->rx_xram_end_adr);
+
+ pktlen = packet_status.packet_size;
+
+ debug("%s : rx packet length = %d\n",
+ __func__, packet_status.packet_size);
+
+ rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
+ priv->rx_xram_base_adr,
+ priv->rx_xram_end_adr);
+
+ data_ptr = (u32 *)net_rx_packets[index];
+
+ /* Read out the packet */
+ /* Data is in little endian form in the XRAM */
+
+ /* Send the packet to upper layer */
+
+ debug("%s: packet data[]=", __func__);
+
+ for (loop = 0; loop <= pktlen / 4; loop++) {
+ ptr = (u8 *)rx_xram_ptr;
+ if (loop < 10)
+ debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]",
+ ptr[0], ptr[1], ptr[2], ptr[3]);
+ *data_ptr++ = *rx_xram_ptr++;
+ /* Wrap around if required */
+ if (rx_xram_ptr >= (u32 *)
+ (unsigned long)priv->rx_xram_end_adr) {
+ rx_xram_ptr = (u32 *)(unsigned long)
+ (priv->rx_xram_base_adr);
+ }
+ }
+
+ debug("\n");
+ net_process_received_packet(net_rx_packets[index],
+ pktlen);
+ blk_num = net_rx_packets[index][0x2c] * 256 +
+ net_rx_packets[index][0x2d];
+ debug("%s: tftp block number=%d\n", __func__, blk_num);
+ if (++index >= PKTBUFSRX)
+ index = 0;
+
+ /* Update the software read pointer */
+ ca_reg_write(&next_link,
+ (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ }
+
+ /* get the hw write pointer */
+ ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
+ hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;
+
+ /* get the sw read pointer */
+ ca_reg_read(&sw_rx_rd_ptr, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
+ }
+ return 0;
+}
+
+static int cortina_eth_send(struct udevice *dev, void *packet, int length)
+{
+ u32 hw_tx_rd_ptr = 0, sw_tx_wr_ptr = 0;
+ u32 loop, new_pkt_len, ca_crc32;
+ u32 *tx_xram_ptr, *data_ptr;
+ u16 next_link = 0;
+ u8 *ptr, *pkt_buf_ptr, valid_bytes = 0;
+ int pad = 0;
+ static u8 pkt_buf[2048];
+ struct NI_HEADER_X_T hdr_xt;
+ struct NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_t cpuxram_cpu_cfg_tx;
+ struct cortina_ni_priv *priv = dev_get_priv(dev);
+
+ if (!packet || length > 2032)
+ return -1;
+
+ /* Get the hardware read pointer */
+ ca_reg_read(&hw_tx_rd_ptr, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET);
+
+ /* Get the software write pointer */
+ ca_reg_read(&sw_tx_wr_ptr, (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);
+
+ debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0=0x%p, ",
+ __func__,
+ KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
+ NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET));
+ debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0=0x%p\n",
+ KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET));
+ debug("%s : hw_tx_rd_ptr = %d\n", __func__, hw_tx_rd_ptr);
+ debug("%s : sw_tx_wr_ptr = %d\n", __func__, sw_tx_wr_ptr);
+
+ if (hw_tx_rd_ptr != sw_tx_wr_ptr) {
+ printf("CA NI %s: Tx FIFO is not available!\n", __func__);
+ return 1;
+ }
+
+ /* Workaround (2015/10/01): the packet size plus the 4-byte CRC must
+ * be a multiple of 8, so pad the length up when needed (e.g. a
+ * 62-byte frame is padded to 68 bytes; 68 + 4 = 72).
+ */
+ if (((length + 4) % 8) != 0)
+ length += (8 - ((length + 4) % 8));
+
+ memset(pkt_buf, 0x00, sizeof(pkt_buf));
+
+ /* add 8-byte header_A at the beginning of packet */
+ memcpy(&pkt_buf[HEADER_A_SIZE], (const void *)packet, length);
+
+ pad = 64 - (length + 4); /* if packet length < 60 */
+ pad = (pad < 0) ? 0 : pad;
+
+ debug("%s: length=%d, pad=%d\n", __func__, length, pad);
+
+ new_pkt_len = length + pad; /* new packet length */
+
+ pkt_buf_ptr = (u8 *)pkt_buf;
+
+ /* Calculate the CRC32, skip 8-byte header_A */
+ ca_crc32 = crc32(0, (u8 *)(pkt_buf_ptr + HEADER_A_SIZE), new_pkt_len);
+
+ debug("%s: crc32 is 0x%x\n", __func__, ca_crc32);
+ debug("%s: ~crc32 is 0x%x\n", __func__, ~ca_crc32);
+ debug("%s: pkt len %d\n", __func__, new_pkt_len);
+ /* should add 8-byte header_A */
+ /* CRC will be recalculated by the hardware */
+ memcpy((pkt_buf_ptr + new_pkt_len + HEADER_A_SIZE),
+ (u8 *)(&ca_crc32), sizeof(ca_crc32));
+ new_pkt_len = new_pkt_len + 4; /* add CRC */
+
+ valid_bytes = new_pkt_len % 8;
+ valid_bytes = valid_bytes ? valid_bytes : 0;
+ debug("%s: valid_bytes %d\n", __func__, valid_bytes);
+
+ /* should add 8-byte header_A */
+ next_link = sw_tx_wr_ptr +
+ (new_pkt_len + 7 + HEADER_A_SIZE) / 8; /* for header XT */
+ /* add header */
+ next_link = next_link + 1;
+ /* Wrap around if required */
+ if (next_link > priv->tx_xram_end) {
+ next_link = priv->tx_xram_start +
+ (next_link - (priv->tx_xram_end + 1));
+ }
+
+ debug("%s: TX next_link %x\n", __func__, next_link);
+ memset(&hdr_xt, 0, sizeof(hdr_xt));
+ hdr_xt.ownership = 1;
+ hdr_xt.bytes_valid = valid_bytes;
+ hdr_xt.next_link = next_link;
+
+ tx_xram_ptr = (u32 *)((unsigned long)priv->ni_xram_base
+ + sw_tx_wr_ptr * 8);
+
+ /* Wrap around if required */
+ if (tx_xram_ptr >= (u32 *)(unsigned long)priv->tx_xram_end_adr)
+ tx_xram_ptr = (u32 *)(unsigned long)priv->tx_xram_base_adr;
+
+ tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
+ priv->tx_xram_base_adr,
+ priv->tx_xram_end_adr);
+
+ memcpy(tx_xram_ptr, &hdr_xt, sizeof(*tx_xram_ptr));
+
+ tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
+ priv->tx_xram_base_adr,
+ priv->tx_xram_end_adr);
+
+ /* Now to copy the data. The first byte on the line goes first */
+ data_ptr = (u32 *)pkt_buf_ptr;
+ debug("%s: packet data[]=", __func__);
+
+ /* copy header_A to XRAM */
+ for (loop = 0; loop <= (new_pkt_len + HEADER_A_SIZE) / 4; loop++) {
+ ptr = (u8 *)data_ptr;
+ if ((loop % 4) == 0)
+ debug("\n");
+ debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]-",
+ ptr[0], ptr[1], ptr[2], ptr[3]);
+
+ *tx_xram_ptr = *data_ptr++;
+ tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
+ priv->tx_xram_base_adr,
+ priv->tx_xram_end_adr);
+ }
+ debug("\n");
+
+ /* Publish the software write pointer */
+ memset(&cpuxram_cpu_cfg_tx, 0, sizeof(cpuxram_cpu_cfg_tx));
+ cpuxram_cpu_cfg_tx.pkt_wr_ptr = next_link;
+ ca_reg_write(&cpuxram_cpu_cfg_tx,
+ (u64)priv->ni_hv_base_addr,
+ NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);
+
+ return 0;
+}
+
+static void cortina_eth_stop(struct udevice *netdev)
+{
+ /* Nothing to do for now. */
+}
+
+static int cortina_eth_probe(struct udevice *dev)
+{
+ int ret, reg_value;
+ struct cortina_ni_priv *priv;
+
+ priv = dev_get_priv(dev);
+ priv->rx_xram_base_adr = priv->ni_xram_base + (RX_BASE_ADDR * 8);
+ priv->rx_xram_end_adr = priv->ni_xram_base + ((RX_TOP_ADDR + 1) * 8);
+ priv->rx_xram_start = RX_BASE_ADDR;
+ priv->rx_xram_end = RX_TOP_ADDR;
+ priv->tx_xram_base_adr = priv->ni_xram_base + (TX_BASE_ADDR * 8);
+ priv->tx_xram_end_adr = priv->ni_xram_base + ((TX_TOP_ADDR + 1) * 8);
+ priv->tx_xram_start = TX_BASE_ADDR;
+ priv->tx_xram_end = TX_TOP_ADDR;
+
+ curr_dev = dev;
+ debug("%s: rx_base_addr:%x\t rx_top_addr %x\n",
+ __func__, priv->rx_xram_start, priv->rx_xram_end);
+ debug("%s: tx_base_addr:%x\t tx_top_addr %x\n",
+ __func__, priv->tx_xram_start, priv->tx_xram_end);
+ debug("%s: rx physical start address = %x end address = %x\n",
+ __func__, priv->rx_xram_base_adr, priv->rx_xram_end_adr);
+ debug("%s: tx physical start address = %x end address = %x\n",
+ __func__, priv->tx_xram_base_adr, priv->tx_xram_end_adr);
+
+ /* MDIO register */
+ ret = ca_mdio_register(dev);
+ if (ret)
+ return ret;
+
+ /* set MDIO pre-scale value */
+ ca_reg_read(&reg_value, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CFG_OFFSET);
+ reg_value = reg_value | 0x00280000;
+ ca_reg_write(&reg_value, (u64)priv->per_mdio_base_addr,
+ PER_MDIO_CFG_OFFSET);
+
+ ret = ca_phy_probe(dev);
+ if (ret)
+ return ret;
+
+ priv->phydev->addr = priv->port_map[priv->active_port].phy_addr;
+
+ ca_ni_led(priv->active_port, CA_LED_ON);
+
+ ca_ni_reset();
+
+ printf("CA NI %s: active_port=%d, phy_addr=%d\n",
+ __func__, priv->active_port, priv->phydev->addr);
+ printf("CA NI %s: phy_id=0x%x, phy_id & PHY_ID_MASK=0x%x\n", __func__,
+ priv->phydev->phy_id, priv->phydev->phy_id & 0xFFFFFFF0);
+
+ /* parsing ethaddr and set to NI registers. */
+ ca_ni_setup_mac_addr();
+
+#ifdef MIIPHY_REGISTER
+ /* the phy_read and phy_write
+ * should meet the proto type of miiphy_register
+ */
+ miiphy_register(dev->name, ca_miiphy_read, ca_miiphy_write);
+#endif
+
+ if (priv->init_rgmii) {
+ /* hardware settings for RGMII port */
+ ca_rgmii_init(priv);
+ }
+
+ if (priv->gphy_num > 0) {
+ /* do internal gphy calibration */
+ ca_internal_gphy_cal(priv);
+ }
+ return 0;
+}
+
+static int ca_ni_of_to_plat(struct udevice *dev)
+{
+ int i, ret;
+ struct cortina_ni_priv *priv = dev_get_priv(dev);
+
+ memset(priv, 0, sizeof(struct cortina_ni_priv));
+ priv->glb_base_addr = dev_remap_addr_index(dev, 0);
+ if (!priv->glb_base_addr)
+ return -ENOENT;
+ printf("CA NI %s: priv->glb_base_addr for index 0 is 0x%p\n",
+ __func__, priv->glb_base_addr);
+
+ priv->per_mdio_base_addr = dev_remap_addr_index(dev, 1);
+ if (!priv->per_mdio_base_addr)
+ return -ENOENT;
+ printf("CA NI %s: priv->per_mdio_base_addr for index 1 is 0x%p\n",
+ __func__, priv->per_mdio_base_addr);
+
+ priv->ni_hv_base_addr = dev_remap_addr_index(dev, 2);
+ if (!priv->ni_hv_base_addr)
+ return -ENOENT;
+ printf("CA NI %s: priv->ni_hv_base_addr for index 2 is 0x%p\n",
+ __func__, priv->ni_hv_base_addr);
+
+ priv->valid_port_map = dev_read_u32_default(dev, "valid-port-map", 1);
+ priv->valid_port_num = dev_read_u32_default(dev, "valid-port-num", 1);
+
+ for (i = 0; i < priv->valid_port_num; i++) {
+ ret = dev_read_u32_index(dev, "valid-ports", i * 2,
+ &priv->port_map[i].phy_addr);
+ ret = dev_read_u32_index(dev, "valid-ports", (i * 2) + 1,
+ &priv->port_map[i].port);
+ }
+
+ priv->gphy_num = dev_read_u32_default(dev, "inter-gphy-num", 1);
+ for (i = 0; i < priv->gphy_num; i++) {
+ ret = dev_read_u32_index(dev, "inter-gphy-val", i * 2,
+ &priv->gphy_values[i].reg_off);
+ ret = dev_read_u32_index(dev, "inter-gphy-val", (i * 2) + 1,
+ &priv->gphy_values[i].value);
+ }
+
+ priv->active_port = dev_read_u32_default(dev, "def-active-port", 1);
+ priv->init_rgmii = dev_read_u32_default(dev, "init-rgmii", 1);
+ priv->ni_xram_base = dev_read_u32_default(dev, "ni-xram-base", 1);
+ return 0;
+}
+
+static const struct eth_ops cortina_eth_ops = {
+ .start = cortina_eth_start,
+ .send = cortina_eth_send,
+ .recv = cortina_eth_recv,
+ .stop = cortina_eth_stop,
+};
+
+static const struct udevice_id cortina_eth_ids[] = {
+ { .compatible = "eth_cortina" },
+ { }
+};
+
+U_BOOT_DRIVER(eth_cortina) = {
+ .name = "eth_cortina",
+ .id = UCLASS_ETH,
+ .of_match = cortina_eth_ids,
+ .probe = cortina_eth_probe,
+ .ops = &cortina_eth_ops,
+ .priv_auto = sizeof(struct cortina_ni_priv),
+ .plat_auto = sizeof(struct eth_pdata),
+ .of_to_plat = ca_ni_of_to_plat,
+};
diff --git a/drivers/net/cortina_ni.h b/drivers/net/cortina_ni.h
new file mode 100644
index 0000000..0ced468
--- /dev/null
+++ b/drivers/net/cortina_ni.h
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Copyright (C) 2020 Cortina Access Inc.
+ * Author: Aaron Tseng <aaron.tseng@cortina-access.com>
+ *
+ * Ethernet MAC Driver for all supported CAxxxx SoCs
+ */
+
+#ifndef __CORTINA_NI_H
+#define __CORTINA_NI_H
+
+#include <asm/types.h>
+#include <asm/io.h>
+#include <config.h>
+
+#define GE_MAC_INTF_GMII 0x0
+#define GE_MAC_INTF_MII 0x1
+#define GE_MAC_INTF_RGMII_1000 0x2
+#define GE_MAC_INTF_RGMII_100 0x3
+
+/* Defines the base and top address in CPU XRAM
+ * for packets to cpu instance 0
+ * 0x300 * 8-byte = 6K-byte
+ */
+#define RX_TOP_ADDR 0x02FF
+#define RX_BASE_ADDR 0x0000
+
+/* Defines the base and top address in CPU XRAM
+ * for packets from cpu instance 0.
+ * 0x100 * 8-byte = 2K-byte
+ */
+#define TX_TOP_ADDR 0x03FF
+#define TX_BASE_ADDR 0x0300
+
+struct port_map_s {
+ u32 phy_addr;
+ u32 port;
+};
+
+struct gphy_cal_s {
+ u32 reg_off;
+ u32 value;
+};
+
+#if !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+struct cortina_ni_priv {
+ u32 ni_xram_base;
+ u32 rx_xram_base_adr;
+ u32 rx_xram_end_adr;
+ u16 rx_xram_start;
+ u16 rx_xram_end;
+ u32 tx_xram_base_adr;
+ u32 tx_xram_end_adr;
+ u16 tx_xram_start;
+ u16 tx_xram_end;
+ u32 valid_port_map;
+ u32 valid_port_num;
+ u32 init_rgmii;
+ u32 gphy_num;
+ struct port_map_s port_map[5];
+ struct gphy_cal_s gphy_values[10];
+ void __iomem *glb_base_addr;
+ void __iomem *per_mdio_base_addr;
+ void __iomem *ni_hv_base_addr;
+
+ struct mii_dev *mdio_bus;
+ struct phy_device *phydev;
+ int phy_interface;
+ int active_port;
+};
+
+struct NI_HEADER_X_T {
+ u32 next_link : 10; /* bits 9: 0 */
+ u32 bytes_valid : 4; /* bits 13:10 */
+ u32 reserved : 16; /* bits 29:14 */
+ u32 hdr_a : 1; /* bits 30:30 */
+ u32 ownership : 1; /* bits 31:31 */
+};
+
+struct NI_PACKET_STATUS {
+ u32 packet_size : 14; /* bits 13:0 */
+ u32 byte_valid : 4; /* bits 17:14 */
+ u32 pfc : 1; /* bits 18:18 */
+ u32 valid : 1; /* bits 19:19 */
+ u32 drop : 1; /* bits 20:20 */
+ u32 runt : 1; /* bits 21:21 */
+ u32 oversize : 1; /* bits 22:22 */
+ u32 jumbo : 1; /* bits 23:23 */
+ u32 link_status : 1; /* bits 24:24 */
+ u32 jabber : 1; /* bits 25:25 */
+ u32 crc_error : 1; /* bits 26:26 */
+ u32 pause : 1; /* bits 27:27 */
+ u32 oam : 1; /* bits 28:28 */
+ u32 unknown_opcode : 1; /* bits 29:29 */
+ u32 multicast : 1; /* bits 30:30 */
+ u32 broadcast : 1; /* bits 31:31 */
+};
+
+struct NI_MDIO_OPER_T {
+ u32 reserved : 2; /* bits 1:0 */
+ u32 reg_off : 5; /* bits 6:2 */
+ u32 phy_addr : 5; /* bits 11:7 */
+ u32 reg_base : 20; /* bits 31:12 */
+};
+
+#define __MDIO_WR_FLAG (0)
+#define __MDIO_RD_FLAG (1)
+#define __MDIO_ACCESS_TIMEOUT (1000000)
+#define CA_MDIO_ADDR_MIN (1)
+#define CA_MDIO_ADDR_MAX (31)
+
+#endif /* !__ASSEMBLER__ */
+
+/* HW REG */
+struct NI_HV_GLB_MAC_ADDR_CFG0_t {
+ u32 mac_addr0 : 32; /* bits 31:0 */
+};
+
+struct NI_HV_GLB_MAC_ADDR_CFG1_t {
+ u32 mac_addr1 : 8; /* bits 7:0 */
+ u32 rsrvd1 : 24;
+};
+
+struct NI_HV_PT_PORT_STATIC_CFG_t {
+ u32 int_cfg : 4; /* bits 3:0 */
+ u32 phy_mode : 1; /* bits 4:4 */
+ u32 rmii_clksrc : 1; /* bits 5:5 */
+ u32 inv_clk_in : 1; /* bits 6:6 */
+ u32 inv_clk_out : 1; /* bits 7:7 */
+ u32 inv_rxclk_out : 1; /* bits 8:8 */
+ u32 tx_use_gefifo : 1; /* bits 9:9 */
+ u32 smii_tx_stat : 1; /* bits 10:10 */
+ u32 crs_polarity : 1; /* bits 11:11 */
+ u32 lpbk_mode : 2; /* bits 13:12 */
+ u32 gmii_like_half_duplex_en : 1; /* bits 14:14 */
+ u32 sup_tx_to_rx_lpbk_data : 1; /* bits 15:15 */
+ u32 rsrvd1 : 8;
+ u32 mac_addr6 : 8; /* bits 31:24 */
+};
+
+struct NI_HV_XRAM_CPUXRAM_CFG_t {
+ u32 rx_0_cpu_pkt_dis : 1; /* bits 0:0 */
+ u32 rsrvd1 : 8;
+ u32 tx_0_cpu_pkt_dis : 1; /* bits 9:9 */
+ u32 rsrvd2 : 1;
+ u32 rx_x_drop_err_pkt : 1; /* bits 11:11 */
+ u32 xram_mgmt_dis_drop_ovsz_pkt : 1; /* bits 12:12 */
+ u32 xram_mgmt_term_large_pkt : 1; /* bits 13:13 */
+ u32 xram_mgmt_promisc_mode : 2; /* bits 15:14 */
+ u32 xram_cntr_debug_mode : 1; /* bits 16:16 */
+ u32 xram_cntr_op_code : 2; /* bits 18:17 */
+ u32 rsrvd3 : 2;
+ u32 xram_rx_mgmtfifo_srst : 1; /* bits 21:21 */
+ u32 xram_dma_fifo_srst : 1; /* bits 22:22 */
+ u32 rsrvd4 : 9;
+};
+
+struct NI_HV_PT_RXMAC_CFG_t {
+ u32 rx_en : 1; /* bits 0:0 */
+ u32 rsrvd1 : 7;
+ u32 rx_flow_disable : 1; /* bits 8:8 */
+ u32 rsrvd2 : 3;
+ u32 rx_flow_to_tx_en : 1; /* bits 12:12 */
+ u32 rx_pfc_disable : 1; /* bits 13:13 */
+ u32 rsrvd3 : 15;
+ u32 send_pg_data : 1; /* bits 29:29 */
+ u32 rsrvd4 : 2;
+};
+
+struct NI_HV_PT_TXMAC_CFG_t {
+ u32 tx_en : 1; /* bits 0:0 */
+ u32 rsrvd1 : 7;
+ u32 mac_crc_calc_en : 1; /* bits 8:8 */
+ u32 tx_ipg_sel : 3; /* bits 11:9 */
+ u32 tx_flow_disable : 1; /* bits 12:12 */
+ u32 tx_drain : 1; /* bits 13:13 */
+ u32 tx_pfc_disable : 1; /* bits 14:14 */
+ u32 tx_pau_sel : 2; /* bits 16:15 */
+ u32 rsrvd2 : 9;
+ u32 tx_auto_xon : 1; /* bits 26:26 */
+ u32 rsrvd3 : 1;
+ u32 pass_thru_hdr : 1; /* bits 28:28 */
+ u32 rsrvd4 : 3;
+};
+
+struct NI_HV_GLB_INTF_RST_CONFIG_t {
+ u32 intf_rst_p0 : 1; /* bits 0:0 */
+ u32 intf_rst_p1 : 1; /* bits 1:1 */
+ u32 intf_rst_p2 : 1; /* bits 2:2 */
+ u32 intf_rst_p3 : 1; /* bits 3:3 */
+ u32 intf_rst_p4 : 1; /* bits 4:4 */
+ u32 mac_rx_rst_p0 : 1; /* bits 5:5 */
+ u32 mac_rx_rst_p1 : 1; /* bits 6:6 */
+ u32 mac_rx_rst_p2 : 1; /* bits 7:7 */
+ u32 mac_rx_rst_p3 : 1; /* bits 8:8 */
+ u32 mac_rx_rst_p4 : 1; /* bits 9:9 */
+ u32 mac_tx_rst_p0 : 1; /* bits 10:10 */
+ u32 mac_tx_rst_p1 : 1; /* bits 11:11 */
+ u32 mac_tx_rst_p2 : 1; /* bits 12:12 */
+ u32 mac_tx_rst_p3 : 1; /* bits 13:13 */
+ u32 mac_tx_rst_p4 : 1; /* bits 14:14 */
+ u32 port_rst_p5 : 1; /* bits 15:15 */
+ u32 pcs_rst_p6 : 1; /* bits 16:16 */
+ u32 pcs_rst_p7 : 1; /* bits 17:17 */
+ u32 mac_rst_p6 : 1; /* bits 18:18 */
+ u32 mac_rst_p7 : 1; /* bits 19:19 */
+ u32 rsrvd1 : 12;
+};
+
+struct NI_HV_GLB_STATIC_CFG_t {
+ u32 port_to_cpu : 4; /* bits 3:0 */
+ u32 mgmt_pt_to_fe_also : 1; /* bits 4:4 */
+ u32 txcrc_chk_en : 1; /* bits 5:5 */
+ u32 p4_rgmii_tx_clk_phase : 2; /* bits 7:6 */
+ u32 p4_rgmii_tx_data_order : 1; /* bits 8:8 */
+ u32 rsrvd1 : 7;
+ u32 rxmib_mode : 1; /* bits 16:16 */
+ u32 txmib_mode : 1; /* bits 17:17 */
+ u32 eth_sch_rdy_pkt : 1; /* bits 18:18 */
+ u32 rsrvd2 : 1;
+ u32 rxaui_mode : 2; /* bits 21:20 */
+ u32 rxaui_sigdet : 2; /* bits 23:22 */
+ u32 cnt_op_mode : 3; /* bits 26:24 */
+ u32 rsrvd3 : 5;
+};
+
+struct GLOBAL_BLOCK_RESET_t {
+ u32 reset_ni : 1; /* bits 0:0 */
+ u32 reset_l2fe : 1; /* bits 1:1 */
+ u32 reset_l2tm : 1; /* bits 2:2 */
+ u32 reset_l3fe : 1; /* bits 3:3 */
+ u32 reset_sdram : 1; /* bits 4:4 */
+ u32 reset_tqm : 1; /* bits 5:5 */
+ u32 reset_pcie0 : 1; /* bits 6:6 */
+ u32 reset_pcie1 : 1; /* bits 7:7 */
+ u32 reset_pcie2 : 1; /* bits 8:8 */
+ u32 reset_sata : 1; /* bits 9:9 */
+ u32 reset_gic400 : 1; /* bits 10:10 */
+ u32 rsrvd1 : 2;
+ u32 reset_usb : 1; /* bits 13:13 */
+ u32 reset_flash : 1; /* bits 14:14 */
+ u32 reset_per : 1; /* bits 15:15 */
+ u32 reset_dma : 1; /* bits 16:16 */
+ u32 reset_rtc : 1; /* bits 17:17 */
+ u32 reset_pe0 : 1; /* bits 18:18 */
+ u32 reset_pe1 : 1; /* bits 19:19 */
+ u32 reset_rcpu0 : 1; /* bits 20:20 */
+ u32 reset_rcpu1 : 1; /* bits 21:21 */
+ u32 reset_sadb : 1; /* bits 22:22 */
+ u32 rsrvd2 : 1;
+ u32 reset_rcrypto : 1; /* bits 24:24 */
+ u32 reset_ldma : 1; /* bits 25:25 */
+ u32 reset_fbm : 1; /* bits 26:26 */
+ u32 reset_eaxi : 1; /* bits 27:27 */
+ u32 reset_sd : 1; /* bits 28:28 */
+ u32 reset_otprom : 1; /* bits 29:29 */
+ u32 rsrvd3 : 2;
+};
+
+struct PER_MDIO_ADDR_t {
+ u32 mdio_addr : 5; /* bits 4:0 */
+ u32 rsrvd1 : 3;
+ u32 mdio_offset : 5; /* bits 12:8 */
+ u32 rsrvd2 : 2;
+ u32 mdio_rd_wr : 1; /* bits 15:15 */
+ u32 mdio_st : 1; /* bits 16:16 */
+ u32 rsrvd3 : 1;
+ u32 mdio_op : 2; /* bits 19:18 */
+ u32 rsrvd4 : 12;
+};
+
+struct PER_MDIO_CTRL_t {
+ u32 mdiodone : 1; /* bits 0:0 */
+ u32 rsrvd1 : 6;
+ u32 mdiostart : 1; /* bits 7:7 */
+ u32 rsrvd2 : 24;
+};
+
+struct PER_MDIO_RDDATA_t {
+ u32 mdio_rddata : 16; /* bits 15:0 */
+ u32 rsrvd1 : 16;
+};
+
+/* XRAM */
+
+struct NI_HV_XRAM_CPUXRAM_ADRCFG_RX_t {
+ u32 rx_base_addr : 10; /* bits 9:0 */
+ u32 rsrvd1 : 6;
+ u32 rx_top_addr : 10; /* bits 25:16 */
+ u32 rsrvd2 : 6;
+};
+
+struct NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_t {
+ u32 tx_base_addr : 10; /* bits 9:0 */
+ u32 rsrvd1 : 6;
+ u32 tx_top_addr : 10; /* bits 25:16 */
+ u32 rsrvd2 : 6;
+};
+
+struct NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_t {
+ u32 pkt_wr_ptr : 10; /* bits 9:0 */
+ u32 rsrvd1 : 5;
+ u32 int_colsc_thresh_reached : 1; /* bits 15:15 */
+ u32 rsrvd2 : 16;
+};
+
+struct NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_t {
+ u32 pkt_rd_ptr : 10; /* bits 9:0 */
+ u32 rsrvd1 : 22;
+};
+
+struct NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_t {
+ u32 pkt_wr_ptr : 10; /* bits 9:0 */
+ u32 rsrvd1 : 22;
+};
+
+struct GLOBAL_GLOBAL_CONFIG_t {
+ u32 rsrvd1 : 4;
+ u32 wd_reset_subsys_enable : 1; /* bits 4:4 */
+ u32 rsrvd2 : 1;
+ u32 wd_reset_all_blocks : 1; /* bits 6:6 */
+ u32 wd_reset_remap : 1; /* bits 7:7 */
+ u32 wd_reset_ext_reset : 1; /* bits 8:8 */
+ u32 ext_reset : 1; /* bits 9:9 */
+ u32 cfg_pcie_0_clken : 1; /* bits 10:10 */
+ u32 cfg_sata_clken : 1; /* bits 11:11 */
+ u32 cfg_pcie_1_clken : 1; /* bits 12:12 */
+ u32 rsrvd3 : 1;
+ u32 cfg_pcie_2_clken : 1; /* bits 14:14 */
+ u32 rsrvd4 : 2;
+ u32 ext_eth_refclk : 1; /* bits 17:17 */
+ u32 refclk_sel : 2; /* bits 19:18 */
+ u32 rsrvd5 : 7;
+ u32 l3fe_pd : 1; /* bits 27:27 */
+ u32 offload0_pd : 1; /* bits 28:28 */
+ u32 offload1_pd : 1; /* bits 29:29 */
+ u32 crypto_pd : 1; /* bits 30:30 */
+ u32 core_pd : 1; /* bits 31:31 */
+};
+
+struct GLOBAL_IO_DRIVE_CONTROL_t {
+ u32 gmac_dp : 3; /* bits 2:0 */
+ u32 gmac_dn : 3; /* bits 5:3 */
+ u32 gmac_mode : 2; /* bits 7:6 */
+ u32 gmac_ds : 1; /* bits 8:8 */
+ u32 flash_ds : 1; /* bits 9:9 */
+ u32 nu_ds : 1; /* bits 10:10 */
+ u32 ssp_ds : 1; /* bits 11:11 */
+ u32 spi_ds : 1; /* bits 12:12 */
+ u32 gpio_ds : 1; /* bits 13:13 */
+ u32 misc_ds : 1; /* bits 14:14 */
+ u32 eaxi_ds : 1; /* bits 15:15 */
+ u32 sd_ds : 8; /* bits 23:16 */
+ u32 rsrvd1 : 8;
+};
+
+struct NI_HV_GLB_INIT_DONE_t {
+ u32 rsrvd1 : 1;
+ u32 ni_init_done : 1; /* bits 1:1 */
+ u32 rsrvd2 : 30;
+};
+
+struct NI_HV_PT_PORT_GLB_CFG_t {
+ u32 speed : 1; /* bits 0:0 */
+ u32 duplex : 1; /* bits 1:1 */
+ u32 link_status : 1; /* bits 2:2 */
+ u32 link_stat_mask : 1; /* bits 3:3 */
+ u32 rsrvd1 : 7;
+ u32 power_dwn_rx : 1; /* bits 11:11 */
+ u32 power_dwn_tx : 1; /* bits 12:12 */
+ u32 tx_intf_lp_time : 1; /* bits 13:13 */
+ u32 rsrvd2 : 18;
+};
+
+#define NI_HV_GLB_INIT_DONE_OFFSET 0x004
+#define NI_HV_GLB_INTF_RST_CONFIG_OFFSET 0x008
+#define NI_HV_GLB_STATIC_CFG_OFFSET 0x00c
+
+#define NI_HV_PT_PORT_STATIC_CFG_OFFSET NI_HV_PT_BASE
+#define NI_HV_PT_PORT_GLB_CFG_OFFSET (0x4 + NI_HV_PT_BASE)
+#define NI_HV_PT_RXMAC_CFG_OFFSET (0x8 + NI_HV_PT_BASE)
+#define NI_HV_PT_TXMAC_CFG_OFFSET (0x14 + NI_HV_PT_BASE)
+
+#define NI_HV_XRAM_CPUXRAM_ADRCFG_RX_OFFSET NI_HV_XRAM_BASE
+#define NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_OFFSET (0x4 + NI_HV_XRAM_BASE)
+#define NI_HV_XRAM_CPUXRAM_CFG_OFFSET (0x8 + NI_HV_XRAM_BASE)
+#define NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET (0xc + NI_HV_XRAM_BASE)
+#define NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET (0x10 + NI_HV_XRAM_BASE)
+#define NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET (0x24 + NI_HV_XRAM_BASE)
+#define NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET (0x28 + NI_HV_XRAM_BASE)
+
+#define PER_MDIO_CFG_OFFSET 0x00
+#define PER_MDIO_ADDR_OFFSET 0x04
+#define PER_MDIO_WRDATA_OFFSET 0x08
+#define PER_MDIO_RDDATA_OFFSET 0x0C
+#define PER_MDIO_CTRL_OFFSET 0x10
+
+#define APB0_NI_HV_PT_STRIDE 160
+
+#endif /* __CORTINA_NI_H */