Diffstat (limited to 'drivers/net/ti')
-rw-r--r--  drivers/net/ti/Kconfig         |   20
-rw-r--r--  drivers/net/ti/Makefile        |    7
-rw-r--r--  drivers/net/ti/cpsw-common.c   |  121
-rw-r--r--  drivers/net/ti/cpsw.c          | 1378
-rw-r--r--  drivers/net/ti/cpsw_mdio.c     |  203
-rw-r--r--  drivers/net/ti/cpsw_mdio.h     |   18
-rw-r--r--  drivers/net/ti/davinci_emac.c  |  901
-rw-r--r--  drivers/net/ti/davinci_emac.h  |  304
-rw-r--r--  drivers/net/ti/keystone_net.c  |  801
9 files changed, 3753 insertions, 0 deletions
diff --git a/drivers/net/ti/Kconfig b/drivers/net/ti/Kconfig
new file mode 100644
index 0000000000..82bc9f5d03
--- /dev/null
+++ b/drivers/net/ti/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+
+config DRIVER_TI_CPSW
+ bool "TI Common Platform Ethernet Switch"
+ select PHYLIB
+ help
+ This driver supports the TI three-port switch gigabit Ethernet
+ subsystem found in TI SoCs.
+
+config DRIVER_TI_EMAC
+ bool "TI Davinci EMAC"
+ help
+ Support for the TI DaVinci EMAC.
+
+config DRIVER_TI_KEYSTONE_NET
+ bool "TI Keystone 2 Ethernet"
+ help
+ This driver supports the TI Keystone 2 Ethernet subsystem
diff --git a/drivers/net/ti/Makefile b/drivers/net/ti/Makefile
new file mode 100644
index 0000000000..ee3e4eb5d6
--- /dev/null
+++ b/drivers/net/ti/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+
+obj-$(CONFIG_DRIVER_TI_CPSW) += cpsw.o cpsw-common.o cpsw_mdio.o
+obj-$(CONFIG_DRIVER_TI_EMAC) += davinci_emac.o
+obj-$(CONFIG_DRIVER_TI_KEYSTONE_NET) += keystone_net.o cpsw_mdio.o
diff --git a/drivers/net/ti/cpsw-common.c b/drivers/net/ti/cpsw-common.c
new file mode 100644
index 0000000000..6c8ddbd936
--- /dev/null
+++ b/drivers/net/ti/cpsw-common.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPSW common - libs used across TI ethernet devices.
+ *
+ * Copyright (C) 2016, Texas Instruments, Incorporated
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <environment.h>
+#include <fdt_support.h>
+#include <asm/io.h>
+#include <cpsw.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define CTRL_MAC_REG(offset, id) ((offset) + 0x8 * (id))
+
+static int davinci_emac_3517_get_macid(struct udevice *dev, u16 offset,
+ int slave, u8 *mac_addr)
+{
+ void *fdt = (void *)gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ u32 macid_lsb;
+ u32 macid_msb;
+ fdt32_t gmii = 0;
+ int syscon;
+ u32 addr;
+
+ syscon = fdtdec_lookup_phandle(fdt, node, "syscon");
+ if (syscon < 0) {
+ pr_err("Syscon offset not found\n");
+ return -ENOENT;
+ }
+
+ addr = (u32)map_physmem(fdt_translate_address(fdt, syscon, &gmii),
+ sizeof(u32), MAP_NOCACHE);
+ if (addr == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get syscon address to get mac efuse address\n");
+ return -ENOENT;
+ }
+
+ addr += CTRL_MAC_REG(offset, slave);
+
+ /* try reading mac address from efuse */
+ macid_lsb = readl(addr);
+ macid_msb = readl(addr + 4);
+
+ mac_addr[0] = (macid_msb >> 16) & 0xff;
+ mac_addr[1] = (macid_msb >> 8) & 0xff;
+ mac_addr[2] = macid_msb & 0xff;
+ mac_addr[3] = (macid_lsb >> 16) & 0xff;
+ mac_addr[4] = (macid_lsb >> 8) & 0xff;
+ mac_addr[5] = macid_lsb & 0xff;
+
+ return 0;
+}
+
+static int cpsw_am33xx_cm_get_macid(struct udevice *dev, u16 offset, int slave,
+ u8 *mac_addr)
+{
+ void *fdt = (void *)gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ u32 macid_lo;
+ u32 macid_hi;
+ fdt32_t gmii = 0;
+ int syscon;
+ u32 addr;
+
+ syscon = fdtdec_lookup_phandle(fdt, node, "syscon");
+ if (syscon < 0) {
+ pr_err("Syscon offset not found\n");
+ return -ENOENT;
+ }
+
+ addr = (u32)map_physmem(fdt_translate_address(fdt, syscon, &gmii),
+ sizeof(u32), MAP_NOCACHE);
+ if (addr == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get syscon address to get mac efuse address\n");
+ return -ENOENT;
+ }
+
+ addr += CTRL_MAC_REG(offset, slave);
+
+ /* try reading mac address from efuse */
+ macid_lo = readl(addr);
+ macid_hi = readl(addr + 4);
+
+ mac_addr[5] = (macid_lo >> 8) & 0xff;
+ mac_addr[4] = macid_lo & 0xff;
+ mac_addr[3] = (macid_hi >> 24) & 0xff;
+ mac_addr[2] = (macid_hi >> 16) & 0xff;
+ mac_addr[1] = (macid_hi >> 8) & 0xff;
+ mac_addr[0] = macid_hi & 0xff;
+
+ return 0;
+}
+
+int ti_cm_get_macid(struct udevice *dev, int slave, u8 *mac_addr)
+{
+ if (of_machine_is_compatible("ti,dm8148"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,am33xx"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (device_is_compatible(dev, "ti,am3517-emac"))
+ return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr);
+
+ if (device_is_compatible(dev, "ti,dm816-emac"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,am43"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,dra7"))
+ return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
+
+ dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ return -ENOENT;
+}
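ti_cm_get_macid() is the only function this file exports; a caller passes the device, a slave index, and a 6-byte buffer and receives the MAC address programmed into the control-module efuse. A minimal sketch of such a caller follows; the wrapper name is hypothetical, while ti_cm_get_macid() (its prototype lives with the CPSW platform data declarations, outside this diff), struct eth_pdata and dev_get_platdata() are the same helpers the CPSW probe code below relies on.

/*
 * Illustrative only: seed a DM Ethernet device's MAC address from the
 * control-module efuse. The function name is made up for this sketch.
 */
#include <common.h>
#include <dm.h>
#include <net.h>
#include <cpsw.h>

static int example_seed_mac_from_efuse(struct udevice *dev, int slave)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret;

	ret = ti_cm_get_macid(dev, slave, pdata->enetaddr);
	if (ret < 0)
		pr_err("reading MAC address from efuse failed (%d)\n", ret);

	return ret;
}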
diff --git a/drivers/net/ti/cpsw.c b/drivers/net/ti/cpsw.c
new file mode 100644
index 0000000000..f5fd02efe1
--- /dev/null
+++ b/drivers/net/ti/cpsw.c
@@ -0,0 +1,1378 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPSW Ethernet Switch Driver
+ *
+ * Copyright (C) 2010-2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <common.h>
+#include <command.h>
+#include <net.h>
+#include <miiphy.h>
+#include <malloc.h>
+#include <net.h>
+#include <netdev.h>
+#include <cpsw.h>
+#include <linux/errno.h>
+#include <asm/gpio.h>
+#include <asm/io.h>
+#include <phy.h>
+#include <asm/arch/cpu.h>
+#include <dm.h>
+#include <fdt_support.h>
+
+#include "cpsw_mdio.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define BITMASK(bits) (BIT(bits) - 1)
+#define NUM_DESCS (PKTBUFSRX * 2)
+#define PKT_MIN 60
+#define PKT_MAX (1500 + 14 + 4 + 4)
+#define CLEAR_BIT 1
+#define GIGABITEN BIT(7)
+#define FULLDUPLEXEN BIT(0)
+#define MIIEN BIT(15)
+
+/* reg offset */
+#define CPSW_HOST_PORT_OFFSET 0x108
+#define CPSW_SLAVE0_OFFSET 0x208
+#define CPSW_SLAVE1_OFFSET 0x308
+#define CPSW_SLAVE_SIZE 0x100
+#define CPSW_CPDMA_OFFSET 0x800
+#define CPSW_HW_STATS 0x900
+#define CPSW_STATERAM_OFFSET 0xa00
+#define CPSW_CPTS_OFFSET 0xc00
+#define CPSW_ALE_OFFSET 0xd00
+#define CPSW_SLIVER0_OFFSET 0xd80
+#define CPSW_SLIVER1_OFFSET 0xdc0
+#define CPSW_BD_OFFSET 0x2000
+#define CPSW_MDIO_DIV 0xff
+
+#define AM335X_GMII_SEL_OFFSET 0x630
+
+/* DMA Registers */
+#define CPDMA_TXCONTROL 0x004
+#define CPDMA_RXCONTROL 0x014
+#define CPDMA_SOFTRESET 0x01c
+#define CPDMA_RXFREE 0x0e0
+#define CPDMA_TXHDP_VER1 0x100
+#define CPDMA_TXHDP_VER2 0x200
+#define CPDMA_RXHDP_VER1 0x120
+#define CPDMA_RXHDP_VER2 0x220
+#define CPDMA_TXCP_VER1 0x140
+#define CPDMA_TXCP_VER2 0x240
+#define CPDMA_RXCP_VER1 0x160
+#define CPDMA_RXCP_VER2 0x260
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define CPDMA_TIMEOUT 100 /* msecs */
+
+struct cpsw_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+};
+
+struct cpsw_slave_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 flow_thresh;
+ u32 port_vlan;
+ u32 tx_pri_map;
+#ifdef CONFIG_AM33XX
+ u32 gap_thresh;
+#elif defined(CONFIG_TI814X)
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+#endif
+ u32 sa_lo;
+ u32 sa_hi;
+};
+
+struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 flow_thresh;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+ u32 cpdma_rx_chan_map;
+};
+
+struct cpsw_sliver_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 __reserved_1;
+ u32 rx_pri_map;
+};
+
+#define ALE_ENTRY_BITS 68
+#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
+/* ALE Registers */
+#define ALE_CONTROL 0x08
+#define ALE_UNKNOWNVLAN 0x18
+#define ALE_TABLE_CONTROL 0x20
+#define ALE_TABLE 0x34
+#define ALE_PORTCTL 0x40
+
+#define ALE_TABLE_WRITE BIT(31)
+
+#define ALE_TYPE_FREE 0
+#define ALE_TYPE_ADDR 1
+#define ALE_TYPE_VLAN 2
+#define ALE_TYPE_VLAN_ADDR 3
+
+#define ALE_UCAST_PERSISTANT 0
+#define ALE_UCAST_UNTOUCHED 1
+#define ALE_UCAST_OUI 2
+#define ALE_UCAST_TOUCHED 3
+
+#define ALE_MCAST_FWD 0
+#define ALE_MCAST_BLOCK_LEARN_FWD 1
+#define ALE_MCAST_FWD_LEARN 2
+#define ALE_MCAST_FWD_2 3
+
+enum cpsw_ale_port_state {
+ ALE_PORT_STATE_DISABLE = 0x00,
+ ALE_PORT_STATE_BLOCK = 0x01,
+ ALE_PORT_STATE_LEARN = 0x02,
+ ALE_PORT_STATE_FORWARD = 0x03,
+};
+
+/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
+#define ALE_SECURE 1
+#define ALE_BLOCKED 2
+
+struct cpsw_slave {
+ struct cpsw_slave_regs *regs;
+ struct cpsw_sliver_regs *sliver;
+ int slave_num;
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+};
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_chan {
+ struct cpdma_desc *head, *tail;
+ void *hdp, *cp, *rxfree;
+};
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII 0
+#define AM33XX_GMII_SEL_MODE_RMII 1
+#define AM33XX_GMII_SEL_MODE_RGMII 2
+
+#define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
+#define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
+
+#define GMII_SEL_MODE_MASK 0x3
+
+#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))
+
+#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
+
+#define for_active_slave(slave, priv) \
+ slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
+#define for_each_slave(slave, priv) \
+ for (slave = (priv)->slaves; slave != (priv)->slaves + \
+ (priv)->data.slaves; slave++)
+
+struct cpsw_priv {
+#ifdef CONFIG_DM_ETH
+ struct udevice *dev;
+#else
+ struct eth_device *dev;
+#endif
+ struct cpsw_platform_data data;
+ int host_port;
+
+ struct cpsw_regs *regs;
+ void *dma_regs;
+ struct cpsw_host_regs *host_port_regs;
+ void *ale_regs;
+
+ struct cpdma_desc *descs;
+ struct cpdma_desc *desc_free;
+ struct cpdma_chan rx_chan, tx_chan;
+
+ struct cpsw_slave *slaves;
+ struct phy_device *phydev;
+ struct mii_dev *bus;
+
+ u32 phy_mask;
+};
+
+static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+{
+ int idx;
+
+ idx = start / 32;
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ return (ale_entry[idx] >> start) & BITMASK(bits);
+}
+
+static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ u32 value)
+{
+ int idx;
+
+ value &= BITMASK(bits);
+ idx = start / 32;
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ ale_entry[idx] &= ~(BITMASK(bits) << start);
+ ale_entry[idx] |= (value << start);
+}
+
+#define DEFINE_ALE_FIELD(name, start, bits) \
+static inline int cpsw_ale_get_##name(u32 *ale_entry) \
+{ \
+ return cpsw_ale_get_field(ale_entry, start, bits); \
+} \
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
+{ \
+ cpsw_ale_set_field(ale_entry, start, bits, value); \
+}
+
+DEFINE_ALE_FIELD(entry_type, 60, 2)
+DEFINE_ALE_FIELD(mcast_state, 62, 2)
+DEFINE_ALE_FIELD(port_mask, 66, 3)
+DEFINE_ALE_FIELD(ucast_type, 62, 2)
+DEFINE_ALE_FIELD(port_num, 66, 2)
+DEFINE_ALE_FIELD(blocked, 65, 1)
+DEFINE_ALE_FIELD(secure, 64, 1)
+DEFINE_ALE_FIELD(mcast, 40, 1)
+
+/* The MAC address field in the ALE entry cannot be macroized as above */
+static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
+}
+
+static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
+}
+
+static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
+{
+ int i;
+
+ __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
+
+ return idx;
+}
+
+static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
+{
+ int i;
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
+
+ __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
+
+ return idx;
+}
+
+static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < priv->data.ale_entries; idx++) {
+ u8 entry_addr[6];
+
+ cpsw_ale_read(priv, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ cpsw_ale_get_addr(ale_entry, entry_addr);
+ if (memcmp(entry_addr, addr, 6) == 0)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_match_free(struct cpsw_priv *priv)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < priv->data.ale_entries; idx++) {
+ cpsw_ale_read(priv, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type == ALE_TYPE_FREE)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < priv->data.ale_entries; idx++) {
+ cpsw_ale_read(priv, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ if (cpsw_ale_get_mcast(ale_entry))
+ continue;
+ type = cpsw_ale_get_ucast_type(ale_entry);
+ if (type != ALE_UCAST_PERSISTANT &&
+ type != ALE_UCAST_OUI)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
+ int port, int flags)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
+ cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
+ cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_port_num(ale_entry, port);
+
+ idx = cpsw_ale_match_addr(priv, addr);
+ if (idx < 0)
+ idx = cpsw_ale_match_free(priv);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(priv);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(priv, idx, ale_entry);
+ return 0;
+}
+
+static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
+ int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx, mask;
+
+ idx = cpsw_ale_match_addr(priv, addr);
+ if (idx >= 0)
+ cpsw_ale_read(priv, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
+
+ mask = cpsw_ale_get_port_mask(ale_entry);
+ port_mask |= mask;
+ cpsw_ale_set_port_mask(ale_entry, port_mask);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(priv);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(priv);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(priv, idx, ale_entry);
+ return 0;
+}
+
+static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
+{
+ u32 tmp, mask = BIT(bit);
+
+ tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
+ tmp &= ~mask;
+ tmp |= val ? mask : 0;
+ __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
+}
+
+#define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
+#define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
+#define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
+
+static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
+ int val)
+{
+ int offset = ALE_PORTCTL + 4 * port;
+ u32 tmp, mask = 0x3;
+
+ tmp = __raw_readl(priv->ale_regs + offset);
+ tmp &= ~mask;
+ tmp |= val & mask;
+ __raw_writel(tmp, priv->ale_regs + offset);
+}
+
+/* Set a self-clearing bit in a register, and wait for it to clear */
+static inline void setbit_and_wait_for_clear32(void *addr)
+{
+ __raw_writel(CLEAR_BIT, addr);
+ while (__raw_readl(addr) & CLEAR_BIT)
+ ;
+}
+
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+static void cpsw_set_slave_mac(struct cpsw_slave *slave,
+ struct cpsw_priv *priv)
+{
+#ifdef CONFIG_DM_ETH
+ struct eth_pdata *pdata = dev_get_platdata(priv->dev);
+
+ writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
+ writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
+#else
+ __raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
+ __raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
+#endif
+}
+
+static int cpsw_slave_update_link(struct cpsw_slave *slave,
+ struct cpsw_priv *priv, int *link)
+{
+ struct phy_device *phy;
+ u32 mac_control = 0;
+ int ret = -ENODEV;
+
+ phy = priv->phydev;
+ if (!phy)
+ goto out;
+
+ ret = phy_startup(phy);
+ if (ret)
+ goto out;
+
+ if (link)
+ *link = phy->link;
+
+ if (phy->link) { /* link up */
+ mac_control = priv->data.mac_control;
+ if (phy->speed == 1000)
+ mac_control |= GIGABITEN;
+ if (phy->duplex == DUPLEX_FULL)
+ mac_control |= FULLDUPLEXEN;
+ if (phy->speed == 100)
+ mac_control |= MIIEN;
+ }
+
+ if (mac_control == slave->mac_control)
+ goto out;
+
+ if (mac_control) {
+ printf("link up on port %d, speed %d, %s duplex\n",
+ slave->slave_num, phy->speed,
+ (phy->duplex == DUPLEX_FULL) ? "full" : "half");
+ } else {
+ printf("link down on port %d\n", slave->slave_num);
+ }
+
+ __raw_writel(mac_control, &slave->sliver->mac_control);
+ slave->mac_control = mac_control;
+
+out:
+ return ret;
+}
+
+static int cpsw_update_link(struct cpsw_priv *priv)
+{
+ int ret = -ENODEV;
+ struct cpsw_slave *slave;
+
+ for_active_slave(slave, priv)
+ ret = cpsw_slave_update_link(slave, priv, NULL);
+
+ return ret;
+}
+
+static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ u32 slave_port;
+
+ setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
+
+ /* setup priority mapping */
+ __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
+ __raw_writel(0x33221100, &slave->regs->tx_pri_map);
+
+ /* setup max packet size, and mac address */
+ __raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ /* enable forwarding */
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);
+
+ priv->phy_mask |= 1 << slave->data->phy_addr;
+}
+
+static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
+{
+ struct cpdma_desc *desc = priv->desc_free;
+
+ if (desc)
+ priv->desc_free = desc_read_ptr(desc, hw_next);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
+{
+ if (desc) {
+ desc_write(desc, hw_next, priv->desc_free);
+ priv->desc_free = desc;
+ }
+}
+
+static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
+ void *buffer, int len)
+{
+ struct cpdma_desc *desc, *prev;
+ u32 mode;
+
+ desc = cpdma_desc_alloc(priv);
+ if (!desc)
+ return -ENOMEM;
+
+ if (len < PKT_MIN)
+ len = PKT_MIN;
+
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ if (!chan->head) {
+ /* simple case - first packet enqueued */
+ chan->head = desc;
+ chan->tail = desc;
+ chan_write(chan, hdp, desc);
+ goto done;
+ }
+
+ /* not the first packet - enqueue at the tail */
+ prev = chan->tail;
+ desc_write(prev, hw_next, desc);
+ chan->tail = desc;
+
+ /* next check if EOQ has been triggered already */
+ if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
+ chan_write(chan, hdp, desc);
+
+done:
+ if (chan->rxfree)
+ chan_write(chan, rxfree, 1);
+ return 0;
+}
+
+static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
+ void **buffer, int *len)
+{
+ struct cpdma_desc *desc = chan->head;
+ u32 status;
+
+ if (!desc)
+ return -ENOENT;
+
+ status = desc_read(desc, hw_mode);
+
+ if (len)
+ *len = status & 0x7ff;
+
+ if (buffer)
+ *buffer = desc_read_ptr(desc, sw_buffer);
+
+ if (status & CPDMA_DESC_OWNER) {
+ if (chan_read(chan, hdp) == 0) {
+ if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
+ chan_write(chan, hdp, desc);
+ }
+
+ return -EBUSY;
+ }
+
+ chan->head = desc_read_ptr(desc, hw_next);
+ chan_write(chan, cp, desc);
+
+ cpdma_desc_free(priv, desc);
+ return 0;
+}
+
+static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
+{
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ /* soft reset the controller and initialize priv */
+ setbit_and_wait_for_clear32(&priv->regs->soft_reset);
+
+ /* initialize and reset the address lookup engine */
+ cpsw_ale_enable(priv, 1);
+ cpsw_ale_clear(priv, 1);
+ cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
+
+ /* setup host port priority mapping */
+ __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
+ __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
+
+ /* disable priority elevation and enable statistics on all ports */
+ __raw_writel(0, &priv->regs->ptype);
+
+ /* enable statistics collection on the host port, then on all ports */
+ __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
+ __raw_writel(0x7, &priv->regs->stat_port_en);
+
+ cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
+ cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);
+
+ for_active_slave(slave, priv)
+ cpsw_slave_init(slave, priv);
+
+ ret = cpsw_update_link(priv);
+ if (ret)
+ goto out;
+
+ /* init descriptor pool */
+ for (i = 0; i < NUM_DESCS; i++) {
+ desc_write(&priv->descs[i], hw_next,
+ (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
+ }
+ priv->desc_free = &priv->descs[0];
+
+ /* initialize channels */
+ if (priv->data.version == CPSW_CTRL_VERSION_2) {
+ memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
+ priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
+ priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
+ priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
+
+ memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
+ priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
+ priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
+ } else {
+ memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
+ priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
+ priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
+ priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
+
+ memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
+ priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
+ priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
+ }
+
+ /* clear dma state */
+ setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
+
+ if (priv->data.version == CPSW_CTRL_VERSION_2) {
+ for (i = 0; i < priv->data.channels; i++) {
+ __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
+ * i);
+ }
+ } else {
+ for (i = 0; i < priv->data.channels; i++) {
+ __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
+ * i);
+
+ }
+ }
+
+ __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
+ __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
+
+ /* submit rx descs */
+ for (i = 0; i < PKTBUFSRX; i++) {
+ ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
+ PKTSIZE);
+ if (ret < 0) {
+ printf("error %d submitting rx desc\n", ret);
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int cpsw_reap_completed_packets(struct cpsw_priv *priv)
+{
+ int timeout = CPDMA_TIMEOUT;
+
+ /* reap completed packets */
+ while (timeout-- &&
+ (cpdma_process(priv, &priv->tx_chan, NULL, NULL) >= 0))
+ ;
+
+ return timeout;
+}
+
+static void _cpsw_halt(struct cpsw_priv *priv)
+{
+ cpsw_reap_completed_packets(priv);
+
+ writel(0, priv->dma_regs + CPDMA_TXCONTROL);
+ writel(0, priv->dma_regs + CPDMA_RXCONTROL);
+
+ /* soft reset the controller and initialize priv */
+ setbit_and_wait_for_clear32(&priv->regs->soft_reset);
+
+ /* clear dma state */
+ setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
+
+}
+
+static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
+{
+ int timeout;
+
+ flush_dcache_range((unsigned long)packet,
+ (unsigned long)packet + ALIGN(length, PKTALIGN));
+
+ timeout = cpsw_reap_completed_packets(priv);
+ if (timeout == -1) {
+ printf("cpdma_process timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return cpdma_submit(priv, &priv->tx_chan, packet, length);
+}
+
+static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
+{
+ void *buffer;
+ int len;
+ int ret;
+
+ ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
+ if (ret < 0)
+ return ret;
+
+ invalidate_dcache_range((unsigned long)buffer,
+ (unsigned long)buffer + PKTSIZE_ALIGN);
+ *pkt = buffer;
+
+ return len;
+}
+
+static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
+ struct cpsw_priv *priv)
+{
+ void *regs = priv->regs;
+ struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
+ slave->slave_num = slave_num;
+ slave->data = data;
+ slave->regs = regs + data->slave_reg_ofs;
+ slave->sliver = regs + data->sliver_reg_ofs;
+}
+
+static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
+{
+ struct phy_device *phydev;
+ u32 supported = PHY_GBIT_FEATURES;
+
+ phydev = phy_connect(priv->bus,
+ slave->data->phy_addr,
+ priv->dev,
+ slave->data->phy_if);
+
+ if (!phydev)
+ return -1;
+
+ phydev->supported &= supported;
+ phydev->advertising = phydev->supported;
+
+#ifdef CONFIG_DM_ETH
+ if (slave->data->phy_of_handle)
+ phydev->node = offset_to_ofnode(slave->data->phy_of_handle);
+#endif
+
+ priv->phydev = phydev;
+ phy_config(phydev);
+
+ return 1;
+}
+
+static void cpsw_phy_addr_update(struct cpsw_priv *priv)
+{
+ struct cpsw_platform_data *data = &priv->data;
+ u16 alive = cpsw_mdio_get_alive(priv->bus);
+ int active = data->active_slave;
+ int new_addr = ffs(alive) - 1;
+
+ /*
+ * If there is only one phy alive and its address does not match
+ * that of active slave, then phy address can safely be updated.
+ */
+ if (hweight16(alive) == 1 &&
+ data->slave_data[active].phy_addr != new_addr) {
+ printf("Updated phy address for CPSW#%d, old: %d, new: %d\n",
+ active, data->slave_data[active].phy_addr, new_addr);
+ data->slave_data[active].phy_addr = new_addr;
+ }
+}
+
+int _cpsw_register(struct cpsw_priv *priv)
+{
+ struct cpsw_slave *slave;
+ struct cpsw_platform_data *data = &priv->data;
+ void *regs = (void *)data->cpsw_base;
+
+ priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
+ if (!priv->slaves) {
+ return -ENOMEM;
+ }
+
+ priv->host_port = data->host_port_num;
+ priv->regs = regs;
+ priv->host_port_regs = regs + data->host_port_reg_ofs;
+ priv->dma_regs = regs + data->cpdma_reg_ofs;
+ priv->ale_regs = regs + data->ale_reg_ofs;
+ priv->descs = (void *)regs + data->bd_ram_ofs;
+
+ int idx = 0;
+
+ for_each_slave(slave, priv) {
+ cpsw_slave_setup(slave, idx, priv);
+ idx = idx + 1;
+ }
+
+ priv->bus = cpsw_mdio_init(priv->dev->name, data->mdio_base, 0, 0);
+ if (!priv->bus)
+ return -EFAULT;
+
+ cpsw_phy_addr_update(priv);
+
+ for_active_slave(slave, priv)
+ cpsw_phy_init(priv, slave);
+
+ return 0;
+}
+
+#ifndef CONFIG_DM_ETH
+static int cpsw_init(struct eth_device *dev, bd_t *bis)
+{
+ struct cpsw_priv *priv = dev->priv;
+
+ return _cpsw_init(priv, dev->enetaddr);
+}
+
+static void cpsw_halt(struct eth_device *dev)
+{
+ struct cpsw_priv *priv = dev->priv;
+
+ return _cpsw_halt(priv);
+}
+
+static int cpsw_send(struct eth_device *dev, void *packet, int length)
+{
+ struct cpsw_priv *priv = dev->priv;
+
+ return _cpsw_send(priv, packet, length);
+}
+
+static int cpsw_recv(struct eth_device *dev)
+{
+ struct cpsw_priv *priv = dev->priv;
+ uchar *pkt = NULL;
+ int len;
+
+ len = _cpsw_recv(priv, &pkt);
+
+ if (len > 0) {
+ net_process_received_packet(pkt, len);
+ cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
+ }
+
+ return len;
+}
+
+int cpsw_register(struct cpsw_platform_data *data)
+{
+ struct cpsw_priv *priv;
+ struct eth_device *dev;
+ int ret;
+
+ dev = calloc(sizeof(*dev), 1);
+ if (!dev)
+ return -ENOMEM;
+
+ priv = calloc(sizeof(*priv), 1);
+ if (!priv) {
+ free(dev);
+ return -ENOMEM;
+ }
+
+ priv->dev = dev;
+ priv->data = *data;
+
+ strcpy(dev->name, "cpsw");
+ dev->iobase = 0;
+ dev->init = cpsw_init;
+ dev->halt = cpsw_halt;
+ dev->send = cpsw_send;
+ dev->recv = cpsw_recv;
+ dev->priv = priv;
+
+ eth_register(dev);
+
+ ret = _cpsw_register(priv);
+ if (ret < 0) {
+ eth_unregister(dev);
+ free(dev);
+ free(priv);
+ return ret;
+ }
+
+ return 1;
+}
+#else
+static int cpsw_eth_start(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_init(priv, pdata->enetaddr);
+}
+
+static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_send(priv, packet, length);
+}
+
+static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_recv(priv, packetp);
+}
+
+static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
+ int length)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
+}
+
+static void cpsw_eth_stop(struct udevice *dev)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_halt(priv);
+}
+
+
+static int cpsw_eth_probe(struct udevice *dev)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ priv->dev = dev;
+
+ return _cpsw_register(priv);
+}
+
+static const struct eth_ops cpsw_eth_ops = {
+ .start = cpsw_eth_start,
+ .send = cpsw_eth_send,
+ .recv = cpsw_eth_recv,
+ .free_pkt = cpsw_eth_free_pkt,
+ .stop = cpsw_eth_stop,
+};
+
+static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
+{
+ return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
+ false);
+}
+
+static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
+ phy_interface_t phy_mode)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+ bool rgmii_id = false;
+ int slave = priv->data.active_slave;
+
+ reg = readl(priv->data.gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ rgmii_id = true;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ };
+
+ mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
+ mode <<= slave * 2;
+
+ if (priv->data.rmii_clock_external) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+ else
+ mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+ }
+
+ if (rgmii_id) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
+ else
+ mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
+ }
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->data.gmii_sel);
+}
+
+static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
+ phy_interface_t phy_mode)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+ int slave = priv->data.active_slave;
+
+ reg = readl(priv->data.gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ };
+
+ switch (slave) {
+ case 0:
+ mask = GMII_SEL_MODE_MASK;
+ break;
+ case 1:
+ mask = GMII_SEL_MODE_MASK << 4;
+ mode <<= 4;
+ break;
+ default:
+ dev_err(priv->dev, "invalid slave number...\n");
+ return;
+ }
+
+ if (priv->data.rmii_clock_external)
+ dev_err(priv->dev, "RMII External clock is not supported\n");
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->data.gmii_sel);
+}
+
+static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
+ phy_interface_t phy_mode)
+{
+ if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
+ cpsw_gmii_sel_am3352(priv, phy_mode);
+ if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
+ cpsw_gmii_sel_am3352(priv, phy_mode);
+ else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
+ cpsw_gmii_sel_dra7xx(priv, phy_mode);
+}
+
+static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ struct cpsw_priv *priv = dev_get_priv(dev);
+ struct gpio_desc *mode_gpios;
+ const char *phy_mode = NULL;
+ const char *phy_sel_compat = NULL;
+ const void *fdt = gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ int subnode;
+ int slave_index = 0;
+ int active_slave;
+ int num_mode_gpios;
+ int ret;
+
+ pdata->iobase = devfdt_get_addr(dev);
+ priv->data.version = CPSW_CTRL_VERSION_2;
+ priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
+ priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
+ priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
+ priv->data.mdio_div = CPSW_MDIO_DIV;
+ priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;
+
+ pdata->phy_interface = -1;
+
+ priv->data.cpsw_base = pdata->iobase;
+ priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
+ if (priv->data.channels <= 0) {
+ printf("error: cpdma_channels not found in dt\n");
+ return -ENOENT;
+ }
+
+ priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
+ if (priv->data.slaves <= 0) {
+ printf("error: slaves not found in dt\n");
+ return -ENOENT;
+ }
+ priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
+ priv->data.slaves);
+
+ priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
+ if (priv->data.ale_entries <= 0) {
+ printf("error: ale_entries not found in dt\n");
+ return -ENOENT;
+ }
+
+ priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
+ if (priv->data.bd_ram_ofs <= 0) {
+ printf("error: bd_ram_size not found in dt\n");
+ return -ENOENT;
+ }
+
+ priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
+ if (priv->data.mac_control <= 0) {
+ printf("error: ale_entries not found in dt\n");
+ return -ENOENT;
+ }
+
+ num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
+ if (num_mode_gpios > 0) {
+ mode_gpios = malloc(sizeof(struct gpio_desc) *
+ num_mode_gpios);
+ gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
+ num_mode_gpios, GPIOD_IS_OUT);
+ free(mode_gpios);
+ }
+
+ active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
+ priv->data.active_slave = active_slave;
+
+ fdt_for_each_subnode(subnode, fdt, node) {
+ int len;
+ const char *name;
+
+ name = fdt_get_name(fdt, subnode, &len);
+ if (!strncmp(name, "mdio", 4)) {
+ u32 mdio_base;
+
+ mdio_base = cpsw_get_addr_by_node(fdt, subnode);
+ if (mdio_base == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get MDIO address space\n");
+ return -ENOENT;
+ }
+ priv->data.mdio_base = mdio_base;
+ }
+
+ if (!strncmp(name, "slave", 5)) {
+ u32 phy_id[2];
+
+ if (slave_index >= priv->data.slaves)
+ continue;
+ phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
+ if (phy_mode)
+ priv->data.slave_data[slave_index].phy_if =
+ phy_get_interface_by_name(phy_mode);
+
+ priv->data.slave_data[slave_index].phy_of_handle =
+ fdtdec_lookup_phandle(fdt, subnode,
+ "phy-handle");
+
+ if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
+ priv->data.slave_data[slave_index].phy_addr =
+ fdtdec_get_int(gd->fdt_blob,
+ priv->data.slave_data[slave_index].phy_of_handle,
+ "reg", -1);
+ } else {
+ fdtdec_get_int_array(fdt, subnode, "phy_id",
+ phy_id, 2);
+ priv->data.slave_data[slave_index].phy_addr =
+ phy_id[1];
+ }
+ slave_index++;
+ }
+
+ if (!strncmp(name, "cpsw-phy-sel", 12)) {
+ priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
+ subnode);
+
+ if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get gmii_sel reg address\n");
+ return -ENOENT;
+ }
+
+ if (fdt_get_property(fdt, subnode, "rmii-clock-ext",
+ NULL))
+ priv->data.rmii_clock_external = true;
+
+ phy_sel_compat = fdt_getprop(fdt, subnode, "compatible",
+ NULL);
+ if (!phy_sel_compat) {
+ pr_err("Not able to get gmii_sel compatible\n");
+ return -ENOENT;
+ }
+ }
+ }
+
+ priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
+ priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;
+
+ if (priv->data.slaves == 2) {
+ priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
+ priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
+ }
+
+ ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
+ if (ret < 0) {
+ pr_err("cpsw read efuse mac failed\n");
+ return ret;
+ }
+
+ pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
+ if (pdata->phy_interface == -1) {
+ debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
+ return -EINVAL;
+ }
+
+ /* Select phy interface in control module */
+ cpsw_phy_sel(priv, phy_sel_compat, pdata->phy_interface);
+
+ return 0;
+}
+
+int cpsw_get_slave_phy_addr(struct udevice *dev, int slave)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+ struct cpsw_platform_data *data = &priv->data;
+
+ return data->slave_data[slave].phy_addr;
+}
+
+static const struct udevice_id cpsw_eth_ids[] = {
+ { .compatible = "ti,cpsw" },
+ { .compatible = "ti,am335x-cpsw" },
+ { }
+};
+
+U_BOOT_DRIVER(eth_cpsw) = {
+ .name = "eth_cpsw",
+ .id = UCLASS_ETH,
+ .of_match = cpsw_eth_ids,
+ .ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
+ .probe = cpsw_eth_probe,
+ .ops = &cpsw_eth_ops,
+ .priv_auto_alloc_size = sizeof(struct cpsw_priv),
+ .platdata_auto_alloc_size = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};
+#endif /* CONFIG_DM_ETH */
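For boards built without CONFIG_DM_ETH, cpsw_register() above remains the entry point. The sketch below shows how board code might feed it; only platform-data fields the driver actually references are set, the register offsets mirror the defines at the top of this file, and the base addresses, ALE size and mac_control value are placeholders rather than authoritative SoC values (the full struct cpsw_platform_data layout lives in include/cpsw.h, outside this diff).

/* Illustrative, non-DM board glue; all numeric values are placeholders */
static struct cpsw_slave_data example_slave_data[] = {
	{
		.slave_reg_ofs	= 0x208,	/* CPSW_SLAVE0_OFFSET */
		.sliver_reg_ofs	= 0xd80,	/* CPSW_SLIVER0_OFFSET */
		.phy_addr	= 0,
		.phy_if		= PHY_INTERFACE_MODE_RGMII,
	},
};

static struct cpsw_platform_data example_cpsw_data = {
	.mdio_base		= 0x4a101000,	/* placeholder */
	.cpsw_base		= 0x4a100000,	/* placeholder */
	.ale_reg_ofs		= 0xd00,	/* CPSW_ALE_OFFSET */
	.cpdma_reg_ofs		= 0x800,	/* CPSW_CPDMA_OFFSET */
	.host_port_reg_ofs	= 0x108,	/* CPSW_HOST_PORT_OFFSET */
	.bd_ram_ofs		= 0x2000,	/* CPSW_BD_OFFSET */
	.ale_entries		= 1024,		/* placeholder */
	.channels		= 8,
	.slaves			= 1,
	.slave_data		= example_slave_data,
	.active_slave		= 0,
	.host_port_num		= 0,
	.mac_control		= (1 << 5),	/* placeholder, board specific */
	.version		= CPSW_CTRL_VERSION_2,
};

int board_eth_init(bd_t *bis)
{
	/* cpsw_register() returns 1 on success, negative on error */
	return cpsw_register(&example_cpsw_data);
}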
diff --git a/drivers/net/ti/cpsw_mdio.c b/drivers/net/ti/cpsw_mdio.c
new file mode 100644
index 0000000000..70f547e6d7
--- /dev/null
+++ b/drivers/net/ti/cpsw_mdio.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPSW MDIO generic driver for TI AMxx/K2x/EMAC devices.
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <miiphy.h>
+#include <wait_bit.h>
+
+struct cpsw_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_FAULT BIT(19)
+#define CONTROL_FAULT_ENABLE BIT(18)
+#define CONTROL_DIV_MASK GENMASK(15, 0)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+ u32 physel;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_PHY_REG_SHIFT (21)
+#define USERACCESS_PHY_ADDR_SHIFT (16)
+#define USERACCESS_DATA GENMASK(15, 0)
+ } user[0];
+};
+
+#define CPSW_MDIO_DIV_DEF 0xff
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define CPSW_MDIO_TIMEOUT 100 /* msecs */
+
+struct cpsw_mdio {
+ struct cpsw_mdio_regs *regs;
+ struct mii_dev *bus;
+ int div;
+};
+
+/* wait until hardware is ready for another user access */
+static int cpsw_mdio_wait_for_user_access(struct cpsw_mdio *mdio)
+{
+ return wait_for_bit_le32(&mdio->regs->user[0].access,
+ USERACCESS_GO, false,
+ CPSW_MDIO_TIMEOUT, false);
+}
+
+static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
+ int dev_addr, int phy_reg)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ int data, ret;
+ u32 reg;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ ret = cpsw_mdio_wait_for_user_access(mdio);
+ if (ret)
+ return ret;
+ reg = (USERACCESS_GO | USERACCESS_READ |
+ (phy_reg << USERACCESS_PHY_REG_SHIFT) |
+ (phy_id << USERACCESS_PHY_ADDR_SHIFT));
+ writel(reg, &mdio->regs->user[0].access);
+ ret = cpsw_mdio_wait_for_user_access(mdio);
+ if (ret)
+ return ret;
+
+ reg = readl(&mdio->regs->user[0].access);
+ data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
+ return data;
+}
+
+static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
+ int phy_reg, u16 data)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ ret = cpsw_mdio_wait_for_user_access(mdio);
+ if (ret)
+ return ret;
+ reg = (USERACCESS_GO | USERACCESS_WRITE |
+ (phy_reg << USERACCESS_PHY_REG_SHIFT) |
+ (phy_id << USERACCESS_PHY_ADDR_SHIFT) |
+ (data & USERACCESS_DATA));
+ writel(reg, &mdio->regs->user[0].access);
+
+ return cpsw_mdio_wait_for_user_access(mdio);
+}
+
+u32 cpsw_mdio_get_alive(struct mii_dev *bus)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ u32 val;
+
+ val = readl(&mdio->regs->control);
+ return val & GENMASK(15, 0);
+}
+
+struct mii_dev *cpsw_mdio_init(const char *name, u32 mdio_base,
+ u32 bus_freq, int fck_freq)
+{
+ struct cpsw_mdio *cpsw_mdio;
+ int ret;
+
+ cpsw_mdio = calloc(1, sizeof(*cpsw_mdio));
+ if (!cpsw_mdio) {
+ debug("failed to alloc cpsw_mdio\n");
+ return NULL;
+ }
+
+ cpsw_mdio->bus = mdio_alloc();
+ if (!cpsw_mdio->bus) {
+ debug("failed to alloc mii bus\n");
+ free(cpsw_mdio);
+ return NULL;
+ }
+
+ cpsw_mdio->regs = (struct cpsw_mdio_regs *)mdio_base;
+
+ if (!bus_freq || !fck_freq)
+ cpsw_mdio->div = CPSW_MDIO_DIV_DEF;
+ else
+ cpsw_mdio->div = (fck_freq / bus_freq) - 1;
+ cpsw_mdio->div &= CONTROL_DIV_MASK;
+
+ /* set enable and clock divider */
+ writel(cpsw_mdio->div | CONTROL_ENABLE | CONTROL_FAULT |
+ CONTROL_FAULT_ENABLE, &cpsw_mdio->regs->control);
+ wait_for_bit_le32(&cpsw_mdio->regs->control,
+ CONTROL_IDLE, false, CPSW_MDIO_TIMEOUT, true);
+
+ /*
+ * wait for scan logic to settle:
+ * the scan time consists of (a) a large fixed component, and (b) a
+ * small component that varies with the mii bus frequency. These
+ * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
+ * silicon. Since the effect of (b) was found to be largely
+ * negligible, we keep things simple here.
+ */
+ mdelay(1);
+
+ cpsw_mdio->bus->read = cpsw_mdio_read;
+ cpsw_mdio->bus->write = cpsw_mdio_write;
+ cpsw_mdio->bus->priv = cpsw_mdio;
+ snprintf(cpsw_mdio->bus->name, sizeof(cpsw_mdio->bus->name), "%s", name);
+
+ ret = mdio_register(cpsw_mdio->bus);
+ if (ret < 0) {
+ debug("failed to register mii bus\n");
+ goto free_bus;
+ }
+
+ return cpsw_mdio->bus;
+
+free_bus:
+ mdio_free(cpsw_mdio->bus);
+ free(cpsw_mdio);
+ return NULL;
+}
+
+void cpsw_mdio_free(struct mii_dev *bus)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ u32 reg;
+
+ /* disable mdio */
+ reg = readl(&mdio->regs->control);
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &mdio->regs->control);
+
+ mdio_unregister(bus);
+ mdio_free(bus);
+ free(mdio);
+}
diff --git a/drivers/net/ti/cpsw_mdio.h b/drivers/net/ti/cpsw_mdio.h
new file mode 100644
index 0000000000..4a76d4e5c5
--- /dev/null
+++ b/drivers/net/ti/cpsw_mdio.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * CPSW MDIO generic driver API for TI AMxx/K2x/EMAC devices.
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef CPSW_MDIO_H_
+#define CPSW_MDIO_H_
+
+struct cpsw_mdio;
+
+struct mii_dev *cpsw_mdio_init(const char *name, u32 mdio_base,
+ u32 bus_freq, int fck_freq);
+void cpsw_mdio_free(struct mii_dev *bus);
+u32 cpsw_mdio_get_alive(struct mii_dev *bus);
+
+#endif /* CPSW_MDIO_H_ */
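These three declarations are the whole contract between the MDIO helper and its users (the CPSW driver above and the Keystone driver). A hedged usage sketch follows: the bus name, register base, PHY address and clock figures are placeholders, chosen so that the (fck_freq / bus_freq) - 1 divider computed by cpsw_mdio_init() works out to 249.

#include <common.h>
#include <miiphy.h>
#include <phy.h>
#include <linux/errno.h>
#include "cpsw_mdio.h"

/* Illustrative only: probe a PHY through the registered MDIO bus */
static int example_mdio_probe(void)
{
	struct mii_dev *bus;
	int bmsr;

	/* 250 MHz functional clock, 1 MHz MDIO clock -> divider of 249 */
	bus = cpsw_mdio_init("example_mdio", 0x4a101000, 1000000, 250000000);
	if (!bus)
		return -ENODEV;

	/* raw clause-22 read of BMSR from the PHY at address 0 */
	bmsr = bus->read(bus, 0, MDIO_DEVAD_NONE, MII_BMSR);
	if (bmsr >= 0)
		printf("alive mask 0x%08x, BMSR 0x%04x\n",
		       cpsw_mdio_get_alive(bus), bmsr);

	cpsw_mdio_free(bus);
	return 0;
}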
diff --git a/drivers/net/ti/davinci_emac.c b/drivers/net/ti/davinci_emac.c
new file mode 100644
index 0000000000..bb879d8d4f
--- /dev/null
+++ b/drivers/net/ti/davinci_emac.c
@@ -0,0 +1,901 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ethernet driver for TI TMS320DM644x (DaVinci) chips.
+ *
+ * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net>
+ *
+ * Parts shamelessly stolen from TI's dm644x_emac.c. Original copyright
+ * follows:
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * dm644x_emac.c
+ *
+ * TI DaVinci (DM644X) EMAC peripheral driver source for DV-EVM
+ *
+ * Copyright (C) 2005 Texas Instruments.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * Modifications:
+ * ver. 1.0: Sep 2005, Anant Gole - Created EMAC version for uBoot.
+ * ver 1.1: Nov 2005, Anant Gole - Extended the RX logic for multiple descriptors
+ */
+#include <common.h>
+#include <command.h>
+#include <net.h>
+#include <miiphy.h>
+#include <malloc.h>
+#include <netdev.h>
+#include <linux/compiler.h>
+#include <asm/arch/emac_defs.h>
+#include <asm/io.h>
+#include "davinci_emac.h"
+
+unsigned int emac_dbg = 0;
+#define debug_emac(fmt,args...) if (emac_dbg) printf(fmt,##args)
+
+#ifdef EMAC_HW_RAM_ADDR
+static inline unsigned long BD_TO_HW(unsigned long x)
+{
+ if (x == 0)
+ return 0;
+
+ return x - EMAC_WRAPPER_RAM_ADDR + EMAC_HW_RAM_ADDR;
+}
+
+static inline unsigned long HW_TO_BD(unsigned long x)
+{
+ if (x == 0)
+ return 0;
+
+ return x - EMAC_HW_RAM_ADDR + EMAC_WRAPPER_RAM_ADDR;
+}
+#else
+#define BD_TO_HW(x) (x)
+#define HW_TO_BD(x) (x)
+#endif
+
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+#define emac_gigabit_enable(phy_addr) davinci_eth_gigabit_enable(phy_addr)
+#else
+#define emac_gigabit_enable(phy_addr) /* no gigabit to enable */
+#endif
+
+#if !defined(CONFIG_SYS_EMAC_TI_CLKDIV)
+#define CONFIG_SYS_EMAC_TI_CLKDIV ((EMAC_MDIO_BUS_FREQ / \
+ EMAC_MDIO_CLOCK_FREQ) - 1)
+#endif
+
+static void davinci_eth_mdio_enable(void);
+
+static int gen_init_phy(int phy_addr);
+static int gen_is_phy_connected(int phy_addr);
+static int gen_get_link_speed(int phy_addr);
+static int gen_auto_negotiate(int phy_addr);
+
+void eth_mdio_enable(void)
+{
+ davinci_eth_mdio_enable();
+}
+
+/* EMAC Addresses */
+static volatile emac_regs *adap_emac = (emac_regs *)EMAC_BASE_ADDR;
+static volatile ewrap_regs *adap_ewrap = (ewrap_regs *)EMAC_WRAPPER_BASE_ADDR;
+static volatile mdio_regs *adap_mdio = (mdio_regs *)EMAC_MDIO_BASE_ADDR;
+
+/* EMAC descriptors */
+static volatile emac_desc *emac_rx_desc = (emac_desc *)(EMAC_WRAPPER_RAM_ADDR + EMAC_RX_DESC_BASE);
+static volatile emac_desc *emac_tx_desc = (emac_desc *)(EMAC_WRAPPER_RAM_ADDR + EMAC_TX_DESC_BASE);
+static volatile emac_desc *emac_rx_active_head = 0;
+static volatile emac_desc *emac_rx_active_tail = 0;
+static int emac_rx_queue_active = 0;
+
+/* Receive packet buffers */
+static unsigned char emac_rx_buffers[EMAC_MAX_RX_BUFFERS * EMAC_RXBUF_SIZE]
+ __aligned(ARCH_DMA_MINALIGN);
+
+#ifndef CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT
+#define CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT 3
+#endif
+
+/* PHY address for a discovered PHY (0xff - not found) */
+static u_int8_t active_phy_addr[CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT];
+
+/* number of PHY found active */
+static u_int8_t num_phy;
+
+phy_t phy[CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT];
+
+static int davinci_eth_set_mac_addr(struct eth_device *dev)
+{
+ unsigned long mac_hi;
+ unsigned long mac_lo;
+
+ /*
+ * Set MAC Addresses & Init multicast Hash to 0 (disable any multicast
+ * receive)
+ * Using channel 0 only - other channels are disabled
+ */
+ writel(0, &adap_emac->MACINDEX);
+ mac_hi = (dev->enetaddr[3] << 24) |
+ (dev->enetaddr[2] << 16) |
+ (dev->enetaddr[1] << 8) |
+ (dev->enetaddr[0]);
+ mac_lo = (dev->enetaddr[5] << 8) |
+ (dev->enetaddr[4]);
+
+ writel(mac_hi, &adap_emac->MACADDRHI);
+#if defined(DAVINCI_EMAC_VERSION2)
+ writel(mac_lo | EMAC_MAC_ADDR_IS_VALID | EMAC_MAC_ADDR_MATCH,
+ &adap_emac->MACADDRLO);
+#else
+ writel(mac_lo, &adap_emac->MACADDRLO);
+#endif
+
+ writel(0, &adap_emac->MACHASH1);
+ writel(0, &adap_emac->MACHASH2);
+
+ /* Set source MAC address - REQUIRED */
+ writel(mac_hi, &adap_emac->MACSRCADDRHI);
+ writel(mac_lo, &adap_emac->MACSRCADDRLO);
+
+
+ return 0;
+}
+
+static void davinci_eth_mdio_enable(void)
+{
+ u_int32_t clkdiv;
+
+ clkdiv = CONFIG_SYS_EMAC_TI_CLKDIV;
+
+ writel((clkdiv & 0xff) |
+ MDIO_CONTROL_ENABLE |
+ MDIO_CONTROL_FAULT |
+ MDIO_CONTROL_FAULT_ENABLE,
+ &adap_mdio->CONTROL);
+
+ while (readl(&adap_mdio->CONTROL) & MDIO_CONTROL_IDLE)
+ ;
+}
+
+/*
+ * Tries to find active connected PHYs and records their addresses in
+ * active_phy_addr[]. Returns the number of active PHYs found, or 0 if
+ * none (or more than CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT) are detected.
+ */
+static int davinci_eth_phy_detect(void)
+{
+ u_int32_t phy_act_state;
+ int i;
+ int j;
+ unsigned int count = 0;
+
+ for (i = 0; i < CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT; i++)
+ active_phy_addr[i] = 0xff;
+
+ udelay(1000);
+ phy_act_state = readl(&adap_mdio->ALIVE);
+
+ if (phy_act_state == 0)
+ return 0; /* No active PHYs */
+
+ debug_emac("davinci_eth_phy_detect(), ALIVE = 0x%08x\n", phy_act_state);
+
+ for (i = 0, j = 0; i < 32; i++)
+ if (phy_act_state & (1 << i)) {
+ count++;
+ if (count <= CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT) {
+ active_phy_addr[j++] = i;
+ } else {
+ printf("%s: to many PHYs detected.\n",
+ __func__);
+ count = 0;
+ break;
+ }
+ }
+
+ num_phy = count;
+
+ return count;
+}
+
+
+/* Read a PHY register via the MDIO interface. Returns 1 on success, 0 otherwise */
+int davinci_eth_phy_read(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t *data)
+{
+ int tmp;
+
+ while (readl(&adap_mdio->USERACCESS0) & MDIO_USERACCESS0_GO)
+ ;
+
+ writel(MDIO_USERACCESS0_GO |
+ MDIO_USERACCESS0_WRITE_READ |
+ ((reg_num & 0x1f) << 21) |
+ ((phy_addr & 0x1f) << 16),
+ &adap_mdio->USERACCESS0);
+
+ /* Wait for command to complete */
+ while ((tmp = readl(&adap_mdio->USERACCESS0)) & MDIO_USERACCESS0_GO)
+ ;
+
+ if (tmp & MDIO_USERACCESS0_ACK) {
+ *data = tmp & 0xffff;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write to a PHY register via the MDIO interface. Blocks until the operation is complete. */
+int davinci_eth_phy_write(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t data)
+{
+
+ while (readl(&adap_mdio->USERACCESS0) & MDIO_USERACCESS0_GO)
+ ;
+
+ writel(MDIO_USERACCESS0_GO |
+ MDIO_USERACCESS0_WRITE_WRITE |
+ ((reg_num & 0x1f) << 21) |
+ ((phy_addr & 0x1f) << 16) |
+ (data & 0xffff),
+ &adap_mdio->USERACCESS0);
+
+ /* Wait for command to complete */
+ while (readl(&adap_mdio->USERACCESS0) & MDIO_USERACCESS0_GO)
+ ;
+
+ return 1;
+}
+
+/* PHY functions for a generic PHY */
+static int gen_init_phy(int phy_addr)
+{
+ int ret = 1;
+
+ if (gen_get_link_speed(phy_addr)) {
+ /* Try another time */
+ ret = gen_get_link_speed(phy_addr);
+ }
+
+ return(ret);
+}
+
+static int gen_is_phy_connected(int phy_addr)
+{
+ u_int16_t dummy;
+
+ return davinci_eth_phy_read(phy_addr, MII_PHYSID1, &dummy);
+}
+
+static int get_active_phy(void)
+{
+ int i;
+
+ for (i = 0; i < num_phy; i++)
+ if (phy[i].get_link_speed(active_phy_addr[i]))
+ return i;
+
+ return -1; /* Return error if no link */
+}
+
+static int gen_get_link_speed(int phy_addr)
+{
+ u_int16_t tmp;
+
+ if (davinci_eth_phy_read(phy_addr, MII_STATUS_REG, &tmp) &&
+ (tmp & 0x04)) {
+#if defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM)
+ davinci_eth_phy_read(phy_addr, MII_LPA, &tmp);
+
+ /* Speed doesn't matter, there is no setting for it in EMAC. */
+ if (tmp & (LPA_100FULL | LPA_10FULL)) {
+ /* set EMAC for Full Duplex */
+ writel(EMAC_MACCONTROL_MIIEN_ENABLE |
+ EMAC_MACCONTROL_FULLDUPLEX_ENABLE,
+ &adap_emac->MACCONTROL);
+ } else {
+ /* set EMAC for Half Duplex */
+ writel(EMAC_MACCONTROL_MIIEN_ENABLE,
+ &adap_emac->MACCONTROL);
+ }
+
+ if (tmp & (LPA_100FULL | LPA_100HALF))
+ writel(readl(&adap_emac->MACCONTROL) |
+ EMAC_MACCONTROL_RMIISPEED_100,
+ &adap_emac->MACCONTROL);
+ else
+ writel(readl(&adap_emac->MACCONTROL) &
+ ~EMAC_MACCONTROL_RMIISPEED_100,
+ &adap_emac->MACCONTROL);
+#endif
+ return(1);
+ }
+
+ return(0);
+}
+
+static int gen_auto_negotiate(int phy_addr)
+{
+ u_int16_t tmp;
+ u_int16_t val;
+ unsigned long cntr = 0;
+
+ if (!davinci_eth_phy_read(phy_addr, MII_BMCR, &tmp))
+ return 0;
+
+ val = tmp | BMCR_FULLDPLX | BMCR_ANENABLE |
+ BMCR_SPEED100;
+ davinci_eth_phy_write(phy_addr, MII_BMCR, val);
+
+ if (!davinci_eth_phy_read(phy_addr, MII_ADVERTISE, &val))
+ return 0;
+
+ val |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL |
+ ADVERTISE_10HALF);
+ davinci_eth_phy_write(phy_addr, MII_ADVERTISE, val);
+
+ if (!davinci_eth_phy_read(phy_addr, MII_BMCR, &tmp))
+ return(0);
+
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+ davinci_eth_phy_read(phy_addr, MII_CTRL1000, &val);
+ val |= PHY_1000BTCR_1000FD;
+ val &= ~PHY_1000BTCR_1000HD;
+ davinci_eth_phy_write(phy_addr, MII_CTRL1000, val);
+ davinci_eth_phy_read(phy_addr, MII_CTRL1000, &val);
+#endif
+
+ /* Restart Auto_negotiation */
+ tmp |= BMCR_ANRESTART;
+ davinci_eth_phy_write(phy_addr, MII_BMCR, tmp);
+
+ /* check AutoNegotiate complete */
+ do {
+ udelay(40000);
+ if (!davinci_eth_phy_read(phy_addr, MII_BMSR, &tmp))
+ return 0;
+
+ if (tmp & BMSR_ANEGCOMPLETE)
+ break;
+
+ cntr++;
+ } while (cntr < 200);
+
+ if (!davinci_eth_phy_read(phy_addr, MII_BMSR, &tmp))
+ return(0);
+
+ if (!(tmp & BMSR_ANEGCOMPLETE))
+ return(0);
+
+ return(gen_get_link_speed(phy_addr));
+}
+/* End of generic PHY functions */
+
+
+#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
+static int davinci_mii_phy_read(struct mii_dev *bus, int addr, int devad,
+ int reg)
+{
+ unsigned short value = 0;
+ int retval = davinci_eth_phy_read(addr, reg, &value);
+
+ return retval ? value : -EIO;
+}
+
+static int davinci_mii_phy_write(struct mii_dev *bus, int addr, int devad,
+ int reg, u16 value)
+{
+ return davinci_eth_phy_write(addr, reg, value) ? 0 : 1;
+}
+#endif
+
+static void __attribute__((unused)) davinci_eth_gigabit_enable(int phy_addr)
+{
+ u_int16_t data;
+
+ if (davinci_eth_phy_read(phy_addr, 0, &data)) {
+ if (data & (1 << 6)) { /* speed selection MSB */
+ /*
+ * Check if the detected link is gigabit.
+ * If so, enable gigabit mode in the MAC.
+ */
+ writel(readl(&adap_emac->MACCONTROL) |
+ EMAC_MACCONTROL_GIGFORCE |
+ EMAC_MACCONTROL_GIGABIT_ENABLE,
+ &adap_emac->MACCONTROL);
+ }
+ }
+}
+
+/* Eth device open */
+static int davinci_eth_open(struct eth_device *dev, bd_t *bis)
+{
+ dv_reg_p addr;
+ u_int32_t clkdiv, cnt, mac_control;
+ uint16_t __maybe_unused lpa_val;
+ volatile emac_desc *rx_desc;
+ int index;
+
+ debug_emac("+ emac_open\n");
+
+ /* Reset EMAC module and disable interrupts in wrapper */
+ writel(1, &adap_emac->SOFTRESET);
+ while (readl(&adap_emac->SOFTRESET) != 0)
+ ;
+#if defined(DAVINCI_EMAC_VERSION2)
+ writel(1, &adap_ewrap->softrst);
+ while (readl(&adap_ewrap->softrst) != 0)
+ ;
+#else
+ writel(0, &adap_ewrap->EWCTL);
+ for (cnt = 0; cnt < 5; cnt++) {
+ clkdiv = readl(&adap_ewrap->EWCTL);
+ }
+#endif
+
+#if defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM)
+ adap_ewrap->c0rxen = adap_ewrap->c1rxen = adap_ewrap->c2rxen = 0;
+ adap_ewrap->c0txen = adap_ewrap->c1txen = adap_ewrap->c2txen = 0;
+ adap_ewrap->c0miscen = adap_ewrap->c1miscen = adap_ewrap->c2miscen = 0;
+#endif
+ rx_desc = emac_rx_desc;
+
+ writel(1, &adap_emac->TXCONTROL);
+ writel(1, &adap_emac->RXCONTROL);
+
+ davinci_eth_set_mac_addr(dev);
+
+ /* Set DMA 8 TX / 8 RX Head pointers to 0 */
+ addr = &adap_emac->TX0HDP;
+ for (cnt = 0; cnt < 8; cnt++)
+ writel(0, addr++);
+
+ addr = &adap_emac->RX0HDP;
+ for (cnt = 0; cnt < 8; cnt++)
+ writel(0, addr++);
+
+ /* Clear Statistics (do this before setting MacControl register) */
+ addr = &adap_emac->RXGOODFRAMES;
+ for(cnt = 0; cnt < EMAC_NUM_STATS; cnt++)
+ writel(0, addr++);
+
+ /* No multicast addressing */
+ writel(0, &adap_emac->MACHASH1);
+ writel(0, &adap_emac->MACHASH2);
+
+ /* Create RX queue and set receive process in place */
+ emac_rx_active_head = emac_rx_desc;
+ for (cnt = 0; cnt < EMAC_MAX_RX_BUFFERS; cnt++) {
+ rx_desc->next = BD_TO_HW((u_int32_t)(rx_desc + 1));
+ rx_desc->buffer = &emac_rx_buffers[cnt * EMAC_RXBUF_SIZE];
+ rx_desc->buff_off_len = EMAC_MAX_ETHERNET_PKT_SIZE;
+ rx_desc->pkt_flag_len = EMAC_CPPI_OWNERSHIP_BIT;
+ rx_desc++;
+ }
+
+ /* Finalize the rx desc list */
+ rx_desc--;
+ rx_desc->next = 0;
+ emac_rx_active_tail = rx_desc;
+ emac_rx_queue_active = 1;
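+ /*
+ * Note: the CPPI OWNERSHIP bit set above hands each RX descriptor to
+ * the EMAC; davinci_eth_rcv_packet() reclaims a descriptor only after
+ * the hardware has cleared that bit.
+ */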
+
+ /* Set max receive frame length and zero buffer offset */
+ writel(EMAC_MAX_ETHERNET_PKT_SIZE, &adap_emac->RXMAXLEN);
+ writel(0, &adap_emac->RXBUFFEROFFSET);
+
+ /*
+ * No fancy configs - add EMAC_RXMBPENABLE_RXCAFEN_ENABLE here for
+ * promiscuous-mode debugging
+ */
+ writel(EMAC_RXMBPENABLE_RXBROADEN, &adap_emac->RXMBPENABLE);
+
+ /* Enable ch 0 only */
+ writel(1, &adap_emac->RXUNICASTSET);
+
+ /* Init MDIO & get link state */
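+ /*
+ * CONFIG_SYS_EMAC_TI_CLKDIV is a board-specific divider; the MDIO
+ * state machine typically clocks MDCLK at (peripheral clock) /
+ * (clkdiv + 1), chosen to stay within the ~2.5 MHz MDIO limit.
+ */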
+ clkdiv = CONFIG_SYS_EMAC_TI_CLKDIV;
+ writel((clkdiv & 0xff) | MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULT,
+ &adap_mdio->CONTROL);
+
+ /* We need to wait for MDIO to start */
+ udelay(1000);
+
+ index = get_active_phy();
+ if (index == -1)
+ return 0;
+
+ /* Enable MII interface */
+ mac_control = EMAC_MACCONTROL_MIIEN_ENABLE;
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+ davinci_eth_phy_read(active_phy_addr[index], MII_STAT1000, &lpa_val);
+ if (lpa_val & PHY_1000BTSR_1000FD) {
+ debug_emac("eth_open : gigabit negotiated\n");
+ mac_control |= EMAC_MACCONTROL_FULLDUPLEX_ENABLE;
+ mac_control |= EMAC_MACCONTROL_GIGABIT_ENABLE;
+ }
+#endif
+
+ davinci_eth_phy_read(active_phy_addr[index], MII_LPA, &lpa_val);
+ if (lpa_val & (LPA_100FULL | LPA_10FULL))
+ /* set EMAC for Full Duplex */
+ mac_control |= EMAC_MACCONTROL_FULLDUPLEX_ENABLE;
+#if defined(CONFIG_SOC_DA8XX) || \
+ (defined(CONFIG_OMAP34XX) && defined(CONFIG_DRIVER_TI_EMAC_USE_RMII))
+ mac_control |= EMAC_MACCONTROL_RMIISPEED_100;
+#endif
+ writel(mac_control, &adap_emac->MACCONTROL);
+ /* Start receive process */
+ writel(BD_TO_HW((u_int32_t)emac_rx_desc), &adap_emac->RX0HDP);
+
+ debug_emac("- emac_open\n");
+
+ return 1;
+}
+
+/* EMAC Channel Teardown */
+static void davinci_eth_ch_teardown(int ch)
+{
+ dv_reg dly = 0xff;
+ dv_reg cnt;
+
+ debug_emac("+ emac_ch_teardown\n");
+
+ if (ch == EMAC_CH_TX) {
+ /* Init TX channel teardown */
+ writel(0, &adap_emac->TXTEARDOWN);
+ do {
+ /*
+ * Wait here for TX teardown completion. Note: a task delay
+ * could be used here instead of busy-waiting, but teardown
+ * has been found to take very few CPU cycles and does not
+ * affect functionality.
+ */
+ dly--;
+ udelay(1);
+ if (dly == 0)
+ break;
+ cnt = readl(&adap_emac->TX0CP);
+ } while (cnt != 0xfffffffc);
+ writel(cnt, &adap_emac->TX0CP);
+ writel(0, &adap_emac->TX0HDP);
+ } else {
+ /* Init RX channel teardown */
+ writel(0, &adap_emac->RXTEARDOWN);
+ do {
+ /*
+ * Wait here for RX teardown completion. Note: a task delay
+ * could be used here instead of busy-waiting, but teardown
+ * has been found to take very few CPU cycles and does not
+ * affect functionality.
+ */
+ dly--;
+ udelay(1);
+ if (dly == 0)
+ break;
+ cnt = readl(&adap_emac->RX0CP);
+ } while (cnt != 0xfffffffc);
+ writel(cnt, &adap_emac->RX0CP);
+ writel(0, &adap_emac->RX0HDP);
+ }
+
+ debug_emac("- emac_ch_teardown\n");
+}
+
+/* Eth device close */
+static void davinci_eth_close(struct eth_device *dev)
+{
+ debug_emac("+ emac_close\n");
+
+ davinci_eth_ch_teardown(EMAC_CH_TX); /* TX Channel teardown */
+ if (readl(&adap_emac->RXCONTROL) & 1)
+ davinci_eth_ch_teardown(EMAC_CH_RX); /* RX Channel teardown */
+
+ /* Reset EMAC module and disable interrupts in wrapper */
+ writel(1, &adap_emac->SOFTRESET);
+#if defined(DAVINCI_EMAC_VERSION2)
+ writel(1, &adap_ewrap->softrst);
+#else
+ writel(0, &adap_ewrap->EWCTL);
+#endif
+
+#if defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM)
+ adap_ewrap->c0rxen = adap_ewrap->c1rxen = adap_ewrap->c2rxen = 0;
+ adap_ewrap->c0txen = adap_ewrap->c1txen = adap_ewrap->c2txen = 0;
+ adap_ewrap->c0miscen = adap_ewrap->c1miscen = adap_ewrap->c2miscen = 0;
+#endif
+ debug_emac("- emac_close\n");
+}
+
+ static int tx_send_loop;
+
+/*
+ * This function sends a single packet on the network and returns a
+ * positive number (the number of bytes transmitted) or a negative
+ * value on error.
+ */
+ static int davinci_eth_send_packet(struct eth_device *dev,
+ void *packet, int length)
+{
+ int ret_status = -1;
+ int index;
+ tx_send_loop = 0;
+
+ index = get_active_phy();
+ if (index == -1) {
+ printf(" WARN: emac_send_packet: No link\n");
+ return ret_status;
+ }
+
+ /* Check packet size and if < EMAC_MIN_ETHERNET_PKT_SIZE, pad it up */
+ if (length < EMAC_MIN_ETHERNET_PKT_SIZE)
+ length = EMAC_MIN_ETHERNET_PKT_SIZE;
+
+ /* Populate the TX descriptor */
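+ /*
+ * The whole frame is carried in a single descriptor, so both the SOP
+ * and EOP flags are set; the OWNERSHIP bit hands the descriptor to
+ * the EMAC for transmission.
+ */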
+ emac_tx_desc->next = 0;
+ emac_tx_desc->buffer = (u_int8_t *) packet;
+ emac_tx_desc->buff_off_len = (length & 0xffff);
+ emac_tx_desc->pkt_flag_len = ((length & 0xffff) |
+ EMAC_CPPI_SOP_BIT |
+ EMAC_CPPI_OWNERSHIP_BIT |
+ EMAC_CPPI_EOP_BIT);
+
+ flush_dcache_range((unsigned long)packet,
+ (unsigned long)packet + ALIGN(length, PKTALIGN));
+
+ /* Send the packet */
+ writel(BD_TO_HW((unsigned long)emac_tx_desc), &adap_emac->TX0HDP);
+
+ /* Wait for packet to complete or link down */
+ while (1) {
+ if (!phy[index].get_link_speed(active_phy_addr[index])) {
+ davinci_eth_ch_teardown(EMAC_CH_TX);
+ return ret_status;
+ }
+
+ if (readl(&adap_emac->TXINTSTATRAW) & 0x01) {
+ ret_status = length;
+ break;
+ }
+ tx_send_loop++;
+ }
+
+ return ret_status;
+}
+
+/*
+ * This function handles receipt of a packet from the network
+ */
+ static int davinci_eth_rcv_packet(struct eth_device *dev)
+{
+ volatile emac_desc *rx_curr_desc;
+ volatile emac_desc *curr_desc;
+ volatile emac_desc *tail_desc;
+ int status, ret = -1;
+
+ rx_curr_desc = emac_rx_active_head;
+ if (!rx_curr_desc)
+ return 0;
+ status = rx_curr_desc->pkt_flag_len;
+ if ((status & EMAC_CPPI_OWNERSHIP_BIT) == 0) {
+ if (status & EMAC_CPPI_RX_ERROR_FRAME) {
+ /* Error in packet - discard it and requeue desc */
+ printf("WARN: emac_rcv_pkt: Error in packet\n");
+ } else {
+ unsigned long tmp = (unsigned long)rx_curr_desc->buffer;
+ unsigned short len =
+ rx_curr_desc->buff_off_len & 0xffff;
+
+ invalidate_dcache_range(tmp, tmp + ALIGN(len, PKTALIGN));
+ net_process_received_packet(rx_curr_desc->buffer, len);
+ ret = len;
+ }
+
+ /* Ack received packet descriptor */
+ writel(BD_TO_HW((ulong)rx_curr_desc), &adap_emac->RX0CP);
+ curr_desc = rx_curr_desc;
+ emac_rx_active_head =
+ (volatile emac_desc *) (HW_TO_BD(rx_curr_desc->next));
+
+ if (status & EMAC_CPPI_EOQ_BIT) {
+ if (emac_rx_active_head) {
+ writel(BD_TO_HW((ulong)emac_rx_active_head),
+ &adap_emac->RX0HDP);
+ } else {
+ emac_rx_queue_active = 0;
+ printf("INFO: emac_rcv_packet: RX queue not active\n");
+ }
+ }
+
+ /* Recycle RX descriptor */
+ rx_curr_desc->buff_off_len = EMAC_MAX_ETHERNET_PKT_SIZE;
+ rx_curr_desc->pkt_flag_len = EMAC_CPPI_OWNERSHIP_BIT;
+ rx_curr_desc->next = 0;
+
+ if (emac_rx_active_head == 0) {
+ printf("INFO: emac_rcv_pkt: active queue head = 0\n");
+ emac_rx_active_head = curr_desc;
+ emac_rx_active_tail = curr_desc;
+ if (emac_rx_queue_active != 0) {
+ writel(BD_TO_HW((ulong)emac_rx_active_head),
+ &adap_emac->RX0HDP);
+ printf("INFO: emac_rcv_pkt: active queue head = 0, HDP fired\n");
+ emac_rx_queue_active = 1;
+ }
+ } else {
+ tail_desc = emac_rx_active_tail;
+ emac_rx_active_tail = curr_desc;
+ tail_desc->next = BD_TO_HW((ulong) curr_desc);
+ status = tail_desc->pkt_flag_len;
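+ /*
+ * If the EMAC already flagged end-of-queue on the old tail, it will
+ * not follow the link just added, so restart RX DMA at the recycled
+ * descriptor and clear the stale EOQ flag.
+ */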
+ if (status & EMAC_CPPI_EOQ_BIT) {
+ writel(BD_TO_HW((ulong)curr_desc),
+ &adap_emac->RX0HDP);
+ status &= ~EMAC_CPPI_EOQ_BIT;
+ tail_desc->pkt_flag_len = status;
+ }
+ }
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * This function initializes the EMAC hardware. It does NOT initialize
+ * the EMAC module's power or pin multiplexers; that is done by
+ * board_init() much earlier in the boot process. Returns 1 on success,
+ * 0 otherwise.
+ */
+int davinci_emac_initialize(void)
+{
+ u_int32_t phy_id;
+ u_int16_t tmp;
+ int i;
+ int ret;
+ struct eth_device *dev;
+
+ dev = malloc(sizeof *dev);
+
+ if (dev == NULL)
+ return -1;
+
+ memset(dev, 0, sizeof *dev);
+ strcpy(dev->name, "DaVinci-EMAC");
+
+ dev->iobase = 0;
+ dev->init = davinci_eth_open;
+ dev->halt = davinci_eth_close;
+ dev->send = davinci_eth_send_packet;
+ dev->recv = davinci_eth_rcv_packet;
+ dev->write_hwaddr = davinci_eth_set_mac_addr;
+
+ eth_register(dev);
+
+ davinci_eth_mdio_enable();
+
+ /* let the EMAC detect the PHYs */
+ udelay(5000);
+
+ for (i = 0; i < 256; i++) {
+ if (readl(&adap_mdio->ALIVE))
+ break;
+ udelay(1000);
+ }
+
+ if (i >= 256) {
+ printf("No ETH PHY detected!!!\n");
+ return 0;
+ }
+
+ /* Check whether any PHYs are connected */
+ ret = davinci_eth_phy_detect();
+ if (!ret)
+ return 0;
+
+ debug_emac(" %d ETH PHYs detected\n", ret);
+
+ /* Get PHY ID and initialize phy_ops for a detected PHY */
+ for (i = 0; i < num_phy; i++) {
+ if (!davinci_eth_phy_read(active_phy_addr[i], MII_PHYSID1,
+ &tmp)) {
+ active_phy_addr[i] = 0xff;
+ continue;
+ }
+
+ phy_id = (tmp << 16) & 0xffff0000;
+
+ if (!davinci_eth_phy_read(active_phy_addr[i], MII_PHYSID2,
+ &tmp)) {
+ active_phy_addr[i] = 0xff;
+ continue;
+ }
+
+ phy_id |= tmp & 0x0000ffff;
+
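+ /*
+ * The 32-bit PHY ID is PHYSID1 in the upper 16 bits and PHYSID2 in
+ * the lower 16 bits; match it against the optional PHY_* constants
+ * below, falling back to the generic PHY routines.
+ */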
+ switch (phy_id) {
+#ifdef PHY_KSZ8873
+ case PHY_KSZ8873:
+ sprintf(phy[i].name, "KSZ8873 @ 0x%02x",
+ active_phy_addr[i]);
+ phy[i].init = ksz8873_init_phy;
+ phy[i].is_phy_connected = ksz8873_is_phy_connected;
+ phy[i].get_link_speed = ksz8873_get_link_speed;
+ phy[i].auto_negotiate = ksz8873_auto_negotiate;
+ break;
+#endif
+#ifdef PHY_LXT972
+ case PHY_LXT972:
+ sprintf(phy[i].name, "LXT972 @ 0x%02x",
+ active_phy_addr[i]);
+ phy[i].init = lxt972_init_phy;
+ phy[i].is_phy_connected = lxt972_is_phy_connected;
+ phy[i].get_link_speed = lxt972_get_link_speed;
+ phy[i].auto_negotiate = lxt972_auto_negotiate;
+ break;
+#endif
+#ifdef PHY_DP83848
+ case PHY_DP83848:
+ sprintf(phy[i].name, "DP83848 @ 0x%02x",
+ active_phy_addr[i]);
+ phy[i].init = dp83848_init_phy;
+ phy[i].is_phy_connected = dp83848_is_phy_connected;
+ phy[i].get_link_speed = dp83848_get_link_speed;
+ phy[i].auto_negotiate = dp83848_auto_negotiate;
+ break;
+#endif
+#ifdef PHY_ET1011C
+ case PHY_ET1011C:
+ sprintf(phy[i].name, "ET1011C @ 0x%02x",
+ active_phy_addr[i]);
+ phy[i].init = gen_init_phy;
+ phy[i].is_phy_connected = gen_is_phy_connected;
+ phy[i].get_link_speed = et1011c_get_link_speed;
+ phy[i].auto_negotiate = gen_auto_negotiate;
+ break;
+#endif
+ default:
+ sprintf(phy[i].name, "GENERIC @ 0x%02x",
+ active_phy_addr[i]);
+ phy[i].init = gen_init_phy;
+ phy[i].is_phy_connected = gen_is_phy_connected;
+ phy[i].get_link_speed = gen_get_link_speed;
+ phy[i].auto_negotiate = gen_auto_negotiate;
+ }
+
+ debug("Ethernet PHY: %s\n", phy[i].name);
+
+ int retval;
+ struct mii_dev *mdiodev = mdio_alloc();
+ if (!mdiodev)
+ return -ENOMEM;
+ strncpy(mdiodev->name, phy[i].name, MDIO_NAME_LEN);
+ mdiodev->read = davinci_mii_phy_read;
+ mdiodev->write = davinci_mii_phy_write;
+
+ retval = mdio_register(mdiodev);
+ if (retval < 0)
+ return retval;
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+#define PHY_CONF_REG 22
+ /* Enable PHY to clock out TX_CLK */
+ davinci_eth_phy_read(active_phy_addr[i], PHY_CONF_REG, &tmp);
+ tmp |= PHY_CONF_TXCLKEN;
+ davinci_eth_phy_write(active_phy_addr[i], PHY_CONF_REG, tmp);
+ davinci_eth_phy_read(active_phy_addr[i], PHY_CONF_REG, &tmp);
+#endif
+ }
+
+#if defined(CONFIG_TI816X) || (defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM) && \
+ !defined(CONFIG_DRIVER_TI_EMAC_RMII_NO_NEGOTIATE))
+ for (i = 0; i < num_phy; i++) {
+ if (phy[i].is_phy_connected(i))
+ phy[i].auto_negotiate(i);
+ }
+#endif
+ return 1;
+}
diff --git a/drivers/net/ti/davinci_emac.h b/drivers/net/ti/davinci_emac.h
new file mode 100644
index 0000000000..695855b4d5
--- /dev/null
+++ b/drivers/net/ti/davinci_emac.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2011 Ilya Yanok, Emcraft Systems
+ *
+ * Based on: mach-davinci/emac_defs.h
+ * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net>
+ */
+
+#ifndef _DAVINCI_EMAC_H_
+#define _DAVINCI_EMAC_H_
+/* Ethernet Min/Max packet size */
+#define EMAC_MIN_ETHERNET_PKT_SIZE 60
+#define EMAC_MAX_ETHERNET_PKT_SIZE 1518
+/* Buffer size (should be aligned on 32 byte and cache line) */
+#define EMAC_RXBUF_SIZE ALIGN(ALIGN(EMAC_MAX_ETHERNET_PKT_SIZE, 32),\
+ ARCH_DMA_MINALIGN)
+
+/* Number of RX packet buffers
+ * NOTE: Only 1 buffer supported as of now
+ */
+#define EMAC_MAX_RX_BUFFERS 10
+
+
+/***********************************************
+ ******** Internally used macros ***************
+ ***********************************************/
+
+#define EMAC_CH_TX 1
+#define EMAC_CH_RX 0
+
+/* Each descriptor occupies 4 words; start the RX descriptors at 0 and
+ * reserve space for a maximum of 64 descriptors
+ */
+#define EMAC_RX_DESC_BASE 0x0
+#define EMAC_TX_DESC_BASE 0x1000
+
+/* EMAC Teardown value */
+#define EMAC_TEARDOWN_VALUE 0xfffffffc
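+/*
+ * The completion pointer registers (TX0CP/RX0CP) read back this value
+ * once a channel teardown has been acknowledged; davinci_eth_ch_teardown()
+ * polls for it.
+ */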
+
+/* MII Status Register */
+#define MII_STATUS_REG 1
+/* PHY Configuration register */
+#define PHY_CONF_TXCLKEN (1 << 5)
+
+/* Number of statistics registers */
+#define EMAC_NUM_STATS 36
+
+
+/* EMAC Descriptor */
+typedef volatile struct _emac_desc
+{
+ u_int32_t next; /* Pointer to next descriptor
+ in chain */
+ u_int8_t *buffer; /* Pointer to data buffer */
+ u_int32_t buff_off_len; /* Buffer Offset(MSW) and Length(LSW) */
+ u_int32_t pkt_flag_len; /* Packet Flags(MSW) and Length(LSW) */
+} emac_desc;
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT (0x80000000)
+#define EMAC_CPPI_EOP_BIT (0x40000000)
+#define EMAC_CPPI_OWNERSHIP_BIT (0x20000000)
+#define EMAC_CPPI_EOQ_BIT (0x10000000)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT (0x08000000)
+#define EMAC_CPPI_PASS_CRC_BIT (0x04000000)
+
+#define EMAC_CPPI_RX_ERROR_FRAME (0x03fc0000)
+
+#define EMAC_MACCONTROL_MIIEN_ENABLE (0x20)
+#define EMAC_MACCONTROL_FULLDUPLEX_ENABLE (0x1)
+#define EMAC_MACCONTROL_GIGABIT_ENABLE (1 << 7)
+#define EMAC_MACCONTROL_GIGFORCE (1 << 17)
+#define EMAC_MACCONTROL_RMIISPEED_100 (1 << 15)
+
+#define EMAC_MAC_ADDR_MATCH (1 << 19)
+#define EMAC_MAC_ADDR_IS_VALID (1 << 20)
+
+#define EMAC_RXMBPENABLE_RXCAFEN_ENABLE (0x200000)
+#define EMAC_RXMBPENABLE_RXBROADEN (0x2000)
+
+
+#define MDIO_CONTROL_IDLE (0x80000000)
+#define MDIO_CONTROL_ENABLE (0x40000000)
+#define MDIO_CONTROL_FAULT_ENABLE (0x40000)
+#define MDIO_CONTROL_FAULT (0x80000)
+#define MDIO_USERACCESS0_GO (0x80000000)
+#define MDIO_USERACCESS0_WRITE_READ (0x0)
+#define MDIO_USERACCESS0_WRITE_WRITE (0x40000000)
+#define MDIO_USERACCESS0_ACK (0x20000000)
+
+/* Ethernet MAC Registers Structure */
+typedef struct {
+ dv_reg TXIDVER;
+ dv_reg TXCONTROL;
+ dv_reg TXTEARDOWN;
+ u_int8_t RSVD0[4];
+ dv_reg RXIDVER;
+ dv_reg RXCONTROL;
+ dv_reg RXTEARDOWN;
+ u_int8_t RSVD1[100];
+ dv_reg TXINTSTATRAW;
+ dv_reg TXINTSTATMASKED;
+ dv_reg TXINTMASKSET;
+ dv_reg TXINTMASKCLEAR;
+ dv_reg MACINVECTOR;
+ u_int8_t RSVD2[12];
+ dv_reg RXINTSTATRAW;
+ dv_reg RXINTSTATMASKED;
+ dv_reg RXINTMASKSET;
+ dv_reg RXINTMASKCLEAR;
+ dv_reg MACINTSTATRAW;
+ dv_reg MACINTSTATMASKED;
+ dv_reg MACINTMASKSET;
+ dv_reg MACINTMASKCLEAR;
+ u_int8_t RSVD3[64];
+ dv_reg RXMBPENABLE;
+ dv_reg RXUNICASTSET;
+ dv_reg RXUNICASTCLEAR;
+ dv_reg RXMAXLEN;
+ dv_reg RXBUFFEROFFSET;
+ dv_reg RXFILTERLOWTHRESH;
+ u_int8_t RSVD4[8];
+ dv_reg RX0FLOWTHRESH;
+ dv_reg RX1FLOWTHRESH;
+ dv_reg RX2FLOWTHRESH;
+ dv_reg RX3FLOWTHRESH;
+ dv_reg RX4FLOWTHRESH;
+ dv_reg RX5FLOWTHRESH;
+ dv_reg RX6FLOWTHRESH;
+ dv_reg RX7FLOWTHRESH;
+ dv_reg RX0FREEBUFFER;
+ dv_reg RX1FREEBUFFER;
+ dv_reg RX2FREEBUFFER;
+ dv_reg RX3FREEBUFFER;
+ dv_reg RX4FREEBUFFER;
+ dv_reg RX5FREEBUFFER;
+ dv_reg RX6FREEBUFFER;
+ dv_reg RX7FREEBUFFER;
+ dv_reg MACCONTROL;
+ dv_reg MACSTATUS;
+ dv_reg EMCONTROL;
+ dv_reg FIFOCONTROL;
+ dv_reg MACCONFIG;
+ dv_reg SOFTRESET;
+ u_int8_t RSVD5[88];
+ dv_reg MACSRCADDRLO;
+ dv_reg MACSRCADDRHI;
+ dv_reg MACHASH1;
+ dv_reg MACHASH2;
+ dv_reg BOFFTEST;
+ dv_reg TPACETEST;
+ dv_reg RXPAUSE;
+ dv_reg TXPAUSE;
+ u_int8_t RSVD6[16];
+ dv_reg RXGOODFRAMES;
+ dv_reg RXBCASTFRAMES;
+ dv_reg RXMCASTFRAMES;
+ dv_reg RXPAUSEFRAMES;
+ dv_reg RXCRCERRORS;
+ dv_reg RXALIGNCODEERRORS;
+ dv_reg RXOVERSIZED;
+ dv_reg RXJABBER;
+ dv_reg RXUNDERSIZED;
+ dv_reg RXFRAGMENTS;
+ dv_reg RXFILTERED;
+ dv_reg RXQOSFILTERED;
+ dv_reg RXOCTETS;
+ dv_reg TXGOODFRAMES;
+ dv_reg TXBCASTFRAMES;
+ dv_reg TXMCASTFRAMES;
+ dv_reg TXPAUSEFRAMES;
+ dv_reg TXDEFERRED;
+ dv_reg TXCOLLISION;
+ dv_reg TXSINGLECOLL;
+ dv_reg TXMULTICOLL;
+ dv_reg TXEXCESSIVECOLL;
+ dv_reg TXLATECOLL;
+ dv_reg TXUNDERRUN;
+ dv_reg TXCARRIERSENSE;
+ dv_reg TXOCTETS;
+ dv_reg FRAME64;
+ dv_reg FRAME65T127;
+ dv_reg FRAME128T255;
+ dv_reg FRAME256T511;
+ dv_reg FRAME512T1023;
+ dv_reg FRAME1024TUP;
+ dv_reg NETOCTETS;
+ dv_reg RXSOFOVERRUNS;
+ dv_reg RXMOFOVERRUNS;
+ dv_reg RXDMAOVERRUNS;
+ u_int8_t RSVD7[624];
+ dv_reg MACADDRLO;
+ dv_reg MACADDRHI;
+ dv_reg MACINDEX;
+ u_int8_t RSVD8[244];
+ dv_reg TX0HDP;
+ dv_reg TX1HDP;
+ dv_reg TX2HDP;
+ dv_reg TX3HDP;
+ dv_reg TX4HDP;
+ dv_reg TX5HDP;
+ dv_reg TX6HDP;
+ dv_reg TX7HDP;
+ dv_reg RX0HDP;
+ dv_reg RX1HDP;
+ dv_reg RX2HDP;
+ dv_reg RX3HDP;
+ dv_reg RX4HDP;
+ dv_reg RX5HDP;
+ dv_reg RX6HDP;
+ dv_reg RX7HDP;
+ dv_reg TX0CP;
+ dv_reg TX1CP;
+ dv_reg TX2CP;
+ dv_reg TX3CP;
+ dv_reg TX4CP;
+ dv_reg TX5CP;
+ dv_reg TX6CP;
+ dv_reg TX7CP;
+ dv_reg RX0CP;
+ dv_reg RX1CP;
+ dv_reg RX2CP;
+ dv_reg RX3CP;
+ dv_reg RX4CP;
+ dv_reg RX5CP;
+ dv_reg RX6CP;
+ dv_reg RX7CP;
+} emac_regs;
+
+/* EMAC Wrapper Registers Structure */
+typedef struct {
+#ifdef DAVINCI_EMAC_VERSION2
+ dv_reg idver;
+ dv_reg softrst;
+ dv_reg emctrl;
+ dv_reg c0rxthreshen;
+ dv_reg c0rxen;
+ dv_reg c0txen;
+ dv_reg c0miscen;
+ dv_reg c1rxthreshen;
+ dv_reg c1rxen;
+ dv_reg c1txen;
+ dv_reg c1miscen;
+ dv_reg c2rxthreshen;
+ dv_reg c2rxen;
+ dv_reg c2txen;
+ dv_reg c2miscen;
+ dv_reg c0rxthreshstat;
+ dv_reg c0rxstat;
+ dv_reg c0txstat;
+ dv_reg c0miscstat;
+ dv_reg c1rxthreshstat;
+ dv_reg c1rxstat;
+ dv_reg c1txstat;
+ dv_reg c1miscstat;
+ dv_reg c2rxthreshstat;
+ dv_reg c2rxstat;
+ dv_reg c2txstat;
+ dv_reg c2miscstat;
+ dv_reg c0rximax;
+ dv_reg c0tximax;
+ dv_reg c1rximax;
+ dv_reg c1tximax;
+ dv_reg c2rximax;
+ dv_reg c2tximax;
+#else
+ u_int8_t RSVD0[4100];
+ dv_reg EWCTL;
+ dv_reg EWINTTCNT;
+#endif
+} ewrap_regs;
+
+/* EMAC MDIO Registers Structure */
+typedef struct {
+ dv_reg VERSION;
+ dv_reg CONTROL;
+ dv_reg ALIVE;
+ dv_reg LINK;
+ dv_reg LINKINTRAW;
+ dv_reg LINKINTMASKED;
+ u_int8_t RSVD0[8];
+ dv_reg USERINTRAW;
+ dv_reg USERINTMASKED;
+ dv_reg USERINTMASKSET;
+ dv_reg USERINTMASKCLEAR;
+ u_int8_t RSVD1[80];
+ dv_reg USERACCESS0;
+ dv_reg USERPHYSEL0;
+ dv_reg USERACCESS1;
+ dv_reg USERPHYSEL1;
+} mdio_regs;
+
+int davinci_eth_phy_read(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t *data);
+int davinci_eth_phy_write(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t data);
+
+typedef struct {
+ char name[64];
+ int (*init)(int phy_addr);
+ int (*is_phy_connected)(int phy_addr);
+ int (*get_link_speed)(int phy_addr);
+ int (*auto_negotiate)(int phy_addr);
+} phy_t;
+
+#endif /* _DAVINCI_EMAC_H_ */
diff --git a/drivers/net/ti/keystone_net.c b/drivers/net/ti/keystone_net.c
new file mode 100644
index 0000000000..a3ba91cc3f
--- /dev/null
+++ b/drivers/net/ti/keystone_net.c
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ethernet driver for TI K2HK EVM.
+ *
+ * (C) Copyright 2012-2014
+ * Texas Instruments Incorporated, <www.ti.com>
+ */
+#include <common.h>
+#include <command.h>
+#include <console.h>
+
+#include <dm.h>
+#include <dm/lists.h>
+
+#include <net.h>
+#include <phy.h>
+#include <errno.h>
+#include <miiphy.h>
+#include <malloc.h>
+#include <asm/ti-common/keystone_nav.h>
+#include <asm/ti-common/keystone_net.h>
+#include <asm/ti-common/keystone_serdes.h>
+#include <asm/arch/psc_defs.h>
+
+#include "cpsw_mdio.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#ifdef KEYSTONE2_EMAC_GIG_ENABLE
+#define emac_gigabit_enable(x) keystone2_eth_gigabit_enable(x)
+#else
+#define emac_gigabit_enable(x) /* no gigabit to enable */
+#endif
+
+#define RX_BUFF_NUMS 24
+#define RX_BUFF_LEN 1520
+#define MAX_SIZE_STREAM_BUFFER RX_BUFF_LEN
+#define SGMII_ANEG_TIMEOUT 4000
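+/*
+ * The link polling loops below delay 1 ms per iteration, so
+ * SGMII_ANEG_TIMEOUT corresponds to roughly a 4 second timeout.
+ */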
+
+static u8 rx_buffs[RX_BUFF_NUMS * RX_BUFF_LEN] __aligned(16);
+
+enum link_type {
+ LINK_TYPE_SGMII_MAC_TO_MAC_AUTO = 0,
+ LINK_TYPE_SGMII_MAC_TO_PHY_MODE = 1,
+ LINK_TYPE_SGMII_MAC_TO_MAC_FORCED_MODE = 2,
+ LINK_TYPE_SGMII_MAC_TO_FIBRE_MODE = 3,
+ LINK_TYPE_SGMII_MAC_TO_PHY_NO_MDIO_MODE = 4,
+ LINK_TYPE_RGMII_LINK_MAC_PHY = 5,
+ LINK_TYPE_RGMII_LINK_MAC_MAC_FORCED = 6,
+ LINK_TYPE_RGMII_LINK_MAC_PHY_NO_MDIO = 7,
+ LINK_TYPE_10G_MAC_TO_PHY_MODE = 10,
+ LINK_TYPE_10G_MAC_TO_MAC_FORCED_MODE = 11,
+};
+
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
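+/*
+ * Example: for MAC address 02:03:04:05:06:07, mac_hi() packs bytes 0-3
+ * into 0x05040302 and mac_lo() packs bytes 4-5 into 0x0706, which
+ * ks2_eth_write_hwaddr() writes to the slave SA_HI/SA_LO registers.
+ */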
+
+#ifdef CONFIG_KSNET_NETCP_V1_0
+
+#define EMAC_EMACSW_BASE_OFS 0x90800
+#define EMAC_EMACSW_PORT_BASE_OFS (EMAC_EMACSW_BASE_OFS + 0x60)
+
+/* CPSW Switch slave registers */
+#define CPGMACSL_REG_SA_LO 0x10
+#define CPGMACSL_REG_SA_HI 0x14
+
+#define DEVICE_EMACSW_BASE(base, x) ((base) + EMAC_EMACSW_PORT_BASE_OFS + \
+ (x) * 0x30)
+
+#elif defined(CONFIG_KSNET_NETCP_V1_5)
+
+#define EMAC_EMACSW_PORT_BASE_OFS 0x222000
+
+/* CPSW Switch slave registers */
+#define CPGMACSL_REG_SA_LO 0x308
+#define CPGMACSL_REG_SA_HI 0x30c
+
+#define DEVICE_EMACSW_BASE(base, x) ((base) + EMAC_EMACSW_PORT_BASE_OFS + \
+ (x) * 0x1000)
+
+#endif
+
+
+struct ks2_eth_priv {
+ struct udevice *dev;
+ struct phy_device *phydev;
+ struct mii_dev *mdio_bus;
+ int phy_addr;
+ phy_interface_t phy_if;
+ int sgmii_link_type;
+ void *mdio_base;
+ struct rx_buff_desc net_rx_buffs;
+ struct pktdma_cfg *netcp_pktdma;
+ void *hd;
+ int slave_port;
+ enum link_type link_type;
+ bool emac_open;
+ bool has_mdio;
+};
+
+static void __attribute__((unused))
+ keystone2_eth_gigabit_enable(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ /*
+ * Check if the detected link is gigabit.
+ * If gigabit mode is detected, enable gigabit in the MAC.
+ */
+ if (priv->has_mdio) {
+ if (priv->phydev->speed != 1000)
+ return;
+ }
+
+ writel(readl(DEVICE_EMACSL_BASE(priv->slave_port - 1) +
+ CPGMACSL_REG_CTL) |
+ EMAC_MACCONTROL_GIGFORCE | EMAC_MACCONTROL_GIGABIT_ENABLE,
+ DEVICE_EMACSL_BASE(priv->slave_port - 1) + CPGMACSL_REG_CTL);
+}
+
+#ifdef CONFIG_SOC_K2G
+int keystone_rgmii_config(struct phy_device *phy_dev)
+{
+ unsigned int i, status;
+
+ i = 0;
+ do {
+ if (i > SGMII_ANEG_TIMEOUT) {
+ puts(" TIMEOUT !\n");
+ phy_dev->link = 0;
+ return 0;
+ }
+
+ if (ctrlc()) {
+ puts("user interrupt!\n");
+ phy_dev->link = 0;
+ return -EINTR;
+ }
+
+ if ((i++ % 500) == 0)
+ printf(".");
+
+ udelay(1000); /* 1 ms */
+ status = readl(RGMII_STATUS_REG);
+ } while (!(status & RGMII_REG_STATUS_LINK));
+
+ puts(" done\n");
+
+ return 0;
+}
+#else
+int keystone_sgmii_config(struct phy_device *phy_dev, int port, int interface)
+{
+ unsigned int i, status, mask;
+ unsigned int mr_adv_ability, control;
+
+ switch (interface) {
+ case SGMII_LINK_MAC_MAC_AUTONEG:
+ mr_adv_ability = (SGMII_REG_MR_ADV_ENABLE |
+ SGMII_REG_MR_ADV_LINK |
+ SGMII_REG_MR_ADV_FULL_DUPLEX |
+ SGMII_REG_MR_ADV_GIG_MODE);
+ control = (SGMII_REG_CONTROL_MASTER |
+ SGMII_REG_CONTROL_AUTONEG);
+
+ break;
+ case SGMII_LINK_MAC_PHY:
+ case SGMII_LINK_MAC_PHY_FORCED:
+ mr_adv_ability = SGMII_REG_MR_ADV_ENABLE;
+ control = SGMII_REG_CONTROL_AUTONEG;
+
+ break;
+ case SGMII_LINK_MAC_MAC_FORCED:
+ mr_adv_ability = (SGMII_REG_MR_ADV_ENABLE |
+ SGMII_REG_MR_ADV_LINK |
+ SGMII_REG_MR_ADV_FULL_DUPLEX |
+ SGMII_REG_MR_ADV_GIG_MODE);
+ control = SGMII_REG_CONTROL_MASTER;
+
+ break;
+ case SGMII_LINK_MAC_FIBER:
+ mr_adv_ability = 0x20;
+ control = SGMII_REG_CONTROL_AUTONEG;
+
+ break;
+ default:
+ mr_adv_ability = SGMII_REG_MR_ADV_ENABLE;
+ control = SGMII_REG_CONTROL_AUTONEG;
+ }
+
+ __raw_writel(0, SGMII_CTL_REG(port));
+
+ /*
+ * Wait for the SerDes PLL to lock,
+ * but don't hang if lock is never achieved
+ */
+ for (i = 0; i < 1000; i++) {
+ udelay(2000);
+ status = __raw_readl(SGMII_STATUS_REG(port));
+ if ((status & SGMII_REG_STATUS_LOCK) != 0)
+ break;
+ }
+
+ __raw_writel(mr_adv_ability, SGMII_MRADV_REG(port));
+ __raw_writel(control, SGMII_CTL_REG(port));
+
+
+ mask = SGMII_REG_STATUS_LINK;
+
+ if (control & SGMII_REG_CONTROL_AUTONEG)
+ mask |= SGMII_REG_STATUS_AUTONEG;
+
+ status = __raw_readl(SGMII_STATUS_REG(port));
+ if ((status & mask) == mask)
+ return 0;
+
+ printf("\n%s Waiting for SGMII auto negotiation to complete",
+ phy_dev->dev->name);
+ while ((status & mask) != mask) {
+ /* Timeout reached? */
+ if (i > SGMII_ANEG_TIMEOUT) {
+ puts(" TIMEOUT !\n");
+ phy_dev->link = 0;
+ return 0;
+ }
+
+ if (ctrlc()) {
+ puts("user interrupt!\n");
+ phy_dev->link = 0;
+ return -EINTR;
+ }
+
+ if ((i++ % 500) == 0)
+ printf(".");
+
+ udelay(1000); /* 1 ms */
+ status = __raw_readl(SGMII_STATUS_REG(port));
+ }
+ puts(" done\n");
+
+ return 0;
+}
+#endif
+
+int mac_sl_reset(u32 port)
+{
+ u32 i, v;
+
+ if (port >= DEVICE_N_GMACSL_PORTS)
+ return GMACSL_RET_INVALID_PORT;
+
+ /* Set the soft reset bit */
+ writel(CPGMAC_REG_RESET_VAL_RESET,
+ DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RESET);
+
+ /* Wait for the bit to clear */
+ for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+ v = readl(DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RESET);
+ if ((v & CPGMAC_REG_RESET_VAL_RESET_MASK) !=
+ CPGMAC_REG_RESET_VAL_RESET)
+ return GMACSL_RET_OK;
+ }
+
+ /* Timeout on the reset */
+ return GMACSL_RET_WARN_RESET_INCOMPLETE;
+}
+
+int mac_sl_config(u_int16_t port, struct mac_sl_cfg *cfg)
+{
+ u32 v, i;
+ int ret = GMACSL_RET_OK;
+
+ if (port >= DEVICE_N_GMACSL_PORTS)
+ return GMACSL_RET_INVALID_PORT;
+
+ if (cfg->max_rx_len > CPGMAC_REG_MAXLEN_LEN) {
+ cfg->max_rx_len = CPGMAC_REG_MAXLEN_LEN;
+ ret = GMACSL_RET_WARN_MAXLEN_TOO_BIG;
+ }
+
+ /* Must wait if the device is undergoing reset */
+ for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+ v = readl(DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RESET);
+ if ((v & CPGMAC_REG_RESET_VAL_RESET_MASK) !=
+ CPGMAC_REG_RESET_VAL_RESET)
+ break;
+ }
+
+ if (i == DEVICE_EMACSL_RESET_POLL_COUNT)
+ return GMACSL_RET_CONFIG_FAIL_RESET_ACTIVE;
+
+ writel(cfg->max_rx_len, DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_MAXLEN);
+ writel(cfg->ctl, DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_CTL);
+
+#ifndef CONFIG_SOC_K2HK
+ /* Map RX packet flow priority to 0 */
+ writel(0, DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RX_PRI_MAP);
+#endif
+
+ return ret;
+}
+
+int ethss_config(u32 ctl, u32 max_pkt_size)
+{
+ u32 i;
+
+ /* Max length register */
+ writel(max_pkt_size, DEVICE_CPSW_BASE + CPSW_REG_MAXLEN);
+
+ /* Control register */
+ writel(ctl, DEVICE_CPSW_BASE + CPSW_REG_CTL);
+
+ /* All statistics enabled by default */
+ writel(CPSW_REG_VAL_STAT_ENABLE_ALL,
+ DEVICE_CPSW_BASE + CPSW_REG_STAT_PORT_EN);
+
+ /* Reset and enable the ALE */
+ writel(CPSW_REG_VAL_ALE_CTL_RESET_AND_ENABLE |
+ CPSW_REG_VAL_ALE_CTL_BYPASS,
+ DEVICE_CPSW_BASE + CPSW_REG_ALE_CONTROL);
+
+ /* All ports put into forward mode */
+ for (i = 0; i < DEVICE_CPSW_NUM_PORTS; i++)
+ writel(CPSW_REG_VAL_PORTCTL_FORWARD_MODE,
+ DEVICE_CPSW_BASE + CPSW_REG_ALE_PORTCTL(i));
+
+ return 0;
+}
+
+int ethss_start(void)
+{
+ int i;
+ struct mac_sl_cfg cfg;
+
+ cfg.max_rx_len = MAX_SIZE_STREAM_BUFFER;
+ cfg.ctl = GMACSL_ENABLE | GMACSL_RX_ENABLE_EXT_CTL;
+
+ for (i = 0; i < DEVICE_N_GMACSL_PORTS; i++) {
+ mac_sl_reset(i);
+ mac_sl_config(i, &cfg);
+ }
+
+ return 0;
+}
+
+int ethss_stop(void)
+{
+ int i;
+
+ for (i = 0; i < DEVICE_N_GMACSL_PORTS; i++)
+ mac_sl_reset(i);
+
+ return 0;
+}
+
+struct ks2_serdes ks2_serdes_sgmii_156p25mhz = {
+ .clk = SERDES_CLOCK_156P25M,
+ .rate = SERDES_RATE_5G,
+ .rate_mode = SERDES_QUARTER_RATE,
+ .intf = SERDES_PHY_SGMII,
+ .loopback = 0,
+};
+
+#ifndef CONFIG_SOC_K2G
+static void keystone2_net_serdes_setup(void)
+{
+ ks2_serdes_init(CONFIG_KSNET_SERDES_SGMII_BASE,
+ &ks2_serdes_sgmii_156p25mhz,
+ CONFIG_KSNET_SERDES_LANES_PER_SGMII);
+
+#if defined(CONFIG_SOC_K2E) || defined(CONFIG_SOC_K2L)
+ ks2_serdes_init(CONFIG_KSNET_SERDES_SGMII2_BASE,
+ &ks2_serdes_sgmii_156p25mhz,
+ CONFIG_KSNET_SERDES_LANES_PER_SGMII);
+#endif
+
+ /* wait until SerDes setup completes */
+ udelay(5000);
+}
+#endif
+
+static int ks2_eth_start(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+#ifdef CONFIG_SOC_K2G
+ keystone_rgmii_config(priv->phydev);
+#else
+ keystone_sgmii_config(priv->phydev, priv->slave_port - 1,
+ priv->sgmii_link_type);
+#endif
+
+ udelay(10000);
+
+ /* On chip switch configuration */
+ ethss_config(target_get_switch_ctl(), SWITCH_MAX_PKT_SIZE);
+
+ qm_init();
+
+ if (ksnav_init(priv->netcp_pktdma, &priv->net_rx_buffs)) {
+ pr_err("ksnav_init failed\n");
+ goto err_knav_init;
+ }
+
+ /*
+ * Streaming switch configuration. If the streaming switch is not
+ * present, this statement is defined to a no-op in target.h; if
+ * present, it usually expands to a series of register writes.
+ */
+ hw_config_streaming_switch();
+
+ if (priv->has_mdio) {
+ phy_startup(priv->phydev);
+ if (priv->phydev->link == 0) {
+ pr_err("phy startup failed\n");
+ goto err_phy_start;
+ }
+ }
+
+ emac_gigabit_enable(dev);
+
+ ethss_start();
+
+ priv->emac_open = true;
+
+ return 0;
+
+err_phy_start:
+ ksnav_close(priv->netcp_pktdma);
+err_knav_init:
+ qm_close();
+
+ return -EFAULT;
+}
+
+static int ks2_eth_send(struct udevice *dev, void *packet, int length)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ genphy_update_link(priv->phydev);
+ if (priv->phydev->link == 0)
+ return -1;
+
+ if (length < EMAC_MIN_ETHERNET_PKT_SIZE)
+ length = EMAC_MIN_ETHERNET_PKT_SIZE;
+
+ return ksnav_send(priv->netcp_pktdma, (u32 *)packet,
+ length, (priv->slave_port) << 16);
+}
+
+static int ks2_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ int pkt_size;
+ u32 *pkt = NULL;
+
+ priv->hd = ksnav_recv(priv->netcp_pktdma, &pkt, &pkt_size);
+ if (priv->hd == NULL)
+ return -EAGAIN;
+
+ *packetp = (uchar *)pkt;
+
+ return pkt_size;
+}
+
+static int ks2_eth_free_pkt(struct udevice *dev, uchar *packet,
+ int length)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ ksnav_release_rxhd(priv->netcp_pktdma, priv->hd);
+
+ return 0;
+}
+
+static void ks2_eth_stop(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ if (!priv->emac_open)
+ return;
+ ethss_stop();
+
+ ksnav_close(priv->netcp_pktdma);
+ qm_close();
+ phy_shutdown(priv->phydev);
+ priv->emac_open = false;
+}
+
+int ks2_eth_read_rom_hwaddr(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ u32 maca = 0;
+ u32 macb = 0;
+
+ /* Read the e-fuse mac address */
+ if (priv->slave_port == 1) {
+ maca = __raw_readl(MAC_ID_BASE_ADDR);
+ macb = __raw_readl(MAC_ID_BASE_ADDR + 4);
+ }
+
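+ /*
+ * The word at MAC_ID_BASE_ADDR holds MAC bytes 2-5 (most significant
+ * byte in bits 31:24) and the following word holds bytes 0-1 in its
+ * low 16 bits, as unpacked below.
+ */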
+ pdata->enetaddr[0] = (macb >> 8) & 0xff;
+ pdata->enetaddr[1] = (macb >> 0) & 0xff;
+ pdata->enetaddr[2] = (maca >> 24) & 0xff;
+ pdata->enetaddr[3] = (maca >> 16) & 0xff;
+ pdata->enetaddr[4] = (maca >> 8) & 0xff;
+ pdata->enetaddr[5] = (maca >> 0) & 0xff;
+
+ return 0;
+}
+
+int ks2_eth_write_hwaddr(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+
+ writel(mac_hi(pdata->enetaddr),
+ DEVICE_EMACSW_BASE(pdata->iobase, priv->slave_port - 1) +
+ CPGMACSL_REG_SA_HI);
+ writel(mac_lo(pdata->enetaddr),
+ DEVICE_EMACSW_BASE(pdata->iobase, priv->slave_port - 1) +
+ CPGMACSL_REG_SA_LO);
+
+ return 0;
+}
+
+static int ks2_eth_probe(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct mii_dev *mdio_bus;
+
+ priv->dev = dev;
+ priv->emac_open = false;
+
+ /* These clock enables have to be moved to a common location */
+ if (cpu_is_k2g())
+ writel(KS2_ETHERNET_RGMII, KS2_ETHERNET_CFG);
+
+ /* By default, select PA PLL clock as PA clock source */
+#ifndef CONFIG_SOC_K2G
+ if (psc_enable_module(KS2_LPSC_PA))
+ return -EACCES;
+#endif
+ if (psc_enable_module(KS2_LPSC_CPGMAC))
+ return -EACCES;
+ if (psc_enable_module(KS2_LPSC_CRYPTO))
+ return -EACCES;
+
+ if (cpu_is_k2e() || cpu_is_k2l())
+ pll_pa_clk_sel();
+
+ priv->net_rx_buffs.buff_ptr = rx_buffs;
+ priv->net_rx_buffs.num_buffs = RX_BUFF_NUMS;
+ priv->net_rx_buffs.buff_len = RX_BUFF_LEN;
+
+ if (priv->slave_port == 1) {
+#ifndef CONFIG_SOC_K2G
+ keystone2_net_serdes_setup();
+#endif
+ /*
+ * Register the MDIO bus for slave 0 only; other slaves have
+ * to reuse the same bus
+ */
+ mdio_bus = cpsw_mdio_init("ethernet-mdio",
+ (u32)priv->mdio_base,
+ EMAC_MDIO_CLOCK_FREQ,
+ EMAC_MDIO_BUS_FREQ);
+ if (!mdio_bus) {
+ pr_err("MDIO alloc failed\n");
+ return -ENOMEM;
+ }
+ priv->mdio_bus = mdio_bus;
+ } else {
+ /* Get the MDIO bus from slave 0 device */
+ struct ks2_eth_priv *parent_priv;
+
+ parent_priv = dev_get_priv(dev->parent);
+ priv->mdio_bus = parent_priv->mdio_bus;
+ priv->mdio_base = parent_priv->mdio_base;
+ }
+
+ priv->netcp_pktdma = &netcp_pktdma;
+
+ if (priv->has_mdio) {
+ priv->phydev = phy_connect(priv->mdio_bus, priv->phy_addr,
+ dev, priv->phy_if);
+ phy_config(priv->phydev);
+ }
+
+ return 0;
+}
+
+int ks2_eth_remove(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ cpsw_mdio_free(priv->mdio_bus);
+
+ return 0;
+}
+
+static const struct eth_ops ks2_eth_ops = {
+ .start = ks2_eth_start,
+ .send = ks2_eth_send,
+ .recv = ks2_eth_recv,
+ .free_pkt = ks2_eth_free_pkt,
+ .stop = ks2_eth_stop,
+ .read_rom_hwaddr = ks2_eth_read_rom_hwaddr,
+ .write_hwaddr = ks2_eth_write_hwaddr,
+};
+
+static int ks2_eth_bind_slaves(struct udevice *dev, int gbe, int *gbe_0)
+{
+ const void *fdt = gd->fdt_blob;
+ struct udevice *sl_dev;
+ int interfaces;
+ int sec_slave;
+ int slave;
+ int ret;
+ char *slave_name;
+
+ interfaces = fdt_subnode_offset(fdt, gbe, "interfaces");
+ fdt_for_each_subnode(slave, fdt, interfaces) {
+ int slave_no;
+
+ slave_no = fdtdec_get_int(fdt, slave, "slave-port", -ENOENT);
+ if (slave_no == -ENOENT)
+ continue;
+
+ if (slave_no == 0) {
+ /* This is the current eth device */
+ *gbe_0 = slave;
+ } else {
+ /* Slave devices to be registered */
+ slave_name = malloc(20);
+ snprintf(slave_name, 20, "netcp@slave-%d", slave_no);
+ ret = device_bind_driver_to_node(dev, "eth_ks2_sl",
+ slave_name, offset_to_ofnode(slave),
+ &sl_dev);
+ if (ret) {
+ pr_err("ks2_net - not able to bind slave interfaces\n");
+ return ret;
+ }
+ }
+ }
+
+ sec_slave = fdt_subnode_offset(fdt, gbe, "secondary-slave-ports");
+ fdt_for_each_subnode(slave, fdt, sec_slave) {
+ int slave_no;
+
+ slave_no = fdtdec_get_int(fdt, slave, "slave-port", -ENOENT);
+ if (slave_no == -ENOENT)
+ continue;
+
+ /* Slave devices to be registered */
+ slave_name = malloc(20);
+ snprintf(slave_name, 20, "netcp@slave-%d", slave_no);
+ ret = device_bind_driver_to_node(dev, "eth_ks2_sl", slave_name,
+ offset_to_ofnode(slave), &sl_dev);
+ if (ret) {
+ pr_err("ks2_net - not able to bind slave interfaces\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ks2_eth_parse_slave_interface(int netcp, int slave,
+ struct ks2_eth_priv *priv,
+ struct eth_pdata *pdata)
+{
+ const void *fdt = gd->fdt_blob;
+ int mdio;
+ int phy;
+ int dma_count;
+ u32 dma_channel[8];
+
+ priv->slave_port = fdtdec_get_int(fdt, slave, "slave-port", -1);
+ priv->net_rx_buffs.rx_flow = priv->slave_port * 8;
+
+ /* U-Boot slave port number starts with 1 instead of 0 */
+ priv->slave_port += 1;
+
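+ /*
+ * The RX flow defaults to the slave port number times 8 (set above)
+ * and is overridden below when the "ti,navigator-dmas" property
+ * provides a per-port entry.
+ */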
+ dma_count = fdtdec_get_int_array_count(fdt, netcp,
+ "ti,navigator-dmas",
+ dma_channel, 8);
+
+ if (dma_count > (2 * priv->slave_port)) {
+ int dma_idx;
+
+ dma_idx = priv->slave_port * 2 - 1;
+ priv->net_rx_buffs.rx_flow = dma_channel[dma_idx];
+ }
+
+ priv->link_type = fdtdec_get_int(fdt, slave, "link-interface", -1);
+
+ phy = fdtdec_lookup_phandle(fdt, slave, "phy-handle");
+ if (phy >= 0) {
+ priv->phy_addr = fdtdec_get_int(fdt, phy, "reg", -1);
+
+ mdio = fdt_parent_offset(fdt, phy);
+ if (mdio < 0) {
+ pr_err("mdio dt not found\n");
+ return -ENODEV;
+ }
+ priv->mdio_base = (void *)fdtdec_get_addr(fdt, mdio, "reg");
+ }
+
+ if (priv->link_type == LINK_TYPE_SGMII_MAC_TO_PHY_MODE) {
+ priv->phy_if = PHY_INTERFACE_MODE_SGMII;
+ pdata->phy_interface = priv->phy_if;
+ priv->sgmii_link_type = SGMII_LINK_MAC_PHY;
+ priv->has_mdio = true;
+ } else if (priv->link_type == LINK_TYPE_RGMII_LINK_MAC_PHY) {
+ priv->phy_if = PHY_INTERFACE_MODE_RGMII;
+ pdata->phy_interface = priv->phy_if;
+ priv->has_mdio = true;
+ }
+
+ return 0;
+}
+
+static int ks2_sl_eth_ofdata_to_platdata(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ const void *fdt = gd->fdt_blob;
+ int slave = dev_of_offset(dev);
+ int interfaces;
+ int gbe;
+ int netcp_devices;
+ int netcp;
+
+ interfaces = fdt_parent_offset(fdt, slave);
+ gbe = fdt_parent_offset(fdt, interfaces);
+ netcp_devices = fdt_parent_offset(fdt, gbe);
+ netcp = fdt_parent_offset(fdt, netcp_devices);
+
+ ks2_eth_parse_slave_interface(netcp, slave, priv, pdata);
+
+ pdata->iobase = fdtdec_get_addr(fdt, netcp, "reg");
+
+ return 0;
+}
+
+static int ks2_eth_ofdata_to_platdata(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ const void *fdt = gd->fdt_blob;
+ int gbe_0 = -ENODEV;
+ int netcp_devices;
+ int gbe;
+
+ netcp_devices = fdt_subnode_offset(fdt, dev_of_offset(dev),
+ "netcp-devices");
+ gbe = fdt_subnode_offset(fdt, netcp_devices, "gbe");
+
+ ks2_eth_bind_slaves(dev, gbe, &gbe_0);
+
+ ks2_eth_parse_slave_interface(dev_of_offset(dev), gbe_0, priv, pdata);
+
+ pdata->iobase = devfdt_get_addr(dev);
+
+ return 0;
+}
+
+static const struct udevice_id ks2_eth_ids[] = {
+ { .compatible = "ti,netcp-1.0" },
+ { }
+};
+
+U_BOOT_DRIVER(eth_ks2_slave) = {
+ .name = "eth_ks2_sl",
+ .id = UCLASS_ETH,
+ .ofdata_to_platdata = ks2_sl_eth_ofdata_to_platdata,
+ .probe = ks2_eth_probe,
+ .remove = ks2_eth_remove,
+ .ops = &ks2_eth_ops,
+ .priv_auto_alloc_size = sizeof(struct ks2_eth_priv),
+ .platdata_auto_alloc_size = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};
+
+U_BOOT_DRIVER(eth_ks2) = {
+ .name = "eth_ks2",
+ .id = UCLASS_ETH,
+ .of_match = ks2_eth_ids,
+ .ofdata_to_platdata = ks2_eth_ofdata_to_platdata,
+ .probe = ks2_eth_probe,
+ .remove = ks2_eth_remove,
+ .ops = &ks2_eth_ops,
+ .priv_auto_alloc_size = sizeof(struct ks2_eth_priv),
+ .platdata_auto_alloc_size = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};