@@ -27,18 +27,74 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE
+ depends on (MICROBLAZE || ARCH_ZYNQ)
select PHYLIB
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs.
-config XILINX_LL_TEMAC
- tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on (PPC || MICROBLAZE)
+#config XILINX_LL_TEMAC
+# tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+# depends on (PPC || MICROBLAZE)
+# select PHYLIB
+# ---help---
+# This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+# core used in Xilinx Spartan and Virtex FPGAs
+
+config XILINX_LLTEMAC
+ tristate "Xilinx LLTEMAC 10/100/1000 Ethernet MAC driver"
+ depends on XILINX_DRIVERS
+ select XILINX_EDK
+ select NEED_XILINX_LLDMA
+ help
+ This driver supports the 10/100/1000 LLTEMAC.
+
+choice
+ prompt "Xilinx LLTEMAC PHY Support"
+ depends on XILINX_LLTEMAC
+ default XILINX_LLTEMAC_MARVELL_88E1111_GMII
+
+config XILINX_LLTEMAC_MARVELL_88E1111_RGMII
+ bool "MARVELL 88E1111 using RGMII"
+ help
+ This phy is used by many Xilinx boards. This option includes
+ code for enabling RGMII over copper.
+
+config XILINX_LLTEMAC_MARVELL_88E1111_GMII
+ bool "MARVELL 88E1111 using GMII"
+ help
+ This phy is used by many Xilinx boards. This option includes
+ code for enabling GMII over copper, and for setting the correct
+ speed based on whatever the phy is able to autonegotiate. This is
+ usually the best option to use on ML40x and ML50x boards.
+
+config XILINX_LLTEMAC_MARVELL_88E1111_MII
+ bool "MARVELL 88E1111 using MII or other PHY"
+ help
+ If your physical interface is not covered by the other
+ selections, then choose this option. This option includes generic
+ speed autonegotiation code.
+
+config XILINX_LLTEMAC_XILINX_1000BASEX
+ bool "Xilinx 1000BASE-X PHY"
+ help
+ This PHY supports physical attachment via GT/GTP/GTX transceivers.
+
+endchoice
+
+config XILINX_PS_EMAC
+ tristate "Xilinx PS tri-speed EMAC support"
+ depends on ARCH_ZYNQ
select PHYLIB
---help---
- This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
- core used in Xilinx Spartan and Virtex FPGAs
+ This driver supports the tri-speed EMAC in the Zynq Processing System (PS).
+
+config XILINX_PS_EMAC_HWTSTAMP
+ bool "Generate hardware packet timestamps"
+ depends on XILINX_PS_EMAC
+ default n
+ ---help---
+ Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+
endif # NET_VENDOR_XILINX
@@ -5,5 +5,8 @@
ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
+obj-$(CONFIG_XILINX_PS_EMAC) += xilinx_emacps.o
xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
+
+obj-$(CONFIG_XILINX_LLTEMAC) += xilinx_lltemac/
@@ -36,7 +36,6 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
@@ -244,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
if (!lp->rx_bd_v)
goto out;
@@ -298,12 +297,6 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
- /* Init descriptor indexes */
- lp->tx_bd_ci = 0;
- lp->tx_bd_next = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
return 0;
out:
@@ -685,15 +678,12 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev)) {
+ if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
return NETDEV_TX_BUSY;
}
@@ -709,11 +699,12 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= STS_CTRL_APP0_SOP;
cur_p->len = skb_headlen(skb);
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
- DMA_TO_DEVICE);
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
cur_p->app4 = (unsigned long)skb;
for (ii = 0; ii < num_frag; ii++) {
+ frag = &skb_shinfo(skb)->frags[ii];
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
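For context on the dma_map_single() length change above: a fragmented skb keeps only skb_headlen(skb) bytes in its linear buffer at skb->data, so mapping skb->len from there would map past the linear area. A hedged illustration of the invariant (not taken from this patch):

	/* For any skb:
	 *   skb->len == skb_headlen(skb) + sum of skb_frag_size() over
	 *               skb_shinfo(skb)->frags[0 .. nr_frags-1]
	 * Only the skb_headlen(skb) portion is contiguous at skb->data;
	 * each fragment is mapped separately in the per-fragment loop.
	 */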
@@ -724,7 +715,6 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_size(frag), DMA_TO_DEVICE);
cur_p->len = skb_frag_size(frag);
cur_p->app0 = 0;
- frag++;
}
cur_p->app0 |= STS_CTRL_APP0_EOP;
@@ -1014,7 +1004,7 @@ static int temac_of_probe(struct platform_device *op)
return -ENOMEM;
ether_setup(ndev);
- platform_set_drvdata(op, ndev);
+ dev_set_drvdata(&op->dev, ndev);
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
@@ -1052,12 +1042,14 @@ static int temac_of_probe(struct platform_device *op)
/* Setup checksum offload, but default to off if not specified */
lp->temac_features = 0;
p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
+ if (p)
+ dev_info(&op->dev, "TX_CSUM %d\n", be32_to_cpup(p));
if (p && be32_to_cpu(*p)) {
lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
}
p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
+ if (p)
+ dev_info(&op->dev, "RX_CSUM %d\n", be32_to_cpup(p));
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
@@ -1105,14 +1097,15 @@ static int temac_of_probe(struct platform_device *op)
}
temac_init_mac_address(ndev, (void *)addr);
- rc = temac_mdio_setup(lp, op->dev.of_node);
- if (rc)
- dev_warn(&op->dev, "error registering MDIO bus\n");
-
lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- if (lp->phy_node)
+ if (lp->phy_node) {
dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
+ rc = temac_mdio_setup(lp, op->dev.of_node);
+ if (rc)
+ dev_warn(&op->dev, "error registering MDIO bus\n");
+ }
+
/* Add the device attributes */
rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
if (rc) {
@@ -1143,7 +1136,7 @@ static int temac_of_probe(struct platform_device *op)
static int temac_of_remove(struct platform_device *op)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = dev_get_drvdata(&op->dev);
struct temac_local *lp = netdev_priv(ndev);
temac_mdio_teardown(lp);
@@ -1152,6 +1145,7 @@ static int temac_of_remove(struct platform_device *op)
if (lp->phy_node)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
+ dev_set_drvdata(&op->dev, NULL);
iounmap(lp->regs);
if (lp->sdma_regs)
iounmap(lp->sdma_regs);
@@ -63,6 +63,7 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
int clk_div;
int rc, size;
struct resource res;
+ struct device_node *np1 = of_get_parent(lp->phy_node);
/* Calculate a reasonable divisor for the clock rate */
clk_div = 0x3f; /* worst-case default setting */
@@ -85,7 +86,7 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
if (!bus)
return -ENOMEM;
- of_address_to_resource(np, 0, &res);
+ of_address_to_resource(np1, 0, &res);
snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
(unsigned long long)res.start);
bus->priv = lp;
@@ -97,7 +98,7 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
lp->mii_bus = bus;
- rc = of_mdiobus_register(bus, np);
+ rc = of_mdiobus_register(bus, np1);
if (rc)
goto err_register;
@@ -9,18 +9,19 @@
#define XILINX_AXIENET_H
#include <linux/netdevice.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
-#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
-#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
/* Configuration options */
@@ -337,6 +338,14 @@
#define DELAY_OF_ONE_MILLISEC 1000
+/* Read/Write access to the registers */
+#ifndef out_be32
+#ifdef CONFIG_ARCH_ZYNQ
+#define in_be32(offset) __raw_readl(offset)
+#define out_be32(offset, val) __raw_writel(val, offset)
+#endif
+#endif
+
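For reference, the register accessors already defined later in this header look roughly like the following; with the CONFIG_ARCH_ZYNQ macros above they now resolve to __raw_readl()/__raw_writel():

static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
{
	return in_be32(lp->regs + offset);
}

static inline void axienet_iow(struct axienet_local *lp, off_t offset,
			       u32 value)
{
	out_be32(lp->regs + offset, value);
}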
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
@@ -448,7 +457,7 @@ struct axienet_local {
u32 rx_bd_ci;
u32 max_frm_size;
- u32 jumbo_support;
+ u32 rxmem;
int csum_offload_on_tx_path;
int csum_offload_on_rx_path;
@@ -201,15 +201,17 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/*
* Allocate the Tx and Rx buffer descriptors.
*/
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p,
+ GFP_KERNEL | __GFP_ZERO);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p,
+ GFP_KERNEL | __GFP_ZERO);
if (!lp->rx_bd_v)
goto out;
@@ -263,7 +265,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -273,7 +276,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting.
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -354,7 +358,8 @@ static void axienet_set_multicast_list(struct net_device *ndev)
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
* promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it. */
+ * the flag is already set. If not we set it.
+ */
ndev->flags |= IFF_PROMISC;
reg = axienet_ior(lp, XAE_FMI_OFFSET);
reg |= XAE_FMI_PM_MASK;
@@ -438,14 +443,15 @@ static void __axienet_device_reset(struct axienet_local *lp,
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
- * reset process. */
+ * reset process.
+ */
axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- dev_err(dev, "axienet_device_reset DMA "
- "reset timeout!\n");
+ dev_err(dev,
+ "axienet_device_reset DMA reset timeout!\n");
break;
}
}
@@ -471,18 +477,21 @@ static void axienet_device_reset(struct net_device *ndev)
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+ lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU) &&
- (lp->jumbo_support)) {
- lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
- XAE_TRL_SIZE;
- lp->options |= XAE_OPTION_JUMBO;
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
+ lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE;
+
+ if (lp->max_frm_size <= lp->rxmem)
+ lp->options |= XAE_OPTION_JUMBO;
}
if (axienet_dma_bd_init(ndev)) {
- dev_err(&ndev->dev, "axienet_device_reset descriptor "
+ dev_err(&ndev->dev,
+ "axienet_device_reset descriptor "
"allocation failed\n");
}
@@ -497,7 +506,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -549,7 +559,8 @@ static void axienet_adjust_link(struct net_device *ndev)
emmc_reg |= XAE_EMMC_LINKSPD_10;
break;
default:
- dev_err(&ndev->dev, "Speed other than 10, 100 "
+ dev_err(&ndev->dev,
+ "Speed other than 10, 100 "
"or 1Gbps is not supported\n");
break;
}
@@ -558,8 +569,8 @@ static void axienet_adjust_link(struct net_device *ndev)
lp->last_link = link_state;
phy_print_status(phy);
} else {
- dev_err(&ndev->dev, "Error setting Axi Ethernet "
- "mac speed\n");
+ dev_err(&ndev->dev,
+ "Error setting Axi Ethernet mac speed\n");
}
}
}
@@ -601,7 +612,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+ ++lp->tx_bd_ci;
+ lp->tx_bd_ci %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
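The split of the post-increment from the modulo assignment above is not only cosmetic: the old form "lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;" modifies the same object twice without a sequence point, which is undefined behaviour in C and is flagged by gcc's -Wsequence-point. A small standalone sketch of the safe ring-index advance (illustrative values):

#include <stdio.h>

#define TX_BD_NUM 64

int main(void)
{
	unsigned int ci = TX_BD_NUM - 1;

	/* old:  ci = ++ci % TX_BD_NUM;   -- two unsequenced writes to ci */
	++ci;			/* new: one modification ... */
	ci %= TX_BD_NUM;	/* ... then wrap the ring index */
	printf("%u\n", ci);	/* prints 0 */
	return 0;
}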
@@ -687,7 +699,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -701,9 +714,12 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app4 = (unsigned long)skb;
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ wmb();
+
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
return NETDEV_TX_OK;
}
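The wmb() added before the tail-pointer write documents an ordering requirement: the buffer-descriptor fields must be visible in memory before the MMIO write that hands the descriptor chain to the DMA engine. A hedged outline of the sequence, using names from this driver:

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->app4 = (unsigned long)skb;	/* 1. descriptor fields written */
	wmb();					/* 2. ordered before ...        */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); /* 3. the DMA doorbell */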
@@ -723,15 +739,16 @@ static void axienet_recv(struct net_device *ndev)
u32 csumstatus;
u32 size = 0;
u32 packets = 0;
- dma_addr_t tail_p;
+ dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ rmb();
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
skb = (struct sk_buff *) (cur_p->sw_id_offset);
length = cur_p->app4 & 0x0000FFFF;
@@ -775,14 +792,16 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->sw_id_offset = (u32) new_skb;
- lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+ ++lp->rx_bd_ci;
+ lp->rx_bd_ci %= RX_BD_NUM;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ if (tail_p)
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -804,6 +823,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
axienet_start_xmit_done(lp->ndev);
goto out;
}
@@ -827,9 +847,9 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -852,6 +872,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
axienet_recv(lp->ndev);
goto out;
}
@@ -875,9 +896,9 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -910,7 +931,8 @@ static int axienet_open(struct net_device *ndev)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
(mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
axienet_device_reset(ndev);
@@ -921,14 +943,20 @@ static int axienet_open(struct net_device *ndev)
return ret;
if (lp->phy_node) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ if (lp->phy_type == XAE_PHY_TYPE_GMII) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
axienet_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
- if (!lp->phy_dev) {
- dev_err(lp->dev, "of_phy_connect() failed\n");
- return -ENODEV;
+ } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
}
- phy_start(lp->phy_dev);
+
+ if (!lp->phy_dev)
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ else
+ phy_start(lp->phy_dev);
}
/* Enable tasklets for Axi DMA error handling */
@@ -1013,15 +1041,15 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- if (lp->jumbo_support) {
- if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- } else {
- if ((new_mtu > XAE_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- }
+
+ if ((new_mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE) > lp->rxmem)
+ return -EINVAL;
+
+ if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
return 0;
}
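The reworked check replaces the old jumbo_support flag with an explicit comparison against the Rx memory size read from the "xlnx,rxmem" property. A standalone sketch of the arithmetic, assuming the kernel's VLAN_ETH_HLEN of 18 bytes and illustrative rxmem values:

#include <stdio.h>

#define XAE_TRL_SIZE	4	/* FCS */
#define VLAN_ETH_HLEN	18	/* Ethernet header + VLAN tag */
#define XAE_JUMBO_MTU	9000

static int mtu_ok(int new_mtu, unsigned int rxmem)
{
	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > rxmem)
		return 0;	/* frame would not fit in Rx memory */
	if (new_mtu > XAE_JUMBO_MTU || new_mtu < 64)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", mtu_ok(1500, 0x1000));	/* 1: 1522 bytes fit in 4 KB  */
	printf("%d\n", mtu_ok(9000, 0x1000));	/* 0: 9022 bytes do not fit   */
	printf("%d\n", mtu_ok(9000, 0x4000));	/* 1: fits in 16 KB Rx memory */
	return 0;
}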
@@ -1046,6 +1074,20 @@ static void axienet_poll_controller(struct net_device *ndev)
}
#endif
+/* Ioctl MII Interface */
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct axienet_local *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phy_dev)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(priv->phy_dev, rq, cmd);
+}
+
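The new .ndo_do_ioctl hook forwards MII requests to the attached PHY via phy_mii_ioctl(). A hedged userspace sketch of how that path gets exercised (the interface name "eth0" is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/mii.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
		mii->reg_num = MII_BMSR;
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}
	close(fd);
	return 0;
}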
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1054,6 +1096,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = axienet_set_multicast_list,
+ .ndo_do_ioctl = axienet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
#endif
@@ -1209,7 +1252,7 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparam:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
@@ -1223,8 +1266,9 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ dev_err(&ndev->dev,
+ "%s: Please stop netif before applying configuration\n",
+ ndev->name);
return -EFAULT;
}
@@ -1280,8 +1324,9 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ dev_err(&ndev->dev,
+ "%s: Please stop netif before applying configuration\n",
+ ndev->name);
return -EFAULT;
}
@@ -1350,7 +1395,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
~XAE_MDIO_MC_MDIOEN_MASK));
@@ -1421,7 +1467,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -1431,7 +1478,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -1447,7 +1495,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -1456,9 +1505,8 @@ static void axienet_dma_err_handler(unsigned long data)
}
/**
- * axienet_of_probe - Axi Ethernet probe function.
- * @op: Pointer to platform device structure.
- * @match: Pointer to device id structure
+ * axienet_probe - Axi Ethernet probe function.
+ * @pdev: Pointer to platform device structure.
*
* returns: 0, on success
* Non-zero error value on failure.
@@ -1468,65 +1516,73 @@ static void axienet_dma_err_handler(unsigned long data)
* device. Parses through device tree and populates fields of
* axienet_local. It registers the Ethernet device.
*/
-static int axienet_of_probe(struct platform_device *op)
+static int axienet_probe(struct platform_device *pdev)
{
- __be32 *p;
- int size, ret = 0;
+ int ret;
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- const void *addr;
+ u8 mac_addr[6];
+ struct resource *ethres, dmares;
+ u32 value;
ndev = alloc_etherdev(sizeof(*lp));
if (!ndev)
return -ENOMEM;
ether_setup(ndev);
- platform_set_drvdata(op, ndev);
+ platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_FRAGLIST;
ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
lp = netdev_priv(ndev);
lp->ndev = ndev;
- lp->dev = &op->dev;
+ lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
/* Map device registers */
- lp->regs = of_iomap(op->dev.of_node, 0);
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (!lp->regs) {
- dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
- goto nodev;
+ dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
+ ret = -ENOMEM;
+ goto free_netdev;
}
+
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
+ if (!ret) {
+ dev_info(&pdev->dev, "TX_CSUM %d\n", value);
+
+ switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
case 2:
lp->csum_offload_on_tx_path =
XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
default:
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
+ if (!ret) {
+ dev_info(&pdev->dev, "RX_CSUM %d\n", value);
+
+ switch (value) {
case 1:
lp->csum_offload_on_rx_path =
XAE_FEATURE_PARTIAL_RX_CSUM;
@@ -1542,85 +1598,79 @@ static int axienet_of_probe(struct platform_device *op)
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
- * a larger Rx/Tx Memory. Typically, the size must be more than or
- * equal to 16384 bytes, so that we can enable jumbo option and start
- * supporting jumbo frames. Here we check for memory allocated for
- * Rx/Tx in the hardware from the device-tree and accordingly set
- * flags. */
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
- if (p) {
- if ((be32_to_cpup(p)) >= 0x4000)
- lp->jumbo_support = 1;
- }
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
- NULL);
- if (p)
- lp->temac_type = be32_to_cpup(p);
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
- if (p)
- lp->phy_type = be32_to_cpup(p);
+ * a larger Rx/Tx Memory. Typically, the size must be large so that
+ * we can enable jumbo option and start supporting jumbo frames.
+ * Here we check for memory allocated for Rx/Tx in the hardware from
+ * the device-tree and accordingly set flags.
+ */
+ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ of_property_read_u32(pdev->dev.of_node, "xlnx,temac-type",
+ &lp->temac_type);
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&op->dev, "could not find DMA node\n");
- goto err_iounmap;
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ ret = -ENODEV;
+ goto free_netdev;
}
- lp->dma_regs = of_iomap(np, 0);
- if (lp->dma_regs) {
- dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
- } else {
- dev_err(&op->dev, "unable to map DMA registers\n");
- of_node_put(np);
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (!lp->dma_regs) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = -ENOMEM;
+ goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&op->dev, "could not determine irqs\n");
+ dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
- goto err_iounmap_2;
+ goto free_netdev;
}
/* Retrieve the MAC address */
- addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
- if ((!addr) || (size != 6)) {
- dev_err(&op->dev, "could not find MAC address\n");
- ret = -ENODEV;
- goto err_iounmap_2;
+ ret = of_property_read_u8_array(pdev->dev.of_node,
+ "local-mac-address", mac_addr, 6);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find MAC address\n");
+ goto free_netdev;
}
- axienet_set_mac_address(ndev, (void *) addr);
+ axienet_set_mac_address(ndev, (void *) mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- ret = axienet_mdio_setup(lp, op->dev.of_node);
- if (ret)
- dev_warn(&op->dev, "error registering MDIO bus\n");
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node) {
+ ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ if (ret)
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ }
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto err_iounmap_2;
+ goto free_netdev;
}
return 0;
-err_iounmap_2:
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
-err_iounmap:
- iounmap(lp->regs);
-nodev:
+free_netdev:
free_netdev(ndev);
- ndev = NULL;
+
return ret;
}
-static int axienet_of_remove(struct platform_device *op)
+static int axienet_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
axienet_mdio_teardown(lp);
@@ -1630,17 +1680,14 @@ static int axienet_of_remove(struct platform_device *op)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- iounmap(lp->regs);
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
free_netdev(ndev);
return 0;
}
-static struct platform_driver axienet_of_driver = {
- .probe = axienet_of_probe,
- .remove = axienet_of_remove,
+static struct platform_driver axienet_driver = {
+ .probe = axienet_probe,
+ .remove = axienet_remove,
.driver = {
.owner = THIS_MODULE,
.name = "xilinx_axienet",
@@ -1648,7 +1695,7 @@ static struct platform_driver axienet_of_driver = {
},
};
-module_platform_driver(axienet_of_driver);
+module_platform_driver(axienet_driver);
MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
@@ -128,11 +128,12 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
{
int ret;
- u32 clk_div, host_clock;
- u32 *property_p;
+ u32 clk_div;
struct mii_bus *bus;
struct resource res;
struct device_node *np1;
+ /* the ethernet controller device node */
+ struct device_node *npp = NULL;
/* clk_div can be calculated by deriving it from the equation:
* fMDIO = fHOST / ((1 + clk_div) * 2)
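A standalone sketch of the divisor math described above, using an illustrative 111 MHz host clock and the driver's 2.5 MHz MDIO ceiling (MAX_MDIO_FREQ):

#include <stdio.h>

#define MAX_MDIO_FREQ	2500000		/* 2.5 MHz */

int main(void)
{
	unsigned int host_clock = 111111111;	/* illustrative "clock-frequency" */
	unsigned int clk_div;

	/* fMDIO = fHOST / ((1 + clk_div) * 2)  =>  clk_div = fHOST / (2 * fMDIO) - 1 */
	clk_div = host_clock / (MAX_MDIO_FREQ * 2) - 1;
	if (host_clock % (MAX_MDIO_FREQ * 2))
		clk_div++;		/* round up so fMDIO stays <= 2.5 MHz */

	printf("clk_div = %u -> fMDIO = %u Hz\n",
	       clk_div, host_clock / ((1 + clk_div) * 2));	/* 22 -> 2415459 Hz */
	return 0;
}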
@@ -158,42 +159,49 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
* fHOST can be read from the flattened device tree as property
* "clock-frequency" from the CPU
*/
-
- np1 = of_find_node_by_name(NULL, "cpu");
- if (!np1) {
- printk(KERN_WARNING "%s(): Could not find CPU device node.",
- __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- goto issue;
- }
- property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
- if (!property_p) {
- printk(KERN_WARNING "%s(): Could not find CPU property: "
- "clock-frequency.", __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ np1 = of_get_parent(lp->phy_node);
+ if (np1)
+ npp = of_get_parent(np1);
+ if (!npp) {
+ dev_warn(lp->dev,
+ "Could not find ethernet controller device node.");
+ dev_warn(lp->dev, "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
- of_node_put(np1);
- goto issue;
+ } else {
+ u32 *property_p;
+
+ property_p = (uint32_t *)of_get_property(npp,
+ "clock-frequency", NULL);
+ if (!property_p) {
+ dev_warn(lp->dev,
+ "Could not find ethernet controller "
+ "clock-frequency property.");
+ dev_warn(lp->dev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
+ clk_div = DEFAULT_CLOCK_DIVISOR;
+ } else {
+ u32 host_clock = be32_to_cpup(property_p);
+
+ clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+
+ /* If there is any remainder from the division of
+ * fHOST / (MAX_MDIO_FREQ * 2), then we need to add 1
+ * to the clock divisor or we will surely be
+ * above 2.5 MHz
+ */
+ if (host_clock % (MAX_MDIO_FREQ * 2))
+ clk_div++;
+ dev_dbg(lp->dev,
+ "Setting MDIO clock divisor to %u "
+ "based on %u Hz host clock.\n",
+ clk_div, host_clock);
+ }
}
- host_clock = be32_to_cpup(property_p);
- clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
- /* If there is any remainder from the division of
- * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz */
- if (host_clock % (MAX_MDIO_FREQ * 2))
- clk_div++;
-
- printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
- "on %u Hz host clock.\n", __func__, clk_div, host_clock);
-
- of_node_put(np1);
-issue:
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, (((u32)clk_div) |
+ XAE_MDIO_MC_MDIOEN_MASK));
ret = axienet_mdio_wait_until_ready(lp);
if (ret < 0)
@@ -203,8 +211,7 @@ issue:
if (!bus)
return -ENOMEM;
- np1 = of_get_parent(lp->phy_node);
- of_address_to_resource(np1, 0, &res);
+ of_address_to_resource(npp, 0, &res);
snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
(unsigned long long) res.start);
@@ -233,7 +240,6 @@ issue:
void axienet_mdio_teardown(struct axienet_local *lp)
{
mdiobus_unregister(lp->mii_bus);
- kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
lp->mii_bus = NULL;
}
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -1172,7 +1173,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (mac_address)
/* Set the MAC address. */
- memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
+ memcpy(ndev->dev_addr, mac_address, 6);
else
dev_warn(dev, "No MAC address found\n");
new file mode 100644
@@ -0,0 +1,2922 @@
+/*
+ * Xilinx Ethernet: Linux driver for Ethernet.
+ *
+ * Author: Xilinx, Inc.
+ *
+ * 2010 (c) Xilinx, Inc. This file is licensed under the terms of the GNU
+ * General Public License version 2. This program is licensed "as is"
+ * without any warranty of any kind, whether express or implied.
+ *
+ * This is a driver for the Xilinx processor sub-system (PS) Ethernet device.
+ * This driver is mainly used on Linux 2.6.30 and above; it does _not_
+ * support the Linux 2.4 kernel because it relies on newer kernel features
+ * (e.g. NAPI).
+ *
+ * TODO:
+ * 1. JUMBO frames are not enabled per the EP spec. If that support is
+ * added, update this driver and set MAX_MTU to 9000.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+
+/************************** Constant Definitions *****************************/
+
+/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
+#define DRIVER_NAME "xemacps"
+#define DRIVER_DESCRIPTION "Xilinx Tri-Mode Ethernet MAC driver"
+#define DRIVER_VERSION "1.00a"
+
+/* Transmission timeout is 3 seconds. */
+#define TX_TIMEOUT (3*HZ)
+
+/* for RX skb IP header word-aligned */
+#define RX_IP_ALIGN_OFFSET 2
+
+/* DMA buffer descriptors are aligned on an 8-byte boundary. */
+#define ALIGNMENT_BD 8
+
+/* Maximum value for hash bits. 2**6 */
+#define XEMACPS_MAX_HASH_BITS 64
+
+/* MDC clock division
+ * currently supporting 8, 16, 32, 48, 64, 96, 128, 224.
+ */
+enum { MDC_DIV_8 = 0, MDC_DIV_16, MDC_DIV_32, MDC_DIV_48,
+MDC_DIV_64, MDC_DIV_96, MDC_DIV_128, MDC_DIV_224 };
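An illustrative helper (not part of this driver) showing how one of these divisor codes could be chosen so the MDC clock stays at or below the usual 2.5 MHz limit for a given pclk:

static int xemacps_pick_mdc_div(unsigned long pclk_hz)
{
	static const unsigned int divs[] = { 8, 16, 32, 48, 64, 96, 128, 224 };
	int i;

	for (i = 0; i < ARRAY_SIZE(divs); i++)
		if (pclk_hz / divs[i] <= 2500000)
			return i;	/* maps onto MDC_DIV_8 .. MDC_DIV_224 */
	return MDC_DIV_224;		/* slowest available divisor */
}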
+
+/* Specify the receive buffer size in bytes, a multiple of 64: 64, 128, 192, ... 10240 */
+#define XEMACPS_RX_BUF_SIZE 1536
+
+/* Number of receive buffer bytes as a unit, this is HW setup */
+#define XEMACPS_RX_BUF_UNIT 64
+
+/* Default SEND and RECV buffer descriptors (BD) numbers.
+ * BD Space needed is (XEMACPS_SEND_BD_CNT+XEMACPS_RECV_BD_CNT)*8
+ */
+#undef DEBUG
+#define DEBUG
+
+#define XEMACPS_SEND_BD_CNT 256
+#define XEMACPS_RECV_BD_CNT 256
+
+#define XEMACPS_NAPI_WEIGHT 64
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit. Names are self-explanatory.
+ */
+#define XEMACPS_NWCTRL_OFFSET 0x00000000 /* Network Control reg */
+#define XEMACPS_NWCFG_OFFSET 0x00000004 /* Network Config reg */
+#define XEMACPS_NWSR_OFFSET 0x00000008 /* Network Status reg */
+#define XEMACPS_USERIO_OFFSET 0x0000000C /* User IO reg */
+#define XEMACPS_DMACR_OFFSET 0x00000010 /* DMA Control reg */
+#define XEMACPS_TXSR_OFFSET 0x00000014 /* TX Status reg */
+#define XEMACPS_RXQBASE_OFFSET 0x00000018 /* RX Q Base address reg */
+#define XEMACPS_TXQBASE_OFFSET 0x0000001C /* TX Q Base address reg */
+#define XEMACPS_RXSR_OFFSET 0x00000020 /* RX Status reg */
+#define XEMACPS_ISR_OFFSET 0x00000024 /* Interrupt Status reg */
+#define XEMACPS_IER_OFFSET 0x00000028 /* Interrupt Enable reg */
+#define XEMACPS_IDR_OFFSET 0x0000002C /* Interrupt Disable reg */
+#define XEMACPS_IMR_OFFSET 0x00000030 /* Interrupt Mask reg */
+#define XEMACPS_PHYMNTNC_OFFSET 0x00000034 /* Phy Maintenance reg */
+#define XEMACPS_RXPAUSE_OFFSET 0x00000038 /* RX Pause Time reg */
+#define XEMACPS_TXPAUSE_OFFSET 0x0000003C /* TX Pause Time reg */
+#define XEMACPS_HASHL_OFFSET 0x00000080 /* Hash Low address reg */
+#define XEMACPS_HASHH_OFFSET 0x00000084 /* Hash High address reg */
+#define XEMACPS_LADDR1L_OFFSET 0x00000088 /* Specific1 addr low */
+#define XEMACPS_LADDR1H_OFFSET 0x0000008C /* Specific1 addr high */
+#define XEMACPS_LADDR2L_OFFSET 0x00000090 /* Specific2 addr low */
+#define XEMACPS_LADDR2H_OFFSET 0x00000094 /* Specific2 addr high */
+#define XEMACPS_LADDR3L_OFFSET 0x00000098 /* Specific3 addr low */
+#define XEMACPS_LADDR3H_OFFSET 0x0000009C /* Specific3 addr high */
+#define XEMACPS_LADDR4L_OFFSET 0x000000A0 /* Specific4 addr low */
+#define XEMACPS_LADDR4H_OFFSET 0x000000A4 /* Specific4 addr high */
+#define XEMACPS_MATCH1_OFFSET 0x000000A8 /* Type ID1 Match reg */
+#define XEMACPS_MATCH2_OFFSET 0x000000AC /* Type ID2 Match reg */
+#define XEMACPS_MATCH3_OFFSET 0x000000B0 /* Type ID3 Match reg */
+#define XEMACPS_MATCH4_OFFSET 0x000000B4 /* Type ID4 Match reg */
+#define XEMACPS_WOL_OFFSET 0x000000B8 /* Wake on LAN reg */
+#define XEMACPS_STRETCH_OFFSET 0x000000BC /* IPG Stretch reg */
+#define XEMACPS_SVLAN_OFFSET 0x000000C0 /* Stacked VLAN reg */
+#define XEMACPS_MODID_OFFSET 0x000000FC /* Module ID reg */
+#define XEMACPS_OCTTXL_OFFSET 0x00000100 /* Octets transmitted Low
 reg */
+#define XEMACPS_OCTTXH_OFFSET 0x00000104 /* Octets transmitted High
 reg */
+#define XEMACPS_TXCNT_OFFSET 0x00000108 /* Error-free Frames
 transmitted counter */
+#define XEMACPS_TXBCCNT_OFFSET 0x0000010C /* Error-free Broadcast
+ Frames counter*/
+#define XEMACPS_TXMCCNT_OFFSET 0x00000110 /* Error-free Multicast
+ Frame counter */
+#define XEMACPS_TXPAUSECNT_OFFSET 0x00000114 /* Pause Frames Transmitted
+ Counter */
+#define XEMACPS_TX64CNT_OFFSET 0x00000118 /* Error-free 64 byte Frames
+ Transmitted counter */
+#define XEMACPS_TX65CNT_OFFSET 0x0000011C /* Error-free 65-127 byte
+ Frames Transmitted counter */
+#define XEMACPS_TX128CNT_OFFSET 0x00000120 /* Error-free 128-255 byte
+ Frames Transmitted counter */
+#define XEMACPS_TX256CNT_OFFSET 0x00000124 /* Error-free 256-511 byte
+ Frames transmitted counter */
+#define XEMACPS_TX512CNT_OFFSET 0x00000128 /* Error-free 512-1023 byte
+ Frames transmitted counter */
+#define XEMACPS_TX1024CNT_OFFSET 0x0000012C /* Error-free 1024-1518 byte
+ Frames transmitted counter */
+#define XEMACPS_TX1519CNT_OFFSET 0x00000130 /* Error-free larger than
+ 1519 byte Frames transmitted
+ Counter */
+#define XEMACPS_TXURUNCNT_OFFSET 0x00000134 /* TX under run error
+ Counter */
+#define XEMACPS_SNGLCOLLCNT_OFFSET 0x00000138 /* Single Collision Frame
+ Counter */
+#define XEMACPS_MULTICOLLCNT_OFFSET 0x0000013C /* Multiple Collision Frame
+ Counter */
+#define XEMACPS_EXCESSCOLLCNT_OFFSET 0x00000140 /* Excessive Collision Frame
+ Counter */
+#define XEMACPS_LATECOLLCNT_OFFSET 0x00000144 /* Late Collision Frame
+ Counter */
+#define XEMACPS_TXDEFERCNT_OFFSET 0x00000148 /* Deferred Transmission
+ Frame Counter */
+#define XEMACPS_CSENSECNT_OFFSET 0x0000014C /* Carrier Sense Error
+ Counter */
+#define XEMACPS_OCTRXL_OFFSET 0x00000150 /* Octets Received register
 Low */
+#define XEMACPS_OCTRXH_OFFSET 0x00000154 /* Octets Received register
 High */
+#define XEMACPS_RXCNT_OFFSET 0x00000158 /* Error-free Frames
+ Received Counter */
+#define XEMACPS_RXBROADCNT_OFFSET 0x0000015C /* Error-free Broadcast
+ Frames Received Counter */
+#define XEMACPS_RXMULTICNT_OFFSET 0x00000160 /* Error-free Multicast
+ Frames Received Counter */
+#define XEMACPS_RXPAUSECNT_OFFSET 0x00000164 /* Pause Frames
+ Received Counter */
+#define XEMACPS_RX64CNT_OFFSET 0x00000168 /* Error-free 64 byte Frames
+ Received Counter */
+#define XEMACPS_RX65CNT_OFFSET 0x0000016C /* Error-free 65-127 byte
+ Frames Received Counter */
+#define XEMACPS_RX128CNT_OFFSET 0x00000170 /* Error-free 128-255 byte
+ Frames Received Counter */
+#define XEMACPS_RX256CNT_OFFSET 0x00000174 /* Error-free 256-512 byte
+ Frames Received Counter */
+#define XEMACPS_RX512CNT_OFFSET 0x00000178 /* Error-free 512-1023 byte
+ Frames Received Counter */
+#define XEMACPS_RX1024CNT_OFFSET 0x0000017C /* Error-free 1024-1518 byte
+ Frames Received Counter */
+#define XEMACPS_RX1519CNT_OFFSET 0x00000180 /* Error-free 1519-max byte
+ Frames Received Counter */
+#define XEMACPS_RXUNDRCNT_OFFSET 0x00000184 /* Undersize Frames Received
+ Counter */
+#define XEMACPS_RXOVRCNT_OFFSET 0x00000188 /* Oversize Frames Received
+ Counter */
+#define XEMACPS_RXJABCNT_OFFSET 0x0000018C /* Jabbers Received
+ Counter */
+#define XEMACPS_RXFCSCNT_OFFSET 0x00000190 /* Frame Check Sequence
+ Error Counter */
+#define XEMACPS_RXLENGTHCNT_OFFSET 0x00000194 /* Length Field Error
+ Counter */
+#define XEMACPS_RXSYMBCNT_OFFSET 0x00000198 /* Symbol Error Counter */
+#define XEMACPS_RXALIGNCNT_OFFSET 0x0000019C /* Alignment Error
+ Counter */
+#define XEMACPS_RXRESERRCNT_OFFSET 0x000001A0 /* Receive Resource Error
+ Counter */
+#define XEMACPS_RXORCNT_OFFSET 0x000001A4 /* Receive Overrun */
+#define XEMACPS_RXIPCCNT_OFFSET 0x000001A8 /* IP header Checksum Error
+ Counter */
+#define XEMACPS_RXTCPCCNT_OFFSET 0x000001AC /* TCP Checksum Error
+ Counter */
+#define XEMACPS_RXUDPCCNT_OFFSET 0x000001B0 /* UDP Checksum Error
+ Counter */
+
+#define XEMACPS_1588S_OFFSET 0x000001D0 /* 1588 Timer Seconds */
+#define XEMACPS_1588NS_OFFSET 0x000001D4 /* 1588 Timer Nanoseconds */
+#define XEMACPS_1588ADJ_OFFSET 0x000001D8 /* 1588 Timer Adjust */
+#define XEMACPS_1588INC_OFFSET 0x000001DC /* 1588 Timer Increment */
+#define XEMACPS_PTPETXS_OFFSET 0x000001E0 /* PTP Event Frame
+ Transmitted Seconds */
+#define XEMACPS_PTPETXNS_OFFSET 0x000001E4 /* PTP Event Frame
+ Transmitted Nanoseconds */
+#define XEMACPS_PTPERXS_OFFSET 0x000001E8 /* PTP Event Frame Received
+ Seconds */
+#define XEMACPS_PTPERXNS_OFFSET 0x000001EC /* PTP Event Frame Received
+ Nanoseconds */
+#define XEMACPS_PTPPTXS_OFFSET 0x000001E0 /* PTP Peer Frame
+ Transmitted Seconds */
+#define XEMACPS_PTPPTXNS_OFFSET 0x000001E4 /* PTP Peer Frame
+ Transmitted Nanoseconds */
+#define XEMACPS_PTPPRXS_OFFSET 0x000001E8 /* PTP Peer Frame Received
+ Seconds */
+#define XEMACPS_PTPPRXNS_OFFSET 0x000001EC /* PTP Peer Frame Received
+ Nanoseconds */
+
+/* network control register bit definitions */
+#define XEMACPS_NWCTRL_FLUSH_DPRAM_MASK 0x00040000
+#define XEMACPS_NWCTRL_RXTSTAMP_MASK 0x00008000 /* RX Timestamp in CRC */
+#define XEMACPS_NWCTRL_ZEROPAUSETX_MASK 0x00001000 /* Transmit zero quantum
+ pause frame */
+#define XEMACPS_NWCTRL_PAUSETX_MASK 0x00000800 /* Transmit pause frame */
+#define XEMACPS_NWCTRL_HALTTX_MASK 0x00000400 /* Halt transmission
+ after current frame */
+#define XEMACPS_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */
+
+#define XEMACPS_NWCTRL_STATWEN_MASK 0x00000080 /* Enable writing to
+ stat counters */
+#define XEMACPS_NWCTRL_STATINC_MASK 0x00000040 /* Increment statistic
+ registers */
+#define XEMACPS_NWCTRL_STATCLR_MASK 0x00000020 /* Clear statistic
+ registers */
+#define XEMACPS_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
+#define XEMACPS_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
+#define XEMACPS_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
+#define XEMACPS_NWCTRL_LOOPEN_MASK 0x00000002 /* local loopback */
+
+/* name network configuration register bit definitions */
+#define XEMACPS_NWCFG_BADPREAMBEN_MASK 0x20000000 /* disable rejection of
+ non-standard preamble */
+#define XEMACPS_NWCFG_IPDSTRETCH_MASK 0x10000000 /* enable transmit IPG */
+#define XEMACPS_NWCFG_FCSIGNORE_MASK 0x04000000 /* disable rejection of
+ FCS error */
+#define XEMACPS_NWCFG_HDRXEN_MASK 0x02000000 /* RX half duplex */
+#define XEMACPS_NWCFG_RXCHKSUMEN_MASK 0x01000000 /* enable RX checksum
+ offload */
+#define XEMACPS_NWCFG_PAUSECOPYDI_MASK 0x00800000 /* Do not copy pause
+ Frames to memory */
+#define XEMACPS_NWCFG_MDC_SHIFT_MASK 18 /* shift bits for MDC */
+#define XEMACPS_NWCFG_MDCCLKDIV_MASK 0x001C0000 /* MDC Mask PCLK divisor */
+#define XEMACPS_NWCFG_FCSREM_MASK 0x00020000 /* Discard FCS from
+ received frames */
+#define XEMACPS_NWCFG_LENGTHERRDSCRD_MASK 0x00010000
+/* RX length error discard */
+#define XEMACPS_NWCFG_RXOFFS_MASK 0x0000C000 /* RX buffer offset */
+#define XEMACPS_NWCFG_PAUSEEN_MASK 0x00002000 /* Enable pause TX */
+#define XEMACPS_NWCFG_RETRYTESTEN_MASK 0x00001000 /* Retry test */
+#define XEMACPS_NWCFG_1000_MASK 0x00000400 /* Gigabit mode */
+#define XEMACPS_NWCFG_EXTADDRMATCHEN_MASK 0x00000200
+/* External address match enable */
+#define XEMACPS_NWCFG_UCASTHASHEN_MASK 0x00000080 /* Receive unicast hash
+ frames */
+#define XEMACPS_NWCFG_MCASTHASHEN_MASK 0x00000040 /* Receive multicast hash
+ frames */
+#define XEMACPS_NWCFG_BCASTDI_MASK 0x00000020 /* Do not receive
+ broadcast frames */
+#define XEMACPS_NWCFG_COPYALLEN_MASK 0x00000010 /* Copy all frames */
+
+#define XEMACPS_NWCFG_NVLANDISC_MASK 0x00000004 /* Receive only VLAN
+ frames */
+#define XEMACPS_NWCFG_FDEN_MASK 0x00000002 /* Full duplex */
+#define XEMACPS_NWCFG_100_MASK 0x00000001 /* 10 or 100 Mbps */
+
+/* network status register bit definitions */
+#define XEMACPS_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */
+#define XEMACPS_NWSR_MDIO_MASK 0x00000002 /* Status of mdio_in */
+
+/* MAC address register word 1 mask */
+#define XEMACPS_LADDR_MACH_MASK 0x0000FFFF /* Address bits[47:32]
+ bit[31:0] are in BOTTOM */
+
+/* DMA control register bit definitions */
+#define XEMACPS_DMACR_RXBUF_MASK 0x00FF0000 /* Mask bit for RX buffer
+ size */
+#define XEMACPS_DMACR_RXBUF_SHIFT 16 /* Shift bit for RX buffer
+ size */
+#define XEMACPS_DMACR_TCPCKSUM_MASK 0x00000800 /* enable/disable TX
+ checksum offload */
+#define XEMACPS_DMACR_TXSIZE_MASK 0x00000400 /* TX buffer memory size */
+#define XEMACPS_DMACR_RXSIZE_MASK 0x00000300 /* RX buffer memory size */
+#define XEMACPS_DMACR_ENDIAN_MASK 0x00000080 /* Endian configuration */
+#define XEMACPS_DMACR_BLENGTH_MASK 0x0000001F /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_INCR16 0x00000010 /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_INCR8 0x00000008 /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_INCR4 0x00000004 /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_SINGLE 0x00000002 /* Buffer burst length */
+
+/* transmit status register bit definitions */
+#define XEMACPS_TXSR_HRESPNOK_MASK 0x00000100 /* Transmit hresp not OK */
+#define XEMACPS_TXSR_COL1000_MASK 0x00000080 /* Collision Gbs mode */
+#define XEMACPS_TXSR_URUN_MASK 0x00000040 /* Transmit underrun */
+#define XEMACPS_TXSR_TXCOMPL_MASK 0x00000020 /* Transmit completed OK */
+#define XEMACPS_TXSR_BUFEXH_MASK 0x00000010 /* Transmit buffs exhausted
+ mid frame */
+#define XEMACPS_TXSR_TXGO_MASK 0x00000008 /* Status of go flag */
+#define XEMACPS_TXSR_RXOVR_MASK 0x00000004 /* Retry limit exceeded */
+#define XEMACPS_TXSR_COL100_MASK 0x00000002 /* Collision 10/100 mode */
+#define XEMACPS_TXSR_USEDREAD_MASK 0x00000001 /* TX buffer used bit set */
+
+#define XEMACPS_TXSR_ERROR_MASK (XEMACPS_TXSR_HRESPNOK_MASK | \
+ XEMACPS_TXSR_COL1000_MASK | \
+ XEMACPS_TXSR_URUN_MASK | \
+ XEMACPS_TXSR_BUFEXH_MASK | \
+ XEMACPS_TXSR_RXOVR_MASK | \
+ XEMACPS_TXSR_COL100_MASK | \
+ XEMACPS_TXSR_USEDREAD_MASK)
+
+/* receive status register bit definitions */
+#define XEMACPS_RXSR_HRESPNOK_MASK 0x00000008 /* Receive hresp not OK */
+#define XEMACPS_RXSR_RXOVR_MASK 0x00000004 /* Receive overrun */
+#define XEMACPS_RXSR_FRAMERX_MASK 0x00000002 /* Frame received OK */
+#define XEMACPS_RXSR_BUFFNA_MASK 0x00000001 /* RX buffer used bit set */
+
+#define XEMACPS_RXSR_ERROR_MASK (XEMACPS_RXSR_HRESPNOK_MASK | \
+ XEMACPS_RXSR_RXOVR_MASK | \
+ XEMACPS_RXSR_BUFFNA_MASK)
+
+/* interrupts bit definitions
+ * Bits definitions are same in XEMACPS_ISR_OFFSET,
+ * XEMACPS_IER_OFFSET, XEMACPS_IDR_OFFSET, and XEMACPS_IMR_OFFSET
+ */
+#define XEMACPS_IXR_PTPPSTX_MASK 0x02000000 /* PTP Psync transmitted */
+#define XEMACPS_IXR_PTPPDRTX_MASK 0x01000000 /* PTP Pdelay_req
+ transmitted */
+#define XEMACPS_IXR_PTPSTX_MASK 0x00800000 /* PTP Sync transmitted */
+#define XEMACPS_IXR_PTPDRTX_MASK 0x00400000 /* PTP Delay_req
+ transmitted */
+#define XEMACPS_IXR_PTPPSRX_MASK 0x00200000 /* PTP Psync received */
+#define XEMACPS_IXR_PTPPDRRX_MASK 0x00100000 /* PTP Pdelay_req
+ received */
+#define XEMACPS_IXR_PTPSRX_MASK 0x00080000 /* PTP Sync received */
+#define XEMACPS_IXR_PTPDRRX_MASK 0x00040000 /* PTP Delay_req received */
+#define XEMACPS_IXR_PAUSETX_MASK 0x00004000 /* Pause frame
+ transmitted */
+#define XEMACPS_IXR_PAUSEZERO_MASK 0x00002000 /* Pause time has reached
+ zero */
+#define XEMACPS_IXR_PAUSENZERO_MASK 0x00001000 /* Pause frame received */
+#define XEMACPS_IXR_HRESPNOK_MASK 0x00000800 /* hresp not ok */
+#define XEMACPS_IXR_RXOVR_MASK 0x00000400 /* Receive overrun
+ occurred */
+#define XEMACPS_IXR_TXCOMPL_MASK 0x00000080 /* Frame transmitted ok */
+#define XEMACPS_IXR_TXEXH_MASK 0x00000040 /* Transmit err occurred or
+ no buffers*/
+#define XEMACPS_IXR_RETRY_MASK 0x00000020 /* Retry limit exceeded */
+#define XEMACPS_IXR_URUN_MASK 0x00000010 /* Transmit underrun */
+#define XEMACPS_IXR_TXUSED_MASK 0x00000008 /* Tx buffer used bit read */
+#define XEMACPS_IXR_RXUSED_MASK 0x00000004 /* Rx buffer used bit read */
+#define XEMACPS_IXR_FRAMERX_MASK 0x00000002 /* Frame received ok */
+#define XEMACPS_IXR_MGMNT_MASK 0x00000001 /* PHY management complete */
+#define XEMACPS_IXR_ALL_MASK 0x03FC7FFE /* Everything except MDIO */
+
+#define XEMACPS_IXR_TX_ERR_MASK (XEMACPS_IXR_TXEXH_MASK | \
+ XEMACPS_IXR_RETRY_MASK | \
+ XEMACPS_IXR_URUN_MASK | \
+ XEMACPS_IXR_TXUSED_MASK)
+
+#define XEMACPS_IXR_RX_ERR_MASK (XEMACPS_IXR_HRESPNOK_MASK | \
+ XEMACPS_IXR_RXUSED_MASK | \
+ XEMACPS_IXR_RXOVR_MASK)
+/* PHY Maintenance bit definitions */
+#define XEMACPS_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
+#define XEMACPS_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
+#define XEMACPS_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
+#define XEMACPS_PHYMNTNC_ADDR_MASK 0x0F800000 /* Address bits */
+#define XEMACPS_PHYMNTNC_REG_MASK 0x007C0000 /* register bits */
+#define XEMACPS_PHYMNTNC_DATA_MASK 0x0000FFFF /* data bits */
+#define XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
+#define XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */
+
+/* Wake on LAN bit definition */
+#define XEMACPS_WOL_MCAST_MASK 0x00080000
+#define XEMACPS_WOL_SPEREG1_MASK 0x00040000
+#define XEMACPS_WOL_ARP_MASK 0x00020000
+#define XEMACPS_WOL_MAGIC_MASK 0x00010000
+#define XEMACPS_WOL_ARP_ADDR_MASK 0x0000FFFF
+
+/* Buffer descriptor status words offset */
+#define XEMACPS_BD_ADDR_OFFSET 0x00000000 /**< word 0/addr of BDs */
+#define XEMACPS_BD_STAT_OFFSET 0x00000004 /**< word 1/status of BDs */
+
+/* Transmit buffer descriptor status words bit positions.
+ * Transmit buffer descriptor consists of two 32-bit registers,
+ * the first - word0 contains a 32-bit address pointing to the location of
+ * the transmit data.
+ * The following register - word1, consists of various information to
+ * control transmit process. After transmit, this is updated with status
+ * information, whether the frame was transmitted OK or why it had failed.
+ */
+#define XEMACPS_TXBUF_USED_MASK 0x80000000 /* Used bit. */
+#define XEMACPS_TXBUF_WRAP_MASK 0x40000000 /* Wrap bit, last
+ descriptor */
+#define XEMACPS_TXBUF_RETRY_MASK 0x20000000 /* Retry limit exceeded */
+#define XEMACPS_TXBUF_EXH_MASK 0x08000000 /* Buffers exhausted */
+#define XEMACPS_TXBUF_LAC_MASK 0x04000000 /* Late collision. */
+#define XEMACPS_TXBUF_NOCRC_MASK 0x00010000 /* No CRC */
+#define XEMACPS_TXBUF_LAST_MASK 0x00008000 /* Last buffer */
+#define XEMACPS_TXBUF_LEN_MASK 0x00003FFF /* Mask for length field */
+
+#define XEMACPS_TXBUF_ERR_MASK 0x3C000000 /* Mask of TX error bits */
+
+/* Receive buffer descriptor status words bit positions.
+ * Receive buffer descriptor consists of two 32-bit registers,
+ * the first - word0 contains a 32-bit word aligned address pointing to the
+ * address of the buffer. The lower two bits make up the wrap bit indicating
+ * the last descriptor and the ownership bit to indicate it has been used.
+ * The following register - word1, contains status information regarding why
+ * the frame was received (the filter match condition) as well as other
+ * useful info.
+ */
+#define XEMACPS_RXBUF_BCAST_MASK 0x80000000 /* Broadcast frame */
+#define XEMACPS_RXBUF_MULTIHASH_MASK 0x40000000 /* Multicast hashed frame */
+#define XEMACPS_RXBUF_UNIHASH_MASK 0x20000000 /* Unicast hashed frame */
+#define XEMACPS_RXBUF_EXH_MASK 0x08000000 /* buffer exhausted */
+#define XEMACPS_RXBUF_AMATCH_MASK 0x06000000 /* Specific address
+ matched */
+#define XEMACPS_RXBUF_IDFOUND_MASK 0x01000000 /* Type ID matched */
+#define XEMACPS_RXBUF_IDMATCH_MASK 0x00C00000 /* ID matched mask */
+#define XEMACPS_RXBUF_VLAN_MASK 0x00200000 /* VLAN tagged */
+#define XEMACPS_RXBUF_PRI_MASK 0x00100000 /* Priority tagged */
+#define XEMACPS_RXBUF_VPRI_MASK 0x000E0000 /* Vlan priority */
+#define XEMACPS_RXBUF_CFI_MASK 0x00010000 /* CFI frame */
+#define XEMACPS_RXBUF_EOF_MASK 0x00008000 /* End of frame. */
+#define XEMACPS_RXBUF_SOF_MASK 0x00004000 /* Start of frame. */
+#define XEMACPS_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */
+
+#define XEMACPS_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
+#define XEMACPS_RXBUF_NEW_MASK 0x00000001 /* Used bit */
+#define XEMACPS_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */
+
+#define XEAMCPS_GEN_PURPOSE_TIMER_LOAD 100 /* timeout value in msecs */
+
+#define XEMACPS_GMII2RGMII_FULLDPLX BMCR_FULLDPLX
+#define XEMACPS_GMII2RGMII_SPEED1000 BMCR_SPEED1000
+#define XEMACPS_GMII2RGMII_SPEED100 BMCR_SPEED100
+#define XEMACPS_GMII2RGMII_REG_NUM 0x10
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+#define NS_PER_SEC 1000000000ULL /* Nanoseconds per
+ second */
+#endif
+
+#define xemacps_read(base, reg) \
+ __raw_readl(((void __iomem *)(base)) + (reg))
+#define xemacps_write(base, reg, val) \
+ __raw_writel((val), ((void __iomem *)(base)) + (reg))
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ size_t len;
+};
+
+/* DMA buffer descriptor structure. Each BD is two words */
+struct xemacps_bd {
+ u32 addr;
+ u32 ctrl;
+};
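+
+/* The BD rings are allocated from coherent DMA memory and are shared with
+ * the GEM DMA engine; "addr" and "ctrl" correspond to word 0 and word 1
+ * described in the bit-position comments above.
+ */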
+
+
+/* Our private device data. */
+struct net_local {
+ void __iomem *baseaddr;
+ struct clk *devclk;
+ struct clk *aperclk;
+ struct notifier_block clk_rate_change_nb;
+
+ struct device_node *phy_node;
+ struct device_node *gmii2rgmii_phy_node;
+ struct ring_info *tx_skb;
+ struct ring_info *rx_skb;
+
+ struct xemacps_bd *rx_bd;
+ struct xemacps_bd *tx_bd;
+
+ dma_addr_t rx_bd_dma; /* physical address */
+ dma_addr_t tx_bd_dma; /* physical address */
+
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u32 rx_bd_ci;
+
+ u32 tx_bd_freecnt;
+
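+	/* tx_lock protects the TX BD ring, rx_lock the RX/NAPI path, and
+	 * nwctrlreg_lock serializes read-modify-write accesses to the
+	 * net_ctrl register, which is also updated from the ISR.
+	 */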
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ spinlock_t nwctrlreg_lock;
+
+ struct platform_device *pdev;
+ struct net_device *ndev; /* this device */
+ struct tasklet_struct tx_bdreclaim_tasklet;
+ struct workqueue_struct *txtimeout_handler_wq;
+ struct work_struct txtimeout_reinit;
+
+ struct napi_struct napi; /* napi information for device */
+ struct net_device_stats stats; /* Statistics for this device */
+
+ struct timer_list gen_purpose_timer; /* Used for stats update */
+
+	/* Manage internal timer for packet timestamping */
+	struct cyclecounter cycles;
+	struct timecounter clock;
+	/* used by xemacps_systim_to_hwtstamp()/xemacps_init_tsu() below */
+	struct timecompare compare;
+	struct hwtstamp_config hwtstamp_config;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ struct phy_device *gmii2rgmii_phy_dev;
+ phy_interface_t phy_interface;
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+ /* RX ip/tcp/udp checksum */
+ unsigned ip_summed;
+ unsigned int enetnum;
+ unsigned int lastrxfrmscntr;
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ unsigned int ptpenetclk;
+#endif
+};
+#define to_net_local(_nb) container_of(_nb, struct net_local,\
+ clk_rate_change_nb)
+
+static struct net_device_ops netdev_ops;
+
+/**
+ * xemacps_mdio_read - Read current value of phy register indicated by
+ * phyreg.
+ * @bus: mdio bus
+ * @mii_id: mii id
+ * @phyreg: phy register to be read
+ *
+ * Return: value read from the specified phy register.
+ *
+ * note: This is for 802.3 clause 22 phys access. For 802.3 clause 45 phys
+ * access, set bit 30 to be 1. e.g. change XEMACPS_PHYMNTNC_OP_MASK to
+ * 0x00020000.
+ */
+static int xemacps_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int value;
+	u32 ipisr;
+
+ regval = XEMACPS_PHYMNTNC_OP_MASK;
+ regval |= XEMACPS_PHYMNTNC_OP_R_MASK;
+ regval |= (mii_id << XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK);
+ regval |= (phyreg << XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK);
+
+ xemacps_write(lp->baseaddr, XEMACPS_PHYMNTNC_OFFSET, regval);
+
+ /* wait for end of transfer */
+ do {
+ cpu_relax();
+ ipisr = xemacps_read(lp->baseaddr, XEMACPS_NWSR_OFFSET);
+ } while ((ipisr & XEMACPS_NWSR_MDIOIDLE_MASK) == 0);
+
+ value = xemacps_read(lp->baseaddr, XEMACPS_PHYMNTNC_OFFSET) &
+ XEMACPS_PHYMNTNC_DATA_MASK;
+
+ return value;
+}
+
+/**
+ * xemacps_mdio_write - Write passed in value to phy register indicated
+ * by phyreg.
+ * @bus: mdio bus
+ * @mii_id: mii id
+ * @phyreg: phy register to be configured.
+ * @value: value to be written to phy register.
+ * Return: always 0; the mii_bus write callback must have an int return type.
+ *
+ * note: This is for 802.3 clause 22 phys access. For 802.3 clause 45 phys
+ * access, set bit 30 to be 1. e.g. change XEMACPS_PHYMNTNC_OP_MASK to
+ * 0x00020000.
+ */
+static int xemacps_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+ u16 value)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+	u32 ipisr;
+
+ regval = XEMACPS_PHYMNTNC_OP_MASK;
+ regval |= XEMACPS_PHYMNTNC_OP_W_MASK;
+ regval |= (mii_id << XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK);
+ regval |= (phyreg << XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK);
+ regval |= value;
+
+ xemacps_write(lp->baseaddr, XEMACPS_PHYMNTNC_OFFSET, regval);
+
+ /* wait for end of transfer */
+ do {
+ cpu_relax();
+ ipisr = xemacps_read(lp->baseaddr, XEMACPS_NWSR_OFFSET);
+ } while ((ipisr & XEMACPS_NWSR_MDIOIDLE_MASK) == 0);
+
+ return 0;
+}
+
+
+/**
+ * xemacps_mdio_reset - mdio reset. The phylib documentation (phy.txt)
+ * expects a reset callback, but this device has no MDIO reset, so this
+ * is a no-op stub.
+ * @bus: mdio bus
+ **/
+static int xemacps_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemacps_set_freq() - Set a clock to a new frequency
+ * @clk: Pointer to the clock to change
+ * @rate: New frequency in Hz
+ * @dev: Pointer to the struct device
+ */
+static void xemacps_set_freq(struct clk *clk, long rate, struct device *dev)
+{
+ rate = clk_round_rate(clk, rate);
+ if (rate < 0)
+ return;
+
+ dev_info(dev, "Set clk to %ld Hz\n", rate);
+ if (clk_set_rate(clk, rate))
+ dev_err(dev, "Setting new clock rate failed.\n");
+}
+
+/**
+ * xemacps_adjust_link - handles link status changes, such as speed,
+ * duplex, up/down, ...
+ * @ndev: network device
+ */
+static void xemacps_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *gmii2rgmii_phydev = lp->gmii2rgmii_phy_dev;
+ int status_change = 0;
+ u32 regval;
+ u16 gmii2rgmii_reg = 0;
+
+ if (phydev->link) {
+ if ((lp->speed != phydev->speed) ||
+ (lp->duplex != phydev->duplex)) {
+ regval = xemacps_read(lp->baseaddr,
+ XEMACPS_NWCFG_OFFSET);
+ regval &= ~(XEMACPS_NWCFG_FDEN_MASK |
+ XEMACPS_NWCFG_1000_MASK |
+ XEMACPS_NWCFG_100_MASK);
+
+ if (phydev->duplex) {
+ regval |= XEMACPS_NWCFG_FDEN_MASK;
+ gmii2rgmii_reg |= XEMACPS_GMII2RGMII_FULLDPLX;
+ }
+
+ if (phydev->speed == SPEED_1000) {
+ regval |= XEMACPS_NWCFG_1000_MASK;
+ gmii2rgmii_reg |= XEMACPS_GMII2RGMII_SPEED1000;
+ xemacps_set_freq(lp->devclk, 125000000,
+ &lp->pdev->dev);
+			} else if (phydev->speed == SPEED_100) {
+				regval |= XEMACPS_NWCFG_100_MASK;
+				gmii2rgmii_reg |= XEMACPS_GMII2RGMII_SPEED100;
+				xemacps_set_freq(lp->devclk, 25000000,
+						&lp->pdev->dev);
+			} else if (phydev->speed == SPEED_10) {
+				xemacps_set_freq(lp->devclk, 2500000,
+						&lp->pdev->dev);
+			} else {
+ dev_err(&lp->pdev->dev,
+ "%s: unknown PHY speed %d\n",
+ __func__, phydev->speed);
+ return;
+ }
+
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET,
+ regval);
+
+ if (gmii2rgmii_phydev != NULL) {
+ xemacps_mdio_write(lp->mii_bus,
+ gmii2rgmii_phydev->addr,
+ XEMACPS_GMII2RGMII_REG_NUM,
+ gmii2rgmii_reg);
+ }
+
+ lp->speed = phydev->speed;
+ lp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != lp->link) {
+ lp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link)
+ dev_info(&lp->pdev->dev, "link up (%d/%s)\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "FULL" : "HALF");
+ else
+ dev_info(&lp->pdev->dev, "link down\n");
+ }
+}
+
+static int xemacps_clk_notifier_cb(struct notifier_block *nb, unsigned long
+ event, void *data)
+{
+/*
+ struct clk_notifier_data *ndata = data;
+ struct net_local *nl = to_net_local(nb);
+*/
+
+ switch (event) {
+	case PRE_RATE_CHANGE:
+		/* If a rate change is announced, we should check whether we
+		 * can keep the current frequency by adjusting the clock
+		 * dividers, but the current framework offers no way to do
+		 * that. Always allow the change; rejecting it would also
+		 * block our own rate requests.
+		 */
+		return NOTIFY_OK;
+	case POST_RATE_CHANGE:
+		/* This callback must not call back into the common clock
+		 * framework, which is exactly what xemacps_adjust_link()
+		 * would do, so the link is not re-adjusted here.
+		 */
+		return NOTIFY_OK;
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+/**
+ * xemacps_mii_probe - probe mii bus, find the right bus_id to register
+ * phy callback function.
+ * @ndev: network interface device structure
+ * return 0 on success, negative value if error
+ **/
+static int xemacps_mii_probe(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+
+ if (lp->phy_node) {
+ phydev = of_phy_connect(lp->ndev,
+ lp->phy_node,
+ &xemacps_adjust_link,
+ 0,
+ lp->phy_interface);
+ }
+ if (!phydev) {
+ dev_err(&lp->pdev->dev, "%s: no PHY found\n", ndev->name);
+ return -1;
+ }
+
+ dev_dbg(&lp->pdev->dev,
+ "GEM: phydev %p, phydev->phy_id 0x%x, phydev->addr 0x%x\n",
+ phydev, phydev->phy_id, phydev->addr);
+
+ phydev->supported &= (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phydev->advertising = phydev->supported;
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = -1;
+ lp->phy_dev = phydev;
+
+ phy_start(lp->phy_dev);
+
+ dev_dbg(&lp->pdev->dev, "phy_addr 0x%x, phy_id 0x%08x\n",
+ lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+ dev_dbg(&lp->pdev->dev, "attach [%s] phy driver\n",
+ lp->phy_dev->drv->name);
+
+ if (lp->gmii2rgmii_phy_node) {
+ phydev = of_phy_connect(lp->ndev,
+ lp->gmii2rgmii_phy_node,
+ NULL,
+ 0, 0);
+ if (!phydev) {
+ dev_err(&lp->pdev->dev, "%s: no gmii to rgmii converter found\n",
+ ndev->name);
+ return -1;
+ }
+ lp->gmii2rgmii_phy_dev = phydev;
+ } else
+ lp->gmii2rgmii_phy_dev = NULL;
+
+ return 0;
+}
+
+/**
+ * xemacps_mii_init - Initialize and register mii bus to network device
+ * @lp: local device instance pointer
+ * return 0 on success, negative value if error
+ **/
+static int xemacps_mii_init(struct net_local *lp)
+{
+ int rc = -ENXIO, i;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+ struct device_node *npp;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (lp->mii_bus == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ lp->mii_bus->name = "XEMACPS mii bus";
+ lp->mii_bus->read = &xemacps_mdio_read;
+ lp->mii_bus->write = &xemacps_mdio_write;
+ lp->mii_bus->reset = &xemacps_mdio_reset;
+ lp->mii_bus->priv = lp;
+ lp->mii_bus->parent = &lp->ndev->dev;
+
+ lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ rc = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+ npp = of_get_parent(np);
+ of_address_to_resource(npp, 0, &res);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ if (of_mdiobus_register(lp->mii_bus, np))
+ goto err_out_free_mdio_irq;
+
+ return 0;
+
+err_out_free_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(lp->mii_bus);
+err_out:
+ return rc;
+}
+
+/**
+ * xemacps_update_hwaddr - Update the device's MAC address; when the
+ * configured MAC address is not valid, reconfigure with a random one.
+ * @lp: local device instance pointer
+ **/
+static void xemacps_update_hwaddr(struct net_local *lp)
+{
+ u32 regvall;
+ u16 regvalh;
+ u8 addr[6];
+
+ regvall = xemacps_read(lp->baseaddr, XEMACPS_LADDR1L_OFFSET);
+ regvalh = xemacps_read(lp->baseaddr, XEMACPS_LADDR1H_OFFSET);
+ addr[0] = regvall & 0xFF;
+ addr[1] = (regvall >> 8) & 0xFF;
+ addr[2] = (regvall >> 16) & 0xFF;
+ addr[3] = (regvall >> 24) & 0xFF;
+ addr[4] = regvalh & 0xFF;
+ addr[5] = (regvalh >> 8) & 0xFF;
+
+ if (is_valid_ether_addr(addr)) {
+ memcpy(lp->ndev->dev_addr, addr, sizeof(addr));
+ } else {
+		dev_info(&lp->pdev->dev, "invalid MAC address, using a random one\n");
+ random_ether_addr(lp->ndev->dev_addr);
+ dev_info(&lp->pdev->dev,
+ "MAC updated %02x:%02x:%02x:%02x:%02x:%02x\n",
+ lp->ndev->dev_addr[0], lp->ndev->dev_addr[1],
+ lp->ndev->dev_addr[2], lp->ndev->dev_addr[3],
+ lp->ndev->dev_addr[4], lp->ndev->dev_addr[5]);
+ }
+}
+
+/**
+ * xemacps_set_hwaddr - Set device's MAC address from ndev->dev_addr
+ * @lp: local device instance pointer
+ **/
+static void xemacps_set_hwaddr(struct net_local *lp)
+{
+ u32 regvall = 0;
+ u16 regvalh = 0;
+#ifdef __LITTLE_ENDIAN
+ regvall = cpu_to_le32(*((u32 *)lp->ndev->dev_addr));
+ regvalh = cpu_to_le16(*((u16 *)(lp->ndev->dev_addr + 4)));
+#endif
+#ifdef __BIG_ENDIAN
+ regvall = cpu_to_be32(*((u32 *)lp->ndev->dev_addr));
+ regvalh = cpu_to_be16(*((u16 *)(lp->ndev->dev_addr + 4)));
+#endif
+	/* LADDRXH has to be written after LADDRXL to enable this address,
+	 * even if the upper 16 bits are zero. */
+ xemacps_write(lp->baseaddr, XEMACPS_LADDR1L_OFFSET, regvall);
+ xemacps_write(lp->baseaddr, XEMACPS_LADDR1H_OFFSET, regvalh);
+#ifdef DEBUG
+ regvall = xemacps_read(lp->baseaddr, XEMACPS_LADDR1L_OFFSET);
+ regvalh = xemacps_read(lp->baseaddr, XEMACPS_LADDR1H_OFFSET);
+ dev_dbg(&lp->pdev->dev,
+ "MAC 0x%08x, 0x%08x, %02x:%02x:%02x:%02x:%02x:%02x\n",
+ regvall, regvalh,
+ (regvall & 0xff), ((regvall >> 8) & 0xff),
+ ((regvall >> 16) & 0xff), (regvall >> 24),
+ (regvalh & 0xff), (regvalh >> 8));
+#endif
+}
+
+/*
+ * xemacps_reset_hw - Helper function to reset the underlying hardware.
+ * This is called when we get into such deep trouble that we don't know
+ * how to handle otherwise.
+ * @lp: local device instance pointer
+ */
+static void xemacps_reset_hw(struct net_local *lp)
+{
+ u32 regisr;
+ /* make sure we have the buffer for ourselves */
+ wmb();
+
+ /* Have a clean start */
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, 0);
+
+ /* Clear statistic counters */
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET,
+ XEMACPS_NWCTRL_STATCLR_MASK);
+
+ /* Clear TX and RX status */
+ xemacps_write(lp->baseaddr, XEMACPS_TXSR_OFFSET, ~0UL);
+ xemacps_write(lp->baseaddr, XEMACPS_RXSR_OFFSET, ~0UL);
+
+ /* Disable all interrupts */
+ xemacps_write(lp->baseaddr, XEMACPS_IDR_OFFSET, ~0UL);
+ synchronize_irq(lp->ndev->irq);
+ regisr = xemacps_read(lp->baseaddr, XEMACPS_ISR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_ISR_OFFSET, regisr);
+}
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+
+/**
+ * xemacps_get_hwticks - get the current value of the GEM internal timer
+ * @lp: local device instance pointer
+ * return: nothing
+ **/
+static inline void
+xemacps_get_hwticks(struct net_local *lp, u64 *sec, u64 *nsec)
+{
+ do {
+ *nsec = xemacps_read(lp->baseaddr, XEMACPS_1588NS_OFFSET);
+ *sec = xemacps_read(lp->baseaddr, XEMACPS_1588S_OFFSET);
+ } while (*nsec > xemacps_read(lp->baseaddr, XEMACPS_1588NS_OFFSET));
+}
+
+/**
+ * xemacps_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t xemacps_read_clock(const struct cyclecounter *tc)
+{
+ struct net_local *lp =
+ container_of(tc, struct net_local, cycles);
+ u64 stamp;
+ u64 sec, nsec;
+
+ xemacps_get_hwticks(lp, &sec, &nsec);
+ stamp = (sec << 32) | nsec;
+
+ return stamp;
+}
+
+
+/**
+ * xemacps_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @shhwtstamps: timestamp structure to update
+ * @regval: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions
+ */
+static void xemacps_systim_to_hwtstamp(struct net_local *lp,
+ struct skb_shared_hwtstamps *shhwtstamps,
+ u64 regval)
+{
+ u64 ns;
+
+ ns = timecounter_cyc2time(&lp->clock, regval);
+ timecompare_update(&lp->compare, ns);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+}
+
+static void
+xemacps_rx_hwtstamp(struct net_local *lp,
+ struct sk_buff *skb, unsigned msg_type)
+{
+ u64 time64, sec, nsec;
+
+ if (!msg_type) {
+ /* PTP Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPERXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPERXNS_OFFSET);
+ } else {
+ /* PTP Peer Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPPRXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPPRXNS_OFFSET);
+ }
+ time64 = (sec << 32) | nsec;
+ xemacps_systim_to_hwtstamp(lp, skb_hwtstamps(skb), time64);
+}
+
+static void
+xemacps_tx_hwtstamp(struct net_local *lp,
+ struct sk_buff *skb, unsigned msg_type)
+{
+ u64 time64, sec, nsec;
+
+ if (!msg_type) {
+ /* PTP Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPETXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPETXNS_OFFSET);
+ } else {
+ /* PTP Peer Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPPTXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPPTXNS_OFFSET);
+ }
+
+ time64 = (sec << 32) | nsec;
+ xemacps_systim_to_hwtstamp(lp, skb_hwtstamps(skb), time64);
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
+}
+
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+/**
+ * xemacps_rx - process received packets when napi called
+ * @lp: local device instance pointer
+ * @budget: NAPI budget
+ * return: number of BDs processed
+ **/
+static int xemacps_rx(struct net_local *lp, int budget)
+{
+ struct xemacps_bd *cur_p;
+ u32 len;
+ struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ u32 new_skb_baddr;
+ unsigned int numbdfree = 0;
+ u32 size = 0;
+ u32 packets = 0;
+ u32 regval;
+
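+	/* The controller sets the "new" (used) bit in BD word 0 once it has
+	 * written a received frame to the buffer; process descriptors until
+	 * one still owned by the hardware is found or the budget runs out.
+	 */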
+ cur_p = &lp->rx_bd[lp->rx_bd_ci];
+ regval = cur_p->addr;
+ rmb();
+ while (numbdfree < budget) {
+ if (!(regval & XEMACPS_RXBUF_NEW_MASK))
+ break;
+
+ new_skb = netdev_alloc_skb(lp->ndev, XEMACPS_RX_BUF_SIZE);
+ if (new_skb == NULL) {
+ dev_err(&lp->ndev->dev, "no memory for new sk_buff\n");
+ break;
+ }
+ /* Get dma handle of skb->data */
+ new_skb_baddr = (u32) dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ XEMACPS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* the packet length */
+ len = cur_p->ctrl & XEMACPS_RXBUF_LEN_MASK;
+ rmb();
+ skb = lp->rx_skb[lp->rx_bd_ci].skb;
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[lp->rx_bd_ci].mapping,
+ lp->rx_skb[lp->rx_bd_ci].len,
+ DMA_FROM_DEVICE);
+
+ /* setup received skb and send it upstream */
+ skb_put(skb, len); /* Tell the skb how much data we got. */
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+
+ skb->ip_summed = lp->ip_summed;
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ if ((lp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) &&
+ (ntohs(skb->protocol) == 0x800)) {
+ unsigned ip_proto, dest_port, msg_type;
+
+ /* While the GEM can timestamp PTP packets, it does
+ * not mark the RX descriptor to identify them. This
+ * is entirely the wrong place to be parsing UDP
+ * headers, but some minimal effort must be made.
+ * NOTE: the below parsing of ip_proto and dest_port
+ * depend on the use of Ethernet_II encapsulation,
+ * IPv4 without any options.
+ */
+ ip_proto = *((u8 *)skb->mac_header + 14 + 9);
+ dest_port = ntohs(*(((u16 *)skb->mac_header) +
+ ((14 + 20 + 2)/2)));
+ msg_type = *((u8 *)skb->mac_header + 42);
+ if ((ip_proto == IPPROTO_UDP) &&
+ (dest_port == 0x13F)) {
+ /* Timestamp this packet */
+ xemacps_rx_hwtstamp(lp, skb, msg_type & 0x2);
+ }
+ }
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+ size += len;
+ packets++;
+ netif_receive_skb(skb);
+
+ cur_p->addr = (cur_p->addr & ~XEMACPS_RXBUF_ADD_MASK)
+ | (new_skb_baddr);
+ lp->rx_skb[lp->rx_bd_ci].skb = new_skb;
+ lp->rx_skb[lp->rx_bd_ci].mapping = new_skb_baddr;
+ lp->rx_skb[lp->rx_bd_ci].len = XEMACPS_RX_BUF_SIZE;
+
+ cur_p->ctrl = 0;
+ cur_p->addr &= (~XEMACPS_RXBUF_NEW_MASK);
+ wmb();
+
+ lp->rx_bd_ci++;
+ lp->rx_bd_ci = lp->rx_bd_ci % XEMACPS_RECV_BD_CNT;
+ cur_p = &lp->rx_bd[lp->rx_bd_ci];
+ regval = cur_p->addr;
+ rmb();
+ numbdfree++;
+ }
+ wmb();
+ lp->stats.rx_packets += packets;
+ lp->stats.rx_bytes += size;
+ return numbdfree;
+}
+
+/**
+ * xemacps_rx_poll - NAPI poll routine
+ * @napi: pointer to the napi struct
+ * @budget: NAPI budget, the maximum number of packets to process
+ **/
+static int xemacps_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct net_local *lp = container_of(napi, struct net_local, napi);
+ int work_done = 0;
+ u32 regval;
+
+ spin_lock(&lp->rx_lock);
+ while (1) {
+ regval = xemacps_read(lp->baseaddr, XEMACPS_RXSR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_RXSR_OFFSET, regval);
+ if (regval & XEMACPS_RXSR_HRESPNOK_MASK)
+ dev_err(&lp->pdev->dev, "RX error 0x%x\n", regval);
+
+ work_done += xemacps_rx(lp, budget - work_done);
+ if (work_done >= budget)
+ break;
+
+ napi_complete(napi);
+ /* We disabled RX interrupts in interrupt service
+ * routine, now it is time to enable it back.
+ */
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IER_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+
+ /* If a packet has come in between the last check of the BD
+ * list and unmasking the interrupts, we may have missed the
+ * interrupt, so reschedule here.
+ */
+ if ((lp->rx_bd[lp->rx_bd_ci].addr & XEMACPS_RXBUF_NEW_MASK)
+ && napi_reschedule(napi)) {
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IDR_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+ continue;
+ }
+ break;
+ }
+ spin_unlock(&lp->rx_lock);
+ return work_done;
+}
+
+/**
+ * xemacps_tx_poll - tx bd reclaim tasklet handler
+ * @data: pointer to network interface device structure
+ **/
+static void xemacps_tx_poll(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+ u32 len = 0;
+ unsigned int bdcount = 0;
+ unsigned int bdpartialcount = 0;
+ unsigned int sop = 0;
+ struct xemacps_bd *cur_p;
+ u32 cur_i;
+ u32 numbdstofree;
+ u32 numbdsinhw;
+ struct ring_info *rp;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock(&lp->tx_lock);
+ regval = xemacps_read(lp->baseaddr, XEMACPS_TXSR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_TXSR_OFFSET, regval);
+ dev_dbg(&lp->pdev->dev, "TX status 0x%x\n", regval);
+ if (regval & (XEMACPS_TXSR_HRESPNOK_MASK | XEMACPS_TXSR_BUFEXH_MASK))
+ dev_err(&lp->pdev->dev, "TX error 0x%x\n", regval);
+
+ cur_i = lp->tx_bd_ci;
+ cur_p = &lp->tx_bd[cur_i];
+ numbdsinhw = XEMACPS_SEND_BD_CNT - lp->tx_bd_freecnt;
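+	/* First pass: count the BDs the hardware has handed back (used bit
+	 * set), excluding any trailing BDs of a packet whose last fragment
+	 * has not completed yet, so only whole packets are reclaimed below.
+	 */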
+ while (bdcount < numbdsinhw) {
+ if (sop == 0) {
+ if (cur_p->ctrl & XEMACPS_TXBUF_USED_MASK)
+ sop = 1;
+ else
+ break;
+ }
+
+ bdcount++;
+ bdpartialcount++;
+
+ /* hardware has processed this BD so check the "last" bit.
+ * If it is clear, then there are more BDs for the current
+ * packet. Keep a count of these partial packet BDs.
+ */
+ if (cur_p->ctrl & XEMACPS_TXBUF_LAST_MASK) {
+ sop = 0;
+ bdpartialcount = 0;
+ }
+
+ cur_i++;
+ cur_i = cur_i % XEMACPS_SEND_BD_CNT;
+ cur_p = &lp->tx_bd[cur_i];
+ }
+ numbdstofree = bdcount - bdpartialcount;
+ lp->tx_bd_freecnt += numbdstofree;
+ numbdsinhw -= numbdstofree;
+ if (!numbdstofree)
+ goto tx_poll_out;
+
+ cur_p = &lp->tx_bd[lp->tx_bd_ci];
+ while (numbdstofree) {
+ rp = &lp->tx_skb[lp->tx_bd_ci];
+ skb = rp->skb;
+
+ len += (cur_p->ctrl & XEMACPS_TXBUF_LEN_MASK);
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ if ((lp->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) &&
+ (ntohs(skb->protocol) == 0x800)) {
+ unsigned ip_proto, dest_port, msg_type;
+
+ skb_reset_mac_header(skb);
+
+ ip_proto = *((u8 *)skb->mac_header + 14 + 9);
+ dest_port = ntohs(*(((u16 *)skb->mac_header) +
+ ((14 + 20 + 2)/2)));
+ msg_type = *((u8 *)skb->mac_header + 42);
+ if ((ip_proto == IPPROTO_UDP) &&
+ (dest_port == 0x13F)) {
+ /* Timestamp this packet */
+ xemacps_tx_hwtstamp(lp, skb, msg_type & 0x2);
+ }
+ }
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+ dma_unmap_single(&lp->pdev->dev, rp->mapping, rp->len,
+ DMA_TO_DEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb(skb);
+ /* log tx completed packets and bytes, errors logs
+ * are in other error counters.
+ */
+ if (cur_p->ctrl & XEMACPS_TXBUF_LAST_MASK) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += len;
+ len = 0;
+ }
+
+ /* Set used bit, preserve wrap bit; clear everything else. */
+ cur_p->ctrl |= XEMACPS_TXBUF_USED_MASK;
+ cur_p->ctrl &= (XEMACPS_TXBUF_USED_MASK |
+ XEMACPS_TXBUF_WRAP_MASK);
+
+ lp->tx_bd_ci++;
+ lp->tx_bd_ci = lp->tx_bd_ci % XEMACPS_SEND_BD_CNT;
+ cur_p = &lp->tx_bd[lp->tx_bd_ci];
+ numbdstofree--;
+ }
+ wmb();
+
+ if (numbdsinhw) {
+ spin_lock_irqsave(&lp->nwctrlreg_lock, flags);
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ regval |= XEMACPS_NWCTRL_STARTTX_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regval);
+ spin_unlock_irqrestore(&lp->nwctrlreg_lock, flags);
+ }
+
+ netif_wake_queue(ndev);
+
+tx_poll_out:
+ spin_unlock(&lp->tx_lock);
+}
+
+/**
+ * xemacps_interrupt - interrupt main service routine
+ * @irq: interrupt number
+ * @dev_id: pointer to a network device structure
+ * return IRQ_HANDLED or IRQ_NONE
+ **/
+static irqreturn_t xemacps_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regisr;
+ u32 regctrl;
+
+ regisr = xemacps_read(lp->baseaddr, XEMACPS_ISR_OFFSET);
+ if (unlikely(!regisr))
+ return IRQ_NONE;
+
+ xemacps_write(lp->baseaddr, XEMACPS_ISR_OFFSET, regisr);
+
+ while (regisr) {
+ if (regisr & (XEMACPS_IXR_TXCOMPL_MASK |
+ XEMACPS_IXR_TX_ERR_MASK)) {
+ tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+ }
+
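+		/* A "used bit read" on RX means a frame arrived while no free
+		 * BD was available; flushing the RX packet buffer (DPRAM)
+		 * discards that frame so reception can continue.
+		 */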
+ if (regisr & XEMACPS_IXR_RXUSED_MASK) {
+ spin_lock(&lp->nwctrlreg_lock);
+ regctrl = xemacps_read(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET);
+ regctrl |= XEMACPS_NWCTRL_FLUSH_DPRAM_MASK;
+ xemacps_write(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET, regctrl);
+ spin_unlock(&lp->nwctrlreg_lock);
+ }
+
+ if (regisr & XEMACPS_IXR_FRAMERX_MASK) {
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IDR_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+ napi_schedule(&lp->napi);
+ }
+ regisr = xemacps_read(lp->baseaddr, XEMACPS_ISR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_ISR_OFFSET, regisr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Free all packets presently in the descriptor rings.
+ */
+static void xemacps_clean_rings(struct net_local *lp)
+{
+ int i;
+
+ for (i = 0; i < XEMACPS_RECV_BD_CNT; i++) {
+ if (lp->rx_skb && lp->rx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[i].mapping,
+ lp->rx_skb[i].len,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb(lp->rx_skb[i].skb);
+ lp->rx_skb[i].skb = NULL;
+ lp->rx_skb[i].mapping = 0;
+ }
+ }
+
+ for (i = 0; i < XEMACPS_SEND_BD_CNT; i++) {
+ if (lp->tx_skb && lp->tx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb(lp->tx_skb[i].skb);
+ lp->tx_skb[i].skb = NULL;
+ lp->tx_skb[i].mapping = 0;
+ }
+ }
+}
+
+/**
+ * xemacps_descriptor_free - Free allocated TX and RX BDs
+ * @lp: local device instance pointer
+ **/
+static void xemacps_descriptor_free(struct net_local *lp)
+{
+ int size;
+
+ xemacps_clean_rings(lp);
+
+ /* kfree(NULL) is safe, no need to check here */
+ kfree(lp->tx_skb);
+ lp->tx_skb = NULL;
+ kfree(lp->rx_skb);
+ lp->rx_skb = NULL;
+
+ size = XEMACPS_RECV_BD_CNT * sizeof(struct xemacps_bd);
+ if (lp->rx_bd) {
+ dma_free_coherent(&lp->pdev->dev, size,
+ lp->rx_bd, lp->rx_bd_dma);
+ lp->rx_bd = NULL;
+ }
+
+ size = XEMACPS_SEND_BD_CNT * sizeof(struct xemacps_bd);
+ if (lp->tx_bd) {
+ dma_free_coherent(&lp->pdev->dev, size,
+ lp->tx_bd, lp->tx_bd_dma);
+ lp->tx_bd = NULL;
+ }
+}
+
+/**
+ * xemacps_descriptor_init - Allocate both TX and RX BDs
+ * @lp: local device instance pointer
+ * return 0 on success, negative value if error
+ **/
+static int xemacps_descriptor_init(struct net_local *lp)
+{
+ int size;
+ struct sk_buff *new_skb;
+ u32 new_skb_baddr;
+ u32 i;
+ struct xemacps_bd *cur_p;
+ u32 regval;
+
+ lp->tx_skb = NULL;
+ lp->rx_skb = NULL;
+ lp->rx_bd = NULL;
+ lp->tx_bd = NULL;
+
+ /* Reset the indexes which are used for accessing the BDs */
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
+ size = XEMACPS_SEND_BD_CNT * sizeof(struct ring_info);
+ lp->tx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->tx_skb)
+ goto err_out;
+ size = XEMACPS_RECV_BD_CNT * sizeof(struct ring_info);
+ lp->rx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->rx_skb)
+ goto err_out;
+
+ /*
+ * Set up RX buffer descriptors.
+ */
+
+ size = XEMACPS_RECV_BD_CNT * sizeof(struct xemacps_bd);
+ lp->rx_bd = dma_alloc_coherent(&lp->pdev->dev, size,
+ &lp->rx_bd_dma, GFP_KERNEL);
+ if (!lp->rx_bd)
+ goto err_out;
+ dev_dbg(&lp->pdev->dev, "RX ring %d bytes at 0x%x mapped %p\n",
+ size, lp->rx_bd_dma, lp->rx_bd);
+
+ for (i = 0; i < XEMACPS_RECV_BD_CNT; i++) {
+ cur_p = &lp->rx_bd[i];
+
+ new_skb = netdev_alloc_skb(lp->ndev, XEMACPS_RX_BUF_SIZE);
+ if (new_skb == NULL) {
+ dev_err(&lp->ndev->dev, "alloc_skb error %d\n", i);
+ goto err_out;
+ }
+
+ /* Get dma handle of skb->data */
+ new_skb_baddr = (u32) dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ XEMACPS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* set wrap bit for last BD */
+ regval = (new_skb_baddr & XEMACPS_RXBUF_ADD_MASK);
+ if (i == XEMACPS_RECV_BD_CNT - 1)
+ regval |= XEMACPS_RXBUF_WRAP_MASK;
+ cur_p->addr = regval;
+ cur_p->ctrl = 0;
+ wmb();
+
+ lp->rx_skb[i].skb = new_skb;
+ lp->rx_skb[i].mapping = new_skb_baddr;
+ lp->rx_skb[i].len = XEMACPS_RX_BUF_SIZE;
+ }
+
+ /*
+ * Set up TX buffer descriptors.
+ */
+
+ size = XEMACPS_SEND_BD_CNT * sizeof(struct xemacps_bd);
+ lp->tx_bd = dma_alloc_coherent(&lp->pdev->dev, size,
+ &lp->tx_bd_dma, GFP_KERNEL);
+ if (!lp->tx_bd)
+ goto err_out;
+ dev_dbg(&lp->pdev->dev, "TX ring %d bytes at 0x%x mapped %p\n",
+ size, lp->tx_bd_dma, lp->tx_bd);
+
+ for (i = 0; i < XEMACPS_SEND_BD_CNT; i++) {
+ cur_p = &lp->tx_bd[i];
+ /* set wrap bit for last BD */
+ cur_p->addr = 0;
+ regval = XEMACPS_TXBUF_USED_MASK;
+ if (i == XEMACPS_SEND_BD_CNT - 1)
+ regval |= XEMACPS_TXBUF_WRAP_MASK;
+ cur_p->ctrl = regval;
+ }
+ wmb();
+
+ lp->tx_bd_freecnt = XEMACPS_SEND_BD_CNT;
+
+ dev_dbg(&lp->pdev->dev,
+ "lp->tx_bd %p lp->tx_bd_dma %p lp->tx_skb %p\n",
+ lp->tx_bd, (void *)lp->tx_bd_dma, lp->tx_skb);
+ dev_dbg(&lp->pdev->dev,
+ "lp->rx_bd %p lp->rx_bd_dma %p lp->rx_skb %p\n",
+ lp->rx_bd, (void *)lp->rx_bd_dma, lp->rx_skb);
+
+ return 0;
+
+err_out:
+ xemacps_descriptor_free(lp);
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+/*
+ * Initialize the GEM Time Stamp Unit
+ */
+static void xemacps_init_tsu(struct net_local *lp)
+{
+
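+	/* The raw "cycle" value returned by xemacps_read_clock() packs the
+	 * TSU seconds into the upper 32 bits and nanoseconds into the lower
+	 * 32 bits, and is used unscaled (mult = 1, shift = 0).
+	 */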
+ memset(&lp->cycles, 0, sizeof(lp->cycles));
+ lp->cycles.read = xemacps_read_clock;
+ lp->cycles.mask = CLOCKSOURCE_MASK(64);
+ lp->cycles.mult = 1;
+ lp->cycles.shift = 0;
+
+ /* Set registers so that rollover occurs soon to test this. */
+ xemacps_write(lp->baseaddr, XEMACPS_1588NS_OFFSET, 0x00000000);
+ xemacps_write(lp->baseaddr, XEMACPS_1588S_OFFSET, 0xFF800000);
+
+	/* program the timer increment register with the number of
+	 * nanoseconds per clock tick.
+	 *
+	 * Note: the value is derived from the configured TSU clock
+	 * frequency (lp->ptpenetclk).
+ */
+ xemacps_write(lp->baseaddr, XEMACPS_1588INC_OFFSET,
+ (NS_PER_SEC/lp->ptpenetclk));
+
+ timecounter_init(&lp->clock, &lp->cycles,
+ ktime_to_ns(ktime_get_real()));
+ /*
+ * Synchronize our NIC clock against system wall clock.
+ */
+ memset(&lp->compare, 0, sizeof(lp->compare));
+ lp->compare.source = &lp->clock;
+ lp->compare.target = ktime_get_real;
+ lp->compare.num_samples = 10;
+ timecompare_update(&lp->compare, 0);
+
+ /* Initialize hwstamp config */
+ lp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+}
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+/**
+ * xemacps_init_hw - Initialize hardware to known good state
+ * @lp: local device instance pointer
+ **/
+static void xemacps_init_hw(struct net_local *lp)
+{
+ u32 regval;
+
+ xemacps_reset_hw(lp);
+ xemacps_set_hwaddr(lp);
+
+ /* network configuration */
+ regval = 0;
+ regval |= XEMACPS_NWCFG_FDEN_MASK;
+ regval |= XEMACPS_NWCFG_RXCHKSUMEN_MASK;
+ regval |= XEMACPS_NWCFG_PAUSECOPYDI_MASK;
+ regval |= XEMACPS_NWCFG_FCSREM_MASK;
+ regval |= XEMACPS_NWCFG_PAUSEEN_MASK;
+ regval |= XEMACPS_NWCFG_100_MASK;
+ regval |= XEMACPS_NWCFG_HDRXEN_MASK;
+
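+	/* MDC_DIV_224 is the largest divisor the GEM offers; presumably it
+	 * is chosen so the MDIO clock stays below the 2.5 MHz limit from
+	 * IEEE 802.3 for any plausible pclk frequency.
+	 */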
+ regval |= (MDC_DIV_224 << XEMACPS_NWCFG_MDC_SHIFT_MASK);
+ if (lp->ndev->flags & IFF_PROMISC) /* copy all */
+ regval |= XEMACPS_NWCFG_COPYALLEN_MASK;
+ if (!(lp->ndev->flags & IFF_BROADCAST)) /* No broadcast */
+ regval |= XEMACPS_NWCFG_BCASTDI_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+
+ /* Init TX and RX DMA Q address */
+ xemacps_write(lp->baseaddr, XEMACPS_RXQBASE_OFFSET, lp->rx_bd_dma);
+ xemacps_write(lp->baseaddr, XEMACPS_TXQBASE_OFFSET, lp->tx_bd_dma);
+
+ /* DMACR configurations */
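+	/* The receive buffer size field is programmed in units of
+	 * XEMACPS_RX_BUF_UNIT bytes, hence the rounded-up division below.
+	 */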
+ regval = (((XEMACPS_RX_BUF_SIZE / XEMACPS_RX_BUF_UNIT) +
+ ((XEMACPS_RX_BUF_SIZE % XEMACPS_RX_BUF_UNIT) ? 1 : 0)) <<
+ XEMACPS_DMACR_RXBUF_SHIFT);
+ regval |= XEMACPS_DMACR_RXSIZE_MASK;
+ regval |= XEMACPS_DMACR_TXSIZE_MASK;
+ regval |= XEMACPS_DMACR_TCPCKSUM_MASK;
+#ifdef __LITTLE_ENDIAN
+ regval &= ~XEMACPS_DMACR_ENDIAN_MASK;
+#endif
+#ifdef __BIG_ENDIAN
+ regval |= XEMACPS_DMACR_ENDIAN_MASK;
+#endif
+ regval |= XEMACPS_DMACR_BLENGTH_INCR16;
+ xemacps_write(lp->baseaddr, XEMACPS_DMACR_OFFSET, regval);
+
+ /* Enable TX, RX and MDIO port */
+ regval = 0;
+ regval |= XEMACPS_NWCTRL_MDEN_MASK;
+ regval |= XEMACPS_NWCTRL_TXEN_MASK;
+ regval |= XEMACPS_NWCTRL_RXEN_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regval);
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ /* Initialize the Time Stamp Unit */
+ xemacps_init_tsu(lp);
+#endif
+
+ /* Enable interrupts */
+ regval = XEMACPS_IXR_ALL_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_IER_OFFSET, regval);
+}
+
+/**
+ * xemacps_resetrx_for_no_rxdata - Resets the Rx if there is no data
+ * for a while (presently 100 msecs)
+ * @data: Used for net_local instance pointer
+ **/
+static void xemacps_resetrx_for_no_rxdata(unsigned long data)
+{
+ struct net_local *lp = (struct net_local *)data;
+ unsigned long regctrl;
+ unsigned long tempcntr;
+ unsigned long flags;
+
+ tempcntr = xemacps_read(lp->baseaddr, XEMACPS_RXCNT_OFFSET);
+ if ((!tempcntr) && (!(lp->lastrxfrmscntr))) {
+ spin_lock_irqsave(&lp->nwctrlreg_lock, flags);
+ regctrl = xemacps_read(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET);
+ regctrl &= (~XEMACPS_NWCTRL_RXEN_MASK);
+ xemacps_write(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET, regctrl);
+ regctrl = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ regctrl |= (XEMACPS_NWCTRL_RXEN_MASK);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regctrl);
+ spin_unlock_irqrestore(&lp->nwctrlreg_lock, flags);
+ }
+ lp->lastrxfrmscntr = tempcntr;
+}
+
+/**
+ * xemacps_update_stats - Update the statistic structure entries from
+ * the corresponding emacps hardware statistic registers
+ * @data: Used for net_local instance pointer
+ **/
+static void xemacps_update_stats(unsigned long data)
+{
+ struct net_local *lp = (struct net_local *)data;
+ struct net_device_stats *nstat = &lp->stats;
+ u32 cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXUNDRCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXOVRCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXJABCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXFCSCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_crc_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXLENGTHCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXALIGNCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_frame_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXRESERRCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_missed_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXORCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_fifo_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_TXURUNCNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->tx_fifo_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_SNGLCOLLCNT_OFFSET);
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_MULTICOLLCNT_OFFSET);
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_EXCESSCOLLCNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->tx_aborted_errors += cnt;
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_LATECOLLCNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_CSENSECNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->tx_carrier_errors += cnt;
+}
+
+/**
+ * xemacps_gen_purpose_timerhandler - Timer handler that is called at regular
+ * intervals upon expiry of the gen_purpose_timer defined in net_local struct.
+ * @data: Used for net_local instance pointer
+ *
+ * This timer handler updates the statistics by calling
+ * xemacps_update_stats. The hardware statistics registers can overflow
+ * quickly under heavy load, so this timer periodically reads them and
+ * accumulates the values into the corresponding stats structure entries.
+ * The statistics registers are clear-on-read.
+ **/
+static void xemacps_gen_purpose_timerhandler(unsigned long data)
+{
+ struct net_local *lp = (struct net_local *)data;
+
+ xemacps_update_stats(data);
+ xemacps_resetrx_for_no_rxdata(data);
+ mod_timer(&(lp->gen_purpose_timer),
+ jiffies + msecs_to_jiffies(XEAMCPS_GEN_PURPOSE_TIMER_LOAD));
+}
+
+/**
+ * xemacps_open - Called when a network device is made active
+ * @ndev: network interface device structure
+ * return 0 on success, negative value if error
+ *
+ * The open entry point is called when a network interface is made active
+ * by the system (IFF_UP). At this point all resources needed for transmit
+ * and receive operations are allocated, the interrupt handler is
+ * registered with OS, the watchdog timer is started, and the stack is
+ * notified that the interface is ready.
+ *
+ * note: on error, any resources allocated before the failure must be
+ * released, otherwise they leak (memory, for example).
+ **/
+static int xemacps_open(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int rc;
+
+ dev_dbg(&lp->pdev->dev, "open\n");
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ rc = xemacps_descriptor_init(lp);
+ if (rc) {
+ dev_err(&lp->pdev->dev,
+ "Unable to allocate DMA memory, rc %d\n", rc);
+ return rc;
+ }
+
+ rc = pm_runtime_get_sync(&lp->pdev->dev);
+ if (rc < 0) {
+ dev_err(&lp->pdev->dev,
+ "pm_runtime_get_sync() failed, rc %d\n", rc);
+ goto err_free_rings;
+ }
+
+ xemacps_init_hw(lp);
+ rc = xemacps_mii_probe(ndev);
+ if (rc != 0) {
+ dev_err(&lp->pdev->dev,
+ "%s mii_probe fail.\n", lp->mii_bus->name);
+ if (rc == (-2)) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ }
+ rc = -ENXIO;
+ goto err_pm_put;
+ }
+
+ setup_timer(&(lp->gen_purpose_timer), xemacps_gen_purpose_timerhandler,
+ (unsigned long)lp);
+ mod_timer(&(lp->gen_purpose_timer),
+ jiffies + msecs_to_jiffies(XEAMCPS_GEN_PURPOSE_TIMER_LOAD));
+
+ napi_enable(&lp->napi);
+ netif_carrier_on(ndev);
+ netif_start_queue(ndev);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+ return 0;
+
+err_pm_put:
+ xemacps_reset_hw(lp);
+ pm_runtime_put(&lp->pdev->dev);
+err_free_rings:
+ xemacps_descriptor_free(lp);
+
+ return rc;
+}
+
+/**
+ * xemacps_close - disable a network interface
+ * @ndev: network interface device structure
+ * return 0
+ *
+ * The close entry point is called when a network interface is de-activated
+ * by OS. The hardware is still under the driver control, but needs to be
+ * disabled. A global MAC reset is issued to stop the hardware, and all
+ * transmit and receive resources are freed.
+ **/
+static int xemacps_close(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ del_timer_sync(&(lp->gen_purpose_timer));
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ netif_carrier_off(ndev);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ if (lp->gmii2rgmii_phy_node)
+ phy_disconnect(lp->gmii2rgmii_phy_dev);
+ xemacps_reset_hw(lp);
+ mdelay(500);
+ xemacps_descriptor_free(lp);
+
+ pm_runtime_put(&lp->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * xemacps_reinit_for_txtimeout - work queue scheduled for the tx timeout
+ * handling.
+ * @data: pointer to the tx timeout reinit work structure
+ **/
+static void xemacps_reinit_for_txtimeout(struct work_struct *data)
+{
+ struct net_local *lp = container_of(data, struct net_local,
+ txtimeout_reinit);
+ int rc;
+
+ netif_stop_queue(lp->ndev);
+ napi_disable(&lp->napi);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ spin_lock_bh(&lp->tx_lock);
+ xemacps_reset_hw(lp);
+ spin_unlock_bh(&lp->tx_lock);
+
+ if (lp->phy_dev)
+ phy_stop(lp->phy_dev);
+
+ xemacps_descriptor_free(lp);
+ rc = xemacps_descriptor_init(lp);
+ if (rc) {
+ dev_err(&lp->pdev->dev,
+ "Unable to allocate DMA memory, rc %d\n", rc);
+ return;
+ }
+
+ xemacps_init_hw(lp);
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = -1;
+
+ if (lp->phy_dev)
+ phy_start(lp->phy_dev);
+
+ napi_enable(&lp->napi);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+ lp->ndev->trans_start = jiffies;
+ netif_wake_queue(lp->ndev);
+}
+
+/**
+ * xemacps_tx_timeout - callback used when the transmitter has not made
+ * any progress for dev->watchdog ticks.
+ * @ndev: network interface device structure
+ **/
+static void xemacps_tx_timeout(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+	dev_err(&lp->pdev->dev, "transmit timeout %lu ms, resetting...\n",
+ TX_TIMEOUT * 1000UL / HZ);
+ queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+/**
+ * xemacps_set_mac_address - set network interface mac address
+ * @ndev: network interface device structure
+ * @addr: pointer to MAC address
+ * return 0 on success, negative value if error
+ **/
+static int xemacps_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ dev_dbg(&lp->pdev->dev, "hwaddr 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ hwaddr->sa_data[0], hwaddr->sa_data[1], hwaddr->sa_data[2],
+ hwaddr->sa_data[3], hwaddr->sa_data[4], hwaddr->sa_data[5]);
+
+ memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+ xemacps_set_hwaddr(lp);
+ return 0;
+}
+
+/**
+ * xemacps_clear_csum - Clear the csum field for transport protocols
+ * @skb: socket buffer
+ * @ndev: network interface device structure
+ * return 0 on success, other value if error
+ **/
+static int xemacps_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+{
+ /* Only run for packets requiring a checksum. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
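+	/* With TX checksum offload enabled in the DMA config register the
+	 * GEM inserts the checksum itself, but it appears to require the
+	 * checksum field of the outgoing frame to be zeroed first.
+	 */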
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+}
+
+/**
+ * xemacps_start_xmit - transmit a packet (called by kernel)
+ * @skb: socket buffer
+ * @ndev: network interface device structure
+ * return 0 on success, other value if error
+ **/
+static int xemacps_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ dma_addr_t mapping;
+ unsigned int nr_frags, len;
+ int i;
+ u32 regval;
+ void *virt_addr;
+ skb_frag_t *frag;
+ struct xemacps_bd *cur_p;
+ unsigned long flags;
+ u32 bd_tail;
+
+ nr_frags = skb_shinfo(skb)->nr_frags + 1;
+ spin_lock_bh(&lp->tx_lock);
+
+ if (nr_frags > lp->tx_bd_freecnt) {
+ netif_stop_queue(ndev); /* stop send queue */
+ spin_unlock_bh(&lp->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+	if (xemacps_clear_csum(skb, ndev)) {
+		spin_unlock_bh(&lp->tx_lock);
+		dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ bd_tail = lp->tx_bd_tail;
+ cur_p = &lp->tx_bd[bd_tail];
+ lp->tx_bd_freecnt -= nr_frags;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ for (i = 0; i < nr_frags; i++) {
+ if (i == 0) {
+ len = skb_headlen(skb);
+ mapping = dma_map_single(&lp->pdev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ } else {
+ len = skb_frag_size(frag);
+ virt_addr = skb_frag_address(frag);
+ mapping = dma_map_single(&lp->pdev->dev, virt_addr,
+ len, DMA_TO_DEVICE);
+ frag++;
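+			/* Every fragment's ring_info entry stores the same
+			 * skb pointer and the reclaim path frees it once per
+			 * BD, so take an extra reference per extra fragment.
+			 */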
+ skb_get(skb);
+ }
+
+ lp->tx_skb[lp->tx_bd_tail].skb = skb;
+ lp->tx_skb[lp->tx_bd_tail].mapping = mapping;
+ lp->tx_skb[lp->tx_bd_tail].len = len;
+ cur_p->addr = mapping;
+
+ /* preserve critical status bits */
+ regval = cur_p->ctrl;
+ regval &= (XEMACPS_TXBUF_USED_MASK | XEMACPS_TXBUF_WRAP_MASK);
+		/* update length field */
+		regval |= len;
+ /* commit second to last buffer to hardware */
+ if (i != 0)
+ regval &= ~XEMACPS_TXBUF_USED_MASK;
+ /* last fragment of this packet? */
+ if (i == (nr_frags - 1))
+ regval |= XEMACPS_TXBUF_LAST_MASK;
+ cur_p->ctrl = regval;
+
+ lp->tx_bd_tail++;
+ lp->tx_bd_tail = lp->tx_bd_tail % XEMACPS_SEND_BD_CNT;
+ cur_p = &(lp->tx_bd[lp->tx_bd_tail]);
+ }
+ wmb();
+
+ /* commit first buffer to hardware -- do this after
+ * committing the other buffers to avoid an underrun */
+ cur_p = &lp->tx_bd[bd_tail];
+ regval = cur_p->ctrl;
+ regval &= ~XEMACPS_TXBUF_USED_MASK;
+ cur_p->ctrl = regval;
+ wmb();
+
+ spin_lock_irqsave(&lp->nwctrlreg_lock, flags);
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET,
+ (regval | XEMACPS_NWCTRL_STARTTX_MASK));
+ spin_unlock_irqrestore(&lp->nwctrlreg_lock, flags);
+
+ spin_unlock_bh(&lp->tx_lock);
+ ndev->trans_start = jiffies;
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Get the MAC Address bit from the specified position
+ */
+static unsigned get_bit(u8 *mac, unsigned bit)
+{
+ unsigned byte;
+
+ byte = mac[bit / 8];
+ byte >>= (bit & 0x7);
+ byte &= 1;
+
+ return byte;
+}
+
+/*
+ * Calculate a GEM MAC Address hash index
+ */
+static unsigned calc_mac_hash(u8 *mac)
+{
+ int index_bit, mac_bit;
+ unsigned hash_index;
+
+ hash_index = 0;
+ mac_bit = 5;
+ for (index_bit = 5; index_bit >= 0; index_bit--) {
+ hash_index |= (get_bit(mac, mac_bit) ^
+ get_bit(mac, mac_bit + 6) ^
+ get_bit(mac, mac_bit + 12) ^
+ get_bit(mac, mac_bit + 18) ^
+ get_bit(mac, mac_bit + 24) ^
+ get_bit(mac, mac_bit + 30) ^
+ get_bit(mac, mac_bit + 36) ^
+ get_bit(mac, mac_bit + 42))
+ << index_bit;
+ mac_bit--;
+ }
+
+ return hash_index;
+}
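+
+/* Worked example (illustration only, not from the original sources): for
+ * the IPv4 all-hosts group 224.0.0.1, i.e. multicast MAC 01:00:5e:00:00:01,
+ * XORing every sixth address bit gives hash_index = 38, so
+ * xemacps_set_hashtable() below would set bit 6 of the upper hash register.
+ */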
+
+/**
+ * xemacps_set_hashtable - Add multicast addresses to the internal
+ * multicast-hash table. Called from xemac_set_rx_mode().
+ * @ndev: network interface device structure
+ *
+ * The hash address register is 64 bits long and takes up two
+ * locations in the memory map. The least significant bits are stored
+ * in EMAC_HSL and the most significant bits in EMAC_HSH.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the
+ * network configuration register enable the reception of hash matched
+ * frames. The destination address is reduced to a 6 bit index into
+ * the 64 bit hash register using the following hash function. The
+ * hash function is an exclusive or of every sixth bit of the
+ * destination address.
+ *
+ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ *
+ * da[0] represents the least significant bit of the first byte
+ * received, that is, the multicast/unicast indicator, and da[47]
+ * represents the most significant bit of the last byte received. If
+ * the hash index, hi[n], points to a bit that is set in the hash
+ * register then the frame will be matched according to whether the
+ * frame is multicast or unicast. A multicast match will be signalled
+ * if the multicast hash enable bit is set, da[0] is 1 and the hash
+ * index points to a bit set in the hash register. A unicast match
+ * will be signalled if the unicast hash enable bit is set, da[0] is 0
+ * and the hash index points to a bit set in the hash register. To
+ * receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the
+ * network configuration register.
+ **/
+static void xemacps_set_hashtable(struct net_device *ndev)
+{
+ struct netdev_hw_addr *curr;
+ u32 regvalh, regvall, hash_index;
+ u8 *mc_addr;
+ struct net_local *lp;
+
+ lp = netdev_priv(ndev);
+
+ regvalh = regvall = 0;
+
+ netdev_for_each_mc_addr(curr, ndev) {
+ if (!curr) /* end of list */
+ break;
+ mc_addr = curr->addr;
+ hash_index = calc_mac_hash(mc_addr);
+
+ if (hash_index >= XEMACPS_MAX_HASH_BITS) {
+ dev_err(&lp->pdev->dev,
+ "hash calculation out of range %d\n",
+ hash_index);
+ break;
+ }
+ if (hash_index < 32)
+ regvall |= (1 << hash_index);
+ else
+ regvalh |= (1 << (hash_index - 32));
+ }
+
+ xemacps_write(lp->baseaddr, XEMACPS_HASHL_OFFSET, regvall);
+ xemacps_write(lp->baseaddr, XEMACPS_HASHH_OFFSET, regvalh);
+}
+
+/**
+ * xemacps_set_rx_mode - enable/disable promiscuous and multicast modes
+ * @ndev: network interface device structure
+ **/
+static void xemacps_set_rx_mode(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCFG_OFFSET);
+
+ /* promisc mode */
+ if (ndev->flags & IFF_PROMISC)
+ regval |= XEMACPS_NWCFG_COPYALLEN_MASK;
+ if (!(ndev->flags & IFF_PROMISC))
+ regval &= ~XEMACPS_NWCFG_COPYALLEN_MASK;
+
+ /* All multicast mode */
+ if (ndev->flags & IFF_ALLMULTI) {
+ regval |= XEMACPS_NWCFG_MCASTHASHEN_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_HASHL_OFFSET, ~0UL);
+ xemacps_write(lp->baseaddr, XEMACPS_HASHH_OFFSET, ~0UL);
+ /* Specific multicast mode */
+ } else if ((ndev->flags & IFF_MULTICAST)
+ && (netdev_mc_count(ndev) > 0)) {
+ regval |= XEMACPS_NWCFG_MCASTHASHEN_MASK;
+ xemacps_set_hashtable(ndev);
+ /* Disable multicast mode */
+ } else {
+ xemacps_write(lp->baseaddr, XEMACPS_HASHL_OFFSET, 0x0);
+ xemacps_write(lp->baseaddr, XEMACPS_HASHH_OFFSET, 0x0);
+ regval &= ~XEMACPS_NWCFG_MCASTHASHEN_MASK;
+ }
+
+ /* broadcast mode */
+ if (ndev->flags & IFF_BROADCAST)
+ regval &= ~XEMACPS_NWCFG_BCASTDI_MASK;
+ /* No broadcast */
+ if (!(ndev->flags & IFF_BROADCAST))
+ regval |= XEMACPS_NWCFG_BCASTDI_MASK;
+
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+}
+
+#define MIN_MTU 60
+#define MAX_MTU 1500
+/**
+ * xemacps_change_mtu - Change maximum transfer unit
+ * @ndev: network interface device structure
+ * @new_mtu: new value for the maximum transfer unit
+ * return: 0 on success, negative value if error.
+ **/
+static int xemacps_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if ((new_mtu < MIN_MTU) ||
+ ((new_mtu + ndev->hard_header_len) > MAX_MTU))
+ return -EINVAL;
+
+ ndev->mtu = new_mtu; /* change mtu in net_device structure */
+ return 0;
+}
+
+/**
+ * xemacps_get_settings - get device specific settings.
+ * Usage: Issue "ethtool ethX" under linux prompt.
+ * @ndev: network device
+ * @ecmd: ethtool command structure
+ * return: 0 on success, negative value if error.
+ **/
+static int
+xemacps_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+/**
+ * xemacps_set_settings - set device specific settings.
+ * Usage: Issue "ethtool -s ethX speed 1000" under linux prompt
+ * to change speed
+ * @ndev: network device
+ * @ecmd: ethtool command structure
+ * return: 0 on success, negative value if error.
+ **/
+static int
+xemacps_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+/**
+ * xemacps_get_drvinfo - report driver information
+ * Usage: Issue "ethtool -i ethX" under linux prompt
+ * @ndev: network device
+ * @ed: device driver information structure
+ **/
+static void
+xemacps_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ memset(ed, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(ed->driver, lp->pdev->dev.driver->name);
+ strcpy(ed->version, DRIVER_VERSION);
+}
+
+/**
+ * xemacps_get_ringparam - get device dma ring information.
+ * Usage: Issue "ethtool -g ethX" under linux prompt
+ * @ndev: network device
+ * @erp: ethtool ring parameter structure
+ **/
+static void
+xemacps_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *erp)
+{
+ memset(erp, 0, sizeof(struct ethtool_ringparam));
+
+ erp->rx_max_pending = XEMACPS_RECV_BD_CNT;
+ erp->tx_max_pending = XEMACPS_SEND_BD_CNT;
+ erp->rx_pending = 0;
+ erp->tx_pending = 0;
+}
+
+/**
+ * xemacps_get_wol - get device wake on lan status
+ * Usage: Issue "ethtool ethX" under linux prompt
+ * @ndev: network device
+ * @ewol: wol status
+ **/
+static void
+xemacps_get_wol(struct net_device *ndev, struct ethtool_wolinfo *ewol)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ ewol->supported = WAKE_MAGIC | WAKE_ARP | WAKE_UCAST | WAKE_MCAST;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_WOL_OFFSET);
+	if (regval & XEMACPS_WOL_MCAST_MASK)
+		ewol->wolopts |= WAKE_MCAST;
+	if (regval & XEMACPS_WOL_ARP_MASK)
+		ewol->wolopts |= WAKE_ARP;
+	if (regval & XEMACPS_WOL_SPEREG1_MASK)
+		ewol->wolopts |= WAKE_UCAST;
+	if (regval & XEMACPS_WOL_MAGIC_MASK)
+		ewol->wolopts |= WAKE_MAGIC;
+
+}
+
+/**
+ * xemacps_set_wol - set device wake on lan configuration
+ * Usage: Issue "ethtool -s ethX wol u|m|b|g" under linux prompt to enable
+ * specified type of packet.
+ * Usage: Issue "ethtool -s ethX wol d" under linux prompt to disable
+ * this feature.
+ * @ndev: network device
+ * @ewol: wol status
+ * return 0 on success, negative value if not supported
+ **/
+static int
+xemacps_set_wol(struct net_device *ndev, struct ethtool_wolinfo *ewol)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ if (ewol->wolopts & ~(WAKE_MAGIC | WAKE_ARP | WAKE_UCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_WOL_OFFSET);
+ regval &= ~(XEMACPS_WOL_MCAST_MASK | XEMACPS_WOL_ARP_MASK |
+ XEMACPS_WOL_SPEREG1_MASK | XEMACPS_WOL_MAGIC_MASK);
+
+ if (ewol->wolopts & WAKE_MAGIC)
+ regval |= XEMACPS_WOL_MAGIC_MASK;
+ if (ewol->wolopts & WAKE_ARP)
+ regval |= XEMACPS_WOL_ARP_MASK;
+ if (ewol->wolopts & WAKE_UCAST)
+ regval |= XEMACPS_WOL_SPEREG1_MASK;
+ if (ewol->wolopts & WAKE_MCAST)
+ regval |= XEMACPS_WOL_MCAST_MASK;
+
+ xemacps_write(lp->baseaddr, XEMACPS_WOL_OFFSET, regval);
+
+ return 0;
+}
+
+/**
+ * xemacps_get_pauseparam - get device pause status
+ * Usage: Issue "ethtool -a ethX" under linux prompt
+ * @ndev: network device
+ * @epauseparm: pause parameter structure
+ *
+ * note: hardware supports only tx flow control
+ **/
+static void
+xemacps_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epauseparm)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ epauseparm->autoneg = 0;
+ epauseparm->rx_pause = 0;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCFG_OFFSET);
+ epauseparm->tx_pause = regval & XEMACPS_NWCFG_PAUSEEN_MASK;
+}
+
+/**
+ * xemacps_set_pauseparam - set device pause parameter(flow control)
+ * Usage: Issue "ethtool -A ethX tx on|off" under linux prompt
+ * @ndev: network device
+ * @epauseparm: pause parameter structure
+ * return 0 on success, negative value if not supported
+ *
+ * note: hardware supports only tx flow control
+ **/
+static int
+xemacps_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epauseparm)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ if (netif_running(ndev)) {
+ dev_err(&lp->pdev->dev,
+			"Please stop the interface before applying the configuration\n");
+		return -EBUSY;
+ }
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCFG_OFFSET);
+
+ if (epauseparm->tx_pause)
+ regval |= XEMACPS_NWCFG_PAUSEEN_MASK;
+ if (!(epauseparm->tx_pause))
+ regval &= ~XEMACPS_NWCFG_PAUSEEN_MASK;
+
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+ return 0;
+}
+
+/**
+ * xemacps_get_stats - get device statistics, updated from the hardware counters
+ * @ndev: network device
+ **/
+static struct net_device_stats
+*xemacps_get_stats(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct net_device_stats *nstat = &lp->stats;
+
+ xemacps_update_stats((unsigned long)lp);
+ return nstat;
+}
+
+static struct ethtool_ops xemacps_ethtool_ops = {
+ .get_settings = xemacps_get_settings,
+ .set_settings = xemacps_set_settings,
+ .get_drvinfo = xemacps_get_drvinfo,
+ .get_link = ethtool_op_get_link, /* ethtool default */
+ .get_ringparam = xemacps_get_ringparam,
+ .get_wol = xemacps_get_wol,
+ .set_wol = xemacps_set_wol,
+ .get_pauseparam = xemacps_get_pauseparam,
+ .set_pauseparam = xemacps_set_pauseparam,
+};
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+static int xemacps_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct net_local *lp;
+ u32 regval;
+
+ lp = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if ((config.tx_type != HWTSTAMP_TX_OFF) &&
+ (config.tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
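+		/*
+		 * All PTP filter requests are widened to HWTSTAMP_FILTER_ALL;
+		 * the GEM receive timestamping logic is assumed to stamp every
+		 * event frame rather than filtering by PTP message type.
+		 */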
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET,
+ (regval | XEMACPS_NWCTRL_RXTSTAMP_MASK));
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ config.tx_type = HWTSTAMP_TX_ON;
+ lp->hwtstamp_config = config;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+/**
+ * xemacps_ioctl - ioctl entry point
+ * @ndev: network device
+ * @rq: interface request ioctl
+ * @cmd: command code
+ *
+ * Called when user issues an ioctl request to the network device.
+ **/
+static int xemacps_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(phydev, rq, cmd);
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ case SIOCSHWTSTAMP:
+ return xemacps_hwtstamp_ioctl(ndev, rq, cmd);
+#endif
+ default:
+ dev_info(&lp->pdev->dev, "ioctl %d not implemented.\n", cmd);
+ return -EOPNOTSUPP;
+ }
+
+}
+
+/**
+ * xemacps_probe - Platform driver probe
+ * @pdev: Pointer to platform device structure
+ *
+ * Return 0 on success, negative value if error
+ */
+static int xemacps_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem = NULL;
+ struct resource *r_irq = NULL;
+ struct net_device *ndev;
+ struct net_local *lp;
+	u32 regval = 0;
+	int rc = -ENXIO;
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+	const void *prop;
+#endif
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r_mem || !r_irq) {
+ dev_err(&pdev->dev, "no IO resource defined.\n");
+ return -ENXIO;
+ }
+
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&pdev->dev, "etherdev allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ lp = netdev_priv(ndev);
+ lp->pdev = pdev;
+ lp->ndev = ndev;
+
+ spin_lock_init(&lp->tx_lock);
+ spin_lock_init(&lp->rx_lock);
+ spin_lock_init(&lp->nwctrlreg_lock);
+
+ lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->baseaddr)) {
+ dev_err(&pdev->dev, "failed to map baseaddress.\n");
+ rc = PTR_ERR(lp->baseaddr);
+ goto err_out_free_netdev;
+ }
+
+ dev_dbg(&lp->pdev->dev, "BASEADDRESS hw: %p virt: %p\n",
+ (void *)r_mem->start, lp->baseaddr);
+
+ ndev->irq = platform_get_irq(pdev, 0);
+
+ ndev->netdev_ops = &netdev_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->ethtool_ops = &xemacps_ethtool_ops;
+ ndev->base_addr = r_mem->start;
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+ netif_napi_add(ndev, &lp->napi, xemacps_rx_poll, XEMACPS_NAPI_WEIGHT);
+
+ lp->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_free_netdev;
+ }
+
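+	/*
+	 * On Zynq, GEM0 is wired to IRQ 54; any other IRQ is assumed to
+	 * belong to GEM1.
+	 */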
+ if (ndev->irq == 54)
+ lp->enetnum = 0;
+ else
+ lp->enetnum = 1;
+
+ lp->aperclk = devm_clk_get(&pdev->dev, "aper_clk");
+ if (IS_ERR(lp->aperclk)) {
+ dev_err(&pdev->dev, "aper_clk clock not found.\n");
+ rc = PTR_ERR(lp->aperclk);
+ goto err_out_unregister_netdev;
+ }
+ lp->devclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(lp->devclk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ rc = PTR_ERR(lp->devclk);
+ goto err_out_unregister_netdev;
+ }
+
+ rc = clk_prepare_enable(lp->aperclk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+ goto err_out_unregister_netdev;
+ }
+ rc = clk_prepare_enable(lp->devclk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto err_out_clk_dis_aper;
+ }
+
+ lp->clk_rate_change_nb.notifier_call = xemacps_clk_notifier_cb;
+ lp->clk_rate_change_nb.next = NULL;
+ if (clk_notifier_register(lp->devclk, &lp->clk_rate_change_nb))
+ dev_warn(&pdev->dev,
+ "Unable to register clock notifier.\n");
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ prop = of_get_property(lp->pdev->dev.of_node,
+ "xlnx,ptp-enet-clock", NULL);
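+	/* Fall back to a nominal ~133.33 MHz PTP clock when the device
+	 * tree does not provide "xlnx,ptp-enet-clock".
+	 */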
+ if (prop)
+ lp->ptpenetclk = (u32)be32_to_cpup(prop);
+ else
+ lp->ptpenetclk = 133333328;
+#endif
+
+ lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "phy-handle", 0);
+ lp->gmii2rgmii_phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "gmii2rgmii-phy-handle", 0);
+ rc = of_get_phy_mode(lp->pdev->dev.of_node);
+ if (rc < 0) {
+ dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ lp->phy_interface = rc;
+
+ /* Set MDIO clock divider */
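+	/* (MDC_DIV_224 is assumed to divide the peripheral clock by 224,
+	 * keeping MDC below the 2.5 MHz IEEE 802.3 limit for pclk rates
+	 * up to roughly 560 MHz.)
+	 */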
+ regval = (MDC_DIV_224 << XEMACPS_NWCFG_MDC_SHIFT_MASK);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+
+ regval = XEMACPS_NWCTRL_MDEN_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regval);
+
+ rc = xemacps_mii_init(lp);
+ if (rc) {
+ dev_err(&lp->pdev->dev, "error in xemacps_mii_init\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ xemacps_update_hwaddr(lp);
+ tasklet_init(&lp->tx_bdreclaim_tasklet, xemacps_tx_poll,
+ (unsigned long) ndev);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+ lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ INIT_WORK(&lp->txtimeout_reinit, xemacps_reinit_for_txtimeout);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+ pdev->id, ndev->base_addr, ndev->irq);
+
+ rc = devm_request_irq(&pdev->dev, ndev->irq, &xemacps_interrupt, 0,
+ ndev->name, ndev);
+ if (rc) {
+		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+			ndev->irq, rc);
+ goto err_out_unregister_clk_notifier;
+ }
+
+ return 0;
+
+err_out_unregister_clk_notifier:
+ clk_notifier_unregister(lp->devclk, &lp->clk_rate_change_nb);
+ clk_disable_unprepare(lp->devclk);
+err_out_clk_dis_aper:
+ clk_disable_unprepare(lp->aperclk);
+err_out_unregister_netdev:
+ unregister_netdev(ndev);
+err_out_free_netdev:
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+}
+
+/**
+ * xemacps_remove - called when platform driver is unregistered
+ * @pdev: Pointer to the platform device structure
+ *
+ * return: 0 on success
+ */
+static int xemacps_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp;
+
+ if (ndev) {
+ lp = netdev_priv(ndev);
+
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ unregister_netdev(ndev);
+
+ clk_notifier_unregister(lp->devclk, &lp->clk_rate_change_nb);
+ if (!pm_runtime_suspended(&pdev->dev)) {
+ clk_disable_unprepare(lp->devclk);
+ clk_disable_unprepare(lp->aperclk);
+ } else {
+ clk_unprepare(lp->devclk);
+ clk_unprepare(lp->aperclk);
+ }
+
+ free_netdev(ndev);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+/**
+ * xemacps_suspend - Suspend event
+ * @device: Pointer to device structure
+ *
+ * Return 0
+ */
+static int xemacps_suspend(struct device *device)
+{
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ netif_device_detach(ndev);
+ if (!pm_runtime_suspended(device)) {
+ clk_disable(lp->devclk);
+ clk_disable(lp->aperclk);
+ }
+ return 0;
+}
+
+/**
+ * xemacps_resume - Resume after previous suspend
+ * @device: Pointer to the device structure
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+static int xemacps_resume(struct device *device)
+{
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (!pm_runtime_suspended(device)) {
+ int ret;
+
+ ret = clk_enable(lp->aperclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(lp->devclk);
+ if (ret) {
+ clk_disable(lp->aperclk);
+ return ret;
+ }
+ }
+ netif_device_attach(ndev);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int xemacps_runtime_idle(struct device *dev)
+{
+ return pm_schedule_suspend(dev, 1);
+}
+
+static int xemacps_runtime_resume(struct device *device)
+{
+ int ret;
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ ret = clk_enable(lp->aperclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(lp->devclk);
+ if (ret) {
+ clk_disable(lp->aperclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xemacps_runtime_suspend(struct device *device)
+{
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ clk_disable(lp->devclk);
+ clk_disable(lp->aperclk);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops xemacps_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xemacps_suspend, xemacps_resume)
+ SET_RUNTIME_PM_OPS(xemacps_runtime_suspend, xemacps_runtime_resume,
+ xemacps_runtime_idle)
+};
+#define XEMACPS_PM (&xemacps_dev_pm_ops)
+#else /* ! CONFIG_PM */
+#define XEMACPS_PM NULL
+#endif /* ! CONFIG_PM */
+
+static struct net_device_ops netdev_ops = {
+ .ndo_open = xemacps_open,
+ .ndo_stop = xemacps_close,
+ .ndo_start_xmit = xemacps_start_xmit,
+ .ndo_set_rx_mode = xemacps_set_rx_mode,
+ .ndo_set_mac_address = xemacps_set_mac_address,
+ .ndo_do_ioctl = xemacps_ioctl,
+ .ndo_change_mtu = xemacps_change_mtu,
+ .ndo_tx_timeout = xemacps_tx_timeout,
+ .ndo_get_stats = xemacps_get_stats,
+};
+
+static struct of_device_id xemacps_of_match[] = {
+ { .compatible = "xlnx,ps7-ethernet-1.00.a", },
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, xemacps_of_match);
+
+static struct platform_driver xemacps_driver = {
+ .probe = xemacps_probe,
+ .remove = xemacps_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xemacps_of_match,
+ .pm = XEMACPS_PM,
+ },
+};
+
+module_platform_driver(xemacps_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Ethernet driver");
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,10 @@
+#
+# Makefile for the Xilinx LLTEMAC Tri-mode Ethernet driver
+#
+
+ccflags-y += -Idrivers/xilinx_common
+
+# The Linux adapter for the Xilinx driver code.
+xilinx_temac-objs := xlltemac_main.o xlltemac.o xlltemac_control.o
+
+obj-$(CONFIG_XILINX_LLTEMAC) := xilinx_temac.o
new file mode 100644
@@ -0,0 +1,1387 @@
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2008 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac.c
+ *
+ * The XLlTemac driver. Functions in this file are the minimum required
+ * for this driver. See xlltemac.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * </pre>
+ ******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+
+#include "xlltemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void InitHw(XLlTemac *InstancePtr); /* HW reset */
+
+/************************** Variable Definitions *****************************/
+
+xdbg_stmnt(int indent_on = 0;)
+xdbg_stmnt(u32 _xlltemac_rir_value;)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_CfgInitialize initializes a TEMAC channel along with the
+ * <i>InstancePtr</i> that references it. Each TEMAC channel is treated as a
+ * separate device from the point of view of this driver.
+ *
+ * The PHY is set up independently from the TEMAC. Use the MII or whatever other
+ * interface may be present for setup.
+ *
+ * @param InstancePtr references the memory instance to be associated with
+ * the TEMAC channel upon initialization.
+ * @param CfgPtr references the structure holding the hardware configuration
+ * for the TEMAC channel to initialize.
+ * @param EffectiveAddress is the processor address used to access the
+ * base address of the TEMAC channel. In systems with an MMU and virtual
+ * memory, <i>EffectiveAddress</i> is the virtual address mapped to the
+ *        physical address in <code>ConfigPtr->Config.BaseAddress</code>. In systems
+ * without an active MMU, <i>EffectiveAddress</i> should be set to the
+ * same value as <code>ConfigPtr->Config.BaseAddress</code>.
+ *
+ * @return XLlTemac_CfgInitialize returns XST_SUCCESS.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ *
+ ******************************************************************************/
+int XLlTemac_CfgInitialize(XLlTemac *InstancePtr,
+			   XLlTemac_Config *CfgPtr, u32 EffectiveAddress)
+{
+ /* Verify arguments */
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /* Clear instance memory and make copy of configuration */
+ memset(InstancePtr, 0, sizeof(XLlTemac));
+ memcpy(&InstancePtr->Config, CfgPtr, sizeof(XLlTemac_Config));
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_CfgInitialize\n");
+ /* Set device base address */
+ InstancePtr->Config.BaseAddress = EffectiveAddress;
+
+ /* Reset the hardware and set default options */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ XLlTemac_Reset(InstancePtr, XTE_NORESET_HARD);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Temac_CfgInitialize: returning SUCCESS\n");
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_Start starts the TEMAC channel as follows:
+ * - Enable transmitter if XTE_TRANSMIT_ENABLE_OPTION is set
+ * - Enable receiver if XTE_RECEIVER_ENABLE_OPTION is set
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_Start(XLlTemac *InstancePtr)
+{
+ u32 Reg;
+
+ /* Assert bad arguments and conditions */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* If already started, then there is nothing to do */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Start\n");
+ /* Enable transmitter if not already enabled */
+ if (InstancePtr->Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL, "enabling transmitter\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ if (!(Reg & XTE_TC_TX_MASK)) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "transmitter not enabled, enabling now\n");
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.
+ BaseAddress, XTE_TC_OFFSET,
+ Reg | XTE_TC_TX_MASK);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL, "transmitter enabled\n");
+ }
+
+ /* Enable receiver */
+ if (InstancePtr->Options & XTE_RECEIVER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL, "enabling receiver\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ if (!(Reg & XTE_RCW1_RX_MASK)) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "receiver not enabled, enabling now\n");
+
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.
+ BaseAddress, XTE_RCW1_OFFSET,
+ Reg | XTE_RCW1_RX_MASK);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL, "receiver enabled\n");
+ }
+
+ /* Mark as started */
+ InstancePtr->IsStarted = XCOMPONENT_IS_STARTED;
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Start: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_Stop gracefully stops the TEMAC channel as follows:
+ * - Disable all interrupts from this device
+ * - Disable the receiver
+ *
+ * XLlTemac_Stop does not modify any of the current device options.
+ *
+ * Since the transmitter is not disabled, frames currently in internal buffers
+ * or in process by a DMA engine are allowed to be transmitted.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_Stop(XLlTemac *InstancePtr)
+{
+ u32 Reg;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* If already stopped, then there is nothing to do */
+ if (InstancePtr->IsStarted == 0) {
+ return;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Stop\n");
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_Stop: disabling interrupts\n");
+ /* Disable interrupts */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET, 0);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Stop: disabling receiver\n");
+ /* Disable the receiver */
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg &= ~XTE_RCW1_RX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, Reg);
+
+ /* Stopping the receiver in mid-packet causes a dropped packet indication
+ * from HW. Clear it.
+ */
+ /* get the interrupt pending register */
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IP_OFFSET);
+ if (Reg & XTE_INT_RXRJECT_MASK) {
+ /* set the interrupt status register to clear the interrupt */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_IS_OFFSET, XTE_INT_RXRJECT_MASK);
+ }
+
+ /* Mark as stopped */
+ InstancePtr->IsStarted = 0;
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Stop: done\n");
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_Reset performs a reset of the TEMAC channel, specified by
+ * <i>InstancePtr</i>, or both channels if <i>HardCoreAction</i> is set to
+ * XTE_RESET_HARD.
+ *
+ * XLlTemac_Reset also resets the TEMAC channel's options to their default values.
+ *
+ * The calling software is responsible for re-configuring the TEMAC channel
+ * (if necessary) and restarting the MAC after the reset.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param HardCoreAction describes how XLlTemac_Reset should treat the hard core
+ * block of the TEMAC.<br><br>
+ *
+ *            If <i>HardCoreAction</i> is set to XTE_RESET_HARD, then XLlTemac_Reset
+ *            asserts the reset signal to the hard core block, which resets both
+ *            channels of the TEMAC. This will disrupt any activity that may be
+ *            occurring on the other channel, so use it with care.<br><br>
+ *
+ * Otherwise, XLlTemac_Reset resets just the transmitter and receiver of
+ * this TEMAC channel.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_Reset(XLlTemac *InstancePtr, int HardCoreAction)
+{
+ u32 Reg;
+ u32 TimeoutCount = 2;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Reset\n");
+ /* Stop the device and reset HW */
+ XLlTemac_Stop(InstancePtr);
+ InstancePtr->Options = XTE_DEFAULT_OPTIONS;
+
+ /* Reset the receiver */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "resetting the receiver\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg |= XTE_RCW1_RST_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, Reg);
+
+ /* Reset the transmitter */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "resetting the transmitter\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ Reg |= XTE_TC_RST_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET, Reg);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "waiting until reset is done\n");
+ /* Poll until the reset is done */
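+	/* Note: this loop has no timeout; it relies on the hardware
+	 * clearing the RST bits promptly.
+	 */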
+ while (Reg & (XTE_RCW1_RST_MASK | XTE_TC_RST_MASK)) {
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg |= XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ }
+
+ /* Reset hard core if required */
+ /* Resetting hard core will cause both channels to reset :-( */
+ if (HardCoreAction == XTE_RESET_HARD) {
+ xdbg_printf(XDBG_DEBUG_GENERAL, "hard reset\n");
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg | XTE_RAF_HTRST_MASK);
+ while (TimeoutCount &&
+ (!(XLlTemac_ReadReg
+ (InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK))) {
+ udelay(XTE_RESET_HARD_DELAY_US);
+ TimeoutCount--;
+ }
+ }
+
+ /* Setup HW */
+ InitHw(InstancePtr);
+}
+
+
+/******************************************************************************
+ * InitHw (internal use only) performs a one-time setup of a TEMAC channel. The
+ * setup performed here only needs to occur once after any reset.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+static void InitHw(XLlTemac *InstancePtr)
+{
+ u32 Reg;
+
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac InitHw\n");
+	/* Disable the receiver */
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac InitHw: disabling receiver\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg &= ~XTE_RCW1_RX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, Reg);
+
+ /*
+ * Stopping the receiver in mid-packet causes a dropped packet
+ * indication from HW. Clear it.
+ */
+ /* get the interrupt pending register */
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IP_OFFSET);
+ if (Reg & XTE_INT_RXRJECT_MASK) {
+ /*
+ * set the interrupt status register to clear the pending
+ * interrupt
+ */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_IS_OFFSET, XTE_INT_RXRJECT_MASK);
+ }
+
+ /* Sync default options with HW but leave receiver and transmitter
+ * disabled. They get enabled with XLlTemac_Start() if
+ * XTE_TRANSMITTER_ENABLE_OPTION and XTE_RECEIVER_ENABLE_OPTION are set
+ */
+ XLlTemac_SetOptions(InstancePtr, InstancePtr->Options &
+ ~(XTE_TRANSMITTER_ENABLE_OPTION |
+ XTE_RECEIVER_ENABLE_OPTION));
+
+ XLlTemac_ClearOptions(InstancePtr, ~InstancePtr->Options);
+
+ /* Set default MDIO divisor */
+ XLlTemac_PhySetMdioDivisor(InstancePtr, XTE_MDIO_DIV_DFT);
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac InitHw: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetMacAddress sets the MAC address for the TEMAC channel, specified
+ * by <i>InstancePtr</i> to the MAC address specified by <i>AddressPtr</i>.
+ * The TEMAC channel must be stopped before calling this function.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr is a reference to the 6-byte MAC address to set.
+ *
+ * @return On successful completion, XLlTemac_SetMacAddress returns XST_SUCCESS.
+ * Otherwise, if the TEMAC channel has not stopped,
+ * XLlTemac_SetMacAddress returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SetMacAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ u32 MacAddr;
+ u8 *Aptr = (u8 *) AddressPtr;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_NONVOID(AddressPtr != NULL);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+		    "XLlTemac_SetMacAddress: setting mac address to: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ Aptr[0], Aptr[1], Aptr[2], Aptr[3], Aptr[4], Aptr[5]);
+ /*
+ * Set the MAC bits [31:0] in UAW0
+ * Having Aptr be unsigned type prevents the following operations from sign extending
+ */
+ MacAddr = Aptr[0];
+ MacAddr |= Aptr[1] << 8;
+ MacAddr |= Aptr[2] << 16;
+ MacAddr |= Aptr[3] << 24;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW0_OFFSET, MacAddr);
+
+ /* There are reserved bits in UAW1 so don't affect them */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW1_OFFSET);
+ MacAddr &= ~XTE_UAW1_UNICASTADDR_MASK;
+
+ /* Set MAC bits [47:32] in UAW1 */
+ MacAddr |= Aptr[4];
+ MacAddr |= Aptr[5] << 8;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW1_OFFSET, MacAddr);
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetMacAddress gets the MAC address for the TEMAC channel, specified
+ * by <i>InstancePtr</i> into the memory buffer specified by <i>AddressPtr</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr references the memory buffer to store the retrieved MAC
+ * address. This memory buffer must be at least 6 bytes in length.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_GetMacAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ u32 MacAddr;
+ u8 *Aptr = (u8 *) AddressPtr;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* Read MAC bits [31:0] in UAW0 */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW0_OFFSET);
+ Aptr[0] = (u8) MacAddr;
+ Aptr[1] = (u8) (MacAddr >> 8);
+ Aptr[2] = (u8) (MacAddr >> 16);
+ Aptr[3] = (u8) (MacAddr >> 24);
+
+ /* Read MAC bits [47:32] in UAW1 */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW1_OFFSET);
+ Aptr[4] = (u8) MacAddr;
+ Aptr[5] = (u8) (MacAddr >> 8);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetOptions enables the options, <i>Options</i> for the TEMAC channel,
+ * specified by <i>InstancePtr</i>. The TEMAC channel should be stopped with
+ * XLlTemac_Stop() before changing options.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Options is a bitmask of OR'd XTE_*_OPTION values for options to
+ * set. Options not specified are not affected.
+ *
+ * @return On successful completion, XLlTemac_SetOptions returns XST_SUCCESS.
+ * Otherwise, if the device has not been stopped, XLlTemac_SetOptions
+ * returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ * See xlltemac.h for a description of the available options.
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SetOptions(XLlTemac *InstancePtr, u32 Options)
+{
+ u32 Reg; /* Generic register contents */
+ u32 RegRcw1; /* Reflects original contents of RCW1 */
+ u32 RegTc; /* Reflects original contents of TC */
+ u32 RegNewRcw1; /* Reflects new contents of RCW1 */
+ u32 RegNewTc; /* Reflects new contents of TC */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetOptions\n");
+ /* Many of these options will change the RCW1 or TC registers.
+ * To reduce the amount of IO to the device, group these options here
+ * and change them all at once.
+ */
+
+ /* Grab current register contents */
+ RegRcw1 = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ RegTc = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ RegNewRcw1 = RegRcw1;
+ RegNewTc = RegTc;
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "current control regs: RCW1: 0x%0x; TC: 0x%0x\n", RegRcw1,
+ RegTc);
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Options: 0x%0x; default options: 0x%0x\n", Options,
+ XTE_DEFAULT_OPTIONS);
+
+ /* Turn on jumbo packet support for both Rx and Tx */
+ if (Options & XTE_JUMBO_OPTION) {
+ RegNewTc |= XTE_TC_JUM_MASK;
+ RegNewRcw1 |= XTE_RCW1_JUM_MASK;
+ }
+
+ /* Turn on VLAN packet support for both Rx and Tx */
+ if (Options & XTE_VLAN_OPTION) {
+ RegNewTc |= XTE_TC_VLAN_MASK;
+ RegNewRcw1 |= XTE_RCW1_VLAN_MASK;
+ }
+
+ /* Turn on FCS stripping on receive packets */
+ if (Options & XTE_FCS_STRIP_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: enabling fcs stripping\n");
+ RegNewRcw1 &= ~XTE_RCW1_FCS_MASK;
+ }
+
+ /* Turn on FCS insertion on transmit packets */
+ if (Options & XTE_FCS_INSERT_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: enabling fcs insertion\n");
+ RegNewTc &= ~XTE_TC_FCS_MASK;
+ }
+
+ /* Turn on length/type field checking on receive packets */
+ if (Options & XTE_LENTYPE_ERR_OPTION) {
+ RegNewRcw1 &= ~XTE_RCW1_LT_DIS_MASK;
+ }
+
+ /* Enable transmitter */
+ if (Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ RegNewTc |= XTE_TC_TX_MASK;
+ }
+
+ /* Enable receiver */
+ if (Options & XTE_RECEIVER_ENABLE_OPTION) {
+ RegNewRcw1 |= XTE_RCW1_RX_MASK;
+ }
+
+ /* Change the TC or RCW1 registers if they need to be modified */
+ if (RegTc != RegNewTc) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+			    "setOptions: writing tc: 0x%0x\n", RegNewTc);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET, RegNewTc);
+ }
+
+ if (RegRcw1 != RegNewRcw1) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+			    "setOptions: writing rcw1: 0x%0x\n", RegNewRcw1);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, RegNewRcw1);
+ }
+
+ /* Rest of options twiddle bits of other registers. Handle them one at
+ * a time
+ */
+
+ /* Turn on flow control */
+ if (Options & XTE_FLOW_CONTROL_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+			    "setOptions: enabling flow control\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET);
+ Reg |= XTE_FCC_FCRX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (fcc): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* Turn on promiscuous frame filtering (all frames are received ) */
+ if (Options & XTE_PROMISC_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+			    "setOptions: enabling promiscuous mode\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET);
+ Reg |= XTE_AFM_PM_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (afm): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* Allow broadcast address filtering */
+ if (Options & XTE_BROADCAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg &= ~XTE_RAF_BCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (raf): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* Allow multicast address filtering */
+ if (Options & XTE_MULTICAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg &= ~XTE_RAF_MCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (raf2): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* The remaining options not handled here are managed elsewhere in the
+ * driver. No register modifications are needed at this time. Reflecting the
+ * option in InstancePtr->Options is good enough for now.
+ */
+
+ /* Set options word to its new value */
+ InstancePtr->Options |= Options;
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (end): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+ xdbg_printf(XDBG_DEBUG_GENERAL, "setOptions: returning SUCCESS\n");
+ return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_ClearOptions clears the options, <i>Options</i> for the TEMAC channel,
+ * specified by <i>InstancePtr</i>. The TEMAC channel should be stopped with
+ * XLlTemac_Stop() before changing options.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Options is a bitmask of OR'd XTE_*_OPTION values for options to
+ * clear. Options not specified are not affected.
+ *
+ * @return On successful completion, XLlTemac_ClearOptions returns XST_SUCCESS.
+ * Otherwise, if the device has not been stopped, XLlTemac_ClearOptions
+ * returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ * See xlltemac.h for a description of the available options.
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_ClearOptions(XLlTemac *InstancePtr, u32 Options)
+{
+ u32 Reg; /* Generic */
+ u32 RegRcw1; /* Reflects original contents of RCW1 */
+ u32 RegTc; /* Reflects original contents of TC */
+ u32 RegNewRcw1; /* Reflects new contents of RCW1 */
+ u32 RegNewTc; /* Reflects new contents of TC */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "Xtemac_ClearOptions: 0x%08x\n",
+ Options);
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Many of these options will change the RCW1 or TC registers.
+ * Group these options here and change them all at once. What we are
+ * trying to accomplish is to reduce the amount of IO to the device
+ */
+
+ /* Grab current register contents */
+ RegRcw1 = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ RegTc = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ RegNewRcw1 = RegRcw1;
+ RegNewTc = RegTc;
+
+ /* Turn off jumbo packet support for both Rx and Tx */
+ if (Options & XTE_JUMBO_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling jumbo\n");
+ RegNewTc &= ~XTE_TC_JUM_MASK;
+ RegNewRcw1 &= ~XTE_RCW1_JUM_MASK;
+ }
+
+ /* Turn off VLAN packet support for both Rx and Tx */
+ if (Options & XTE_VLAN_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling vlan\n");
+ RegNewTc &= ~XTE_TC_VLAN_MASK;
+ RegNewRcw1 &= ~XTE_RCW1_VLAN_MASK;
+ }
+
+ /* Turn off FCS stripping on receive packets */
+ if (Options & XTE_FCS_STRIP_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling fcs strip\n");
+ RegNewRcw1 |= XTE_RCW1_FCS_MASK;
+ }
+
+ /* Turn off FCS insertion on transmit packets */
+ if (Options & XTE_FCS_INSERT_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling fcs insert\n");
+ RegNewTc |= XTE_TC_FCS_MASK;
+ }
+
+ /* Turn off length/type field checking on receive packets */
+ if (Options & XTE_LENTYPE_ERR_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling lentype err\n");
+ RegNewRcw1 |= XTE_RCW1_LT_DIS_MASK;
+ }
+
+ /* Disable transmitter */
+ if (Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling transmitter\n");
+ RegNewTc &= ~XTE_TC_TX_MASK;
+ }
+
+ /* Disable receiver */
+ if (Options & XTE_RECEIVER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling receiver\n");
+ RegNewRcw1 &= ~XTE_RCW1_RX_MASK;
+ }
+
+ /* Change the TC and RCW1 registers if they need to be
+ * modified
+ */
+ if (RegTc != RegNewTc) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: setting TC: 0x%0x\n",
+ RegNewTc);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET, RegNewTc);
+ }
+
+ if (RegRcw1 != RegNewRcw1) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: setting RCW1: 0x%0x\n",
+ RegNewRcw1);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, RegNewRcw1);
+ }
+
+ /* Rest of options twiddle bits of other registers. Handle them one at
+ * a time
+ */
+
+ /* Turn off flow control */
+ if (Options & XTE_FLOW_CONTROL_OPTION) {
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET);
+ Reg &= ~XTE_FCC_FCRX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET, Reg);
+ }
+
+ /* Turn off promiscuous frame filtering */
+ if (Options & XTE_PROMISC_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling promiscuous mode\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET);
+ Reg &= ~XTE_AFM_PM_MASK;
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: setting AFM: 0x%0x\n", Reg);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET, Reg);
+ }
+
+ /* Disable broadcast address filtering */
+ if (Options & XTE_BROADCAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg |= XTE_RAF_BCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+
+ /* Disable multicast address filtering */
+ if (Options & XTE_MULTICAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg |= XTE_RAF_MCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+
+ /* The remaining options not handled here are managed elsewhere in the
+ * driver. No register modifications are needed at this time. Reflecting the
+ * option in InstancePtr->Options is good enough for now.
+ */
+
+ /* Set options word to its new value */
+ InstancePtr->Options &= ~Options;
+
+ return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetOptions returns the current option settings.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_GetOptions returns a bitmask of XTE_*_OPTION constants,
+ * each bit specifying an option that is currently active.
+ *
+ * @note
+ * See xlltemac.h for a description of the available options.
+ *
+ ******************************************************************************/
+u32 XLlTemac_GetOptions(XLlTemac *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ return (InstancePtr->Options);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetOperatingSpeed gets the current operating link speed. This may be
+ * the value set by XLlTemac_SetOperatingSpeed() or a hardware default.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_GetOperatingSpeed returns the link speed in units of megabits
+ * per second.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+u16 XLlTemac_GetOperatingSpeed(XLlTemac *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetOperatingSpeed\n");
+ switch (XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_EMMC_OFFSET) &
+ XTE_EMMC_LINKSPEED_MASK) {
+ case XTE_EMMC_LINKSPD_1000:
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetOperatingSpeed: returning 1000\n");
+ return (1000);
+
+ case XTE_EMMC_LINKSPD_100:
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetOperatingSpeed: returning 100\n");
+ return (100);
+
+ case XTE_EMMC_LINKSPD_10:
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetOperatingSpeed: returning 10\n");
+ return (10);
+
+ default:
+ return (0);
+ }
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetOperatingSpeed sets the current operating link speed. For any
+ * traffic to be passed, this speed must match the current MII/GMII/SGMII/RGMII
+ * link speed.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Speed is the speed to set in units of Mbps. Valid values are 10, 100,
+ * or 1000. XLlTemac_SetOperatingSpeed ignores invalid values.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_SetOperatingSpeed(XLlTemac *InstancePtr, u16 Speed)
+{
+ u32 EmmcReg;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID((Speed == 10) || (Speed == 100) || (Speed == 1000));
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetOperatingSpeed\n");
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetOperatingSpeed: setting speed to: %d (0x%0x)\n",
+ Speed, Speed);
+ /* Get the current contents of the EMAC config register and zero out
+ * speed bits
+ */
+ EmmcReg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_EMMC_OFFSET) &
+ ~XTE_EMMC_LINKSPEED_MASK;
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetOperatingSpeed: current speed: 0x%0x\n",
+ EmmcReg);
+ switch (Speed) {
+ case 10:
+ break;
+
+ case 100:
+ EmmcReg |= XTE_EMMC_LINKSPD_100;
+ break;
+
+ case 1000:
+ EmmcReg |= XTE_EMMC_LINKSPD_1000;
+ break;
+
+ default:
+ return;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetOperatingSpeed: new speed: 0x%0x\n", EmmcReg);
+ /* Set register and return */
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_EMMC_OFFSET, EmmcReg);
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetOperatingSpeed: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_PhySetMdioDivisor sets the MDIO clock divisor in the TEMAC channel,
+ * specified by <i>InstancePtr</i> to the value, <i>Divisor</i>. This function
+ * must be called once after each reset prior to accessing MII PHY registers.
+ *
+ * From the Virtex-4 Embedded Tri-Mode Ethernet MAC User's Guide, the
+ * following equation governs the MDIO clock to the PHY:
+ *
+ * <pre>
+ * f[HOSTCLK]
+ * f[MDC] = -----------------
+ * (1 + Divisor) * 2
+ * </pre>
+ *
+ * where f[HOSTCLK] is the bus clock frequency in MHz, and f[MDC] is the
+ * MDIO clock frequency in MHz to the PHY. Typically, f[MDC] should not
+ * exceed 2.5 MHz. Some PHYs can tolerate faster speeds which means faster
+ * access.
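+ *
+ * For example, assuming a 100 MHz host clock, a divisor of 19 gives
+ * f[MDC] = 100 / ((1 + 19) * 2) = 2.5 MHz, the typical maximum.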
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Divisor is the divisor value to set within the range of 0 to
+ *        XTE_MC_CLOCK_DIVIDE_MAX.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_PhySetMdioDivisor(XLlTemac *InstancePtr, u8 Divisor)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(Divisor <= XTE_MC_CLOCK_DIVIDE_MAX);
+
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_PhySetMdioDivisor\n");
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MC_OFFSET,
+ (u32) Divisor | XTE_MC_MDIOEN_MASK);
+}
+
+/*****************************************************************************/
+/*
+ * XLlTemac_PhyRead reads the specified PHY register, <i>RegisterNum</i>, on the
+ * PHY specified by <i>PhyAddress</i> into <i>PhyDataPtr</i>. This Ethernet
+ * driver does not require the device to be stopped before reading from the PHY.
+ * It is the responsibility of the calling code to stop the device if it is
+ * deemed necessary.
+ *
+ * Note that the TEMAC hardware provides the ability to talk to a PHY that
+ * adheres to the Media Independent Interface (MII) as defined in the IEEE 802.3
+ * standard.
+ *
+ * <b>It is important that calling code set up the MDIO clock with
+ * XLlTemac_PhySetMdioDivisor() prior to accessing the PHY with this function.</b>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param PhyAddress is the address of the PHY to be read (multiple
+ *        PHYs supported).
+ * @param RegisterNum is the register number, 0-31, of the specific PHY register
+ *        to read.
+ * @param PhyDataPtr is a reference to the location where the 16-bit result
+ * value is stored.
+ *
+ * @return N/A
+ *
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.<br><br>
+ *
+ * There is the possibility that this function will not return if the hardware
+ * is broken (i.e., it never sets the status bit indicating that the read is
+ * done). If this is of concern, the calling code should provide a mechanism
+ * suitable for recovery.
+ *
+ ******************************************************************************/
+void XLlTemac_PhyRead(XLlTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 *PhyDataPtr)
+{
+ u32 MiiReg;
+ u32 Rdy;
+ u32 Ie;
+ u32 Tis;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_PhyRead: BaseAddress: 0x%08x\n",
+ InstancePtr->Config.BaseAddress);
+ /*
+ * XLlTemac_PhyRead saves the state of the IE register so that it can
+ * clear the HardAcsCmplt bit and later restore the state of the IE
+ * register. Since XLlTemac_PhyRead will poll for the status already, the
+ * HardAcsCmplt bit is cleared in the IE register so that the
+ * application code above doesn't also receive the interrupt.
+ */
+ Ie = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET,
+ Ie & ~XTE_INT_HARDACSCMPLT_MASK);
+
+ /*
+ * This is a double indirect mechanism. We indirectly write the
+ * PHYAD and REGAD so we can read the PHY register back out in
+ * the LSW register.
+ *
+ * In this case, the method of reading the data is a little unusual.
+ * Normally to write to a TEMAC register, one would set the WEN bit
+ * in the CTL register so that the values of the LSW will be written.
+ *
+ * In this case, the WEN bit is not set, and the PHYAD and REGAD
+ * values in the LSW will still get sent to the PHY before actually
+ * reading the result in the LSW.
+ *
+	 * What needs to be done is the following:
+	 *    1) Write lsw reg with the phyad, and the regad
+	 *    2) write the ctl reg with the miimai value (with the WEN bit cleared)
+ * 3) poll the ready bit
+ * 4) get the value out of lsw
+ */
+ MiiReg = RegisterNum & XTE_MIIM_REGAD_MASK;
+ MiiReg |= ((PhyAddress << XTE_MIIM_PHYAD_SHIFT) & XTE_MIIM_PHYAD_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_PhyRead: Mii Reg: 0x%0x; Value written: 0x%0x\n",
+ RegisterNum, MiiReg);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_LSW_OFFSET,
+ MiiReg);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_CTL_OFFSET,
+ XTE_MIIMAI_OFFSET);
+
+ /*
+ * Wait here polling, until the value is ready to be read.
+ */
+ do {
+ Rdy = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET);
+ } while (!(Rdy & XTE_RSE_MIIM_RR_MASK));
+
+ /* Read data */
+ *PhyDataPtr = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_LSW_OFFSET);
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_PhyRead: Value retrieved: 0x%0x\n", *PhyDataPtr);
+
+ /*
+ * Clear MII status bits. The TIS register in the hard TEMAC doesn't
+ * use the 'write a 1 to clear' method, so we need to read the TIS
+	 * register, clear the MIIM ready bit, and then write it back out.
+ */
+ Tis = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET);
+ Tis &= ~XTE_RSE_MIIM_RR_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET, Tis);
+
+ /*
+ * restore the state of the IE reg
+ */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET, Ie);
+}
+
+
+/*****************************************************************************/
+/*
+ * XLlTemac_PhyWrite writes <i>PhyData</i> to the specified PHY register,
+ * <i>RegisterNum</i> on the PHY specified by <i>PhyAddress</i>. This Ethernet
+ * driver does not require the device to be stopped before writing to the PHY.
+ * It is the responsibility of the calling code to stop the device if it is
+ * deemed necessary.
+ *
+ * Note that the TEMAC hardware provides the ability to talk to a PHY that
+ * adheres to the Media Independent Interface (MII) as defined in the IEEE 802.3
+ * standard.
+ *
+ * <b>It is important that calling code set up the MDIO clock with
+ * XLlTemac_PhySetMdioDivisor() prior to accessing the PHY with this function.</b>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param PhyAddress is the address of the PHY to be written (multiple
+ * PHYs supported).
+ * @param RegisterNum is the register number, 0-31, of the specific PHY register
+ * to write.
+ * @param PhyData is the 16-bit value that will be written to the register.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.<br><br>
+ *
+ * There is the possibility that this function will not return if the hardware
+ * is broken (i.e., it never sets the status bit indicating that the write is
+ * done). If this is of concern, the calling code should provide a mechanism
+ * suitable for recovery.
+ *
+ ******************************************************************************/
+void XLlTemac_PhyWrite(XLlTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 PhyData)
+{
+ u32 MiiReg;
+ u32 Rdy;
+ u32 Ie;
+ u32 Tis;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_PhyWrite\n");
+ /*
+ * XLlTemac_PhyWrite saves the state of the IE register so that it can
+ * clear the HardAcsCmplt bit and later restore the state of the IE
+ * register. Since XLlTemac_PhyWrite will poll for the status already, the
+ * HardAcsCmplt bit is cleared in the IE register so that the
+ * application code above doesn't also receive the interrupt.
+ */
+ Ie = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET,
+ Ie & ~XTE_INT_HARDACSCMPLT_MASK);
+
+ /*
+ * This is a double indirect mechanism. We indirectly write the
+ * PhyData to the MIIMWD register, and then indirectly write PHYAD and
+ * REGAD so the value in MIIMWD will get written to the PHY.
+ */
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MIIMWD_OFFSET, PhyData);
+
+ MiiReg = RegisterNum & XTE_MIIM_REGAD_MASK;
+ MiiReg |= ((PhyAddress << XTE_MIIM_PHYAD_SHIFT) & XTE_MIIM_PHYAD_MASK);
+
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MIIMAI_OFFSET, MiiReg);
+
+ /*
+ * Wait here polling, until the value is ready to be read.
+ */
+ do {
+ Rdy = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET);
+ } while (!(Rdy & XTE_RSE_MIIM_WR_MASK));
+
+ /*
+ * Clear MII status bits. The TIS register in the hard TEMAC doesn't
+ * use the 'write a 1 to clear' method, so we need to read the TIS
+ * register, clear the MIIM WST bit, and then write it back out.
+ */
+ Tis = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET);
+ Tis &= ~XTE_RSE_MIIM_WR_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET, Tis);
+
+ /*
+ * restore the state of the IE reg
+ */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET, Ie);
+}
new file mode 100644
@@ -0,0 +1,785 @@
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2008 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac.h
+ *
+ * The Xilinx Tri-Mode Ethernet driver component. This driver supports the
+ * Virtex-5(TM) and Virtex-4(TM) 10/100/1000 MAC (TEMAC).
+ *
+ * For a full description of TEMAC features, please see the hardware spec. This driver
+ * supports the following features:
+ * - Memory mapped access to host interface registers
+ * - Virtual memory support
+ * - Unicast, broadcast, and multicast receive address filtering
+ * - Full duplex operation (half duplex not supported)
+ * - Automatic source address insertion or overwrite (programmable)
+ * - Automatic PAD & FCS insertion and stripping (programmable)
+ * - Flow control
+ * - VLAN frame support
+ * - Pause frame support
+ * - Jumbo frame support
+ * - Checksum offload
+ *
+ * <h2>Driver Description</h2>
+ *
+ * The device driver enables higher layer software (e.g., an application) to
+ * configure a TEMAC channel. It is intended that this driver be used in
+ * cooperation with another driver (FIFO or DMA) for data communication. This
+ * device driver can support multiple devices even when those devices have
+ * significantly different configurations.
+ *
+ * <h2>Initialization & Configuration</h2>
+ *
+ * The XLlTemac_Config structure can be used by the driver to configure itself.
+ * This configuration structure is typically created by the tool-chain based on
+ * hardware build properties, although other methods are allowed and currently
+ * used in some systems.
+ *
+ * To support multiple runtime loading and initialization strategies employed
+ * by various operating systems, the driver instance can be initialized using
+ * the XLlTemac_CfgInitialize() routine.
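+ *
+ * A minimal initialization sketch, assuming the device ID and the virtual
+ * base address are supplied by the adapter or platform code:
+ * <pre>
+ *      XLlTemac Temac;
+ *      XLlTemac_Config *Cfg = XLlTemac_LookupConfig(DeviceId);
+ *
+ *      if (XLlTemac_CfgInitialize(&Temac, Cfg, VirtualBase) != XST_SUCCESS) {
+ *              // handle the initialization failure
+ *      }
+ * </pre>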
+ *
+ * <h2>Interrupts and Asynchronous Callbacks</h2>
+ *
+ * The driver has no dependencies on the interrupt controller. It provides
+ * no interrupt handlers. The application/OS software should set up its own
+ * interrupt handlers if required.
+ *
+ * <h2>Device Reset</h2>
+ *
+ * When a TEMAC channel is connected up to a FIFO or DMA core in hardware,
+ * errors may be reported on one of those cores (FIFO or DMA) such that it can
+ * be determined that the TEMAC channel needs to be reset. If a reset is
+ * performed, the calling code should also reconfigure and reapply the proper
+ * settings in the TEMAC channel.
+ *
+ * When a TEMAC channel reset is required, XLlTemac_Reset() should be utilized.
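+ *
+ * A hedged recovery sketch; the option and address values are whatever the
+ * calling code had applied before the error was detected:
+ * <pre>
+ *      XLlTemac_Reset(&Temac, XTE_NORESET_HARD);
+ *      XLlTemac_SetOptions(&Temac, SavedOptions);
+ *      XLlTemac_SetMacAddress(&Temac, SavedMacAddress);
+ * </pre>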
+ *
+ * <h2>Virtual Memory</h2>
+ *
+ * This driver may be used in systems with virtual memory support by passing
+ * the appropriate value for the <i>EffectiveAddress</i> parameter to the
+ * XLlTemac_CfgInitialize() routine.
+ *
+ * <h2>Transferring Data</h2>
+ *
+ * The TEMAC core by itself is not capable of transmitting or receiving data in
+ * any meaningful way. Instead, one or both TEMAC channels need to be connected
+ * to a FIFO or DMA core in hardware.
+ *
+ * This TEMAC driver is modeled in a similar fashion, where the application code
+ * or OS adapter driver needs to make use of a separate FIFO or DMA driver in
+ * connection with this driver to establish meaningful communication over
+ * Ethernet.
+ *
+ * <h2>Checksum Offloading</h2>
+ *
+ * If configured, the device can compute a 16-bit checksum from frame data. In
+ * most circumstances this can lead to a substantial gain in throughput.
+ *
+ * The checksum offload settings for each frame sent or received are
+ * transmitted through the LocalLink interface in hardware. What this means is
+ * that the checksum offload feature is indirectly controlled in the TEMAC
+ * channel through the driver for the FIFO or DMA core connected to the TEMAC
+ * channel.
+ *
+ * Refer to the documentation for the FIFO or DMA driver used for data
+ * communication on how to set the values for the relevant LocalLink header
+ * words.
+ *
+ * Since this hardware implementation is general purpose in nature, system
+ * software must perform pre- and post-frame processing to obtain the desired
+ * results for the types of packets being transferred. Most of the time this
+ * will be TCP/IP traffic.
+ *
+ * TCP/IP and UDP/IP frames contain separate checksums for the IP header and
+ * UDP/TCP header+data. With this hardware implementation, the IP header checksum
+ * cannot be offloaded. Many stacks that support offloading will compute the IP
+ * header checksum in software if required and use hardware to compute the
+ * UDP/TCP header+data checksum.
+ * There are other complications concerning the IP pseudo header that must be
+ * taken into consideration. Readers should consult a TCP/IP design reference
+ * for more details.
+ *
+ * There are certain device options that will affect the checksum calculation
+ * performed by hardware for Tx:
+ *
+ * - FCS insertion disabled (XTE_FCS_INSERT_OPTION): software is required to
+ * calculate and insert the FCS value at the end of the frame, but the
+ * checksum must be known ahead of time prior to calculating the FCS.
+ * Therefore checksum offloading cannot be used in this situation.
+ *
+ * And for Rx:
+ *
+ * - FCS/PAD stripping disabled (XTE_FCS_STRIP_OPTION): The 4 byte FCS at the
+ *   end of frame will be included in the hardware calculated checksum. Software
+ *   must subtract out this data.
+ *
+ * - FCS/PAD stripping disabled (XTE_FCS_STRIP_OPTION): For frames smaller
+ *   than 64 bytes, padding will be included in the hardware calculated checksum.
+ *   Software must subtract out this data. It may be better to allow the TCP/IP
+ *   stack to verify checksums for this type of packet.
+ *
+ * - VLAN enabled (XTE_VLAN_OPTION): The 4 extra bytes in the Ethernet header
+ *   affect the hardware calculated checksum. Software must subtract out the
+ *   first two 16-bit words starting at the 15th byte.
+ *
+ * <h3>Transmit Checksum Offloading</h3>
+ *
+ * For transmit, the software can specify where in the frame the checksum
+ * calculation is to start, where the result should be inserted, and a seed
+ * value. The checksum is calculated from the start point through the end of
+ * frame.
+ *
+ * The checksum offloading settings are sent in the transmit LocalLink header
+ * words. The relevant LocalLink header words are described in brief below.
+ * Refer to the XPS_LL_TEMAC v1.00a hardware specification for more details.
+ *
+ * <h4>LocalLink header word 3:</h4>
+ * <pre>
+ * Bits 31 (MSB): Transmit Checksum Enable: 1 - enabled, 0 - disabled
+ * Bits 0-30 (LSB): Reserved
+ * </pre>
+ *
+ * <h4>LocalLink header word 4:</h4>
+ * <pre>
+ * Bits 16-31 (MSB): Transmit Checksum Insertion Point: Frame offset where the
+ * computed checksum value is stored, which should be in the
+ * TCP or UDP header
+ * Bits 0-15 (LSB): Transmit Checksum Calculation Starting Point: Offset
+ * in the frame where checksum calculation should begin
+ * </pre>
+ *
+ * <h4>LocalLink header word 5:</h4>
+ * <pre>
+ * Bits 16-31 (MSB): Transmit Checksum Calculation Initial Value: Checksum
+ * seed value
+ * Bits 0-15 (LSB): Reserved
+ * </pre>
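+ *
+ * As a hedged illustration of the layout above (the TxLlHeader name and the
+ * way the words reach hardware are assumptions; the buffer is owned by the
+ * FIFO or DMA driver):
+ * <pre>
+ *      TxLlHeader[3] = 0x80000000;                        // enable Tx checksum
+ *      TxLlHeader[4] = (InsertPoint << 16) | StartPoint;  // offsets in frame
+ *      TxLlHeader[5] = (SeedValue << 16);                 // initial seed value
+ * </pre>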
+ *
+ * <h3>Receive Checksum Offloading</h3>
+ *
+ * For Receive, the 15th byte to end of frame is checksummed. This range of
+ * bytes is the entire Ethernet payload (for non-VLAN frames).
+ *
+ * The checksum offloading information is sent in the receive LocalLink header
+ * words. The relevant LocalLink header words are described in brief below.
+ * Refer to the XPS_LL_TEMAC v1.00a hardware specification for more details.
+ *
+ * <h4>LocalLink header word 6:</h4>
+ * <pre>
+ * Bits 16-31 (MSB): Receive Raw Checksum: Computed checksum value
+ * Bits 0-15 (LSB): Reserved
+ * </pre>
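+ *
+ * A hedged extraction sketch (the RxLlHeader name is illustrative; the buffer
+ * is owned by the FIFO or DMA driver):
+ * <pre>
+ *      u16 RawCsum = (u16)(RxLlHeader[6] >> 16);
+ * </pre>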
+ *
+ * <h2>PHY Communication</h2>
+ *
+ * Prior to PHY access, the MDIO clock must be setup. This driver will set a
+ * safe default that should work with PLB bus speeds of up to 150 MHz and keep
+ * the MDIO clock below 2.5 MHz. If the user wishes faster access to the PHY
+ * then the clock divisor can be set to a different value (see
+ * XLlTemac_PhySetMdioDivisor()).
+ *
+ * MII register access is performed through the functions XLlTemac_PhyRead() and
+ * XLlTemac_PhyWrite().
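+ *
+ * A hedged access sketch (the PHY address is illustrative; register 1 is the
+ * standard MII status register):
+ * <pre>
+ *      u16 Status;
+ *
+ *      XLlTemac_PhySetMdioDivisor(&Temac, XTE_MDIO_DIV_DFT);
+ *      XLlTemac_PhyRead(&Temac, PhyAddr, 1, &Status);
+ * </pre>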
+ *
+ * <h2>Link Sync</h2>
+ *
+ * When the device is used in a multispeed environment, the link speed must be
+ * explicitly set using XLlTemac_SetOperatingSpeed() and must match the speed the
+ * PHY has negotiated. If the speeds are mismatched, then the MAC will not pass
+ * traffic.
+ *
+ * The application/OS software may use the AutoNegotiation interrupt to be
+ * notified when the PHY has completed auto-negotiation.
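+ *
+ * A hedged example of mirroring the negotiated speed in the MAC (how the
+ * speed is obtained from the PHY is left to the calling code):
+ * <pre>
+ *      XLlTemac_SetOperatingSpeed(&Temac, NegotiatedSpeed);
+ * </pre>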
+ *
+ * <h2>Asserts</h2>
+ *
+ * Asserts are used within all Xilinx drivers to enforce constraints on argument
+ * values. Asserts can be turned off on a system-wide basis by defining, at
+ * compile time, the NDEBUG identifier. By default, asserts are turned on and it
+ * is recommended that users leave asserts on during development. For deployment,
+ * use the -DNDEBUG compiler switch to remove assert code.
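+ *
+ * For example, a deployment build might add the following (the build-system
+ * variable used here is an assumption):
+ * <pre>
+ *      EXTRA_CFLAGS += -DNDEBUG
+ * </pre>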
+ *
+ * <h2>Driver Errata</h2>
+ *
+ * - A dropped receive frame indication may be reported by the driver after
+ * calling XLlTemac_Stop() followed by XLlTemac_Start(). This can occur if a
+ * frame is arriving when stop is called.
+ * - On Rx with checksum offloading enabled and FCS/PAD stripping disabled,
+ * FCS and PAD data will be included in the checksum result.
+ * - On Tx with checksum offloading enabled and auto FCS insertion disabled,
+ * the user calculated FCS will be included in the checksum result.
+ *
+ * @note
+ *
+ * Xilinx drivers are typically composed of two components, one is the driver
+ * and the other is the adapter. The driver is independent of OS and processor
+ * and is intended to be highly portable. The adapter is OS-specific and
+ * facilitates communication between the driver and an OS.
+ * <br><br>
+ * This driver is intended to be RTOS and processor independent. Any needs for
+ * dynamic memory management, threads or thread mutual exclusion, or cache
+ * control must be satisfied by the layer above this driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * 1.00a rpm 06/08/07 Added interrupt IDs to config structure for convenience
+ * </pre>
+ *
+ *****************************************************************************/
+
+#ifndef XTEMAC_H /* prevent circular inclusions */
+#define XTEMAC_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xenv.h"
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xlltemac_hw.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Device information
+ */
+#define XTE_DEVICE_NAME "xlltemac"
+#define XTE_DEVICE_DESC "Xilinx Tri-speed 10/100/1000 MAC"
+
+/* LocalLink TYPE Enumerations */
+#define XPAR_LL_FIFO 1
+#define XPAR_LL_DMA 2
+
+/** @name Configuration options
+ *
+ * The following are device configuration options. See the
+ * <i>XLlTemac_SetOptions</i>, <i>XLlTemac_ClearOptions</i> and
+ * <i>XLlTemac_GetOptions</i> routines for information on how to use options.
+ *
+ * The default state of the options are also noted below.
+ *
+ * @{
+ */
+
+#define XTE_PROMISC_OPTION 0x00000001
+/**< XTE_PROMISC_OPTION specifies the TEMAC channel to accept all incoming
+ * packets.
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_JUMBO_OPTION 0x00000002
+/**< XTE_JUMBO_OPTION specifies the TEMAC channel to accept jumbo frames
+ * for transmit and receive.
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_VLAN_OPTION 0x00000004
+/**< XTE_VLAN_OPTION specifies the TEMAC channel to enable VLAN support for
+ * transmit and receive.
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_FLOW_CONTROL_OPTION 0x00000008
+/**< XTE_FLOW_CONTROL_OPTION specifies the TEMAC channel to recognize
+ * received flow control frames.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_FCS_STRIP_OPTION 0x00000010
+/**< XTE_FCS_STRIP_OPTION specifies the TEMAC channel to strip FCS and PAD
+ * from received frames. Note that PAD from VLAN frames is not stripped.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_FCS_INSERT_OPTION 0x00000020
+/**< XTE_FCS_INSERT_OPTION specifies the TEMAC channel to generate the FCS
+ * field and add PAD automatically for outgoing frames.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_LENTYPE_ERR_OPTION 0x00000040
+/**< XTE_LENTYPE_ERR_OPTION specifies the TEMAC channel to enable
+ * Length/Type error checking (mismatched type/length field) for received
+ * frames.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_TRANSMITTER_ENABLE_OPTION 0x00000080
+/**< XTE_TRANSMITTER_ENABLE_OPTION specifies the TEMAC channel transmitter
+ * to be enabled.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_RECEIVER_ENABLE_OPTION 0x00000100
+/**< XTE_RECEIVER_ENABLE_OPTION specifies the TEMAC channel receiver to be
+ * enabled.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_BROADCAST_OPTION 0x00000200
+/**< XTE_BROADCAST_OPTION specifies the TEMAC channel to receive frames
+ * sent to the broadcast Ethernet address.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_MULTICAST_OPTION 0x00000400
+/**< XTE_MULTICAST_OPTION specifies the TEMAC channel to receive frames
+ * sent to Ethernet addresses that are programmed into the Multicast Address
+ * Table (MAT).
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_DEFAULT_OPTIONS \
+ (XTE_FLOW_CONTROL_OPTION | \
+ XTE_BROADCAST_OPTION | \
+ XTE_FCS_INSERT_OPTION | \
+ XTE_FCS_STRIP_OPTION | \
+ XTE_LENTYPE_ERR_OPTION | \
+ XTE_TRANSMITTER_ENABLE_OPTION | \
+ XTE_RECEIVER_ENABLE_OPTION)
+/**< XTE_DEFAULT_OPTIONS specify the options set in XLlTemac_Reset() and
+ * XLlTemac_CfgInitialize() */
+
+/*@}*/
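+
+/* An illustrative (hedged) use of the option constants above: enable jumbo
+ * frames on a stopped channel while keeping the remaining defaults, and make
+ * sure multicast reception stays disabled.
+ *
+ *      XLlTemac_SetOptions(&Temac, XTE_JUMBO_OPTION);
+ *      XLlTemac_ClearOptions(&Temac, XTE_MULTICAST_OPTION);
+ */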
+
+/** @name Reset parameters
+ *
+ * These are used by function XLlTemac_Reset().
+ * @{
+ */
+#define XTE_RESET_HARD 1
+#define XTE_NORESET_HARD 0
+/*@}*/
+
+#define XTE_MULTI_MAT_ENTRIES 4 /* Number of storable addresses in
+ the Multicast Address Table */
+
+#define XTE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
+
+/* The next few constants help upper layers determine the size of memory
+ * pools used for Ethernet buffers and descriptor lists.
+ */
+#define XTE_MAC_ADDR_SIZE 6 /* MAC addresses are 6 bytes */
+#define XTE_MTU 1500 /* max MTU size of an Ethernet frame */
+#define XTE_JUMBO_MTU 8982 /* max MTU size of a jumbo Ethernet frame */
+#define XTE_HDR_SIZE 14 /* size of an Ethernet header */
+#define XTE_HDR_VLAN_SIZE 18 /* size of an Ethernet header with VLAN */
+#define XTE_TRL_SIZE 4 /* size of an Ethernet trailer (FCS) */
+#define XTE_MAX_FRAME_SIZE (XTE_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+#define XTE_MAX_VLAN_FRAME_SIZE (XTE_MTU + XTE_HDR_VLAN_SIZE + XTE_TRL_SIZE)
+#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+
+/* Constant values returned by XLlTemac_GetPhysicalInterface(). Note that these
+ * values match design parameters from the PLB_TEMAC spec
+ */
+#define XTE_PHY_TYPE_MII 0
+#define XTE_PHY_TYPE_GMII 1
+#define XTE_PHY_TYPE_RGMII_1_3 2
+#define XTE_PHY_TYPE_RGMII_2_0 3
+#define XTE_PHY_TYPE_SGMII 4
+#define XTE_PHY_TYPE_1000BASE_X 5
+
+/**************************** Type Definitions *******************************/
+
+
+/**
+ * This typedef contains configuration information for a TEMAC channel.
+ * Each channel is treated as a separate device from the point of view of this
+ * driver.
+ */
+typedef struct {
+ /** u16 DeviceId; < DeviceId is the unique ID of the device */
+ u32 BaseAddress;/**< BaseAddress is the physical base address of the
+ * channel's registers
+ */
+ u8 TxCsum; /**< TxCsum indicates whether the channel has checksum
+ * offload on the Tx channel.
+ */
+ u8 RxCsum; /**< RxCsum indicates whether the channel has checksum
+ * offload on the Rx channel.
+ */
+ u8 PhyType; /**< PhyType indicates which type of PHY interface is
+ * used (MII, GMII, RGMII, etc.).
+ */
+ u8 TemacIntr; /**< TEMAC interrupt ID */
+
+ int LLDevType; /**< LLDevType is the type of device attached to the
+ * temac's local link interface.
+ */
+ u32 LLDevBaseAddress; /**< LLDevBaseAddress is the base address of the
+ * device attached to the temac's local link
+ * interface.
+ */
+ u8 LLFifoIntr; /**< LL FIFO interrupt ID (unused if DMA) */
+ u8 LLDmaRxIntr; /**< LL DMA RX interrupt ID (unused if FIFO) */
+ u8 LLDmaTxIntr; /**< LL DMA TX interrupt ID (unused if FIFO) */
+
+} XLlTemac_Config;
+
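+/* An illustrative (hedged) configuration entry; the addresses, flags and
+ * device type stand in for values normally generated by the tool chain or
+ * taken from the device tree.
+ *
+ *      static XLlTemac_Config ExampleCfg = {
+ *              .BaseAddress      = 0x81c00000,
+ *              .TxCsum           = 1,
+ *              .RxCsum           = 1,
+ *              .PhyType          = XTE_PHY_TYPE_GMII,
+ *              .LLDevType        = XPAR_LL_DMA,
+ *              .LLDevBaseAddress = 0x84600000,
+ *      };
+ */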
+
+/**
+ * struct XLlTemac is the type for TEMAC driver instance data. The calling code
+ * is required to use a unique instance of this structure for every TEMAC
+ * channel used in the system. Each channel is treated as a separate device
+ * from the point of view of this driver. A reference to a structure of this
+ * type is then passed to the driver API functions.
+ */
+typedef struct XLlTemac {
+ XLlTemac_Config Config; /* hardware configuration */
+ u32 IsStarted; /* Device is currently started */
+ u32 IsReady; /* Device is initialized and ready */
+ u32 Options; /* Current options word */
+ u32 Flags; /* Internal driver flags */
+} XLlTemac;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsStarted reports if the device is in the started or stopped state. To
+ * be in the started state, the calling code must have made a successful call to
+ * <i>XLlTemac_Start</i>. To be in the stopped state, the <i>XLlTemac_Stop</i> or
+ * <i>XLlTemac_CfgInitialize</i> function must have been called.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsStarted returns TRUE if the device has been started.
+ * Otherwise, XLlTemac_IsStarted returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsStarted(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsStarted(InstancePtr) \
+ (((InstancePtr)->IsStarted == XCOMPONENT_IS_STARTED) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* XLlTemac_IsDma reports if the device is currently connected to DMA.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IsDma returns TRUE if the device is connected to DMA. Otherwise,
+* XLlTemac_IsDma returns FALSE.
+*
+* @note
+*
+* Signature: u32 XLlTemac_IsDma(XLlTemac *InstancePtr)
+*
+******************************************************************************/
+#define XLlTemac_IsDma(InstancePtr) \
+ (((InstancePtr)->Config.LLDevType == XPAR_LL_DMA) ? TRUE: FALSE)
+
+/*****************************************************************************/
+/**
+*
+* XLlTemac_IsFifo reports if the device is currently connected to a fifo core.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IsFifo returns TRUE if the device is connected to a fifo core.
+* Otherwise, XLlTemac_IsFifo returns FALSE.
+*
+* @note
+*
+* Signature: u32 XLlTemac_IsFifo(XLlTemac *InstancePtr)
+*
+******************************************************************************/
+#define XLlTemac_IsFifo(InstancePtr) \
+ (((InstancePtr)->Config.LLDevType == XPAR_LL_FIFO) ? TRUE: FALSE)
+
+/*****************************************************************************/
+/**
+*
+* XLlTemac_LlDevBaseAddress reports the base address of the core connected to
+* the TEMAC's local link interface.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_LlDevBaseAddress returns the base address of the core connected to
+* the TEMAC's local link interface.
+*
+* @note
+*
+* Signature: u32 XLlTemac_LlDevBaseAddress(XLlTemac *InstancePtr)
+*
+******************************************************************************/
+#define XLlTemac_LlDevBaseAddress(InstancePtr) \
+ ((InstancePtr)->Config.LLDevBaseAddress)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsRecvFrameDropped determines if the device thinks it has dropped a
+ * receive frame.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsRecvFrameDropped returns TRUE if the device interrupt
+ * status register reports that a frame has been dropped. Otherwise,
+ * XLlTemac_IsRecvFrameDropped returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsRecvFrameDropped(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsRecvFrameDropped(InstancePtr) \
+ ((XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, XTE_IS_OFFSET) \
+ & XTE_INT_RXRJECT_MASK) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsRxCsum determines if the device is configured with checksum
+ * offloading on the receive channel.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsRxCsum returns TRUE if the device is configured with
+ * checksum offloading on the receive channel. Otherwise,
+ * XLlTemac_IsRxCsum returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsRxCsum(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsRxCsum(InstancePtr) (((InstancePtr)->Config.RxCsum) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsTxCsum determines if the device is configured with checksum
+ * offloading on the transmit channel.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsTxCsum returns TRUE if the device is configured with
+ * checksum offloading on the transmit channel. Otherwise,
+ * XLlTemac_IsTxCsum returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsTxCsum(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsTxCsum(InstancePtr) (((InstancePtr)->Config.TxCsum) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_GetPhysicalInterface returns the type of PHY interface being used by
+ * the given instance, specified by <i>InstancePtr</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_GetPhysicalInterface returns one of XTE_PHY_TYPE_<x> where
+ * <x> is MII, GMII, RGMII_1_3, RGMII_2_0, SGMII, or 1000BASE_X (defined in
+ * xlltemac.h).
+ *
+ * @note
+ *
+ * Signature: int XLlTemac_GetPhysicalInterface(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_GetPhysicalInterface(InstancePtr) \
+ ((InstancePtr)->Config.PhyType)
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_Status returns a bit mask of the interrupt status register (ISR).
+* XLlTemac_Status can be used to query the status without having to have
+* interrupts enabled.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_Status returns a bit mask of the status conditions.
+* The mask will be a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+*        u32 XLlTemac_Status(XLlTemac *InstancePtr)
+*
+*****************************************************************************/
+#define XLlTemac_Status(InstancePtr) \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, XTE_IS_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntEnable enables the interrupts specified in <i>Mask</i>. The
+* corresponding interrupt for each bit set to 1 in <i>Mask</i>, will be
+* enabled.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @param Mask contains a bit mask of the interrupts to enable. The mask
+* can be formed using a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlTemac_IntEnable(XLlTemac *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlTemac_IntEnable(InstancePtr, Mask) \
+ XLlTemac_WriteReg((InstancePtr)->Config.BaseAddress, XTE_IE_OFFSET, \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, \
+ XTE_IE_OFFSET) | ((Mask) & XTE_INT_ALL_MASK)); \
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntDisable disables the interrupts specified in <i>Mask</i>. The
+* corresponding interrupt for each bit set to 1 in <i>Mask</i>, will be
+* disabled. In other words, XLlTemac_IntDisable uses the "set a bit to clear it"
+* scheme.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @param Mask contains a bit mask of the interrupts to disable. The mask
+* can be formed using a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlTemac_IntDisable(XLlTemac *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlTemac_IntDisable(InstancePtr, Mask) \
+ XLlTemac_WriteReg((InstancePtr)->Config.BaseAddress, XTE_IE_OFFSET, \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, \
+ XTE_IE_OFFSET) & ~((Mask) & XTE_INT_ALL_MASK)); \
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntPending returns a bit mask of the pending interrupts. Each bit
+* set to 1 in the return value represents a pending interrupt.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IntPending returns a bit mask of the interrupts that are
+* pending. The mask will be a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* u32 XLlTemac_IntPending(XLlTemac *InstancePtr)
+*
+*****************************************************************************/
+#define XLlTemac_IntPending(InstancePtr) \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, XTE_IP_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntClear clears pending interrupts specified in <i>Mask</i>.
+* The corresponding pending interrupt for each bit set to 1 in <i>Mask</i>,
+* will be cleared. In other words, XLlTemac_IntClear uses the "set a bit to
+* clear it" scheme.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @param Mask contains a bit mask of the pending interrupts to clear. The
+* mask can be formed using a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* void XLlTemac_IntClear(XLlTemac *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlTemac_IntClear(InstancePtr, Mask) \
+ XLlTemac_WriteReg((InstancePtr)->Config.BaseAddress, XTE_IS_OFFSET, \
+ ((Mask) & XTE_INT_ALL_MASK))
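+
+/*
+* A minimal (hedged) polling sketch built from the macros above; handling of
+* the individual conditions is left to the application/OS code.
+*
+*      u32 Pending = XLlTemac_IntPending(&Temac);
+*
+*      if (Pending & XTE_INT_RXRJECT_MASK) {
+*              // a receive frame was dropped
+*      }
+*      XLlTemac_IntClear(&Temac, Pending);
+*/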
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization functions in xlltemac.c
+ */
+int XLlTemac_CfgInitialize(XLlTemac *InstancePtr, XLlTemac_Config *CfgPtr,
+ u32 VirtualAddress);
+void XLlTemac_Start(XLlTemac *InstancePtr);
+void XLlTemac_Stop(XLlTemac *InstancePtr);
+void XLlTemac_Reset(XLlTemac *InstancePtr, int HardCoreAction);
+
+/*
+ * Initialization functions in xlltemac_sinit.c
+ */
+XLlTemac_Config *XLlTemac_LookupConfig(u16 DeviceId);
+
+/*
+ * MAC configuration/control functions in xlltemac_control.c
+ */
+int XLlTemac_SetOptions(XLlTemac *InstancePtr, u32 Options);
+int XLlTemac_ClearOptions(XLlTemac *InstancePtr, u32 Options);
+u32 XLlTemac_GetOptions(XLlTemac *InstancePtr);
+
+int XLlTemac_SetMacAddress(XLlTemac *InstancePtr, void *AddressPtr);
+void XLlTemac_GetMacAddress(XLlTemac *InstancePtr, void *AddressPtr);
+
+int XLlTemac_SetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr);
+void XLlTemac_GetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr);
+int XLlTemac_SendPausePacket(XLlTemac *InstancePtr, u16 PauseValue);
+
+int XLlTemac_GetSgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr);
+int XLlTemac_GetRgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr,
+ int *IsFullDuplexPtr, int *IsLinkUpPtr);
+u16 XLlTemac_GetOperatingSpeed(XLlTemac *InstancePtr);
+void XLlTemac_SetOperatingSpeed(XLlTemac *InstancePtr, u16 Speed);
+
+void XLlTemac_PhySetMdioDivisor(XLlTemac *InstancePtr, u8 Divisor);
+void XLlTemac_PhyRead(XLlTemac *InstancePtr, u32 PhyAddress, u32 RegisterNum,
+ u16 *PhyDataPtr);
+void XLlTemac_PhyWrite(XLlTemac *InstancePtr, u32 PhyAddress, u32 RegisterNum,
+ u16 PhyData);
+int XLlTemac_MulticastAdd(XLlTemac *InstancePtr, void *AddressPtr, int Entry);
+void XLlTemac_MulticastGet(XLlTemac *InstancePtr, void *AddressPtr, int Entry);
+int XLlTemac_MulticastClear(XLlTemac *InstancePtr, int Entry);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
new file mode 100644
@@ -0,0 +1,679 @@
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2008 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac_control.c
+ *
+ * Functions in this file implement general purpose command and control related
+ * functionality. See xlltemac.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * </pre>
+ *****************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xlltemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_MulticastAdd adds the Ethernet address, <i>AddressPtr</i> to the
+ * TEMAC channel's multicast filter list, at list index <i>Entry</i>. The
+ * address referenced by <i>AddressPtr</i> may be of any unicast, multicast, or
+ * broadcast address form. The hardware for the TEMAC channel can hold up to
+ * XTE_MULTI_MAT_ENTRIES addresses in this filter list.<br><br>
+ *
+ * The device must be stopped to use this function.<br><br>
+ *
+ * Once an Ethernet address is programmed, the TEMAC channel will begin
+ * receiving data sent from that address. The TEMAC hardware does not have a
+ * control bit to disable multicast filtering. The only way to prevent the
+ * TEMAC channel from receiving messages from an Ethernet address in the
+ * Multicast Address Table (MAT) is to clear it with XLlTemac_MulticastClear().
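+ *
+ * An illustrative (hedged) call sequence adding a multicast group address to
+ * entry 0 while the channel is stopped:
+ * <pre>
+ *      u8 McastAddr[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
+ *
+ *      XLlTemac_Stop(&Temac);
+ *      XLlTemac_MulticastAdd(&Temac, McastAddr, 0);
+ *      XLlTemac_Start(&Temac);
+ * </pre>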
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr is a pointer to the 6-byte Ethernet address to set. The
+ * previous address at the location <i>Entry</i> (if any) is overwritten
+ * with the value at <i>AddressPtr</i>.
+ * @param Entry is the hardware storage location to program this address and
+ * must be between 0..XTE_MULTI_MAT_ENTRIES-1.
+ *
+ * @return On successful completion, XLlTemac_MulticastAdd returns XST_SUCCESS.
+ * Otherwise, if the TEMAC channel is not stopped, XLlTemac_MulticastAdd
+ * returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_MulticastAdd(XLlTemac *InstancePtr, void *AddressPtr, int Entry)
+{
+ u32 Maw0Reg;
+ u32 Maw1Reg;
+ u8 *Aptr = (u8 *) AddressPtr;
+ u32 Rdy;
+ int MaxWait = 100;
+ u32 BaseAddress = InstancePtr->Config.BaseAddress;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_NONVOID(AddressPtr != NULL);
+ XASSERT_NONVOID(Entry < XTE_MULTI_MAT_ENTRIES);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastAdd\n");
+
+ /* The device must be stopped before adding a multicast address entry */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_MulticastAdd: returning DEVICE_IS_STARTED\n");
+
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Set MAC bits [31:0] */
+ Maw0Reg = Aptr[0];
+ Maw0Reg |= Aptr[1] << 8;
+ Maw0Reg |= Aptr[2] << 16;
+ Maw0Reg |= Aptr[3] << 24;
+
+ /* Set MAC bits [47:32] */
+ Maw1Reg = Aptr[4];
+ Maw1Reg |= Aptr[5] << 8;
+
+ /* Add in MAT address */
+ Maw1Reg |= (Entry << XTE_MAW1_MATADDR_SHIFT_MASK);
+
+ /* Program HW */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "Setting MAT entry: %d\n", Entry);
+ XLlTemac_WriteReg(BaseAddress, XTE_LSW_OFFSET, Maw0Reg);
+ XLlTemac_WriteReg(BaseAddress, XTE_CTL_OFFSET,
+ XTE_MAW0_OFFSET | XTE_CTL_WEN_MASK);
+ Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+ while (MaxWait && (!(Rdy & XTE_RDY_HARD_ACS_RDY_MASK))) {
+ Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+ xdbg_stmnt(
+ if (MaxWait == 100) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "RDY reg not initially ready\n");
+ }
+ );
+ MaxWait--;
+ xdbg_stmnt(
+ if (MaxWait == 0) {
+ xdbg_printf (XDBG_DEBUG_GENERAL,
+ "RDY reg never showed ready\n");
+ }
+ )
+ }
+ XLlTemac_WriteReg(BaseAddress, XTE_LSW_OFFSET,
+ Maw1Reg);
+ XLlTemac_WriteReg(BaseAddress, XTE_CTL_OFFSET,
+ XTE_MAW1_OFFSET | XTE_CTL_WEN_MASK);
+ Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+ while (MaxWait && (!(Rdy & XTE_RDY_HARD_ACS_RDY_MASK))) {
+ Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+ xdbg_stmnt(
+ if (MaxWait == 100) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "RDY reg not initially ready\n");
+ }
+ );
+ MaxWait--;
+ xdbg_stmnt(
+ if (MaxWait == 0) {
+ xdbg_printf (XDBG_DEBUG_GENERAL,
+ "RDY reg never showed ready\n");
+ }
+ )
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastAdd: returning SUCCESS\n");
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_MulticastGet gets the Ethernet address stored at index <i>Entry</i>
+ * in the TEMAC channel's multicast filter list.<br><br>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr references the memory buffer to store the retrieved
+ * Ethernet address. This memory buffer must be at least 6 bytes in
+ * length.
+ * @param Entry is the hardware storage location from which to retrieve the
+ * address and must be between 0..XTE_MULTI_MAT_ENTRIES-1.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_MulticastGet(XLlTemac *InstancePtr, void *AddressPtr, int Entry)
+{
+ u32 Maw0Reg;
+ u32 Maw1Reg;
+ u8 *Aptr = (u8 *) AddressPtr;
+ u32 Rdy;
+ int MaxWait = 100;
+ u32 BaseAddress = InstancePtr->Config.BaseAddress;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(Entry < XTE_MULTI_MAT_ENTRIES);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastGet\n");
+
+ /*
+ * Tell HW to provide address stored in given entry.
+ * In this case, the access is a little weird, because we need to
+ * write the LSW register first, then initiate a write operation,
+ * even though it's a read operation.
+ */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "Getting MAT entry: %d\n", Entry);
+ XLlTemac_WriteReg(BaseAddress, XTE_LSW_OFFSET,
+ Entry << XTE_MAW1_MATADDR_SHIFT_MASK | XTE_MAW1_RNW_MASK);
+ XLlTemac_WriteReg(BaseAddress, XTE_CTL_OFFSET,
+ XTE_MAW1_OFFSET | XTE_CTL_WEN_MASK);
+ Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+ while (MaxWait && (!(Rdy & XTE_RDY_HARD_ACS_RDY_MASK))) {
+ Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+ xdbg_stmnt(
+ if (MaxWait == 100) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "RDY reg not initially ready\n");
+ }
+ );
+ MaxWait--;
+ xdbg_stmnt(
+ if (MaxWait == 0) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "RDY reg never showed ready\n");
+ }
+ )
+
+ }
+ Maw0Reg = XLlTemac_ReadReg(BaseAddress, XTE_LSW_OFFSET);
+ Maw1Reg = XLlTemac_ReadReg(BaseAddress, XTE_MSW_OFFSET);
+
+ /* Copy the address to the user buffer */
+ Aptr[0] = (u8) Maw0Reg;
+ Aptr[1] = (u8) (Maw0Reg >> 8);
+ Aptr[2] = (u8) (Maw0Reg >> 16);
+ Aptr[3] = (u8) (Maw0Reg >> 24);
+ Aptr[4] = (u8) Maw1Reg;
+ Aptr[5] = (u8) (Maw1Reg >> 8);
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastGet: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_MulticastClear clears the Ethernet address stored at index <i>Entry</i>
+ * in the TEMAC channel's multicast filter list.<br><br>
+ *
+ * The device must be stopped to use this function.<br><br>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Entry is the hardware storage location to clear and must be between
+ *        0..XTE_MULTI_MAT_ENTRIES-1.
+ *
+ * @return On successful completion, XLlTemac_MulticastClear returns XST_SUCCESS.
+ * Otherwise, if the TEMAC channel is not stopped, XLlTemac_MulticastClear
+ * returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_MulticastClear(XLlTemac *InstancePtr, int Entry)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_NONVOID(Entry < XTE_MULTI_MAT_ENTRIES);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastClear\n");
+
+ /* The device must be stopped before clearing a multicast address entry */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_MulticastClear: returning DEVICE_IS_STARTED\n");
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Clear the entry by writing 0:0:0:0:0:0 to it */
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MAW0_OFFSET, 0);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MAW1_OFFSET, Entry << XTE_MAW1_MATADDR_SHIFT_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_MulticastClear: returning SUCCESS\n");
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetMacPauseAddress sets the MAC address used for pause frames to
+ * <i>AddressPtr</i>. <i>AddressPtr</i> will be the address the TEMAC channel
+ * will recognize as being for pause frames. Pause frames transmitted with
+ * XLlTemac_SendPausePacket() will also use this address.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr is a pointer to the 6-byte Ethernet address to set.
+ *
+ * @return On successful completion, XLlTemac_SetMacPauseAddress returns
+ * XST_SUCCESS. Otherwise, if the TEMAC channel is not stopped,
+ * XLlTemac_SetMacPauseAddress returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ u32 MacAddr;
+ u8 *Aptr = (u8 *) AddressPtr;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetMacPauseAddress\n");
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetMacPauseAddress: returning DEVICE_IS_STARTED\n");
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Set the MAC bits [31:0] in ERXC0 */
+ MacAddr = Aptr[0];
+ MacAddr |= Aptr[1] << 8;
+ MacAddr |= Aptr[2] << 16;
+ MacAddr |= Aptr[3] << 24;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW0_OFFSET, MacAddr);
+
+ /* ERCW1 contains other info that must be preserved */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ MacAddr &= ~XTE_RCW1_PAUSEADDR_MASK;
+
+ /* Set MAC bits [47:32] */
+ MacAddr |= Aptr[4];
+ MacAddr |= Aptr[5] << 8;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, MacAddr);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetMacPauseAddress: returning SUCCESS\n");
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetMacPauseAddress gets the MAC address used for pause frames for the
+ * TEMAC channel specified by <i>InstancePtr</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr references the memory buffer to store the retrieved MAC
+ * address. This memory buffer must be at least 6 bytes in length.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_GetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ u32 MacAddr;
+ u8 *Aptr = (u8 *) AddressPtr;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetMacPauseAddress\n");
+
+ /* Read MAC bits [31:0] in ERXC0 */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW0_OFFSET);
+ Aptr[0] = (u8) MacAddr;
+ Aptr[1] = (u8) (MacAddr >> 8);
+ Aptr[2] = (u8) (MacAddr >> 16);
+ Aptr[3] = (u8) (MacAddr >> 24);
+
+ /* Read MAC bits [47:32] in RCW1 */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Aptr[4] = (u8) MacAddr;
+ Aptr[5] = (u8) (MacAddr >> 8);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetMacPauseAddress: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SendPausePacket sends a pause packet with the value of
+ * <i>PauseValue</i>.
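+ *
+ * An illustrative (hedged) request asking the link partner to pause for
+ * roughly one maximum-length frame time (PauseValue is expressed in units of
+ * 512 bit times):
+ * <pre>
+ *      XLlTemac_SendPausePacket(&Temac, (XTE_MAX_FRAME_SIZE * 8) / 512 + 1);
+ * </pre>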
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param PauseValue is the pause value in units of 512 bit times.
+ *
+ * @return On successful completion, XLlTemac_SendPausePacket returns
+ * XST_SUCCESS. Otherwise, if the TEMAC channel is not started,
+ * XLlTemac_SendPausePacket returns XST_DEVICE_IS_STOPPED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SendPausePacket(XLlTemac *InstancePtr, u16 PauseValue)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SendPausePacket\n");
+
+ /* Make sure device is ready for this operation */
+ if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SendPausePacket: returning DEVICE_IS_STOPPED\n");
+ return (XST_DEVICE_IS_STOPPED);
+ }
+
+ /* Send flow control frame */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_TPF_OFFSET,
+ (u32) PauseValue & XTE_TPF_TPFV_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SendPausePacket: returning SUCCESS\n");
+ return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetSgmiiStatus gets the state of the link when using the SGMII media
+ * interface.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param SpeedPtr references the location to store the result, which is the
+ * autonegotiated link speed in units of Mbits/sec, either 0, 10, 100,
+ * or 1000.
+ *
+ * @return On successful completion, XLlTemac_GetSgmiiStatus returns XST_SUCCESS.
+ * Otherwise, if TEMAC channel is not using an SGMII interface,
+ * XLlTemac_GetSgmiiStatus returns XST_NO_FEATURE.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_GetSgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr)
+{
+ int PhyType;
+ u32 EgmicReg;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetSgmiiStatus\n");
+ /* Make sure PHY is SGMII */
+ PhyType = XLlTemac_GetPhysicalInterface(InstancePtr);
+ if (PhyType != XTE_PHY_TYPE_SGMII) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetSgmiiStatus: returning NO_FEATURE\n");
+ return (XST_NO_FEATURE);
+ }
+
+ /* Get the current contents of RGMII/SGMII config register */
+ EgmicReg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_PHYC_OFFSET);
+
+ /* Extract speed */
+ switch (EgmicReg & XTE_PHYC_SGMIILINKSPEED_MASK) {
+ case XTE_PHYC_SGLINKSPD_10:
+ *SpeedPtr = 10;
+ break;
+
+ case XTE_PHYC_SGLINKSPD_100:
+ *SpeedPtr = 100;
+ break;
+
+ case XTE_PHYC_SGLINKSPD_1000:
+ *SpeedPtr = 1000;
+ break;
+
+ default:
+ *SpeedPtr = 0;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetSgmiiStatus: returning SUCCESS\n");
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetRgmiiStatus get the state of the link when using the RGMII media
+ * interface.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param SpeedPtr references the location to store the result, which is the
+ * autonegotiated link speed in units of Mbits/sec, either 0, 10, 100,
+ * or 1000.
+ * @param IsFullDuplexPtr references the value to set to indicate full duplex
+ * operation. XLlTemac_GetRgmiiStatus sets <i>IsFullDuplexPtr</i> to TRUE
+ * when the RGMII link is operating in full duplex mode. Otherwise,
+ * XLlTemac_GetRgmiiStatus sets <i>IsFullDuplexPtr</i> to FALSE.
+ * @param IsLinkUpPtr references the value to set to indicate the link status.
+ * XLlTemac_GetRgmiiStatus sets <i>IsLinkUpPtr</i> to TRUE when the RGMII
+ * link is up. Otherwise, XLlTemac_GetRgmiiStatus sets <i>IsLinkUpPtr</i> to
+ * FALSE.
+ *
+ * @return On successful completion, XLlTemac_GetRgmiiStatus returns XST_SUCCESS.
+ * Otherwise, if TEMAC channel is not using an RGMII interface,
+ * XLlTemac_GetRgmiiStatus returns XST_NO_FEATURE.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_GetRgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr,
+ int *IsFullDuplexPtr, int *IsLinkUpPtr)
+{
+ int PhyType;
+ u32 EgmicReg;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetRgmiiStatus\n");
+ /* Make sure PHY is RGMII */
+ PhyType = XLlTemac_GetPhysicalInterface(InstancePtr);
+ if ((PhyType != XTE_PHY_TYPE_RGMII_1_3) &&
+ (PhyType != XTE_PHY_TYPE_RGMII_2_0)) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetRgmiiStatus: returning NO_FEATURE\n");
+ return (XST_NO_FEATURE);
+ }
+
+ /* Get the current contents of RGMII/SGMII config register */
+ EgmicReg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_PHYC_OFFSET);
+
+ /* Extract speed */
+ switch (EgmicReg & XTE_PHYC_RGMIILINKSPEED_MASK) {
+ case XTE_PHYC_RGLINKSPD_10:
+ *SpeedPtr = 10;
+ break;
+
+ case XTE_PHYC_RGLINKSPD_100:
+ *SpeedPtr = 100;
+ break;
+
+ case XTE_PHYC_RGLINKSPD_1000:
+ *SpeedPtr = 1000;
+ break;
+
+ default:
+ *SpeedPtr = 0;
+ }
+
+ /* Extract duplex and link status */
+ if (EgmicReg & XTE_PHYC_RGMIIHD_MASK) {
+ *IsFullDuplexPtr = FALSE;
+ }
+ else {
+ *IsFullDuplexPtr = TRUE;
+ }
+
+ if (EgmicReg & XTE_PHYC_RGMIILINK_MASK) {
+ *IsLinkUpPtr = TRUE;
+ }
+ else {
+ *IsLinkUpPtr = FALSE;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetRgmiiStatus: returning SUCCESS\n");
+ return (XST_SUCCESS);
+}
+
new file mode 100644
@@ -0,0 +1,562 @@
+/* iId: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2008 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac_hw.h
+ *
+ * This header file contains identifiers and low-level driver functions (or
+ * macros) that can be used to access the Tri-Mode MAC Ethernet (TEMAC) device.
+ * High-level driver functions are defined in xlltemac.h.
+ *
+ * @note
+ *
+ * Some registers are not accessible when a HW instance is configured for SGDMA.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * </pre>
+ *
+ ******************************************************************************/
+
+#ifndef XTEMAC_HW_H /* prevent circular inclusions */
+#define XTEMAC_HW_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xdebug.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************** Constant Definitions *****************************/
+
+#define XTE_RESET_HARD_DELAY_US 4 /**< Us to delay for hard core reset */
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit.
+ */
+
+/** @name Direct registers
+ * @{
+ */
+#define XTE_RAF_OFFSET 0x00000000 /**< Reset and address filter */
+#define XTE_TPF_OFFSET 0x00000004 /**< Transmit pause frame */
+#define XTE_IFGP_OFFSET 0x00000008 /**< Transmit inter-frame gap adjustment */
+#define XTE_IS_OFFSET 0x0000000C /**< Interrupt status */
+#define XTE_IP_OFFSET 0x00000010 /**< Interrupt pending */
+#define XTE_IE_OFFSET 0x00000014 /**< Interrupt enable */
+
+#define XTE_MSW_OFFSET 0x00000020 /**< Most significant word data */
+#define XTE_LSW_OFFSET 0x00000024 /**< Least significant word data */
+#define XTE_CTL_OFFSET 0x00000028 /**< Control */
+#define XTE_RDY_OFFSET 0x0000002C /**< Ready status */
+/*@}*/
+
+
+/** @name HARD_TEMAC Core Registers
+ * These are registers defined within the device's hard core located in the
+ * processor block. They are accessed indirectly through the registers, MSW,
+ * LSW, and CTL.
+ *
+ * Access to these registers should go through macros XLlTemac_ReadIndirectReg()
+ * and XLlTemac_WriteIndirectReg() to guarantee proper access.
+ * @{
+ */
+#define XTE_RCW0_OFFSET 0x00000200 /**< Rx configuration word 0 */
+#define XTE_RCW1_OFFSET 0x00000240 /**< Rx configuration word 1 */
+#define XTE_TC_OFFSET 0x00000280 /**< Tx configuration */
+#define XTE_FCC_OFFSET 0x000002C0 /**< Flow control configuration */
+#define XTE_EMMC_OFFSET 0x00000300 /**< EMAC mode configuration */
+#define XTE_PHYC_OFFSET 0x00000320 /**< RGMII/SGMII configuration */
+#define XTE_MC_OFFSET 0x00000340 /**< Management configuration */
+#define XTE_UAW0_OFFSET 0x00000380 /**< Unicast address word 0 */
+#define XTE_UAW1_OFFSET 0x00000384 /**< Unicast address word 1 */
+#define XTE_MAW0_OFFSET 0x00000388 /**< Multicast address word 0 */
+#define XTE_MAW1_OFFSET 0x0000038C /**< Multicast address word 1 */
+#define XTE_AFM_OFFSET 0x00000390 /**< Address Filter (promiscuous) mode */
+#define XTE_TIS_OFFSET 0x000003A0 /**< Interrupt status */
+#define XTE_TIE_OFFSET 0x000003A4 /**< Interrupt enable */
+#define XTE_MIIMWD_OFFSET 0x000003B0 /**< MII management write data */
+#define XTE_MIIMAI_OFFSET 0x000003B4 /**< MII management access initiate */
+/*@}*/
+
+
+/* Register masks. The following constants define bit locations of various
+ * control bits in the registers. Constants are not defined for those registers
+ * that have a single bit field representing all 32 bits. For further
+ * information on the meaning of the various bit masks, refer to the HW spec.
+ */
+
+/** @name Reset and Address Filter bits
+ * These bits are associated with the XTE_RAF_OFFSET register.
+ * @{
+ */
+#define XTE_RAF_HTRST_MASK 0x00000001 /**< Hard TEMAC Reset */
+#define XTE_RAF_MCSTREJ_MASK 0x00000002 /**< Reject receive multicast destination address */
+#define XTE_RAF_BCSTREJ_MASK 0x00000004 /**< Reject receive broadcast destination address */
+/*@}*/
+
+/** @name Transmit Pause Frame Register (TPF)
+ * @{
+ */
+#define XTE_TPF_TPFV_MASK 0x0000FFFF /**< Tx pause frame value */
+/*@}*/
+
+/** @name Transmit Inter-Frame Gap Adjustment Register (TFGP)
+ * @{
+ */
+#define XTE_TFGP_IFGP_MASK 0x0000007F /**< Transmit inter-frame gap adjustment value */
+/*@}*/
+
+/** @name Interrupt bits
+ * These bits are associated with the XTE_IS_OFFSET, XTE_IP_OFFSET, and
+ * XTE_IE_OFFSET registers.
+ * @{
+ */
+#define XTE_INT_HARDACSCMPLT_MASK 0x00000001 /**< Hard register access complete */
+#define XTE_INT_AUTONEG_MASK 0x00000002 /**< Auto negotiation complete */
+#define XTE_INT_RC_MASK 0x00000004 /**< Receive complete */
+#define XTE_INT_RXRJECT_MASK 0x00000008 /**< Receive frame rejected */
+#define XTE_INT_RXFIFOOVR_MASK 0x00000010 /**< Receive fifo overrun */
+#define XTE_INT_TC_MASK 0x00000020 /**< Transmit complete */
+#define XTE_INT_ALL_MASK 0x0000003f /**< All the ints */
+/*@}*/
+
+
+#define XTE_INT_RECV_ERROR_MASK \
+ (XTE_INT_RXRJECT_MASK | XTE_INT_RXFIFOOVR_MASK) /**< INT bits that indicate receive errors */
+/*@}*/
+
+
+/** @name Control Register (CTL)
+ * @{
+ */
+#define XTE_CTL_WEN_MASK 0x00008000 /**< Write Enable */
+/*@}*/
+
+
+/** @name Ready Status, TEMAC Interrupt Status, TEMAC Interrupt Enable Registers
+ * (RDY, TIS, TIE)
+ * @{
+ */
+#define XTE_RSE_FABR_RR_MASK 0x00000001 /**< Fabric read ready */
+#define XTE_RSE_MIIM_RR_MASK 0x00000002 /**< MII management read ready */
+#define XTE_RSE_MIIM_WR_MASK 0x00000004 /**< MII management write ready */
+#define XTE_RSE_AF_RR_MASK 0x00000008 /**< Address filter read ready*/
+#define XTE_RSE_AF_WR_MASK 0x00000010 /**< Address filter write ready*/
+#define XTE_RSE_CFG_RR_MASK 0x00000020 /**< Configuration register read ready*/
+#define XTE_RSE_CFG_WR_MASK 0x00000040 /**< Configuration register write ready*/
+#define XTE_RDY_HARD_ACS_RDY_MASK 0x00010000 /**< Hard register access ready */
+#define XTE_RDY_ALL (XTE_RSE_FABR_RR_MASK | \
+ XTE_RSE_MIIM_RR_MASK | \
+ XTE_RSE_MIIM_WR_MASK | \
+ XTE_RSE_AF_RR_MASK | \
+ XTE_RSE_AF_WR_MASK | \
+ XTE_RSE_CFG_RR_MASK | \
+ XTE_RSE_CFG_WR_MASK | \
+ XTE_RDY_HARD_ACS_RDY_MASK)
+/*@}*/
+
+
+/** @name Receive Configuration Word 1 (RCW1)
+ * @{
+ */
+#define XTE_RCW1_RST_MASK 0x80000000 /**< Reset */
+#define XTE_RCW1_JUM_MASK 0x40000000 /**< Jumbo frame enable */
+#define XTE_RCW1_FCS_MASK 0x20000000 /**< In-Band FCS enable (FCS not stripped) */
+#define XTE_RCW1_RX_MASK 0x10000000 /**< Receiver enable */
+#define XTE_RCW1_VLAN_MASK 0x08000000 /**< VLAN frame enable */
+#define XTE_RCW1_HD_MASK 0x04000000 /**< Half duplex mode */
+#define XTE_RCW1_LT_DIS_MASK 0x02000000 /**< Length/type field valid check disable */
+#define XTE_RCW1_PAUSEADDR_MASK 0x0000FFFF /**< Pause frame source address
+ bits [47:32]. Bits [31:0]
+ are stored in register
+ RCW0 */
+/*@}*/
+
+
+/** @name Transmitter Configuration (TC)
+ * @{
+ */
+#define XTE_TC_RST_MASK 0x80000000 /**< reset */
+#define XTE_TC_JUM_MASK 0x40000000 /**< Jumbo frame enable */
+#define XTE_TC_FCS_MASK 0x20000000 /**< In-Band FCS enable (FCS not generated) */
+#define XTE_TC_TX_MASK 0x10000000 /**< Transmitter enable */
+#define XTE_TC_VLAN_MASK 0x08000000 /**< VLAN frame enable */
+#define XTE_TC_HD_MASK 0x04000000 /**< Half duplex mode */
+#define XTE_TC_IFG_MASK 0x02000000 /**< Inter-frame gap adjustment enable */
+/*@}*/
+
+
+/** @name Flow Control Configuration (FCC)
+ * @{
+ */
+#define XTE_FCC_FCRX_MASK 0x20000000 /**< Rx flow control enable */
+#define XTE_FCC_FCTX_MASK 0x40000000 /**< Tx flow control enable */
+/*@}*/
+
+
+/** @name EMAC Configuration (EMMC)
+ * @{
+ */
+#define XTE_EMMC_LINKSPEED_MASK 0xC0000000 /**< Link speed */
+#define XTE_EMMC_RGMII_MASK 0x20000000 /**< RGMII mode enable */
+#define XTE_EMMC_SGMII_MASK 0x10000000 /**< SGMII mode enable */
+#define XTE_EMMC_GPCS_MASK 0x08000000 /**< 1000BaseX mode enable */
+#define XTE_EMMC_HOST_MASK 0x04000000 /**< Host interface enable */
+#define XTE_EMMC_TX16BIT 0x02000000 /**< 16 bit Tx client enable */
+#define XTE_EMMC_RX16BIT 0x01000000 /**< 16 bit Rx client enable */
+
+#define XTE_EMMC_LINKSPD_10 0x00000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 10 Mbit */
+#define XTE_EMMC_LINKSPD_100 0x40000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 100 Mbit */
+#define XTE_EMMC_LINKSPD_1000 0x80000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 1000 Mbit */
+/*@}*/
+
+
+/** @name EMAC RGMII/SGMII Configuration (PHYC)
+ * @{
+ */
+#define XTE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /**< SGMII link speed */
+#define XTE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /**< RGMII link speed */
+#define XTE_PHYC_RGMIIHD_MASK 0x00000002 /**< RGMII Half-duplex mode */
+#define XTE_PHYC_RGMIILINK_MASK 0x00000001 /**< RGMII link status */
+
+#define XTE_PHYC_RGLINKSPD_10 0x00000000 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 10 Mbit */
+#define XTE_PHYC_RGLINKSPD_100 0x00000004 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 100 Mbit */
+#define XTE_PHYC_RGLINKSPD_1000 0x00000008 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 1000 Mbit */
+#define XTE_PHYC_SGLINKSPD_10 0x00000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 10 Mbit */
+#define XTE_PHYC_SGLINKSPD_100 0x40000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 100 Mbit */
+#define XTE_PHYC_SGLINKSPD_1000 0x80000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 1000 Mbit */
+/*@}*/
+
+
+/** @name EMAC Management Configuration (MC)
+ * @{
+ */
+#define XTE_MC_MDIOEN_MASK 0x00000040 /**< MII management enable */
+#define XTE_MC_CLOCK_DIVIDE_MAX 0x3F /**< Maximum MDIO divisor */
+/*@}*/
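+
+/*
+ * Note on the MDIO divisor: per the TEMAC hardware documentation, the MDIO
+ * clock is derived from the host interface clock as roughly
+ * HostClk / ((1 + divisor) * 2), and the divisor should be chosen so that the
+ * MDIO clock stays at or below the 2.5 MHz allowed by the MII management
+ * specification.
+ */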
+
+
+/** @name EMAC Unicast Address Register Word 1 (UAW1)
+ * @{
+ */
+#define XTE_UAW1_UNICASTADDR_MASK 0x0000FFFF /**< Station address bits [47:32]
+ Station address bits [31:0]
+ are stored in register
+ UAW0 */
+/*@}*/
+
+
+/** @name EMAC Multicast Address Register Word 1 (MAW1)
+ * @{
+ */
+#define XTE_MAW1_RNW_MASK 0x00800000 /**< Multicast address table register read enable */
+#define XTE_MAW1_ADDR_MASK 0x00030000 /**< Multicast address table register address */
+#define XTE_MAW1_MULTICADDR_MASK 0x0000FFFF /**< Multicast address bits [47:32]
+ Multicast address bits [31:0]
+ are stored in register
+ MAW0 */
+#define XTE_MAW1_MATADDR_SHIFT_MASK 16 /**< Number of bits to shift right
+ to align with
+ XTE_MAW1_CAMADDR_MASK */
+/*@}*/
+
+
+/** @name EMAC Address Filter Mode (AFM)
+ * @{
+ */
+#define XTE_AFM_PM_MASK 0x80000000 /**< Promiscuous mode enable */
+/*@}*/
+
+
+/** @name Media Independent Interface Management (MIIM)
+ * @{
+ */
+#define XTE_MIIM_REGAD_MASK 0x1F /**< MII Phy register address (REGAD) */
+#define XTE_MIIM_PHYAD_MASK 0x03E0 /**< MII Phy address (PHYAD) */
+#define XTE_MIIM_PHYAD_SHIFT 5 /**< MII Shift bits for PHYAD */
+/*@}*/
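+
+/*
+ * For illustration only: an MDIO access is initiated by writing the PHY and
+ * register addresses, composed with the fields above, to the MII management
+ * access initiate register, e.g.
+ * ((PhyAddress << XTE_MIIM_PHYAD_SHIFT) & XTE_MIIM_PHYAD_MASK) |
+ * (RegisterNum & XTE_MIIM_REGAD_MASK).
+ */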
+
+
+/** @name Checksum offload buffer descriptor extensions
+ * @{
+ */
+/** Byte offset where checksum should begin (16 bit word) */
+#define XTE_BD_TX_CSBEGIN_OFFSET XDMAV3_BD_USR0_OFFSET
+
+/** Offset where checksum should be inserted (16 bit word) */
+#define XTE_BD_TX_CSINSERT_OFFSET (XDMAV3_BD_USR0_OFFSET + 2)
+
+/** Checksum offload control for transmit (16 bit word) */
+#define XTE_BD_TX_CSCNTRL_OFFSET XDMAV3_BD_USR1_OFFSET
+
+/** Seed value for checksum calculation (16 bit word) */
+#define XTE_BD_TX_CSINIT_OFFSET (XDMAV3_BD_USR1_OFFSET + 2)
+
+/** Receive frame checksum calculation (16 bit word) */
+#define XTE_BD_RX_CSRAW_OFFSET (XDMAV3_BD_USR5_OFFSET + 2)
+
+/*@}*/
+
+/** @name TX_CSCNTRL bit mask
+ * @{
+ */
+#define XTE_BD_TX_CSCNTRL_CALC_MASK 0x0001 /**< Enable/disable Tx
+ checksum */
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+xdbg_stmnt(extern int indent_on);
+
+#define XLlTemac_indent(RegOffset) \
+ ((indent_on && ((RegOffset) >= XTE_RAF_OFFSET) && ((RegOffset) <= XTE_RDY_OFFSET)) ? "\t" : "")
+
+#define XLlTemac_reg_name(RegOffset) \
+ (((RegOffset) == XTE_RAF_OFFSET) ? "XTE_RAF_OFFSET": \
+ ((RegOffset) == XTE_TPF_OFFSET) ? "XTE_TPF_OFFSET": \
+ ((RegOffset) == XTE_IFGP_OFFSET) ? "XTE_IFGP_OFFSET": \
+ ((RegOffset) == XTE_IS_OFFSET) ? "XTE_IS_OFFSET": \
+ ((RegOffset) == XTE_IP_OFFSET) ? "XTE_IP_OFFSET": \
+ ((RegOffset) == XTE_IE_OFFSET) ? "XTE_IE_OFFSET": \
+ ((RegOffset) == XTE_MSW_OFFSET) ? "XTE_MSW_OFFSET": \
+ ((RegOffset) == XTE_LSW_OFFSET) ? "XTE_LSW_OFFSET": \
+ ((RegOffset) == XTE_CTL_OFFSET) ? "XTE_CTL_OFFSET": \
+ ((RegOffset) == XTE_RDY_OFFSET) ? "XTE_RDY_OFFSET": \
+ ((RegOffset) == XTE_RCW0_OFFSET) ? "XTE_RCW0_OFFSET": \
+ ((RegOffset) == XTE_RCW1_OFFSET) ? "XTE_RCW1_OFFSET": \
+ ((RegOffset) == XTE_TC_OFFSET) ? "XTE_TC_OFFSET": \
+ ((RegOffset) == XTE_FCC_OFFSET) ? "XTE_FCC_OFFSET": \
+ ((RegOffset) == XTE_EMMC_OFFSET) ? "XTE_EMMC_OFFSET": \
+ ((RegOffset) == XTE_PHYC_OFFSET) ? "XTE_PHYC_OFFSET": \
+ ((RegOffset) == XTE_MC_OFFSET) ? "XTE_MC_OFFSET": \
+ ((RegOffset) == XTE_UAW0_OFFSET) ? "XTE_UAW0_OFFSET": \
+ ((RegOffset) == XTE_UAW1_OFFSET) ? "XTE_UAW1_OFFSET": \
+ ((RegOffset) == XTE_MAW0_OFFSET) ? "XTE_MAW0_OFFSET": \
+ ((RegOffset) == XTE_MAW1_OFFSET) ? "XTE_MAW1_OFFSET": \
+ ((RegOffset) == XTE_AFM_OFFSET) ? "XTE_AFM_OFFSET": \
+ ((RegOffset) == XTE_TIS_OFFSET) ? "XTE_TIS_OFFSET": \
+ ((RegOffset) == XTE_TIE_OFFSET) ? "XTE_TIE_OFFSET": \
+ ((RegOffset) == XTE_MIIMWD_OFFSET) ? "XTE_MIIMWD_OFFSET": \
+ ((RegOffset) == XTE_MIIMAI_OFFSET) ? "XTE_MIIMAI_OFFSET": \
+ "unknown")
+
+#define XLlTemac_print_reg_o(BaseAddress, RegOffset, Value) \
+ xdbg_printf(XDBG_DEBUG_TEMAC_REG, "%s0x%0x -> %s(0x%0x)\n", \
+ XLlTemac_indent(RegOffset), (Value), \
+ XLlTemac_reg_name(RegOffset), (RegOffset)) \
+
+#define XLlTemac_print_reg_i(BaseAddress, RegOffset, Value) \
+ xdbg_printf(XDBG_DEBUG_TEMAC_REG, "%s%s(0x%0x) -> 0x%0x\n", \
+ XLlTemac_indent(RegOffset), XLlTemac_reg_name(RegOffset), \
+ (RegOffset), (Value)) \
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_ReadReg returns the value read from the register specified by
+ * <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the register to be read.
+ *
+ * @return XLlTemac_ReadReg returns the 32-bit value of the register.
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlTemac_mReadReg(u32 BaseAddress, u32 RegOffset)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+#define XLlTemac_ReadReg(BaseAddress, RegOffset) \
+({ \
+ u32 value; \
+ if ((RegOffset) > 0x2c) { \
+ printf ("readreg: Woah! wrong reg addr: 0x%0x\n", (RegOffset)); \
+ } \
+ value = XIo_In32(((BaseAddress) + (RegOffset))); \
+ XLlTemac_print_reg_i((BaseAddress), (RegOffset), value); \
+ value; \
+})
+#else
+#define XLlTemac_ReadReg(BaseAddress, RegOffset) \
+ (XIo_In32(((BaseAddress) + (RegOffset))))
+#endif
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_WriteReg, writes <i>Data</i> to the register specified by
+ * <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the register to be written.
+ * @param Data is the 32-bit value to write to the register.
+ *
+ * @return N/A
+ *
+ * @note
+ * C-style signature:
+ * void XLlTemac_mWriteReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+#define XLlTemac_WriteReg(BaseAddress, RegOffset, Data) \
+({ \
+ if ((RegOffset) > 0x2c) { \
+ printf ("writereg: Woah! wrong reg addr: 0x%0x\n", (RegOffset)); \
+ } \
+ XLlTemac_print_reg_o((BaseAddress), (RegOffset), (Data)); \
+ XIo_Out32(((BaseAddress) + (RegOffset)), (Data)); \
+})
+#else
+#define XLlTemac_WriteReg(BaseAddress, RegOffset, Data) \
+ XIo_Out32(((BaseAddress) + (RegOffset)), (Data))
+#endif
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_ReadIndirectReg returns the value read from the hard TEMAC register
+ * specified by <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the hard TEMAC register to be read.
+ *
+ * @return XLlTemac_ReadIndirectReg returns the 32-bit value of the register.
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlTemac_mReadIndirectReg(u32 BaseAddress, u32 RegOffset)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+extern u32 _xlltemac_rir_value;
+
+#define XLlTemac_ReadIndirectReg(BaseAddress, RegOffset) \
+( \
+ indent_on = 1, \
+ (((RegOffset) < 0x200) ? \
+ xdbg_printf(XDBG_DEBUG_ERROR, \
+ "readindirect: Woah! wrong reg addr: 0x%0x\n", \
+ (RegOffset)) : 0), \
+ (((RegOffset) > 0x3b4) ? \
+ xdbg_printf(XDBG_DEBUG_ERROR, \
+ "readindirect: Woah! wrong reg addr: 0x%0x\n", \
+ (RegOffset)) : 0), \
+ XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, (RegOffset)), \
+ _xlltemac_rir_value = XLlTemac_ReadReg((BaseAddress), XTE_LSW_OFFSET), \
+ XLlTemac_print_reg_i((BaseAddress), (RegOffset), _xlltemac_rir_value), \
+ indent_on = 0, \
+ _xlltemac_rir_value \
+)
+#else
+#define XLlTemac_ReadIndirectReg(BaseAddress, RegOffset) \
+( \
+ XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, (RegOffset)), \
+ XLlTemac_ReadReg((BaseAddress), XTE_LSW_OFFSET) \
+)
+#endif
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_WriteIndirectReg, writes <i>Data</i> to the hard TEMAC register
+ * specified by <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the hard TEMAC register to be written.
+ * @param Data is the 32-bit value to write to the register.
+ *
+ * @return N/A
+ *
+ * @note
+ * C-style signature:
+ * void XLlTemac_WriteIndirectReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+#define XLlTemac_WriteIndirectReg(BaseAddress, RegOffset, Data) \
+( \
+ indent_on = 1, \
+ (((RegOffset) < 0x200) ? \
+ xdbg_printf(XDBG_DEBUG_ERROR, \
+ "readindirect: Woah! wrong reg addr: 0x%0x\n", \
+ (RegOffset)) : 0), \
+ (((RegOffset) > 0x3b4) ? \
+ xdbg_printf(XDBG_DEBUG_ERROR, \
+ "readindirect: Woah! wrong reg addr: 0x%0x\n", \
+ (RegOffset)) : 0), \
+ XLlTemac_print_reg_o((BaseAddress), (RegOffset), (Data)), \
+ XLlTemac_WriteReg((BaseAddress), XTE_LSW_OFFSET, (Data)), \
+ XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, \
+ ((RegOffset) | XTE_CTL_WEN_MASK)), \
+ ((XLlTemac_ReadReg((BaseAddress), XTE_RDY_OFFSET) & \
+ XTE_RDY_HARD_ACS_RDY_MASK) ? \
+ ((XLlTemac_ReadIndirectReg((BaseAddress), (RegOffset)) != (Data)) ? \
+ xdbg_printf(XDBG_DEBUG_ERROR, \
+ "data written is not read back: Reg: 0x%0x\n", \
+ (RegOffset)) \
+ : 0) \
+ : xdbg_printf(XDBG_DEBUG_ERROR, "(temac_wi) RDY reg not initially ready\n")), \
+ indent_on = 0 \
+)
+#else
+#define XLlTemac_WriteIndirectReg(BaseAddress, RegOffset, Data) \
+ XLlTemac_WriteReg((BaseAddress), XTE_LSW_OFFSET, (Data)), \
+ XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, \
+ ((RegOffset) | XTE_CTL_WEN_MASK))
+#endif
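+
+/*
+ * In both variants above, an indirect write is a two-step sequence: the data
+ * word goes to XTE_LSW_OFFSET, then the target register offset (with
+ * XTE_CTL_WEN_MASK set) goes to XTE_CTL_OFFSET. Callers are expected to check
+ * XTE_RDY_HARD_ACS_RDY_MASK in XTE_RDY_OFFSET around these accesses, as the
+ * higher-level driver routines do before issuing indirect accesses.
+ */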
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* end of protection macro */
new file mode 100644
@@ -0,0 +1,3850 @@
+/*
+ * Xilinx Ethernet: Linux driver for the XPS_LLTEMAC core.
+ *
+ * Author: Xilinx, Inc.
+ *
+ * 2006-2007 (c) Xilinx, Inc. This file is licensed under the terms of the GNU
+ * General Public License version 2.1. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 05/08/05 First release
+ * </pre>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_OF
+/* For open firmware. */
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#endif
+
+#include "xbasic_types.h"
+#include "xlltemac.h"
+#include "xllfifo.h"
+#include "xlldma.h"
+#include "xlldma_bdring.h"
+
+#define LOCAL_FEATURE_RX_CSUM 0x01
+
+/*
+ * Default SEND and RECV buffer descriptors (BD) numbers.
+ * BD Space needed is (XTE_SEND_BD_CNT+XTE_RECV_BD_CNT)*Sizeof(XLlDma_Bd).
+ * Each XLlDma_Bd instance currently takes 40 bytes.
+ */
+#define XTE_SEND_BD_CNT 256
+#define XTE_RECV_BD_CNT 256
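+
+/*
+ * With the defaults above this works out to (256 + 256) * 40 bytes, i.e.
+ * roughly 20 KB of buffer descriptor space.
+ */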
+
+/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
+#define DRIVER_NAME "xilinx_lltemac"
+#define DRIVER_DESCRIPTION "Xilinx Tri-Mode Ethernet MAC driver"
+#define DRIVER_VERSION "1.00a"
+
+#define TX_TIMEOUT (3*HZ) /* Transmission timeout is 3 seconds. */
+
+/*
+ * This version of the Xilinx TEMAC uses external DMA or FIFO cores.
+ * Currently neither the DMA or FIFO cores used require any memory alignment
+ * restrictions.
+ */
+/*
+ * ALIGNMENT_RECV = the alignment required to receive
+ * ALIGNMENT_SEND = the alignment required to send
+ * ALIGNMENT_SEND_PERF = tx alignment for better performance
+ *
+ * ALIGNMENT_SEND is used to see if we *need* to copy the data to re-align.
+ * ALIGNMENT_SEND_PERF is used if we've decided we need to copy anyway, we just
+ * copy to this alignment for better performance.
+ */
+
+#define ALIGNMENT_RECV 34
+#define ALIGNMENT_SEND 8
+#define ALIGNMENT_SEND_PERF 32
+
+#define XTE_SEND 1
+#define XTE_RECV 2
+
+/* SGDMA buffer descriptors must be aligned on a 8-byte boundary. */
+#define ALIGNMENT_BD XLLDMA_BD_MINIMUM_ALIGNMENT
+
+/* The BUFFER_ALIGN*(adr) macros calculate the number of bytes needed to reach the next alignment boundary. */
+#define BUFFER_ALIGNSEND(adr) ((ALIGNMENT_SEND - ((u32) adr)) % ALIGNMENT_SEND)
+#define BUFFER_ALIGNSEND_PERF(adr) ((ALIGNMENT_SEND_PERF - ((u32) adr)) % 32)
+#define BUFFER_ALIGNRECV(adr) ((ALIGNMENT_RECV - ((u32) adr)) % 32)
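+
+/*
+ * Example: with ALIGNMENT_SEND = 8, a buffer at address 0x1003 gives
+ * BUFFER_ALIGNSEND(0x1003) = (8 - (0x1003 % 8)) % 8 = 5, i.e. 5 bytes of
+ * copying are needed to reach the next 8-byte boundary at 0x1008.
+ */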
+
+/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+#define DFT_TX_THRESHOLD 24
+#define DFT_TX_WAITBOUND 254
+#define DFT_RX_THRESHOLD 4
+#define DFT_RX_WAITBOUND 254
+
+#define XTE_AUTOSTRIPPING 1
+
+/* Put Buffer Descriptors in BRAM?
+ * NOTE:
+ * Putting BDs in BRAM only works if there is only ONE instance of the TEMAC
+ * in hardware. The code does not handle multiple instances, e.g. it does
+ * not manage the memory in BRAM.
+ */
+#define BD_IN_BRAM 0
+#define BRAM_BASEADDR 0xffff8000
+
+
+/*
+ * Checksum offload macros
+ */
+#define BdCsumEnable(BdPtr) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET, \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)) | 1 )
+
+/* Used for debugging */
+#define BdCsumEnabled(BdPtr) \
+ ((XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)) & 1)
+
+#define BdCsumDisable(BdPtr) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET, \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)) & 0xFFFFFFFE )
+
+#define BdCsumSetup(BdPtr, Start, Insert) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_USR1_OFFSET, ((Start) << 16) | (Insert))
+
+/* Used for debugging */
+#define BdCsumInsert(BdPtr) \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_USR1_OFFSET) & 0xffff)
+
+#define BdCsumSeed(BdPtr, Seed) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_USR2_OFFSET, 0)
+
+#define BdCsumGet(BdPtr) \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_USR3_OFFSET) & 0xffff)
+
+#define BdGetRxLen(BdPtr) \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_USR4_OFFSET) & 0x3fff)
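+
+/*
+ * Illustrative use of the macros above in the transmit path (sketch only;
+ * "bd", "csum_start_off" and "csum_insert_off" are hypothetical locals):
+ *
+ *	BdCsumEnable(bd);
+ *	BdCsumSetup(bd, csum_start_off, csum_insert_off);
+ *	BdCsumSeed(bd, 0);
+ *
+ * On receive, BdCsumGet(bd) yields the raw checksum computed by hardware and
+ * BdGetRxLen(bd) the received frame length.
+ */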
+
+/* LLTEMAC platform data */
+struct xlltemac_platform_data {
+ u8 tx_csum;
+ u8 rx_csum;
+ u8 phy_type;
+ u8 dcr_host;
+ u8 ll_dev_type;
+ u32 ll_dev_baseaddress;
+ u32 ll_dev_dma_rx_irq;
+ u32 ll_dev_dma_tx_irq;
+ u32 ll_dev_fifo_irq;
+
+ u8 mac_addr[6];
+};
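+
+/*
+ * A board-level definition might populate this roughly as follows; every value
+ * shown is hypothetical and depends on the actual hardware design:
+ *
+ *	static struct xlltemac_platform_data example_pdata = {
+ *		.tx_csum = 1,
+ *		.rx_csum = 1,
+ *		.ll_dev_baseaddress = 0x81c00000,
+ *		.ll_dev_dma_rx_irq = 2,
+ *		.ll_dev_dma_tx_irq = 3,
+ *		.mac_addr = { 0x00, 0x0a, 0x35, 0x00, 0x00, 0x01 },
+ *	};
+ */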
+
+/*
+ * Our private per device data. When a net_device is allocated we will
+ * ask for enough extra space for this.
+ */
+struct net_local {
+ struct list_head rcv;
+ struct list_head xmit;
+
+ struct net_device *ndev; /* this device */
+ struct net_device *next_dev; /* The next device in dev_list */
+ struct net_device_stats stats; /* Statistics for this device */
+ struct timer_list phy_timer; /* PHY monitoring timer */
+
+ u32 index; /* Which interface is this */
+#if 0
+ XInterruptHandler Isr; /* Pointer to the XLlTemac ISR routine */
+#endif
+ u8 gmii_addr; /* The GMII address of the PHY */
+ u32 virt_dma_addr; /* Virtual address to mapped dma */
+
+ /* The underlying OS independent code needs space as well. A
+ * pointer to the following XLlTemac structure will be passed to
+ * any XLlTemac_ function that requires it. However, we treat the
+ * data as an opaque object in this file (meaning that we never
+ * reference any of the fields inside of the structure). */
+ XLlFifo Fifo;
+ XLlDma Dma;
+ XLlTemac Emac;
+
+ unsigned int fifo_irq; /* fifo irq */
+ unsigned int dma_irq_s; /* send irq */
+ unsigned int dma_irq_r; /* recv irq */
+ unsigned int frame_size; /* actual frame size = mtu + padding */
+
+ int cur_speed;
+
+ /* Buffer Descriptor space for both TX and RX BD ring */
+ void *desc_space; /* virtual address of BD space */
+ dma_addr_t desc_space_handle; /* physical address of BD space */
+ int desc_space_size; /* size of BD space */
+
+ /* buffer for one skb in case no room is available for transmission */
+ struct sk_buff *deferred_skb;
+
+	/* send buffers for hw without TX DRE (Data Realignment Engine) */
+ void **tx_orig_buffers; /* Buffer addresses as returned by
+ dma_alloc_coherent() */
+ void **tx_buffers; /* Buffers addresses aligned for DMA */
+ dma_addr_t *tx_phys_buffers; /* Buffer addresses in physical memory */
+ size_t tx_buffers_cur; /* Index of current buffer used */
+
+ /* stats */
+ int max_frags_in_a_packet;
+ unsigned long realignments;
+ unsigned long tx_hw_csums;
+ unsigned long rx_hw_csums;
+ unsigned long local_features;
+#if ! XTE_AUTOSTRIPPING
+ unsigned long stripping;
+#endif
+};
+
+static u32 dma_rx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK;
+static u32 dma_tx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK;
+
+/* for exclusion of all program flows (processes, ISRs and BHs) */
+static spinlock_t XTE_spinlock; /* = SPIN_LOCK_UNLOCKED; */
+static spinlock_t XTE_tx_spinlock; /* = SPIN_LOCK_UNLOCKED; */
+static spinlock_t XTE_rx_spinlock; /* = SPIN_LOCK_UNLOCKED; */
+
+/*
+ * ethtool has a status reporting feature where we can report any sort of
+ * status information we'd like. This is the list of strings used for that
+ * status reporting. ETH_GSTRING_LEN is defined in ethtool.h
+ */
+static char xenet_ethtool_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "txpkts", "txdropped", "txerr", "txfifoerr",
+ "rxpkts", "rxdropped", "rxerr", "rxfifoerr",
+ "rxrejerr", "max_frags", "tx_hw_csums", "rx_hw_csums",
+};
+
+#define XENET_STATS_LEN (sizeof(xenet_ethtool_gstrings_stats) / ETH_GSTRING_LEN)
+
+/* Helper function to determine if a given XLlTemac error warrants a reset. */
+extern inline int status_requires_reset(int s)
+{
+ return (s == XST_FIFO_ERROR ||
+ s == XST_PFIFO_DEADLOCK ||
+ s == XST_DMA_ERROR || s == XST_IPIF_ERROR);
+}
+
+/* Queues with locks */
+static LIST_HEAD(receivedQueue);
+static spinlock_t receivedQueueSpin; // = SPIN_LOCK_UNLOCKED;
+
+static LIST_HEAD(sentQueue);
+static spinlock_t sentQueueSpin; // = SPIN_LOCK_UNLOCKED;
+
+
+/* from mii.h
+ *
+ * Items in mii.h but not in gmii.h
+ */
+#define ADVERTISE_100FULL 0x0100
+#define ADVERTISE_100HALF 0x0080
+#define ADVERTISE_10FULL 0x0040
+#define ADVERTISE_10HALF 0x0020
+#define ADVERTISE_CSMA 0x0001
+
+#define EX_ADVERTISE_1000FULL 0x0200
+#define EX_ADVERTISE_1000HALF 0x0100
+
+/*
+ * items not in mii.h nor gmii.h but should be
+ */
+#define MII_EXADVERTISE 0x09
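+
+/*
+ * Register 0x09 is the 1000BASE-T control register (MII_CTRL1000 in current
+ * <linux/mii.h>); the EX_ADVERTISE_1000FULL/HALF values above match its
+ * advertisement bits.
+ */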
+
+/*
+ * Wrap certain temac routines with a lock, so access to the shared hard temac
+ * interface is accessed mutually exclusive for dual channel temac support.
+ */
+
+static inline void _XLlTemac_Start(XLlTemac *InstancePtr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_Start(InstancePtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline void _XLlTemac_Stop(XLlTemac *InstancePtr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_Stop(InstancePtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline void _XLlTemac_Reset(XLlTemac *InstancePtr, int HardCoreAction)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_Reset(InstancePtr, HardCoreAction);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline int _XLlTemac_SetMacAddress(XLlTemac *InstancePtr,
+ void *AddressPtr)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_SetMacAddress(InstancePtr, AddressPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+static inline void _XLlTemac_GetMacAddress(XLlTemac *InstancePtr,
+ void *AddressPtr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_GetMacAddress(InstancePtr, AddressPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline int _XLlTemac_SetOptions(XLlTemac *InstancePtr, u32 Options)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_SetOptions(InstancePtr, Options);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+static inline int _XLlTemac_ClearOptions(XLlTemac *InstancePtr, u32 Options)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_ClearOptions(InstancePtr, Options);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+static inline u16 _XLlTemac_GetOperatingSpeed(XLlTemac *InstancePtr)
+{
+ u16 speed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ speed = XLlTemac_GetOperatingSpeed(InstancePtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return speed;
+}
+
+static inline void _XLlTemac_SetOperatingSpeed(XLlTemac *InstancePtr, u16 Speed)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_SetOperatingSpeed(InstancePtr, Speed);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ /* We need a delay after we set the speed. Otherwise the PHY will not be ready. */
+ udelay(10000);
+}
+
+static inline void _XLlTemac_PhySetMdioDivisor(XLlTemac *InstancePtr, u8 Divisor)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_PhySetMdioDivisor(InstancePtr, Divisor);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline void _XLlTemac_PhyRead(XLlTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 *PhyDataPtr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_PhyRead(InstancePtr, PhyAddress, RegisterNum, PhyDataPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline void _XLlTemac_PhyWrite(XLlTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 PhyData)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_PhyWrite(InstancePtr, PhyAddress, RegisterNum, PhyData);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+
+static inline int _XLlTemac_MulticastClear(XLlTemac *InstancePtr, int Entry)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_MulticastClear(InstancePtr, Entry);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+static inline int _XLlTemac_SetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_SetMacPauseAddress(InstancePtr, AddressPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+static inline void _XLlTemac_GetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_GetMacPauseAddress(InstancePtr, AddressPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+static inline int _XLlTemac_GetSgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_GetSgmiiStatus(InstancePtr, SpeedPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+static inline int _XLlTemac_GetRgmiiStatus(XLlTemac *InstancePtr,
+ u16 *SpeedPtr,
+ int *IsFullDuplexPtr,
+ int *IsLinkUpPtr)
+{
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ status = XLlTemac_GetRgmiiStatus(InstancePtr, SpeedPtr, IsFullDuplexPtr, IsLinkUpPtr);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ return status;
+}
+
+
+#ifdef CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII
+#define MARVELL_88E1111_EXTENDED_PHY_CTL_REG_OFFSET 20
+#define MARVELL_88E1111_EXTENDED_PHY_STATUS_REG_OFFSET 27
+#endif
+
+#ifdef CONFIG_XILINX_LLTEMAC_NATIONAL_DP83865_GMII
+# define NATIONAL_DP83865_CONTROL_INIT 0x9200
+# define NATIONAL_DP83865_CONTROL 0
+# define NATIONAL_DP83865_STATUS 1
+# define NATIONAL_DP83865_STATUS_LINK 0x04
+# define NATIONAL_DP83865_STATUS_AUTONEGEND 0x20
+# define NATIONAL_DP83865_STATUS_AUTONEG 0x11
+# define NATIONAL_DP83865_LINKSPEED_1000M 0x10
+# define NATIONAL_DP83865_LINKSPEED_100M 0x8
+# define NATIONAL_DP83865_LINKSPEED_MASK 0x18
+# define NATIONAL_DP83865_RETRIES 5
+#endif
+
+#define DEBUG_ERROR KERN_ERR
+#define DEBUG_LOG(level, ...) printk(level __VA_ARGS__)
+
+/*
+ * Perform any necessary special phy setup. In the gmii case, nothing needs to
+ * be done.
+ */
+static void phy_setup(struct net_local *lp)
+{
+#ifdef CONFIG_XILINX_LLTEMAC_NATIONAL_DP83865_GMII
+ u16 RegValue;
+
+ printk(KERN_INFO "NATIONAL DP83865 PHY\n");
+ RegValue = NATIONAL_DP83865_CONTROL_INIT;
+ /*Do not reset phy*/
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr,
+ NATIONAL_DP83865_CONTROL, RegValue);
+
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ NATIONAL_DP83865_STATUS, &RegValue);
+
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ NATIONAL_DP83865_STATUS, &RegValue);
+#endif
+#ifdef CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII
+ u16 Register;
+
+ /*
+ * Set up MAC interface
+ *
+ * Write 0x0cc3 to reg 20 in PHY
+ * 5432 1098 7654 3210
+ * ---- ---- ---- ----
+ * 0cc3=0000 1100 1100 0011
+ * downshift counter (bits 11-9): 110 = 7 times
+ * downshift enable (bit 8): 0 = enable
+	 * RGMII timing control (bit 7): 1 = add delay to rx clk to rxd
+ * outputs
+ * Default Mac interface speed (bits 6-4): 100 = 10mbps 2.5 mhz
+ * (between phy and temac - gets renegotiated)
+ * reserved (bit 3)
+ * DTE detect (bit 2): 0 disabled
+ * RGMII transmit timing control (bit 1): 1 = add delay
+	 * to tx clk to txd outputs
+ * Transmitter Disable (bit 0): 1 = enabled
+ */
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MARVELL_88E1111_EXTENDED_PHY_CTL_REG_OFFSET, 0x0cc3);
+
+ /*
+	 * Set RGMII to copper with correct hysteresis and correct mode
+ * Disable fiber/copper auto sel, choose copper
+ * RGMII /Modified MII to copper mode
+ *
+ * Write 0x848b to reg 27
+ * 5432 1098 7654 3210
+ * ---- ---- ---- ----
+ * 848b=1000 0100 1000 1011
+ * Fiber/Copper Auto Selection (bit 15): 1 = disable auto selection
+ * Interrupt Polarity (bit 10): 1 = int active low
+	 * DTE detect status drop hysteresis (bits 8-5): 0100 = report 20s after DTE power status drop
+ * HWCFG mode (bits 3-0): 1011 = RGMII/Modified MII to Copper
+ */
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MARVELL_88E1111_EXTENDED_PHY_STATUS_REG_OFFSET, 0x848b);
+
+ /*
+ * Reset the PHY
+ */
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &Register);
+ Register |= BMCR_RESET;
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_BMCR, Register);
+
+#endif /* CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII */
+
+#ifdef CONFIG_XILINX_LLTEMAC_XILINX_1000BASEX
+ u16 Register;
+
+ /*
+ * Setup the PHY control register
+ */
+ Register = BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE;
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_BMCR, Register);
+
+#endif /* CONFIG_XILINX_LLTEMAC_XILINX_1000BASEX */
+}
+
+
+typedef enum DUPLEX { UNKNOWN_DUPLEX, HALF_DUPLEX, FULL_DUPLEX } DUPLEX;
+
+static int renegotiate_speed(struct net_device *dev, int speed, DUPLEX duplex)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ int retries = 2;
+ int wait_count;
+ u16 phy_reg0 = BMCR_ANENABLE | BMCR_ANRESTART;
+ u16 phy_reg1;
+ u16 phy_reg4;
+ u16 phy_reg9 = 0;
+
+
+ /*
+ * It appears that the 10baset full and half duplex settings
+ * are overloaded for gigabit ethernet
+ */
+ if ((duplex == FULL_DUPLEX) && (speed == 10)) {
+ phy_reg4 = ADVERTISE_10FULL | ADVERTISE_CSMA;
+ }
+ else if ((duplex == FULL_DUPLEX) && (speed == 100)) {
+ phy_reg4 = ADVERTISE_100FULL | ADVERTISE_CSMA;
+ }
+ else if ((duplex == FULL_DUPLEX) && (speed == 1000)) {
+ phy_reg4 = ADVERTISE_CSMA;
+ phy_reg9 = EX_ADVERTISE_1000FULL;
+ }
+ else if (speed == 10) {
+ phy_reg4 = ADVERTISE_10HALF | ADVERTISE_CSMA;
+ }
+ else if (speed == 100) {
+ phy_reg4 = ADVERTISE_100HALF | ADVERTISE_CSMA;
+ }
+ else if (speed == 1000) {
+ phy_reg4 = ADVERTISE_CSMA;
+ phy_reg9 = EX_ADVERTISE_1000HALF;
+ }
+ else {
+ printk(KERN_ERR
+ "%s: XLlTemac: unsupported speed requested: %d\n",
+ dev->name, speed);
+ return -1;
+ }
+
+ /*
+ * link status in register 1:
+ * first read / second read:
+ * 0 0 link is down
+ * 0 1 link is up (but it was down earlier)
+ * 1 0 link is down (but it was just up)
+ * 1 1 link is up
+ *
+ */
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_reg1);
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_reg1);
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_ADVERTISE, phy_reg4);
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_EXADVERTISE, phy_reg9);
+
+#ifndef CONFIG_XILINX_LLTEMAC_NATIONAL_DP83865_GMII
+ while (retries--) {
+ /* initiate an autonegotiation of the speed */
+ _XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_BMCR, phy_reg0);
+
+ wait_count = 10; /* so we don't loop forever */
+ while (wait_count--) {
+ /* wait a bit for the negotiation to complete */
+ mdelay(500);
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR,
+ &phy_reg1);
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR,
+ &phy_reg1);
+
+ if ((phy_reg1 & BMSR_LSTATUS) &&
+ (phy_reg1 & BMSR_ANEGCOMPLETE))
+ break;
+
+ }
+
+ if (phy_reg1 & BMSR_LSTATUS) {
+ printk(KERN_INFO
+ "%s: XLlTemac: We renegotiated the speed to: %d\n",
+ dev->name, speed);
+ return 0;
+ }
+ else {
+ printk(KERN_ERR
+ "%s: XLlTemac: Not able to set the speed to %d (status: 0x%0x)\n",
+ dev->name, speed, phy_reg1);
+ return -1;
+ }
+ }
+
+ printk(KERN_ERR
+ "%s: XLlTemac: Not able to set the speed to %d\n", dev->name,
+ speed);
+ return -1;
+#else
+ printk(KERN_INFO
+ "%s: XLlTemac: We renegotiated the speed to: %d\n",
+ dev->name, speed);
+ return 0;
+#endif
+}
+
+/*
+ * This function sets up MAC's speed according to link speed of PHY
+ */
+static void set_mac_speed(struct net_local *lp)
+{
+ u16 phylinkspeed;
+ struct net_device *dev = lp->ndev;
+
+#ifdef CONFIG_XILINX_LLTEMAC_NATIONAL_DP83865_GMII
+ u16 RegValue;
+ int i;
+
+ for (i = 0; i < NATIONAL_DP83865_RETRIES*10; i++) {
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ NATIONAL_DP83865_STATUS, &RegValue);
+ if (RegValue & (NATIONAL_DP83865_STATUS_AUTONEGEND
+ |NATIONAL_DP83865_STATUS_LINK))
+ break;
+ udelay(1 * 100000);
+ }
+
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ NATIONAL_DP83865_STATUS_AUTONEG, &RegValue);
+ /* Get current link speed */
+ phylinkspeed = (RegValue & NATIONAL_DP83865_LINKSPEED_MASK);
+
+ /* Update TEMAC speed accordingly */
+ switch (phylinkspeed) {
+ case (NATIONAL_DP83865_LINKSPEED_1000M):
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 1000);
+ printk(KERN_INFO "XLlTemac: speed set to 1000Mb/s\n");
+ break;
+ case (NATIONAL_DP83865_LINKSPEED_100M):
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 100);
+ printk(KERN_INFO "XLlTemac: speed set to 100Mb/s\n");
+ break;
+ default:
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 10);
+ printk(KERN_INFO "XLlTemac: speed set to 10Mb/s\n");
+ break;
+ }
+
+ return;
+
+#elif defined(CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_GMII)
+ /*
+ * This function is specific to MARVELL 88E1111 PHY chip on
+ * many Xilinx boards and assumes GMII interface is being used
+ * by the TEMAC.
+ */
+
+#define MARVELL_88E1111_PHY_SPECIFIC_STATUS_REG_OFFSET 17
+#define MARVELL_88E1111_LINKSPEED_MARK 0xC000
+#define MARVELL_88E1111_LINKSPEED_SHIFT 14
+#define MARVELL_88E1111_LINKSPEED_1000M 0x0002
+#define MARVELL_88E1111_LINKSPEED_100M 0x0001
+#define MARVELL_88E1111_LINKSPEED_10M 0x0000
+ u16 RegValue;
+
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ MARVELL_88E1111_PHY_SPECIFIC_STATUS_REG_OFFSET,
+ &RegValue);
+ /* Get current link speed */
+ phylinkspeed = (RegValue & MARVELL_88E1111_LINKSPEED_MARK)
+ >> MARVELL_88E1111_LINKSPEED_SHIFT;
+
+ /* Update TEMAC speed accordingly */
+ switch (phylinkspeed) {
+ case (MARVELL_88E1111_LINKSPEED_1000M):
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 1000);
+ printk(KERN_INFO "%s: XLlTemac: speed set to 1000Mb/s\n",
+ dev->name);
+ lp->cur_speed = 1000;
+ break;
+ case (MARVELL_88E1111_LINKSPEED_100M):
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 100);
+ printk(KERN_INFO "%s: XLlTemac: speed set to 100Mb/s\n",
+ dev->name);
+ lp->cur_speed = 100;
+ break;
+ case (MARVELL_88E1111_LINKSPEED_10M):
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 10);
+ printk(KERN_INFO "%s: XLlTemac: speed set to 10Mb/s\n",
+ dev->name);
+ lp->cur_speed = 10;
+ break;
+ default:
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 1000);
+ printk(KERN_INFO "%s: XLlTemac: speed defaults to 1000Mb/s\n",
+ dev->name);
+ lp->cur_speed = 1000;
+ break;
+ }
+
+#elif defined CONFIG_XILINX_LLTEMAC_XILINX_1000BASEX
+ /*
+ * This function is specific to Xilinx 1000Base-X PHY,
+ * which only supports 1000Mbps, Full duplex links
+ */
+
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, 1000);
+ printk(KERN_INFO "%s: XLlTemac: speed set to 1000Mb/s\n", dev->name);
+ lp->cur_speed = 1000;
+
+#else /* generic PHY, there have been issues with 10Mbit with this code */
+ int ret;
+ int retry_count = 1;
+
+ if (XLlTemac_GetPhysicalInterface(&lp->Emac) == XTE_PHY_TYPE_MII) {
+ phylinkspeed = 100;
+ }
+ else {
+ phylinkspeed = 1000;
+ }
+
+ /*
+ * Try to renegotiate the speed until something sticks
+ */
+ while (phylinkspeed > 1) {
+ ret = renegotiate_speed(dev, phylinkspeed, FULL_DUPLEX);
+ /*
+ * ret == 1 - try it again
+ * ret == 0 - it worked
+ * ret < 0 - there was some failure negotiating the speed
+ */
+ if (ret == 0) {
+ /* it worked, get out of the loop */
+ break;
+ }
+
+ /* it didn't work this time, but it may work if we try again */
+ if ((ret == 1) && (retry_count)) {
+ retry_count--;
+ printk("trying again...\n");
+ continue;
+ }
+		/* reset the retry_count, because we're about to try a lower speed */
+ retry_count = 1;
+ phylinkspeed /= 10;
+ }
+ if (phylinkspeed == 1) {
+ printk(KERN_INFO "%s: XLlTemac: could not negotiate speed\n",
+ dev->name);
+ lp->cur_speed = 0;
+
+ return;
+ }
+
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, phylinkspeed);
+ printk(KERN_INFO "%s: XLlTemac: speed set to %dMb/s\n", dev->name,
+ phylinkspeed);
+ lp->cur_speed = phylinkspeed;
+#endif
+}
+
+/*
+ * Helper function to reset the underlying hardware. This is called
+ * when we get into such deep trouble that we don't know how to handle
+ * otherwise.
+ */
+static void reset(struct net_device *dev, u32 line_num)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ u32 TxThreshold, TxWaitBound, RxThreshold, RxWaitBound;
+ u32 Options;
+ static u32 reset_cnt = 0;
+ int status;
+
+ printk(KERN_INFO "%s: XLlTemac: resets (#%u) from adapter code line %d\n",
+ dev->name, ++reset_cnt, line_num);
+
+ /* Shouldn't really be necessary, but shouldn't hurt. */
+ netif_stop_queue(dev);
+
+ /* Stop device */
+ _XLlTemac_Stop(&lp->Emac);
+
+ /*
+ * XLlTemac_Reset puts the device back to the default state. We need
+ * to save all the settings we don't already know, reset, restore
+ * the settings, and then restart the TEMAC.
+ */
+ Options = XLlTemac_GetOptions(&lp->Emac);
+
+ /*
+ * Capture the dma coalesce settings (if needed) and reset the
+ * connected core, dma or fifo
+ */
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ XLlDma_BdRingGetCoalesce(&XLlDma_mGetRxRing(&lp->Dma),
+ &RxThreshold, &RxWaitBound);
+ XLlDma_BdRingGetCoalesce(&XLlDma_mGetTxRing(&lp->Dma),
+ &TxThreshold, &TxWaitBound);
+
+ XLlDma_Reset(&lp->Dma);
+ } else {
+ XLlFifo_Reset(&lp->Fifo);
+ }
+
+ /* now we can reset the device */
+ _XLlTemac_Reset(&lp->Emac, XTE_NORESET_HARD);
+
+ /* Reset on TEMAC also resets PHY. Give it some time to finish negotiation
+ * before we move on */
+ mdelay(2000);
+
+ /*
+ * The following four functions will return an error if the
+ * EMAC is already started. We just stopped it by calling
+ * _XLlTemac_Reset() so we can safely ignore the return values.
+ */
+ (int) _XLlTemac_SetMacAddress(&lp->Emac, dev->dev_addr);
+ (int) _XLlTemac_SetOptions(&lp->Emac, Options);
+ (int) _XLlTemac_ClearOptions(&lp->Emac, ~Options);
+ Options = XLlTemac_GetOptions(&lp->Emac);
+ printk(KERN_INFO "%s: XLlTemac: Options: 0x%x\n", dev->name, Options);
+
+ phy_setup(lp);
+ set_mac_speed(lp);
+
+ if (XLlTemac_IsDma(&lp->Emac)) { /* SG DMA mode */
+ status = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing,
+ RxThreshold, RxWaitBound);
+ status |= XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing,
+ TxThreshold, TxWaitBound);
+ if (status != XST_SUCCESS) {
+ /* Print the error, but keep on going as it's not a fatal error. */
+ printk(KERN_ERR "%s: XLlTemac: error setting coalesce values (probably out of range). status: %d\n",
+ dev->name, status);
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+ } else { /* FIFO interrupt mode */
+ XLlFifo_IntEnable(&lp->Fifo, XLLF_INT_TC_MASK |
+ XLLF_INT_RC_MASK | XLLF_INT_RXERROR_MASK |
+ XLLF_INT_TXERROR_MASK);
+ }
+ XLlTemac_IntDisable(&lp->Emac, XTE_INT_ALL_MASK);
+
+ if (lp->deferred_skb) {
+ dev_kfree_skb_any(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ lp->stats.tx_errors++;
+ }
+
+ /*
+	 * XLlTemac_Start returns an error only if the device is configured for
+	 * scatter-gather DMA and a descriptor list has not yet been created
+	 * for the send or receive channel, or if no receive buffer descriptors
+	 * have been initialized. Neither condition applies here, so the return
+	 * value can safely be ignored.
+ */
+ _XLlTemac_Start(&lp->Emac);
+
+ /* We're all ready to go. Start the queue in case it was stopped. */
+ netif_wake_queue(dev);
+}
+
+/*
+ * The PHY registers read here should be standard registers in all PHY chips
+ */
+static int get_phy_status(struct net_device *dev, DUPLEX * duplex, int *linkup)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ u16 reg;
+
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &reg);
+ *duplex = FULL_DUPLEX;
+
+#ifdef CONFIG_XILINX_LLTEMAC_NATIONAL_DP83865_GMII
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+			  NATIONAL_DP83865_STATUS, &reg);
+	*linkup = (reg & NATIONAL_DP83865_STATUS_LINK) != 0;
+
+#else
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &reg);
+ *linkup = (reg & BMSR_LSTATUS) != 0;
+#endif
+
+ return 0;
+}
+
+/*
+ * This routine is used for two purposes. The first is to keep the
+ * EMAC's duplex setting in sync with the PHY's. The second is to keep
+ * the system apprised of the state of the link. Note that this driver
+ * does not configure the PHY. Either the PHY should be configured for
+ * auto-negotiation or it should be handled by something like mii-tool. */
+static void poll_gmii(unsigned long data)
+{
+ struct net_device *dev;
+ struct net_local *lp;
+ DUPLEX phy_duplex;
+ int phy_carrier;
+ int netif_carrier;
+
+ dev = (struct net_device *) data;
+ lp = (struct net_local *) netdev_priv(dev);
+
+ /* First, find out what's going on with the PHY. */
+ if (get_phy_status(dev, &phy_duplex, &phy_carrier)) {
+ printk(KERN_ERR "%s: XLlTemac: terminating link monitoring.\n",
+ dev->name);
+ return;
+ }
+ netif_carrier = netif_carrier_ok(dev) != 0;
+ if (phy_carrier != netif_carrier) {
+ if (phy_carrier) {
+ set_mac_speed(lp);
+ printk(KERN_INFO
+ "%s: XLlTemac: PHY Link carrier restored.\n",
+ dev->name);
+ netif_carrier_on(dev);
+ }
+ else {
+ printk(KERN_INFO "%s: XLlTemac: PHY Link carrier lost.\n",
+ dev->name);
+ netif_carrier_off(dev);
+ }
+ }
+
+ /* Set up the timer so we'll get called again in 2 seconds. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+}
+
+static irqreturn_t xenet_temac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+
+ /*
+ * All we care about here is the RxRject interrupts. Explanation below:
+ *
+ * Interrupt Usage Description
+ * --------- -----------------
+ * TxCmplt: Fifo or DMA will have completion interrupts. We'll use
+ * those and not the TEMAC ones.
+ * RxFifoOvr: if the RX fifo is overflowing, the last thing we need
+ * is more interrupts to handle.
+ * RxRJect: We're keeping stats on rejected packets (we could
+ * choose not to).
+ * RxCmplt: Fifo or DMA will have completion interrupts. We'll use
+ * those and not the TEMAC ones.
+	 * AutoNeg: This driver doesn't make use of the autonegotiation
+ * completion interrupt.
+ * HardAcsCmplt: This driver just polls the RDY register for this
+ * information instead of using an interrupt handler.
+ * CfgWst, CfgRst,
+ * AfWst, AfRst,
+ * MiimWst, MiimRst,
+ * FabrRst: All of these registers indicate when access (read or
+ * write) to one or other of the Hard Temac Core
+ * registers is complete. Instead of relying on an
+ * interrupt context switch to be notified that the
+ * access is complete, this driver instead polls for the
+ * status, which, in most cases, should be faster.
+ */
+ XLlTemac_IntClear(&lp->Emac, XTE_INT_ALL_MASK);
+
+ lp->stats.rx_errors++;
+ lp->stats.rx_crc_errors++;
+
+
+ return IRQ_HANDLED;
+}
+
+static void FifoSendHandler(struct net_device *dev);
+static void FifoRecvHandler(unsigned long p /*struct net_device *dev*/);
+
+static DECLARE_TASKLET(FifoRecvBH, FifoRecvHandler, 0);
+
+static irqreturn_t xenet_fifo_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ u32 irq_status;
+
+ unsigned long flags;
+
+ /*
+ * Need to:
+ * 1) Read the FIFO IS register
+ * 2) clear all bits in the FIFO IS register
+ * 3) loop on each bit in the IS register, and handle each interrupt event
+ *
+ */
+ irq_status = XLlFifo_IntPending(&lp->Fifo);
+ XLlFifo_IntClear(&lp->Fifo, irq_status);
+ while (irq_status) {
+ if (irq_status & XLLF_INT_RC_MASK) {
+ /* handle the receive completion */
+ struct list_head *cur_lp;
+ spin_lock_irqsave(&receivedQueueSpin, flags);
+ list_for_each(cur_lp, &receivedQueue) {
+ if (cur_lp == &(lp->rcv)) {
+ break;
+ }
+ }
+ if (cur_lp != &(lp->rcv)) {
+ list_add_tail(&lp->rcv, &receivedQueue);
+ XLlFifo_IntDisable(&lp->Fifo, XLLF_INT_ALL_MASK);
+ tasklet_schedule(&FifoRecvBH);
+ }
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ irq_status &= ~XLLF_INT_RC_MASK;
+ } else if (irq_status & XLLF_INT_TC_MASK) {
+ /* handle the transmit completion */
+ FifoSendHandler(dev);
+ irq_status &= ~XLLF_INT_TC_MASK;
+ } else if (irq_status & XLLF_INT_TXERROR_MASK) {
+ lp->stats.tx_errors++;
+ lp->stats.tx_fifo_errors++;
+ XLlFifo_Reset(&lp->Fifo);
+ irq_status &= ~XLLF_INT_TXERROR_MASK;
+ } else if (irq_status & XLLF_INT_RXERROR_MASK) {
+ lp->stats.rx_errors++;
+ XLlFifo_Reset(&lp->Fifo);
+ irq_status &= ~XLLF_INT_RXERROR_MASK;
+ } else {
+ /* debug
+ * if (irq_status == 0) printk("Temac: spurious fifo int\n");
+ */
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* The callback function for completed frames sent in SGDMA mode. */
+static void DmaSendHandlerBH(unsigned long p);
+static void DmaRecvHandlerBH(unsigned long p);
+
+static DECLARE_TASKLET(DmaSendBH, DmaSendHandlerBH, 0);
+static DECLARE_TASKLET(DmaRecvBH, DmaRecvHandlerBH, 0);
+
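+/*
+ * DMA receive channel interrupt handler. Acknowledge the pending interrupts,
+ * reset the DMA engine on any error, and on a delay or coalesce event queue
+ * this device on receivedQueue, disable further RX DMA interrupts and defer
+ * the real work to the DmaRecvBH tasklet.
+ */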
+static irqreturn_t xenet_dma_rx_interrupt(int irq, void *dev_id)
+{
+ u32 irq_status;
+ struct net_device *dev = dev_id;
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct list_head *cur_lp;
+
+ unsigned long flags;
+
+ /* Read pending interrupts */
+ irq_status = XLlDma_mBdRingGetIrq(&lp->Dma.RxBdRing);
+
+ XLlDma_mBdRingAckIrq(&lp->Dma.RxBdRing, irq_status);
+
+ if ((irq_status & XLLDMA_IRQ_ALL_ERR_MASK)) {
+ XLlDma_Reset(&lp->Dma);
+ return IRQ_HANDLED;
+ }
+ if ((irq_status & (XLLDMA_IRQ_DELAY_MASK | XLLDMA_IRQ_COALESCE_MASK))) {
+ spin_lock_irqsave(&receivedQueueSpin, flags);
+ list_for_each(cur_lp, &receivedQueue) {
+ if (cur_lp == &(lp->rcv)) {
+ break;
+ }
+ }
+ if (cur_lp != &(lp->rcv)) {
+ list_add_tail(&lp->rcv, &receivedQueue);
+ XLlDma_mBdRingIntDisable(&lp->Dma.RxBdRing,
+ XLLDMA_CR_IRQ_ALL_EN_MASK);
+ tasklet_schedule(&DmaRecvBH);
+ }
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ }
+ return IRQ_HANDLED;
+}
+
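+/*
+ * DMA transmit channel interrupt handler. Acknowledge the pending interrupts,
+ * reset the DMA engine on any error, and on a delay or coalesce event queue
+ * this device on sentQueue, disable further TX DMA interrupts and defer the
+ * completion processing to the DmaSendBH tasklet.
+ */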
+static irqreturn_t xenet_dma_tx_interrupt(int irq, void *dev_id)
+{
+ u32 irq_status;
+ struct net_device *dev = dev_id;
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct list_head *cur_lp;
+
+ unsigned long flags;
+
+ /* Read pending interrupts */
+ irq_status = XLlDma_mBdRingGetIrq(&(lp->Dma.TxBdRing));
+
+ XLlDma_mBdRingAckIrq(&(lp->Dma.TxBdRing), irq_status);
+
+ if ((irq_status & XLLDMA_IRQ_ALL_ERR_MASK)) {
+ XLlDma_Reset(&lp->Dma);
+ return IRQ_HANDLED;
+ }
+
+ if ((irq_status & (XLLDMA_IRQ_DELAY_MASK | XLLDMA_IRQ_COALESCE_MASK))) {
+ spin_lock_irqsave(&sentQueueSpin, flags);
+ list_for_each(cur_lp, &sentQueue) {
+ if (cur_lp == &(lp->xmit)) {
+ break;
+ }
+ }
+ if (cur_lp != &(lp->xmit)) {
+ list_add_tail(&lp->xmit, &sentQueue);
+ XLlDma_mBdRingIntDisable(&lp->Dma.TxBdRing,
+ XLLDMA_CR_IRQ_ALL_EN_MASK);
+ tasklet_schedule(&DmaSendBH);
+ }
+ spin_unlock_irqrestore(&sentQueueSpin, flags);
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * Q:
+ * Why doesn't this linux driver use an interrupt handler for the TEMAC itself?
+ *
+ * A:
+ * Let's take a look at all the possible events that could be signaled by the
+ * TEMAC core.
+ *
+ * possible events:
+ * Transmit Complete (TxCmplt) [not handled by this driver]
+ * The TEMAC TxCmplt interrupt status is ignored by software in favor of
+ * paying attention to the transmit complete status in the connected DMA
+ * or FIFO core.
+ * Receive Fifo Overflow (RxFifoOver) [not handled by this driver]
+ *     We have discovered that the overhead of an interrupt context switch
+ *     to attempt to handle this sort of event actually worsens the
+ *     condition, causing further dropped packets and further increasing
+ *     the time spent in this interrupt handler.
+ * Receive Frame Rejected (RxRject) [not handled by this driver]
+ *     We could handle this interrupt and gather statistics on rejected
+ *     frames, but it is not critical.
+ * Receive Complete (RxCmplt) [not handled by this driver]
+ * The TEMAC RxCmplt interrupt status is ignored by software in favor of
+ * paying attention to the receive complete status in the connected DMA
+ * or FIFO core.
+ * Autonegotiation Complete (AutoNeg) [not handled by this driver]
+ * Autonegotiation on the TEMAC is a bit complicated, and is handled in
+ * a way that does not require the use of this interrupt event.
+ * Hard Temac Core Access Complete (HardAcsCmplt) [not handled by this driver]
+ * This event really just indicates if there are any events in the TIS
+ * register. As can be seen below, none of the events from the TIS
+ * register are handled, so there is no need to handle this event
+ * either.
+ * Configuration Write Complete (CfgWst) [not handled by this driver]
+ * Configuration Read Complete (CfgRst) [not handled by this driver]
+ * Address Filter Write Complete (AfWst) [not handled by this driver]
+ * Address Filter Read Complete (AfRst) [not handled by this driver]
+ * MII Management Write Complete (MiimWst) [not handled by this driver]
+ * MII Management Read Complete (MiimRst) [not handled by this driver]
+ * Fabric Read Complete (FabrRst) [not handled by this driver]
+ * All of the above registers indicate when access (read or write) to
+ * one or other of the Hard Temac Core registers is complete. Instead of
+ * relying on an interrupt context switch to be notified that the access
+ * is complete, this driver instead polls for the status, which, in most
+ * cases, should be faster.
+ */
+
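+/*
+ * xenet_open brings the interface up: it stops the device, programs the MAC
+ * address and options, requests the TEMAC interrupt plus either the DMA TX/RX
+ * or FIFO interrupts, waits for the link, configures the PHY and MAC speed,
+ * enables the data-path interrupts, starts the TEMAC (and, in SGDMA mode, the
+ * BD rings), and finally starts the send queue and the PHY polling timer.
+ */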
+static int xenet_open(struct net_device *dev)
+{
+ struct net_local *lp;
+ u32 Options;
+ int irqval = 0;
+
+ /*
+ * Just to be safe, stop TX queue and the device first. If the device is
+ * already stopped, an error will be returned. In this case, we don't
+ * really care.
+ */
+ netif_stop_queue(dev);
+ lp = (struct net_local *) netdev_priv(dev);
+ _XLlTemac_Stop(&lp->Emac);
+
+ INIT_LIST_HEAD(&(lp->rcv));
+ INIT_LIST_HEAD(&(lp->xmit));
+
+ /* Set the MAC address each time opened. */
+ if (_XLlTemac_SetMacAddress(&lp->Emac, dev->dev_addr) != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XLlTemac: could not set MAC address.\n",
+ dev->name);
+ return -EIO;
+ }
+
+ /*
+ * If the device is not configured for polled mode, connect to the
+ * interrupt controller and enable interrupts. Currently, there
+ * isn't any code to set polled mode, so this check is probably
+ * superfluous.
+ */
+ Options = XLlTemac_GetOptions(&lp->Emac);
+ Options |= XTE_FLOW_CONTROL_OPTION;
+ /* Enabling jumbo packets shouldn't be a problem if MTU is smaller */
+ Options |= XTE_JUMBO_OPTION;
+ Options |= XTE_TRANSMITTER_ENABLE_OPTION;
+ Options |= XTE_RECEIVER_ENABLE_OPTION;
+#if XTE_AUTOSTRIPPING
+ Options |= XTE_FCS_STRIP_OPTION;
+#endif
+
+	_XLlTemac_SetOptions(&lp->Emac, Options);
+	_XLlTemac_ClearOptions(&lp->Emac, ~Options);
+ Options = XLlTemac_GetOptions(&lp->Emac);
+ printk(KERN_INFO "%s: XLlTemac: Options: 0x%x\n", dev->name, Options);
+
+ /* Just use interrupt driven methods - no polled mode */
+
+ irqval = request_irq(dev->irq, &xenet_temac_interrupt, 0, dev->name, dev);
+ if (irqval) {
+ printk(KERN_ERR
+ "%s: XLlTemac: could not allocate interrupt %d.\n",
+ dev->name, dev->irq);
+ return irqval;
+ }
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ printk(KERN_INFO
+ "%s: XLlTemac: allocating interrupt %d for dma mode tx.\n",
+ dev->name, lp->dma_irq_s);
+ irqval = request_irq(lp->dma_irq_s,
+ &xenet_dma_tx_interrupt, 0, "xilinx_dma_tx_int", dev);
+ if (irqval) {
+ printk(KERN_ERR
+ "%s: XLlTemac: could not allocate interrupt %d.\n",
+ dev->name, lp->dma_irq_s);
+ return irqval;
+ }
+ printk(KERN_INFO
+ "%s: XLlTemac: allocating interrupt %d for dma mode rx.\n",
+ dev->name, lp->dma_irq_r);
+ irqval = request_irq(lp->dma_irq_r,
+ &xenet_dma_rx_interrupt, 0, "xilinx_dma_rx_int", dev);
+ if (irqval) {
+ printk(KERN_ERR
+ "%s: XLlTemac: could not allocate interrupt %d.\n",
+ dev->name, lp->dma_irq_r);
+ return irqval;
+ }
+ } else {
+ printk(KERN_INFO
+ "%s: XLlTemac: allocating interrupt %d for fifo mode.\n",
+ dev->name, lp->fifo_irq);
+		/* With the way interrupts are issued on the fifo core, this needs
+		 * to be a fast interrupt handler.
+		 */
+ irqval = request_irq(lp->fifo_irq,
+ &xenet_fifo_interrupt, 0, "xilinx_fifo_int", dev);
+ if (irqval) {
+ printk(KERN_ERR
+ "%s: XLlTemac: could not allocate interrupt %d.\n",
+ dev->name, lp->fifo_irq);
+ return irqval;
+ }
+ }
+
+ /* give the system enough time to establish a link */
+ mdelay(2000);
+
+ phy_setup(lp);
+ set_mac_speed(lp);
+
+ /* Enable interrupts - no polled mode */
+ if (XLlTemac_IsFifo(&lp->Emac)) { /* fifo direct interrupt driver mode */
+ XLlFifo_IntEnable(&lp->Fifo, XLLF_INT_TC_MASK |
+ XLLF_INT_RC_MASK | XLLF_INT_RXERROR_MASK |
+ XLLF_INT_TXERROR_MASK);
+ } else { /* SG DMA mode */
+ XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+ }
+ /*
+ * Make sure all temac interrupts are disabled. These
+	 * interrupts are not data flow related.
+ */
+ XLlTemac_IntDisable(&lp->Emac, XTE_INT_ALL_MASK);
+
+ /* Start TEMAC device */
+ _XLlTemac_Start(&lp->Emac);
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ u32 threshold_s, timer_s, threshold_r, timer_r;
+
+ XLlDma_BdRingGetCoalesce(&lp->Dma.TxBdRing, &threshold_s, &timer_s);
+ XLlDma_BdRingGetCoalesce(&lp->Dma.RxBdRing, &threshold_r, &timer_r);
+ printk(KERN_INFO
+ "%s: XLlTemac: Send Threshold = %d, Receive Threshold = %d\n",
+ dev->name, threshold_s, threshold_r);
+ printk(KERN_INFO
+ "%s: XLlTemac: Send Wait bound = %d, Receive Wait bound = %d\n",
+ dev->name, timer_s, timer_r);
+ if (XLlDma_BdRingStart(&lp->Dma.TxBdRing) == XST_FAILURE) {
+ printk(KERN_ERR "%s: XLlTemac: could not start dma tx channel\n", dev->name);
+ return -EIO;
+ }
+ if (XLlDma_BdRingStart(&lp->Dma.RxBdRing) == XST_FAILURE) {
+ printk(KERN_ERR "%s: XLlTemac: could not start dma rx channel\n", dev->name);
+ return -EIO;
+ }
+ }
+
+ /* We're ready to go. */
+ netif_start_queue(dev);
+
+ /* Set up the PHY monitoring timer. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ lp->phy_timer.data = (unsigned long) dev;
+ lp->phy_timer.function = &poll_gmii;
+ init_timer(&lp->phy_timer);
+ add_timer(&lp->phy_timer);
+ return 0;
+}
+
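+/*
+ * xenet_close brings the interface down: it stops the PHY polling timer,
+ * stops the send queue and the TEMAC, frees the interrupts and removes this
+ * device from the deferred receive and send work queues.
+ */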
+static int xenet_close(struct net_device *dev)
+{
+ struct net_local *lp;
+ unsigned long flags;
+
+ lp = (struct net_local *) netdev_priv(dev);
+
+ /* Shut down the PHY monitoring timer. */
+ del_timer_sync(&lp->phy_timer);
+
+ /* Stop Send queue */
+ netif_stop_queue(dev);
+
+ /* Now we could stop the device */
+ _XLlTemac_Stop(&lp->Emac);
+
+ /*
+ * Free the interrupt - not polled mode.
+ */
+ free_irq(dev->irq, dev);
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ free_irq(lp->dma_irq_s, dev);
+ free_irq(lp->dma_irq_r, dev);
+ } else {
+ free_irq(lp->fifo_irq, dev);
+ }
+
+ spin_lock_irqsave(&receivedQueueSpin, flags);
+ list_del(&(lp->rcv));
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+
+ spin_lock_irqsave(&sentQueueSpin, flags);
+ list_del(&(lp->xmit));
+ spin_unlock_irqrestore(&sentQueueSpin, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *xenet_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+
+ return &lp->stats;
+}
+
+static int descriptor_init(struct net_device *dev);
+static void free_descriptor_skb(struct net_device *dev);
+
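+/*
+ * xenet_set_multicast_list reprograms the address filters: the MAC is
+ * stopped, the multicast entries are cleared, promiscuous mode is used when
+ * requested or when more than four multicast addresses are configured,
+ * otherwise up to four multicast entries are programmed, and the MAC is
+ * restarted.
+ */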
+static void xenet_set_multicast_list(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ int i;
+ u32 Options;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ XLlTemac_Stop(&lp->Emac);
+
+ Options = XLlTemac_GetOptions(&lp->Emac);
+ Options &= ~XTE_PROMISC_OPTION;
+ Options &= ~XTE_MULTICAST_OPTION;
+ for (i = 0; i < 4; i++)
+ XLlTemac_MulticastClear(&lp->Emac, i);
+
+ if ((dev->flags & IFF_PROMISC) ||
+ (dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 4)) {
+ Options |= XTE_PROMISC_OPTION;
+ goto done;
+ }
+
+ if (dev->flags & IFF_MULTICAST) {
+ struct netdev_hw_addr *ha;
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ XLlTemac_MulticastAdd(&lp->Emac, ha->addr, i++);
+ }
+ Options |= XTE_MULTICAST_OPTION;
+ }
+
+done:
+ XLlTemac_SetOptions(&lp->Emac, Options);
+
+ XLlTemac_Start(&lp->Emac);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
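+/*
+ * xenet_change_mtu validates the requested MTU against the jumbo frame limit,
+ * brings the interface down if it is running, rebuilds the DMA descriptors
+ * and receive buffers for the new frame size, and brings the interface back
+ * up.
+ */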
+static int xenet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int result;
+ int device_enable = 0;
+#ifdef CONFIG_XILINX_GIGE_VLAN
+ int head_size = XTE_HDR_VLAN_SIZE;
+#else
+ int head_size = XTE_HDR_SIZE;
+#endif
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ int max_frame = new_mtu + head_size + XTE_TRL_SIZE;
+ int min_frame = 1 + head_size + XTE_TRL_SIZE;
+
+#ifdef CONFIG_PPC
+ if (netif_running(dev)) {
+		printk(KERN_ERR "Stop the device before changing the MTU\n");
+ return -1;
+ }
+#endif
+ if (max_frame < min_frame)
+ return -EINVAL;
+
+ if (max_frame > XTE_MAX_JUMBO_FRAME_SIZE) {
+		printk(KERN_INFO "Requested MTU too large; using %d instead\n",
+ XTE_JUMBO_MTU);
+ new_mtu = XTE_JUMBO_MTU;
+ }
+
+ dev->mtu = new_mtu; /* change mtu in net_device structure */
+
+ /* stop driver */
+ if (netif_running(dev)) {
+ device_enable = 1;
+ xenet_close(dev);
+ }
+ /* free all created descriptors for previous size */
+ free_descriptor_skb(dev);
+ /* setup new frame size */
+ lp->frame_size = dev->mtu + XTE_HDR_SIZE + XTE_TRL_SIZE;
+ XLlDma_Initialize(&lp->Dma, lp->virt_dma_addr); /* initialize dma */
+
+ result = descriptor_init(dev); /* create new skb with new size */
+ if (result) {
+ printk(KERN_ERR "Descriptor initialization failed.\n");
+ return -EINVAL;
+ }
+
+ if (device_enable)
+ xenet_open(dev); /* open the device */
+ return 0;
+}
+
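+/* The send function for frames sent in FIFO mode. If the FIFO has room for
+ * the whole frame (header plus fragments) it is written and transmission is
+ * started; otherwise the queue is stopped and the skb is deferred to be sent
+ * from FifoSendHandler().
+ */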
+static int xenet_FifoSend(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp;
+ unsigned long flags, fifo_free_bytes;
+ int total_frags = skb_shinfo(skb)->nr_frags + 1;
+ unsigned int total_len;
+ skb_frag_t *frag;
+ int i;
+ void *virt_addr;
+
+ total_len = skb_headlen(skb);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 1; i < total_frags; i++, frag++) {
+ total_len += frag->size;
+ }
+
+	/* The following lock protects the TxVacancy, Write and TxSetLen
+	 * sequence, which can also be executed from FifoSendHandler or from
+	 * another processor in the SMP case.
+	 */
+ spin_lock_irqsave(&XTE_tx_spinlock, flags);
+ lp = (struct net_local *) netdev_priv(dev);
+
+ fifo_free_bytes = XLlFifo_TxVacancy(&lp->Fifo) * 4;
+ if (fifo_free_bytes < total_len) {
+ netif_stop_queue(dev); /* stop send queue */
+		lp->deferred_skb = skb;	/* buffer the skb; it will be sent
+					   from interrupt context */
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+ return 0;
+ }
+
+ /* Write frame data to FIFO */
+ XLlFifo_Write(&lp->Fifo, (void *) skb->data, skb_headlen(skb));
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 1; i < total_frags; i++, frag++) {
+ virt_addr = skb_frag_address(frag);
+ XLlFifo_Write(&lp->Fifo, virt_addr, frag->size);
+ }
+
+ /* Initiate transmit */
+ XLlFifo_TxSetLen(&lp->Fifo, total_len);
+ lp->stats.tx_bytes += total_len;
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+
+ dev_kfree_skb(skb); /* free skb */
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+/* Callback function for completed frames sent in FIFO interrupt driven mode */
+static void FifoSendHandler(struct net_device *dev)
+{
+ struct net_local *lp;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&XTE_tx_spinlock, flags);
+ lp = (struct net_local *) netdev_priv(dev);
+ lp->stats.tx_packets++;
+
+	/* Send out the deferred skb and wake up the send queue if one exists */
+ if (lp->deferred_skb) {
+ int total_frags;
+ unsigned int total_len;
+ unsigned long fifo_free_bytes;
+ skb_frag_t *frag;
+ int i;
+ void *virt_addr;
+
+ skb = lp->deferred_skb;
+ total_frags = skb_shinfo(skb)->nr_frags + 1;
+ total_len = skb_headlen(skb);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 1; i < total_frags; i++, frag++) {
+ total_len += frag->size;
+ }
+
+ fifo_free_bytes = XLlFifo_TxVacancy(&lp->Fifo) * 4;
+ if (fifo_free_bytes < total_len) {
+ /* If still no room for the deferred packet, return */
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+ return;
+ }
+
+ /* Write frame data to FIFO */
+ XLlFifo_Write(&lp->Fifo, (void *) skb->data, skb_headlen(skb));
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 1; i < total_frags; i++, frag++) {
+ virt_addr = skb_frag_address(frag);
+ XLlFifo_Write(&lp->Fifo, virt_addr, frag->size);
+ }
+
+ /* Initiate transmit */
+ XLlFifo_TxSetLen(&lp->Fifo, total_len);
+
+ dev_kfree_skb(skb); /* free skb */
+ lp->deferred_skb = NULL;
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += total_len;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev); /* wake up send queue */
+ }
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+}
+
+#if 0
+/*
+ * These are used for debugging purposes, left here in case they are useful
+ * for further debugging
+ */
+static unsigned int _xenet_tx_csum(struct sk_buff *skb)
+{
+ unsigned int csum = 0;
+ long csstart = skb_transport_header(skb) - skb->data;
+
+ if (csstart != skb->len) {
+ csum = skb_checksum(skb, csstart, skb->len - csstart, 0);
+ }
+
+ return csum;
+}
+
+static inline unsigned int _xenet_rx_csum(struct sk_buff *skb)
+{
+ return skb_checksum(skb, 0, skb->len, 0);
+}
+#endif
+
+/*
+ * xenet_DmaSend_internal is an internal send routine.
+ * Any locks that need to be acquired should be held
+ * before calling this routine.
+ */
+static int xenet_DmaSend_internal(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp;
+ XLlDma_Bd *bd_ptr;
+ int result;
+ int total_frags;
+ int i;
+ void *virt_addr;
+ size_t len;
+ dma_addr_t phy_addr;
+ XLlDma_Bd *first_bd_ptr;
+ XLlDma_Bd *last_bd_ptr;
+ skb_frag_t *frag;
+
+ lp = (struct net_local *) netdev_priv(dev);
+
+ /* get skb_shinfo(skb)->nr_frags + 1 buffer descriptors */
+ total_frags = skb_shinfo(skb)->nr_frags + 1;
+
+ /* stats */
+ if (lp->max_frags_in_a_packet < total_frags) {
+ lp->max_frags_in_a_packet = total_frags;
+ }
+
+ if (total_frags < XTE_SEND_BD_CNT) {
+ result = XLlDma_BdRingAlloc(&lp->Dma.TxBdRing, total_frags,
+ &bd_ptr);
+
+ if (result != XST_SUCCESS) {
+ netif_stop_queue(dev); /* stop send queue */
+			lp->deferred_skb = skb;	/* buffer the skb; it will be sent
+						   from interrupt context */
+ return result;
+ }
+ } else {
+ dev_kfree_skb(skb);
+ lp->stats.tx_dropped++;
+ printk(KERN_ERR
+ "%s: XLlTemac: could not send TX socket buffers (too many fragments).\n",
+ dev->name);
+ return XST_FAILURE;
+ }
+
+ len = skb_headlen(skb);
+
+ /* get the physical address of the header */
+ phy_addr = (u32) dma_map_single(dev->dev.parent, skb->data, len,
+ DMA_TO_DEVICE);
+
+	/* the header lives in skb->data itself rather than in a page fragment */
+ XLlDma_mBdSetBufAddr(bd_ptr, phy_addr);
+ XLlDma_mBdSetLength(bd_ptr, len);
+ XLlDma_mBdSetId(bd_ptr, skb);
+
+ /*
+ * if tx checksum offloading is enabled, when the ethernet stack
+ * wants us to perform the checksum in hardware,
+ * skb->ip_summed is CHECKSUM_PARTIAL. Otherwise skb->ip_summed is
+ * CHECKSUM_NONE, meaning the checksum is already done, or
+ * CHECKSUM_UNNECESSARY, meaning checksumming is turned off (e.g.
+ * loopback interface)
+ *
+ * skb->csum is an overloaded value. On send, skb->csum is the offset
+ * into the buffer (skb_transport_header(skb)) to place the csum value.
+	 * On receive this field gets set to the actual csum value, before it's
+ * passed up the stack.
+ *
+ * When we get here, the ethernet stack above will have already
+ * computed the pseudoheader csum value and have placed it in the
+ * TCP/UDP header.
+ *
+ * The IP header csum has also already been computed and inserted.
+ *
+	 * Since the IP header with its own csum should compute to a null
+ * csum, it should be ok to include it in the hw csum. If it is decided
+ * to change this scheme, skb should be examined before dma_map_single()
+ * is called, which flushes the page from the cpu's cache.
+ *
+ * skb->data points to the beginning of the whole packet
+ * skb_transport_header(skb) points to the beginning of the ip header
+ *
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+
+ unsigned int csum_start_off = skb_transport_offset(skb);
+ unsigned int csum_index_off = csum_start_off + skb->csum_offset;
+
+#if 0
+ {
+ unsigned int csum = _xenet_tx_csum(skb);
+
+ *((unsigned short *) (raw + skb->csum)) =
+ csum_fold(csum);
+ BdCsumDisable(bd_ptr);
+ }
+#else
+ BdCsumEnable(bd_ptr);
+ BdCsumSetup(bd_ptr, csum_start_off, csum_index_off);
+
+#endif
+ lp->tx_hw_csums++;
+ }
+ else {
+ /*
+ * This routine will do no harm even if hardware checksum capability is
+ * off.
+ */
+ BdCsumDisable(bd_ptr);
+ }
+
+ first_bd_ptr = bd_ptr;
+ last_bd_ptr = bd_ptr;
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ for (i = 1; i < total_frags; i++, frag++) {
+ bd_ptr = XLlDma_mBdRingNext(&lp->Dma.TxBdRing, bd_ptr);
+ last_bd_ptr = bd_ptr;
+
+ virt_addr = skb_frag_address(frag);
+ phy_addr =
+ (u32) dma_map_single(dev->dev.parent, virt_addr,
+ frag->size, DMA_TO_DEVICE);
+
+ XLlDma_mBdSetBufAddr(bd_ptr, phy_addr);
+ XLlDma_mBdSetLength(bd_ptr, frag->size);
+ XLlDma_mBdSetId(bd_ptr, NULL);
+ BdCsumDisable(bd_ptr);
+ XLlDma_mBdSetStsCtrl(bd_ptr, 0);
+ }
+
+ if (first_bd_ptr == last_bd_ptr) {
+ XLlDma_mBdSetStsCtrl(last_bd_ptr,
+ XLLDMA_BD_STSCTRL_SOP_MASK |
+ XLLDMA_BD_STSCTRL_EOP_MASK);
+ } else {
+ XLlDma_mBdSetStsCtrl(first_bd_ptr, XLLDMA_BD_STSCTRL_SOP_MASK);
+ XLlDma_mBdSetStsCtrl(last_bd_ptr, XLLDMA_BD_STSCTRL_EOP_MASK);
+ }
+
+
+ /* Enqueue to HW */
+ result = XLlDma_BdRingToHw(&lp->Dma.TxBdRing, total_frags,
+ first_bd_ptr);
+ if (result != XST_SUCCESS) {
+ netif_stop_queue(dev); /* stop send queue */
+ dev_kfree_skb(skb);
+ XLlDma_mBdSetId(first_bd_ptr, NULL);
+ lp->stats.tx_dropped++;
+ printk(KERN_ERR
+ "%s: XLlTemac: could not send commit TX buffer descriptor (%d).\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+
+ return XST_FAILURE;
+ }
+
+ dev->trans_start = jiffies;
+
+ return XST_SUCCESS;
+}
+
+/* The send function for frames sent in DMA mode */
+static int xenet_DmaSend(struct sk_buff *skb, struct net_device *dev)
+{
+	/* The following spin_lock protects the BD ring alloc/commit sequence,
+	 * which also runs in the DmaSendHandlerBH bottom half and can be
+	 * triggered by another processor in the SMP case.
+	 */
+ spin_lock_bh(&XTE_tx_spinlock);
+
+ xenet_DmaSend_internal(skb, dev);
+
+ spin_unlock_bh(&XTE_tx_spinlock);
+
+ return 0;
+}
+
+
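+/*
+ * DmaSendHandlerBH is the bottom half for TX completions in SGDMA mode: for
+ * each device on sentQueue it reclaims completed buffer descriptors, unmaps
+ * and frees the transmitted skbs, re-enables TX DMA interrupts, retries any
+ * deferred skb and wakes the send queue.
+ */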
+static void DmaSendHandlerBH(unsigned long p)
+{
+ struct net_device *dev;
+ struct net_local *lp;
+ XLlDma_Bd *BdPtr, *BdCurPtr;
+ unsigned long len;
+ unsigned long flags;
+ struct sk_buff *skb;
+ dma_addr_t skb_dma_addr;
+ int result = XST_SUCCESS;
+ unsigned int bd_processed, bd_processed_save;
+
+ while (1) {
+ spin_lock_irqsave(&sentQueueSpin, flags);
+ if (list_empty(&sentQueue)) {
+ spin_unlock_irqrestore(&sentQueueSpin, flags);
+ break;
+ }
+
+ lp = list_entry(sentQueue.next, struct net_local, xmit);
+
+ list_del_init(&(lp->xmit));
+ spin_unlock_irqrestore(&sentQueueSpin, flags);
+
+ spin_lock_irqsave(&XTE_tx_spinlock, flags);
+ dev = lp->ndev;
+ bd_processed_save = 0;
+ while ((bd_processed =
+ XLlDma_BdRingFromHw(&lp->Dma.TxBdRing, XTE_SEND_BD_CNT,
+ &BdPtr)) > 0) {
+
+ bd_processed_save = bd_processed;
+ BdCurPtr = BdPtr;
+ do {
+ len = XLlDma_mBdGetLength(BdCurPtr);
+ skb_dma_addr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdCurPtr);
+ dma_unmap_single(dev->dev.parent, skb_dma_addr,
+ len, DMA_TO_DEVICE);
+
+ /* get ptr to skb */
+ skb = (struct sk_buff *)
+ XLlDma_mBdGetId(BdCurPtr);
+ if (skb)
+ dev_kfree_skb(skb);
+
+ /* reset BD id */
+ XLlDma_mBdSetId(BdCurPtr, NULL);
+
+ lp->stats.tx_bytes += len;
+ if (XLlDma_mBdGetStsCtrl(BdCurPtr) & XLLDMA_BD_STSCTRL_EOP_MASK) {
+ lp->stats.tx_packets++;
+ }
+
+ BdCurPtr = XLlDma_mBdRingNext(&lp->Dma.TxBdRing, BdCurPtr);
+ bd_processed--;
+ } while (bd_processed > 0);
+
+ result = XLlDma_BdRingFree(&lp->Dma.TxBdRing,
+ bd_processed_save, BdPtr);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XLlDma: BdRingFree() error %d.\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+ return;
+ }
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+
+ /* Send out the deferred skb if it exists */
+ if ((lp->deferred_skb) && bd_processed_save) {
+ skb = lp->deferred_skb;
+ lp->deferred_skb = NULL;
+
+ result = xenet_DmaSend_internal(skb, dev);
+ }
+
+ if (result == XST_SUCCESS) {
+ netif_wake_queue(dev); /* wake up send queue */
+ }
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+ }
+}
+
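+/*
+ * xenet_tx_timeout is called by the network stack when a transmit has not
+ * completed within TX_TIMEOUT: it logs the event, counts it as a TX error
+ * and resets the hardware under the TX lock.
+ */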
+static void xenet_tx_timeout(struct net_device *dev)
+{
+ struct net_local *lp;
+ unsigned long flags;
+
+ /*
+ * Make sure that no interrupts come in that could cause reentrancy
+ * problems in reset.
+ */
+ spin_lock_irqsave(&XTE_tx_spinlock, flags);
+
+ lp = (struct net_local *) netdev_priv(dev);
+ printk(KERN_ERR
+ "%s: XLlTemac: exceeded transmit timeout of %lu ms. Resetting emac.\n",
+ dev->name, TX_TIMEOUT * 1000UL / HZ);
+ lp->stats.tx_errors++;
+
+ reset(dev, __LINE__);
+
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+}
+
+/* The callback function for frames received when in FIFO mode. */
+static void FifoRecvHandler(unsigned long p)
+{
+ struct net_local *lp;
+ struct sk_buff *skb;
+ u32 len;
+
+ struct net_device *dev;
+ unsigned long flags;
+ spin_lock_irqsave(&receivedQueueSpin, flags);
+ if (list_empty(&receivedQueue)) {
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ return;
+ }
+ lp = list_entry(receivedQueue.next, struct net_local, rcv);
+
+ list_del_init(&(lp->rcv));
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ dev = lp->ndev;
+
+ while (XLlFifo_RxOccupancy(&lp->Fifo) != 0) {
+
+ len = XLlFifo_RxGetLen(&lp->Fifo);
+
+ /*
+ * TODO: Hm this is odd, if we can't allocate the skb, we throw away the next packet. Why?
+ */
+ if (!(skb = /*dev_ */ alloc_skb(len + ALIGNMENT_RECV, GFP_ATOMIC))) {
+#define XTE_RX_SINK_BUFFER_SIZE 1024
+ static u32 rx_buffer_sink[XTE_RX_SINK_BUFFER_SIZE / sizeof(u32)];
+
+ /* Couldn't get memory. */
+ lp->stats.rx_dropped++;
+ printk(KERN_ERR
+ "%s: XLlTemac: could not allocate receive buffer.\n",
+ dev->name);
+
+ /* consume data in Xilinx TEMAC RX data fifo so it is sync with RX length fifo */
+ for (; len > XTE_RX_SINK_BUFFER_SIZE;
+ len -= XTE_RX_SINK_BUFFER_SIZE) {
+ XLlFifo_Read(&lp->Fifo, rx_buffer_sink,
+ XTE_RX_SINK_BUFFER_SIZE);
+ }
+ XLlFifo_Read(&lp->Fifo, rx_buffer_sink, len);
+ break;
+ }
+
+ /* Read the packet data */
+ XLlFifo_Read(&lp->Fifo, skb->data, len);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+
+ skb_put(skb, len); /* Tell the skb how much data we got. */
+ skb->dev = dev; /* Fill out required meta-data. */
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_rx(skb); /* Send the packet upstream. */
+ }
+ XLlFifo_IntEnable(&lp->Fifo, XLLF_INT_TC_MASK | XLLF_INT_RC_MASK |
+ XLLF_INT_RXERROR_MASK | XLLF_INT_TXERROR_MASK);
+
+}
+
+
+/*
+ * _xenet_DmaSetupRecvBuffers allocates as many socket buffers (sk_buff's) as it
+ * can up to the number of free RX buffer descriptors. Then it sets up the RX
+ * buffer descriptors to DMA into the socket buffers.
+ *
+ * The net_device, dev, indicates on which device to operate for buffer
+ * descriptor allocation.
+ */
+static void _xenet_DmaSetupRecvBuffers(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+
+ int free_bd_count = XLlDma_mBdRingGetFreeCnt(&lp->Dma.RxBdRing);
+ int num_sk_buffs;
+ struct sk_buff_head sk_buff_list;
+ struct sk_buff *new_skb;
+ u32 new_skb_baddr;
+ XLlDma_Bd *BdPtr, *BdCurPtr;
+ u32 align;
+ int result;
+
+#if 0
+ int align_max = ALIGNMENT_RECV;
+#else
+ int align_max = 0;
+#endif
+
+
+ skb_queue_head_init(&sk_buff_list);
+ for (num_sk_buffs = 0; num_sk_buffs < free_bd_count; num_sk_buffs++) {
+ new_skb = alloc_skb(lp->frame_size + align_max, GFP_ATOMIC);
+ if (new_skb == NULL) {
+ break;
+ }
+ /*
+		 * The XTE_spinlock and the disabled RX DMA interrupt should
+		 * protect this list as well, so the unlocked __ variant is fine.
+ */
+ __skb_queue_tail(&sk_buff_list, new_skb);
+ }
+ if (!num_sk_buffs) {
+ printk(KERN_ERR "%s: XLlTemac: alloc_skb unsuccessful\n",
+ dev->name);
+ return;
+ }
+
+ /* now we got a bunch o' sk_buffs */
+ result = XLlDma_BdRingAlloc(&lp->Dma.RxBdRing, num_sk_buffs, &BdPtr);
+ if (result != XST_SUCCESS) {
+ /* we really shouldn't get this */
+ skb_queue_purge(&sk_buff_list);
+ printk(KERN_ERR "%s: XLlDma: BdRingAlloc unsuccessful (%d)\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+ return;
+ }
+
+ BdCurPtr = BdPtr;
+
+ new_skb = skb_dequeue(&sk_buff_list);
+ while (new_skb) {
+ /* make sure we're long-word aligned */
+ align = BUFFER_ALIGNRECV(new_skb->data);
+ if (align) {
+ skb_reserve(new_skb, align);
+ }
+
+ /* Get dma handle of skb->data */
+ new_skb_baddr = (u32) dma_map_single(dev->dev.parent,
+ new_skb->data, lp->frame_size,
+ DMA_FROM_DEVICE);
+ XLlDma_mBdSetBufAddr(BdCurPtr, new_skb_baddr);
+ XLlDma_mBdSetLength(BdCurPtr, lp->frame_size);
+ XLlDma_mBdSetId(BdCurPtr, new_skb);
+ XLlDma_mBdSetStsCtrl(BdCurPtr,
+ XLLDMA_BD_STSCTRL_SOP_MASK |
+ XLLDMA_BD_STSCTRL_EOP_MASK);
+
+ BdCurPtr = XLlDma_mBdRingNext(&lp->Dma.RxBdRing, BdCurPtr);
+
+ new_skb = skb_dequeue(&sk_buff_list);
+ }
+
+ /* enqueue RxBD with the attached skb buffers such that it is
+ * ready for frame reception */
+ result = XLlDma_BdRingToHw(&lp->Dma.RxBdRing, num_sk_buffs, BdPtr);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XLlDma: (DmaSetupRecvBuffers) BdRingToHw unsuccessful (%d)\n",
+ dev->name, result);
+ skb_queue_purge(&sk_buff_list);
+ BdCurPtr = BdPtr;
+ while (num_sk_buffs > 0) {
+ XLlDma_mBdSetId(BdCurPtr, NULL);
+ BdCurPtr = XLlDma_mBdRingNext(&lp->Dma.RxBdRing,
+ BdCurPtr);
+ num_sk_buffs--;
+ }
+ reset(dev, __LINE__);
+ return;
+ }
+}
+
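+/*
+ * DmaRecvHandlerBH is the bottom half for RX in SGDMA mode: for each device
+ * on receivedQueue it retrieves the completed RX buffer descriptors, unmaps
+ * the buffers, fills in the skb metadata (length, protocol, optional RX
+ * checksum offload), passes the skbs to netif_rx(), replenishes the RX ring
+ * and re-enables RX DMA interrupts.
+ */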
+static void DmaRecvHandlerBH(unsigned long p)
+{
+ struct net_device *dev;
+ struct net_local *lp;
+ struct sk_buff *skb;
+ u32 len, skb_baddr;
+ int result;
+ unsigned long flags;
+ XLlDma_Bd *BdPtr, *BdCurPtr;
+ unsigned int bd_processed, bd_processed_saved;
+
+ while (1) {
+ spin_lock_irqsave(&receivedQueueSpin, flags);
+ if (list_empty(&receivedQueue)) {
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ break;
+ }
+ lp = list_entry(receivedQueue.next, struct net_local, rcv);
+
+ list_del_init(&(lp->rcv));
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ dev = lp->ndev;
+
+ spin_lock_irqsave(&XTE_rx_spinlock, flags);
+ if ((bd_processed =
+ XLlDma_BdRingFromHw(&lp->Dma.RxBdRing, XTE_RECV_BD_CNT, &BdPtr)) > 0) {
+
+ bd_processed_saved = bd_processed;
+ BdCurPtr = BdPtr;
+ do {
+ /*
+ * Regular length field not updated on rx,
+ * USR4 updated instead.
+ */
+ len = BdGetRxLen(BdCurPtr);
+
+ /* get ptr to skb */
+ skb = (struct sk_buff *)
+ XLlDma_mBdGetId(BdCurPtr);
+
+ /* get and free up dma handle used by skb->data */
+ skb_baddr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdCurPtr);
+ dma_unmap_single(dev->dev.parent, skb_baddr,
+ lp->frame_size,
+ DMA_FROM_DEVICE);
+
+ /* reset ID */
+ XLlDma_mBdSetId(BdCurPtr, NULL);
+
+ /* setup received skb and send it upstream */
+ skb_put(skb, len); /* Tell the skb how much data we got. */
+ skb->dev = dev;
+
+ /* this routine adjusts skb->data to skip the header */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* default the ip_summed value */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing rx csum offload, set it up */
+ if (((lp->local_features & LOCAL_FEATURE_RX_CSUM) != 0) &&
+ (skb->protocol == __constant_htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
+ unsigned int csum;
+
+ /*
+ * This hardware only supports proper checksum calculations
+ * on TCP/UDP packets.
+ *
+ * skb->csum is an overloaded value. On send, skb->csum is
+ * the offset into the buffer (skb_transport_header(skb))
+					 * to place the csum value. On receive this field gets set
+ * to the actual csum value, before it's passed up the stack.
+ *
+ * If we set skb->ip_summed to CHECKSUM_COMPLETE, the ethernet
+ * stack above will compute the pseudoheader csum value and
+ * add it to the partial checksum already computed (to be
+ * placed in skb->csum) and verify it.
+ *
+ * Setting skb->ip_summed to CHECKSUM_NONE means that the
+					 * checksum didn't verify and the stack will (re)check it.
+ *
+ * Setting skb->ip_summed to CHECKSUM_UNNECESSARY means
+					 * that the checksum was verified/assumed to be good and the
+ * stack does not need to (re)check it.
+ *
+ * The ethernet stack above will (re)compute the checksum
+ * under the following conditions:
+ * 1) skb->ip_summed was set to CHECKSUM_NONE
+ * 2) skb->len does not match the length of the ethernet
+ * packet determined by parsing the packet. In this case
+ * the ethernet stack will assume any prior checksum
+ * value was miscomputed and throw it away.
+ * 3) skb->ip_summed was set to CHECKSUM_COMPLETE, skb->csum was
+ * set, but the result does not check out ok by the
+ * ethernet stack.
+ *
+ * If the TEMAC hardware stripping feature is off, each
+					 * packet will contain an FCS field which will have been
+ * computed by the hardware checksum operation. This 4 byte
+ * FCS value needs to be subtracted back out of the checksum
+ * value computed by hardware as it's not included in a
+ * normal ethernet packet checksum.
+ *
+ * The minimum transfer packet size over the wire is 64
+ * bytes. If the packet is sent as exactly 64 bytes, then
+ * it probably contains some random padding bytes. It's
+ * somewhat difficult to determine the actual length of the
+ * real packet data, so we just let the stack recheck the
+ * checksum for us.
+ *
+ * After the call to eth_type_trans(), the following holds
+ * true:
+ * skb->data points to the beginning of the ip header
+ */
+ csum = BdCsumGet(BdCurPtr);
+
+#if ! XTE_AUTOSTRIPPING
+ if (!lp->stripping) {
+ /* take off the FCS */
+ u16 *data;
+
+ /* FCS is 4 bytes */
+ skb_put(skb, -4);
+
+ data = (u16 *) (&skb->
+ data[skb->len]);
+
+ /* subtract out the FCS from the csum value */
+ csum = csum_sub(csum, *data /* & 0xffff */);
+ data++;
+ csum = csum_sub(csum, *data /* & 0xffff */);
+ }
+#endif
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
+ lp->rx_hw_csums++;
+ }
+
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+ netif_rx(skb); /* Send the packet upstream. */
+
+ BdCurPtr =
+ XLlDma_mBdRingNext(&lp->Dma.RxBdRing,
+ BdCurPtr);
+ bd_processed--;
+ } while (bd_processed > 0);
+
+ /* give the descriptor back to the driver */
+ result = XLlDma_BdRingFree(&lp->Dma.RxBdRing,
+ bd_processed_saved, BdPtr);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XLlDma: BdRingFree unsuccessful (%d)\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+ spin_unlock_irqrestore(&XTE_rx_spinlock, flags);
+ return;
+ }
+
+ _xenet_DmaSetupRecvBuffers(dev);
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+ spin_unlock_irqrestore(&XTE_rx_spinlock, flags);
+ }
+}
+
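+/*
+ * descriptor_init allocates the buffer descriptor space (with kmalloc or in
+ * block RAM, depending on BD_IN_BRAM), creates the RX and TX BD rings and
+ * populates the RX ring with receive buffers.
+ */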
+static int descriptor_init(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ int recvsize, sendsize;
+ int dftsize;
+ u32 *recvpoolptr, *sendpoolptr;
+ void *recvpoolphy, *sendpoolphy;
+ int result;
+
+/*
+ * Buffer Descriptor
+ * word byte description
+ * 0 0h next ptr
+ * 1 4h buffer addr
+ * 2 8h buffer len
+ * 3 ch sts/ctrl | app data (0) [tx csum enable (bit 31 LSB)]
+ * 4 10h app data (1) [tx csum begin (bits 0-15 MSB) | csum insert (bits 16-31 LSB)]
+ * 5 14h app data (2) [tx csum seed (bits 16-31 LSB)]
+ * 6 18h app data (3) [rx raw csum (bits 16-31 LSB)]
+ * 7 1ch app data (4) [rx recv length (bits 18-31 LSB)]
+ */
+#if 0
+ int XferType = XDMAV3_DMACR_TYPE_BFBURST_MASK;
+ int XferWidth = XDMAV3_DMACR_DSIZE_64_MASK;
+#endif
+
+ /* calc size of descriptor space pool; alloc from non-cached memory */
+ dftsize = XLlDma_mBdRingMemCalc(ALIGNMENT_BD,
+ XTE_RECV_BD_CNT + XTE_SEND_BD_CNT);
+ printk(KERN_INFO "XLlTemac: buffer descriptor size: %d (0x%0x)\n",
+ dftsize, dftsize);
+
+#if BD_IN_BRAM == 0
+ /*
+ * Allow buffer descriptors to be cached.
+ * Old method w/cache on buffer descriptors disabled:
+ * lp->desc_space = dma_alloc_coherent(NULL, dftsize,
+ * &lp->desc_space_handle, GFP_KERNEL);
+ * (note if going back to dma_alloc_coherent() the CACHE macros in
+	 * xenv_linux.h need to be disabled.)
+ */
+
+	printk(KERN_INFO "XLlTemac: Allocating DMA descriptors with kmalloc\n");
+ lp->desc_space = kmalloc(dftsize, GFP_KERNEL);
+ lp->desc_space_handle = (dma_addr_t) page_to_phys(virt_to_page(lp->desc_space));
+#else
+	printk(KERN_INFO "XLlTemac: Allocating DMA descriptors in Block Ram\n");
+ lp->desc_space_handle = BRAM_BASEADDR;
+ lp->desc_space = ioremap(lp->desc_space_handle, dftsize);
+#endif
+ if (lp->desc_space == NULL)
+ return -1;
+
+ lp->desc_space_size = dftsize;
+
+ printk(KERN_INFO
+ "XLlTemac: (buffer_descriptor_init) phy: 0x%x, virt: 0x%x, size: 0x%x\n",
+ lp->desc_space_handle, (unsigned int) lp->desc_space,
+ lp->desc_space_size);
+
+ /* calc size of send and recv descriptor space */
+ recvsize = XLlDma_mBdRingMemCalc(ALIGNMENT_BD, XTE_RECV_BD_CNT);
+ sendsize = XLlDma_mBdRingMemCalc(ALIGNMENT_BD, XTE_SEND_BD_CNT);
+
+ recvpoolptr = lp->desc_space;
+ sendpoolptr = (void *) ((u32) lp->desc_space + recvsize);
+
+ recvpoolphy = (void *) lp->desc_space_handle;
+ sendpoolphy = (void *) ((u32) lp->desc_space_handle + recvsize);
+
+ result = XLlDma_BdRingCreate(&lp->Dma.RxBdRing, (u32) recvpoolphy,
+ (u32) recvpoolptr, ALIGNMENT_BD,
+ XTE_RECV_BD_CNT);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR "XLlTemac: DMA Ring Create (RECV). Error: %d\n", result);
+ return -EIO;
+ }
+
+ result = XLlDma_BdRingCreate(&lp->Dma.TxBdRing, (u32) sendpoolphy,
+ (u32) sendpoolptr, ALIGNMENT_BD,
+ XTE_SEND_BD_CNT);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR "XLlTemac: DMA Ring Create (SEND). Error: %d\n", result);
+ return -EIO;
+ }
+
+ _xenet_DmaSetupRecvBuffers(dev);
+ return 0;
+}
+
+static void free_descriptor_skb(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ XLlDma_Bd *BdPtr;
+ struct sk_buff *skb;
+ dma_addr_t skb_dma_addr;
+ u32 len, i;
+
+ /* Unmap and free skb's allocated and mapped in descriptor_init() */
+
+ /* Get the virtual address of the 1st BD in the DMA RX BD ring */
+ BdPtr = (XLlDma_Bd *) lp->Dma.RxBdRing.FirstBdAddr;
+
+ for (i = 0; i < XTE_RECV_BD_CNT; i++) {
+ skb = (struct sk_buff *) XLlDma_mBdGetId(BdPtr);
+ if (skb) {
+ skb_dma_addr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdPtr);
+ dma_unmap_single(dev->dev.parent, skb_dma_addr,
+ lp->frame_size, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ /* find the next BD in the DMA RX BD ring */
+ BdPtr = XLlDma_mBdRingNext(&lp->Dma.RxBdRing, BdPtr);
+ }
+
+ /* Unmap and free TX skb's that have not had a chance to be freed
+ * in DmaSendHandlerBH(). This could happen when TX Threshold is larger
+ * than 1 and TX waitbound is 0
+ */
+
+ /* Get the virtual address of the 1st BD in the DMA TX BD ring */
+ BdPtr = (XLlDma_Bd *) lp->Dma.TxBdRing.FirstBdAddr;
+
+ for (i = 0; i < XTE_SEND_BD_CNT; i++) {
+ skb = (struct sk_buff *) XLlDma_mBdGetId(BdPtr);
+ if (skb) {
+ skb_dma_addr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdPtr);
+ len = XLlDma_mBdGetLength(BdPtr);
+ dma_unmap_single(dev->dev.parent, skb_dma_addr, len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ /* find the next BD in the DMA TX BD ring */
+ BdPtr = XLlDma_mBdRingNext(&lp->Dma.TxBdRing, BdPtr);
+ }
+
+#if BD_IN_BRAM == 0
+ kfree(lp->desc_space);
+/* this is the old approach, which was removed: */
+/* dma_free_coherent(NULL,
+ lp->desc_space_size,
+ lp->desc_space, lp->desc_space_handle); */
+#else
+ iounmap(lp->desc_space);
+#endif
+}
+
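+/*
+ * Report the current link settings (speed, duplex, autonegotiation state and,
+ * in DMA mode, the coalescing thresholds) based on the MAC options and the
+ * PHY registers.
+ */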
+static int
+xenet_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ u32 mac_options;
+ u32 threshold, timer;
+ u16 gmii_cmd, gmii_status, gmii_advControl;
+
+ memset(ecmd, 0, sizeof(struct ethtool_cmd));
+
+ mac_options = XLlTemac_GetOptions(&(lp->Emac));
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &gmii_cmd);
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &gmii_status);
+
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_ADVERTISE, &gmii_advControl);
+
+ ecmd->duplex = DUPLEX_FULL;
+
+ ecmd->supported |= SUPPORTED_MII;
+
+ ecmd->port = PORT_MII;
+
+ ecmd->speed = lp->cur_speed;
+
+ if (gmii_status & BMSR_ANEGCAPABLE) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ }
+ if (gmii_status & BMSR_ANEGCOMPLETE) {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ }
+ else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+ ecmd->phy_address = lp->Emac.Config.BaseAddress;
+ ecmd->transceiver = XCVR_INTERNAL;
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ /* get TX threshold */
+
+ XLlDma_BdRingGetCoalesce(&lp->Dma.TxBdRing, &threshold, &timer);
+ ecmd->maxtxpkt = threshold;
+
+ /* get RX threshold */
+ XLlDma_BdRingGetCoalesce(&lp->Dma.RxBdRing, &threshold, &timer);
+ ecmd->maxrxpkt = threshold;
+ }
+
+ ecmd->supported |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
+
+ return 0;
+}
+
+static int
+xenet_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+
+ if ((ecmd->duplex != DUPLEX_FULL) ||
+ (ecmd->transceiver != XCVR_INTERNAL) ||
+ (ecmd->phy_address &&
+ (ecmd->phy_address != lp->Emac.Config.BaseAddress))) {
+ return -EOPNOTSUPP;
+ }
+
+ if ((ecmd->speed != 1000) && (ecmd->speed != 100) &&
+ (ecmd->speed != 10)) {
+ printk(KERN_ERR
+ "%s: XLlTemac: xenet_ethtool_set_settings speed not supported: %d\n",
+ dev->name, ecmd->speed);
+ return -EOPNOTSUPP;
+ }
+
+ if (ecmd->speed != lp->cur_speed) {
+ renegotiate_speed(dev, ecmd->speed, FULL_DUPLEX);
+ _XLlTemac_SetOperatingSpeed(&lp->Emac, ecmd->speed);
+ lp->cur_speed = ecmd->speed;
+ }
+ return 0;
+}
+
+static int
+xenet_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ u32 threshold, waitbound;
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ if (!(XLlTemac_IsDma(&lp->Emac))) {
+ return -EIO;
+ }
+
+ XLlDma_BdRingGetCoalesce(&lp->Dma.RxBdRing, &threshold, &waitbound);
+ ec->rx_max_coalesced_frames = threshold;
+ ec->rx_coalesce_usecs = waitbound;
+
+ XLlDma_BdRingGetCoalesce(&lp->Dma.TxBdRing, &threshold, &waitbound);
+ ec->tx_max_coalesced_frames = threshold;
+ ec->tx_coalesce_usecs = waitbound;
+
+ return 0;
+}
+
+#if 0
+static void disp_bd_ring(XLlDma_BdRing *bd_ring)
+{
+ int num_bds = bd_ring->AllCnt;
+ u32 *cur_bd_ptr = (u32 *) bd_ring->FirstBdAddr;
+ int idx;
+
+ printk("ChanBase: %p\n", (void *) bd_ring->ChanBase);
+ printk("FirstBdPhysAddr: %p\n", (void *) bd_ring->FirstBdPhysAddr);
+ printk("FirstBdAddr: %p\n", (void *) bd_ring->FirstBdAddr);
+ printk("LastBdAddr: %p\n", (void *) bd_ring->LastBdAddr);
+ printk("Length: %d (0x%0x)\n", bd_ring->Length, bd_ring->Length);
+ printk("RunState: %d (0x%0x)\n", bd_ring->RunState, bd_ring->RunState);
+ printk("Separation: %d (0x%0x)\n", bd_ring->Separation,
+ bd_ring->Separation);
+ printk("BD Count: %d\n", bd_ring->AllCnt);
+
+ printk("\n");
+
+ printk("FreeHead: %p\n", (void *) bd_ring->FreeHead);
+ printk("PreHead: %p\n", (void *) bd_ring->PreHead);
+ printk("HwHead: %p\n", (void *) bd_ring->HwHead);
+ printk("HwTail: %p\n", (void *) bd_ring->HwTail);
+ printk("PostHead: %p\n", (void *) bd_ring->PostHead);
+ printk("BdaRestart: %p\n", (void *) bd_ring->BdaRestart);
+
+ printk("Ring Contents:\n");
+/*
+ * Buffer Descriptor
+ * word byte description
+ * 0 0h next ptr
+ * 1 4h buffer addr
+ * 2 8h buffer len
+ * 3 ch sts/ctrl | app data (0) [tx csum enable (bit 31 LSB)]
+ * 4 10h app data (1) [tx csum begin (bits 0-15 MSB) | csum insert (bits 16-31 LSB)]
+ * 5 14h app data (2) [tx csum seed (bits 16-31 LSB)]
+ * 6 18h app data (3) [rx raw csum (bits 16-31 LSB)]
+ * 7 1ch app data (4) [rx recv length (bits 18-31 LSB)]
+ * 8 20h sw app data (0) [id]
+ */
+ printk("Idx NextBD BuffAddr Length CTL/CSE CSUM B/I CSUMSeed Raw CSUM RecvLen ID\n");
+ printk("--- -------- -------- -------- -------- -------- -------- -------- -------- --------\n");
+
+ for (idx = 0; idx < num_bds; idx++) {
+ printk("%3d %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ idx,
+ cur_bd_ptr[XLLDMA_BD_NDESC_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_BUFA_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_BUFL_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_STSCTRL_USR0_OFFSET /
+ sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_USR1_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_USR2_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_USR3_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_USR4_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XLLDMA_BD_ID_OFFSET / sizeof(*cur_bd_ptr)]);
+
+ cur_bd_ptr += bd_ring->Separation / sizeof(int);
+ }
+ printk("--------------------------------------- Done ---------------------------------------\n");
+}
+#endif
+
+static int
+xenet_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ int ret;
+ struct net_local *lp;
+
+ lp = (struct net_local *) netdev_priv(dev);
+
+ if (!(XLlTemac_IsDma(&lp->Emac))) {
+ return -EIO;
+ }
+
+ if (ec->rx_coalesce_usecs == 0) {
+ ec->rx_coalesce_usecs = 1;
+ dma_rx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ }
+ if ((ret = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing,
+ (u16) (ec->rx_max_coalesced_frames),
+ (u16) (ec->rx_coalesce_usecs))) != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XLlDma: BdRingSetCoalesce error %d\n",
+ dev->name, ret);
+ return -EIO;
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+
+ if (ec->tx_coalesce_usecs == 0) {
+ ec->tx_coalesce_usecs = 1;
+ dma_tx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ }
+ if ((ret = XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing,
+ (u16) (ec->tx_max_coalesced_frames),
+ (u16) (ec->tx_coalesce_usecs))) != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XLlDma: BdRingSetCoalesce error %d\n",
+ dev->name, ret);
+ return -EIO;
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+
+ return 0;
+}
+
+static void
+xenet_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *erp)
+{
+ memset(erp, 0, sizeof(struct ethtool_ringparam));
+
+ erp->rx_max_pending = XTE_RECV_BD_CNT;
+ erp->tx_max_pending = XTE_SEND_BD_CNT;
+ erp->rx_pending = XTE_RECV_BD_CNT;
+ erp->tx_pending = XTE_SEND_BD_CNT;
+}
+
+static void
+xenet_ethtool_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epp)
+{
+ u32 Options;
+ u16 gmii_status;
+ struct net_local *lp = netdev_priv(dev);
+
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &gmii_status);
+
+ /* I suspect that the expected value is that autonegotiation is
+ * enabled, not completed.
+ * As seen in xenet_do_ethtool_ioctl() */
+ if (gmii_status & BMSR_ANEGCOMPLETE) {
+ epp->autoneg = AUTONEG_ENABLE;
+ }
+ else {
+ epp->autoneg = AUTONEG_DISABLE;
+ }
+
+ Options = XLlTemac_GetOptions(&lp->Emac);
+ if (Options & XTE_FLOW_CONTROL_OPTION) {
+ epp->rx_pause = 1;
+ epp->tx_pause = 1;
+ }
+ else {
+ epp->rx_pause = 0;
+ epp->tx_pause = 0;
+ }
+}
+
+#if 0
+static u32
+xenet_ethtool_get_rx_csum(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ u32 retval;
+
+ retval = (lp->local_features & LOCAL_FEATURE_RX_CSUM) != 0;
+
+ return retval;
+}
+
+static int
+xenet_ethtool_set_rx_csum(struct net_device *dev, u32 onoff)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (onoff) {
+ if (XLlTemac_IsRxCsum(&lp->Emac) == TRUE) {
+ lp->local_features |=
+ LOCAL_FEATURE_RX_CSUM;
+ }
+ }
+ else {
+ lp->local_features &= ~LOCAL_FEATURE_RX_CSUM;
+ }
+
+ return 0;
+}
+
+static u32
+xenet_ethtool_get_tx_csum(struct net_device *dev)
+{
+ u32 retval;
+
+ retval = (dev->features & NETIF_F_IP_CSUM) != 0;
+ return retval;
+}
+
+static int
+xenet_ethtool_set_tx_csum(struct net_device *dev, u32 onoff)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (onoff) {
+ if (XLlTemac_IsTxCsum(&lp->Emac) == TRUE) {
+ dev->features |= NETIF_F_IP_CSUM;
+ }
+ }
+ else {
+ dev->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ return 0;
+}
+
+static u32
+xenet_ethtool_get_sg(struct net_device *dev)
+{
+ u32 retval;
+
+ retval = (dev->features & NETIF_F_SG) != 0;
+
+ return retval;
+}
+
+static int
+xenet_ethtool_set_sg(struct net_device *dev, u32 onoff)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (onoff) {
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ dev->features |=
+ NETIF_F_SG | NETIF_F_FRAGLIST;
+ }
+ }
+ else {
+ dev->features &=
+ ~(NETIF_F_SG | NETIF_F_FRAGLIST);
+ }
+
+ return 0;
+}
+#endif
+static void
+xenet_ethtool_get_strings(struct net_device *dev, u32 stringset, u8 *strings)
+{
+ *strings = 0;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(strings,
+ &xenet_ethtool_gstrings_stats,
+ sizeof(xenet_ethtool_gstrings_stats));
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+xenet_ethtool_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ data[0] = lp->stats.tx_packets;
+ data[1] = lp->stats.tx_dropped;
+ data[2] = lp->stats.tx_errors;
+ data[3] = lp->stats.tx_fifo_errors;
+ data[4] = lp->stats.rx_packets;
+ data[5] = lp->stats.rx_dropped;
+ data[6] = lp->stats.rx_errors;
+ data[7] = lp->stats.rx_fifo_errors;
+ data[8] = lp->stats.rx_crc_errors;
+ data[9] = lp->max_frags_in_a_packet;
+ data[10] = lp->tx_hw_csums;
+ data[11] = lp->rx_hw_csums;
+}
+
+static int
+xenet_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return XENET_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+
+#define EMAC_REGS_N 32
+struct mac_regsDump {
+ struct ethtool_regs hd;
+ u16 data[EMAC_REGS_N];
+};
+
+static int
+xenet_ethtool_get_regs_len(struct net_device *dev)
+{
+ return (sizeof(u16) * EMAC_REGS_N);
+}
+
+static void
+xenet_ethtool_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ret)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct mac_regsDump *dump = (struct mac_regsDump *) regs;
+ int i;
+
+ dump->hd.version = 0;
+ dump->hd.len = sizeof(dump->data);
+ memset(dump->data, 0, sizeof(dump->data));
+
+ for (i = 0; i < EMAC_REGS_N; i++) {
+ _XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, i, &(dump->data[i]));
+ }
+
+ *(int *) ret = 0;
+}
+
+static void
+xenet_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *ed)
+{
+ memset(ed, 0, sizeof(struct ethtool_drvinfo));
+ strncpy(ed->driver, DRIVER_NAME, sizeof(ed->driver) - 1);
+ strncpy(ed->version, DRIVER_VERSION, sizeof(ed->version) - 1);
+ /* Also tell how much memory is needed for dumping register values */
+ ed->regdump_len = sizeof(u16) * EMAC_REGS_N;
+ ed->n_stats = XENET_STATS_LEN;
+}
+
+/*
+ * xenet_do_ethtool_ioctl:
+ * DEPRECATED
+ */
+static int xenet_do_ethtool_ioctl(struct net_device *dev, struct ifreq *rq)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct ethtool_cmd ecmd;
+ struct ethtool_coalesce eco;
+ struct ethtool_drvinfo edrv;
+ struct ethtool_ringparam erp;
+ struct ethtool_pauseparam epp;
+ struct mac_regsDump regs;
+ int ret = -EOPNOTSUPP;
+ u32 Options;
+
+ if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
+ return -EFAULT;
+ switch (ecmd.cmd) {
+ case ETHTOOL_GSET: /* Get setting. No command option needed w/ ethtool */
+ ret = xenet_ethtool_get_settings(dev, &ecmd);
+ if (ret < 0)
+ return -EIO;
+ if (copy_to_user(rq->ifr_data, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ case ETHTOOL_SSET: /* Change setting. Use "-s" command option w/ ethtool */
+ ret = xenet_ethtool_set_settings(dev, &ecmd);
+ break;
+ case ETHTOOL_GPAUSEPARAM: /* Get pause parameter information. Use "-a" w/ ethtool */
+ ret = xenet_ethtool_get_settings(dev, &ecmd);
+ if (ret < 0)
+ return ret;
+ epp.cmd = ecmd.cmd;
+ epp.autoneg = ecmd.autoneg;
+ Options = XLlTemac_GetOptions(&lp->Emac);
+ if (Options & XTE_FCS_INSERT_OPTION) {
+ epp.rx_pause = 1;
+ epp.tx_pause = 1;
+ }
+ else {
+ epp.rx_pause = 0;
+ epp.tx_pause = 0;
+ }
+ if (copy_to_user
+ (rq->ifr_data, &epp, sizeof(struct ethtool_pauseparam)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ case ETHTOOL_SPAUSEPARAM: /* Set pause parameter. Use "-A" w/ ethtool */
+ return -EOPNOTSUPP; /* TODO: To support in next version */
+ case ETHTOOL_GRXCSUM:{ /* Get rx csum offload info. Use "-k" w/ ethtool */
+ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+ edata.data =
+ (lp->local_features & LOCAL_FEATURE_RX_CSUM) !=
+ 0;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_SRXCSUM:{ /* Set rx csum offload info. Use "-K" w/ ethtool */
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data) {
+ if (XLlTemac_IsRxCsum(&lp->Emac) == TRUE) {
+ lp->local_features |=
+ LOCAL_FEATURE_RX_CSUM;
+ }
+ }
+ else {
+ lp->local_features &= ~LOCAL_FEATURE_RX_CSUM;
+ }
+
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GTXCSUM:{ /* Get tx csum offload info. Use "-k" w/ ethtool */
+ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+ edata.data = (dev->features & NETIF_F_IP_CSUM) != 0;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_STXCSUM:{ /* Set tx csum offload info. Use "-K" w/ ethtool */
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data) {
+ if (XLlTemac_IsTxCsum(&lp->Emac) == TRUE) {
+ dev->features |= NETIF_F_IP_CSUM;
+ }
+ }
+ else {
+ dev->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GSG:{ /* Get ScatterGather info. Use "-k" w/ ethtool */
+ struct ethtool_value edata = { ETHTOOL_GSG };
+
+ edata.data = (dev->features & NETIF_F_SG) != 0;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_SSG:{ /* Set ScatterGather info. Use "-K" w/ ethtool */
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data) {
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ dev->features |=
+ NETIF_F_SG | NETIF_F_FRAGLIST;
+ }
+ }
+ else {
+ dev->features &=
+ ~(NETIF_F_SG | NETIF_F_FRAGLIST);
+ }
+
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GCOALESCE: /* Get coalescing info. Use "-c" w/ ethtool */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ break;
+ eco.cmd = ecmd.cmd;
+ ret = xenet_ethtool_get_coalesce(dev, &eco);
+ if (ret < 0) {
+ return -EIO;
+ }
+ if (copy_to_user
+ (rq->ifr_data, &eco, sizeof(struct ethtool_coalesce))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_SCOALESCE: /* Set coalescing info. Use "-C" w/ ethtool */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ break;
+ if (copy_from_user
+ (&eco, rq->ifr_data, sizeof(struct ethtool_coalesce)))
+ return -EFAULT;
+ ret = xenet_ethtool_set_coalesce(dev, &eco);
+ break;
+ case ETHTOOL_GDRVINFO: /* Get driver information. Use "-i" w/ ethtool */
+		edrv.cmd = ecmd.cmd;
+ xenet_ethtool_get_drvinfo(dev, &edrv);
+ edrv.n_stats = XENET_STATS_LEN;
+ if (copy_to_user
+ (rq->ifr_data, &edrv, sizeof(struct ethtool_drvinfo))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_GREGS: /* Get register values. Use "-d" with ethtool */
+		regs.hd.cmd = ecmd.cmd;
+ xenet_ethtool_get_regs(dev, &(regs.hd), &ret);
+ if (ret < 0) {
+ return ret;
+ }
+ if (copy_to_user
+		    (rq->ifr_data, &regs, sizeof(struct mac_regsDump))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_GRINGPARAM: /* Get RX/TX ring parameters. Use "-g" w/ ethtool */
+		erp.cmd = ecmd.cmd;
+ xenet_ethtool_get_ringparam(dev, &(erp));
+ if (copy_to_user
+ (rq->ifr_data, &erp, sizeof(struct ethtool_ringparam))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_NWAY_RST: /* Restart auto negotiation if enabled. Use "-r" w/ ethtool */
+ return -EOPNOTSUPP; /* TODO: To support in next version */
+ case ETHTOOL_GSTRINGS:{
+ struct ethtool_gstrings gstrings = { ETHTOOL_GSTRINGS };
+ void __user *addr = rq->ifr_data;
+ char *strings = NULL;
+
+ if (copy_from_user(&gstrings, addr, sizeof(gstrings))) {
+ return -EFAULT;
+ }
+ switch (gstrings.string_set) {
+ case ETH_SS_STATS:
+ gstrings.len = XENET_STATS_LEN;
+ strings = *xenet_ethtool_gstrings_stats;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (copy_to_user(addr, &gstrings, sizeof(gstrings))) {
+ return -EFAULT;
+ }
+ addr += offsetof(struct ethtool_gstrings, data);
+ if (copy_to_user
+ (addr, strings, gstrings.len * ETH_GSTRING_LEN)) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GSTATS:{
+ struct {
+ struct ethtool_stats cmd;
+ uint64_t data[XENET_STATS_LEN];
+ } stats = { {
+ ETHTOOL_GSTATS, XENET_STATS_LEN}};
+
+ stats.data[0] = lp->stats.tx_packets;
+ stats.data[1] = lp->stats.tx_dropped;
+ stats.data[2] = lp->stats.tx_errors;
+ stats.data[3] = lp->stats.tx_fifo_errors;
+ stats.data[4] = lp->stats.rx_packets;
+ stats.data[5] = lp->stats.rx_dropped;
+ stats.data[6] = lp->stats.rx_errors;
+ stats.data[7] = lp->stats.rx_fifo_errors;
+ stats.data[8] = lp->stats.rx_crc_errors;
+ stats.data[9] = lp->max_frags_in_a_packet;
+ stats.data[10] = lp->tx_hw_csums;
+ stats.data[11] = lp->rx_hw_csums;
+
+ if (copy_to_user(rq->ifr_data, &stats, sizeof(stats))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP; /* All other operations not supported */
+ }
+ return ret;
+}
+
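+/*
+ * xenet_ioctl handles the deprecated ethtool ioctl, MII PHY register access,
+ * and the driver-private ioctls used to query and tune the DMA coalescing
+ * threshold and waitbound values.
+ */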
+static int xenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+
+	/* mii_ioctl_data has 4 u16 fields: phy_id, reg_num, val_in & val_out */
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
+ struct {
+ __u16 threshold;
+ __u32 direction;
+ } thr_arg;
+ struct {
+ __u16 waitbound;
+ __u32 direction;
+ } wbnd_arg;
+
+ int ret;
+ u32 threshold, timer;
+ XLlDma_BdRing *RingPtr;
+ u32 *dma_int_mask_ptr;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ /* DEPRECATED */
+ return xenet_do_ethtool_ioctl(dev, rq);
+ case SIOCGMIIPHY: /* Get address of GMII PHY in use. */
+ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
+ data->phy_id = lp->gmii_addr;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read GMII PHY register. */
+ case SIOCDEVPRIVATE + 1: /* for binary compat, remove in 2.5 */
+ if (data->phy_id > 31 || data->reg_num > 31)
+ return -ENXIO;
+
+ /* Stop the PHY timer to prevent reentrancy. */
+ del_timer_sync(&lp->phy_timer);
+
+ _XLlTemac_PhyRead(&lp->Emac, data->phy_id, data->reg_num,
+ &data->val_out);
+
+ /* Start the PHY timer up again. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+ return 0;
+
+ case SIOCSMIIREG: /* Write GMII PHY register. */
+ case SIOCDEVPRIVATE + 2: /* for binary compat, remove in 2.5 */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (data->phy_id > 31 || data->reg_num > 31)
+ return -ENXIO;
+
+ /* Stop the PHY timer to prevent reentrancy. */
+ del_timer_sync(&lp->phy_timer);
+
+ _XLlTemac_PhyWrite(&lp->Emac, data->phy_id, data->reg_num,
+ data->val_in);
+
+ /* Start the PHY timer up again. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+ return 0;
+
+ case SIOCDEVPRIVATE + 3: /* set THRESHOLD */
+ if (XLlTemac_IsFifo(&lp->Emac))
+ return -EFAULT;
+
+ if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg)))
+ return -EFAULT;
+
+ if (thr_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ XLlDma_BdRingGetCoalesce(RingPtr, &threshold, &timer);
+ if (thr_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ if ((ret = XLlDma_BdRingSetCoalesce(RingPtr, thr_arg.threshold,
+ timer)) != XST_SUCCESS) {
+ return -EIO;
+ }
+ return 0;
+
+ case SIOCDEVPRIVATE + 4: /* set WAITBOUND */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg)))
+ return -EFAULT;
+
+ if (wbnd_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ XLlDma_BdRingGetCoalesce(RingPtr, &threshold, &timer);
+ if (wbnd_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ dma_int_mask_ptr = &dma_tx_int_mask;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ dma_int_mask_ptr = &dma_rx_int_mask;
+ }
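+		/* A waitbound of 0 means the delay (waitbound) interrupt is
+		 * not wanted: mask it out of the interrupt enables and write
+		 * a placeholder value of 1 to the hardware instead of 0.
+		 */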
+ if (wbnd_arg.waitbound == 0) {
+ wbnd_arg.waitbound = 1;
+ *dma_int_mask_ptr = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ }
+ if ((ret = XLlDma_BdRingSetCoalesce(RingPtr, threshold,
+ wbnd_arg.waitbound)) != XST_SUCCESS) {
+ return -EIO;
+ }
+ XLlDma_mBdRingIntEnable(RingPtr, *dma_int_mask_ptr);
+
+ return 0;
+
+ case SIOCDEVPRIVATE + 5: /* get THRESHOLD */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg)))
+ return -EFAULT;
+
+ if (thr_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ XLlDma_BdRingGetCoalesce(RingPtr,
+ (u32 *) &(thr_arg.threshold), &timer);
+ if (copy_to_user(rq->ifr_data, &thr_arg, sizeof(thr_arg))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ case SIOCDEVPRIVATE + 6: /* get WAITBOUND */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg))) {
+ return -EFAULT;
+ }
+		if (wbnd_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ XLlDma_BdRingGetCoalesce(RingPtr, &threshold,
+ (u32 *) &(wbnd_arg.waitbound));
+ if (copy_to_user(rq->ifr_data, &wbnd_arg, sizeof(wbnd_arg))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
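+
+/*
+ * Rough user-space sketch (illustration only, not part of this driver) of
+ * how the private SIOCDEVPRIVATE+3 "set THRESHOLD" ioctl above could be
+ * driven.  The argument block must match the thr_arg layout used in
+ * xenet_ioctl(), and the direction value must be the driver's XTE_SEND
+ * constant for the transmit ring (anything else selects the receive ring);
+ * the helper name and error handling here are assumptions, not existing code.
+ *
+ *	#include <string.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <sys/socket.h>
+ *	#include <net/if.h>
+ *
+ *	int xlltemac_set_threshold(const char *ifname,
+ *				   unsigned short threshold,
+ *				   unsigned int direction)
+ *	{
+ *		struct {
+ *			unsigned short threshold;
+ *			unsigned int direction;
+ *		} arg;
+ *		struct ifreq ifr;
+ *		int fd, rc;
+ *
+ *		arg.threshold = threshold;
+ *		arg.direction = direction;
+ *
+ *		memset(&ifr, 0, sizeof(ifr));
+ *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+ *		ifr.ifr_data = (char *) &arg;
+ *
+ *		fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *		if (fd < 0)
+ *			return -1;
+ *		rc = ioctl(fd, SIOCDEVPRIVATE + 3, &ifr);
+ *		close(fd);
+ *		return rc;
+ *	}
+ */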
+
+
+/******************************************************************************
+ *
+ * NEW FUNCTIONS FROM LINUX 2.6
+ *
+ ******************************************************************************/
+
+static void xtenet_remove_ndev(struct net_device *ndev)
+{
+ if (ndev) {
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (XLlTemac_IsDma(&lp->Emac) && (lp->desc_space))
+ free_descriptor_skb(ndev);
+
+ iounmap((__force void __iomem *) (lp->Emac.Config.BaseAddress));
+ free_netdev(ndev);
+ }
+}
+
+static int xtenet_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ unregister_netdev(ndev);
+ xtenet_remove_ndev(ndev);
+
+ return 0; /* success */
+}
+
+/* Detect the PHY address by scanning addresses 31 down to 1 and
+ * looking at the MII status register (register 1), assuming
+ * the PHY supports 10Mbps full/half duplex; if nothing responds,
+ * fall back to address 0. Feel free to change this code to match
+ * your PHY, or hardcode the address if needed.
+ */
+/* Use MII register 1 (MII status register) to detect PHY */
+#define PHY_DETECT_REG 1
+
+#ifdef CONFIG_XILINX_LLTEMAC_XILINX_1000BASEX
+/* Mask used to verify certain PHY features (or register contents)
+ * in the register above:
+ *     0x0100: Extended status support
+ *     0x0080: Unidirectional support
+ * 0x0040: MF Preamble suppression support
+ * 0x0008: Auto-negotiation support
+ */
+#define PHY_DETECT_MASK 0x01C8
+#else
+/* Mask used to verify certain PHY features (or register contents)
+ * in the register above:
+ * 0x1000: 10Mbps full duplex support
+ * 0x0800: 10Mbps half duplex support
+ * 0x0008: Auto-negotiation support
+ */
+#define PHY_DETECT_MASK 0x1808
+#endif
+
+static int detect_phy(struct net_local *lp, char *dev_name)
+{
+ u16 phy_reg;
+ u32 phy_addr;
+
+ for (phy_addr = 31; phy_addr > 0; phy_addr--) {
+ _XLlTemac_PhyRead(&lp->Emac, phy_addr, PHY_DETECT_REG, &phy_reg);
+
+ if ((phy_reg != 0xFFFF) &&
+ ((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
+ /* Found a valid PHY address */
+ printk(KERN_INFO "XTemac: PHY detected at address %d.\n", phy_addr);
+ return phy_addr;
+ }
+ }
+
+ printk(KERN_WARNING "XTemac: No PHY detected. Assuming a PHY at address 0\n");
+ return 0; /* default to zero */
+}
+
+static struct net_device_ops xilinx_netdev_ops;
+
+/* From include/linux/ethtool.h */
+static struct ethtool_ops ethtool_ops = {
+ .get_settings = xenet_ethtool_get_settings,
+ .set_settings = xenet_ethtool_set_settings,
+ .get_drvinfo = xenet_ethtool_get_drvinfo,
+ .get_regs_len = xenet_ethtool_get_regs_len,
+ .get_regs = xenet_ethtool_get_regs,
+ .get_coalesce = xenet_ethtool_get_coalesce,
+ .set_coalesce = xenet_ethtool_set_coalesce,
+ .get_ringparam = xenet_ethtool_get_ringparam,
+ .get_pauseparam = xenet_ethtool_get_pauseparam,
+/* .get_rx_csum = xenet_ethtool_get_rx_csum,
+ .set_rx_csum = xenet_ethtool_set_rx_csum,
+ .get_tx_csum = xenet_ethtool_get_tx_csum,
+ .set_tx_csum = xenet_ethtool_set_tx_csum,
+ .get_sg = xenet_ethtool_get_sg,
+ .set_sg = xenet_ethtool_set_sg, */
+ .get_strings = xenet_ethtool_get_strings,
+ .get_ethtool_stats = xenet_ethtool_get_ethtool_stats,
+ .get_sset_count = xenet_ethtool_get_sset_count,
+};
+
+/** Shared device initialization code */
+static int xtenet_setup(
+ struct device *dev,
+ struct resource *r_mem,
+ struct resource *r_irq,
+ struct xlltemac_platform_data *pdata) {
+ int xs;
+ u32 virt_baddr; /* virtual base address of TEMAC */
+
+ XLlTemac_Config Temac_Config;
+
+ struct net_device *ndev = NULL;
+ struct net_local *lp = NULL;
+
+ int rc = 0;
+
+ /* Create an ethernet device instance */
+ ndev = alloc_etherdev(sizeof(struct net_local));
+ if (!ndev) {
+ dev_err(dev, "Could not allocate net device.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ dev_set_drvdata(dev, ndev);
+
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->irq = r_irq->start;
+
+ /* Initialize the private data used by XEmac_LookupConfig().
+ * The private data are zeroed out by alloc_etherdev() already.
+ */
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dma_irq_r = pdata->ll_dev_dma_rx_irq;
+ lp->dma_irq_s = pdata->ll_dev_dma_tx_irq;
+ lp->fifo_irq = pdata->ll_dev_fifo_irq;
+
+ /* Setup the Config structure for the XLlTemac_CfgInitialize() call. */
+ Temac_Config.BaseAddress = r_mem->start;
+#if 0
+ Config.RxPktFifoDepth = pdata->rx_pkt_fifo_depth;
+ Config.TxPktFifoDepth = pdata->tx_pkt_fifo_depth;
+ Config.MacFifoDepth = pdata->mac_fifo_depth;
+ Config.IpIfDmaConfig = pdata->dma_mode;
+#endif
+ Temac_Config.TxCsum = pdata->tx_csum;
+ Temac_Config.RxCsum = pdata->rx_csum;
+ Temac_Config.LLDevType = pdata->ll_dev_type;
+ Temac_Config.LLDevBaseAddress = pdata->ll_dev_baseaddress;
+ Temac_Config.PhyType = pdata->phy_type;
+
+ /* Get the virtual base address for the device */
+ virt_baddr = (__force u32) ioremap(r_mem->start,
+ r_mem->end - r_mem->start + 1);
+ if (0 == virt_baddr) {
+ dev_err(dev, "XLlTemac: Could not allocate iomem.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ if (XLlTemac_CfgInitialize(&lp->Emac, &Temac_Config, virt_baddr) !=
+ XST_SUCCESS) {
+ dev_err(dev, "XLlTemac: Could not initialize device.\n");
+
+ rc = -ENODEV;
+ goto error;
+ }
+
+ /* Set the MAC address from platform data */
+ memcpy(ndev->dev_addr, pdata->mac_addr, 6);
+
+ if (_XLlTemac_SetMacAddress(&lp->Emac, ndev->dev_addr) != XST_SUCCESS) {
+ /* should not fail right after an initialize */
+ dev_err(dev, "XLlTemac: could not set MAC address.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ dev_info(dev,
+		 "MAC address is now %02x:%02x:%02x:%02x:%02x:%02x\n",
+ pdata->mac_addr[0], pdata->mac_addr[1],
+ pdata->mac_addr[2], pdata->mac_addr[3],
+ pdata->mac_addr[4], pdata->mac_addr[5]);
+
+ if (ndev->mtu > XTE_JUMBO_MTU)
+ ndev->mtu = XTE_JUMBO_MTU;
+
+ lp->frame_size = ndev->mtu + XTE_HDR_SIZE + XTE_TRL_SIZE;
+
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ int result;
+
+		dev_info(dev, "XLlTemac: using DMA mode.\n");
+
+ if (pdata->dcr_host) {
+			printk(KERN_INFO "XLlTemac: DCR address: 0x%08x\n", pdata->ll_dev_baseaddress);
+ XLlDma_Initialize(&lp->Dma, pdata->ll_dev_baseaddress);
+ lp->virt_dma_addr = pdata->ll_dev_baseaddress;
+ } else {
+ virt_baddr = (__force u32) ioremap(pdata->ll_dev_baseaddress, 4096);
+ lp->virt_dma_addr = virt_baddr;
+ if (0 == virt_baddr) {
+ dev_err(dev,
+ "XLlTemac: Could not allocate iomem for local link connected device.\n");
+ rc = -EIO;
+ goto error;
+ }
+			printk(KERN_INFO "XLlTemac: DMA base address: phys: 0x%x, virt: 0x%x\n", pdata->ll_dev_baseaddress, virt_baddr);
+ XLlDma_Initialize(&lp->Dma, virt_baddr);
+ }
+
+ xilinx_netdev_ops.ndo_start_xmit = xenet_DmaSend;
+
+ result = descriptor_init(ndev);
+ if (result) {
+ rc = -EIO;
+ goto error;
+ }
+
+ /* set the packet threshold and wait bound for both TX/RX directions */
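+		/* As with the waitbound ioctl handling above, a default
+		 * waitbound of 0 means the delay interrupt is masked off and
+		 * a placeholder value of 1 is programmed instead.
+		 */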
+ if (DFT_TX_WAITBOUND == 0) {
+ dma_tx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing, DFT_TX_THRESHOLD, 1);
+ } else {
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing, DFT_TX_THRESHOLD, DFT_TX_WAITBOUND);
+ }
+ if (xs != XST_SUCCESS) {
+ dev_err(dev,
+ "XLlTemac: could not set SEND pkt threshold/waitbound, ERROR %d",
+ xs);
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+
+ if (DFT_RX_WAITBOUND == 0) {
+ dma_rx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing, DFT_RX_THRESHOLD, 1);
+ } else {
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing, DFT_RX_THRESHOLD, DFT_RX_WAITBOUND);
+ }
+ if (xs != XST_SUCCESS) {
+ dev_err(dev,
+ "XLlTemac: Could not set RECV pkt threshold/waitbound ERROR %d",
+ xs);
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+ }
+ else {
+		dev_info(dev,
+			 "XLlTemac: using FIFO direct interrupt driven mode.\n");
+
+ virt_baddr = (__force u32) ioremap(pdata->ll_dev_baseaddress,
+ 4096);
+ if (0 == virt_baddr) {
+ dev_err(dev,
+ "XLlTemac: Could not allocate iomem for local link connected device.\n");
+ rc = -EIO;
+ goto error;
+ }
+		printk(KERN_INFO "XLlTemac: Fifo base address: 0x%08x\n", virt_baddr);
+ XLlFifo_Initialize(&lp->Fifo, virt_baddr);
+
+ xilinx_netdev_ops.ndo_start_xmit = xenet_FifoSend;
+ }
+
+ /** Scan to find the PHY */
+ lp->gmii_addr = detect_phy(lp, ndev->name);
+
+
+ /* initialize the netdev structure */
+
+ ndev->netdev_ops = &xilinx_netdev_ops;
+
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+
+ if (XLlTemac_IsTxCsum(&lp->Emac) == TRUE) {
+ /*
+ * This hardware only supports proper checksum calculations
+ * on TCP/UDP packets.
+ */
+ ndev->features |= NETIF_F_IP_CSUM;
+ }
+ if (XLlTemac_IsRxCsum(&lp->Emac) == TRUE) {
+ lp->local_features |= LOCAL_FEATURE_RX_CSUM;
+ }
+ }
+
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ /* init the stats */
+ lp->max_frags_in_a_packet = 0;
+ lp->tx_hw_csums = 0;
+ lp->rx_hw_csums = 0;
+
+#if ! XTE_AUTOSTRIPPING
+ lp->stripping =
+ (XLlTemac_GetOptions(&(lp->Emac)) & XTE_FCS_STRIP_OPTION) != 0;
+#endif
+
+ /* Set ethtool IOCTL handler vectors.
+ * xenet_do_ethtool_ioctl() is deprecated.
+ */
+	SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(dev,
+ "%s: Cannot register net device, aborting.\n",
+ ndev->name);
+ goto error; /* rc is already set here... */
+ }
+
+ dev_info(dev,
+ "%s: Xilinx TEMAC at 0x%08X mapped to 0x%08X, irq=%d\n",
+ ndev->name,
+ (unsigned int)r_mem->start,
+ lp->Emac.Config.BaseAddress,
+ ndev->irq);
+
+ return 0;
+
+error:
+ if (ndev) {
+ xtenet_remove_ndev(ndev);
+ }
+ return rc;
+}
+
+#if 0
+static int xtenet_probe(struct device *dev)
+{
+ struct resource *r_irq = NULL; /* Interrupt resources */
+ struct resource *r_mem = NULL; /* IO mem resources */
+ struct xlltemac_platform_data *pdata;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* param check */
+ if (!pdev) {
+ dev_err(dev, "Probe called with NULL param.\n");
+ return -ENODEV;
+ }
+
+ pdata = (struct xlltemac_platform_data *) pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "Couldn't find platform data.\n");
+
+ return -ENODEV;
+ }
+
+ /* Get iospace and an irq for the device */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_irq || !r_mem) {
+ dev_err(dev, "IO resource(s) not found.\n");
+ return -ENODEV;
+ }
+
+ return xtenet_setup(dev, r_mem, r_irq, pdata);
+}
+#endif
+
+
+#ifdef CONFIG_OF
+static u32 get_u32(struct platform_device *op, const char *s) {
+ u32 *p = (u32 *)of_get_property(op->dev.of_node, s, NULL);
+ if(p) {
+ return *p;
+ } else {
+ dev_warn(&op->dev, "Parameter %s not found, defaulting to false.\n", s);
+ return FALSE;
+ }
+}
+
+static struct net_device_ops xilinx_netdev_ops = {
+ .ndo_open = xenet_open,
+ .ndo_stop = xenet_close,
+ .ndo_start_xmit = NULL,
+ .ndo_do_ioctl = xenet_ioctl,
+ .ndo_change_mtu = xenet_change_mtu,
+ .ndo_tx_timeout = xenet_tx_timeout,
+ .ndo_get_stats = xenet_get_stats,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = xenet_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static struct of_device_id xtenet_fifo_of_match[] = {
+ { .compatible = "xlnx,xps-ll-fifo-1.00.a", },
+ { .compatible = "xlnx,xps-ll-fifo-1.00.b", },
+ { .compatible = "xlnx,xps-ll-fifo-1.01.a", },
+ { /* end of list */ },
+};
+
+static struct of_device_id xtenet_sdma_of_match[] = {
+ { .compatible = "xlnx,ll-dma-1.00.a", },
+ { /* end of list */ },
+};
+
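+/*
+ * Illustrative device-tree fragment showing the shape of the binding this
+ * probe routine expects; every address, interrupt number, node name and
+ * property value below is a placeholder, not taken from a real design.
+ * The TEMAC node carries "xlnx,txcsum", "xlnx,rxcsum", "xlnx,phy-type",
+ * a MAC address and a "llink-connected" phandle that points either at an
+ * LL FIFO node (one interrupt) or at an LL DMA node (RX interrupt listed
+ * first, then TX, or a "dcr-reg" property on DCR-attached systems).
+ *
+ *	ethernet@81c00000 {
+ *		compatible = "xlnx,xps-ll-temac-1.01.b";
+ *		reg = <0x81c00000 0x40>;
+ *		interrupts = <5 2>;
+ *		local-mac-address = [ 00 0a 35 00 00 01 ];
+ *		xlnx,txcsum = <0>;
+ *		xlnx,rxcsum = <0>;
+ *		xlnx,phy-type = <1>;
+ *		llink-connected = <&ll_sdma>;
+ *	};
+ *
+ *	ll_sdma: sdma@84600000 {
+ *		compatible = "xlnx,ll-dma-1.00.a";
+ *		reg = <0x84600000 0x100>;
+ *		interrupts = <3 2 4 2>;
+ *	};
+ */
+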
+static int xtenet_of_probe(struct platform_device *op)
+{
+ struct resource r_irq_struct;
+ struct resource r_mem_struct;
+ struct resource r_connected_mem_struct;
+ struct resource r_connected_irq_struct;
+ struct xlltemac_platform_data pdata_struct;
+
+ struct resource *r_irq = &r_irq_struct; /* Interrupt resources */
+ struct resource *r_mem = &r_mem_struct; /* IO mem resources */
+ struct xlltemac_platform_data *pdata = &pdata_struct;
+ const void *mac_address;
+ int rc = 0;
+ const phandle *llink_connected_handle;
+ struct device_node *llink_connected_node;
+ u32 *dcrreg_property;
+
+ /*
+ * Make sure the locks are initialized
+ */
+ spin_lock_init(&XTE_spinlock);
+ spin_lock_init(&XTE_tx_spinlock);
+ spin_lock_init(&XTE_rx_spinlock);
+
+ INIT_LIST_HEAD(&sentQueue);
+ INIT_LIST_HEAD(&receivedQueue);
+
+ spin_lock_init(&sentQueueSpin);
+ spin_lock_init(&receivedQueueSpin);
+
+	printk(KERN_INFO "Device Tree Probing '%s'\n",
+ op->dev.of_node->name);
+
+ /* Get iospace for the device */
+ rc = of_address_to_resource(op->dev.of_node, 0, r_mem);
+ if(rc) {
+ dev_warn(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ /* Get IRQ for the device */
+ rc = of_irq_to_resource(op->dev.of_node, 0, r_irq);
+ if(!rc) {
+ dev_warn(&op->dev, "no IRQ found.\n");
+		return -ENODEV;
+ }
+
+ pdata_struct.tx_csum = get_u32(op, "xlnx,txcsum");
+ pdata_struct.rx_csum = get_u32(op, "xlnx,rxcsum");
+ pdata_struct.phy_type = get_u32(op, "xlnx,phy-type");
+ llink_connected_handle =
+ of_get_property(op->dev.of_node, "llink-connected", NULL);
+ if(!llink_connected_handle) {
+ dev_warn(&op->dev, "no Locallink connection found.\n");
+		return -ENODEV;
+ }
+
+ llink_connected_node =
+ of_find_node_by_phandle(*llink_connected_handle);
+ rc = of_address_to_resource(
+ llink_connected_node,
+ 0,
+ &r_connected_mem_struct);
+
+ /** Get the right information from whatever the locallink is
+ connected to. */
+ if(of_match_node(xtenet_fifo_of_match, llink_connected_node)) {
+ /** Connected to a fifo. */
+
+ if(rc) {
+ dev_warn(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ pdata_struct.ll_dev_baseaddress = r_connected_mem_struct.start;
+ pdata_struct.ll_dev_type = XPAR_LL_FIFO;
+ pdata_struct.ll_dev_dma_rx_irq = 0;
+ pdata_struct.ll_dev_dma_tx_irq = 0;
+
+ rc = of_irq_to_resource(
+ llink_connected_node,
+ 0,
+ &r_connected_irq_struct);
+ if(!rc) {
+ dev_warn(&op->dev, "no IRQ found.\n");
+			return -ENODEV;
+ }
+ pdata_struct.ll_dev_fifo_irq = r_connected_irq_struct.start;
+ pdata_struct.dcr_host = 0x0;
+ } else if(of_match_node(xtenet_sdma_of_match, llink_connected_node)) {
+ /** Connected to a dma port, default to 405 type dma */
+ pdata->dcr_host = 0;
+ if(rc) {
+ /* no address was found, might be 440, check for dcr reg */
+
+ dcrreg_property = (u32 *)of_get_property(llink_connected_node, "dcr-reg", NULL);
+ if(dcrreg_property) {
+ r_connected_mem_struct.start = *dcrreg_property;
+ pdata->dcr_host = 0xFF;
+ } else {
+ dev_warn(&op->dev, "invalid address\n");
+ return rc;
+ }
+ }
+
+ pdata_struct.ll_dev_baseaddress = r_connected_mem_struct.start;
+ pdata_struct.ll_dev_type = XPAR_LL_DMA;
+
+ rc = of_irq_to_resource(
+ llink_connected_node,
+ 0,
+ &r_connected_irq_struct);
+ if(!rc) {
+ dev_warn(&op->dev, "First IRQ not found.\n");
+			return -ENODEV;
+ }
+ pdata_struct.ll_dev_dma_rx_irq = r_connected_irq_struct.start;
+ rc = of_irq_to_resource(
+ llink_connected_node,
+ 1,
+ &r_connected_irq_struct);
+ if(!rc) {
+ dev_warn(&op->dev, "Second IRQ not found.\n");
+			return -ENODEV;
+ }
+ pdata_struct.ll_dev_dma_tx_irq = r_connected_irq_struct.start;
+
+ pdata_struct.ll_dev_fifo_irq = 0;
+ } else {
+ dev_warn(&op->dev, "Locallink connection not matched.\n");
+		return -ENODEV;
+ }
+
+ of_node_put(llink_connected_node);
+ mac_address = of_get_mac_address(op->dev.of_node);
+ if(mac_address) {
+ memcpy(pdata_struct.mac_addr, mac_address, 6);
+ } else {
+ dev_warn(&op->dev, "No MAC address found.\n");
+ }
+
+ return xtenet_setup(&op->dev, r_mem, r_irq, pdata);
+}
+
+static int xtenet_of_remove(struct platform_device *op)
+{
+ return xtenet_remove(&op->dev);
+}
+
+static struct of_device_id xtenet_of_match[] = {
+ { .compatible = "xlnx,xps-ll-temac-1.00.a", },
+ { .compatible = "xlnx,xps-ll-temac-1.00.b", },
+ { .compatible = "xlnx,xps-ll-temac-1.01.a", },
+ { .compatible = "xlnx,xps-ll-temac-1.01.b", },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(of, xtenet_of_match);
+#endif
+
+static struct platform_driver xtenet_of_driver = {
+ .probe = xtenet_of_probe,
+ .remove = xtenet_of_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xtenet_of_match),
+ },
+};
+
+module_platform_driver(xtenet_of_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");