Diffstat (limited to 'drivers/net/ks8842.c')
-rw-r--r--  drivers/net/ks8842.c  706
1 file changed, 614 insertions(+), 92 deletions(-)
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index f852ab3ae9cf..928b2b83cef5 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -18,6 +18,7 @@
/* Supports:
* The Micrel KS8842 behind the timberdale FPGA
+ * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,11 +30,19 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ks8842.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
#define DRV_NAME "ks8842"
/* Timberdale specific Registers */
-#define REG_TIMB_RST 0x1c
+#define REG_TIMB_RST 0x1c
+#define REG_TIMB_FIFO 0x20
+#define REG_TIMB_ISR 0x24
+#define REG_TIMB_IER 0x28
+#define REG_TIMB_IAR 0x2C
+#define REQ_TIMB_DMA_RESUME 0x30
/* KS8842 registers */
@@ -76,6 +85,15 @@
#define IRQ_RX_ERROR 0x0080
#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+/* When running via timberdale in DMA mode, the RX interrupt should be
+ enabled in the KS8842, but not in the FPGA IP, since the IP handles
+ RX DMA internally.
+ TX interrupts are not needed; TX is handled by the FPGA and the driver
+ is notified via DMA callbacks.
+*/
+#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
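Written out, the DMA masks are just ENABLED_IRQS with the FPGA-handled sources removed; the sketch below restates the definitions above as identities (illustration only, not part of the patch).

/*
 * Equivalent formulation of the masks defined above:
 *   ENABLED_IRQS_DMA_IP == ENABLED_IRQS & ~(IRQ_TX | IRQ_RX)
 *   ENABLED_IRQS_DMA    == ENABLED_IRQS & ~IRQ_TX
 * i.e. the FPGA IP never sees TX or RX interrupts, while the KS8842
 * itself keeps IRQ_RX enabled so it can feed the FPGA's RX DMA FIFO.
 */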
#define REG_ISR 0x02
#define REG_RXSR 0x04
#define RXSR_VALID 0x8000
@@ -114,14 +132,53 @@
#define REG_P1CR4 0x02
#define REG_P1SR 0x04
+/* flags passed by platform_device for configuration */
+#define MICREL_KS884X 0x01 /* 0=Timberdale(FPGA), 1=Micrel */
+#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */
+
+#define DMA_BUFFER_SIZE 2048
+
+struct ks8842_tx_dma_ctl {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *adesc;
+ void *buf;
+ struct scatterlist sg;
+ int channel;
+};
+
+struct ks8842_rx_dma_ctl {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *adesc;
+ struct sk_buff *skb;
+ struct scatterlist sg;
+ struct tasklet_struct tasklet;
+ int channel;
+};
+
+#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
+ ((adapter)->dma_rx.channel != -1))
+
struct ks8842_adapter {
void __iomem *hw_addr;
int irq;
+ unsigned long conf_flags; /* copy of platform_device config */
struct tasklet_struct tasklet;
spinlock_t lock; /* spinlock to be interrupt safe */
- struct platform_device *pdev;
+ struct work_struct timeout_work;
+ struct net_device *netdev;
+ struct device *dev;
+ struct ks8842_tx_dma_ctl dma_tx;
+ struct ks8842_rx_dma_ctl dma_rx;
};
+static void ks8842_dma_rx_cb(void *data);
+static void ks8842_dma_tx_cb(void *data);
+
+static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
+{
+ iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
+}
+
static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
{
iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
@@ -191,16 +248,21 @@ static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
static void ks8842_reset(struct ks8842_adapter *adapter)
{
- /* The KS8842 goes haywire when doing softare reset
- * a work around in the timberdale IP is implemented to
- * do a hardware reset instead
- ks8842_write16(adapter, 3, 1, REG_GRR);
- msleep(10);
- iowrite16(0, adapter->hw_addr + REG_GRR);
- */
- iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
- iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
- msleep(20);
+ if (adapter->conf_flags & MICREL_KS884X) {
+ ks8842_write16(adapter, 3, 1, REG_GRR);
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ } else {
+ /* The KS8842 goes haywire when doing software reset
+ * a workaround in the timberdale IP is implemented to
+ * do a hardware reset instead
+ ks8842_write16(adapter, 3, 1, REG_GRR);
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ */
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
+ msleep(20);
+ }
}
static void ks8842_update_link_status(struct net_device *netdev,
@@ -269,8 +331,6 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* restart port auto-negotiation */
ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
- /* only advertise 10Mbps */
- ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
/* Enable the transmitter */
ks8842_enable_tx(adapter);
@@ -282,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
ks8842_write16(adapter, 18, 0xffff, REG_ISR);
/* enable interrupts */
- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
-
+ if (KS8842_USE_DMA(adapter)) {
+ /* When running in DMA Mode the RX interrupt is not enabled in
+ timberdale because RX data is received via DMA callbacks;
+ it must still be enabled in the KS8842 because it indicates
+ to timberdale when there is RX data for its DMA FIFOs */
+ iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+ } else {
+ if (!(adapter->conf_flags & MICREL_KS884X))
+ iowrite16(ENABLED_IRQS,
+ adapter->hw_addr + REG_TIMB_IER);
+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+ }
/* enable the switch */
ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}
@@ -296,13 +367,28 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
for (i = 0; i < ETH_ALEN; i++)
dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
- /* make sure the switch port uses the same MAC as the QMU */
- mac = ks8842_read16(adapter, 2, REG_MARL);
- ks8842_write16(adapter, 39, mac, REG_MACAR1);
- mac = ks8842_read16(adapter, 2, REG_MARM);
- ks8842_write16(adapter, 39, mac, REG_MACAR2);
- mac = ks8842_read16(adapter, 2, REG_MARH);
- ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ if (adapter->conf_flags & MICREL_KS884X) {
+ /*
+ On the genuine Micrel parts the MAC address is mirrored from
+ the MAC to the switch registers in the reverse order.
+ */
+
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
+ } else {
+
+ /* make sure the switch port uses the same MAC as the QMU */
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ }
}
static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
@@ -313,8 +399,25 @@ static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
spin_lock_irqsave(&adapter->lock, flags);
for (i = 0; i < ETH_ALEN; i++) {
ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
- REG_MACAR1 + i);
+ if (!(adapter->conf_flags & MICREL_KS884X))
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+
+ if (adapter->conf_flags & MICREL_KS884X) {
+ /*
+ On the genuine Micrel parts the MAC address is mirrored from
+ the MAC to the switch registers in the reverse order.
+ */
+
+ u16 mac;
+
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
}
spin_unlock_irqrestore(&adapter->lock, flags);
}
@@ -324,15 +427,59 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
}
+static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
+ u8 *buf = ctl->buf;
+
+ if (ctl->adesc) {
+ netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
+ /* transfer ongoing */
+ return NETDEV_TX_BUSY;
+ }
+
+ sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
+
+ /* copy data to the TX buffer */
+ /* the control word, enable IRQ, port 1 and the length */
+ *buf++ = 0x00;
+ *buf++ = 0x01; /* Port 1 */
+ *buf++ = skb->len & 0xff;
+ *buf++ = (skb->len >> 8) & 0xff;
+ skb_copy_from_linear_data(skb, buf, skb->len);
+
+ dma_sync_single_range_for_device(adapter->dev,
+ sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
+ DMA_TO_DEVICE);
+
+ /* make sure the length is a multiple of 4 */
+ if (sg_dma_len(&ctl->sg) % 4)
+ sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
+
+ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+ &ctl->sg, 1, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ if (!ctl->adesc)
+ return NETDEV_TX_BUSY;
+
+ ctl->adesc->callback_param = netdev;
+ ctl->adesc->callback = ks8842_dma_tx_cb;
+ ctl->adesc->tx_submit(ctl->adesc);
+
+ netdev->stats.tx_bytes += skb->len;
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
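ks8842_tx_frame_dma() follows the dmaengine slave pattern of this kernel generation: prepare a descriptor from a mapped scatterlist, attach a completion callback, submit. Below is a minimal, hedged sketch of that pattern in isolation, using only APIs the patch itself relies on; the helper name and error handling are illustrative.

/* Sketch only: generic slave-DMA TX submission. Assumes `chan` was
 * obtained with dma_request_channel() and `sg` already holds a valid
 * streaming mapping (see <linux/dmaengine.h>, <linux/scatterlist.h>).
 */
static int example_submit_tx(struct dma_chan *chan, struct scatterlist *sg,
			     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_slave_sg(chan, sg, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;		/* no descriptor free right now */

	desc->callback = cb;		/* runs when the transfer completes */
	desc->callback_param = cb_param;
	desc->tx_submit(desc);		/* queue the transfer on the channel */

	return 0;
}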
+
static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
int len = skb->len;
- u32 *ptr = (u32 *)skb->data;
- u32 ctrl;
- dev_dbg(&adapter->pdev->dev,
- "%s: len %u head %p data %p tail %p end %p\n",
+ netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
__func__, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -340,17 +487,34 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
if (ks8842_tx_fifo_space(adapter) < len + 8)
return NETDEV_TX_BUSY;
- /* the control word, enable IRQ, port 1 and the length */
- ctrl = 0x8000 | 0x100 | (len << 16);
- ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
+ if (adapter->conf_flags & KS884X_16BIT) {
+ u16 *ptr16 = (u16 *)skb->data;
+ ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
+ ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
+ netdev->stats.tx_bytes += len;
+
+ /* copy buffer */
+ while (len > 0) {
+ iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
+ iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
+ len -= sizeof(u32);
+ }
+ } else {
- netdev->stats.tx_bytes += len;
+ u32 *ptr = (u32 *)skb->data;
+ u32 ctrl;
+ /* the control word, enable IRQ, port 1 and the length */
+ ctrl = 0x8000 | 0x100 | (len << 16);
+ ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
- /* copy buffer */
- while (len > 0) {
- iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
- len -= sizeof(u32);
- ptr++;
+ netdev->stats.tx_bytes += len;
+
+ /* copy buffer */
+ while (len > 0) {
+ iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
+ len -= sizeof(u32);
+ ptr++;
+ }
}
/* enqueue packet */
@@ -361,54 +525,174 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
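The QMU control word written ahead of every frame is the same on both bus widths: IRQ-enable, destination port and length; on the 16-bit ISA variant it is simply split across the LO/HI data registers. A hedged restatement follows (the helper name is illustrative, the constants are the ones used above).

/* Illustration only: 0x8000 requests a TX-done interrupt, 0x0100
 * selects port 1, and the frame length occupies the upper 16 bits.
 * The 16-bit path writes the low half (0x8100) to REG_QMU_DATA_LO and
 * the length to REG_QMU_DATA_HI instead.
 */
static inline u32 example_tx_ctrl_word(unsigned int len)
{
	return 0x8000 | 0x100 | (len << 16);
}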
-static void ks8842_rx_frame(struct net_device *netdev,
- struct ks8842_adapter *adapter)
+static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
+{
+ netdev_dbg(netdev, "RX error, status: %x\n", status);
+
+ netdev->stats.rx_errors++;
+ if (status & RXSR_TOO_LONG)
+ netdev->stats.rx_length_errors++;
+ if (status & RXSR_CRC_ERROR)
+ netdev->stats.rx_crc_errors++;
+ if (status & RXSR_RUNT)
+ netdev->stats.rx_frame_errors++;
+}
+
+static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
+ int len)
+{
+ netdev_dbg(netdev, "RX packet, len: %d\n", len);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += len;
+ if (status & RXSR_MULTICAST)
+ netdev->stats.multicast++;
+}
+
+static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
- u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
- int len = (status >> 16) & 0x7ff;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+ struct scatterlist *sg = &ctl->sg;
+ int err;
- status &= 0xffff;
+ ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
+ if (ctl->skb) {
+ sg_init_table(sg, 1);
+ sg_dma_address(sg) = dma_map_single(adapter->dev,
+ ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
+ if (unlikely(err)) {
+ sg_dma_address(sg) = 0;
+ goto out;
+ }
+
+ sg_dma_len(sg) = DMA_BUFFER_SIZE;
+
+ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+ sg, 1, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+
+ if (!ctl->adesc)
+ goto out;
+
+ ctl->adesc->callback_param = netdev;
+ ctl->adesc->callback = ks8842_dma_rx_cb;
+ ctl->adesc->tx_submit(ctl->adesc);
+ } else {
+ err = -ENOMEM;
+ sg_dma_address(sg) = 0;
+ goto out;
+ }
- dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
- __func__, status);
+ return err;
+out:
+ if (sg_dma_address(sg))
+ dma_unmap_single(adapter->dev, sg_dma_address(sg),
+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ sg_dma_address(sg) = 0;
+ if (ctl->skb)
+ dev_kfree_skb(ctl->skb);
+
+ ctl->skb = NULL;
+
+ printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
+ return err;
+}
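__ks8842_start_new_rx_dma() also shows the usual streaming-mapping hygiene: every dma_map_single() is checked with dma_mapping_error() and the error path unwinds whatever was already set up. Below is a reduced, hedged sketch of just the mapping step; the function name and error code are illustrative.

/* Sketch only: map an RX buffer for a slave-DMA read and validate the
 * mapping before handing it to the engine.
 */
static int example_map_rx(struct device *dev, struct scatterlist *sg,
			  void *buf, size_t len)
{
	sg_init_table(sg, 1);
	sg_dma_address(sg) = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, sg_dma_address(sg))) {
		sg_dma_address(sg) = 0;	/* mark as unmapped for cleanup */
		return -ENOMEM;
	}
	sg_dma_len(sg) = len;
	return 0;
}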
+
+static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+{
+ struct net_device *netdev = (struct net_device *)arg;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+ struct sk_buff *skb = ctl->skb;
+ dma_addr_t addr = sg_dma_address(&ctl->sg);
+ u32 status;
+
+ ctl->adesc = NULL;
+
+ /* kick next transfer going */
+ __ks8842_start_new_rx_dma(netdev);
+
+ /* now handle the data we got */
+ dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ status = *((u32 *)skb->data);
+
+ netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+ __func__, status & 0xffff);
/* check the status */
if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+ int len = (status >> 16) & 0x7ff;
- dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
- __func__, len);
- if (skb) {
- u32 *data;
+ ks8842_update_rx_counters(netdev, status, len);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += len;
- if (status & RXSR_MULTICAST)
- netdev->stats.multicast++;
+ /* reserve 4 bytes which is the status word */
+ skb_reserve(skb, 4);
+ skb_put(skb, len);
- data = (u32 *)skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ } else {
+ ks8842_update_rx_err_counters(netdev, status);
+ dev_kfree_skb(skb);
+ }
+}
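The DMA RX buffer carries the same layout as the PIO FIFO stream: a 32-bit word with the RX status in the low half and the frame length in the high half, followed by the frame data, which is why the tasklet does skb_reserve(skb, 4) before skb_put(). A hedged decoding sketch (helper name is illustrative):

/* Illustration only: decoding the leading status/length word of a
 * received DMA buffer, matching what the tasklet above does inline.
 */
static inline void example_decode_rx_word(u32 word, u16 *status, int *len)
{
	*status = word & 0xffff;	/* RXSR_* bits */
	*len = (word >> 16) & 0x7ff;	/* frame length in bytes */
}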
- ks8842_select_bank(adapter, 17);
- while (len > 0) {
- *data++ = ioread32(adapter->hw_addr +
- REG_QMU_DATA_LO);
- len -= sizeof(u32);
- }
+static void ks8842_rx_frame(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ u32 status;
+ int len;
+
+ if (adapter->conf_flags & KS884X_16BIT) {
+ status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
+ len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
+ netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+ __func__, status);
+ } else {
+ status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
+ len = (status >> 16) & 0x7ff;
+ status &= 0xffff;
+ netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+ __func__, status);
+ }
+ /* check the status */
+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+
+ if (skb) {
+
+ ks8842_update_rx_counters(netdev, status, len);
+
+ if (adapter->conf_flags & KS884X_16BIT) {
+ u16 *data16 = (u16 *)skb_put(skb, len);
+ ks8842_select_bank(adapter, 17);
+ while (len > 0) {
+ *data16++ = ioread16(adapter->hw_addr +
+ REG_QMU_DATA_LO);
+ *data16++ = ioread16(adapter->hw_addr +
+ REG_QMU_DATA_HI);
+ len -= sizeof(u32);
+ }
+ } else {
+ u32 *data = (u32 *)skb_put(skb, len);
+
+ ks8842_select_bank(adapter, 17);
+ while (len > 0) {
+ *data++ = ioread32(adapter->hw_addr +
+ REG_QMU_DATA_LO);
+ len -= sizeof(u32);
+ }
+ }
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
} else
netdev->stats.rx_dropped++;
- } else {
- dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
- netdev->stats.rx_errors++;
- if (status & RXSR_TOO_LONG)
- netdev->stats.rx_length_errors++;
- if (status & RXSR_CRC_ERROR)
- netdev->stats.rx_crc_errors++;
- if (status & RXSR_RUNT)
- netdev->stats.rx_frame_errors++;
- }
+ } else
+ ks8842_update_rx_err_counters(netdev, status);
/* set high watermark to 3K */
ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
@@ -423,8 +707,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
{
u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
- dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
- __func__, rx_data);
+ netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
while (rx_data) {
ks8842_rx_frame(netdev, adapter);
rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
@@ -434,7 +717,7 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
{
u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
- dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
+ netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
netdev->stats.tx_packets++;
if (netif_queue_stopped(netdev))
netif_wake_queue(netdev);
@@ -443,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
void ks8842_handle_rx_overrun(struct net_device *netdev,
struct ks8842_adapter *adapter)
{
- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+ netdev_dbg(netdev, "%s: entry\n", __func__);
netdev->stats.rx_errors++;
netdev->stats.rx_fifo_errors++;
}
@@ -462,20 +745,32 @@ void ks8842_tasklet(unsigned long arg)
spin_unlock_irqrestore(&adapter->lock, flags);
isr = ks8842_read16(adapter, 18, REG_ISR);
- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
+ netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ /* when running in DMA mode, do not ack RX interrupts; they are handled
+ internally by timberdale, otherwise its DMA FIFOs would stop
+ */
+ if (KS8842_USE_DMA(adapter))
+ isr &= ~IRQ_RX;
/* Ack */
ks8842_write16(adapter, 18, isr, REG_ISR);
+ if (!(adapter->conf_flags & MICREL_KS884X))
+ /* Ack in the timberdale IP as well */
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
+
if (!netif_running(netdev))
return;
if (isr & IRQ_LINK_CHANGE)
ks8842_update_link_status(netdev, adapter);
- if (isr & (IRQ_RX | IRQ_RX_ERROR))
+ /* should not get IRQ_RX when running DMA mode */
+ if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
ks8842_handle_rx(netdev, adapter);
+ /* should only happen when in PIO mode */
if (isr & IRQ_TX)
ks8842_handle_tx(netdev, adapter);
@@ -494,24 +789,38 @@ void ks8842_tasklet(unsigned long arg)
/* re-enable interrupts, put back the bank selection register */
spin_lock_irqsave(&adapter->lock, flags);
- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+ if (KS8842_USE_DMA(adapter))
+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+ else
+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+
+ /* Make sure timberdale continues DMA operations; they are stopped while
+ we are handling the ks8842 because we might change bank */
+ if (KS8842_USE_DMA(adapter))
+ ks8842_resume_dma(adapter);
+
spin_unlock_irqrestore(&adapter->lock, flags);
}
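Both the tasklet and the hard IRQ handler follow the same bracket: note the bank register on entry, restore it on exit, and only then let the FPGA's DMA FIFOs run again, since they are paused while the bank might change under them. A hedged sketch of that idiom (helper name is illustrative):

/* Sketch only: the save/restore-and-resume bracket used around banked
 * register access while timberdale DMA is paused.
 */
static void example_banked_access(struct ks8842_adapter *adapter)
{
	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);

	/* ... access registers in other banks here ... */

	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
	if (KS8842_USE_DMA(adapter))
		ks8842_resume_dma(adapter);	/* let the FPGA FIFOs run again */
}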
static irqreturn_t ks8842_irq(int irq, void *devid)
{
- struct ks8842_adapter *adapter = devid;
+ struct net_device *netdev = devid;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
u16 isr;
u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
irqreturn_t ret = IRQ_NONE;
isr = ks8842_read16(adapter, 18, REG_ISR);
- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
+ netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
if (isr) {
- /* disable IRQ */
- ks8842_write16(adapter, 18, 0x00, REG_IER);
+ if (KS8842_USE_DMA(adapter))
+ /* disable all but RX IRQ, since the FPGA relies on it */
+ ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
+ else
+ /* disable IRQ */
+ ks8842_write16(adapter, 18, 0x00, REG_IER);
/* schedule tasklet */
tasklet_schedule(&adapter->tasklet);
@@ -521,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+ /* After an interrupt, tell timberdale to continue DMA operations.
+ DMA is disabled while we are handling the ks8842 because we might
+ change bank */
+ ks8842_resume_dma(adapter);
+
return ret;
}
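The devid cookie also changes in this patch: the handler now receives the net_device rather than the adapter, so it must match what request_irq() and free_irq() are called with further down. A hedged sketch of the pairing (wrapper names are illustrative):

/* Sketch only: whatever cookie is passed to request_irq() comes back
 * as `devid` in the handler and must be repeated in free_irq().
 */
static int example_setup_irq(struct net_device *netdev, int irq)
{
	return request_irq(irq, ks8842_irq, IRQF_SHARED, DRV_NAME, netdev);
}

static void example_teardown_irq(struct net_device *netdev, int irq)
{
	free_irq(irq, netdev);
}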
+static void ks8842_dma_rx_cb(void *data)
+{
+ struct net_device *netdev = data;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ netdev_dbg(netdev, "RX DMA finished\n");
+ /* schedule tasklet */
+ if (adapter->dma_rx.adesc)
+ tasklet_schedule(&adapter->dma_rx.tasklet);
+}
+
+static void ks8842_dma_tx_cb(void *data)
+{
+ struct net_device *netdev = data;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
+
+ netdev_dbg(netdev, "TX DMA finished\n");
+
+ if (!ctl->adesc)
+ return;
+
+ netdev->stats.tx_packets++;
+ ctl->adesc = NULL;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+static void ks8842_stop_dma(struct ks8842_adapter *adapter)
+{
+ struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+ tx_ctl->adesc = NULL;
+ if (tx_ctl->chan)
+ tx_ctl->chan->device->device_control(tx_ctl->chan,
+ DMA_TERMINATE_ALL, 0);
+
+ rx_ctl->adesc = NULL;
+ if (rx_ctl->chan)
+ rx_ctl->chan->device->device_control(rx_ctl->chan,
+ DMA_TERMINATE_ALL, 0);
+
+ if (sg_dma_address(&rx_ctl->sg))
+ dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ sg_dma_address(&rx_ctl->sg) = 0;
+
+ dev_kfree_skb(rx_ctl->skb);
+ rx_ctl->skb = NULL;
+}
+
+static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
+{
+ struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+ ks8842_stop_dma(adapter);
+
+ if (tx_ctl->chan)
+ dma_release_channel(tx_ctl->chan);
+ tx_ctl->chan = NULL;
+
+ if (rx_ctl->chan)
+ dma_release_channel(rx_ctl->chan);
+ rx_ctl->chan = NULL;
+
+ tasklet_kill(&rx_ctl->tasklet);
+
+ if (sg_dma_address(&tx_ctl->sg))
+ dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
+ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ sg_dma_address(&tx_ctl->sg) = 0;
+
+ kfree(tx_ctl->buf);
+ tx_ctl->buf = NULL;
+}
+
+static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
+{
+ return chan->chan_id == (long)filter_param;
+}
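ks8842_dma_filter_fn() is the standard dmaengine filter hook: dma_request_channel() offers each free channel to the filter, which here just matches the channel id against the number handed over by the platform data. A hedged sketch of a request by id, the same way ks8842_alloc_dma_bufs() below does it (wrapper name is illustrative):

/* Sketch only: request a specific DMA_SLAVE channel by id. */
static struct dma_chan *example_request_channel(int channel_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);	/* keep the channel exclusive */

	return dma_request_channel(mask, ks8842_dma_filter_fn,
				   (void *)(long)channel_id);
}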
+
+static int ks8842_alloc_dma_bufs(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+ int err;
+
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+
+ sg_init_table(&tx_ctl->sg, 1);
+
+ tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+ (void *)(long)tx_ctl->channel);
+ if (!tx_ctl->chan) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ /* allocate DMA buffer */
+ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!tx_ctl->buf) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
+ tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ err = dma_mapping_error(adapter->dev,
+ sg_dma_address(&tx_ctl->sg));
+ if (err) {
+ sg_dma_address(&tx_ctl->sg) = 0;
+ goto err;
+ }
+
+ rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+ (void *)(long)rx_ctl->channel);
+ if (!rx_ctl->chan) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
+ (unsigned long)netdev);
+
+ return 0;
+err:
+ ks8842_dealloc_dma_bufs(adapter);
+ return err;
+}
/* Netdevice operations */
@@ -532,7 +983,26 @@ static int ks8842_open(struct net_device *netdev)
struct ks8842_adapter *adapter = netdev_priv(netdev);
int err;
- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
+ netdev_dbg(netdev, "%s - entry\n", __func__);
+
+ if (KS8842_USE_DMA(adapter)) {
+ err = ks8842_alloc_dma_bufs(netdev);
+
+ if (!err) {
+ /* start RX dma */
+ err = __ks8842_start_new_rx_dma(netdev);
+ if (err)
+ ks8842_dealloc_dma_bufs(adapter);
+ }
+
+ if (err) {
+ printk(KERN_WARNING DRV_NAME
+ ": Failed to initiate DMA, running PIO\n");
+ ks8842_dealloc_dma_bufs(adapter);
+ adapter->dma_rx.channel = -1;
+ adapter->dma_tx.channel = -1;
+ }
+ }
/* reset the HW */
ks8842_reset_hw(adapter);
@@ -542,7 +1012,7 @@ static int ks8842_open(struct net_device *netdev)
ks8842_update_link_status(netdev, adapter);
err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
- adapter);
+ netdev);
if (err) {
pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
return err;
@@ -555,10 +1025,15 @@ static int ks8842_close(struct net_device *netdev)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
+ netdev_dbg(netdev, "%s - entry\n", __func__);
+
+ cancel_work_sync(&adapter->timeout_work);
+
+ if (KS8842_USE_DMA(adapter))
+ ks8842_dealloc_dma_bufs(adapter);
/* free the irq */
- free_irq(adapter->irq, adapter);
+ free_irq(adapter->irq, netdev);
/* disable the switch */
ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
@@ -572,7 +1047,18 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
int ret;
struct ks8842_adapter *adapter = netdev_priv(netdev);
- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+
+ if (KS8842_USE_DMA(adapter)) {
+ unsigned long flags;
+ ret = ks8842_tx_frame_dma(skb, netdev);
+ /* for now only allow one transfer at a time */
+ spin_lock_irqsave(&adapter->lock, flags);
+ if (adapter->dma_tx.adesc)
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ return ret;
+ }
ret = ks8842_tx_frame(skb, netdev);
@@ -588,7 +1074,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
char *mac = (u8 *)addr->sa_data;
- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+ netdev_dbg(netdev, "%s: entry\n", __func__);
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -599,17 +1085,26 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
return 0;
}
-static void ks8842_tx_timeout(struct net_device *netdev)
+static void ks8842_tx_timeout_work(struct work_struct *work)
{
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter =
+ container_of(work, struct ks8842_adapter, timeout_work);
+ struct net_device *netdev = adapter->netdev;
unsigned long flags;
- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+ netdev_dbg(netdev, "%s: entry\n", __func__);
spin_lock_irqsave(&adapter->lock, flags);
+
+ if (KS8842_USE_DMA(adapter))
+ ks8842_stop_dma(adapter);
+
/* disable interrupts */
ks8842_write16(adapter, 18, 0, REG_IER);
ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
+
+ netif_stop_queue(netdev);
+
spin_unlock_irqrestore(&adapter->lock, flags);
ks8842_reset_hw(adapter);
@@ -617,6 +1112,18 @@ static void ks8842_tx_timeout(struct net_device *netdev)
ks8842_write_mac_addr(adapter, netdev->dev_addr);
ks8842_update_link_status(netdev, adapter);
+
+ if (KS8842_USE_DMA(adapter))
+ __ks8842_start_new_rx_dma(netdev);
+}
+
+static void ks8842_tx_timeout(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+
+ schedule_work(&adapter->timeout_work);
}
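.ndo_tx_timeout runs in atomic context while ks8842_reset_hw() ends up in msleep(), so the recovery is queued as a work item and executed from process context instead. A minimal, hedged sketch of that deferral pairing (struct and function names are illustrative):

/* Sketch only: defer work that may sleep from an atomic callback. */
struct example_priv {
	struct work_struct recover_work;
};

static void example_recover_work(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, recover_work);

	/* process context: msleep() and a full hardware reset are fine here */
	(void)priv;
}

/* init:        INIT_WORK(&priv->recover_work, example_recover_work);
 * atomic path: schedule_work(&priv->recover_work);
 */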
static const struct net_device_ops ks8842_netdev_ops = {
@@ -653,7 +1160,11 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
+ adapter->conf_flags = iomem->flags;
+
if (!adapter->hw_addr)
goto err_ioremap;
@@ -663,7 +1174,18 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
goto err_get_irq;
}
- adapter->pdev = pdev;
+ adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
+
+ /* DMA is only supported when accessed via timberdale */
+ if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
+ (pdata->tx_dma_channel != -1) &&
+ (pdata->rx_dma_channel != -1)) {
+ adapter->dma_rx.channel = pdata->rx_dma_channel;
+ adapter->dma_tx.channel = pdata->tx_dma_channel;
+ } else {
+ adapter->dma_rx.channel = -1;
+ adapter->dma_tx.channel = -1;
+ }
tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
spin_lock_init(&adapter->lock);
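The probe copies the memory resource flags into conf_flags (the 0x01/0x02 values sit in the bus-specific low bits) and takes the DMA channel numbers from ks8842_platform_data, with -1 meaning no DMA. Below is a hedged board-file sketch of how a genuine Micrel, 16-bit hookup might be declared; addresses, IRQ and window size are placeholders, not taken from the patch.

/* Sketch only: platform declaration for a KS8841/42 on a 16-bit bus.
 * 0x01/0x02 mirror MICREL_KS884X/KS884X_16BIT above; needs
 * <linux/platform_device.h> and <linux/ks8842.h>.
 */
static struct resource example_ks8842_resources[] = {
	{
		.start	= 0x2000,			/* placeholder */
		.end	= 0x2000 + 0x40 - 1,		/* placeholder */
		.flags	= IORESOURCE_MEM | 0x01 | 0x02,	/* Micrel, 16-bit */
	},
	{
		.start	= 5,				/* placeholder IRQ */
		.end	= 5,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ks8842_platform_data example_ks8842_pdata = {
	.rx_dma_channel	= -1,	/* DMA only exists behind timberdale */
	.tx_dma_channel	= -1,
};

static struct platform_device example_ks8842_device = {
	.name		= "ks8842",
	.id		= -1,
	.resource	= example_ks8842_resources,
	.num_resources	= ARRAY_SIZE(example_ks8842_resources),
	.dev		= {
		.platform_data	= &example_ks8842_pdata,
	},
};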