author		Thomas Gleixner		2014-03-18 18:19:10 +0100
committer	Marc Kleine-Budde	2014-04-01 11:54:58 +0200
commit		64f08f2f3544eb8b6b14fd35e6087d7d3ede77cd (patch)
tree		ea75d2125f91762caedc2d287ad90b496046ebd2 /drivers
parent		can: c_can: Make it SMP safe (diff)
can: c_can: Fix buffer ordering
The buffer handling of c_can has been broken forever. That leads to
message reordering:

  ksoftirqd/0-3  [000] ..s.  79.123776: c_can_poll: rx_poll: val: 00007fff
  ksoftirqd/0-3  [000] ..s.  79.124101: c_can_poll: rx_poll: val: 00008001

What happens is:

  CPU                           HW
                                queue new packet into obj 16 (0-15 are busy)
  read obj 1-15
  return because pending is 0
                                set pending obj 16 -> pending reg 8000
                                queue new packet into obj 1
                                set pending obj 1  -> pending reg 8001

So the current algorithm reads the newest message first, which violates
the ordering rules of CAN.

Add proper handling of that situation by analyzing the contents of the
pending register for gaps.

This does NOT fix the message object corruption which can lead to
interrupt storms. That's addressed in the next patches.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[mkl: adjusted subject]
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
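To see what the fix does with the trace values above, here is a minimal user-space sketch of the gap detection this patch introduces. adjust_pending(), hweight32() and fls32() below are illustrative stand-ins rather than the driver code, and the RECEIVE_OBJECT_BITS value is assumed to be the mask of all 16 RX objects.

#include <stdio.h>
#include <stdint.h>

/* Assumed: all 16 RX message objects pending (bits 0-15). */
#define RECEIVE_OBJECT_BITS	0x0000ffffu

/* User-space stand-ins for the kernel's hweight32() and fls(). */
static unsigned int hweight32(uint32_t x) { return __builtin_popcount(x); }
static unsigned int fls32(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }

/*
 * Mirror of the gap handling this patch adds: if the highest pending
 * bit sits beyond the number of pending bits there is a gap, and only
 * the bits above the gap (the older messages) are returned.
 */
static uint32_t adjust_pending(uint32_t pend)
{
	unsigned int weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	weight = hweight32(pend);
	lasts = fls32(pend);

	/* Contiguous from object 1 upwards: nothing to do. */
	if (lasts == weight)
		return pend;

	/* Walk down from the last set bit to the start of its run. */
	for (lasts--; pend & (1u << (lasts - 1)); lasts--)
		;

	return pend & ~((1u << lasts) - 1);
}

int main(void)
{
	/* Pending values from the trace above. */
	printf("0x7fff -> 0x%04x\n", adjust_pending(0x7fff)); /* no gap: 0x7fff */
	printf("0x8001 -> 0x%04x\n", adjust_pending(0x8001)); /* gap: 0x8000, obj 16 first */
	return 0;
}

For 0x7fff the bits are contiguous and nothing changes; for 0x8001 only the bit for object 16 survives, so the older message is read before object 1 and the remaining lower bits are left for the next pass of the poll loop.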
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/can/c_can/c_can.c	52
1 file changed, 50 insertions, 2 deletions
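The diff below reshapes the rx_poll loop so that a cached pending mask is drained in two passes: bits above a gap first, then the rest. The following self-contained toy model sketches that loop shape under the same 0x8001 scenario; read_pending(), handle_obj() and upper_run() (a compact equivalent of c_can_adjust_pending() for non-zero masks) are hypothetical helpers for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Simulated "hardware" pending register: objects 1 and 16, as in the trace. */
static uint32_t hw_pending = 0x8001;

static uint32_t read_pending(void)
{
	uint32_t p = hw_pending;

	hw_pending = 0;		/* pretend the objects get drained */
	return p;
}

static void handle_obj(unsigned int obj)
{
	printf("handled message object %u\n", obj);
}

/*
 * Same rule as c_can_adjust_pending(): keep only the topmost contiguous
 * run of set bits, so the (older) objects above a gap are served first.
 */
static uint32_t upper_run(uint32_t pend)
{
	unsigned int low = 32 - __builtin_clz(pend);	/* fls(pend) */

	while (low > 1 && (pend & (1u << (low - 2))))
		low--;
	return pend & ~((1u << (low - 1)) - 1);
}

static int rx_poll_model(int quota)
{
	uint32_t val, pend = 0;
	int num_rx_pkts = 0;

	while (quota > 0) {
		if (!pend) {
			pend = read_pending();
			if (!pend)
				break;
			/* Bits above a gap first, lower bits on the next pass. */
			val = upper_run(pend);
		} else {
			val = pend;
		}
		pend &= ~val;

		while (val && quota > 0) {
			unsigned int obj = __builtin_ffs(val);

			val &= ~(1u << (obj - 1));
			handle_obj(obj);
			num_rx_pkts++;
			quota--;
		}
	}
	return num_rx_pkts;
}

int main(void)
{
	/* Handles object 16 before object 1, preserving CAN ordering. */
	printf("%d packets\n", rx_poll_model(16));
	return 0;
}

Run against the simulated pending value 0x8001, the model handles object 16 before object 1, which is exactly the ordering the patch restores.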
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 38f9adaf15ac..cef9967eff93 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -817,6 +817,38 @@ static void c_can_do_tx(struct net_device *dev)
 }
 
 /*
+ * If we have a gap in the pending bits, that means we either
+ * raced with the hardware or failed to readout all upper
+ * objects in the last run due to quota limit.
+ */
+static u32 c_can_adjust_pending(u32 pend)
+{
+	u32 weight, lasts;
+
+	if (pend == RECEIVE_OBJECT_BITS)
+		return pend;
+
+	/*
+	 * If the last set bit is larger than the number of pending
+	 * bits we have a gap.
+	 */
+	weight = hweight32(pend);
+	lasts = fls(pend);
+
+	/* If the bits are linear, nothing to do */
+	if (lasts == weight)
+		return pend;
+
+	/*
+	 * Find the first set bit after the gap. We walk backwards
+	 * from the last set bit.
+	 */
+	for (lasts--; pend & (1 << (lasts - 1)); lasts--);
+
+	return pend & ~((1 << lasts) - 1);
+}
+
+/*
  * theory of operation:
  *
  * c_can core saves a received CAN message into the first free message
@@ -843,7 +875,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 	u32 num_rx_pkts = 0;
 	unsigned int msg_obj, msg_ctrl_save;
 	struct c_can_priv *priv = netdev_priv(dev);
-	u16 val;
+	u32 val, pend = 0;
 
 	/*
 	 * It is faster to read only one 16bit register. This is only possible
@@ -852,7 +884,23 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
 			"Implementation does not support more message objects than 16");
 
-	while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
+	while (quota > 0) {
+
+		if (!pend) {
+			pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+			if (!pend)
+				return num_rx_pkts;
+			/*
+			 * If the pending field has a gap, handle the
+			 * bits above the gap first.
+			 */
+			val = c_can_adjust_pending(pend);
+		} else {
+			val = pend;
+		}
+		/* Remove the bits from pend */
+		pend &= ~val;
+
 		while ((msg_obj = ffs(val)) && quota > 0) {
 			val &= ~BIT(msg_obj - 1);