author    David Vrabel <david.vrabel@citrix.com>    2014-10-22 15:08:54 +0200
committer David S. Miller <davem@davemloft.net>     2014-10-25 20:15:20 +0200
commit f48da8b14d04ca87ffcffe68829afd45f926ec6a (patch)
tree   0311447e90c3bfd85d2177637031dc3d1e81a054 /drivers/net/xen-netback/interface.c
parent xen-netback: make feature-rx-notify mandatory (diff)
xen-netback: fix unlimited guest Rx internal queue and carrier flapping
Netback needs to discard old to-guest skbs (guest Rx queue drain) and it
needs to detect guest Rx stalls (so it can take the carrier down and have
packets discarded earlier), but the current implementation is very broken:

1. The check in hard_start_xmit of slot availability does not consider
   the number of packets already in the guest Rx queue, which allows the
   queue to grow without bound: the guest stops consuming packets and the
   ring fills, leaving S slots free; netback queues a packet requiring
   more than S slots (ensuring the ring stays with S slots free); netback
   then queues packets indefinitely, provided each requires S or fewer
   slots.

2. The Rx stall detection is not triggered in this case since the (host)
   Tx queue is not stopped.

3. If the Tx queue is stopped and a guest Rx interrupt occurs, netback
   considers this an Rx purge event, which may result in it taking the
   carrier down unnecessarily. It also considers a queue with only one
   free slot as unstalled (even though the next packet might not fit in
   it).

The internal guest Rx queue is now limited by byte length (to 512 KiB,
enough for half the ring). The (host) Tx queue is stopped and started
based on this limit, which sets an upper bound on the amount of memory
used by packets on the internal queue.

This allows the per-skb estimation of the number of required slots to be
removed (it wasn't a very good estimate anyway). Instead, the guest Rx
thread simply waits for enough free slots for a maximum-sized packet.

skbs queued on the internal queue have an 'expires' time (set to the
current time plus the drain timeout). The guest Rx thread detects when
the skb at the head of the queue has expired and discards expired skbs,
which sets a clear upper bound on how long an skb can remain queued. For
a guest being destroyed, the maximum time needed to wait for all the
packets it sent to be dropped is still the drain timeout (10 s), since
it will not be sending new packets.

Rx stall detection is reintroduced in a later commit.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
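[Editor's note: the enqueue helper this diff switches to, xenvif_rx_queue_tail(),
lives in netback.c, so its body is not visible in this per-file view. As a rough
sketch of the behaviour the commit message describes, assuming an rx_queue_len
byte counter guarded by the queue's own list lock (the names mirror the driver's
conventions, but the body below is illustrative, not the committed code):

	/* Illustrative sketch only -- the real helper is in
	 * drivers/net/xen-netback/netback.c, not in this diff.
	 */
	void xenvif_rx_queue_tail(struct xenvif_queue *queue,
				  struct sk_buff *skb)
	{
		unsigned long flags;

		spin_lock_irqsave(&queue->rx_queue.lock, flags);

		__skb_queue_tail(&queue->rx_queue, skb);

		/* Stop the (host) Tx queue once the byte limit is
		 * exceeded, so xenvif_start_xmit() can no longer grow
		 * the internal queue without bound.
		 */
		queue->rx_queue_len += skb->len;
		if (queue->rx_queue_len > queue->rx_queue_max)
			netif_tx_stop_queue(
				netdev_get_tx_queue(queue->vif->dev,
						    queue->id));

		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
	}

Assuming 4 KiB pages, the rx ring has 256 slots, so the XENVIF_RX_QUEUE_BYTES
definition added in the diff below works out to 256/2 * 4096 = 512 KiB, the
"half the ring" figure from the commit message.]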
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c | 59
1 file changed, 11 insertions(+), 48 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index c6759b1ec18d..a134d52f55b4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,9 @@
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
+/* Number of bytes allowed on the internal guest Rx queue. */
+#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
* increasing the inflight counter. We need to increase the inflight
* counter because core driver calls into xenvif_zerocopy_callback
@@ -63,7 +66,8 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
- test_bit(VIF_STATUS_CONNECTED, &vif->status);
+ test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+ !vif->disabled;
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -104,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
- struct netdev_queue *net_queue =
- netdev_get_tx_queue(queue->vif->dev, queue->id);
- /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
- * the carrier went down and this queue was previously blocked
- */
- if (unlikely(netif_tx_queue_stopped(net_queue) ||
- (!netif_carrier_ok(queue->vif->dev) &&
- test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
- set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
xenvif_kick_thread(queue);
return IRQ_HANDLED;
@@ -141,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
-/* Callback to wake the queue's thread and turn the carrier off on timeout */
-static void xenvif_rx_stalled(unsigned long data)
-{
- struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
- if (xenvif_queue_stopped(queue)) {
- set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
- xenvif_kick_thread(queue);
- }
-}
-
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
u16 index;
- int min_slots_needed;
+ struct xenvif_rx_cb *cb;
BUG_ON(skb->dev != dev);
@@ -181,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
!xenvif_schedulable(vif))
goto drop;
- /* At best we'll need one slot for the header and one for each
- * frag.
- */
- min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
+ cb = XENVIF_RX_CB(skb);
+ cb->expires = jiffies + rx_drain_timeout_jiffies;
- /* If the skb is GSO then we'll also need an extra slot for the
- * metadata.
- */
- if (skb_is_gso(skb))
- min_slots_needed++;
-
- /* If the skb can't possibly fit in the remaining slots
- * then turn off the queue to give the ring a chance to
- * drain.
- */
- if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
- queue->rx_stalled.function = xenvif_rx_stalled;
- queue->rx_stalled.data = (unsigned long)queue;
- netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
- mod_timer(&queue->rx_stalled,
- jiffies + rx_drain_timeout_jiffies);
- }
-
- skb_queue_tail(&queue->rx_queue, skb);
+ xenvif_rx_queue_tail(queue, skb);
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
@@ -498,6 +462,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
init_timer(&queue->credit_timeout);
queue->credit_window_start = get_jiffies_64();
+ queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
skb_queue_head_init(&queue->rx_queue);
skb_queue_head_init(&queue->tx_queue);
@@ -529,8 +495,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
}
- init_timer(&queue->rx_stalled);
-
return 0;
}
@@ -664,7 +628,6 @@ void xenvif_disconnect(struct xenvif *vif)
netif_napi_del(&queue->napi);
if (queue->task) {
- del_timer_sync(&queue->rx_stalled);
kthread_stop(queue->task);
queue->task = NULL;
}