author     Michael Brown  2007-01-09 22:47:01 +0100
committer  Michael Brown  2007-01-09 22:47:01 +0100
commit     c65fae2475ca652ef7948f286881b0c06bce861b
tree       5588ec4b947ecc79201ee613d1cd0a0a7ca6d1d8  /src/net/netdevice.c
parent     Autopadding was sometimes overwriting the struct list_head at the end
Add RX quotas to the net device poll() method. This avoids the problem
of alloc_pkb() exhaustion when e.g. an iSCSI-booted DOS session is left idle for a long time at the C:\ prompt and builds up a huge packet backlog.
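
As a minimal sketch of what the new interface asks of drivers (an assumption for illustration, not code from this commit): a poll() method stops pulling frames off the NIC once rx_quota is exhausted, so an idle system can no longer drain the alloc_pkb() pool. The mynic_* helpers, RX_BUF_LEN and the header paths are hypothetical; struct pk_buff, alloc_pkb() and netdev_rx() are the names used by the surrounding code.

#include <gpxe/pkbuff.h>	/* header locations assumed for the gPXE tree of this era */
#include <gpxe/netdevice.h>

#define RX_BUF_LEN 1536		/* hypothetical receive buffer size */

/* Hypothetical hardware access helpers */
extern int mynic_rx_ready ( struct net_device *netdev );
extern void mynic_read_packet ( struct net_device *netdev,
				struct pk_buff *pkb );

static void mynic_poll ( struct net_device *netdev,
			 unsigned int rx_quota ) {
	struct pk_buff *pkb;

	/* Stop receiving once the quota is used up, even if the
	 * hardware still has frames waiting.
	 */
	while ( rx_quota && mynic_rx_ready ( netdev ) ) {
		pkb = alloc_pkb ( RX_BUF_LEN );
		if ( ! pkb )
			break;
		mynic_read_packet ( netdev, pkb );
		/* Hand the frame to the stack's RX queue */
		netdev_rx ( netdev, pkb );
		rx_quota--;
	}
}
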
Diffstat (limited to 'src/net/netdevice.c')
-rw-r--r--  src/net/netdevice.c  |  40
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/src/net/netdevice.c b/src/net/netdevice.c
index 6da2ddfd..40f836d4 100644
--- a/src/net/netdevice.c
+++ b/src/net/netdevice.c
@@ -126,16 +126,17 @@ void netdev_rx ( struct net_device *netdev, struct pk_buff *pkb ) {
* Poll for packet on network device
*
* @v netdev Network device
+ * @v rx_quota Maximum number of packets to receive
* @ret True There are packets present in the receive queue
* @ret False There are no packets present in the receive queue
*
* Polls the network device for received packets. Any received
* packets will be added to the RX packet queue via netdev_rx().
*/
-int netdev_poll ( struct net_device *netdev ) {
+int netdev_poll ( struct net_device *netdev, unsigned int rx_quota ) {
if ( netdev->state & NETDEV_OPEN )
- netdev->poll ( netdev );
+ netdev->poll ( netdev, rx_quota );
return ( ! list_empty ( &netdev->rx_queue ) );
}
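
For illustration only (again an assumption, not part of this patch): the boolean return value documented above combines naturally with the new quota argument, so a hypothetical caller could poll one packet at a time until something lands on the RX queue.

/* Hypothetical blocking receive built on the new interface: poll the
 * device with a quota of one packet until the RX queue is non-empty,
 * then dequeue the packet for processing.
 */
static struct pk_buff * mynic_wait_rx ( struct net_device *netdev ) {
	while ( ! netdev_poll ( netdev, 1 ) )
		/* spin; a real caller would bound this wait */ ;
	return netdev_rx_dequeue ( netdev );
}
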
@@ -351,14 +352,9 @@ int net_rx ( struct pk_buff *pkb, struct net_device *netdev,
*
* @v process Network stack process
*
- * This polls all interfaces for any received packets, and processes
- * at most one packet from the RX queue.
+ * This polls all interfaces for received packets, and processes
+ * packets from the RX queue.
*
- * We avoid processing all received packets, because processing the
- * received packet can trigger transmission of a new packet (e.g. an
- * ARP response). Since TX completions will be processed as part of
- * the poll operation, it is easy to overflow small TX queues if
- * multiple packets are processed per poll.
*/
static void net_step ( struct process *process ) {
struct net_device *netdev;
@@ -367,10 +363,28 @@ static void net_step ( struct process *process ) {
/* Poll and process each network device */
list_for_each_entry ( netdev, &net_devices, list ) {
- /* Poll for new packets */
- netdev_poll ( netdev );
-
- /* Handle at most one received packet per poll */
+ /* Poll for new packets. Limit RX queue size to a
+ * single packet, because otherwise most drivers are
+ * in serious danger of running out of memory and
+ * having to drop packets.
+ *
+ * This limitation isn't relevant to devices that
+ * preallocate packet buffers (i.e. devices with
+ * descriptor-based RX datapaths). We might at some
+ * point want to relax the quota for such devices.
+ */
+ netdev_poll ( netdev,
+ ( list_empty ( &netdev->rx_queue ) ? 1 : 0 ) );
+
+ /* Handle at most one received packet per poll. We
+ * avoid processing more than one packet per call to
+ * netdev_poll(), because processing the received
+ * packet can trigger transmission of a new packet
+ * (e.g. an ARP response). Since TX completions will
+ * be processed as part of the poll operation, it is
+ * easy to overflow small TX queues if multiple
+ * packets are processed per poll.
+ */
if ( ( pkb = netdev_rx_dequeue ( netdev ) ) ) {
DBGC ( netdev, "NETDEV %p processing %p\n",
netdev, pkb );