summaryrefslogtreecommitdiffstats
path: root/src/net/netdevice.c
diff options
context:
space:
mode:
author	Michael Brown	2013-05-01 15:05:42 +0200
committer	Michael Brown	2013-05-01 15:05:42 +0200
commit2095ed413ec213e2b3874575004eee24f62c2ff4 (patch)
tree65dfe118491e4ea1c0128d95d0fb47c93e7623f6 /src/net/netdevice.c
parent[dhcp] Remove obsolete bootp.h header (diff)
download	ipxe-2095ed413ec213e2b3874575004eee24f62c2ff4.tar.gz
ipxe-2095ed413ec213e2b3874575004eee24f62c2ff4.tar.xz
ipxe-2095ed413ec213e2b3874575004eee24f62c2ff4.zip
[netdevice] Add netdev_tx_defer() to allow drivers to defer transmissions
Devices with small transmit descriptor rings may temporarily run out of space. Provide netdev_tx_defer() to allow drivers to defer packets for retransmission as soon as a descriptor becomes available. Signed-off-by: Michael Brown <mcb30@ipxe.org>
Diffstat (limited to 'src/net/netdevice.c')
-rw-r--r--	src/net/netdevice.c | 90
1 file changed, 87 insertions(+), 3 deletions(-)
diff --git a/src/net/netdevice.c b/src/net/netdevice.c
index 88ca93f9..1191ebc1 100644
--- a/src/net/netdevice.c
+++ b/src/net/netdevice.c
@@ -31,6 +31,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include <ipxe/tables.h>
#include <ipxe/process.h>
#include <ipxe/init.h>
+#include <ipxe/malloc.h>
#include <ipxe/device.h>
#include <ipxe/errortab.h>
#include <ipxe/vlan.h>
@@ -213,6 +214,43 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) {
}
/**
+ * Defer transmitted packet
+ *
+ * @v netdev Network device
+ * @v iobuf I/O buffer
+ *
+ * Drivers may call netdev_tx_defer() if there is insufficient space
+ * in the transmit descriptor ring. Any packets deferred in this way
+ * will be automatically retransmitted as soon as space becomes
+ * available (i.e. as soon as the driver calls netdev_tx_complete()).
+ *
+ * The packet must currently be in the network device's TX queue.
+ *
+ * Drivers utilising netdev_tx_defer() must ensure that space in the
+ * transmit descriptor ring is freed up @b before calling
+ * netdev_tx_complete(). For example, if the ring is modelled using a
+ * producer counter and a consumer counter, then the consumer counter
+ * must be incremented before the call to netdev_tx_complete().
+ * Failure to do this will cause the retransmitted packet to be
+ * immediately redeferred (which will result in out-of-order
+ * transmissions and other nastiness).
+ */
+void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) {
+
+ /* Catch data corruption as early as possible */
+ list_check_contains_entry ( iobuf, &netdev->tx_queue, list );
+
+ /* Remove from transmit queue */
+ list_del ( &iobuf->list );
+
+ /* Add to deferred transmit queue */
+ list_add_tail ( &iobuf->list, &netdev->tx_deferred );
+
+ /* Record "out of space" statistic */
+ netdev_tx_err ( netdev, NULL, -ENOBUFS );
+}
+
+/**
* Discard transmitted packet
*
* @v netdev Network device
@@ -257,6 +295,13 @@ void netdev_tx_complete_err ( struct net_device *netdev,
/* Dequeue and free I/O buffer */
list_del ( &iobuf->list );
netdev_tx_err ( netdev, iobuf, rc );
+
+ /* Transmit first pending packet, if any */
+ if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
+ struct io_buffer, list ) ) != NULL ) {
+ list_del ( &iobuf->list );
+ netdev_tx ( netdev, iobuf );
+ }
}
/**
@@ -270,9 +315,9 @@ void netdev_tx_complete_err ( struct net_device *netdev,
void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
struct io_buffer *iobuf;
- list_for_each_entry ( iobuf, &netdev->tx_queue, list ) {
+ if ( ( iobuf = list_first_entry ( &netdev->tx_queue, struct io_buffer,
+ list ) ) != NULL ) {
netdev_tx_complete_err ( netdev, iobuf, rc );
- return;
}
}
@@ -283,10 +328,15 @@ void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
*/
static void netdev_tx_flush ( struct net_device *netdev ) {
- /* Discard any packets in the TX queue */
+ /* Discard any packets in the TX queue. This will also cause
+ * any packets in the deferred TX queue to be discarded
+ * automatically.
+ */
while ( ! list_empty ( &netdev->tx_queue ) ) {
netdev_tx_complete_next_err ( netdev, -ECANCELED );
}
+ assert ( list_empty ( &netdev->tx_queue ) );
+ assert ( list_empty ( &netdev->tx_deferred ) );
}
/**
@@ -424,6 +474,7 @@ struct net_device * alloc_netdev ( size_t priv_size ) {
ref_init ( &netdev->refcnt, free_netdev );
netdev->link_rc = -EUNKNOWN_LINK_STATUS;
INIT_LIST_HEAD ( &netdev->tx_queue );
+ INIT_LIST_HEAD ( &netdev->tx_deferred );
INIT_LIST_HEAD ( &netdev->rx_queue );
netdev_settings_init ( netdev );
netdev->priv = ( ( ( void * ) netdev ) + sizeof ( *netdev ) );
@@ -817,3 +868,36 @@ __weak struct net_device * vlan_find ( struct net_device *trunk __unused,
/** Networking stack process */
PERMANENT_PROCESS ( net_process, net_step );
+
+/**
+ * Discard some cached network device data
+ *
+ * @ret discarded Number of cached items discarded
+ */
+static unsigned int net_discard ( void ) {
+ struct net_device *netdev;
+ struct io_buffer *iobuf;
+ unsigned int discarded = 0;
+
+ /* Try to drop one deferred TX packet from each network device */
+ for_each_netdev ( netdev ) {
+ if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
+ struct io_buffer,
+ list ) ) != NULL ) {
+
+ /* Discard first deferred packet */
+ list_del ( &iobuf->list );
+ free ( iobuf );
+
+ /* Report discard */
+ discarded++;
+ }
+ }
+
+ return discarded;
+}
+
+/** Network device cache discarder */
+struct cache_discarder net_discarder __cache_discarder ( CACHE_NORMAL ) = {
+ .discard = net_discard,
+};