author	Peter Zijlstra	2008-10-07 23:22:33 +0200
committer	David S. Miller	2008-10-07 23:22:33 +0200
commit	654bed16cf86a9ef94495d9e6131b7ff7840a3dd (patch)
tree	7d64fdb8de3e893b1acf5f420890a503a4e0327f
parent	net: wrap sk->sk_backlog_rcv() (diff)
net: packet split receive api
Add some packet-split receive hooks. For one, this allows NUMA-node-affine page allocations. Later on these hooks will be extended to do emergency reserve allocations for fragments.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/skbuff.h	23
-rw-r--r--	net/core/skbuff.c	20
2 files changed, 43 insertions, 0 deletions
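
For orientation, a driver's receive path might use the new hooks roughly as sketched below. Only netdev_alloc_skb(), netdev_alloc_page(), netdev_free_page(), skb_put() and skb_add_rx_frag() are real interfaces; the example_* names and the descriptor layout are hypothetical, made up purely for illustration and not part of this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_rx_desc {	/* hypothetical per-ring-entry state */
	struct page *page;
};

/* Refill: allocate a receive page local to the device's NUMA node. */
static int example_rx_refill(struct net_device *dev,
			     struct example_rx_desc *desc)
{
	struct page *page = netdev_alloc_page(dev);

	if (!page)
		return -ENOMEM;

	desc->page = page;
	return 0;
}

/* Completion: build an skb with the header in the linear area and the
 * payload attached as a page fragment. */
static struct sk_buff *example_rx_complete(struct net_device *dev,
					   struct example_rx_desc *desc,
					   unsigned int hdr_len,
					   unsigned int frag_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, hdr_len);

	if (!skb) {
		netdev_free_page(dev, desc->page);
		return NULL;
	}

	/* The device would normally have DMAed the header here already;
	 * skb_put() just accounts for it in the linear area. */
	skb_put(skb, hdr_len);

	/* Attach the payload page as fragment 0; skb_add_rx_frag() also
	 * updates skb->len, skb->data_len and skb->truesize. */
	skb_add_rx_frag(skb, 0, desc->page, 0, frag_len);
	desc->page = NULL;

	return skb;
}
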
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 720b688c22b6..2725f4e5a9bf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -968,6 +968,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
+extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size);
+
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
@@ -1382,6 +1385,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
+extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+
+/**
+ * netdev_alloc_page - allocate a page for ps-rx on a specific device
+ * @dev: network device to receive on
+ *
+ * Allocate a new page node local to the specified device.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *netdev_alloc_page(struct net_device *dev)
+{
+ return __netdev_alloc_page(dev, GFP_ATOMIC);
+}
+
+static inline void netdev_free_page(struct net_device *dev, struct page *page)
+{
+ __free_page(page);
+}
+
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8bd248a64879..7f7bb1a636d9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -263,6 +263,26 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
return skb;
}
+struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ struct page *page;
+
+ page = alloc_pages_node(node, gfp_mask, 0);
+ return page;
+}
+EXPORT_SYMBOL(__netdev_alloc_page);
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size)
+{
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+}
+EXPORT_SYMBOL(skb_add_rx_frag);
+
/**
* dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
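
Design note on __netdev_alloc_page(): dev_to_node() reports the NUMA node of the device's parent (typically the underlying PCI device), so receive pages end up local to the NIC; when the device has no parent, node is -1 and alloc_pages_node() falls back to allocating on the current node. Per the commit message, these hooks are also the place where emergency reserve allocations for fragments can later be slotted in.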