| | | |
|---|---|---|
| author | Wedson Almeida Filho | 2013-06-24 08:33:48 +0200 |
| committer | David S. Miller | 2013-06-24 10:46:01 +0200 |
| commit | aeb193ea6cef28e33589de05ef932424f8e19bde (patch) | |
| tree | 222421861caac0d53654984cd227c6946c1a4241 /net | |
| parent | Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/lin... (diff) | |
net: Unmap fragment page once iterator is done
Callers of skb_seq_read() are currently forced to call skb_abort_seq_read() even when they have consumed all the data, because the last call to skb_seq_read() (the one that returns 0 to indicate the end) fails to unmap the last fragment page.
With this patch, callers can traverse the skb data by calling skb_prepare_seq_read() once and then calling skb_seq_read() repeatedly, as originally intended (and documented in the original commit 677e90eda); skb_abort_seq_read() only needs to be called if the sequential read is actually aborted.
Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
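To make the intended calling convention concrete, here is a minimal, hypothetical caller (not part of this patch; the helper name example_count_bytes is invented for illustration) that walks an entire skb with the sequential-read iterator. With this fix applied, a read that runs to completion no longer needs skb_abort_seq_read():

```c
#include <linux/skbuff.h>

/*
 * Hypothetical example, not part of this patch: visit every chunk of
 * an skb via the sequential-read iterator.  Because the final
 * skb_seq_read() call (the one that returns 0) now unmaps the last
 * fragment page itself, no skb_abort_seq_read() is required when the
 * read runs to completion.
 */
static unsigned int example_count_bytes(struct sk_buff *skb)
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len;
        unsigned int consumed = 0;

        skb_prepare_seq_read(skb, 0, skb->len, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0)
                consumed += len;        /* each linear/frag chunk in turn */

        /* iterator finished: no skb_abort_seq_read() needed */
        return consumed;
}
```

This is exactly the pattern the batman-adv change below relies on: its loop always runs until skb_seq_read() returns 0, so the explicit abort call can be dropped.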
Diffstat (limited to 'net')
-rw-r--r-- | net/batman-adv/main.c | 1 |
-rw-r--r-- | net/core/skbuff.c | 7 |
2 files changed, 6 insertions, 2 deletions
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 51aafd669cbb..08125f3f6064 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -473,7 +473,6 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
                 crc = crc32c(crc, data, len);
                 consumed += len;
         }
-        skb_abort_seq_read(&st);
 
         return htonl(crc);
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edf37578e21e..9f73eca29fbe 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2541,8 +2541,13 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
         unsigned int block_limit, abs_offset = consumed + st->lower_offset;
         skb_frag_t *frag;
 
-        if (unlikely(abs_offset >= st->upper_offset))
+        if (unlikely(abs_offset >= st->upper_offset)) {
+                if (st->frag_data) {
+                        kunmap_atomic(st->frag_data);
+                        st->frag_data = NULL;
+                }
                 return 0;
+        }
 
 next_skb:
         block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
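For contrast, a sketch of the case where skb_abort_seq_read() is still required: when the caller stops before skb_seq_read() returns 0, a fragment page may still be mapped, so the read has to be aborted explicitly. The helper name example_find_byte is hypothetical and only illustrates the pattern described in the commit message.

```c
#include <linux/skbuff.h>
#include <linux/string.h>

/*
 * Hypothetical example, not part of this patch: scan an skb for a byte
 * and stop early.  Because the iterator never reaches the terminating
 * skb_seq_read() == 0 call, skb_abort_seq_read() must be called to
 * release any fragment mapping still held by the iterator state.
 */
static bool example_find_byte(struct sk_buff *skb, u8 needle)
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len;
        unsigned int consumed = 0;

        skb_prepare_seq_read(skb, 0, skb->len, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                if (memchr(data, needle, len)) {
                        skb_abort_seq_read(&st);        /* read aborted early */
                        return true;
                }
                consumed += len;
        }

        return false;   /* read ran to completion; nothing to clean up */
}
```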