From c24fb5e68ebfd485ecaa8b5d40cb4721a1d137ee Mon Sep 17 00:00:00 2001
From: Matteo Croce
Date: Wed, 20 Jun 2018 16:04:34 +0200
Subject: bpfilter: fix user mode helper cross compilation

Use $(OBJDUMP) instead of literal 'objdump' to avoid using the host
toolchain when cross compiling.

Fixes: 421780fd4983 ("bpfilter: fix build error")
Signed-off-by: Matteo Croce
Reported-by: Stefano Brivio
Signed-off-by: David S. Miller
---
 net/bpfilter/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile
index dd86b022eff0..051dc18b8ccb 100644
--- a/net/bpfilter/Makefile
+++ b/net/bpfilter/Makefile
@@ -22,7 +22,7 @@ endif
 quiet_cmd_copy_umh = GEN $@
       cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
       $(OBJCOPY) -I binary \
-          `LC_ALL=C objdump -f net/bpfilter/bpfilter_umh \
+          `LC_ALL=C $(OBJDUMP) -f net/bpfilter/bpfilter_umh \
           |awk -F' |,' '/file format/{print "-O",$$NF} \
                         /^architecture:/{print "-B",$$2}'` \
           --rename-section .data=.init.rodata $< $@

--
cgit v1.2.3-55-g7522

From 456488cd952c23d451b0bdc5b5226b0707db80f7 Mon Sep 17 00:00:00 2001
From: Vakul Garg
Date: Thu, 21 Jun 2018 03:29:49 +0530
Subject: strparser: Don't schedule in workqueue in paused state

In function strp_data_ready(), it is useless to call queue_work if the
state of the strparser is already paused. The state check should be
done before calling queue_work. The change reduces context switches and
improves ktls-rx throughput by approx 20% (measured on a cortex-a53
based platform).

Signed-off-by: Vakul Garg
Acked-by: Dave Watson
Signed-off-by: David S. Miller
---
 net/strparser/strparser.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 1a9695183599..373836615c57 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -392,7 +392,7 @@ static int strp_read_sock(struct strparser *strp)
 /* Lower sock lock held */
 void strp_data_ready(struct strparser *strp)
 {
-        if (unlikely(strp->stopped))
+        if (unlikely(strp->stopped) || strp->paused)
                 return;
 
         /* This check is needed to synchronize with do_strp_work.
@@ -407,9 +407,6 @@ void strp_data_ready(struct strparser *strp)
                 return;
         }
 
-        if (strp->paused)
-                return;
-
         if (strp->need_bytes) {
                 if (strp_peek_len(strp) < strp->need_bytes)
                         return;

--
cgit v1.2.3-55-g7522

From fedb1bd3d274b33c432cb83c80c6b3cf54d509c8 Mon Sep 17 00:00:00 2001
From: Marcelo Ricardo Leitner
Date: Wed, 20 Jun 2018 12:47:52 -0300
Subject: sctp: fix erroneous inc of snmp SctpFragUsrMsgs

Currently SctpFragUsrMsgs is incremented when the user message is
exactly the same size as the maximum fragment size, which is wrong.
The fix is to increment it only when the user message is bigger than
the maximum fragment size.

Fixes: bfd2e4b8734d ("sctp: refactor sctp_datamsg_from_user")
Signed-off-by: Marcelo Ricardo Leitner
Acked-by: Neil Horman
Signed-off-by: David S. Miller
---
 net/sctp/chunk.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 79daa98208c3..bfb9f812e2ef 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
         /* Account for a different sized first fragment */
         if (msg_len >= first_len) {
                 msg->can_delay = 0;
-                SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+                if (msg_len > first_len)
+                        SCTP_INC_STATS(sock_net(asoc->base.sk),
+                                       SCTP_MIB_FRAGUSRMSGS);
         } else {
                 /* Which may be the only one... */
                 first_len = msg_len;

--
cgit v1.2.3-55-g7522
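The boundary the sctp patch above corrects is easy to restate: with a maximum
fragment size F, a message of exactly F bytes still fits in a single chunk and
is not fragmented; only msg_len > F should bump the counter. A tiny standalone
C sketch of that condition (the 1452-byte limit is an invented example value,
not taken from the SCTP code):

    #include <stdbool.h>
    #include <stdio.h>

    /* A user message counts as fragmented only when it does not fit
     * in a single chunk of max_frag bytes. */
    static bool counts_as_fragmented(size_t msg_len, size_t max_frag)
    {
        return msg_len > max_frag;
    }

    int main(void)
    {
        size_t max_frag = 1452;   /* e.g. a payload limit derived from the path MTU */

        printf("1452 bytes -> %d\n", counts_as_fragmented(1452, max_frag)); /* 0: one chunk */
        printf("1453 bytes -> %d\n", counts_as_fragmented(1453, max_frag)); /* 1: two chunks */
        return 0;
    }

The old code used >=, so a message that exactly filled one chunk was counted
as a fragmented user message even though it produced a single chunk.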
From 945d015ee0c3095d2290e845565a23dedfd8027c Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 21 Jun 2018 14:16:02 -0700
Subject: net/packet: fix use-after-free

We should put copy_skb in receive_queue only after a successful call
to virtio_net_hdr_from_skb().

syzbot report :

BUG: KASAN: use-after-free in __skb_unlink include/linux/skbuff.h:1843 [inline]
BUG: KASAN: use-after-free in __skb_dequeue include/linux/skbuff.h:1863 [inline]
BUG: KASAN: use-after-free in skb_dequeue+0x16a/0x180 net/core/skbuff.c:2815
Read of size 8 at addr ffff8801b044ecc0 by task syz-executor217/4553

CPU: 0 PID: 4553 Comm: syz-executor217 Not tainted 4.18.0-rc1+ #111
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113
 print_address_description+0x6c/0x20b mm/kasan/report.c:256
 kasan_report_error mm/kasan/report.c:354 [inline]
 kasan_report.cold.7+0x242/0x2fe mm/kasan/report.c:412
 __asan_report_load8_noabort+0x14/0x20 mm/kasan/report.c:433
 __skb_unlink include/linux/skbuff.h:1843 [inline]
 __skb_dequeue include/linux/skbuff.h:1863 [inline]
 skb_dequeue+0x16a/0x180 net/core/skbuff.c:2815
 skb_queue_purge+0x26/0x40 net/core/skbuff.c:2852
 packet_set_ring+0x675/0x1da0 net/packet/af_packet.c:4331
 packet_release+0x630/0xd90 net/packet/af_packet.c:2991
 __sock_release+0xd7/0x260 net/socket.c:603
 sock_close+0x19/0x20 net/socket.c:1186
 __fput+0x35b/0x8b0 fs/file_table.c:209
 ____fput+0x15/0x20 fs/file_table.c:243
 task_work_run+0x1ec/0x2a0 kernel/task_work.c:113
 exit_task_work include/linux/task_work.h:22 [inline]
 do_exit+0x1b08/0x2750 kernel/exit.c:865
 do_group_exit+0x177/0x440 kernel/exit.c:968
 __do_sys_exit_group kernel/exit.c:979 [inline]
 __se_sys_exit_group kernel/exit.c:977 [inline]
 __x64_sys_exit_group+0x3e/0x50 kernel/exit.c:977
 do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x4448e9
Code: Bad RIP value.
RSP: 002b:00007ffd5f777ca8 EFLAGS: 00000202 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00000000004448e9
RDX: 00000000004448e9 RSI: 000000000000fcfb RDI: 0000000000000001
RBP: 00000000006cf018 R08: 00007ffd0000a45b R09: 0000000000000000
R10: 00007ffd5f777e48 R11: 0000000000000202 R12: 00000000004021f0
R13: 0000000000402280 R14: 0000000000000000 R15: 0000000000000000

Allocated by task 4553:
 save_stack+0x43/0xd0 mm/kasan/kasan.c:448
 set_track mm/kasan/kasan.c:460 [inline]
 kasan_kmalloc+0xc4/0xe0 mm/kasan/kasan.c:553
 kasan_slab_alloc+0x12/0x20 mm/kasan/kasan.c:490
 kmem_cache_alloc+0x12e/0x760 mm/slab.c:3554
 skb_clone+0x1f5/0x500 net/core/skbuff.c:1282
 tpacket_rcv+0x28f7/0x3200 net/packet/af_packet.c:2221
 deliver_skb net/core/dev.c:1925 [inline]
 deliver_ptype_list_skb net/core/dev.c:1940 [inline]
 __netif_receive_skb_core+0x1bfb/0x3680 net/core/dev.c:4611
 __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
 netif_receive_skb_internal+0x12e/0x7d0 net/core/dev.c:4767
 netif_receive_skb+0xbf/0x420 net/core/dev.c:4791
 tun_rx_batched.isra.55+0x4ba/0x8c0 drivers/net/tun.c:1571
 tun_get_user+0x2af1/0x42f0 drivers/net/tun.c:1981
 tun_chr_write_iter+0xb9/0x154 drivers/net/tun.c:2009
 call_write_iter include/linux/fs.h:1795 [inline]
 new_sync_write fs/read_write.c:474 [inline]
 __vfs_write+0x6c6/0x9f0 fs/read_write.c:487
 vfs_write+0x1f8/0x560 fs/read_write.c:549
 ksys_write+0x101/0x260 fs/read_write.c:598
 __do_sys_write fs/read_write.c:610 [inline]
 __se_sys_write fs/read_write.c:607 [inline]
 __x64_sys_write+0x73/0xb0 fs/read_write.c:607
 do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
 entry_SYSCALL_64_after_hwframe+0x49/0xbe

Freed by task 4553:
 save_stack+0x43/0xd0 mm/kasan/kasan.c:448
 set_track mm/kasan/kasan.c:460 [inline]
 __kasan_slab_free+0x11a/0x170 mm/kasan/kasan.c:521
 kasan_slab_free+0xe/0x10 mm/kasan/kasan.c:528
 __cache_free mm/slab.c:3498 [inline]
 kmem_cache_free+0x86/0x2d0 mm/slab.c:3756
 kfree_skbmem+0x154/0x230 net/core/skbuff.c:582
 __kfree_skb net/core/skbuff.c:642 [inline]
 kfree_skb+0x1a5/0x580 net/core/skbuff.c:659
 tpacket_rcv+0x189e/0x3200 net/packet/af_packet.c:2385
 deliver_skb net/core/dev.c:1925 [inline]
 deliver_ptype_list_skb net/core/dev.c:1940 [inline]
 __netif_receive_skb_core+0x1bfb/0x3680 net/core/dev.c:4611
 __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
 netif_receive_skb_internal+0x12e/0x7d0 net/core/dev.c:4767
 netif_receive_skb+0xbf/0x420 net/core/dev.c:4791
 tun_rx_batched.isra.55+0x4ba/0x8c0 drivers/net/tun.c:1571
 tun_get_user+0x2af1/0x42f0 drivers/net/tun.c:1981
 tun_chr_write_iter+0xb9/0x154 drivers/net/tun.c:2009
 call_write_iter include/linux/fs.h:1795 [inline]
 new_sync_write fs/read_write.c:474 [inline]
 __vfs_write+0x6c6/0x9f0 fs/read_write.c:487
 vfs_write+0x1f8/0x560 fs/read_write.c:549
 ksys_write+0x101/0x260 fs/read_write.c:598
 __do_sys_write fs/read_write.c:610 [inline]
 __se_sys_write fs/read_write.c:607 [inline]
 __x64_sys_write+0x73/0xb0 fs/read_write.c:607
 do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
 entry_SYSCALL_64_after_hwframe+0x49/0xbe

The buggy address belongs to the object at ffff8801b044ecc0
 which belongs to the cache skbuff_head_cache of size 232
The buggy address is located 0 bytes inside of
 232-byte region [ffff8801b044ecc0, ffff8801b044eda8)
The buggy address belongs to the page:
page:ffffea0006c11380 count:1 mapcount:0 mapping:ffff8801d9be96c0 index:0x0
flags: 0x2fffc0000000100(slab)
raw: 02fffc0000000100 ffffea0006c17988 ffff8801d9bec248 ffff8801d9be96c0
raw: 0000000000000000 ffff8801b044e040 000000010000000c 0000000000000000
page dumped because: kasan: bad access detected

Memory state around the buggy address:
 ffff8801b044eb80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 ffff8801b044ec00: 00 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc
>ffff8801b044ec80: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
                                           ^
 ffff8801b044ed00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
 ffff8801b044ed80: fb fb fb fb fb fc fc fc fc fc fc fc fc fc fc fc

Fixes: 58d19b19cd99 ("packet: vnet_hdr support for tpacket_rcv")
Signed-off-by: Eric Dumazet
Reported-by: syzbot
Cc: Willem de Bruijn
Signed-off-by: David S. Miller
---
 net/packet/af_packet.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

(limited to 'net')

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 50809748c127..ff8e7e245c37 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2262,6 +2262,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                 if (po->stats.stats1.tp_drops)
                         status |= TP_STATUS_LOSING;
         }
+
+        if (do_vnet &&
+            virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                    sizeof(struct virtio_net_hdr),
+                                    vio_le(), true, 0))
+                goto drop_n_account;
+
         po->stats.stats1.tp_packets++;
         if (copy_skb) {
                 status |= TP_STATUS_COPY;
@@ -2269,15 +2276,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
         }
         spin_unlock(&sk->sk_receive_queue.lock);
 
-        if (do_vnet) {
-                if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                            sizeof(struct virtio_net_hdr),
-                                            vio_le(), true, 0)) {
-                        spin_lock(&sk->sk_receive_queue.lock);
-                        goto drop_n_account;
-                }
-        }
-
         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))

--
cgit v1.2.3-55-g7522
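The ordering rule behind this fix generalizes beyond af_packet: finish every
step that can still fail, or that still needs to touch the object, before
linking it into a queue another context may drain and free. A minimal
userspace sketch of the same "validate, then publish" shape (pthreads,
invented names, not the kernel code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct item {
        struct item *next;
        int payload;
    };

    static struct item *queue_head;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for a step that may fail (virtio_net_hdr_from_skb() in the patch). */
    static bool prepare(struct item *it)
    {
        return it->payload >= 0;
    }

    static bool produce(struct item *it)
    {
        /* Do all fallible work while the item is still private... */
        if (!prepare(it))
            return false;   /* nobody else can see it: safe to back out */

        /* ...and only then publish it. After this point another thread
         * may dequeue and free it, so we must not touch it again. */
        pthread_mutex_lock(&queue_lock);
        it->next = queue_head;
        queue_head = it;
        pthread_mutex_unlock(&queue_lock);
        return true;
    }

    int main(void)
    {
        struct item *it = calloc(1, sizeof(*it));

        return (it && produce(it)) ? 0 : 1;
    }

The original code published copy_skb first and only then ran the header
conversion, so the error path could leave a freed skb sitting on the queue.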
From 44a5cd436ec067c9a62988aca3331ac6eee5268d Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Thu, 21 Jun 2018 20:02:16 +0200
Subject: cls_flower: fix use after free in flower S/W path

If a flower filter is created without the skip_sw flag, fl_mask_put()
can race with fl_classify() and we can destroy the mask rhashtable
while a lookup operation is accessing it.

BUG: unable to handle kernel paging request at 00000000000911d1
PGD 0 P4D 0
SMP PTI
CPU: 3 PID: 5582 Comm: vhost-5541 Not tainted 4.18.0-rc1.vanilla+ #1950
Hardware name: Dell Inc. PowerEdge R730/072T6D, BIOS 2.1.7 06/16/2016
RIP: 0010:rht_bucket_nested+0x20/0x60
Code: 31 c8 c1 c1 18 29 c8 c3 66 90 8b 4f 04 ba 01 00 00 00 8b 07 48 8b bf 80 00 00 0
RSP: 0018:ffffafc5cfbb7a48 EFLAGS: 00010206
RAX: 0000000000001978 RBX: ffff9f12dff88a00 RCX: 00000000ffff9f12
RDX: 00000000000911d1 RSI: 0000000000000148 RDI: 0000000000000001
RBP: ffff9f12dff88a00 R08: 000000005f1cc119 R09: 00000000a715fae2
R10: ffffafc5cfbb7aa8 R11: ffff9f1cb4be804e R12: ffff9f1265e13000
R13: 0000000000000000 R14: ffffafc5cfbb7b48 R15: ffff9f12dff88b68
FS:  0000000000000000(0000) GS:ffff9f1d3f0c0000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000000911d1 CR3: 0000001575a94006 CR4: 00000000001626e0
Call Trace:
 fl_lookup+0x134/0x140 [cls_flower]
 fl_classify+0xf3/0x180 [cls_flower]
 tcf_classify+0x78/0x150
 __netif_receive_skb_core+0x69e/0xa50
 netif_receive_skb_internal+0x42/0xf0
 tun_get_user+0xdd5/0xfd0 [tun]
 tun_sendmsg+0x52/0x70 [tun]
 handle_tx+0x2b3/0x5f0 [vhost_net]
 vhost_worker+0xab/0x100 [vhost]
 kthread+0xf8/0x130
 ret_from_fork+0x35/0x40
Modules linked in: act_mirred act_gact cls_flower vhost_net vhost tap sch_ingress
CR2: 00000000000911d1

Fix the above by waiting for an RCU grace period before destroying the
rhashtable: we need to use tcf_queue_work(), as rhashtable_destroy()
must run in process context, as pointed out by Cong Wang.

v1 -> v2: use tcf_queue_work to run rhashtable_destroy().

Fixes: 05cd271fd61a ("cls_flower: Support multiple masks per priority")
Signed-off-by: Paolo Abeni
Acked-by: Jiri Pirko
Signed-off-by: David S. Miller
---
 net/sched/cls_flower.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2b5be42a9f1c..9e8b26a80fb3 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -66,7 +66,7 @@ struct fl_flow_mask {
         struct rhashtable_params filter_ht_params;
         struct flow_dissector dissector;
         struct list_head filters;
-        struct rcu_head rcu;
+        struct rcu_work rwork;
         struct list_head list;
 };
 
@@ -203,6 +203,20 @@ static int fl_init(struct tcf_proto *tp)
         return rhashtable_init(&head->ht, &mask_ht_params);
 }
 
+static void fl_mask_free(struct fl_flow_mask *mask)
+{
+        rhashtable_destroy(&mask->ht);
+        kfree(mask);
+}
+
+static void fl_mask_free_work(struct work_struct *work)
+{
+        struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+                                                 struct fl_flow_mask, rwork);
+
+        fl_mask_free(mask);
+}
+
 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                         bool async)
 {
@@ -210,12 +224,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                 return false;
 
         rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
-        rhashtable_destroy(&mask->ht);
         list_del_rcu(&mask->list);
         if (async)
-                kfree_rcu(mask, rcu);
+                tcf_queue_work(&mask->rwork, fl_mask_free_work);
         else
-                fl_mask_free(mask);
+                fl_mask_free(mask);
 
         return true;
 }

--
cgit v1.2.3-55-g7522
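The teardown pattern used above is worth spelling out: data an RCU reader may
still be traversing cannot be freed until a grace period has passed, and
rhashtable_destroy() may sleep, so the destructor must also run in process
context. A rough kernel-style sketch of the generic rcu_work mechanism that
tcf_queue_work() wraps (struct foo and its helpers are invented for
illustration, not part of the patch):

    #include <linux/rhashtable.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct foo {
        struct rhashtable ht;       /* readers walk this under rcu_read_lock() */
        struct rcu_work rwork;
    };

    static void foo_free_work(struct work_struct *work)
    {
        struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

        /* Runs on a workqueue, after an RCU grace period: no reader can
         * still be inside the table, and sleeping here is allowed. */
        rhashtable_destroy(&f->ht);
        kfree(f);
    }

    static void foo_release(struct foo *f)
    {
        INIT_RCU_WORK(&f->rwork, foo_free_work);
        queue_rcu_work(system_wq, &f->rwork);
    }

queue_rcu_work() combines the grace-period wait of kfree_rcu() with the
process-context execution of a workqueue, which is exactly what the flower
mask teardown needs.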
From e5ab564c9ebee77794842ca7d7476147b83d6a27 Mon Sep 17 00:00:00 2001
From: Claudio Imbrenda
Date: Wed, 20 Jun 2018 15:51:51 +0200
Subject: VSOCK: fix loopback on big-endian systems

The dst_cid and src_cid are 64 bits, therefore 64 bit accessors should
be used, and in fact in virtio_transport_common.c only 64 bit accessors
are used. Using 32 bit accessors for 64 bit values breaks big endian
systems.

This patch fixes a wrong use of le32_to_cpu in
virtio_transport_send_pkt.

Fixes: b9116823189e85ccf384 ("VSOCK: add loopback to virtio_transport")
Signed-off-by: Claudio Imbrenda
Reviewed-by: Stefan Hajnoczi
Signed-off-by: David S. Miller
---
 net/vmw_vsock/virtio_transport.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 8e03bd3f3668..5d3cce9e8744 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
                 return -ENODEV;
         }
 
-        if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+        if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
                 return virtio_transport_send_pkt_loopback(vsock, pkt);
 
         if (pkt->reply)

--
cgit v1.2.3-55-g7522
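The rule behind this one-liner: the accessor width must match the declared
width of the wire field, otherwise the conversion silently operates on the
wrong half of the value on big-endian hosts. A small userspace analogue,
using the glibc endian.h helpers as stand-ins for le32_to_cpu()/le64_to_cpu()
(illustrative only): both values print 3 on a little-endian machine, but the
32-bit path prints 0 on a big-endian one.

    #define _DEFAULT_SOURCE
    #include <endian.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* A 64-bit field stored little-endian on the wire, as virtio does. */
        uint64_t wire = htole64(3);

        /* Correct: convert all 64 bits back to host order. */
        uint64_t good = le64toh(wire);

        /* Buggy pattern: hand the 64-bit field to a 32-bit converter.
         * The implicit narrowing keeps only the low 32 bits of the value
         * as this CPU reads it from memory -- the wrong half on a
         * big-endian host, where this yields 0 instead of 3. */
        uint32_t bad = le32toh((uint32_t)wire);

        printf("64-bit accessor: %" PRIu64 ", 32-bit accessor: %" PRIu32 "\n",
               good, bad);
        return 0;
    }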
From 6c6da92808442908287fae8ebb0ca041a52469f4 Mon Sep 17 00:00:00 2001
From: Hangbin Liu
Date: Thu, 21 Jun 2018 19:49:36 +0800
Subject: ipv6: mcast: fix unsolicited report interval after receiving queries

After receiving MLD queries, we update idev->mc_maxdelay with the
max_delay from the query header. This makes later unsolicited reports
use the same interval as mc_maxdelay, which means we may send
unsolicited reports with a long interval instead of the configured
default interval.

Also, since ipv6_mc_reset() is not called again after the device is up,
this issue persists even after leaving the group and joining other
groups.

Fixes: fc4eba58b4c14 ("ipv6: make unsolicited report intervals configurable for mld")
Signed-off-by: Hangbin Liu
Signed-off-by: David S. Miller
---
 net/ipv6/mcast.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'net')

diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 975021df7c1c..c0c74088f2af 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2082,7 +2082,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
                 mld_send_initial_cr(idev);
                 idev->mc_dad_count--;
                 if (idev->mc_dad_count)
-                        mld_dad_start_timer(idev, idev->mc_maxdelay);
+                        mld_dad_start_timer(idev,
+                                            unsolicited_report_interval(idev));
         }
 }
 
@@ -2094,7 +2095,8 @@ static void mld_dad_timer_expire(struct timer_list *t)
         if (idev->mc_dad_count) {
                 idev->mc_dad_count--;
                 if (idev->mc_dad_count)
-                        mld_dad_start_timer(idev, idev->mc_maxdelay);
+                        mld_dad_start_timer(idev,
+                                            unsolicited_report_interval(idev));
         }
         in6_dev_put(idev);
 }
@@ -2452,7 +2454,8 @@ static void mld_ifc_timer_expire(struct timer_list *t)
         if (idev->mc_ifc_count) {
                 idev->mc_ifc_count--;
                 if (idev->mc_ifc_count)
-                        mld_ifc_start_timer(idev, idev->mc_maxdelay);
+                        mld_ifc_start_timer(idev,
+                                            unsolicited_report_interval(idev));
         }
         in6_dev_put(idev);
 }

--
cgit v1.2.3-55-g7522
From 74174fe5634ffbf645a7ca5a261571f700b2f332 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 22 Jun 2018 06:44:14 -0700
Subject: net: dccp: avoid crash in ccid3_hc_rx_send_feedback()

On fast hosts or malicious bots, we trigger a DCCP_BUG() which seems
excessive.

syzbot reported :

BUG: delta (-6195) <= 0 at net/dccp/ccids/ccid3.c:628/ccid3_hc_rx_send_feedback()
CPU: 1 PID: 18 Comm: ksoftirqd/1 Not tainted 4.18.0-rc1+ #112
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113
 ccid3_hc_rx_send_feedback net/dccp/ccids/ccid3.c:628 [inline]
 ccid3_hc_rx_packet_recv.cold.16+0x38/0x71 net/dccp/ccids/ccid3.c:793
 ccid_hc_rx_packet_recv net/dccp/ccid.h:185 [inline]
 dccp_deliver_input_to_ccids+0xf0/0x280 net/dccp/input.c:180
 dccp_rcv_established+0x87/0xb0 net/dccp/input.c:378
 dccp_v4_do_rcv+0x153/0x180 net/dccp/ipv4.c:654
 sk_backlog_rcv include/net/sock.h:914 [inline]
 __sk_receive_skb+0x3ba/0xd80 net/core/sock.c:517
 dccp_v4_rcv+0x10f9/0x1f58 net/dccp/ipv4.c:875
 ip_local_deliver_finish+0x2eb/0xda0 net/ipv4/ip_input.c:215
 NF_HOOK include/linux/netfilter.h:287 [inline]
 ip_local_deliver+0x1e9/0x750 net/ipv4/ip_input.c:256
 dst_input include/net/dst.h:450 [inline]
 ip_rcv_finish+0x823/0x2220 net/ipv4/ip_input.c:396
 NF_HOOK include/linux/netfilter.h:287 [inline]
 ip_rcv+0xa18/0x1284 net/ipv4/ip_input.c:492
 __netif_receive_skb_core+0x2488/0x3680 net/core/dev.c:4628
 __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
 process_backlog+0x219/0x760 net/core/dev.c:5373
 napi_poll net/core/dev.c:5771 [inline]
 net_rx_action+0x7da/0x1980 net/core/dev.c:5837
 __do_softirq+0x2e8/0xb17 kernel/softirq.c:284
 run_ksoftirqd+0x86/0x100 kernel/softirq.c:645
 smpboot_thread_fn+0x417/0x870 kernel/smpboot.c:164
 kthread+0x345/0x410 kernel/kthread.c:240
 ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:412

Signed-off-by: Eric Dumazet
Reported-by: syzbot
Cc: Gerrit Renker
Cc: dccp@vger.kernel.org
Signed-off-by: David S. Miller
---
 net/dccp/ccids/ccid3.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'net')

diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8b5ba6dffac7..d57a2be1e2e0 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -625,9 +625,8 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
         case CCID3_FBACK_PERIODIC:
                 delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
                 if (delta <= 0)
-                        DCCP_BUG("delta (%ld) <= 0", (long)delta);
-                else
-                        hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+                        delta = 1;
+                hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
                 break;
         default:
                 return;

--
cgit v1.2.3-55-g7522
From 0ce4e70ff00662ad7490e545ba0cd8c1fa179fca Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 22 Jun 2018 06:44:15 -0700
Subject: net: dccp: switch rx_tstamp_last_feedback to monotonic clock

To compute delays, better not to use the time of day, which can be
changed by admins or malicious programs.

Also change ccid3_first_li() to use the s64 type for the delta variable
to avoid potential overflows.

Signed-off-by: Eric Dumazet
Cc: Gerrit Renker
Cc: dccp@vger.kernel.org
Signed-off-by: David S. Miller
---
 net/dccp/ccids/ccid3.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d57a2be1e2e0..12877a1514e7 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
         struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
         struct dccp_sock *dp = dccp_sk(sk);
-        ktime_t now = ktime_get_real();
+        ktime_t now = ktime_get();
         s64 delta = 0;
 
         switch (fbtype) {
@@ -632,7 +632,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
                 return;
         }
 
-        ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+        ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
                        hc->rx_x_recv, hc->rx_pinv);
 
         hc->rx_tstamp_last_feedback = now;
@@ -679,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 static u32 ccid3_first_li(struct sock *sk)
 {
         struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-        u32 x_recv, p, delta;
+        u32 x_recv, p;
+        s64 delta;
         u64 fval;
 
         if (hc->rx_rtt == 0) {
@@ -687,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
                 hc->rx_rtt = DCCP_FALLBACK_RTT;
         }
 
-        delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+        delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+        if (delta <= 0)
+                delta = 1;
         x_recv = scaled_div32(hc->rx_bytes_recv, delta);
         if (x_recv == 0) {              /* would also trigger divide-by-zero */
                 DCCP_WARN("X_recv==0\n");

--
cgit v1.2.3-55-g7522
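The same principle applies to any interval measurement: use a monotonic
clock, because wall-clock time can be stepped forwards or backwards at any
moment. A hedged userspace analogue of the ktime_get_real() -> ktime_get()
switch, with clock_gettime() standing in for the kernel helpers:

    #include <stdio.h>
    #include <time.h>

    /* Microseconds elapsed between two timestamps. */
    static long long elapsed_us(const struct timespec *a, const struct timespec *b)
    {
        return (b->tv_sec - a->tv_sec) * 1000000LL +
               (b->tv_nsec - a->tv_nsec) / 1000;
    }

    int main(void)
    {
        struct timespec start, now;
        long long delta;

        /* CLOCK_MONOTONIC never jumps, unlike CLOCK_REALTIME, which an
         * administrator or an NTP step can move under our feet. */
        clock_gettime(CLOCK_MONOTONIC, &start);
        /* ... do work ... */
        clock_gettime(CLOCK_MONOTONIC, &now);

        delta = elapsed_us(&start, &now);
        if (delta <= 0)         /* clamp, mirroring the ccid3 fix */
            delta = 1;
        printf("interval: %lld usec\n", delta);
        return 0;
    }

With a monotonic source the delta can still legitimately be zero at coarse
resolution, which is why both ccid3 changes clamp it to at least 1 before
dividing.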
From 35b42da69e35536da603a50e40aa6c41b2f7b0f8 Mon Sep 17 00:00:00 2001
From: Cong Wang
Date: Fri, 22 Jun 2018 14:33:16 -0700
Subject: net_sched: remove a bogus warning in hfsc

In update_vf():

  cftree_remove(cl);
  update_cfmin(cl->cl_parent);

the cl_cfmin of cl->cl_parent is intentionally updated to 0 when that
parent only has one child. And if this parent is the root qdisc, we
could end up in hfsc_schedule_watchdog() unable to decide the next
schedule time for the qdisc watchdog. But it seems safe to just skip
scheduling it in that case, as this watchdog is not always scheduled
anyway.

Thanks to Marco for testing all the cases; nothing is broken.

Reported-by: Marco Berizzi
Tested-by: Marco Berizzi
Signed-off-by: Cong Wang
Signed-off-by: David S. Miller
---
 net/sched/sch_hfsc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'net')

diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3ae9877ea205..3278a76f6861 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
                 if (next_time == 0 || next_time > q->root.cl_cfmin)
                         next_time = q->root.cl_cfmin;
         }
-        WARN_ON(next_time == 0);
-        qdisc_watchdog_schedule(&q->watchdog, next_time);
+        if (next_time)
+                qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int

--
cgit v1.2.3-55-g7522