git.hungrycats.org Git - linux/commitdiff
udp: use limited socket backlog
author	Zhu Yi <yi.zhu@intel.com>
Thu, 4 Mar 2010 18:01:42 +0000 (18:01 +0000)
committer	Greg Kroah-Hartman <gregkh@suse.de>
Thu, 1 Apr 2010 23:02:02 +0000 (16:02 -0700)
[ Upstream commit 55349790d7cbf0d381873a7ece1dcafcffd4aaa9 ]

Make udp adapt to the limited socket backlog change.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Cc: "Pekka Savola (ipv6)" <pekkas@netcore.fi>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
net/ipv4/udp.c
net/ipv6/udp.c

index f0126fdd7e04ba8fd3da4b90aadd663411b0617d..7bb45686f3c57447703a4c8a709055f2253a86b4 100644 (file)
@@ -1372,8 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
-       else
-               sk_add_backlog(sk, skb);
+       else if (sk_add_backlog_limited(sk, skb)) {
+               bh_unlock_sock(sk);
+               goto drop;
+       }
        bh_unlock_sock(sk);
 
        return rc;
index 69ebdbe78c47cb1e24657d21710b7f4b81af8ea7..bf88ce073d268f8fe999cef594212ca0316cc6f5 100644 (file)
@@ -584,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
                        bh_lock_sock(sk);
                        if (!sock_owned_by_user(sk))
                                udpv6_queue_rcv_skb(sk, skb1);
-                       else
-                               sk_add_backlog(sk, skb1);
+                       else if (sk_add_backlog_limited(sk, skb1)) {
+                               kfree_skb(skb1);
+                               bh_unlock_sock(sk);
+                               goto drop;
+                       }
                        bh_unlock_sock(sk);
-               } else {
-                       atomic_inc(&sk->sk_drops);
-                       UDP6_INC_STATS_BH(sock_net(sk),
-                                       UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-                       UDP6_INC_STATS_BH(sock_net(sk),
-                                       UDP_MIB_INERRORS, IS_UDPLITE(sk));
+                       continue;
                }
+drop:
+               atomic_inc(&sk->sk_drops);
+               UDP6_INC_STATS_BH(sock_net(sk),
+                               UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+               UDP6_INC_STATS_BH(sock_net(sk),
+                               UDP_MIB_INERRORS, IS_UDPLITE(sk));
        }
 }
 /*
@@ -756,8 +760,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                udpv6_queue_rcv_skb(sk, skb);
-       else
-               sk_add_backlog(sk, skb);
+       else if (sk_add_backlog_limited(sk, skb)) {
+               atomic_inc(&sk->sk_drops);
+               bh_unlock_sock(sk);
+               sock_put(sk);
+               goto discard;
+       }
        bh_unlock_sock(sk);
        sock_put(sk);
        return 0;