hyperv: Fix a bug in netvsc_send()
author    KY Srinivasan <kys@microsoft.com>
          Sun, 5 Oct 2014 17:42:51 +0000 (10:42 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 15 Oct 2014 10:05:30 +0000 (12:05 +0200)
[ Upstream commit 3a67c9ccad926a168d8b7891537a452018368a5b ]

After the packet is successfully sent, we should not touch the packet
as it may have been freed. This patch is based on the work done by
Long Li <longli@microsoft.com>.

David, please queue this up for stable.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reported-by: Sitsofe Wheeler <sitsofe@yahoo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
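
For readers unfamiliar with the race: once the send to the host succeeds, the
completion path may free the packet, so any later read of packet->q_idx is a
use-after-free. The fix below therefore copies q_idx into a local variable
before the send and uses only that copy afterwards. The following is a minimal,
self-contained C sketch of the same pattern, not the kernel code itself;
demo_packet, demo_send and queue_sends are hypothetical stand-ins for the
driver's own structures and counters.

    /*
     * Sketch of the hazard this patch removes (hypothetical names, not
     * the hv_netvsc symbols): the callee owns and may free the packet
     * as soon as the send succeeds.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct demo_packet {
            uint16_t q_idx;         /* transmit queue this packet belongs to */
            char data[64];
    };

    static int queue_sends[8];      /* per-queue outstanding-send counters */

    /* On success the callee owns the packet and may free it immediately. */
    static int demo_send(struct demo_packet *pkt)
    {
            /* ... hand the buffer to the host ... */
            free(pkt);              /* completion can run before we return */
            return 0;
    }

    int main(void)
    {
            struct demo_packet *pkt = calloc(1, sizeof(*pkt));
            if (!pkt)
                    return 1;
            pkt->q_idx = 3;

            /* The fix: read everything still needed *before* the send. */
            uint16_t q_idx = pkt->q_idx;

            if (demo_send(pkt) == 0) {
                    /* Buggy form: queue_sends[pkt->q_idx]++  -- use after free. */
                    queue_sends[q_idx]++;   /* safe: uses the cached index */
            }

            printf("outstanding sends on queue %u: %d\n", q_idx, queue_sends[q_idx]);
            return 0;
    }

The actual patch applies the same idea: every post-send reference to
packet->q_idx in netvsc_send() is replaced with the local q_idx captured at
the top of the function.
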
drivers/net/hyperv/netvsc.c

index d97d5f39a04e4a307bb1e5b53eaab18a85b6b828..7edf976ecfa04363a2a2848aedef80767bab8961 100644 (file)
@@ -708,6 +708,7 @@ int netvsc_send(struct hv_device *device,
        unsigned int section_index = NETVSC_INVALID_INDEX;
        u32 msg_size = 0;
        struct sk_buff *skb;
+       u16 q_idx = packet->q_idx;
 
 
        net_device = get_outbound_net_device(device);
@@ -772,24 +773,24 @@ int netvsc_send(struct hv_device *device,
 
        if (ret == 0) {
                atomic_inc(&net_device->num_outstanding_sends);
-               atomic_inc(&net_device->queue_sends[packet->q_idx]);
+               atomic_inc(&net_device->queue_sends[q_idx]);
 
                if (hv_ringbuf_avail_percent(&out_channel->outbound) <
                        RING_AVAIL_PERCENT_LOWATER) {
                        netif_tx_stop_queue(netdev_get_tx_queue(
-                                           ndev, packet->q_idx));
+                                           ndev, q_idx));
 
                        if (atomic_read(&net_device->
-                               queue_sends[packet->q_idx]) < 1)
+                               queue_sends[q_idx]) < 1)
                                netif_tx_wake_queue(netdev_get_tx_queue(
-                                                   ndev, packet->q_idx));
+                                                   ndev, q_idx));
                }
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(netdev_get_tx_queue(
-                                   ndev, packet->q_idx));
-               if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
+                                   ndev, q_idx));
+               if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
                        netif_tx_wake_queue(netdev_get_tx_queue(
-                                           ndev, packet->q_idx));
+                                           ndev, q_idx));
                        ret = -ENOSPC;
                }
        } else {