vmbus: remove conditional locking of vmbus_write
author	Stephen Hemminger <stephen@networkplumber.org>
Sun, 12 Feb 2017 06:02:22 +0000 (23:02 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 14 Feb 2017 18:20:35 +0000 (10:20 -0800)
All current users of vmbus write set the acquire_lock flag, so making
the lock optional is unnecessary. Removing the flag also fixes a sparse
warning, since sparse complains when a function takes a lock only
conditionally.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/hyperv_vmbus.h
drivers/hv/ring_buffer.c
include/linux/hyperv.h
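
To illustrate the sparse issue the message refers to: sparse tracks lock
context per function, and it warns (roughly, "context imbalance ...
different lock contexts for basic block") when whether a lock is held
depends on a runtime value. A minimal, hypothetical example of the
pattern being removed; the names here are illustrative, not taken from
the driver:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/*
	 * Hypothetical sketch: the store between the two "if (lock)" tests
	 * is sometimes protected and sometimes not, so sparse cannot
	 * assign it a single lock context.
	 */
	static void cond_write(spinlock_t *ring_lock, bool lock,
			       u32 *widx, u32 val)
	{
		unsigned long flags = 0;	/* meaningful only when lock is true */

		if (lock)
			spin_lock_irqsave(ring_lock, flags);

		*widx = val;

		if (lock)
			spin_unlock_irqrestore(ring_lock, flags);
	}

After the patch, hv_ringbuffer_write() always takes the ring lock, giving
every basic block a single, unambiguous lock context.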

index 18cc1c78260d42c0c8fb1fda0fed11e38d590977..81a80c82f1bd2b6a55df393a3df55376d709adfd 100644 (file)
@@ -651,7 +651,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -670,7 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, num_vecs, lock);
+       return hv_ringbuffer_write(channel, bufferlist, num_vecs);
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -716,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
 
        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;
 
-
        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
@@ -753,7 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+       return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -789,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
 
        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -809,7 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+       return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -827,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
-       bool lock = channel->acquire_ring_lock;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);
 
@@ -866,7 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+       return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
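
All four send paths above share the same shape: a three-element kvec
holding the packet descriptor, the caller's payload, and up to seven
bytes of zero padding so the total write lands on a u64 boundary. Each
now calls hv_ringbuffer_write() without the lock flag. Condensed from
the hunks above (an outline, not the exact driver code):

	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	bufferlist[0].iov_base = &desc;			/* packet descriptor */
	bufferlist[0].iov_len  = sizeof(desc);
	bufferlist[1].iov_base = buffer;		/* caller payload */
	bufferlist[1].iov_len  = bufferlen;
	bufferlist[2].iov_base = &aligned_data;		/* zero pad to u64 boundary */
	bufferlist[2].iov_len  = packetlen_aligned - packetlen;

	return hv_ringbuffer_write(channel, bufferlist, 3);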
 
index b2bb5aafaa2fb60fe55a94081a6c90646f1c9560..f33465d78a025680d7515978fdbcc432d51a8062 100644 (file)
@@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void)
        if (!channel)
                return NULL;
 
-       channel->acquire_ring_lock = true;
        spin_lock_init(&channel->inbound_lock);
        spin_lock_init(&channel->lock);
 
index 558a798c407c90ab7353016cdfc2c18771c4c988..6a9b546772189590e08162ab0d56330b1f417684 100644 (file)
@@ -283,8 +283,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-                       struct kvec *kv_list,
-                       u32 kv_count, bool lock);
+                       struct kvec *kv_list, u32 kv_count);
 
 int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
index 1b70e034ef92817f224dd45eb0f638b9650e6658..1a1e70a4514668e2b94df730d1e94fe900c701d6 100644 (file)
@@ -284,7 +284,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-                       struct kvec *kv_list, u32 kv_count, bool lock)
+                       struct kvec *kv_list, u32 kv_count)
 {
        int i = 0;
        u32 bytes_avail_towrite;
@@ -304,8 +304,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 
        totalbytes_towrite += sizeof(u64);
 
-       if (lock)
-               spin_lock_irqsave(&outring_info->ring_lock, flags);
+       spin_lock_irqsave(&outring_info->ring_lock, flags);
 
        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
@@ -315,8 +314,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
-               if (lock)
-                       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
 
@@ -347,8 +345,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
        hv_set_next_write_location(outring_info, next_write_location);
 
 
-       if (lock)
-               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
        hv_signal_on_write(old_write, channel);
 
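With the flag gone, hv_ringbuffer_write() takes and releases ring_lock
unconditionally on every path, so the lock context at exit always
matches the one at entry, which is exactly the property sparse checks.
The resulting flow, condensed from the hunks above:

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
	if (bytes_avail_towrite <= totalbytes_towrite) {
		/* Treat "just enough room" as full, so that an empty
		 * ring (read index == write index) stays distinguishable.
		 */
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* ... copy kv_list, then the 64-bit index value, into the ring ... */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
	hv_signal_on_write(old_write, channel);
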
index e5aac5c051f7d800a3b8cd8d4678fa3c56ac6511..466374dbc98fc95fbb2b64682b5e8824676da26c 100644 (file)
@@ -845,16 +845,6 @@ struct vmbus_channel {
         * link up channels based on their CPU affinity.
         */
        struct list_head percpu_list;
-       /*
-        * On the channel send side, many of the VMBUS
-        * device drivers explicity serialize access to the
-        * outgoing ring buffer. Give more control to the
-        * VMBUS device drivers in terms how to serialize
-        * accesss to the outgoing ring buffer.
-        * The default behavior will be to aquire the
-        * ring lock to preserve the current behavior.
-        */
-       bool acquire_ring_lock;
        /*
         * For performance critical channels (storage, networking
         * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -895,11 +885,6 @@ struct vmbus_channel {
 
 };
 
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
-       c->acquire_ring_lock = state;
-}
-
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
        return !!(c->offermsg.offer.chn_flags &