u64 aligned_data = 0;
        int ret;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-                                 &signal);
+                                 &signal, lock);
 
        /*
         * Signalling the host is conditional on many factors:
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
 
        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+                                 &signal, lock);
 
        /*
         * Signalling the host is conditional on many factors:
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
 
        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+                                 &signal, lock);
 
        if (ret == 0 && signal)
                vmbus_setevent(channel);
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);
 
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+                                 &signal, lock);
 
        if (ret == 0 && signal)
                vmbus_setevent(channel);
 
                return NULL;
 
        channel->id = atomic_inc_return(&chan_num);
+       channel->acquire_ring_lock = true;
        spin_lock_init(&channel->inbound_lock);
        spin_lock_init(&channel->lock);
 
 
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
                    struct kvec *kv_list,
-                   u32 kv_count, bool *signal);
+                   u32 kv_count, bool *signal, bool lock);
 
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
 
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-                   struct kvec *kv_list, u32 kv_count, bool *signal)
+                   struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
 {
        int i = 0;
        u32 bytes_avail_towrite;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
-       unsigned long flags;
+       unsigned long flags = 0; /* set/used only when 'lock' is true */
 
        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;
 
        totalbytes_towrite += sizeof(u64);
 
-       spin_lock_irqsave(&outring_info->ring_lock, flags);
+       if (lock)
+               spin_lock_irqsave(&outring_info->ring_lock, flags);
 
        hv_get_ringbuffer_availbytes(outring_info,
                                &bytes_avail_toread,
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
-               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+               if (lock)
+                       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
 
        hv_set_next_write_location(outring_info, next_write_location);
 
 
-       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+       if (lock)
+               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
 
         * signaling control.
         */
        enum hv_signal_policy  signal_policy;
+       /*
+        * On the channel send side, many of the VMBUS
+        * device drivers explicitly serialize access to the
+        * outgoing ring buffer. Give the VMBUS device
+        * drivers more control over how access to the
+        * outgoing ring buffer is serialized.
+        * The default is to acquire the ring lock,
+        * which preserves the current behavior.
+        */
+       bool acquire_ring_lock;
+
 };
 
+static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
+{
+       c->acquire_ring_lock = state;
+}
+
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
        return !!(c->offermsg.offer.chn_flags &
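
For illustration, here is a minimal sketch (not part of the patch) of how a
VMBus device driver that already serializes its sends under its own lock
might use set_channel_lock_state() to opt out of the ring lock. The driver
names (example_dev, example_dev_open, example_dev_sendmsg) and the tx_lock
scheme are hypothetical; vmbus_sendpacket() passes
channel->acquire_ring_lock down to hv_ringbuffer_write():

#include <linux/hyperv.h>
#include <linux/spinlock.h>

/* Hypothetical driver: every send is serialized by dev->tx_lock. */
struct example_dev {
	struct vmbus_channel *channel;
	spinlock_t tx_lock;
};

static void example_dev_open(struct example_dev *dev)
{
	/*
	 * All sends go through example_dev_sendmsg() under tx_lock, so
	 * the ring lock taken inside hv_ringbuffer_write() would be
	 * redundant; skip it. The default (true) keeps the old behavior.
	 */
	set_channel_lock_state(dev->channel, false);
}

static int example_dev_sendmsg(struct example_dev *dev, void *buf,
			       u32 len, u64 req_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);
	/* The outgoing ring is written without acquiring ring_lock. */
	ret = vmbus_sendpacket(dev->channel, buf, len, req_id,
			       VM_PKT_DATA_INBAND, 0);
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}

A driver that does not provide its own send-side serialization must leave
acquire_ring_lock at its default of true, since concurrent writers would
otherwise race on the ring's write index.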