// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

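/*
 * Note on packet layout (as implemented below): each packet that
 * hv_ringbuffer_write() places in the ring is the caller-supplied
 * descriptor plus payload (desc->len8 << 3 bytes), followed by an
 * 8-byte trailer holding the ring indices at the time of the write
 * (see hv_get_ring_bufferindices()).  This is why totalbytes_towrite
 * starts at sizeof(u64) and why __hv_pkt_iter_next() advances by
 * packetlen + VMBUS_PKT_TRAILER.
 */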
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DoS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or from full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the write index, as a u64 in the upper 32 bits, of the
 * specified ring buffer. This value is used as the packet trailer.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination
 * only: because the ring is double-mapped, a single memcpy() suffices
 * and only the returned offset needs to be wrapped.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
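/*
 * The arithmetic below, with illustrative numbers: if ring_datasize is
 * 4096, read_index = 100 and write_index = 300, then 200 bytes are in
 * use, so *write = 4096 - 200 = 3896 and *read = 4096 - 3896 = 200.
 * With the indices swapped (read_index = 300, write_index = 100) the
 * data wraps, so *write = 300 - 100 = 200 and *read = 3896.
 */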
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	struct page **pages_wraparound;
	int i;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
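	/*
	 * Sketch of the resulting mapping, with page_cnt = 4 as an
	 * illustration: the array built below becomes
	 * [hdr, d1, d2, d3, d1, d2, d3], i.e. the data pages are mapped
	 * twice back to back, so a packet that crosses the end of the
	 * data area is still virtually contiguous (see the memcpy() in
	 * hv_copyto_ringbuffer() and the "double mapped" comment in
	 * hv_ringbuffer_read()).
	 */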
	pages_wraparound = kcalloc(page_cnt * 2 - 1,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] =
			&pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
			pgprot_decrypted(PAGE_KERNEL));

	kfree(pages_wraparound);
	if (!ring_info->ring_buffer)
		return -ENOMEM;

	/*
	 * Ensure the header page is zeroed since
	 * encryption status may have changed.
	 */
	memset(ring_info->ring_buffer, 0, HV_HYP_PAGE_SIZE);

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer = NULL;
	ring_info->pkt_buffer_size = 0;
}

/*
 * Check whether the ring buffer spinlock is available to take; used in
 * atomic contexts, such as the panic path (see the Hyper-V framebuffer
 * driver).
 */

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rinfo = &channel->outbound;

	return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer.  Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */

	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		if (channel->next_request_id_callback != NULL) {
			rqst_id = channel->next_request_id_callback(channel, requestid);
			if (rqst_id == VMBUS_RQST_ERROR) {
				spin_unlock_irqrestore(&outring_info->ring_lock, flags);
				return -EAGAIN;
			}
		}
	}
	desc = hv_get_ring_buffer(outring_info) + old_write;
	__trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
	/*
	 * Ensure the compiler doesn't generate code that reads the value of
	 * the transaction ID from the ring buffer, which is shared with the
	 * Hyper-V host and subject to being changed at any time.
	 */
	WRITE_ONCE(desc->trans_id, __trans_id);
	if (trans_id)
		*trans_id = __trans_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			if (channel->request_addr_callback != NULL)
				channel->request_addr_callback(channel, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set even when there is no header; drivers are
		 * supposed to analyze buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc;

	/*
	 * The Hyper-V host writes the packet data, then uses
	 * store_release() to update the write_index.  Use load_acquire()
	 * here to prevent loads of the packet data from being re-ordered
	 * before the read of the write_index and potentially getting
	 * stale data.
	 */
	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc, *desc_copy;
	u32 bytes_avail, pkt_len, pkt_offset;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	bytes_avail = hv_pkt_iter_avail(rbi);
	if (bytes_avail < sizeof(struct vmpacket_descriptor))
		return NULL;
	bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);

	desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	/*
	 * Ensure the compiler does not use references to incoming Hyper-V values (which
	 * could change at any moment) when reading local variables later in the code
	 */
	pkt_len = READ_ONCE(desc->len8) << 3;
	pkt_offset = READ_ONCE(desc->offset8) << 3;

	/*
	 * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
	 * rbi->pkt_buffer_size
	 */
	if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
		pkt_len = bytes_avail;

	/*
	 * If pkt_offset is invalid, arbitrarily set it to
	 * the size of vmpacket_descriptor
	 */
	if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
		pkt_offset = sizeof(struct vmpacket_descriptor);

	/* Copy the Hyper-V packet out of the ring buffer */
	desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
	memcpy(desc_copy, desc, pkt_len);

	/*
	 * Hyper-V could still change len8 and offset8 after the earlier read.
	 * Ensure that desc_copy has legal values for len8 and offset8 that
	 * are consistent with the copy we just made
	 */
	desc_copy->len8 = pkt_len >> 3;
	desc_copy->offset8 = pkt_offset >> 3;

	return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
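
/*
 * Typical driver-side iteration (a sketch, using the helpers declared in
 * include/linux/hyperv.h rather than anything defined in this file):
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	foreach_vmbus_pkt(desc, channel) {
 *		... handle one packet; payload starts at hv_pkt_data(desc) ...
 *	}
 *
 * foreach_vmbus_pkt() starts with hv_pkt_iter_first() and then calls
 * hv_pkt_iter_next(), which wraps __hv_pkt_iter_next() and invokes
 * hv_pkt_iter_close() once the ring has been drained.
 */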

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
					u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full.  No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur.  If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal.  This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state.  For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz.  The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
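	/*
	 * Worked example with illustrative numbers: suppose the host set
	 * pending_send_sz = 2048 because the ring was too full for its
	 * next packet. If free space before this iteration was
	 * curr_write_sz - bytes_read = 1024 (host still blocked) and free
	 * space is now curr_write_sz = 3072 (> 2048), neither check below
	 * fires and we signal. Had the earlier free space already been,
	 * say, 4096, the first check would return without signaling.
	 */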
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);