/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if the host receives interrupts when it is not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
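/*
 * A worked example of the signaling rule above (hypothetical numbers,
 * for illustration only): suppose read_index == write_index == 1024,
 * i.e. the ring is empty, and the guest then writes a 256-byte packet.
 * old_write (1024) equals the read index sampled after the barrier, so
 * the ring has just transitioned from empty to non-empty and the host
 * is signaled:
 *
 *	u32 old_write = 1024;			// write index before the copy
 *	// ... packet copied, write_index becomes 1280 ...
 *	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
 *		vmbus_setevent(channel);	// fires: 1024 == 1024
 *
 * Had the ring held unread data (say read_index == 512), the comparison
 * would fail and no event would be posted, matching the
 * empty-to-non-empty rule described above.
 */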
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->write_index;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over a packet header.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	if (next >= ring_info->ring_datasize)
		next -= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer's data area. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the 'previous indices' value for the specified ring buffer:
 * the write index packed into the upper 32 bits of a u64. This value
 * is appended as a trailer after each packet.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data from the ring buffer to a destination
 * buffer. Assumes there is enough room; handles wrap-around of the
 * source offset only.
 */
static u32 hv_copyfrom_ringbuffer(
	const struct hv_ring_buffer_info *ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	if (start_read_offset >= ring_buffer_size)
		start_read_offset -= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy data from a source buffer to the ring buffer.
 * Assumes there is enough room; handles wrap-around of the destination
 * offset only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
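/*
 * Why a single memcpy is safe in the helpers above even when the copy
 * crosses the end of the data area: hv_ringbuffer_init() (below) maps
 * the data pages twice, back to back, so the byte at data offset
 * ring_datasize + n aliases the byte at offset n. A worked example
 * (hypothetical numbers, for illustration only), with
 * ring_datasize = 8192:
 *
 *	start_write_offset = 8000, srclen = 400;
 *	memcpy(ring_buffer + 8000, src, 400);	// bytes 8192..8399 land in
 *						// the second mapping, i.e.
 *						// at offsets 0..207
 *	start_write_offset += 400;		// 8400
 *	if (start_write_offset >= 8192)		// wrap the returned offset
 *		start_write_offset -= 8192;	// 208
 *
 * Only the returned offset needs to wrap; the copy itself never has to
 * be split into two pieces.
 */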
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * The first page holds struct hv_ring_buffer; do a wraparound
	 * mapping for the rest: each data page is mapped twice so that
	 * copies crossing the end of the data area stay contiguous.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
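/*
 * A worked example of the wraparound mapping above (hypothetical sizes,
 * for illustration only): with page_cnt = 3, pages[] holds the header
 * page p0 followed by data pages p1 and p2. The loop builds
 *
 *	pages_wraparound[] = { p0, p1, p2, p1, p2 };
 *
 * so vmap() produces a virtually contiguous region in which the two
 * data pages appear twice in a row. An access at data offset
 * ring_datasize + n therefore resolves to the same physical byte as
 * offset n, which is what lets the copy helpers above get by with a
 * single memcpy.
 */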
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring buffer as full: writing the packet exactly would make
	 * read_index == write_index, which is indistinguishable from an
	 * empty ring buffer the next time around.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
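/*
 * Caller-side sketch (hypothetical, for illustration only; real drivers
 * normally go through wrappers such as vmbus_sendpacket() rather than
 * calling this function directly): a packet is a vmpacket_descriptor
 * header followed by payload, passed as a kvec list. The u64 'previous
 * indices' trailer is accounted for by the sizeof(u64) initializer of
 * totalbytes_towrite above, so the caller supplies only the header and
 * payload. Real callers also pad the payload to an 8-byte boundary and
 * fill in desc.offset8/len8 in units of 8 bytes.
 *
 *	struct vmpacket_descriptor desc = { ... };
 *	struct kvec kv[2] = {
 *		{ .iov_base = &desc,   .iov_len = sizeof(desc) },
 *		{ .iov_base = payload, .iov_len = payload_len  },
 *	};
 *	int ret = hv_ringbuffer_write(channel, kv, 2);
 *	if (ret == -EAGAIN)
 *		;	// ring full: retry later or wait for room
 */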
static inline void
init_cached_read_index(struct hv_ring_buffer_info *rbi)
{
	rbi->cached_read_index = rbi->ring_buffer->read_index;
}

/* Read from the ring buffer. */
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen <= 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is returned when there is not even a complete
		 * header; drivers are expected to check buffer_actual_len.
		 */
		return 0;
	}

	init_cached_read_index(inring_info);

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return 0;
}

/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no other action is needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* set state for later hv_signal_on_read() */
	init_cached_read_index(rbi);

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get the next vmbus packet from the ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the data is reached, returns NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;
	else
		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/*
 * Update the host-visible read index after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	hv_signal_on_read(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
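/*
 * Iterator usage sketch (hypothetical, for illustration only): a
 * driver's channel callback can walk every pending packet in place and
 * then publish the new read index once, signaling the host at most one
 * time. handle_packet() is a hypothetical per-packet handler:
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	for (pkt = hv_pkt_iter_first(channel);
 *	     pkt;
 *	     pkt = __hv_pkt_iter_next(channel, pkt))
 *		handle_packet(pkt);
 *
 *	hv_pkt_iter_close(channel);
 *
 * Because only priv_read_index moves during the loop, the host does not
 * see the data as consumed until hv_pkt_iter_close() updates read_index.
 */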