/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of the signaling protocol:
 *
 * 1. The host guarantees that while it is draining the
 *    ring buffer, it will set the interrupt_mask to
 *    indicate it does not need to be interrupted when
 *    new data is placed.
 *
 * 2. The host guarantees that it will completely drain
 *    the ring buffer before exiting the read loop. Further,
 *    once the ring buffer is empty, it will clear the
 *    interrupt_mask and re-check to see if new data has
 *    arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting any. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * Signal only in the case where the ring transitions from
	 * being empty to non-empty: if the host's read index still
	 * equals the write index we started from, the host may have
	 * drained the ring and gone idle.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
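/*
 * A worked example of the signaling rule above (the sizes and indices
 * are made-up values, not anything read from a real ring). Suppose
 * read_index == write_index == 100, i.e. the ring is empty. A writer
 * copies a packet starting at offset 100 and then advances
 * write_index; the ring has transitioned from empty to non-empty.
 * Since old_write (100) still equals read_index, the host may have
 * gone idle after its last drain, so vmbus_setevent() must be called.
 * If instead read_index were 40, the host would still be inside its
 * read loop, and per guarantee 2 it will see the new data before it
 * exits, so no signal is needed.
 */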
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over already-consumed bytes (such as
 * the packet descriptor) when computing where to read next.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	if (next >= ring_info->ring_datasize)
		next -= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer's data area. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the write index of the specified ring buffer packed into the
 * upper 32 bits of a u64 (the low 32 bits are left as zero). This
 * value is appended after every packet as the "previous indices"
 * trailer.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data out of the ring buffer into a caller
 * supplied buffer. Assumes enough data is available. Only the
 * returned read offset is wrapped; the memcpy() itself may safely run
 * past the nominal end of the ring because the data pages are mapped
 * twice back to back (see hv_ringbuffer_init()).
 */
static u32 hv_copyfrom_ringbuffer(
	const struct hv_ring_buffer_info *ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	if (start_read_offset >= ring_buffer_size)
		start_read_offset -= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy data from a caller supplied buffer into the
 * ring buffer. Assumes enough room is available. As above, only the
 * returned write offset is wrapped; the double mapping absorbs copies
 * that cross the end of the ring.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
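/*
 * For contrast, a minimal sketch (a hypothetical helper, not used by
 * this file) of what hv_copyto_ringbuffer() would have to do if the
 * data pages were mapped only once: every copy that crosses the end
 * of the ring would need to be split in two. The double mapping set
 * up in hv_ringbuffer_init() below is what lets the real helpers
 * issue a single memcpy() instead.
 */
static u32 __maybe_unused hv_copyto_ringbuffer_split(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 first = min(srclen, ring_buffer_size - start_write_offset);

	/* Copy up to the end of the ring, then wrap for the remainder. */
	memcpy(ring_buffer + start_write_offset, src, first);
	memcpy(ring_buffer, (const char *)src + first, srclen - first);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}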
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * The first page holds struct hv_ring_buffer; map the remaining
	 * data pages twice back to back, so that copies which cross the
	 * end of the ring fall through into the second mapping instead
	 * of needing an explicit split.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Clean up the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}
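/*
 * A minimal sketch of the init/cleanup pairing (illustrative only;
 * the hv_example_* name is hypothetical). The real caller,
 * vmbus_open() in channel.c, carves both the outbound and inbound
 * rings out of one contiguous allocation, but the lifecycle is the
 * same: allocate pages, map them with hv_ringbuffer_init(), and undo
 * the mapping with hv_ringbuffer_cleanup() before freeing the pages.
 */
static int __maybe_unused hv_example_setup_ring(struct hv_ring_buffer_info *rbi,
						u32 page_cnt)
{
	unsigned int order = get_order(page_cnt << PAGE_SHIFT);
	struct page *pages;
	int ret;

	/* First page holds struct hv_ring_buffer; the rest hold data. */
	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return -ENOMEM;

	ret = hv_ringbuffer_init(rbi, pages, page_cnt);
	if (ret) {
		__free_pages(pages, order);
		return ret;
	}

	/* ... use the ring ... then tear down the mapping: */
	hv_ringbuffer_cleanup(rbi);
	__free_pages(pages, order);
	return 0;
}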
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64); /* room for the indices trailer */
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for exactly this packet, treat the ring
	 * as full: writing it would make the write index equal the read
	 * index, which is indistinguishable from an empty ring the next
	 * time around.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen == 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is returned when there is not even a packet
		 * header; callers are expected to check buffer_actual_len.
		 */
		return 0;
	}

	init_cached_read_index(channel);
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	/* In raw mode the descriptor is returned as part of the payload. */
	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return 0;
}
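/*
 * A minimal usage sketch for hv_ringbuffer_write() (illustrative; the
 * hv_example_* name and the caller supplied buffer are hypothetical).
 * Real senders go through vmbus_sendpacket() in channel.c, which
 * builds the same descriptor + payload + alignment-padding kvec list:
 * the descriptor's offset8/len8 fields are expressed in 8-byte units,
 * and the total packet is padded to an 8-byte boundary so that the
 * u64 indices trailer written by hv_ringbuffer_write() stays aligned.
 */
static int __maybe_unused hv_example_send(struct vmbus_channel *channel,
					  const void *buf, u32 buflen)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(desc) + buflen;
	u32 aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec kv[3];
	u64 pad = 0;

	desc.type = VM_PKT_DATA_INBAND;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.offset8 = sizeof(desc) >> 3;	/* header size in 8-byte units */
	desc.len8 = aligned >> 3;		/* total size in 8-byte units */
	desc.trans_id = 0;			/* hypothetical request id */

	kv[0].iov_base = &desc;
	kv[0].iov_len = sizeof(desc);
	kv[1].iov_base = (void *)buf;
	kv[1].iov_len = buflen;
	kv[2].iov_base = &pad;
	kv[2].iov_len = aligned - packetlen;	/* may be zero */

	return hv_ringbuffer_write(channel, kv, 3);
}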