// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                             |  |
 *           \_/                           \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |                            |   |
 *            |     o----------------------o   |
 *            |     |                          |
 *           \_/   \_/                        \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair
 *       at a later point in time. This state can be distinguished from the
 *       one above by the context ID of the creator. A host side is not
 *       allowed to attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host
 *       side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM state, if the guest memory is currently mapped, or
 * the VMCIQPB_SHUTDOWN_NO_MEM state, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition
 * from a *_MEM state to a *_NO_MEM state. The VMX may later map the memory
 * once more, in which case the queue pair will transition from the *_NO_MEM
 * state at that point back to the *_MEM state. Note that the *_NO_MEM state
 * may have changed, since the peer may have either attached or detached in
 * the meantime. The values are laid out such that ++ on a state will move
 * from a *_NO_MEM to a *_MEM state, and vice versa.
 */

/* The kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)

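/*
 * Illustrative note (a recap of the state diagram above, not relied upon by
 * the code in this excerpt): because each *_NO_MEM value is immediately
 * followed by its *_MEM counterpart in the enum, mapping guest memory can
 * be expressed as incrementing the state and unmapping as decrementing it,
 * e.g. VMCIQPB_ATTACHED_NO_MEM + 1 == VMCIQPB_ATTACHED_MEM.
 */
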
/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
/*
 * The sum of the two queue sizes must neither wrap around (the first check
 * catches unsigned overflow) nor exceed the guest memory allowed for queue
 * pairs.
 */
#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \
	((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \
	 (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY)

/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}

/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				   u64 queue_offset,
				   struct iov_iter *from,
				   size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

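/*
 * Worked example of the index math used by the copy helpers above and below
 * (illustrative only, assuming PAGE_SIZE == 4096): for
 * queue_offset + bytes_copied == 5000, page_index is 1 and page_offset is
 * 904. On the guest side the data page is found at u.g.vas[page_index + 1],
 * since u.g.vas[0] always holds the queue header; on the host side
 * u.h.page[] already excludes the header page.
 */
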
/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				     const struct vmci_queue *queue,
				     u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u64 *produce_ppns;
	u64 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
		kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
			      GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
		kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
			      GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++)
		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	for (i = 0; i < num_consume_pages; i++)
		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

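/*
 * Illustrative note: a PPN here is simply the physical page frame number of
 * a queue page, i.e. the DMA address from u.g.pas[] shifted right by
 * PAGE_SHIFT. The page counts passed to qp_alloc_ppn_set() include the
 * header page of each queue, so a queue pair whose produce and consume
 * queues each hold one page of payload is described by 1 + 1 + 2 = 4 PPNs.
 */
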
/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	if (vmci_use_ppn64()) {
		memcpy(call_buf, ppn_set->produce_ppns,
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns));
		memcpy(call_buf +
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns),
		       ppn_set->consume_ppns,
		       ppn_set->num_consume_pages *
		       sizeof(*ppn_set->consume_ppns));
	} else {
		int i;
		u32 *ppns = (u32 *) call_buf;

		for (i = 0; i < ppn_set->num_produce_pages; i++)
			ppns[i] = (u32) ppn_set->produce_ppns[i];

		ppns = &ppns[ppn_set->num_produce_pages];

		for (i = 0; i < ppn_set->num_consume_pages; i++)
			ppns[i] = (u32) ppn_set->consume_ppns[i];
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
			(struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues. This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it's only any good if the mutexes
 * are actually acquired. Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}

/*
 * Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex. So, only one of the two need to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex. So, only one of the two need to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			 retval);
		if (retval > 0)
			qp_release_pages(produce_q->kernel_if->u.h.header_page,
					 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			 retval);
		if (retval > 0)
			qp_release_pages(consume_q->kernel_if->u.h.header_page,
					 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping only differ in
	 * that we either get a single or two UVAs, so we split the
	 * single UVA range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
		produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}

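/*
 * Illustrative example of the split above (not a separate code path): for a
 * new-style VMX the page store describes one contiguous UVA range that
 * first covers the produce queue pages (header included) and then the
 * consume queue pages. With a three-page produce queue, the consume queue
 * therefore starts at page_store->pages + 3 * PAGE_SIZE.
 */
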
/*
 * Releases and removes the references to user pages stored in the attach
 * struct. Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
				(struct vmci_queue_header *)((u8 *)
							     produce_q->q_header +
							     PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
		attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles. Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
		DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}

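/*
 * Illustrative summary of the guest endpoint lifecycle (derived from the
 * functions above and the detach path below): an endpoint is created with
 * ref_count 0, added to qp_guest_endpoints once allocation succeeds, and
 * ref_count is incremented per create/attach. On detach the count is
 * decremented, and the entry is only removed from the list and destroyed
 * once it reaches zero.
 */
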
/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	size_t ppn_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
	msg_size = sizeof(*alloc_msg) +
		(size_t) entry->num_ppns * ppn_size;
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate. We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet. It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair,
	 * or we succeeded in all cases.  Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
		DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
		DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones. The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create. We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_devel("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create. For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine.
		 */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the
		 * local attacher will swap queues.
		 */

		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
		!!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
		!!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
			(DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM. Returns the payload size of the datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
	 * number of pending events from the hypervisor to a given VM
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
		VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */
		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */
		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */

		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attached to a queue pair, it will supply
		 * the backing memory. If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc() call. Under all circumstances
		 * the initially created queue pair must not have any
		 * memory associated with it already.
		 */
		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */
			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */
		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
			qp_notify_peer(true, entry->qp.handle, context_id,
				       entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}

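/*
 * Illustrative walk-through of the common non-local case (a recap of the
 * state machine documented at the top of this file): a new-style guest VMX
 * creates the pair with a page store, leaving it in VMCIQPB_CREATED_MEM;
 * the host then attaches via qp_broker_attach() above, the state becomes
 * VMCIQPB_ATTACHED_MEM, and an attach event is sent to the creator with
 * qp_notify_peer().
 */
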
/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
			   u32 peer,
			   u32 flags,
			   u32 priv_flags,
			   u64 produce_size,
			   u64 consume_size,
			   struct vmci_qp_page_store *page_store,
			   struct vmci_ctx *context,
			   vmci_event_release_cb wakeup_cb,
			   void *client_data,
			   struct qp_broker_entry **ent,
			   bool *swap)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool create;
	struct qp_broker_entry *entry = NULL;
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (vmci_handle_is_invalid(handle) ||
	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
	    !(produce_size || consume_size) ||
	    !context || context_id == VMCI_INVALID_ID ||
	    handle.context == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In the initial argument check, we ensure that non-vmkernel hosts
	 * are not allowed to create local queue pairs.
	 */

	mutex_lock(&qp_broker_list.mutex);

	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		mutex_unlock(&qp_broker_list.mutex);
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (handle.resource != VMCI_INVALID_ID)
		entry = qp_broker_handle_to_entry(handle);

	if (!entry) {
		create = true;
		result =
			qp_broker_create(handle, peer, flags, priv_flags,
					 produce_size, consume_size, page_store,
					 context, wakeup_cb, client_data, ent);
	} else {
		create = false;
		result =
			qp_broker_attach(entry, peer, flags, priv_flags,
					 produce_size, consume_size, page_store,
					 context, wakeup_cb, client_data, ent);
	}

	mutex_unlock(&qp_broker_list.mutex);

	if (swap)
		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
			!(create && is_local);

	return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
			      struct vmci_queue **produce_q,
			      u64 produce_size,
			      struct vmci_queue **consume_q,
			      u64 consume_size,
			      u32 peer,
			      u32 flags,
			      u32 priv_flags,
			      vmci_event_release_cb wakeup_cb,
			      void *client_data)
{
	struct vmci_handle new_handle;
	struct vmci_ctx *context;
	struct qp_broker_entry *entry;
	int result;
	bool swap;

	if (vmci_handle_is_invalid(*handle)) {
		new_handle = vmci_make_handle(
			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
	} else
		new_handle = *handle;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
	entry = NULL;
	result =
		qp_broker_alloc(new_handle, peer, flags, priv_flags,
				produce_size, consume_size, NULL, context,
				wakeup_cb, client_data, &entry, &swap);
	if (result == VMCI_SUCCESS) {
		if (swap) {
			/*
			 * If this is a local queue pair, the attacher
			 * will swap around produce and consume
			 * queues.
1800 */ 1801 1802 *produce_q = entry->consume_q; 1803 *consume_q = entry->produce_q; 1804 } else { 1805 *produce_q = entry->produce_q; 1806 *consume_q = entry->consume_q; 1807 } 1808 1809 *handle = vmci_resource_handle(&entry->resource); 1810 } else { 1811 *handle = VMCI_INVALID_HANDLE; 1812 pr_devel("queue pair broker failed to alloc (result=%d)\n", 1813 result); 1814 } 1815 vmci_ctx_put(context); 1816 return result; 1817 } 1818 1819 /* 1820 * Allocates a VMCI queue_pair. Only checks validity of input 1821 * arguments. The real work is done in the host or guest 1822 * specific function. 1823 */ 1824 int vmci_qp_alloc(struct vmci_handle *handle, 1825 struct vmci_queue **produce_q, 1826 u64 produce_size, 1827 struct vmci_queue **consume_q, 1828 u64 consume_size, 1829 u32 peer, 1830 u32 flags, 1831 u32 priv_flags, 1832 bool guest_endpoint, 1833 vmci_event_release_cb wakeup_cb, 1834 void *client_data) 1835 { 1836 if (!handle || !produce_q || !consume_q || 1837 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) 1838 return VMCI_ERROR_INVALID_ARGS; 1839 1840 if (guest_endpoint) { 1841 return qp_alloc_guest_work(handle, produce_q, 1842 produce_size, consume_q, 1843 consume_size, peer, 1844 flags, priv_flags); 1845 } else { 1846 return qp_alloc_host_work(handle, produce_q, 1847 produce_size, consume_q, 1848 consume_size, peer, flags, 1849 priv_flags, wakeup_cb, client_data); 1850 } 1851 } 1852 1853 /* 1854 * This function implements the host kernel API for detaching from 1855 * a queue pair. 1856 */ 1857 static int qp_detatch_host_work(struct vmci_handle handle) 1858 { 1859 int result; 1860 struct vmci_ctx *context; 1861 1862 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1863 1864 result = vmci_qp_broker_detach(handle, context); 1865 1866 vmci_ctx_put(context); 1867 return result; 1868 } 1869 1870 /* 1871 * Detaches from a VMCI queue_pair. Only checks validity of input argument. 1872 * Real work is done in the host or guest specific function. 1873 */ 1874 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) 1875 { 1876 if (vmci_handle_is_invalid(handle)) 1877 return VMCI_ERROR_INVALID_ARGS; 1878 1879 if (guest_endpoint) 1880 return qp_detatch_guest_work(handle); 1881 else 1882 return qp_detatch_host_work(handle); 1883 } 1884 1885 /* 1886 * Returns the entry from the head of the list. Assumes that the list is 1887 * locked. 1888 */ 1889 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) 1890 { 1891 if (!list_empty(&qp_list->head)) { 1892 struct qp_entry *entry = 1893 list_first_entry(&qp_list->head, struct qp_entry, 1894 list_item); 1895 return entry; 1896 } 1897 1898 return NULL; 1899 } 1900 1901 void vmci_qp_broker_exit(void) 1902 { 1903 struct qp_entry *entry; 1904 struct qp_broker_entry *be; 1905 1906 mutex_lock(&qp_broker_list.mutex); 1907 1908 while ((entry = qp_list_get_head(&qp_broker_list))) { 1909 be = (struct qp_broker_entry *)entry; 1910 1911 qp_list_remove_entry(&qp_broker_list, entry); 1912 kfree(be); 1913 } 1914 1915 mutex_unlock(&qp_broker_list.mutex); 1916 } 1917 1918 /* 1919 * Requests that a queue pair be allocated with the VMCI queue 1920 * pair broker. Allocates a queue pair entry if one does not 1921 * exist. Attaches to one if it exists, and retrieves the page 1922 * files backing that queue_pair. Assumes that the queue pair 1923 * broker lock is held. 
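 *
 * A minimal calling sketch (variables are hypothetical; host kernel
 * endpoints normally go through qp_alloc_host_work() above instead):
 *
 *	int result = vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
 *					  produce_size, consume_size,
 *					  &page_store, context);
 *	if (result < VMCI_SUCCESS)
 *		return result;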
1924 */ 1925 int vmci_qp_broker_alloc(struct vmci_handle handle, 1926 u32 peer, 1927 u32 flags, 1928 u32 priv_flags, 1929 u64 produce_size, 1930 u64 consume_size, 1931 struct vmci_qp_page_store *page_store, 1932 struct vmci_ctx *context) 1933 { 1934 if (!QP_SIZES_ARE_VALID(produce_size, consume_size)) 1935 return VMCI_ERROR_NO_RESOURCES; 1936 1937 return qp_broker_alloc(handle, peer, flags, priv_flags, 1938 produce_size, consume_size, 1939 page_store, context, NULL, NULL, NULL, NULL); 1940 } 1941 1942 /* 1943 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate 1944 * step to add the UVAs of the VMX mapping of the queue pair. This function 1945 * provides backwards compatibility with such VMX'en, and takes care of 1946 * registering the page store for a queue pair previously allocated by the 1947 * VMX during create or attach. This function will move the queue pair state 1948 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from 1949 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the 1950 * attached state with memory, the queue pair is ready to be used by the 1951 * host peer, and an attached event will be generated. 1952 * 1953 * Assumes that the queue pair broker lock is held. 1954 * 1955 * This function is only used by the hosted platform, since there is no 1956 * issue with backwards compatibility for vmkernel. 1957 */ 1958 int vmci_qp_broker_set_page_store(struct vmci_handle handle, 1959 u64 produce_uva, 1960 u64 consume_uva, 1961 struct vmci_ctx *context) 1962 { 1963 struct qp_broker_entry *entry; 1964 int result; 1965 const u32 context_id = vmci_ctx_get_id(context); 1966 1967 if (vmci_handle_is_invalid(handle) || !context || 1968 context_id == VMCI_INVALID_ID) 1969 return VMCI_ERROR_INVALID_ARGS; 1970 1971 /* 1972 * We only support guest to host queue pairs, so the VMX must 1973 * supply UVAs for the mapped page files. 1974 */ 1975 1976 if (produce_uva == 0 || consume_uva == 0) 1977 return VMCI_ERROR_INVALID_ARGS; 1978 1979 mutex_lock(&qp_broker_list.mutex); 1980 1981 if (!vmci_ctx_qp_exists(context, handle)) { 1982 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 1983 context_id, handle.context, handle.resource); 1984 result = VMCI_ERROR_NOT_FOUND; 1985 goto out; 1986 } 1987 1988 entry = qp_broker_handle_to_entry(handle); 1989 if (!entry) { 1990 result = VMCI_ERROR_NOT_FOUND; 1991 goto out; 1992 } 1993 1994 /* 1995 * If I'm the owner, then I can set the page store. 1996 * 1997 * Or, if a host created the queue_pair and I'm the attached peer, 1998 * then I can set the page store.
1999 */ 2000 if (entry->create_id != context_id && 2001 (entry->create_id != VMCI_HOST_CONTEXT_ID || 2002 entry->attach_id != context_id)) { 2003 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; 2004 goto out; 2005 } 2006 2007 if (entry->state != VMCIQPB_CREATED_NO_MEM && 2008 entry->state != VMCIQPB_ATTACHED_NO_MEM) { 2009 result = VMCI_ERROR_UNAVAILABLE; 2010 goto out; 2011 } 2012 2013 result = qp_host_get_user_memory(produce_uva, consume_uva, 2014 entry->produce_q, entry->consume_q); 2015 if (result < VMCI_SUCCESS) 2016 goto out; 2017 2018 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2019 if (result < VMCI_SUCCESS) { 2020 qp_host_unregister_user_memory(entry->produce_q, 2021 entry->consume_q); 2022 goto out; 2023 } 2024 2025 if (entry->state == VMCIQPB_CREATED_NO_MEM) 2026 entry->state = VMCIQPB_CREATED_MEM; 2027 else 2028 entry->state = VMCIQPB_ATTACHED_MEM; 2029 2030 entry->vmci_page_files = true; 2031 2032 if (entry->state == VMCIQPB_ATTACHED_MEM) { 2033 result = 2034 qp_notify_peer(true, handle, context_id, entry->create_id); 2035 if (result < VMCI_SUCCESS) { 2036 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 2037 entry->create_id, entry->qp.handle.context, 2038 entry->qp.handle.resource); 2039 } 2040 } 2041 2042 result = VMCI_SUCCESS; 2043 out: 2044 mutex_unlock(&qp_broker_list.mutex); 2045 return result; 2046 } 2047 2048 /* 2049 * Resets saved queue headers for the given QP broker 2050 * entry. Should be used when guest memory becomes available 2051 * again, or the guest detaches. 2052 */ 2053 static void qp_reset_saved_headers(struct qp_broker_entry *entry) 2054 { 2055 entry->produce_q->saved_header = NULL; 2056 entry->consume_q->saved_header = NULL; 2057 } 2058 2059 /* 2060 * The main entry point for detaching from a queue pair registered with the 2061 * queue pair broker. If more than one endpoint is attached to the queue 2062 * pair, the first endpoint will mainly decrement a reference count and 2063 * generate a notification to its peer. The last endpoint will clean up 2064 * the queue pair state registered with the broker. 2065 * 2066 * When a guest endpoint detaches, it will unmap and unregister the guest 2067 * memory backing the queue pair. If the host is still attached, it will 2068 * no longer be able to access the queue pair content. 2069 * 2070 * If the queue pair is already in a state where there is no memory 2071 * registered for the queue pair (any *_NO_MEM state), it will transition to 2072 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest 2073 * endpoint is the first of two endpoints to detach. If the host endpoint is 2074 * the first out of two to detach, the queue pair will move to the 2075 * VMCIQPB_SHUTDOWN_MEM state. 
2076 */ 2077 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) 2078 { 2079 struct qp_broker_entry *entry; 2080 const u32 context_id = vmci_ctx_get_id(context); 2081 u32 peer_id; 2082 bool is_local = false; 2083 int result; 2084 2085 if (vmci_handle_is_invalid(handle) || !context || 2086 context_id == VMCI_INVALID_ID) { 2087 return VMCI_ERROR_INVALID_ARGS; 2088 } 2089 2090 mutex_lock(&qp_broker_list.mutex); 2091 2092 if (!vmci_ctx_qp_exists(context, handle)) { 2093 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2094 context_id, handle.context, handle.resource); 2095 result = VMCI_ERROR_NOT_FOUND; 2096 goto out; 2097 } 2098 2099 entry = qp_broker_handle_to_entry(handle); 2100 if (!entry) { 2101 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", 2102 context_id, handle.context, handle.resource); 2103 result = VMCI_ERROR_NOT_FOUND; 2104 goto out; 2105 } 2106 2107 if (context_id != entry->create_id && context_id != entry->attach_id) { 2108 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2109 goto out; 2110 } 2111 2112 if (context_id == entry->create_id) { 2113 peer_id = entry->attach_id; 2114 entry->create_id = VMCI_INVALID_ID; 2115 } else { 2116 peer_id = entry->create_id; 2117 entry->attach_id = VMCI_INVALID_ID; 2118 } 2119 entry->qp.ref_count--; 2120 2121 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2122 2123 if (context_id != VMCI_HOST_CONTEXT_ID) { 2124 bool headers_mapped; 2125 2126 /* 2127 * Pre NOVMVM vmx'en may detach from a queue pair 2128 * before setting the page store, and in that case 2129 * there is no user memory to detach from. Also, more 2130 * recent VMX'en may detach from a queue pair in the 2131 * quiesced state. 
2132 */ 2133 2134 qp_acquire_queue_mutex(entry->produce_q); 2135 headers_mapped = entry->produce_q->q_header || 2136 entry->consume_q->q_header; 2137 if (QPBROKERSTATE_HAS_MEM(entry)) { 2138 result = 2139 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, 2140 entry->produce_q, 2141 entry->consume_q); 2142 if (result < VMCI_SUCCESS) 2143 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2144 handle.context, handle.resource, 2145 result); 2146 2147 qp_host_unregister_user_memory(entry->produce_q, 2148 entry->consume_q); 2149 2150 } 2151 2152 if (!headers_mapped) 2153 qp_reset_saved_headers(entry); 2154 2155 qp_release_queue_mutex(entry->produce_q); 2156 2157 if (!headers_mapped && entry->wakeup_cb) 2158 entry->wakeup_cb(entry->client_data); 2159 2160 } else { 2161 if (entry->wakeup_cb) { 2162 entry->wakeup_cb = NULL; 2163 entry->client_data = NULL; 2164 } 2165 } 2166 2167 if (entry->qp.ref_count == 0) { 2168 qp_list_remove_entry(&qp_broker_list, &entry->qp); 2169 2170 if (is_local) 2171 kfree(entry->local_mem); 2172 2173 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 2174 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); 2175 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); 2176 /* Unlink from resource hash table and free callback */ 2177 vmci_resource_remove(&entry->resource); 2178 2179 kfree(entry); 2180 2181 vmci_ctx_qp_destroy(context, handle); 2182 } else { 2183 qp_notify_peer(false, handle, context_id, peer_id); 2184 if (context_id == VMCI_HOST_CONTEXT_ID && 2185 QPBROKERSTATE_HAS_MEM(entry)) { 2186 entry->state = VMCIQPB_SHUTDOWN_MEM; 2187 } else { 2188 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; 2189 } 2190 2191 if (!is_local) 2192 vmci_ctx_qp_destroy(context, handle); 2193 2194 } 2195 result = VMCI_SUCCESS; 2196 out: 2197 mutex_unlock(&qp_broker_list.mutex); 2198 return result; 2199 } 2200 2201 /* 2202 * Establishes the necessary mappings for a queue pair given a 2203 * reference to the queue pair guest memory. This is usually 2204 * called when a guest is unquiesced and the VMX is allowed to 2205 * map guest memory once again. 
2206 */ 2207 int vmci_qp_broker_map(struct vmci_handle handle, 2208 struct vmci_ctx *context, 2209 u64 guest_mem) 2210 { 2211 struct qp_broker_entry *entry; 2212 const u32 context_id = vmci_ctx_get_id(context); 2213 int result; 2214 2215 if (vmci_handle_is_invalid(handle) || !context || 2216 context_id == VMCI_INVALID_ID) 2217 return VMCI_ERROR_INVALID_ARGS; 2218 2219 mutex_lock(&qp_broker_list.mutex); 2220 2221 if (!vmci_ctx_qp_exists(context, handle)) { 2222 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2223 context_id, handle.context, handle.resource); 2224 result = VMCI_ERROR_NOT_FOUND; 2225 goto out; 2226 } 2227 2228 entry = qp_broker_handle_to_entry(handle); 2229 if (!entry) { 2230 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2231 context_id, handle.context, handle.resource); 2232 result = VMCI_ERROR_NOT_FOUND; 2233 goto out; 2234 } 2235 2236 if (context_id != entry->create_id && context_id != entry->attach_id) { 2237 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2238 goto out; 2239 } 2240 2241 result = VMCI_SUCCESS; 2242 2243 if (context_id != VMCI_HOST_CONTEXT_ID) { 2244 struct vmci_qp_page_store page_store; 2245 2246 page_store.pages = guest_mem; 2247 page_store.len = QPE_NUM_PAGES(entry->qp); 2248 2249 qp_acquire_queue_mutex(entry->produce_q); 2250 qp_reset_saved_headers(entry); 2251 result = 2252 qp_host_register_user_memory(&page_store, 2253 entry->produce_q, 2254 entry->consume_q); 2255 qp_release_queue_mutex(entry->produce_q); 2256 if (result == VMCI_SUCCESS) { 2257 /* Move state from *_NO_MEM to *_MEM */ 2258 2259 entry->state++; 2260 2261 if (entry->wakeup_cb) 2262 entry->wakeup_cb(entry->client_data); 2263 } 2264 } 2265 2266 out: 2267 mutex_unlock(&qp_broker_list.mutex); 2268 return result; 2269 } 2270 2271 /* 2272 * Saves a snapshot of the queue headers for the given QP broker 2273 * entry. Should be used when guest memory is unmapped. 2274 * Results: 2275 * VMCI_SUCCESS on success, appropriate error code if guest memory 2276 * can't be accessed. 2277 */ 2278 static int qp_save_headers(struct qp_broker_entry *entry) 2279 { 2280 int result; 2281 2282 if (entry->produce_q->saved_header != NULL && 2283 entry->consume_q->saved_header != NULL) { 2284 /* 2285 * If the headers have already been saved, we don't need to do 2286 * it again, and we don't want to map in the headers 2287 * unnecessarily. 2288 */ 2289 2290 return VMCI_SUCCESS; 2291 } 2292 2293 if (NULL == entry->produce_q->q_header || 2294 NULL == entry->consume_q->q_header) { 2295 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2296 if (result < VMCI_SUCCESS) 2297 return result; 2298 } 2299 2300 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, 2301 sizeof(entry->saved_produce_q)); 2302 entry->produce_q->saved_header = &entry->saved_produce_q; 2303 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, 2304 sizeof(entry->saved_consume_q)); 2305 entry->consume_q->saved_header = &entry->saved_consume_q; 2306 2307 return VMCI_SUCCESS; 2308 } 2309 2310 /* 2311 * Removes all references to the guest memory of a given queue pair, and 2312 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually 2313 * called when a VM is being quiesced, where access to guest memory should 2314 * be avoided.
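 *
 * A hedged illustration of the expected pairing (caller and arguments are
 * hypothetical): while a VM is being quiesced the VMX has the host unmap
 * each attached queue pair, and on resume it maps them again with a new
 * reference to the guest memory:
 *
 *	vmci_qp_broker_unmap(handle, context, gid);
 *	(VM remains quiesced; guest memory is unavailable)
 *	vmci_qp_broker_map(handle, context, new_guest_mem);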
2315 */ 2316 int vmci_qp_broker_unmap(struct vmci_handle handle, 2317 struct vmci_ctx *context, 2318 u32 gid) 2319 { 2320 struct qp_broker_entry *entry; 2321 const u32 context_id = vmci_ctx_get_id(context); 2322 int result; 2323 2324 if (vmci_handle_is_invalid(handle) || !context || 2325 context_id == VMCI_INVALID_ID) 2326 return VMCI_ERROR_INVALID_ARGS; 2327 2328 mutex_lock(&qp_broker_list.mutex); 2329 2330 if (!vmci_ctx_qp_exists(context, handle)) { 2331 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2332 context_id, handle.context, handle.resource); 2333 result = VMCI_ERROR_NOT_FOUND; 2334 goto out; 2335 } 2336 2337 entry = qp_broker_handle_to_entry(handle); 2338 if (!entry) { 2339 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2340 context_id, handle.context, handle.resource); 2341 result = VMCI_ERROR_NOT_FOUND; 2342 goto out; 2343 } 2344 2345 if (context_id != entry->create_id && context_id != entry->attach_id) { 2346 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2347 goto out; 2348 } 2349 2350 if (context_id != VMCI_HOST_CONTEXT_ID) { 2351 qp_acquire_queue_mutex(entry->produce_q); 2352 result = qp_save_headers(entry); 2353 if (result < VMCI_SUCCESS) 2354 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2355 handle.context, handle.resource, result); 2356 2357 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); 2358 2359 /* 2360 * On hosted, when we unmap queue pairs, the VMX will also 2361 * unmap the guest memory, so we invalidate the previously 2362 * registered memory. If the queue pair is mapped again at a 2363 * later point in time, we will need to reregister the user 2364 * memory with a possibly new user VA. 2365 */ 2366 qp_host_unregister_user_memory(entry->produce_q, 2367 entry->consume_q); 2368 2369 /* 2370 * Move state from *_MEM to *_NO_MEM. 2371 */ 2372 entry->state--; 2373 2374 qp_release_queue_mutex(entry->produce_q); 2375 } 2376 2377 result = VMCI_SUCCESS; 2378 2379 out: 2380 mutex_unlock(&qp_broker_list.mutex); 2381 return result; 2382 } 2383 2384 /* 2385 * Destroys all guest queue pair endpoints. If active guest queue 2386 * pairs still exist, hypercalls to attempt detach from these 2387 * queue pairs will be made. Any failure to detach is silently 2388 * ignored. 2389 */ 2390 void vmci_qp_guest_endpoints_exit(void) 2391 { 2392 struct qp_entry *entry; 2393 struct qp_guest_endpoint *ep; 2394 2395 mutex_lock(&qp_guest_endpoints.mutex); 2396 2397 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { 2398 ep = (struct qp_guest_endpoint *)entry; 2399 2400 /* Don't make a hypercall for local queue_pairs. */ 2401 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) 2402 qp_detatch_hypercall(entry->handle); 2403 2404 /* We cannot fail the exit, so let's reset ref_count. */ 2405 entry->ref_count = 0; 2406 qp_list_remove_entry(&qp_guest_endpoints, entry); 2407 2408 qp_guest_endpoint_destroy(ep); 2409 } 2410 2411 mutex_unlock(&qp_guest_endpoints.mutex); 2412 } 2413 2414 /* 2415 * Helper routine that will lock the queue pair before subsequent 2416 * operations. 2417 * Note: Non-blocking on the host side is currently only implemented in ESX. 2418 * Since non-blocking isn't yet implemented on the host personality we 2419 * have no reason to acquire a spin lock. So to avoid the use of an 2420 * unnecessary lock only acquire the mutex if we can block. 
2421 */ 2422 static void qp_lock(const struct vmci_qp *qpair) 2423 { 2424 qp_acquire_queue_mutex(qpair->produce_q); 2425 } 2426 2427 /* 2428 * Helper routine that unlocks the queue pair after calling 2429 * qp_lock. 2430 */ 2431 static void qp_unlock(const struct vmci_qp *qpair) 2432 { 2433 qp_release_queue_mutex(qpair->produce_q); 2434 } 2435 2436 /* 2437 * The queue headers may not be mapped at all times. If a queue is 2438 * currently not mapped, an attempt will be made to map it. 2439 */ 2440 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2441 struct vmci_queue *consume_q) 2442 { 2443 int result; 2444 2445 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2446 result = qp_host_map_queues(produce_q, consume_q); 2447 if (result < VMCI_SUCCESS) 2448 return (produce_q->saved_header && 2449 consume_q->saved_header) ? 2450 VMCI_ERROR_QUEUEPAIR_NOT_READY : 2451 VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2452 } 2453 2454 return VMCI_SUCCESS; 2455 } 2456 2457 /* 2458 * Helper routine that will retrieve the produce and consume 2459 * headers of a given queue pair. If the guest memory of the 2460 * queue pair is currently not available, the saved queue headers 2461 * will be returned, if these are available. 2462 */ 2463 static int qp_get_queue_headers(const struct vmci_qp *qpair, 2464 struct vmci_queue_header **produce_q_header, 2465 struct vmci_queue_header **consume_q_header) 2466 { 2467 int result; 2468 2469 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2470 if (result == VMCI_SUCCESS) { 2471 *produce_q_header = qpair->produce_q->q_header; 2472 *consume_q_header = qpair->consume_q->q_header; 2473 } else if (qpair->produce_q->saved_header && 2474 qpair->consume_q->saved_header) { 2475 *produce_q_header = qpair->produce_q->saved_header; 2476 *consume_q_header = qpair->consume_q->saved_header; 2477 result = VMCI_SUCCESS; 2478 } 2479 2480 return result; 2481 } 2482 2483 /* 2484 * Callback from VMCI queue pair broker indicating that a queue 2485 * pair that was previously not ready is now either ready or 2486 * gone forever. 2487 */ 2488 static int qp_wakeup_cb(void *client_data) 2489 { 2490 struct vmci_qp *qpair = (struct vmci_qp *)client_data; 2491 2492 qp_lock(qpair); 2493 while (qpair->blocked > 0) { 2494 qpair->blocked--; 2495 qpair->generation++; 2496 wake_up(&qpair->event); 2497 } 2498 qp_unlock(qpair); 2499 2500 return VMCI_SUCCESS; 2501 } 2502 2503 /* 2504 * Makes the calling thread wait for the queue pair to become 2505 * ready for host side access. Returns true when the thread is 2506 * woken up after queue pair state change, false otherwise. 2507 */ 2508 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) 2509 { 2510 unsigned int generation; 2511 2512 qpair->blocked++; 2513 generation = qpair->generation; 2514 qp_unlock(qpair); 2515 wait_event(qpair->event, generation != qpair->generation); 2516 qp_lock(qpair); 2517 2518 return true; 2519 } 2520 2521 /* 2522 * Enqueues a given buffer to the produce queue using the provided 2523 * function. As many bytes as possible (space available in the queue) 2524 * are enqueued. Assumes the queue->mutex has been acquired. Returns 2525 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue 2526 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the 2527 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if 2528 * an error occurred when accessing the buffer, 2529 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't 2530 * available.
Otherwise, the number of bytes written to the queue is 2531 * returned. Updates the tail pointer of the produce queue. 2532 */ 2533 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2534 struct vmci_queue *consume_q, 2535 const u64 produce_q_size, 2536 struct iov_iter *from) 2537 { 2538 s64 free_space; 2539 u64 tail; 2540 size_t buf_size = iov_iter_count(from); 2541 size_t written; 2542 ssize_t result; 2543 2544 result = qp_map_queue_headers(produce_q, consume_q); 2545 if (unlikely(result != VMCI_SUCCESS)) 2546 return result; 2547 2548 free_space = vmci_q_header_free_space(produce_q->q_header, 2549 consume_q->q_header, 2550 produce_q_size); 2551 if (free_space == 0) 2552 return VMCI_ERROR_QUEUEPAIR_NOSPACE; 2553 2554 if (free_space < VMCI_SUCCESS) 2555 return (ssize_t) free_space; 2556 2557 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2558 tail = vmci_q_header_producer_tail(produce_q->q_header); 2559 if (likely(tail + written < produce_q_size)) { 2560 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written); 2561 } else { 2562 /* Tail pointer wraps around. */ 2563 2564 const size_t tmp = (size_t) (produce_q_size - tail); 2565 2566 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp); 2567 if (result >= VMCI_SUCCESS) 2568 result = qp_memcpy_to_queue_iter(produce_q, 0, from, 2569 written - tmp); 2570 } 2571 2572 if (result < VMCI_SUCCESS) 2573 return result; 2574 2575 vmci_q_header_add_producer_tail(produce_q->q_header, written, 2576 produce_q_size); 2577 return written; 2578 } 2579 2580 /* 2581 * Dequeues data (if available) from the given consume queue. Writes data 2582 * to the user-provided buffer using the provided function. 2583 * Assumes the queue->mutex has been acquired. 2584 * Results: 2585 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. 2586 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue 2587 * (as defined by the queue size). 2588 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer. 2589 * Otherwise the number of bytes dequeued is returned. 2590 * Side effects: 2591 * Updates the head pointer of the consume queue. 2592 */ 2593 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2594 struct vmci_queue *consume_q, 2595 const u64 consume_q_size, 2596 struct iov_iter *to, 2597 bool update_consumer) 2598 { 2599 size_t buf_size = iov_iter_count(to); 2600 s64 buf_ready; 2601 u64 head; 2602 size_t read; 2603 ssize_t result; 2604 2605 result = qp_map_queue_headers(produce_q, consume_q); 2606 if (unlikely(result != VMCI_SUCCESS)) 2607 return result; 2608 2609 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, 2610 produce_q->q_header, 2611 consume_q_size); 2612 if (buf_ready == 0) 2613 return VMCI_ERROR_QUEUEPAIR_NODATA; 2614 2615 if (buf_ready < VMCI_SUCCESS) 2616 return (ssize_t) buf_ready; 2617 2618 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2619 head = vmci_q_header_consumer_head(produce_q->q_header); 2620 if (likely(head + read < consume_q_size)) { 2621 result = qp_memcpy_from_queue_iter(to, consume_q, head, read); 2622 } else { 2623 /* Head pointer wraps around.
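		 * As an illustrative example with hypothetical numbers:
		 * if consume_q_size = 8, head = 6 and read = 5, the first
		 * copy takes tmp = 2 bytes from offsets 6..7 and the
		 * second copy takes the remaining 3 bytes starting at
		 * offset 0. If update_consumer is set, the head then
		 * advances to (6 + 5) % 8 = 3.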
*/ 2624 2625 const size_t tmp = (size_t) (consume_q_size - head); 2626 2627 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp); 2628 if (result >= VMCI_SUCCESS) 2629 result = qp_memcpy_from_queue_iter(to, consume_q, 0, 2630 read - tmp); 2631 2632 } 2633 2634 if (result < VMCI_SUCCESS) 2635 return result; 2636 2637 if (update_consumer) 2638 vmci_q_header_add_consumer_head(produce_q->q_header, 2639 read, consume_q_size); 2640 2641 return read; 2642 } 2643 2644 /* 2645 * vmci_qpair_alloc() - Allocates a queue pair. 2646 * @qpair: Pointer for the new vmci_qp struct. 2647 * @handle: Handle to track the resource. 2648 * @produce_qsize: Desired size of the producer queue. 2649 * @consume_qsize: Desired size of the consumer queue. 2650 * @peer: ContextID of the peer. 2651 * @flags: VMCI flags. 2652 * @priv_flags: VMCI privilege flags. 2653 * 2654 * This is the client interface for allocating the memory for a 2655 * vmci_qp structure and then attaching to the underlying 2656 * queue. If an error occurs allocating the memory for the 2657 * vmci_qp structure, no attempt is made to attach. If an 2658 * error occurs attaching, then the structure is freed. 2659 */ 2660 int vmci_qpair_alloc(struct vmci_qp **qpair, 2661 struct vmci_handle *handle, 2662 u64 produce_qsize, 2663 u64 consume_qsize, 2664 u32 peer, 2665 u32 flags, 2666 u32 priv_flags) 2667 { 2668 struct vmci_qp *my_qpair; 2669 int retval; 2670 struct vmci_handle src = VMCI_INVALID_HANDLE; 2671 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID); 2672 enum vmci_route route; 2673 vmci_event_release_cb wakeup_cb; 2674 void *client_data; 2675 2676 /* 2677 * Restrict the size of a queuepair. The device already 2678 * enforces a limit on the total amount of memory that can be 2679 * allocated to queuepairs for a guest. However, we try to 2680 * allocate this memory before we make the queuepair 2681 * allocation hypercall. On Linux, we allocate each page 2682 * separately, which means rather than fail, the guest will 2683 * thrash while it tries to allocate, and will become 2684 * increasingly unresponsive to the point where it appears to 2685 * be hung. So we place a limit on the size of an individual 2686 * queuepair here, and leave the device to enforce the 2687 * restriction on total queuepair memory. (Note that this 2688 * doesn't prevent all cases; a user with only this much 2689 * physical memory could still get into trouble.) The error 2690 * used by the device is NO_RESOURCES, so use that here too. 2691 */ 2692 2693 if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize)) 2694 return VMCI_ERROR_NO_RESOURCES; 2695 2696 retval = vmci_route(&src, &dst, false, &route); 2697 if (retval < VMCI_SUCCESS) 2698 route = vmci_guest_code_active() ?
2699 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2700 2701 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2702 pr_devel("NONBLOCK OR PINNED set"); 2703 return VMCI_ERROR_INVALID_ARGS; 2704 } 2705 2706 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); 2707 if (!my_qpair) 2708 return VMCI_ERROR_NO_MEM; 2709 2710 my_qpair->produce_q_size = produce_qsize; 2711 my_qpair->consume_q_size = consume_qsize; 2712 my_qpair->peer = peer; 2713 my_qpair->flags = flags; 2714 my_qpair->priv_flags = priv_flags; 2715 2716 wakeup_cb = NULL; 2717 client_data = NULL; 2718 2719 if (VMCI_ROUTE_AS_HOST == route) { 2720 my_qpair->guest_endpoint = false; 2721 if (!(flags & VMCI_QPFLAG_LOCAL)) { 2722 my_qpair->blocked = 0; 2723 my_qpair->generation = 0; 2724 init_waitqueue_head(&my_qpair->event); 2725 wakeup_cb = qp_wakeup_cb; 2726 client_data = (void *)my_qpair; 2727 } 2728 } else { 2729 my_qpair->guest_endpoint = true; 2730 } 2731 2732 retval = vmci_qp_alloc(handle, 2733 &my_qpair->produce_q, 2734 my_qpair->produce_q_size, 2735 &my_qpair->consume_q, 2736 my_qpair->consume_q_size, 2737 my_qpair->peer, 2738 my_qpair->flags, 2739 my_qpair->priv_flags, 2740 my_qpair->guest_endpoint, 2741 wakeup_cb, client_data); 2742 2743 if (retval < VMCI_SUCCESS) { 2744 kfree(my_qpair); 2745 return retval; 2746 } 2747 2748 *qpair = my_qpair; 2749 my_qpair->handle = *handle; 2750 2751 return retval; 2752 } 2753 EXPORT_SYMBOL_GPL(vmci_qpair_alloc); 2754 2755 /* 2756 * vmci_qpair_detach() - Detaches the client from a queue pair. 2757 * @qpair: Reference of a pointer to the qpair struct. 2758 * 2759 * This is the client interface for detaching from a VMCIQPair. 2760 * Note that this routine will free the memory allocated for the 2761 * vmci_qp structure too. 2762 */ 2763 int vmci_qpair_detach(struct vmci_qp **qpair) 2764 { 2765 int result; 2766 struct vmci_qp *old_qpair; 2767 2768 if (!qpair || !(*qpair)) 2769 return VMCI_ERROR_INVALID_ARGS; 2770 2771 old_qpair = *qpair; 2772 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); 2773 2774 /* 2775 * The guest can fail to detach for a number of reasons, and 2776 * if it does so, it will clean up the entry (if there is one). 2777 * The host can fail too, but it won't clean up the entry 2778 * immediately; it will do that later when the context is 2779 * freed. Either way, we need to release the qpair struct 2780 * here; there isn't much the caller can do, and we don't want 2781 * to leak. 2782 */ 2783 2784 memset(old_qpair, 0, sizeof(*old_qpair)); 2785 old_qpair->handle = VMCI_INVALID_HANDLE; 2786 old_qpair->peer = VMCI_INVALID_ID; 2787 kfree(old_qpair); 2788 *qpair = NULL; 2789 2790 return result; 2791 } 2792 EXPORT_SYMBOL_GPL(vmci_qpair_detach); 2793 2794 /* 2795 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. 2796 * @qpair: Pointer to the queue pair struct. 2797 * @producer_tail: Reference used for storing producer tail index. 2798 * @consumer_head: Reference used for storing the consumer head index. 2799 * 2800 * This is the client interface for getting the current indexes of the 2801 * QPair from the point of view of the caller as the producer.
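 *
 * For example (a hedged sketch; the qpair comes from vmci_qpair_alloc()
 * and produce_qsize is the produce queue size the caller requested
 * there), the producer can derive how much data is currently queued:
 *
 *	u64 tail, head, used;
 *
 *	if (vmci_qpair_get_produce_indexes(qpair, &tail, &head) ==
 *	    VMCI_SUCCESS)
 *		used = (tail >= head) ? tail - head :
 *			produce_qsize - (head - tail);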
2802 */ 2803 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, 2804 u64 *producer_tail, 2805 u64 *consumer_head) 2806 { 2807 struct vmci_queue_header *produce_q_header; 2808 struct vmci_queue_header *consume_q_header; 2809 int result; 2810 2811 if (!qpair) 2812 return VMCI_ERROR_INVALID_ARGS; 2813 2814 qp_lock(qpair); 2815 result = 2816 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2817 if (result == VMCI_SUCCESS) 2818 vmci_q_header_get_pointers(produce_q_header, consume_q_header, 2819 producer_tail, consumer_head); 2820 qp_unlock(qpair); 2821 2822 if (result == VMCI_SUCCESS && 2823 ((producer_tail && *producer_tail >= qpair->produce_q_size) || 2824 (consumer_head && *consumer_head >= qpair->produce_q_size))) 2825 return VMCI_ERROR_INVALID_SIZE; 2826 2827 return result; 2828 } 2829 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); 2830 2831 /* 2832 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. 2833 * @qpair: Pointer to the queue pair struct. 2834 * @consumer_tail: Reference used for storing consumer tail index. 2835 * @producer_head: Reference used for storing the producer head index. 2836 * 2837 * This is the client interface for getting the current indexes of the 2838 * QPair from the point of the view of the caller as the consumer. 2839 */ 2840 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, 2841 u64 *consumer_tail, 2842 u64 *producer_head) 2843 { 2844 struct vmci_queue_header *produce_q_header; 2845 struct vmci_queue_header *consume_q_header; 2846 int result; 2847 2848 if (!qpair) 2849 return VMCI_ERROR_INVALID_ARGS; 2850 2851 qp_lock(qpair); 2852 result = 2853 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2854 if (result == VMCI_SUCCESS) 2855 vmci_q_header_get_pointers(consume_q_header, produce_q_header, 2856 consumer_tail, producer_head); 2857 qp_unlock(qpair); 2858 2859 if (result == VMCI_SUCCESS && 2860 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || 2861 (producer_head && *producer_head >= qpair->consume_q_size))) 2862 return VMCI_ERROR_INVALID_SIZE; 2863 2864 return result; 2865 } 2866 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); 2867 2868 /* 2869 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. 2870 * @qpair: Pointer to the queue pair struct. 2871 * 2872 * This is the client interface for getting the amount of free 2873 * space in the QPair from the point of the view of the caller as 2874 * the producer which is the common case. Returns < 0 if err, else 2875 * available bytes into which data can be enqueued if > 0. 2876 */ 2877 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) 2878 { 2879 struct vmci_queue_header *produce_q_header; 2880 struct vmci_queue_header *consume_q_header; 2881 s64 result; 2882 2883 if (!qpair) 2884 return VMCI_ERROR_INVALID_ARGS; 2885 2886 qp_lock(qpair); 2887 result = 2888 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2889 if (result == VMCI_SUCCESS) 2890 result = vmci_q_header_free_space(produce_q_header, 2891 consume_q_header, 2892 qpair->produce_q_size); 2893 else 2894 result = 0; 2895 2896 qp_unlock(qpair); 2897 2898 return result; 2899 } 2900 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); 2901 2902 /* 2903 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. 2904 * @qpair: Pointer to the queue pair struct. 
2905 * 2906 * This is the client interface for getting the amount of free 2907 * space in the QPair from the point of the view of the caller as 2908 * the consumer which is not the common case. Returns < 0 if err, else 2909 * available bytes into which data can be enqueued if > 0. 2910 */ 2911 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) 2912 { 2913 struct vmci_queue_header *produce_q_header; 2914 struct vmci_queue_header *consume_q_header; 2915 s64 result; 2916 2917 if (!qpair) 2918 return VMCI_ERROR_INVALID_ARGS; 2919 2920 qp_lock(qpair); 2921 result = 2922 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2923 if (result == VMCI_SUCCESS) 2924 result = vmci_q_header_free_space(consume_q_header, 2925 produce_q_header, 2926 qpair->consume_q_size); 2927 else 2928 result = 0; 2929 2930 qp_unlock(qpair); 2931 2932 return result; 2933 } 2934 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); 2935 2936 /* 2937 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from 2938 * producer queue. 2939 * @qpair: Pointer to the queue pair struct. 2940 * 2941 * This is the client interface for getting the amount of 2942 * enqueued data in the QPair from the point of the view of the 2943 * caller as the producer which is not the common case. Returns < 0 if err, 2944 * else available bytes that may be read. 2945 */ 2946 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) 2947 { 2948 struct vmci_queue_header *produce_q_header; 2949 struct vmci_queue_header *consume_q_header; 2950 s64 result; 2951 2952 if (!qpair) 2953 return VMCI_ERROR_INVALID_ARGS; 2954 2955 qp_lock(qpair); 2956 result = 2957 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2958 if (result == VMCI_SUCCESS) 2959 result = vmci_q_header_buf_ready(produce_q_header, 2960 consume_q_header, 2961 qpair->produce_q_size); 2962 else 2963 result = 0; 2964 2965 qp_unlock(qpair); 2966 2967 return result; 2968 } 2969 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); 2970 2971 /* 2972 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from 2973 * consumer queue. 2974 * @qpair: Pointer to the queue pair struct. 2975 * 2976 * This is the client interface for getting the amount of 2977 * enqueued data in the QPair from the point of the view of the 2978 * caller as the consumer which is the normal case. Returns < 0 if err, 2979 * else available bytes that may be read. 2980 */ 2981 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) 2982 { 2983 struct vmci_queue_header *produce_q_header; 2984 struct vmci_queue_header *consume_q_header; 2985 s64 result; 2986 2987 if (!qpair) 2988 return VMCI_ERROR_INVALID_ARGS; 2989 2990 qp_lock(qpair); 2991 result = 2992 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2993 if (result == VMCI_SUCCESS) 2994 result = vmci_q_header_buf_ready(consume_q_header, 2995 produce_q_header, 2996 qpair->consume_q_size); 2997 else 2998 result = 0; 2999 3000 qp_unlock(qpair); 3001 3002 return result; 3003 } 3004 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); 3005 3006 /* 3007 * vmci_qpair_enqueue() - Throw data on the queue. 3008 * @qpair: Pointer to the queue pair struct. 3009 * @buf: Pointer to buffer containing data 3010 * @buf_size: Length of buffer. 3011 * @buf_type: Buffer type (Unused). 3012 * 3013 * This is the client interface for enqueueing data into the queue. 3014 * Returns number of bytes enqueued or < 0 on error. 
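 *
 * A minimal usage sketch (assuming a qpair obtained from
 * vmci_qpair_alloc(); error handling abbreviated):
 *
 *	char data[] = "hello";
 *	ssize_t written;
 *
 *	written = vmci_qpair_enqueue(qpair, data, sizeof(data), 0);
 *	if (written < VMCI_SUCCESS)
 *		pr_warn("enqueue failed: %zd\n", written);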
3015 */ 3016 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, 3017 const void *buf, 3018 size_t buf_size, 3019 int buf_type) 3020 { 3021 ssize_t result; 3022 struct iov_iter from; 3023 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size}; 3024 3025 if (!qpair || !buf) 3026 return VMCI_ERROR_INVALID_ARGS; 3027 3028 iov_iter_kvec(&from, WRITE, &v, 1, buf_size); 3029 3030 qp_lock(qpair); 3031 3032 do { 3033 result = qp_enqueue_locked(qpair->produce_q, 3034 qpair->consume_q, 3035 qpair->produce_q_size, 3036 &from); 3037 3038 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3039 !qp_wait_for_ready_queue(qpair)) 3040 result = VMCI_ERROR_WOULD_BLOCK; 3041 3042 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3043 3044 qp_unlock(qpair); 3045 3046 return result; 3047 } 3048 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); 3049 3050 /* 3051 * vmci_qpair_dequeue() - Get data from the queue. 3052 * @qpair: Pointer to the queue pair struct. 3053 * @buf: Pointer to buffer for the data 3054 * @buf_size: Length of buffer. 3055 * @buf_type: Buffer type (Unused). 3056 * 3057 * This is the client interface for dequeueing data from the queue. 3058 * Returns number of bytes dequeued or < 0 on error. 3059 */ 3060 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, 3061 void *buf, 3062 size_t buf_size, 3063 int buf_type) 3064 { 3065 ssize_t result; 3066 struct iov_iter to; 3067 struct kvec v = {.iov_base = buf, .iov_len = buf_size}; 3068 3069 if (!qpair || !buf) 3070 return VMCI_ERROR_INVALID_ARGS; 3071 3072 iov_iter_kvec(&to, READ, &v, 1, buf_size); 3073 3074 qp_lock(qpair); 3075 3076 do { 3077 result = qp_dequeue_locked(qpair->produce_q, 3078 qpair->consume_q, 3079 qpair->consume_q_size, 3080 &to, true); 3081 3082 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3083 !qp_wait_for_ready_queue(qpair)) 3084 result = VMCI_ERROR_WOULD_BLOCK; 3085 3086 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3087 3088 qp_unlock(qpair); 3089 3090 return result; 3091 } 3092 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); 3093 3094 /* 3095 * vmci_qpair_peek() - Peek at the data in the queue. 3096 * @qpair: Pointer to the queue pair struct. 3097 * @buf: Pointer to buffer for the data 3098 * @buf_size: Length of buffer. 3099 * @buf_type: Buffer type (Unused on Linux). 3100 * 3101 * This is the client interface for peeking into a queue. (I.e., 3102 * copy data from the queue without updating the head pointer.) 3103 * Returns number of bytes dequeued or < 0 on error. 3104 */ 3105 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, 3106 void *buf, 3107 size_t buf_size, 3108 int buf_type) 3109 { 3110 struct iov_iter to; 3111 struct kvec v = {.iov_base = buf, .iov_len = buf_size}; 3112 ssize_t result; 3113 3114 if (!qpair || !buf) 3115 return VMCI_ERROR_INVALID_ARGS; 3116 3117 iov_iter_kvec(&to, READ, &v, 1, buf_size); 3118 3119 qp_lock(qpair); 3120 3121 do { 3122 result = qp_dequeue_locked(qpair->produce_q, 3123 qpair->consume_q, 3124 qpair->consume_q_size, 3125 &to, false); 3126 3127 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3128 !qp_wait_for_ready_queue(qpair)) 3129 result = VMCI_ERROR_WOULD_BLOCK; 3130 3131 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3132 3133 qp_unlock(qpair); 3134 3135 return result; 3136 } 3137 EXPORT_SYMBOL_GPL(vmci_qpair_peek); 3138 3139 /* 3140 * vmci_qpair_enquev() - Throw data on the queue using iov. 3141 * @qpair: Pointer to the queue pair struct. 3142 * @iov: Pointer to buffer containing data 3143 * @iov_size: Length of buffer. 3144 * @buf_type: Buffer type (Unused). 
3145 * 3146 * This is the client interface for enqueueing data into the queue. 3147 * This function uses IO vectors to handle the work. Returns number 3148 * of bytes enqueued or < 0 on error. 3149 */ 3150 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 3151 struct msghdr *msg, 3152 size_t iov_size, 3153 int buf_type) 3154 { 3155 ssize_t result; 3156 3157 if (!qpair) 3158 return VMCI_ERROR_INVALID_ARGS; 3159 3160 qp_lock(qpair); 3161 3162 do { 3163 result = qp_enqueue_locked(qpair->produce_q, 3164 qpair->consume_q, 3165 qpair->produce_q_size, 3166 &msg->msg_iter); 3167 3168 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3169 !qp_wait_for_ready_queue(qpair)) 3170 result = VMCI_ERROR_WOULD_BLOCK; 3171 3172 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3173 3174 qp_unlock(qpair); 3175 3176 return result; 3177 } 3178 EXPORT_SYMBOL_GPL(vmci_qpair_enquev); 3179 3180 /* 3181 * vmci_qpair_dequev() - Get data from the queue using iov. 3182 * @qpair: Pointer to the queue pair struct. 3183 * @iov: Pointer to buffer for the data 3184 * @iov_size: Length of buffer. 3185 * @buf_type: Buffer type (Unused). 3186 * 3187 * This is the client interface for dequeueing data from the queue. 3188 * This function uses IO vectors to handle the work. Returns number 3189 * of bytes dequeued or < 0 on error. 3190 */ 3191 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 3192 struct msghdr *msg, 3193 size_t iov_size, 3194 int buf_type) 3195 { 3196 ssize_t result; 3197 3198 if (!qpair) 3199 return VMCI_ERROR_INVALID_ARGS; 3200 3201 qp_lock(qpair); 3202 3203 do { 3204 result = qp_dequeue_locked(qpair->produce_q, 3205 qpair->consume_q, 3206 qpair->consume_q_size, 3207 &msg->msg_iter, true); 3208 3209 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3210 !qp_wait_for_ready_queue(qpair)) 3211 result = VMCI_ERROR_WOULD_BLOCK; 3212 3213 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3214 3215 qp_unlock(qpair); 3216 3217 return result; 3218 } 3219 EXPORT_SYMBOL_GPL(vmci_qpair_dequev); 3220 3221 /* 3222 * vmci_qpair_peekv() - Peek at the data in the queue using iov. 3223 * @qpair: Pointer to the queue pair struct. 3224 * @iov: Pointer to buffer for the data 3225 * @iov_size: Length of buffer. 3226 * @buf_type: Buffer type (Unused on Linux). 3227 * 3228 * This is the client interface for peeking into a queue. (I.e., 3229 * copy data from the queue without updating the head pointer.) 3230 * This function uses IO vectors to handle the work. Returns number 3231 * of bytes peeked or < 0 on error. 3232 */ 3233 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, 3234 struct msghdr *msg, 3235 size_t iov_size, 3236 int buf_type) 3237 { 3238 ssize_t result; 3239 3240 if (!qpair) 3241 return VMCI_ERROR_INVALID_ARGS; 3242 3243 qp_lock(qpair); 3244 3245 do { 3246 result = qp_dequeue_locked(qpair->produce_q, 3247 qpair->consume_q, 3248 qpair->consume_q_size, 3249 &msg->msg_iter, false); 3250 3251 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3252 !qp_wait_for_ready_queue(qpair)) 3253 result = VMCI_ERROR_WOULD_BLOCK; 3254 3255 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3256 3257 qp_unlock(qpair); 3258 return result; 3259 } 3260 EXPORT_SYMBOL_GPL(vmci_qpair_peekv); 3261
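
/*
 * A hedged end-to-end sketch of the exported client API above. The peer,
 * queue sizes and error handling are illustrative only, and a guest
 * endpoint talking to the host is assumed. Data enqueued here lands in
 * the peer's consume queue, and vmci_qpair_dequeue() reads what the peer
 * enqueued.
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	char out[] = "ping";
 *	char in[16];
 *
 *	if (vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
 *			     VMCI_HOST_CONTEXT_ID, 0,
 *			     VMCI_NO_PRIVILEGE_FLAGS) < VMCI_SUCCESS)
 *		return;
 *
 *	vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
 *	vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
 *	vmci_qpair_detach(&qpair);
 */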