/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM, which use specialized
 * VMCI page files in the VMX to support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                             |  |
 *           \_/                           \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |    |                           |
 *            |    o----------------------o    |
 *            |                            |   |
 *           \_/                          \_/ \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair.
 * Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style VMX
 *     will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory, the
 *     VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * a *_MEM state to a *_NO_MEM state. The VMX may later map the memory once
 * more, in which case the queue pair will transition from the *_NO_MEM state
 * at that point back to the *_MEM state. Note that the *_NO_MEM state may have
 * changed, since the peer may have either attached or detached in the
 * meantime. The values are laid out such that ++ on a state will move from a
 * *_NO_MEM to a *_MEM state, and vice versa.
 */

/*
 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
 * types are passed around to enqueue and dequeue routines. Note that
 * often the functions passed are simply wrappers around memcpy
 * itself.
 *
 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
 * there's an unused last parameter for the hosted side. In
 * ESX, that parameter holds a buffer type.
 */
typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
				      u64 queue_offset, const void *src,
				      size_t src_offset, size_t size);
typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
					const struct vmci_queue *queue,
					u64 queue_offset, size_t size);

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
169 */ 170 struct vmci_qp { 171 struct vmci_handle handle; 172 struct vmci_queue *produce_q; 173 struct vmci_queue *consume_q; 174 u64 produce_q_size; 175 u64 consume_q_size; 176 u32 peer; 177 u32 flags; 178 u32 priv_flags; 179 bool guest_endpoint; 180 unsigned int blocked; 181 unsigned int generation; 182 wait_queue_head_t event; 183 }; 184 185 enum qp_broker_state { 186 VMCIQPB_NEW, 187 VMCIQPB_CREATED_NO_MEM, 188 VMCIQPB_CREATED_MEM, 189 VMCIQPB_ATTACHED_NO_MEM, 190 VMCIQPB_ATTACHED_MEM, 191 VMCIQPB_SHUTDOWN_NO_MEM, 192 VMCIQPB_SHUTDOWN_MEM, 193 VMCIQPB_GONE 194 }; 195 196 #define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \ 197 _qpb->state == VMCIQPB_ATTACHED_MEM || \ 198 _qpb->state == VMCIQPB_SHUTDOWN_MEM) 199 200 /* 201 * In the queue pair broker, we always use the guest point of view for 202 * the produce and consume queue values and references, e.g., the 203 * produce queue size stored is the guests produce queue size. The 204 * host endpoint will need to swap these around. The only exception is 205 * the local queue pairs on the host, in which case the host endpoint 206 * that creates the queue pair will have the right orientation, and 207 * the attaching host endpoint will need to swap. 208 */ 209 struct qp_entry { 210 struct list_head list_item; 211 struct vmci_handle handle; 212 u32 peer; 213 u32 flags; 214 u64 produce_size; 215 u64 consume_size; 216 u32 ref_count; 217 }; 218 219 struct qp_broker_entry { 220 struct vmci_resource resource; 221 struct qp_entry qp; 222 u32 create_id; 223 u32 attach_id; 224 enum qp_broker_state state; 225 bool require_trusted_attach; 226 bool created_by_trusted; 227 bool vmci_page_files; /* Created by VMX using VMCI page files */ 228 struct vmci_queue *produce_q; 229 struct vmci_queue *consume_q; 230 struct vmci_queue_header saved_produce_q; 231 struct vmci_queue_header saved_consume_q; 232 vmci_event_release_cb wakeup_cb; 233 void *client_data; 234 void *local_mem; /* Kernel memory for local queue pair */ 235 }; 236 237 struct qp_guest_endpoint { 238 struct vmci_resource resource; 239 struct qp_entry qp; 240 u64 num_ppns; 241 void *produce_q; 242 void *consume_q; 243 struct ppn_set ppn_set; 244 }; 245 246 struct qp_list { 247 struct list_head head; 248 struct mutex mutex; /* Protect queue list. */ 249 }; 250 251 static struct qp_list qp_broker_list = { 252 .head = LIST_HEAD_INIT(qp_broker_list.head), 253 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex), 254 }; 255 256 static struct qp_list qp_guest_endpoints = { 257 .head = LIST_HEAD_INIT(qp_guest_endpoints.head), 258 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex), 259 }; 260 261 #define INVALID_VMCI_GUEST_MEM_ID 0 262 #define QPE_NUM_PAGES(_QPE) ((u32) \ 263 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ 264 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) 265 266 267 /* 268 * Frees kernel VA space for a given queue and its queue header, and 269 * frees physical data pages. 270 */ 271 static void qp_free_queue(void *q, u64 size) 272 { 273 struct vmci_queue *queue = q; 274 275 if (queue) { 276 u64 i; 277 278 /* Given size does not include header, so add in a page here. */ 279 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) { 280 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE, 281 queue->kernel_if->u.g.vas[i], 282 queue->kernel_if->u.g.pas[i]); 283 } 284 285 vfree(queue); 286 } 287 } 288 289 /* 290 * Allocates kernel queue pages of specified size with IOMMU mappings, 291 * plus space for the queue structure/kernel interface and the queue 292 * header. 
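 */

/*
 * Illustrative sketch (not part of the driver): how many backing pages a
 * guest queue pair needs. Each queue gets DIV_ROUND_UP(size, PAGE_SIZE)
 * data pages plus one page for its queue header, which is where the "+ 1"
 * in qp_alloc_queue() below and the "+ 2" in QPE_NUM_PAGES above come
 * from. For example, a 128 KB produce queue and a 64 KB consume queue on
 * a 4 KB page system need 32 + 16 + 2 = 50 pages. This helper is
 * hypothetical and only restates that arithmetic.
 */
static inline u64 qp_example_num_pages(u64 produce_size, u64 consume_size)
{
	return DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	       DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
}

/*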
293 */ 294 static void *qp_alloc_queue(u64 size, u32 flags) 295 { 296 u64 i; 297 struct vmci_queue *queue; 298 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 299 const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); 300 const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); 301 const size_t queue_size = 302 sizeof(*queue) + sizeof(*queue->kernel_if) + 303 pas_size + vas_size; 304 305 queue = vmalloc(queue_size); 306 if (!queue) 307 return NULL; 308 309 queue->q_header = NULL; 310 queue->saved_header = NULL; 311 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); 312 queue->kernel_if->mutex = NULL; 313 queue->kernel_if->num_pages = num_pages; 314 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); 315 queue->kernel_if->u.g.vas = 316 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); 317 queue->kernel_if->host = false; 318 319 for (i = 0; i < num_pages; i++) { 320 queue->kernel_if->u.g.vas[i] = 321 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, 322 &queue->kernel_if->u.g.pas[i], 323 GFP_KERNEL); 324 if (!queue->kernel_if->u.g.vas[i]) { 325 /* Size excl. the header. */ 326 qp_free_queue(queue, i * PAGE_SIZE); 327 return NULL; 328 } 329 } 330 331 /* Queue header is the first page. */ 332 queue->q_header = queue->kernel_if->u.g.vas[0]; 333 334 return queue; 335 } 336 337 /* 338 * Copies from a given buffer or iovector to a VMCI Queue. Uses 339 * kmap()/kunmap() to dynamically map/unmap required portions of the queue 340 * by traversing the offset -> page translation structure for the queue. 341 * Assumes that offset + size does not wrap around in the queue. 342 */ 343 static int __qp_memcpy_to_queue(struct vmci_queue *queue, 344 u64 queue_offset, 345 const void *src, 346 size_t size, 347 bool is_iovec) 348 { 349 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 350 size_t bytes_copied = 0; 351 352 while (bytes_copied < size) { 353 const u64 page_index = 354 (queue_offset + bytes_copied) / PAGE_SIZE; 355 const size_t page_offset = 356 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 357 void *va; 358 size_t to_copy; 359 360 if (kernel_if->host) 361 va = kmap(kernel_if->u.h.page[page_index]); 362 else 363 va = kernel_if->u.g.vas[page_index + 1]; 364 /* Skip header. */ 365 366 if (size - bytes_copied > PAGE_SIZE - page_offset) 367 /* Enough payload to fill up from this page. */ 368 to_copy = PAGE_SIZE - page_offset; 369 else 370 to_copy = size - bytes_copied; 371 372 if (is_iovec) { 373 struct msghdr *msg = (struct msghdr *)src; 374 int err; 375 376 /* The iovec will track bytes_copied internally. */ 377 err = memcpy_from_msg((u8 *)va + page_offset, 378 msg, to_copy); 379 if (err != 0) { 380 if (kernel_if->host) 381 kunmap(kernel_if->u.h.page[page_index]); 382 return VMCI_ERROR_INVALID_ARGS; 383 } 384 } else { 385 memcpy((u8 *)va + page_offset, 386 (u8 *)src + bytes_copied, to_copy); 387 } 388 389 bytes_copied += to_copy; 390 if (kernel_if->host) 391 kunmap(kernel_if->u.h.page[page_index]); 392 } 393 394 return VMCI_SUCCESS; 395 } 396 397 /* 398 * Copies to a given buffer or iovector from a VMCI Queue. Uses 399 * kmap()/kunmap() to dynamically map/unmap required portions of the queue 400 * by traversing the offset -> page translation structure for the queue. 401 * Assumes that offset + size does not wrap around in the queue. 
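 */

/*
 * Illustrative sketch (not part of the driver): the copy loops above and
 * below turn a linear queue offset into a (page index, offset within page)
 * pair. With 4 KB pages, queue_offset 9000 falls 808 bytes into data page 2
 * (the guest side then adds one to the index to skip the header page).
 * This hypothetical helper only restates that arithmetic.
 */
static inline void qp_example_offset_to_page(u64 queue_offset,
					     u64 *page_index,
					     size_t *page_offset)
{
	*page_index = queue_offset / PAGE_SIZE;
	*page_offset = queue_offset & (PAGE_SIZE - 1);
}

/*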
402 */ 403 static int __qp_memcpy_from_queue(void *dest, 404 const struct vmci_queue *queue, 405 u64 queue_offset, 406 size_t size, 407 bool is_iovec) 408 { 409 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 410 size_t bytes_copied = 0; 411 412 while (bytes_copied < size) { 413 const u64 page_index = 414 (queue_offset + bytes_copied) / PAGE_SIZE; 415 const size_t page_offset = 416 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 417 void *va; 418 size_t to_copy; 419 420 if (kernel_if->host) 421 va = kmap(kernel_if->u.h.page[page_index]); 422 else 423 va = kernel_if->u.g.vas[page_index + 1]; 424 /* Skip header. */ 425 426 if (size - bytes_copied > PAGE_SIZE - page_offset) 427 /* Enough payload to fill up this page. */ 428 to_copy = PAGE_SIZE - page_offset; 429 else 430 to_copy = size - bytes_copied; 431 432 if (is_iovec) { 433 struct msghdr *msg = dest; 434 int err; 435 436 /* The iovec will track bytes_copied internally. */ 437 err = memcpy_to_msg(msg, (u8 *)va + page_offset, 438 to_copy); 439 if (err != 0) { 440 if (kernel_if->host) 441 kunmap(kernel_if->u.h.page[page_index]); 442 return VMCI_ERROR_INVALID_ARGS; 443 } 444 } else { 445 memcpy((u8 *)dest + bytes_copied, 446 (u8 *)va + page_offset, to_copy); 447 } 448 449 bytes_copied += to_copy; 450 if (kernel_if->host) 451 kunmap(kernel_if->u.h.page[page_index]); 452 } 453 454 return VMCI_SUCCESS; 455 } 456 457 /* 458 * Allocates two list of PPNs --- one for the pages in the produce queue, 459 * and the other for the pages in the consume queue. Intializes the list 460 * of PPNs with the page frame numbers of the KVA for the two queues (and 461 * the queue headers). 462 */ 463 static int qp_alloc_ppn_set(void *prod_q, 464 u64 num_produce_pages, 465 void *cons_q, 466 u64 num_consume_pages, struct ppn_set *ppn_set) 467 { 468 u32 *produce_ppns; 469 u32 *consume_ppns; 470 struct vmci_queue *produce_q = prod_q; 471 struct vmci_queue *consume_q = cons_q; 472 u64 i; 473 474 if (!produce_q || !num_produce_pages || !consume_q || 475 !num_consume_pages || !ppn_set) 476 return VMCI_ERROR_INVALID_ARGS; 477 478 if (ppn_set->initialized) 479 return VMCI_ERROR_ALREADY_EXISTS; 480 481 produce_ppns = 482 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL); 483 if (!produce_ppns) 484 return VMCI_ERROR_NO_MEM; 485 486 consume_ppns = 487 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL); 488 if (!consume_ppns) { 489 kfree(produce_ppns); 490 return VMCI_ERROR_NO_MEM; 491 } 492 493 for (i = 0; i < num_produce_pages; i++) { 494 unsigned long pfn; 495 496 produce_ppns[i] = 497 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 498 pfn = produce_ppns[i]; 499 500 /* Fail allocation if PFN isn't supported by hypervisor. */ 501 if (sizeof(pfn) > sizeof(*produce_ppns) 502 && pfn != produce_ppns[i]) 503 goto ppn_error; 504 } 505 506 for (i = 0; i < num_consume_pages; i++) { 507 unsigned long pfn; 508 509 consume_ppns[i] = 510 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 511 pfn = consume_ppns[i]; 512 513 /* Fail allocation if PFN isn't supported by hypervisor. 
 */
		if (sizeof(pfn) > sizeof(*consume_ppns)
		    && pfn != consume_ppns[i])
			goto ppn_error;
	}

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;

 ppn_error:
	kfree(produce_ppns);
	kfree(consume_ppns);
	return VMCI_ERROR_INVALID_ARGS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	memcpy(call_buf, ppn_set->produce_ppns,
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
	memcpy(call_buf +
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
	       ppn_set->consume_ppns,
	       ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

	return VMCI_SUCCESS;
}

static int qp_memcpy_to_queue(struct vmci_queue *queue,
			      u64 queue_offset,
			      const void *src, size_t src_offset, size_t size)
{
	return __qp_memcpy_to_queue(queue, queue_offset,
				    (u8 *)src + src_offset, size, false);
}

static int qp_memcpy_from_queue(void *dest,
				size_t dest_offset,
				const struct vmci_queue *queue,
				u64 queue_offset, size_t size)
{
	return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
				      queue, queue_offset, size, false);
}

/*
 * Copies from a given iovec to a VMCI Queue.
 */
static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
				  u64 queue_offset,
				  const void *msg,
				  size_t src_offset, size_t size)
{

	/*
	 * We ignore src_offset because src is really a struct iovec * and will
	 * maintain offset internally.
	 */
	return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
}

/*
 * Copies to a given iovec from a VMCI Queue.
 */
static int qp_memcpy_from_queue_iov(void *dest,
				    size_t dest_offset,
				    const struct vmci_queue *queue,
				    u64 queue_offset, size_t size)
{
	/*
	 * We ignore dest_offset because dest is really a struct iovec * and
	 * will maintain offset internally.
	 */
	return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
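 */

/*
 * Illustrative sketch (not part of the driver): the wrappers above have the
 * vmci_memcpy_to_queue_func/vmci_memcpy_from_queue_func signatures declared
 * near the top of this file, so generic enqueue/dequeue code can be written
 * against a function pointer and handed either the plain memcpy variant or
 * the iovec variant. This helper is hypothetical.
 */
static inline int qp_example_enqueue(struct vmci_queue *produce_q,
				     u64 tail,
				     const void *buf,
				     size_t buf_size,
				     vmci_memcpy_to_queue_func *memcpy_to_queue)
{
	/* Copy buf_size bytes from buf (source offset 0) to offset 'tail'. */
	return memcpy_to_queue(produce_q, tail, buf, 0, buf_size);
}

/*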
614 */ 615 static struct vmci_queue *qp_host_alloc_queue(u64 size) 616 { 617 struct vmci_queue *queue; 618 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 619 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 620 const size_t queue_page_size = 621 num_pages * sizeof(*queue->kernel_if->u.h.page); 622 623 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); 624 if (queue) { 625 queue->q_header = NULL; 626 queue->saved_header = NULL; 627 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); 628 queue->kernel_if->host = true; 629 queue->kernel_if->mutex = NULL; 630 queue->kernel_if->num_pages = num_pages; 631 queue->kernel_if->u.h.header_page = 632 (struct page **)((u8 *)queue + queue_size); 633 queue->kernel_if->u.h.page = 634 &queue->kernel_if->u.h.header_page[1]; 635 } 636 637 return queue; 638 } 639 640 /* 641 * Frees kernel memory for a given queue (header plus translation 642 * structure). 643 */ 644 static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) 645 { 646 kfree(queue); 647 } 648 649 /* 650 * Initialize the mutex for the pair of queues. This mutex is used to 651 * protect the q_header and the buffer from changing out from under any 652 * users of either queue. Of course, it's only any good if the mutexes 653 * are actually acquired. Queue structure must lie on non-paged memory 654 * or we cannot guarantee access to the mutex. 655 */ 656 static void qp_init_queue_mutex(struct vmci_queue *produce_q, 657 struct vmci_queue *consume_q) 658 { 659 /* 660 * Only the host queue has shared state - the guest queues do not 661 * need to synchronize access using a queue mutex. 662 */ 663 664 if (produce_q->kernel_if->host) { 665 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; 666 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; 667 mutex_init(produce_q->kernel_if->mutex); 668 } 669 } 670 671 /* 672 * Cleans up the mutex for the pair of queues. 673 */ 674 static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, 675 struct vmci_queue *consume_q) 676 { 677 if (produce_q->kernel_if->host) { 678 produce_q->kernel_if->mutex = NULL; 679 consume_q->kernel_if->mutex = NULL; 680 } 681 } 682 683 /* 684 * Acquire the mutex for the queue. Note that the produce_q and 685 * the consume_q share a mutex. So, only one of the two need to 686 * be passed in to this routine. Either will work just fine. 687 */ 688 static void qp_acquire_queue_mutex(struct vmci_queue *queue) 689 { 690 if (queue->kernel_if->host) 691 mutex_lock(queue->kernel_if->mutex); 692 } 693 694 /* 695 * Release the mutex for the queue. Note that the produce_q and 696 * the consume_q share a mutex. So, only one of the two need to 697 * be passed in to this routine. Either will work just fine. 698 */ 699 static void qp_release_queue_mutex(struct vmci_queue *queue) 700 { 701 if (queue->kernel_if->host) 702 mutex_unlock(queue->kernel_if->mutex); 703 } 704 705 /* 706 * Helper function to release pages in the PageStoreAttachInfo 707 * previously obtained using get_user_pages. 708 */ 709 static void qp_release_pages(struct page **pages, 710 u64 num_pages, bool dirty) 711 { 712 int i; 713 714 for (i = 0; i < num_pages; i++) { 715 if (dirty) 716 set_page_dirty(pages[i]); 717 718 page_cache_release(pages[i]); 719 pages[i] = NULL; 720 } 721 } 722 723 /* 724 * Lock the user pages referenced by the {produce,consume}Buffer 725 * struct into memory and populate the {produce,consume}Pages 726 * arrays in the attach structure with them. 
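 */

/*
 * Illustrative sketch (not part of the driver): the produce and consume
 * queues of a host-side pair share one mutex (see qp_init_queue_mutex
 * above), so code that touches a queue header brackets the access with the
 * acquire/release helpers on either queue. This hypothetical helper shows
 * the pattern.
 */
static inline void qp_example_with_header_locked(struct vmci_queue *produce_q)
{
	qp_acquire_queue_mutex(produce_q);
	/* ... safe to dereference produce_q->q_header here ... */
	qp_release_queue_mutex(produce_q);
}

/*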
727 */ 728 static int qp_host_get_user_memory(u64 produce_uva, 729 u64 consume_uva, 730 struct vmci_queue *produce_q, 731 struct vmci_queue *consume_q) 732 { 733 int retval; 734 int err = VMCI_SUCCESS; 735 736 retval = get_user_pages_fast((uintptr_t) produce_uva, 737 produce_q->kernel_if->num_pages, 1, 738 produce_q->kernel_if->u.h.header_page); 739 if (retval < produce_q->kernel_if->num_pages) { 740 pr_warn("get_user_pages(produce) failed (retval=%d)", retval); 741 qp_release_pages(produce_q->kernel_if->u.h.header_page, 742 retval, false); 743 err = VMCI_ERROR_NO_MEM; 744 goto out; 745 } 746 747 retval = get_user_pages_fast((uintptr_t) consume_uva, 748 consume_q->kernel_if->num_pages, 1, 749 consume_q->kernel_if->u.h.header_page); 750 if (retval < consume_q->kernel_if->num_pages) { 751 pr_warn("get_user_pages(consume) failed (retval=%d)", retval); 752 qp_release_pages(consume_q->kernel_if->u.h.header_page, 753 retval, false); 754 qp_release_pages(produce_q->kernel_if->u.h.header_page, 755 produce_q->kernel_if->num_pages, false); 756 err = VMCI_ERROR_NO_MEM; 757 } 758 759 out: 760 return err; 761 } 762 763 /* 764 * Registers the specification of the user pages used for backing a queue 765 * pair. Enough information to map in pages is stored in the OS specific 766 * part of the struct vmci_queue structure. 767 */ 768 static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store, 769 struct vmci_queue *produce_q, 770 struct vmci_queue *consume_q) 771 { 772 u64 produce_uva; 773 u64 consume_uva; 774 775 /* 776 * The new style and the old style mapping only differs in 777 * that we either get a single or two UVAs, so we split the 778 * single UVA range at the appropriate spot. 779 */ 780 produce_uva = page_store->pages; 781 consume_uva = page_store->pages + 782 produce_q->kernel_if->num_pages * PAGE_SIZE; 783 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, 784 consume_q); 785 } 786 787 /* 788 * Releases and removes the references to user pages stored in the attach 789 * struct. Pages are released from the page cache and may become 790 * swappable again. 791 */ 792 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, 793 struct vmci_queue *consume_q) 794 { 795 qp_release_pages(produce_q->kernel_if->u.h.header_page, 796 produce_q->kernel_if->num_pages, true); 797 memset(produce_q->kernel_if->u.h.header_page, 0, 798 sizeof(*produce_q->kernel_if->u.h.header_page) * 799 produce_q->kernel_if->num_pages); 800 qp_release_pages(consume_q->kernel_if->u.h.header_page, 801 consume_q->kernel_if->num_pages, true); 802 memset(consume_q->kernel_if->u.h.header_page, 0, 803 sizeof(*consume_q->kernel_if->u.h.header_page) * 804 consume_q->kernel_if->num_pages); 805 } 806 807 /* 808 * Once qp_host_register_user_memory has been performed on a 809 * queue, the queue pair headers can be mapped into the 810 * kernel. Once mapped, they must be unmapped with 811 * qp_host_unmap_queues prior to calling 812 * qp_host_unregister_user_memory. 813 * Pages are pinned. 
814 */ 815 static int qp_host_map_queues(struct vmci_queue *produce_q, 816 struct vmci_queue *consume_q) 817 { 818 int result; 819 820 if (!produce_q->q_header || !consume_q->q_header) { 821 struct page *headers[2]; 822 823 if (produce_q->q_header != consume_q->q_header) 824 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 825 826 if (produce_q->kernel_if->u.h.header_page == NULL || 827 *produce_q->kernel_if->u.h.header_page == NULL) 828 return VMCI_ERROR_UNAVAILABLE; 829 830 headers[0] = *produce_q->kernel_if->u.h.header_page; 831 headers[1] = *consume_q->kernel_if->u.h.header_page; 832 833 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); 834 if (produce_q->q_header != NULL) { 835 consume_q->q_header = 836 (struct vmci_queue_header *)((u8 *) 837 produce_q->q_header + 838 PAGE_SIZE); 839 result = VMCI_SUCCESS; 840 } else { 841 pr_warn("vmap failed\n"); 842 result = VMCI_ERROR_NO_MEM; 843 } 844 } else { 845 result = VMCI_SUCCESS; 846 } 847 848 return result; 849 } 850 851 /* 852 * Unmaps previously mapped queue pair headers from the kernel. 853 * Pages are unpinned. 854 */ 855 static int qp_host_unmap_queues(u32 gid, 856 struct vmci_queue *produce_q, 857 struct vmci_queue *consume_q) 858 { 859 if (produce_q->q_header) { 860 if (produce_q->q_header < consume_q->q_header) 861 vunmap(produce_q->q_header); 862 else 863 vunmap(consume_q->q_header); 864 865 produce_q->q_header = NULL; 866 consume_q->q_header = NULL; 867 } 868 869 return VMCI_SUCCESS; 870 } 871 872 /* 873 * Finds the entry in the list corresponding to a given handle. Assumes 874 * that the list is locked. 875 */ 876 static struct qp_entry *qp_list_find(struct qp_list *qp_list, 877 struct vmci_handle handle) 878 { 879 struct qp_entry *entry; 880 881 if (vmci_handle_is_invalid(handle)) 882 return NULL; 883 884 list_for_each_entry(entry, &qp_list->head, list_item) { 885 if (vmci_handle_is_equal(entry->handle, handle)) 886 return entry; 887 } 888 889 return NULL; 890 } 891 892 /* 893 * Finds the entry in the list corresponding to a given handle. 894 */ 895 static struct qp_guest_endpoint * 896 qp_guest_handle_to_entry(struct vmci_handle handle) 897 { 898 struct qp_guest_endpoint *entry; 899 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); 900 901 entry = qp ? container_of( 902 qp, struct qp_guest_endpoint, qp) : NULL; 903 return entry; 904 } 905 906 /* 907 * Finds the entry in the list corresponding to a given handle. 908 */ 909 static struct qp_broker_entry * 910 qp_broker_handle_to_entry(struct vmci_handle handle) 911 { 912 struct qp_broker_entry *entry; 913 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); 914 915 entry = qp ? container_of( 916 qp, struct qp_broker_entry, qp) : NULL; 917 return entry; 918 } 919 920 /* 921 * Dispatches a queue pair event message directly into the local event 922 * queue. 923 */ 924 static int qp_notify_peer_local(bool attach, struct vmci_handle handle) 925 { 926 u32 context_id = vmci_get_context_id(); 927 struct vmci_event_qp ev; 928 929 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); 930 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 931 VMCI_CONTEXT_RESOURCE_ID); 932 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); 933 ev.msg.event_data.event = 934 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; 935 ev.payload.peer_id = context_id; 936 ev.payload.handle = handle; 937 938 return vmci_event_dispatch(&ev.msg.hdr); 939 } 940 941 /* 942 * Allocates and initializes a qp_guest_endpoint structure. 
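 */

/*
 * Illustrative sketch (not part of the driver): the required ordering of the
 * host-side memory helpers above. User pages are registered first, the two
 * queue headers may then be mapped into the kernel, and they must be
 * unmapped again before the registration is dropped. This hypothetical
 * helper only demonstrates the call order; errors are simply propagated.
 */
static inline int qp_example_map_cycle(struct vmci_qp_page_store *page_store,
				       struct vmci_queue *produce_q,
				       struct vmci_queue *consume_q)
{
	int result;

	result = qp_host_register_user_memory(page_store, produce_q, consume_q);
	if (result < VMCI_SUCCESS)
		return result;

	result = qp_host_map_queues(produce_q, consume_q);
	if (result == VMCI_SUCCESS) {
		/* ... the queue pair headers are now usable ... */
		qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
				     produce_q, consume_q);
	}

	qp_host_unregister_user_memory(produce_q, consume_q);
	return result;
}

/*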
943 * Allocates a queue_pair rid (and handle) iff the given entry has 944 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX 945 * are reserved handles. Assumes that the QP list mutex is held 946 * by the caller. 947 */ 948 static struct qp_guest_endpoint * 949 qp_guest_endpoint_create(struct vmci_handle handle, 950 u32 peer, 951 u32 flags, 952 u64 produce_size, 953 u64 consume_size, 954 void *produce_q, 955 void *consume_q) 956 { 957 int result; 958 struct qp_guest_endpoint *entry; 959 /* One page each for the queue headers. */ 960 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + 961 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; 962 963 if (vmci_handle_is_invalid(handle)) { 964 u32 context_id = vmci_get_context_id(); 965 966 handle = vmci_make_handle(context_id, VMCI_INVALID_ID); 967 } 968 969 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 970 if (entry) { 971 entry->qp.peer = peer; 972 entry->qp.flags = flags; 973 entry->qp.produce_size = produce_size; 974 entry->qp.consume_size = consume_size; 975 entry->qp.ref_count = 0; 976 entry->num_ppns = num_ppns; 977 entry->produce_q = produce_q; 978 entry->consume_q = consume_q; 979 INIT_LIST_HEAD(&entry->qp.list_item); 980 981 /* Add resource obj */ 982 result = vmci_resource_add(&entry->resource, 983 VMCI_RESOURCE_TYPE_QPAIR_GUEST, 984 handle); 985 entry->qp.handle = vmci_resource_handle(&entry->resource); 986 if ((result != VMCI_SUCCESS) || 987 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { 988 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", 989 handle.context, handle.resource, result); 990 kfree(entry); 991 entry = NULL; 992 } 993 } 994 return entry; 995 } 996 997 /* 998 * Frees a qp_guest_endpoint structure. 999 */ 1000 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) 1001 { 1002 qp_free_ppn_set(&entry->ppn_set); 1003 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 1004 qp_free_queue(entry->produce_q, entry->qp.produce_size); 1005 qp_free_queue(entry->consume_q, entry->qp.consume_size); 1006 /* Unlink from resource hash table and free callback */ 1007 vmci_resource_remove(&entry->resource); 1008 1009 kfree(entry); 1010 } 1011 1012 /* 1013 * Helper to make a queue_pairAlloc hypercall when the driver is 1014 * supporting a guest device. 
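 */

/*
 * Illustrative sketch (not part of the driver): the VMCI_QUEUEPAIR_ALLOC
 * hypercall below is a datagram consisting of a struct vmci_qp_alloc_msg
 * immediately followed by num_ppns 32-bit PPNs -- the produce queue's PPNs
 * first, then the consume queue's (see qp_populate_ppn_set above). This
 * hypothetical helper restates the size computation used there.
 */
static inline size_t qp_example_alloc_msg_size(u64 num_ppns)
{
	return sizeof(struct vmci_qp_alloc_msg) +
	       (size_t)num_ppns * sizeof(u32);
}

/*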
1015 */ 1016 static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) 1017 { 1018 struct vmci_qp_alloc_msg *alloc_msg; 1019 size_t msg_size; 1020 int result; 1021 1022 if (!entry || entry->num_ppns <= 2) 1023 return VMCI_ERROR_INVALID_ARGS; 1024 1025 msg_size = sizeof(*alloc_msg) + 1026 (size_t) entry->num_ppns * sizeof(u32); 1027 alloc_msg = kmalloc(msg_size, GFP_KERNEL); 1028 if (!alloc_msg) 1029 return VMCI_ERROR_NO_MEM; 1030 1031 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1032 VMCI_QUEUEPAIR_ALLOC); 1033 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; 1034 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; 1035 alloc_msg->handle = entry->qp.handle; 1036 alloc_msg->peer = entry->qp.peer; 1037 alloc_msg->flags = entry->qp.flags; 1038 alloc_msg->produce_size = entry->qp.produce_size; 1039 alloc_msg->consume_size = entry->qp.consume_size; 1040 alloc_msg->num_ppns = entry->num_ppns; 1041 1042 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg), 1043 &entry->ppn_set); 1044 if (result == VMCI_SUCCESS) 1045 result = vmci_send_datagram(&alloc_msg->hdr); 1046 1047 kfree(alloc_msg); 1048 1049 return result; 1050 } 1051 1052 /* 1053 * Helper to make a queue_pairDetach hypercall when the driver is 1054 * supporting a guest device. 1055 */ 1056 static int qp_detatch_hypercall(struct vmci_handle handle) 1057 { 1058 struct vmci_qp_detach_msg detach_msg; 1059 1060 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1061 VMCI_QUEUEPAIR_DETACH); 1062 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE; 1063 detach_msg.hdr.payload_size = sizeof(handle); 1064 detach_msg.handle = handle; 1065 1066 return vmci_send_datagram(&detach_msg.hdr); 1067 } 1068 1069 /* 1070 * Adds the given entry to the list. Assumes that the list is locked. 1071 */ 1072 static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) 1073 { 1074 if (entry) 1075 list_add(&entry->list_item, &qp_list->head); 1076 } 1077 1078 /* 1079 * Removes the given entry from the list. Assumes that the list is locked. 1080 */ 1081 static void qp_list_remove_entry(struct qp_list *qp_list, 1082 struct qp_entry *entry) 1083 { 1084 if (entry) 1085 list_del(&entry->list_item); 1086 } 1087 1088 /* 1089 * Helper for VMCI queue_pair detach interface. Frees the physical 1090 * pages for the queue pair. 1091 */ 1092 static int qp_detatch_guest_work(struct vmci_handle handle) 1093 { 1094 int result; 1095 struct qp_guest_endpoint *entry; 1096 u32 ref_count = ~0; /* To avoid compiler warning below */ 1097 1098 mutex_lock(&qp_guest_endpoints.mutex); 1099 1100 entry = qp_guest_handle_to_entry(handle); 1101 if (!entry) { 1102 mutex_unlock(&qp_guest_endpoints.mutex); 1103 return VMCI_ERROR_NOT_FOUND; 1104 } 1105 1106 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1107 result = VMCI_SUCCESS; 1108 1109 if (entry->qp.ref_count > 1) { 1110 result = qp_notify_peer_local(false, handle); 1111 /* 1112 * We can fail to notify a local queuepair 1113 * because we can't allocate. We still want 1114 * to release the entry if that happens, so 1115 * don't bail out yet. 1116 */ 1117 } 1118 } else { 1119 result = qp_detatch_hypercall(handle); 1120 if (result < VMCI_SUCCESS) { 1121 /* 1122 * We failed to notify a non-local queuepair. 1123 * That other queuepair might still be 1124 * accessing the shared memory, so don't 1125 * release the entry yet. It will get cleaned 1126 * up by VMCIqueue_pair_Exit() if necessary 1127 * (assuming we are going away, otherwise why 1128 * did this fail?). 
1129 */ 1130 1131 mutex_unlock(&qp_guest_endpoints.mutex); 1132 return result; 1133 } 1134 } 1135 1136 /* 1137 * If we get here then we either failed to notify a local queuepair, or 1138 * we succeeded in all cases. Release the entry if required. 1139 */ 1140 1141 entry->qp.ref_count--; 1142 if (entry->qp.ref_count == 0) 1143 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); 1144 1145 /* If we didn't remove the entry, this could change once we unlock. */ 1146 if (entry) 1147 ref_count = entry->qp.ref_count; 1148 1149 mutex_unlock(&qp_guest_endpoints.mutex); 1150 1151 if (ref_count == 0) 1152 qp_guest_endpoint_destroy(entry); 1153 1154 return result; 1155 } 1156 1157 /* 1158 * This functions handles the actual allocation of a VMCI queue 1159 * pair guest endpoint. Allocates physical pages for the queue 1160 * pair. It makes OS dependent calls through generic wrappers. 1161 */ 1162 static int qp_alloc_guest_work(struct vmci_handle *handle, 1163 struct vmci_queue **produce_q, 1164 u64 produce_size, 1165 struct vmci_queue **consume_q, 1166 u64 consume_size, 1167 u32 peer, 1168 u32 flags, 1169 u32 priv_flags) 1170 { 1171 const u64 num_produce_pages = 1172 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1; 1173 const u64 num_consume_pages = 1174 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1; 1175 void *my_produce_q = NULL; 1176 void *my_consume_q = NULL; 1177 int result; 1178 struct qp_guest_endpoint *queue_pair_entry = NULL; 1179 1180 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS) 1181 return VMCI_ERROR_NO_ACCESS; 1182 1183 mutex_lock(&qp_guest_endpoints.mutex); 1184 1185 queue_pair_entry = qp_guest_handle_to_entry(*handle); 1186 if (queue_pair_entry) { 1187 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1188 /* Local attach case. */ 1189 if (queue_pair_entry->qp.ref_count > 1) { 1190 pr_devel("Error attempting to attach more than once\n"); 1191 result = VMCI_ERROR_UNAVAILABLE; 1192 goto error_keep_entry; 1193 } 1194 1195 if (queue_pair_entry->qp.produce_size != consume_size || 1196 queue_pair_entry->qp.consume_size != 1197 produce_size || 1198 queue_pair_entry->qp.flags != 1199 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) { 1200 pr_devel("Error mismatched queue pair in local attach\n"); 1201 result = VMCI_ERROR_QUEUEPAIR_MISMATCH; 1202 goto error_keep_entry; 1203 } 1204 1205 /* 1206 * Do a local attach. We swap the consume and 1207 * produce queues for the attacher and deliver 1208 * an attach event. 
1209 */ 1210 result = qp_notify_peer_local(true, *handle); 1211 if (result < VMCI_SUCCESS) 1212 goto error_keep_entry; 1213 1214 my_produce_q = queue_pair_entry->consume_q; 1215 my_consume_q = queue_pair_entry->produce_q; 1216 goto out; 1217 } 1218 1219 result = VMCI_ERROR_ALREADY_EXISTS; 1220 goto error_keep_entry; 1221 } 1222 1223 my_produce_q = qp_alloc_queue(produce_size, flags); 1224 if (!my_produce_q) { 1225 pr_warn("Error allocating pages for produce queue\n"); 1226 result = VMCI_ERROR_NO_MEM; 1227 goto error; 1228 } 1229 1230 my_consume_q = qp_alloc_queue(consume_size, flags); 1231 if (!my_consume_q) { 1232 pr_warn("Error allocating pages for consume queue\n"); 1233 result = VMCI_ERROR_NO_MEM; 1234 goto error; 1235 } 1236 1237 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags, 1238 produce_size, consume_size, 1239 my_produce_q, my_consume_q); 1240 if (!queue_pair_entry) { 1241 pr_warn("Error allocating memory in %s\n", __func__); 1242 result = VMCI_ERROR_NO_MEM; 1243 goto error; 1244 } 1245 1246 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q, 1247 num_consume_pages, 1248 &queue_pair_entry->ppn_set); 1249 if (result < VMCI_SUCCESS) { 1250 pr_warn("qp_alloc_ppn_set failed\n"); 1251 goto error; 1252 } 1253 1254 /* 1255 * It's only necessary to notify the host if this queue pair will be 1256 * attached to from another context. 1257 */ 1258 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1259 /* Local create case. */ 1260 u32 context_id = vmci_get_context_id(); 1261 1262 /* 1263 * Enforce similar checks on local queue pairs as we 1264 * do for regular ones. The handle's context must 1265 * match the creator or attacher context id (here they 1266 * are both the current context id) and the 1267 * attach-only flag cannot exist during create. We 1268 * also ensure specified peer is this context or an 1269 * invalid one. 1270 */ 1271 if (queue_pair_entry->qp.handle.context != context_id || 1272 (queue_pair_entry->qp.peer != VMCI_INVALID_ID && 1273 queue_pair_entry->qp.peer != context_id)) { 1274 result = VMCI_ERROR_NO_ACCESS; 1275 goto error; 1276 } 1277 1278 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { 1279 result = VMCI_ERROR_NOT_FOUND; 1280 goto error; 1281 } 1282 } else { 1283 result = qp_alloc_hypercall(queue_pair_entry); 1284 if (result < VMCI_SUCCESS) { 1285 pr_warn("qp_alloc_hypercall result = %d\n", result); 1286 goto error; 1287 } 1288 } 1289 1290 qp_init_queue_mutex((struct vmci_queue *)my_produce_q, 1291 (struct vmci_queue *)my_consume_q); 1292 1293 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); 1294 1295 out: 1296 queue_pair_entry->qp.ref_count++; 1297 *handle = queue_pair_entry->qp.handle; 1298 *produce_q = (struct vmci_queue *)my_produce_q; 1299 *consume_q = (struct vmci_queue *)my_consume_q; 1300 1301 /* 1302 * We should initialize the queue pair header pages on a local 1303 * queue pair create. For non-local queue pairs, the 1304 * hypervisor initializes the header pages in the create step. 1305 */ 1306 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && 1307 queue_pair_entry->qp.ref_count == 1) { 1308 vmci_q_header_init((*produce_q)->q_header, *handle); 1309 vmci_q_header_init((*consume_q)->q_header, *handle); 1310 } 1311 1312 mutex_unlock(&qp_guest_endpoints.mutex); 1313 1314 return VMCI_SUCCESS; 1315 1316 error: 1317 mutex_unlock(&qp_guest_endpoints.mutex); 1318 if (queue_pair_entry) { 1319 /* The queues will be freed inside the destroy routine. 
 */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, which used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
1401 */ 1402 1403 guest_produce_size = consume_size; 1404 guest_consume_size = produce_size; 1405 } else { 1406 guest_produce_size = produce_size; 1407 guest_consume_size = consume_size; 1408 } 1409 1410 entry->qp.handle = handle; 1411 entry->qp.peer = peer; 1412 entry->qp.flags = flags; 1413 entry->qp.produce_size = guest_produce_size; 1414 entry->qp.consume_size = guest_consume_size; 1415 entry->qp.ref_count = 1; 1416 entry->create_id = context_id; 1417 entry->attach_id = VMCI_INVALID_ID; 1418 entry->state = VMCIQPB_NEW; 1419 entry->require_trusted_attach = 1420 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); 1421 entry->created_by_trusted = 1422 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED); 1423 entry->vmci_page_files = false; 1424 entry->wakeup_cb = wakeup_cb; 1425 entry->client_data = client_data; 1426 entry->produce_q = qp_host_alloc_queue(guest_produce_size); 1427 if (entry->produce_q == NULL) { 1428 result = VMCI_ERROR_NO_MEM; 1429 goto error; 1430 } 1431 entry->consume_q = qp_host_alloc_queue(guest_consume_size); 1432 if (entry->consume_q == NULL) { 1433 result = VMCI_ERROR_NO_MEM; 1434 goto error; 1435 } 1436 1437 qp_init_queue_mutex(entry->produce_q, entry->consume_q); 1438 1439 INIT_LIST_HEAD(&entry->qp.list_item); 1440 1441 if (is_local) { 1442 u8 *tmp; 1443 1444 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), 1445 PAGE_SIZE, GFP_KERNEL); 1446 if (entry->local_mem == NULL) { 1447 result = VMCI_ERROR_NO_MEM; 1448 goto error; 1449 } 1450 entry->state = VMCIQPB_CREATED_MEM; 1451 entry->produce_q->q_header = entry->local_mem; 1452 tmp = (u8 *)entry->local_mem + PAGE_SIZE * 1453 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); 1454 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; 1455 } else if (page_store) { 1456 /* 1457 * The VMX already initialized the queue pair headers, so no 1458 * need for the kernel side to do that. 1459 */ 1460 result = qp_host_register_user_memory(page_store, 1461 entry->produce_q, 1462 entry->consume_q); 1463 if (result < VMCI_SUCCESS) 1464 goto error; 1465 1466 entry->state = VMCIQPB_CREATED_MEM; 1467 } else { 1468 /* 1469 * A create without a page_store may be either a host 1470 * side create (in which case we are waiting for the 1471 * guest side to supply the memory) or an old style 1472 * queue pair create (in which case we will expect a 1473 * set page store call as the next step). 
1474 */ 1475 entry->state = VMCIQPB_CREATED_NO_MEM; 1476 } 1477 1478 qp_list_add_entry(&qp_broker_list, &entry->qp); 1479 if (ent != NULL) 1480 *ent = entry; 1481 1482 /* Add to resource obj */ 1483 result = vmci_resource_add(&entry->resource, 1484 VMCI_RESOURCE_TYPE_QPAIR_HOST, 1485 handle); 1486 if (result != VMCI_SUCCESS) { 1487 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", 1488 handle.context, handle.resource, result); 1489 goto error; 1490 } 1491 1492 entry->qp.handle = vmci_resource_handle(&entry->resource); 1493 if (is_local) { 1494 vmci_q_header_init(entry->produce_q->q_header, 1495 entry->qp.handle); 1496 vmci_q_header_init(entry->consume_q->q_header, 1497 entry->qp.handle); 1498 } 1499 1500 vmci_ctx_qp_create(context, entry->qp.handle); 1501 1502 return VMCI_SUCCESS; 1503 1504 error: 1505 if (entry != NULL) { 1506 qp_host_free_queue(entry->produce_q, guest_produce_size); 1507 qp_host_free_queue(entry->consume_q, guest_consume_size); 1508 kfree(entry); 1509 } 1510 1511 return result; 1512 } 1513 1514 /* 1515 * Enqueues an event datagram to notify the peer VM attached to 1516 * the given queue pair handle about attach/detach event by the 1517 * given VM. Returns Payload size of datagram enqueued on 1518 * success, error code otherwise. 1519 */ 1520 static int qp_notify_peer(bool attach, 1521 struct vmci_handle handle, 1522 u32 my_id, 1523 u32 peer_id) 1524 { 1525 int rv; 1526 struct vmci_event_qp ev; 1527 1528 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID || 1529 peer_id == VMCI_INVALID_ID) 1530 return VMCI_ERROR_INVALID_ARGS; 1531 1532 /* 1533 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on 1534 * number of pending events from the hypervisor to a given VM 1535 * otherwise a rogue VM could do an arbitrary number of attach 1536 * and detach operations causing memory pressure in the host 1537 * kernel. 1538 */ 1539 1540 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); 1541 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1542 VMCI_CONTEXT_RESOURCE_ID); 1543 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); 1544 ev.msg.event_data.event = attach ? 1545 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; 1546 ev.payload.handle = handle; 1547 ev.payload.peer_id = my_id; 1548 1549 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, 1550 &ev.msg.hdr, false); 1551 if (rv < VMCI_SUCCESS) 1552 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n", 1553 attach ? "ATTACH" : "DETACH", peer_id); 1554 1555 return rv; 1556 } 1557 1558 /* 1559 * The second endpoint issuing a queue pair allocation will attach to 1560 * the queue pair registered with the queue pair broker. 1561 * 1562 * If the attacher is a guest, it will associate a VMX virtual address 1563 * range with the queue pair as specified by the page_store. At this 1564 * point, the already attach host endpoint may start using the queue 1565 * pair, and an attach event is sent to it. For compatibility with 1566 * older VMX'en, that used a separate step to set the VMX virtual 1567 * address range, the virtual address range can be registered later 1568 * using vmci_qp_broker_set_page_store. In that case, a page_store of 1569 * NULL should be used, and the attach event will be generated once 1570 * the actual page store has been set. 1571 * 1572 * If the attacher is the host, a page_store of NULL should be used as 1573 * well, since the page store information is already set by the guest. 
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */

		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */

		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
1670 */ 1671 1672 if (entry->qp.produce_size != produce_size || 1673 entry->qp.consume_size != consume_size) { 1674 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1675 } 1676 } else if (entry->qp.produce_size != consume_size || 1677 entry->qp.consume_size != produce_size) { 1678 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1679 } 1680 1681 if (context_id != VMCI_HOST_CONTEXT_ID) { 1682 /* 1683 * If a guest attached to a queue pair, it will supply 1684 * the backing memory. If this is a pre NOVMVM vmx, 1685 * the backing memory will be supplied by calling 1686 * vmci_qp_broker_set_page_store() following the 1687 * return of the vmci_qp_broker_alloc() call. If it is 1688 * a vmx of version NOVMVM or later, the page store 1689 * must be supplied as part of the 1690 * vmci_qp_broker_alloc call. Under all circumstances 1691 * must the initially created queue pair not have any 1692 * memory associated with it already. 1693 */ 1694 1695 if (entry->state != VMCIQPB_CREATED_NO_MEM) 1696 return VMCI_ERROR_INVALID_ARGS; 1697 1698 if (page_store != NULL) { 1699 /* 1700 * Patch up host state to point to guest 1701 * supplied memory. The VMX already 1702 * initialized the queue pair headers, so no 1703 * need for the kernel side to do that. 1704 */ 1705 1706 result = qp_host_register_user_memory(page_store, 1707 entry->produce_q, 1708 entry->consume_q); 1709 if (result < VMCI_SUCCESS) 1710 return result; 1711 1712 entry->state = VMCIQPB_ATTACHED_MEM; 1713 } else { 1714 entry->state = VMCIQPB_ATTACHED_NO_MEM; 1715 } 1716 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { 1717 /* 1718 * The host side is attempting to attach to a queue 1719 * pair that doesn't have any memory associated with 1720 * it. This must be a pre NOVMVM vmx that hasn't set 1721 * the page store information yet, or a quiesced VM. 1722 */ 1723 1724 return VMCI_ERROR_UNAVAILABLE; 1725 } else { 1726 /* The host side has successfully attached to a queue pair. */ 1727 entry->state = VMCIQPB_ATTACHED_MEM; 1728 } 1729 1730 if (entry->state == VMCIQPB_ATTACHED_MEM) { 1731 result = 1732 qp_notify_peer(true, entry->qp.handle, context_id, 1733 entry->create_id); 1734 if (result < VMCI_SUCCESS) 1735 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 1736 entry->create_id, entry->qp.handle.context, 1737 entry->qp.handle.resource); 1738 } 1739 1740 entry->attach_id = context_id; 1741 entry->qp.ref_count++; 1742 if (wakeup_cb) { 1743 entry->wakeup_cb = wakeup_cb; 1744 entry->client_data = client_data; 1745 } 1746 1747 /* 1748 * When attaching to local queue pairs, the context already has 1749 * an entry tracking the queue pair, so don't add another one. 1750 */ 1751 if (!is_local) 1752 vmci_ctx_qp_create(context, entry->qp.handle); 1753 1754 if (ent != NULL) 1755 *ent = entry; 1756 1757 return VMCI_SUCCESS; 1758 } 1759 1760 /* 1761 * queue_pair_Alloc for use when setting up queue pair endpoints 1762 * on the host. 
1763 */ 1764 static int qp_broker_alloc(struct vmci_handle handle, 1765 u32 peer, 1766 u32 flags, 1767 u32 priv_flags, 1768 u64 produce_size, 1769 u64 consume_size, 1770 struct vmci_qp_page_store *page_store, 1771 struct vmci_ctx *context, 1772 vmci_event_release_cb wakeup_cb, 1773 void *client_data, 1774 struct qp_broker_entry **ent, 1775 bool *swap) 1776 { 1777 const u32 context_id = vmci_ctx_get_id(context); 1778 bool create; 1779 struct qp_broker_entry *entry = NULL; 1780 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1781 int result; 1782 1783 if (vmci_handle_is_invalid(handle) || 1784 (flags & ~VMCI_QP_ALL_FLAGS) || is_local || 1785 !(produce_size || consume_size) || 1786 !context || context_id == VMCI_INVALID_ID || 1787 handle.context == VMCI_INVALID_ID) { 1788 return VMCI_ERROR_INVALID_ARGS; 1789 } 1790 1791 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store)) 1792 return VMCI_ERROR_INVALID_ARGS; 1793 1794 /* 1795 * In the initial argument check, we ensure that non-vmkernel hosts 1796 * are not allowed to create local queue pairs. 1797 */ 1798 1799 mutex_lock(&qp_broker_list.mutex); 1800 1801 if (!is_local && vmci_ctx_qp_exists(context, handle)) { 1802 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", 1803 context_id, handle.context, handle.resource); 1804 mutex_unlock(&qp_broker_list.mutex); 1805 return VMCI_ERROR_ALREADY_EXISTS; 1806 } 1807 1808 if (handle.resource != VMCI_INVALID_ID) 1809 entry = qp_broker_handle_to_entry(handle); 1810 1811 if (!entry) { 1812 create = true; 1813 result = 1814 qp_broker_create(handle, peer, flags, priv_flags, 1815 produce_size, consume_size, page_store, 1816 context, wakeup_cb, client_data, ent); 1817 } else { 1818 create = false; 1819 result = 1820 qp_broker_attach(entry, peer, flags, priv_flags, 1821 produce_size, consume_size, page_store, 1822 context, wakeup_cb, client_data, ent); 1823 } 1824 1825 mutex_unlock(&qp_broker_list.mutex); 1826 1827 if (swap) 1828 *swap = (context_id == VMCI_HOST_CONTEXT_ID) && 1829 !(create && is_local); 1830 1831 return result; 1832 } 1833 1834 /* 1835 * This function implements the kernel API for allocating a queue 1836 * pair. 1837 */ 1838 static int qp_alloc_host_work(struct vmci_handle *handle, 1839 struct vmci_queue **produce_q, 1840 u64 produce_size, 1841 struct vmci_queue **consume_q, 1842 u64 consume_size, 1843 u32 peer, 1844 u32 flags, 1845 u32 priv_flags, 1846 vmci_event_release_cb wakeup_cb, 1847 void *client_data) 1848 { 1849 struct vmci_handle new_handle; 1850 struct vmci_ctx *context; 1851 struct qp_broker_entry *entry; 1852 int result; 1853 bool swap; 1854 1855 if (vmci_handle_is_invalid(*handle)) { 1856 new_handle = vmci_make_handle( 1857 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID); 1858 } else 1859 new_handle = *handle; 1860 1861 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1862 entry = NULL; 1863 result = 1864 qp_broker_alloc(new_handle, peer, flags, priv_flags, 1865 produce_size, consume_size, NULL, context, 1866 wakeup_cb, client_data, &entry, &swap); 1867 if (result == VMCI_SUCCESS) { 1868 if (swap) { 1869 /* 1870 * If this is a local queue pair, the attacher 1871 * will swap around produce and consume 1872 * queues. 
1873 */ 1874 1875 *produce_q = entry->consume_q; 1876 *consume_q = entry->produce_q; 1877 } else { 1878 *produce_q = entry->produce_q; 1879 *consume_q = entry->consume_q; 1880 } 1881 1882 *handle = vmci_resource_handle(&entry->resource); 1883 } else { 1884 *handle = VMCI_INVALID_HANDLE; 1885 pr_devel("queue pair broker failed to alloc (result=%d)\n", 1886 result); 1887 } 1888 vmci_ctx_put(context); 1889 return result; 1890 } 1891 1892 /* 1893 * Allocates a VMCI queue_pair. Only checks validity of input 1894 * arguments. The real work is done in the host or guest 1895 * specific function. 1896 */ 1897 int vmci_qp_alloc(struct vmci_handle *handle, 1898 struct vmci_queue **produce_q, 1899 u64 produce_size, 1900 struct vmci_queue **consume_q, 1901 u64 consume_size, 1902 u32 peer, 1903 u32 flags, 1904 u32 priv_flags, 1905 bool guest_endpoint, 1906 vmci_event_release_cb wakeup_cb, 1907 void *client_data) 1908 { 1909 if (!handle || !produce_q || !consume_q || 1910 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) 1911 return VMCI_ERROR_INVALID_ARGS; 1912 1913 if (guest_endpoint) { 1914 return qp_alloc_guest_work(handle, produce_q, 1915 produce_size, consume_q, 1916 consume_size, peer, 1917 flags, priv_flags); 1918 } else { 1919 return qp_alloc_host_work(handle, produce_q, 1920 produce_size, consume_q, 1921 consume_size, peer, flags, 1922 priv_flags, wakeup_cb, client_data); 1923 } 1924 } 1925 1926 /* 1927 * This function implements the host kernel API for detaching from 1928 * a queue pair. 1929 */ 1930 static int qp_detatch_host_work(struct vmci_handle handle) 1931 { 1932 int result; 1933 struct vmci_ctx *context; 1934 1935 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1936 1937 result = vmci_qp_broker_detach(handle, context); 1938 1939 vmci_ctx_put(context); 1940 return result; 1941 } 1942 1943 /* 1944 * Detaches from a VMCI queue_pair. Only checks validity of input argument. 1945 * Real work is done in the host or guest specific function. 1946 */ 1947 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) 1948 { 1949 if (vmci_handle_is_invalid(handle)) 1950 return VMCI_ERROR_INVALID_ARGS; 1951 1952 if (guest_endpoint) 1953 return qp_detatch_guest_work(handle); 1954 else 1955 return qp_detatch_host_work(handle); 1956 } 1957 1958 /* 1959 * Returns the entry from the head of the list. Assumes that the list is 1960 * locked. 1961 */ 1962 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) 1963 { 1964 if (!list_empty(&qp_list->head)) { 1965 struct qp_entry *entry = 1966 list_first_entry(&qp_list->head, struct qp_entry, 1967 list_item); 1968 return entry; 1969 } 1970 1971 return NULL; 1972 } 1973 1974 void vmci_qp_broker_exit(void) 1975 { 1976 struct qp_entry *entry; 1977 struct qp_broker_entry *be; 1978 1979 mutex_lock(&qp_broker_list.mutex); 1980 1981 while ((entry = qp_list_get_head(&qp_broker_list))) { 1982 be = (struct qp_broker_entry *)entry; 1983 1984 qp_list_remove_entry(&qp_broker_list, entry); 1985 kfree(be); 1986 } 1987 1988 mutex_unlock(&qp_broker_list.mutex); 1989 } 1990 1991 /* 1992 * Requests that a queue pair be allocated with the VMCI queue 1993 * pair broker. Allocates a queue pair entry if one does not 1994 * exist. Attaches to one if it exists, and retrieves the page 1995 * files backing that queue_pair. Assumes that the queue pair 1996 * broker lock is held. 
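 *
 * For an old-style (pre NOVMVM) VMX the backing memory is supplied in a
 * second step, so the sequence seen by the broker is roughly (an
 * illustrative sketch, not an extra API):
 *
 *	vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
 *			     produce_size, consume_size, NULL, context);
 *		-> entry ends up in a *_NO_MEM state
 *	vmci_qp_broker_set_page_store(handle, produce_uva, consume_uva,
 *				      context);
 *		-> entry moves to the corresponding *_MEM state
 *
 * A new-style VMX passes a well-formed page_store directly to the alloc
 * call and skips the second step.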
1997 */ 1998 int vmci_qp_broker_alloc(struct vmci_handle handle, 1999 u32 peer, 2000 u32 flags, 2001 u32 priv_flags, 2002 u64 produce_size, 2003 u64 consume_size, 2004 struct vmci_qp_page_store *page_store, 2005 struct vmci_ctx *context) 2006 { 2007 return qp_broker_alloc(handle, peer, flags, priv_flags, 2008 produce_size, consume_size, 2009 page_store, context, NULL, NULL, NULL, NULL); 2010 } 2011 2012 /* 2013 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate 2014 * step to add the UVAs of the VMX mapping of the queue pair. This function 2015 * provides backwards compatibility with such VMX'en, and takes care of 2016 * registering the page store for a queue pair previously allocated by the 2017 * VMX during create or attach. This function moves the queue pair state 2018 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from 2019 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the 2020 * attached state with memory, the queue pair is ready to be used by the 2021 * host peer, and an attached event will be generated. 2022 * 2023 * Assumes that the queue pair broker lock is held. 2024 * 2025 * This function is only used by the hosted platform, since there is no 2026 * issue with backwards compatibility for vmkernel. 2027 */ 2028 int vmci_qp_broker_set_page_store(struct vmci_handle handle, 2029 u64 produce_uva, 2030 u64 consume_uva, 2031 struct vmci_ctx *context) 2032 { 2033 struct qp_broker_entry *entry; 2034 int result; 2035 const u32 context_id = vmci_ctx_get_id(context); 2036 2037 if (vmci_handle_is_invalid(handle) || !context || 2038 context_id == VMCI_INVALID_ID) 2039 return VMCI_ERROR_INVALID_ARGS; 2040 2041 /* 2042 * We only support guest to host queue pairs, so the VMX must 2043 * supply UVAs for the mapped page files. 2044 */ 2045 2046 if (produce_uva == 0 || consume_uva == 0) 2047 return VMCI_ERROR_INVALID_ARGS; 2048 2049 mutex_lock(&qp_broker_list.mutex); 2050 2051 if (!vmci_ctx_qp_exists(context, handle)) { 2052 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2053 context_id, handle.context, handle.resource); 2054 result = VMCI_ERROR_NOT_FOUND; 2055 goto out; 2056 } 2057 2058 entry = qp_broker_handle_to_entry(handle); 2059 if (!entry) { 2060 result = VMCI_ERROR_NOT_FOUND; 2061 goto out; 2062 } 2063 2064 /* 2065 * If I'm the owner then I can set the page store. 2066 * 2067 * Or, if a host created the queue_pair and I'm the attached peer 2068 * then I can set the page store.
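 *
 * In terms of the check below, the call is allowed when
 * context_id == entry->create_id, or when the creator is
 * VMCI_HOST_CONTEXT_ID and context_id == entry->attach_id; every
 * other caller gets VMCI_ERROR_QUEUEPAIR_NOTOWNER.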
2069 */ 2070 if (entry->create_id != context_id && 2071 (entry->create_id != VMCI_HOST_CONTEXT_ID || 2072 entry->attach_id != context_id)) { 2073 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; 2074 goto out; 2075 } 2076 2077 if (entry->state != VMCIQPB_CREATED_NO_MEM && 2078 entry->state != VMCIQPB_ATTACHED_NO_MEM) { 2079 result = VMCI_ERROR_UNAVAILABLE; 2080 goto out; 2081 } 2082 2083 result = qp_host_get_user_memory(produce_uva, consume_uva, 2084 entry->produce_q, entry->consume_q); 2085 if (result < VMCI_SUCCESS) 2086 goto out; 2087 2088 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2089 if (result < VMCI_SUCCESS) { 2090 qp_host_unregister_user_memory(entry->produce_q, 2091 entry->consume_q); 2092 goto out; 2093 } 2094 2095 if (entry->state == VMCIQPB_CREATED_NO_MEM) 2096 entry->state = VMCIQPB_CREATED_MEM; 2097 else 2098 entry->state = VMCIQPB_ATTACHED_MEM; 2099 2100 entry->vmci_page_files = true; 2101 2102 if (entry->state == VMCIQPB_ATTACHED_MEM) { 2103 result = 2104 qp_notify_peer(true, handle, context_id, entry->create_id); 2105 if (result < VMCI_SUCCESS) { 2106 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 2107 entry->create_id, entry->qp.handle.context, 2108 entry->qp.handle.resource); 2109 } 2110 } 2111 2112 result = VMCI_SUCCESS; 2113 out: 2114 mutex_unlock(&qp_broker_list.mutex); 2115 return result; 2116 } 2117 2118 /* 2119 * Resets saved queue headers for the given QP broker 2120 * entry. Should be used when guest memory becomes available 2121 * again, or the guest detaches. 2122 */ 2123 static void qp_reset_saved_headers(struct qp_broker_entry *entry) 2124 { 2125 entry->produce_q->saved_header = NULL; 2126 entry->consume_q->saved_header = NULL; 2127 } 2128 2129 /* 2130 * The main entry point for detaching from a queue pair registered with the 2131 * queue pair broker. If more than one endpoint is attached to the queue 2132 * pair, the first endpoint will mainly decrement a reference count and 2133 * generate a notification to its peer. The last endpoint will clean up 2134 * the queue pair state registered with the broker. 2135 * 2136 * When a guest endpoint detaches, it will unmap and unregister the guest 2137 * memory backing the queue pair. If the host is still attached, it will 2138 * no longer be able to access the queue pair content. 2139 * 2140 * If the queue pair is already in a state where there is no memory 2141 * registered for the queue pair (any *_NO_MEM state), it will transition to 2142 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest 2143 * endpoint is the first of two endpoints to detach. If the host endpoint is 2144 * the first out of two to detach, the queue pair will move to the 2145 * VMCIQPB_SHUTDOWN_MEM state. 
2146 */ 2147 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) 2148 { 2149 struct qp_broker_entry *entry; 2150 const u32 context_id = vmci_ctx_get_id(context); 2151 u32 peer_id; 2152 bool is_local = false; 2153 int result; 2154 2155 if (vmci_handle_is_invalid(handle) || !context || 2156 context_id == VMCI_INVALID_ID) { 2157 return VMCI_ERROR_INVALID_ARGS; 2158 } 2159 2160 mutex_lock(&qp_broker_list.mutex); 2161 2162 if (!vmci_ctx_qp_exists(context, handle)) { 2163 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2164 context_id, handle.context, handle.resource); 2165 result = VMCI_ERROR_NOT_FOUND; 2166 goto out; 2167 } 2168 2169 entry = qp_broker_handle_to_entry(handle); 2170 if (!entry) { 2171 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", 2172 context_id, handle.context, handle.resource); 2173 result = VMCI_ERROR_NOT_FOUND; 2174 goto out; 2175 } 2176 2177 if (context_id != entry->create_id && context_id != entry->attach_id) { 2178 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2179 goto out; 2180 } 2181 2182 if (context_id == entry->create_id) { 2183 peer_id = entry->attach_id; 2184 entry->create_id = VMCI_INVALID_ID; 2185 } else { 2186 peer_id = entry->create_id; 2187 entry->attach_id = VMCI_INVALID_ID; 2188 } 2189 entry->qp.ref_count--; 2190 2191 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2192 2193 if (context_id != VMCI_HOST_CONTEXT_ID) { 2194 bool headers_mapped; 2195 2196 /* 2197 * Pre NOVMVM vmx'en may detach from a queue pair 2198 * before setting the page store, and in that case 2199 * there is no user memory to detach from. Also, more 2200 * recent VMX'en may detach from a queue pair in the 2201 * quiesced state. 
2202 */ 2203 2204 qp_acquire_queue_mutex(entry->produce_q); 2205 headers_mapped = entry->produce_q->q_header || 2206 entry->consume_q->q_header; 2207 if (QPBROKERSTATE_HAS_MEM(entry)) { 2208 result = 2209 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, 2210 entry->produce_q, 2211 entry->consume_q); 2212 if (result < VMCI_SUCCESS) 2213 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2214 handle.context, handle.resource, 2215 result); 2216 2217 if (entry->vmci_page_files) 2218 qp_host_unregister_user_memory(entry->produce_q, 2219 entry-> 2220 consume_q); 2221 else 2222 qp_host_unregister_user_memory(entry->produce_q, 2223 entry-> 2224 consume_q); 2225 2226 } 2227 2228 if (!headers_mapped) 2229 qp_reset_saved_headers(entry); 2230 2231 qp_release_queue_mutex(entry->produce_q); 2232 2233 if (!headers_mapped && entry->wakeup_cb) 2234 entry->wakeup_cb(entry->client_data); 2235 2236 } else { 2237 if (entry->wakeup_cb) { 2238 entry->wakeup_cb = NULL; 2239 entry->client_data = NULL; 2240 } 2241 } 2242 2243 if (entry->qp.ref_count == 0) { 2244 qp_list_remove_entry(&qp_broker_list, &entry->qp); 2245 2246 if (is_local) 2247 kfree(entry->local_mem); 2248 2249 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 2250 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); 2251 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); 2252 /* Unlink from resource hash table and free callback */ 2253 vmci_resource_remove(&entry->resource); 2254 2255 kfree(entry); 2256 2257 vmci_ctx_qp_destroy(context, handle); 2258 } else { 2259 qp_notify_peer(false, handle, context_id, peer_id); 2260 if (context_id == VMCI_HOST_CONTEXT_ID && 2261 QPBROKERSTATE_HAS_MEM(entry)) { 2262 entry->state = VMCIQPB_SHUTDOWN_MEM; 2263 } else { 2264 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; 2265 } 2266 2267 if (!is_local) 2268 vmci_ctx_qp_destroy(context, handle); 2269 2270 } 2271 result = VMCI_SUCCESS; 2272 out: 2273 mutex_unlock(&qp_broker_list.mutex); 2274 return result; 2275 } 2276 2277 /* 2278 * Establishes the necessary mappings for a queue pair given a 2279 * reference to the queue pair guest memory. This is usually 2280 * called when a guest is unquiesced and the VMX is allowed to 2281 * map guest memory once again. 
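 *
 * A rough sketch of the quiesce/unquiesce pairing (illustrative only; the
 * plumbing that triggers these calls lives outside this file):
 *
 *	vmci_qp_broker_unmap(handle, context, gid);
 *		-> quiesce, entry goes from *_MEM to *_NO_MEM
 *	...guest memory is unavailable while the VM stays quiesced...
 *	vmci_qp_broker_map(handle, context, guest_mem);
 *		-> resume, entry goes from *_NO_MEM back to *_MEM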
2282 */ 2283 int vmci_qp_broker_map(struct vmci_handle handle, 2284 struct vmci_ctx *context, 2285 u64 guest_mem) 2286 { 2287 struct qp_broker_entry *entry; 2288 const u32 context_id = vmci_ctx_get_id(context); 2289 bool is_local = false; 2290 int result; 2291 2292 if (vmci_handle_is_invalid(handle) || !context || 2293 context_id == VMCI_INVALID_ID) 2294 return VMCI_ERROR_INVALID_ARGS; 2295 2296 mutex_lock(&qp_broker_list.mutex); 2297 2298 if (!vmci_ctx_qp_exists(context, handle)) { 2299 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2300 context_id, handle.context, handle.resource); 2301 result = VMCI_ERROR_NOT_FOUND; 2302 goto out; 2303 } 2304 2305 entry = qp_broker_handle_to_entry(handle); 2306 if (!entry) { 2307 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2308 context_id, handle.context, handle.resource); 2309 result = VMCI_ERROR_NOT_FOUND; 2310 goto out; 2311 } 2312 2313 if (context_id != entry->create_id && context_id != entry->attach_id) { 2314 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2315 goto out; 2316 } 2317 2318 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2319 result = VMCI_SUCCESS; 2320 2321 if (context_id != VMCI_HOST_CONTEXT_ID) { 2322 struct vmci_qp_page_store page_store; 2323 2324 page_store.pages = guest_mem; 2325 page_store.len = QPE_NUM_PAGES(entry->qp); 2326 2327 qp_acquire_queue_mutex(entry->produce_q); 2328 qp_reset_saved_headers(entry); 2329 result = 2330 qp_host_register_user_memory(&page_store, 2331 entry->produce_q, 2332 entry->consume_q); 2333 qp_release_queue_mutex(entry->produce_q); 2334 if (result == VMCI_SUCCESS) { 2335 /* Move state from *_NO_MEM to *_MEM */ 2336 2337 entry->state++; 2338 2339 if (entry->wakeup_cb) 2340 entry->wakeup_cb(entry->client_data); 2341 } 2342 } 2343 2344 out: 2345 mutex_unlock(&qp_broker_list.mutex); 2346 return result; 2347 } 2348 2349 /* 2350 * Saves a snapshot of the queue headers for the given QP broker 2351 * entry. Should be used when guest memory is unmapped. 2352 * Results: 2353 * VMCI_SUCCESS on success, appropriate error code if guest memory 2354 * can't be accessed.. 2355 */ 2356 static int qp_save_headers(struct qp_broker_entry *entry) 2357 { 2358 int result; 2359 2360 if (entry->produce_q->saved_header != NULL && 2361 entry->consume_q->saved_header != NULL) { 2362 /* 2363 * If the headers have already been saved, we don't need to do 2364 * it again, and we don't want to map in the headers 2365 * unnecessarily. 2366 */ 2367 2368 return VMCI_SUCCESS; 2369 } 2370 2371 if (NULL == entry->produce_q->q_header || 2372 NULL == entry->consume_q->q_header) { 2373 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2374 if (result < VMCI_SUCCESS) 2375 return result; 2376 } 2377 2378 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, 2379 sizeof(entry->saved_produce_q)); 2380 entry->produce_q->saved_header = &entry->saved_produce_q; 2381 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, 2382 sizeof(entry->saved_consume_q)); 2383 entry->consume_q->saved_header = &entry->saved_consume_q; 2384 2385 return VMCI_SUCCESS; 2386 } 2387 2388 /* 2389 * Removes all references to the guest memory of a given queue pair, and 2390 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually 2391 * called when a VM is being quiesced where access to guest memory should 2392 * avoided. 
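 *
 * While the memory is unmapped, qp_save_headers() keeps a snapshot of the
 * queue headers so that callers going through qp_get_queue_headers() can
 * still read the last known producer/consumer indexes; the saved copies
 * are dropped again by qp_reset_saved_headers() when the memory is mapped
 * back in or the guest detaches.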
2393 */ 2394 int vmci_qp_broker_unmap(struct vmci_handle handle, 2395 struct vmci_ctx *context, 2396 u32 gid) 2397 { 2398 struct qp_broker_entry *entry; 2399 const u32 context_id = vmci_ctx_get_id(context); 2400 bool is_local = false; 2401 int result; 2402 2403 if (vmci_handle_is_invalid(handle) || !context || 2404 context_id == VMCI_INVALID_ID) 2405 return VMCI_ERROR_INVALID_ARGS; 2406 2407 mutex_lock(&qp_broker_list.mutex); 2408 2409 if (!vmci_ctx_qp_exists(context, handle)) { 2410 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2411 context_id, handle.context, handle.resource); 2412 result = VMCI_ERROR_NOT_FOUND; 2413 goto out; 2414 } 2415 2416 entry = qp_broker_handle_to_entry(handle); 2417 if (!entry) { 2418 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2419 context_id, handle.context, handle.resource); 2420 result = VMCI_ERROR_NOT_FOUND; 2421 goto out; 2422 } 2423 2424 if (context_id != entry->create_id && context_id != entry->attach_id) { 2425 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2426 goto out; 2427 } 2428 2429 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2430 2431 if (context_id != VMCI_HOST_CONTEXT_ID) { 2432 qp_acquire_queue_mutex(entry->produce_q); 2433 result = qp_save_headers(entry); 2434 if (result < VMCI_SUCCESS) 2435 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2436 handle.context, handle.resource, result); 2437 2438 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); 2439 2440 /* 2441 * On hosted, when we unmap queue pairs, the VMX will also 2442 * unmap the guest memory, so we invalidate the previously 2443 * registered memory. If the queue pair is mapped again at a 2444 * later point in time, we will need to reregister the user 2445 * memory with a possibly new user VA. 2446 */ 2447 qp_host_unregister_user_memory(entry->produce_q, 2448 entry->consume_q); 2449 2450 /* 2451 * Move state from *_MEM to *_NO_MEM. 2452 */ 2453 entry->state--; 2454 2455 qp_release_queue_mutex(entry->produce_q); 2456 } 2457 2458 result = VMCI_SUCCESS; 2459 2460 out: 2461 mutex_unlock(&qp_broker_list.mutex); 2462 return result; 2463 } 2464 2465 /* 2466 * Destroys all guest queue pair endpoints. If active guest queue 2467 * pairs still exist, hypercalls to attempt detach from these 2468 * queue pairs will be made. Any failure to detach is silently 2469 * ignored. 2470 */ 2471 void vmci_qp_guest_endpoints_exit(void) 2472 { 2473 struct qp_entry *entry; 2474 struct qp_guest_endpoint *ep; 2475 2476 mutex_lock(&qp_guest_endpoints.mutex); 2477 2478 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { 2479 ep = (struct qp_guest_endpoint *)entry; 2480 2481 /* Don't make a hypercall for local queue_pairs. */ 2482 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) 2483 qp_detatch_hypercall(entry->handle); 2484 2485 /* We cannot fail the exit, so let's reset ref_count. */ 2486 entry->ref_count = 0; 2487 qp_list_remove_entry(&qp_guest_endpoints, entry); 2488 2489 qp_guest_endpoint_destroy(ep); 2490 } 2491 2492 mutex_unlock(&qp_guest_endpoints.mutex); 2493 } 2494 2495 /* 2496 * Helper routine that will lock the queue pair before subsequent 2497 * operations. 2498 * Note: Non-blocking on the host side is currently only implemented in ESX. 2499 * Since non-blocking isn't yet implemented on the host personality we 2500 * have no reason to acquire a spin lock. So to avoid the use of an 2501 * unnecessary lock only acquire the mutex if we can block. 
2502 */ 2503 static void qp_lock(const struct vmci_qp *qpair) 2504 { 2505 qp_acquire_queue_mutex(qpair->produce_q); 2506 } 2507 2508 /* 2509 * Helper routine that unlocks the queue pair after calling 2510 * qp_lock. 2511 */ 2512 static void qp_unlock(const struct vmci_qp *qpair) 2513 { 2514 qp_release_queue_mutex(qpair->produce_q); 2515 } 2516 2517 /* 2518 * The queue headers may not be mapped at all times. If a queue is 2519 * currently not mapped, it will be attempted to do so. 2520 */ 2521 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2522 struct vmci_queue *consume_q) 2523 { 2524 int result; 2525 2526 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2527 result = qp_host_map_queues(produce_q, consume_q); 2528 if (result < VMCI_SUCCESS) 2529 return (produce_q->saved_header && 2530 consume_q->saved_header) ? 2531 VMCI_ERROR_QUEUEPAIR_NOT_READY : 2532 VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2533 } 2534 2535 return VMCI_SUCCESS; 2536 } 2537 2538 /* 2539 * Helper routine that will retrieve the produce and consume 2540 * headers of a given queue pair. If the guest memory of the 2541 * queue pair is currently not available, the saved queue headers 2542 * will be returned, if these are available. 2543 */ 2544 static int qp_get_queue_headers(const struct vmci_qp *qpair, 2545 struct vmci_queue_header **produce_q_header, 2546 struct vmci_queue_header **consume_q_header) 2547 { 2548 int result; 2549 2550 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2551 if (result == VMCI_SUCCESS) { 2552 *produce_q_header = qpair->produce_q->q_header; 2553 *consume_q_header = qpair->consume_q->q_header; 2554 } else if (qpair->produce_q->saved_header && 2555 qpair->consume_q->saved_header) { 2556 *produce_q_header = qpair->produce_q->saved_header; 2557 *consume_q_header = qpair->consume_q->saved_header; 2558 result = VMCI_SUCCESS; 2559 } 2560 2561 return result; 2562 } 2563 2564 /* 2565 * Callback from VMCI queue pair broker indicating that a queue 2566 * pair that was previously not ready, now either is ready or 2567 * gone forever. 2568 */ 2569 static int qp_wakeup_cb(void *client_data) 2570 { 2571 struct vmci_qp *qpair = (struct vmci_qp *)client_data; 2572 2573 qp_lock(qpair); 2574 while (qpair->blocked > 0) { 2575 qpair->blocked--; 2576 qpair->generation++; 2577 wake_up(&qpair->event); 2578 } 2579 qp_unlock(qpair); 2580 2581 return VMCI_SUCCESS; 2582 } 2583 2584 /* 2585 * Makes the calling thread wait for the queue pair to become 2586 * ready for host side access. Returns true when thread is 2587 * woken up after queue pair state change, false otherwise. 2588 */ 2589 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) 2590 { 2591 unsigned int generation; 2592 2593 qpair->blocked++; 2594 generation = qpair->generation; 2595 qp_unlock(qpair); 2596 wait_event(qpair->event, generation != qpair->generation); 2597 qp_lock(qpair); 2598 2599 return true; 2600 } 2601 2602 /* 2603 * Enqueues a given buffer to the produce queue using the provided 2604 * function. As many bytes as possible (space available in the queue) 2605 * are enqueued. Assumes the queue->mutex has been acquired. Returns 2606 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue 2607 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the 2608 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if 2609 * an error occured when accessing the buffer, 2610 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't 2611 * available. 
Otherwise, the number of bytes written to the queue is 2612 * returned. Updates the tail pointer of the produce queue. 2613 */ 2614 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2615 struct vmci_queue *consume_q, 2616 const u64 produce_q_size, 2617 const void *buf, 2618 size_t buf_size, 2619 vmci_memcpy_to_queue_func memcpy_to_queue) 2620 { 2621 s64 free_space; 2622 u64 tail; 2623 size_t written; 2624 ssize_t result; 2625 2626 result = qp_map_queue_headers(produce_q, consume_q); 2627 if (unlikely(result != VMCI_SUCCESS)) 2628 return result; 2629 2630 free_space = vmci_q_header_free_space(produce_q->q_header, 2631 consume_q->q_header, 2632 produce_q_size); 2633 if (free_space == 0) 2634 return VMCI_ERROR_QUEUEPAIR_NOSPACE; 2635 2636 if (free_space < VMCI_SUCCESS) 2637 return (ssize_t) free_space; 2638 2639 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2640 tail = vmci_q_header_producer_tail(produce_q->q_header); 2641 if (likely(tail + written < produce_q_size)) { 2642 result = memcpy_to_queue(produce_q, tail, buf, 0, written); 2643 } else { 2644 /* Tail pointer wraps around. */ 2645 2646 const size_t tmp = (size_t) (produce_q_size - tail); 2647 2648 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp); 2649 if (result >= VMCI_SUCCESS) 2650 result = memcpy_to_queue(produce_q, 0, buf, tmp, 2651 written - tmp); 2652 } 2653 2654 if (result < VMCI_SUCCESS) 2655 return result; 2656 2657 vmci_q_header_add_producer_tail(produce_q->q_header, written, 2658 produce_q_size); 2659 return written; 2660 } 2661 2662 /* 2663 * Dequeues data (if available) from the given consume queue. Writes data 2664 * to the user provided buffer using the provided function. 2665 * Assumes the queue->mutex has been acquired. 2666 * Results: 2667 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. 2668 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue 2669 * (as defined by the queue size). 2670 * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer. 2671 * Otherwise the number of bytes dequeued is returned. 2672 * Side effects: 2673 * Updates the head pointer of the consume queue. 2674 */ 2675 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2676 struct vmci_queue *consume_q, 2677 const u64 consume_q_size, 2678 void *buf, 2679 size_t buf_size, 2680 vmci_memcpy_from_queue_func memcpy_from_queue, 2681 bool update_consumer) 2682 { 2683 s64 buf_ready; 2684 u64 head; 2685 size_t read; 2686 ssize_t result; 2687 2688 result = qp_map_queue_headers(produce_q, consume_q); 2689 if (unlikely(result != VMCI_SUCCESS)) 2690 return result; 2691 2692 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, 2693 produce_q->q_header, 2694 consume_q_size); 2695 if (buf_ready == 0) 2696 return VMCI_ERROR_QUEUEPAIR_NODATA; 2697 2698 if (buf_ready < VMCI_SUCCESS) 2699 return (ssize_t) buf_ready; 2700 2701 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2702 head = vmci_q_header_consumer_head(produce_q->q_header); 2703 if (likely(head + read < consume_q_size)) { 2704 result = memcpy_from_queue(buf, 0, consume_q, head, read); 2705 } else { 2706 /* Head pointer wraps around. 
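 *
 * Worked example with hypothetical numbers: with consume_q_size = 1024,
 * head = 1000 and read = 100, tmp = 24, so the first copy takes 24 bytes
 * from queue offset 1000 and the second copy takes the remaining 76 bytes
 * from queue offset 0.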
*/ 2707 2708 const size_t tmp = (size_t) (consume_q_size - head); 2709 2710 result = memcpy_from_queue(buf, 0, consume_q, head, tmp); 2711 if (result >= VMCI_SUCCESS) 2712 result = memcpy_from_queue(buf, tmp, consume_q, 0, 2713 read - tmp); 2714 2715 } 2716 2717 if (result < VMCI_SUCCESS) 2718 return result; 2719 2720 if (update_consumer) 2721 vmci_q_header_add_consumer_head(produce_q->q_header, 2722 read, consume_q_size); 2723 2724 return read; 2725 } 2726 2727 /* 2728 * vmci_qpair_alloc() - Allocates a queue pair. 2729 * @qpair: Pointer for the new vmci_qp struct. 2730 * @handle: Handle to track the resource. 2731 * @produce_qsize: Desired size of the producer queue. 2732 * @consume_qsize: Desired size of the consumer queue. 2733 * @peer: ContextID of the peer. 2734 * @flags: VMCI flags. 2735 * @priv_flags: VMCI priviledge flags. 2736 * 2737 * This is the client interface for allocating the memory for a 2738 * vmci_qp structure and then attaching to the underlying 2739 * queue. If an error occurs allocating the memory for the 2740 * vmci_qp structure no attempt is made to attach. If an 2741 * error occurs attaching, then the structure is freed. 2742 */ 2743 int vmci_qpair_alloc(struct vmci_qp **qpair, 2744 struct vmci_handle *handle, 2745 u64 produce_qsize, 2746 u64 consume_qsize, 2747 u32 peer, 2748 u32 flags, 2749 u32 priv_flags) 2750 { 2751 struct vmci_qp *my_qpair; 2752 int retval; 2753 struct vmci_handle src = VMCI_INVALID_HANDLE; 2754 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID); 2755 enum vmci_route route; 2756 vmci_event_release_cb wakeup_cb; 2757 void *client_data; 2758 2759 /* 2760 * Restrict the size of a queuepair. The device already 2761 * enforces a limit on the total amount of memory that can be 2762 * allocated to queuepairs for a guest. However, we try to 2763 * allocate this memory before we make the queuepair 2764 * allocation hypercall. On Linux, we allocate each page 2765 * separately, which means rather than fail, the guest will 2766 * thrash while it tries to allocate, and will become 2767 * increasingly unresponsive to the point where it appears to 2768 * be hung. So we place a limit on the size of an individual 2769 * queuepair here, and leave the device to enforce the 2770 * restriction on total queuepair memory. (Note that this 2771 * doesn't prevent all cases; a user with only this much 2772 * physical memory could still get into trouble.) The error 2773 * used by the device is NO_RESOURCES, so use that here too. 2774 */ 2775 2776 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) || 2777 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY) 2778 return VMCI_ERROR_NO_RESOURCES; 2779 2780 retval = vmci_route(&src, &dst, false, &route); 2781 if (retval < VMCI_SUCCESS) 2782 route = vmci_guest_code_active() ? 
2783 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2784 2785 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2786 pr_devel("NONBLOCK OR PINNED set"); 2787 return VMCI_ERROR_INVALID_ARGS; 2788 } 2789 2790 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); 2791 if (!my_qpair) 2792 return VMCI_ERROR_NO_MEM; 2793 2794 my_qpair->produce_q_size = produce_qsize; 2795 my_qpair->consume_q_size = consume_qsize; 2796 my_qpair->peer = peer; 2797 my_qpair->flags = flags; 2798 my_qpair->priv_flags = priv_flags; 2799 2800 wakeup_cb = NULL; 2801 client_data = NULL; 2802 2803 if (VMCI_ROUTE_AS_HOST == route) { 2804 my_qpair->guest_endpoint = false; 2805 if (!(flags & VMCI_QPFLAG_LOCAL)) { 2806 my_qpair->blocked = 0; 2807 my_qpair->generation = 0; 2808 init_waitqueue_head(&my_qpair->event); 2809 wakeup_cb = qp_wakeup_cb; 2810 client_data = (void *)my_qpair; 2811 } 2812 } else { 2813 my_qpair->guest_endpoint = true; 2814 } 2815 2816 retval = vmci_qp_alloc(handle, 2817 &my_qpair->produce_q, 2818 my_qpair->produce_q_size, 2819 &my_qpair->consume_q, 2820 my_qpair->consume_q_size, 2821 my_qpair->peer, 2822 my_qpair->flags, 2823 my_qpair->priv_flags, 2824 my_qpair->guest_endpoint, 2825 wakeup_cb, client_data); 2826 2827 if (retval < VMCI_SUCCESS) { 2828 kfree(my_qpair); 2829 return retval; 2830 } 2831 2832 *qpair = my_qpair; 2833 my_qpair->handle = *handle; 2834 2835 return retval; 2836 } 2837 EXPORT_SYMBOL_GPL(vmci_qpair_alloc); 2838 2839 /* 2840 * vmci_qpair_detach() - Detatches the client from a queue pair. 2841 * @qpair: Reference of a pointer to the qpair struct. 2842 * 2843 * This is the client interface for detaching from a VMCIQPair. 2844 * Note that this routine will free the memory allocated for the 2845 * vmci_qp structure too. 2846 */ 2847 int vmci_qpair_detach(struct vmci_qp **qpair) 2848 { 2849 int result; 2850 struct vmci_qp *old_qpair; 2851 2852 if (!qpair || !(*qpair)) 2853 return VMCI_ERROR_INVALID_ARGS; 2854 2855 old_qpair = *qpair; 2856 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); 2857 2858 /* 2859 * The guest can fail to detach for a number of reasons, and 2860 * if it does so, it will cleanup the entry (if there is one). 2861 * The host can fail too, but it won't cleanup the entry 2862 * immediately, it will do that later when the context is 2863 * freed. Either way, we need to release the qpair struct 2864 * here; there isn't much the caller can do, and we don't want 2865 * to leak. 2866 */ 2867 2868 memset(old_qpair, 0, sizeof(*old_qpair)); 2869 old_qpair->handle = VMCI_INVALID_HANDLE; 2870 old_qpair->peer = VMCI_INVALID_ID; 2871 kfree(old_qpair); 2872 *qpair = NULL; 2873 2874 return result; 2875 } 2876 EXPORT_SYMBOL_GPL(vmci_qpair_detach); 2877 2878 /* 2879 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. 2880 * @qpair: Pointer to the queue pair struct. 2881 * @producer_tail: Reference used for storing producer tail index. 2882 * @consumer_head: Reference used for storing the consumer head index. 2883 * 2884 * This is the client interface for getting the current indexes of the 2885 * QPair from the point of the view of the caller as the producer. 
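 *
 * A minimal usage sketch (illustrative only; produce_qsize is the producer
 * queue size the caller passed to vmci_qpair_alloc(), and "used", the
 * number of enqueued bytes the peer has not yet consumed, is computed by
 * the caller rather than by this API):
 *
 *	u64 tail, head, used;
 *
 *	if (vmci_qpair_get_produce_indexes(qpair, &tail, &head) == VMCI_SUCCESS)
 *		used = (tail >= head) ? tail - head
 *				      : produce_qsize - (head - tail);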
2886 */ 2887 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, 2888 u64 *producer_tail, 2889 u64 *consumer_head) 2890 { 2891 struct vmci_queue_header *produce_q_header; 2892 struct vmci_queue_header *consume_q_header; 2893 int result; 2894 2895 if (!qpair) 2896 return VMCI_ERROR_INVALID_ARGS; 2897 2898 qp_lock(qpair); 2899 result = 2900 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2901 if (result == VMCI_SUCCESS) 2902 vmci_q_header_get_pointers(produce_q_header, consume_q_header, 2903 producer_tail, consumer_head); 2904 qp_unlock(qpair); 2905 2906 if (result == VMCI_SUCCESS && 2907 ((producer_tail && *producer_tail >= qpair->produce_q_size) || 2908 (consumer_head && *consumer_head >= qpair->produce_q_size))) 2909 return VMCI_ERROR_INVALID_SIZE; 2910 2911 return result; 2912 } 2913 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); 2914 2915 /* 2916 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the comsumer. 2917 * @qpair: Pointer to the queue pair struct. 2918 * @consumer_tail: Reference used for storing consumer tail index. 2919 * @producer_head: Reference used for storing the producer head index. 2920 * 2921 * This is the client interface for getting the current indexes of the 2922 * QPair from the point of the view of the caller as the consumer. 2923 */ 2924 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, 2925 u64 *consumer_tail, 2926 u64 *producer_head) 2927 { 2928 struct vmci_queue_header *produce_q_header; 2929 struct vmci_queue_header *consume_q_header; 2930 int result; 2931 2932 if (!qpair) 2933 return VMCI_ERROR_INVALID_ARGS; 2934 2935 qp_lock(qpair); 2936 result = 2937 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2938 if (result == VMCI_SUCCESS) 2939 vmci_q_header_get_pointers(consume_q_header, produce_q_header, 2940 consumer_tail, producer_head); 2941 qp_unlock(qpair); 2942 2943 if (result == VMCI_SUCCESS && 2944 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || 2945 (producer_head && *producer_head >= qpair->consume_q_size))) 2946 return VMCI_ERROR_INVALID_SIZE; 2947 2948 return result; 2949 } 2950 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); 2951 2952 /* 2953 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. 2954 * @qpair: Pointer to the queue pair struct. 2955 * 2956 * This is the client interface for getting the amount of free 2957 * space in the QPair from the point of the view of the caller as 2958 * the producer which is the common case. Returns < 0 if err, else 2959 * available bytes into which data can be enqueued if > 0. 2960 */ 2961 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) 2962 { 2963 struct vmci_queue_header *produce_q_header; 2964 struct vmci_queue_header *consume_q_header; 2965 s64 result; 2966 2967 if (!qpair) 2968 return VMCI_ERROR_INVALID_ARGS; 2969 2970 qp_lock(qpair); 2971 result = 2972 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2973 if (result == VMCI_SUCCESS) 2974 result = vmci_q_header_free_space(produce_q_header, 2975 consume_q_header, 2976 qpair->produce_q_size); 2977 else 2978 result = 0; 2979 2980 qp_unlock(qpair); 2981 2982 return result; 2983 } 2984 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); 2985 2986 /* 2987 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. 2988 * @qpair: Pointer to the queue pair struct. 
2989 * 2990 * This is the client interface for getting the amount of free 2991 * space in the QPair from the point of the view of the caller as 2992 * the consumer which is not the common case. Returns < 0 if err, else 2993 * available bytes into which data can be enqueued if > 0. 2994 */ 2995 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) 2996 { 2997 struct vmci_queue_header *produce_q_header; 2998 struct vmci_queue_header *consume_q_header; 2999 s64 result; 3000 3001 if (!qpair) 3002 return VMCI_ERROR_INVALID_ARGS; 3003 3004 qp_lock(qpair); 3005 result = 3006 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3007 if (result == VMCI_SUCCESS) 3008 result = vmci_q_header_free_space(consume_q_header, 3009 produce_q_header, 3010 qpair->consume_q_size); 3011 else 3012 result = 0; 3013 3014 qp_unlock(qpair); 3015 3016 return result; 3017 } 3018 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); 3019 3020 /* 3021 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from 3022 * producer queue. 3023 * @qpair: Pointer to the queue pair struct. 3024 * 3025 * This is the client interface for getting the amount of 3026 * enqueued data in the QPair from the point of the view of the 3027 * caller as the producer which is not the common case. Returns < 0 if err, 3028 * else available bytes that may be read. 3029 */ 3030 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) 3031 { 3032 struct vmci_queue_header *produce_q_header; 3033 struct vmci_queue_header *consume_q_header; 3034 s64 result; 3035 3036 if (!qpair) 3037 return VMCI_ERROR_INVALID_ARGS; 3038 3039 qp_lock(qpair); 3040 result = 3041 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3042 if (result == VMCI_SUCCESS) 3043 result = vmci_q_header_buf_ready(produce_q_header, 3044 consume_q_header, 3045 qpair->produce_q_size); 3046 else 3047 result = 0; 3048 3049 qp_unlock(qpair); 3050 3051 return result; 3052 } 3053 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); 3054 3055 /* 3056 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from 3057 * consumer queue. 3058 * @qpair: Pointer to the queue pair struct. 3059 * 3060 * This is the client interface for getting the amount of 3061 * enqueued data in the QPair from the point of the view of the 3062 * caller as the consumer which is the normal case. Returns < 0 if err, 3063 * else available bytes that may be read. 3064 */ 3065 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) 3066 { 3067 struct vmci_queue_header *produce_q_header; 3068 struct vmci_queue_header *consume_q_header; 3069 s64 result; 3070 3071 if (!qpair) 3072 return VMCI_ERROR_INVALID_ARGS; 3073 3074 qp_lock(qpair); 3075 result = 3076 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3077 if (result == VMCI_SUCCESS) 3078 result = vmci_q_header_buf_ready(consume_q_header, 3079 produce_q_header, 3080 qpair->consume_q_size); 3081 else 3082 result = 0; 3083 3084 qp_unlock(qpair); 3085 3086 return result; 3087 } 3088 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); 3089 3090 /* 3091 * vmci_qpair_enqueue() - Throw data on the queue. 3092 * @qpair: Pointer to the queue pair struct. 3093 * @buf: Pointer to buffer containing data 3094 * @buf_size: Length of buffer. 3095 * @buf_type: Buffer type (Unused). 3096 * 3097 * This is the client interface for enqueueing data into the queue. 3098 * Returns number of bytes enqueued or < 0 on error. 
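 *
 * A minimal usage sketch (illustrative only; qpair is assumed to come from
 * a successful vmci_qpair_alloc() call):
 *
 *	char data[] = "ping";
 *	ssize_t nsent = vmci_qpair_enqueue(qpair, data, sizeof(data), 0);
 *
 *	if (nsent == VMCI_ERROR_QUEUEPAIR_NOSPACE)
 *		back off until the peer has consumed some data
 *	else if (nsent < VMCI_SUCCESS)
 *		treat as a fatal error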
3099 */ 3100 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, 3101 const void *buf, 3102 size_t buf_size, 3103 int buf_type) 3104 { 3105 ssize_t result; 3106 3107 if (!qpair || !buf) 3108 return VMCI_ERROR_INVALID_ARGS; 3109 3110 qp_lock(qpair); 3111 3112 do { 3113 result = qp_enqueue_locked(qpair->produce_q, 3114 qpair->consume_q, 3115 qpair->produce_q_size, 3116 buf, buf_size, 3117 qp_memcpy_to_queue); 3118 3119 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3120 !qp_wait_for_ready_queue(qpair)) 3121 result = VMCI_ERROR_WOULD_BLOCK; 3122 3123 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3124 3125 qp_unlock(qpair); 3126 3127 return result; 3128 } 3129 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); 3130 3131 /* 3132 * vmci_qpair_dequeue() - Get data from the queue. 3133 * @qpair: Pointer to the queue pair struct. 3134 * @buf: Pointer to buffer for the data 3135 * @buf_size: Length of buffer. 3136 * @buf_type: Buffer type (Unused). 3137 * 3138 * This is the client interface for dequeueing data from the queue. 3139 * Returns number of bytes dequeued or < 0 on error. 3140 */ 3141 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, 3142 void *buf, 3143 size_t buf_size, 3144 int buf_type) 3145 { 3146 ssize_t result; 3147 3148 if (!qpair || !buf) 3149 return VMCI_ERROR_INVALID_ARGS; 3150 3151 qp_lock(qpair); 3152 3153 do { 3154 result = qp_dequeue_locked(qpair->produce_q, 3155 qpair->consume_q, 3156 qpair->consume_q_size, 3157 buf, buf_size, 3158 qp_memcpy_from_queue, true); 3159 3160 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3161 !qp_wait_for_ready_queue(qpair)) 3162 result = VMCI_ERROR_WOULD_BLOCK; 3163 3164 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3165 3166 qp_unlock(qpair); 3167 3168 return result; 3169 } 3170 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); 3171 3172 /* 3173 * vmci_qpair_peek() - Peek at the data in the queue. 3174 * @qpair: Pointer to the queue pair struct. 3175 * @buf: Pointer to buffer for the data 3176 * @buf_size: Length of buffer. 3177 * @buf_type: Buffer type (Unused on Linux). 3178 * 3179 * This is the client interface for peeking into a queue. (I.e., 3180 * copy data from the queue without updating the head pointer.) 3181 * Returns number of bytes dequeued or < 0 on error. 3182 */ 3183 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, 3184 void *buf, 3185 size_t buf_size, 3186 int buf_type) 3187 { 3188 ssize_t result; 3189 3190 if (!qpair || !buf) 3191 return VMCI_ERROR_INVALID_ARGS; 3192 3193 qp_lock(qpair); 3194 3195 do { 3196 result = qp_dequeue_locked(qpair->produce_q, 3197 qpair->consume_q, 3198 qpair->consume_q_size, 3199 buf, buf_size, 3200 qp_memcpy_from_queue, false); 3201 3202 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3203 !qp_wait_for_ready_queue(qpair)) 3204 result = VMCI_ERROR_WOULD_BLOCK; 3205 3206 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3207 3208 qp_unlock(qpair); 3209 3210 return result; 3211 } 3212 EXPORT_SYMBOL_GPL(vmci_qpair_peek); 3213 3214 /* 3215 * vmci_qpair_enquev() - Throw data on the queue using iov. 3216 * @qpair: Pointer to the queue pair struct. 3217 * @iov: Pointer to buffer containing data 3218 * @iov_size: Length of buffer. 3219 * @buf_type: Buffer type (Unused). 3220 * 3221 * This is the client interface for enqueueing data into the queue. 3222 * This function uses IO vectors to handle the work. Returns number 3223 * of bytes enqueued or < 0 on error. 
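 *
 * Note that despite the @iov wording above, the data is passed in as a
 * struct msghdr. A typical caller is a socket transport's sendmsg()
 * handler, which already has such a msghdr, roughly (illustrative only):
 *
 *	ssize_t n = vmci_qpair_enquev(qpair, msg, len, 0);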
3224 */ 3225 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 3226 struct msghdr *msg, 3227 size_t iov_size, 3228 int buf_type) 3229 { 3230 ssize_t result; 3231 3232 if (!qpair) 3233 return VMCI_ERROR_INVALID_ARGS; 3234 3235 qp_lock(qpair); 3236 3237 do { 3238 result = qp_enqueue_locked(qpair->produce_q, 3239 qpair->consume_q, 3240 qpair->produce_q_size, 3241 msg, iov_size, 3242 qp_memcpy_to_queue_iov); 3243 3244 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3245 !qp_wait_for_ready_queue(qpair)) 3246 result = VMCI_ERROR_WOULD_BLOCK; 3247 3248 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3249 3250 qp_unlock(qpair); 3251 3252 return result; 3253 } 3254 EXPORT_SYMBOL_GPL(vmci_qpair_enquev); 3255 3256 /* 3257 * vmci_qpair_dequev() - Get data from the queue using iov. 3258 * @qpair: Pointer to the queue pair struct. 3259 * @iov: Pointer to buffer for the data 3260 * @iov_size: Length of buffer. 3261 * @buf_type: Buffer type (Unused). 3262 * 3263 * This is the client interface for dequeueing data from the queue. 3264 * This function uses IO vectors to handle the work. Returns number 3265 * of bytes dequeued or < 0 on error. 3266 */ 3267 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 3268 struct msghdr *msg, 3269 size_t iov_size, 3270 int buf_type) 3271 { 3272 ssize_t result; 3273 3274 if (!qpair) 3275 return VMCI_ERROR_INVALID_ARGS; 3276 3277 qp_lock(qpair); 3278 3279 do { 3280 result = qp_dequeue_locked(qpair->produce_q, 3281 qpair->consume_q, 3282 qpair->consume_q_size, 3283 msg, iov_size, 3284 qp_memcpy_from_queue_iov, 3285 true); 3286 3287 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3288 !qp_wait_for_ready_queue(qpair)) 3289 result = VMCI_ERROR_WOULD_BLOCK; 3290 3291 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3292 3293 qp_unlock(qpair); 3294 3295 return result; 3296 } 3297 EXPORT_SYMBOL_GPL(vmci_qpair_dequev); 3298 3299 /* 3300 * vmci_qpair_peekv() - Peek at the data in the queue using iov. 3301 * @qpair: Pointer to the queue pair struct. 3302 * @iov: Pointer to buffer for the data 3303 * @iov_size: Length of buffer. 3304 * @buf_type: Buffer type (Unused on Linux). 3305 * 3306 * This is the client interface for peeking into a queue. (I.e., 3307 * copy data from the queue without updating the head pointer.) 3308 * This function uses IO vectors to handle the work. Returns number 3309 * of bytes peeked or < 0 on error. 3310 */ 3311 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, 3312 struct msghdr *msg, 3313 size_t iov_size, 3314 int buf_type) 3315 { 3316 ssize_t result; 3317 3318 if (!qpair) 3319 return VMCI_ERROR_INVALID_ARGS; 3320 3321 qp_lock(qpair); 3322 3323 do { 3324 result = qp_dequeue_locked(qpair->produce_q, 3325 qpair->consume_q, 3326 qpair->consume_q_size, 3327 msg, iov_size, 3328 qp_memcpy_from_queue_iov, 3329 false); 3330 3331 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3332 !qp_wait_for_ready_queue(qpair)) 3333 result = VMCI_ERROR_WOULD_BLOCK; 3334 3335 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3336 3337 qp_unlock(qpair); 3338 return result; 3339 } 3340 EXPORT_SYMBOL_GPL(vmci_qpair_peekv); 3341
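
/*
 * End-to-end usage sketch for the vmci_qpair_* client API exported above.
 * Illustrative only: error handling is trimmed, and PRODUCE_SZ, CONSUME_SZ
 * and peer_cid are placeholders the caller would define:
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	char in[64], out[] = "hello";
 *	ssize_t n;
 *
 *	if (vmci_qpair_alloc(&qpair, &handle, PRODUCE_SZ, CONSUME_SZ,
 *			     peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS) < VMCI_SUCCESS)
 *		return;
 *
 *	n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
 *	if (n >= VMCI_SUCCESS && vmci_qpair_consume_buf_ready(qpair) > 0)
 *		n = vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
 *
 *	vmci_qpair_detach(&qpair);
 */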