// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cc_platform.h>

#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"

#define MESSAGE_STATUS_SUCCESS  0x0001
#define MESSAGE_STATUS_DORECV   0x0002
#define MESSAGE_STATUS_CPT      0x0010
#define MESSAGE_STATUS_HB       0x0080

#define RPCI_PROTOCOL_NUM       0x49435052
#define GUESTMSG_FLAG_COOKIE    0x80000000

#define RETRIES                 3

#define VMW_HYPERVISOR_MAGIC    0x564D5868

#define VMW_PORT_CMD_MSG        30
#define VMW_PORT_CMD_HB_MSG     0
#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)

#define VMW_PORT_CMD_MKS_GUEST_STATS   85
#define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)

#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)

#define MAX_USER_MSG_LENGTH	PAGE_SIZE
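
/*
 * The magic numbers above are ASCII tags: VMW_HYPERVISOR_MAGIC is
 * "VMXh", and RPCI_PROTOCOL_NUM spells "RPCI" when stored as
 * little-endian bytes. Every backdoor call reports a MESSAGE_STATUS_*
 * bitmask in the high word of a result register, which is what
 * HIGH_WORD() extracts throughout this file.
 */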

static u32 vmw_msg_enabled = 1;

enum rpc_msg_type {
	MSG_TYPE_OPEN,
	MSG_TYPE_SENDSIZE,
	MSG_TYPE_SENDPAYLOAD,
	MSG_TYPE_RECVSIZE,
	MSG_TYPE_RECVPAYLOAD,
	MSG_TYPE_RECVSTATUS,
	MSG_TYPE_CLOSE,
};

struct rpc_channel {
	u16 channel_id;
	u32 cookie_high;
	u32 cookie_low;
};

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char *const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
	{ "vmw_cotable_resize", "vmw_cotable_resize" },
};
#endif

/**
 * vmw_open_channel
 *
 * @channel: RPC channel
 * @protocol: RPC protocol to open the channel with, e.g. RPCI_PROTOCOL_NUM
 *
 * Returns: 0 on success
 */
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
		(protocol | GUESTMSG_FLAG_COOKIE), si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	channel->channel_id = HIGH_WORD(edx);
	channel->cookie_high = si;
	channel->cookie_low = di;

	return 0;
}

/**
 * vmw_close_channel
 *
 * @channel: RPC channel
 *
 * Returns: 0 on success
 */
static int vmw_close_channel(struct rpc_channel *channel)
{
	unsigned long eax, ebx, ecx, edx, si, di;

	/* Set up additional parameters */
	si = channel->cookie_high;
	di = channel->cookie_low;

	VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
		0, si, di,
		channel->channel_id << 16,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	return 0;
}
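
/*
 * A complete RPCI exchange strings the helpers in this file together.
 * The sketch below is illustrative only (error handling elided); see
 * vmw_host_get_guestinfo() further down for the real pattern:
 *
 *	struct rpc_channel channel;
 *	void *reply = NULL;
 *	size_t reply_len = 0;
 *
 *	vmw_open_channel(&channel, RPCI_PROTOCOL_NUM);
 *	vmw_send_msg(&channel, "info-get GuestInfo.svga.gl3");
 *	vmw_recv_msg(&channel, &reply, &reply_len);
 *	vmw_close_channel(&channel);
 *	kfree(reply);
 */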

/**
 * vmw_port_hb_out - Send the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @msg: NULL-terminated message.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
				     const char *msg, bool hb)
{
	unsigned long si, di, eax, ebx, ecx, edx;
	unsigned long msg_len = strlen(msg);

	/* HB port can't access encrypted memory. */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		unsigned long bp = channel->cookie_high;
		u32 channel_id = (channel->channel_id << 16);

		si = (uintptr_t) msg;
		di = channel->cookie_low;

		VMW_PORT_HB_OUT(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			msg_len, si, di,
			VMWARE_HYPERVISOR_HB | channel_id |
			VMWARE_HYPERVISOR_OUT,
			VMW_HYPERVISOR_MAGIC, bp,
			eax, ebx, ecx, edx, si, di);

		return ebx;
	}

	/* HB port not available. Send the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
		unsigned int bytes = min_t(size_t, msg_len, 4);
		unsigned long word = 0;

		memcpy(&word, msg, bytes);
		msg_len -= bytes;
		msg += bytes;
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
			word, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);
	}

	return ecx;
}

/**
 * vmw_port_hb_in - Receive the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @reply: Pointer to buffer holding reply.
 * @reply_len: Length of the reply.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
				    unsigned long reply_len, bool hb)
{
	unsigned long si, di, eax, ebx, ecx, edx;

	/* HB port can't access encrypted memory */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		unsigned long bp = channel->cookie_low;
		u32 channel_id = (channel->channel_id << 16);

		si = channel->cookie_high;
		di = (uintptr_t) reply;

		VMW_PORT_HB_IN(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			reply_len, si, di,
			VMWARE_HYPERVISOR_HB | channel_id,
			VMW_HYPERVISOR_MAGIC, bp,
			eax, ebx, ecx, edx, si, di);

		return ebx;
	}

	/* HB port not available. Retrieve the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (reply_len) {
		unsigned int bytes = min_t(unsigned long, reply_len, 4);

		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
			MESSAGE_STATUS_SUCCESS, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
			break;

		memcpy(reply, &ebx, bytes);
		reply_len -= bytes;
		reply += bytes;
	}

	return ecx;
}
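
/*
 * Transport note: when the host advertises MESSAGE_STATUS_HB, the
 * payload moves in bulk through the high-bandwidth port (hence the
 * flat buffer and the VMWARE_HYPERVISOR_HB flag), while the fallback
 * path funnels it through the classic backdoor one 32-bit register at
 * a time. Both variants report MESSAGE_STATUS_* bits in the high word
 * of the returned status register.
 */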

/**
 * vmw_send_msg: Sends a message to the host
 *
 * @channel: RPC channel
 * @msg: NULL terminated string
 *
 * Returns: 0 on success
 */
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
	unsigned long eax, ebx, ecx, edx, si, di;
	size_t msg_len = strlen(msg);
	int retries = 0;

	while (retries < RETRIES) {
		retries++;

		/* Set up additional parameters */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_SENDSIZE,
			msg_len, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			/* Expected success. Give up. */
			return -EINVAL;
		}

		/* Send msg */
		ebx = vmw_port_hb_out(channel, msg,
				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));

		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
			return 0;
		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
			/* A checkpoint occurred. Retry. */
			continue;
		} else {
			break;
		}
	}

	return -EINVAL;
}
STACK_FRAME_NON_STANDARD(vmw_send_msg);

/**
 * vmw_recv_msg: Receives a message from the host
 *
 * Note: It is the caller's responsibility to call kfree() on msg.
 *
 * @channel: channel opened by vmw_open_channel
 * @msg: [OUT] message received from the host
 * @msg_len: message length
 *
 * Returns: 0 on success
 */
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
			size_t *msg_len)
{
	unsigned long eax, ebx, ecx, edx, si, di;
	char *reply;
	size_t reply_len;
	int retries = 0;

	*msg_len = 0;
	*msg = NULL;

	while (retries < RETRIES) {
		retries++;

		/* Set up additional parameters */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_RECVSIZE,
			0, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			DRM_ERROR("Failed to get reply size for host message.\n");
			return -EINVAL;
		}

		/* No reply available. This is okay. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
			return 0;

		reply_len = ebx;
		reply = kzalloc(reply_len + 1, GFP_KERNEL);
		if (!reply) {
			DRM_ERROR("Cannot allocate memory for host message reply.\n");
			return -ENOMEM;
		}

		/* Receive buffer */
		ebx = vmw_port_hb_in(channel, reply, reply_len,
				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		reply[reply_len] = '\0';

		/* Ack buffer */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
			MESSAGE_STATUS_SUCCESS, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		break;
	}

	if (!reply)
		return -EINVAL;

	*msg_len = reply_len;
	*msg = reply;

	return 0;
}
STACK_FRAME_NON_STANDARD(vmw_recv_msg);
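
/*
 * The STACK_FRAME_NON_STANDARD() annotations above are needed because
 * the VMW_PORT_HB_* macros (reached through vmw_port_hb_out/in, which
 * are typically inlined here) pass the channel cookie in the bp
 * register; the inline assembly thus temporarily repurposes the frame
 * pointer, and objtool would otherwise flag the stack frames as
 * invalid.
 */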

/**
 * vmw_host_get_guestinfo: Gets a GuestInfo parameter
 *
 * Gets the value of a GuestInfo.* parameter. The value returned will be in
 * a string, and it is up to the caller to post-process.
 *
 * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
 * @buffer: if NULL, *length will contain the reply size.
 * @length: size of @buffer. Set to size of reply upon return
 *
 * Returns: 0 on success
 */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length)
{
	struct rpc_channel channel;
	char *msg, *reply = NULL;
	size_t reply_len = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!guest_info_param || !length)
		return -EINVAL;

	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
			  guest_info_param);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg) ||
	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
		goto out_msg;

	vmw_close_channel(&channel);
	if (buffer && reply && reply_len > 0) {
		/* Remove the reply code, which is the first 2 characters of
		 * the reply. The explicit check avoids unsigned underflow
		 * should the reply be shorter than the code itself.
		 */
		reply_len = reply_len > 2 ? reply_len - 2 : 0;
		reply_len = min(reply_len, *length);

		if (reply_len > 0)
			memcpy(buffer, reply + 2, reply_len);
	}

	*length = reply_len;

	kfree(reply);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
	kfree(reply);
out_open:
	*length = 0;
	kfree(msg);
	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);

	return -EINVAL;
}

/**
 * vmw_host_printf: Sends a log message to the host
 *
 * @fmt: Regular printf format string and arguments
 *
 * Returns: 0 on success
 */
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
	va_list ap;
	struct rpc_channel channel;
	char *msg;
	char *log;
	int ret = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!fmt)
		return ret;

	va_start(ap, fmt);
	log = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!log) {
		DRM_ERROR("Cannot allocate memory for the log message.\n");
		return -ENOMEM;
	}

	msg = kasprintf(GFP_KERNEL, "log %s", log);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for host log message.\n");
		kfree(log);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg))
		goto out_msg;

	vmw_close_channel(&channel);
	kfree(msg);
	kfree(log);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);
	kfree(log);
	DRM_ERROR("Failed to send host log message.\n");

	return -EINVAL;
}
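
/*
 * Example use of the two helpers above (illustrative only; the queried
 * parameter and buffer size are arbitrary):
 *
 *	char value[64];
 *	size_t len = sizeof(value);
 *
 *	if (!vmw_host_get_guestinfo("GuestInfo.svga.gl3", value, &len))
 *		vmw_host_printf("vmwgfx: GuestInfo.svga.gl3 = %.*s",
 *				(int)len, value);
 */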

/**
 * vmw_msg_ioctl: Sends and receives a message to/from the host on behalf of
 * user-space
 *
 * Sends a message from user-space to the host.
 * Can also receive a result from the host and return that to user-space.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 */
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_vmw_msg_arg *arg =
		(struct drm_vmw_msg_arg *)data;
	struct rpc_channel channel;
	char *msg;
	int length;

	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for log message.\n");
		return -ENOMEM;
	}

	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
				   MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
		DRM_ERROR("Userspace message access failure.\n");
		kfree(msg);
		return -EINVAL;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
		DRM_ERROR("Failed to open channel.\n");
		goto out_open;
	}

	if (vmw_send_msg(&channel, msg)) {
		DRM_ERROR("Failed to send message to host.\n");
		goto out_msg;
	}

	if (!arg->send_only) {
		char *reply = NULL;
		size_t reply_len = 0;

		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
			DRM_ERROR("Failed to receive message from host.\n");
			goto out_msg;
		}
		if (reply && reply_len > 0) {
			if (copy_to_user((void __user *)((unsigned long)arg->receive),
					 reply, reply_len)) {
				DRM_ERROR("Failed to copy message to userspace.\n");
				kfree(reply);
				goto out_msg;
			}
			arg->receive_len = (__u32)reply_len;
		}
		kfree(reply);
	}

	vmw_close_channel(&channel);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);

	return -EINVAL;
}

/**
 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 *
 * @arr: Array to reset.
 * @size: Array length.
 */
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
	size_t i;

	BUG_ON(!arr || size == 0);

	for (i = 0; i < size; ++i)
		arr[i] = INVALID_PPN64;
}

/**
 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 */
static inline void hypervisor_ppn_reset_all(void)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
		0, si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);
}

/**
 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 * hypervisor. Any related userspace pages should be pinned in advance.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
		(unsigned long)pfn, si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);
}

/**
 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
		(unsigned long)pfn, si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);
}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)

/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"

/**
 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the names/description sequence.
 */
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;

	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
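
/*
 * After mksstat_init_record_time() runs for a counter, the strings page
 * holds that counter's name and description back to back as
 * NUL-terminated strings, e.g. for MKSSTAT_KERN_EXECBUF:
 *
 *	"vmw_execbuf_ioctl\0vmw_execbuf_ioctl\0"
 *	 ^pstrs             ^pstrd             ^returned pointer
 *
 * so successive calls pack all names/descriptions into one sequence.
 */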

/**
 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
 * kernel-internal counters. Adds PFN mapping to the hypervisor.
 *
 * Create a single mksGuestStat instance descriptor and corresponding structures
 * for all kernel-internal counters. The corresponding PFNs are mapped with the
 * hypervisor.
 *
 * @ppage: Output pointer to page containing the instance descriptor.
 * Return: Zero on success, negative error code on error.
 */
static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Allocate pages for the kernel-internal instance descriptor */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Set up all kernel-internal counters and corresponding structures */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);

	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */

	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal instance descriptor */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		 MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}
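
/*
 * Presumed layout of the order-2 allocation above (the authoritative
 * offsets live in the vmw_mksstat_get_kern_*() helpers in
 * vmwgfx_mksstat.h): one page for the instance descriptor, followed by
 * one page each for the counters, the info entries and the strings,
 * which is why each PPN array above needs only its first entry.
 */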

/**
 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
 * mksGuestStat instance descriptor.
 *
 * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
 * If none is present yet, claim a free slot and set up a new kernel-internal
 * mksGuestStat instance descriptor for it.
 *
 * @pid: Process for which a slot is sought.
 * @dev_priv: Identifies the drm private device.
 * Return: Non-negative slot on success, negative error code on error.
 */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Check if an instance descriptor for this pid is already present */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Set up a new instance descriptor for this pid */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* Reset top-timer tracking for this slot */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}

#endif
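
/*
 * Slot management note: the pid tables used above and below are small
 * lock-free arrays. A slot is claimed by cmpxchg()ing its pid from 0 to
 * the MKSSTAT_PID_RESERVED sentinel, which parks the slot until its
 * descriptor is ready (or releases it again on failure); the kernel
 * table additionally starts probing at hash_32(pid), so repeat lookups
 * for the same pid find their slot immediately.
 */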

/**
 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
 * mksGuestStat instance-descriptor page and unpins all related user pages.
 *
 * Unpin all user pages related to this instance descriptor and free
 * the instance-descriptor page itself.
 *
 * @page: Page of the instance descriptor.
 */
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}

/**
 * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev_priv: Identifies the drm private device.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PFN mappings with the hypervisor */
	hypervisor_ppn_reset_all();

	/* Discard all userspace-originating instance descriptors and unpin all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Discard all kernel-internal instance descriptors and free all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#endif
	return ret;
}
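
/*
 * Note on the -EAGAIN above: a slot whose pid is MKSSTAT_PID_RESERVED,
 * or whose cmpxchg() loses against a concurrent add/remove, belongs to
 * a racing thread mid-update; rather than block, the walk skips it and
 * reports -EAGAIN so the caller may retry the reset.
 */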

/**
 * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *const dev_priv = vmw_priv(dev);

	return vmw_mksstat_remove_all(dev_priv);
}

/**
 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
 * instance descriptor and registers that with the hypervisor.
 *
 * Create a hypervisor PFN mapping, containing a single mksGuestStat instance
 * descriptor and pin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);
	long desc_len;
	long nr_pinned_stat;
	long nr_pinned_info;
	long nr_pinned_strs;
	MKSGuestStatInstanceDescriptor *pdesc;
	struct page *page = NULL;
	struct page **pages_stat = NULL;
	struct page **pages_info = NULL;
	struct page **pages_strs = NULL;
	size_t i, slot;
	int ret_err = -ENOMEM;

	arg->id = -1;

	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
	    num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
	    num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
	pages_stat = (struct page **)kmalloc_array(
		ARRAY_SIZE(pdesc->statPPNs) +
		ARRAY_SIZE(pdesc->infoPPNs) +
		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);

	if (!pages_stat)
		goto err_nomem;

	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);

	/* Allocate a page for the instance descriptor */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		goto err_nomem;

	/* Set up the instance descriptor */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
				     ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		ret_err = -EFAULT;
		goto err_nomem;
	}

	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor */
	nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/* Send the descriptor to the host via a hypervisor call. The mksGuestStat
	   pages will remain in use until the user requests a matching remove stats
	   or a stats reset occurs. */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n",
		     current->pid, (int)desc_len, pdesc->description, slot);

	kfree(pages_stat);
	return 0;

err_pin_strs:
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

err_nomem:
	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
	if (page)
		__free_page(page);
	kfree(pages_stat);

	return ret_err;
}

/**
 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
 * instance descriptor from the hypervisor.
 *
 * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
 * descriptor and unpin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	return -EAGAIN;
}

/**
 * vmw_disable_backdoor: Disables all backdoor communication
 * with the hypervisor.
 */
void vmw_disable_backdoor(void)
{
	vmw_msg_enabled = 0;
}