xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cc_platform.h>

#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"

#define MESSAGE_STATUS_SUCCESS  0x0001
#define MESSAGE_STATUS_DORECV   0x0002
#define MESSAGE_STATUS_CPT      0x0010
#define MESSAGE_STATUS_HB       0x0080

#define RPCI_PROTOCOL_NUM       0x49435052
#define GUESTMSG_FLAG_COOKIE    0x80000000

#define RETRIES                 3

#define VMW_PORT_CMD_MSG        30
#define VMW_PORT_CMD_HB_MSG     0
#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)

#define VMW_PORT_CMD_MKS_GUEST_STATS   85
#define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)

#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
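
/*
 * The backdoor reports its status in the high 16 bits of a register, so
 * e.g. HIGH_WORD(0x00010000) == MESSAGE_STATUS_SUCCESS.
 */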

#define MAX_USER_MSG_LENGTH	PAGE_SIZE

static u32 vmw_msg_enabled = 1;

enum rpc_msg_type {
	MSG_TYPE_OPEN,
	MSG_TYPE_SENDSIZE,
	MSG_TYPE_SENDPAYLOAD,
	MSG_TYPE_RECVSIZE,
	MSG_TYPE_RECVPAYLOAD,
	MSG_TYPE_RECVSTATUS,
	MSG_TYPE_CLOSE,
};
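
/*
 * A complete guest-to-host RPC, as implemented by the helpers below,
 * walks these message types in a fixed order:
 *
 *	OPEN -> SENDSIZE -> SENDPAYLOAD(s) ->
 *		RECVSIZE -> RECVPAYLOAD(s) -> RECVSTATUS -> CLOSE
 *
 * The payload steps repeat 4 bytes at a time unless the high-bandwidth
 * port is used, and the RECV* steps are skipped when the host has no
 * reply to deliver.
 */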

struct rpc_channel {
	u16 channel_id;
	u32 cookie_high;
	u32 cookie_low;
};
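
/*
 * Typical channel lifecycle, as used by vmw_host_get_guestinfo() and
 * vmw_host_printf() below (sketch, error handling omitted):
 *
 *	struct rpc_channel channel;
 *
 *	if (!vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
 *		vmw_send_msg(&channel, "log hello");
 *		vmw_close_channel(&channel);
 *	}
 */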

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
	{ "vmw_cotable_resize", "vmw_cotable_resize" },
};
#endif

/**
 * vmw_open_channel: Opens an RPC channel to the host
 *
 * @channel: RPC channel
 * @protocol: RPC protocol number, e.g. RPCI_PROTOCOL_NUM
 *
 * Returns: 0 on success
 */
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
	u32 ecx, edx, esi, edi;

	vmware_hypercall6(VMW_PORT_CMD_OPEN_CHANNEL,
			  (protocol | GUESTMSG_FLAG_COOKIE), 0,
			  &ecx, &edx, &esi, &edi);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	channel->channel_id  = HIGH_WORD(edx);
	channel->cookie_high = esi;
	channel->cookie_low  = edi;

	return 0;
}

/**
 * vmw_close_channel: Closes an RPC channel to the host
 *
 * @channel: RPC channel
 *
 * Returns: 0 on success
 */
static int vmw_close_channel(struct rpc_channel *channel)
{
	u32 ecx;

	vmware_hypercall5(VMW_PORT_CMD_CLOSE_CHANNEL,
			  0, channel->channel_id << 16,
			  channel->cookie_high,
			  channel->cookie_low,
			  &ecx);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	return 0;
}

/**
 * vmw_port_hb_out - Send the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @msg: NULL-terminated message.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
				     const char *msg, bool hb)
{
	u32 ebx, ecx;
	unsigned long msg_len = strlen(msg);

	/* HB port can't access encrypted memory. */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		vmware_hypercall_hb_out(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			msg_len,
			channel->channel_id << 16,
			(uintptr_t) msg, channel->cookie_low,
			channel->cookie_high,
			&ebx);

		return ebx;
	}

	/* HB port not available. Send the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
		unsigned int bytes = min_t(size_t, msg_len, 4);
		unsigned long word = 0;

		memcpy(&word, msg, bytes);
		msg_len -= bytes;
		msg += bytes;

		vmware_hypercall5(VMW_PORT_CMD_MSG |
				  (MSG_TYPE_SENDPAYLOAD << 16),
				  word, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);
	}

	return ecx;
}
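
/*
 * Note: the 4-byte chunks above are packed into the low bytes of 'word'
 * with memcpy(), so on a little-endian guest the string "log x" goes out
 * as the words "log " and "x\0\0\0".
 */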

/**
 * vmw_port_hb_in - Receive the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @reply: Pointer to buffer holding reply.
 * @reply_len: Length of the reply.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
				    unsigned long reply_len, bool hb)
{
	u32 ebx, ecx, edx;

	/* HB port can't access encrypted memory */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		vmware_hypercall_hb_in(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			reply_len,
			channel->channel_id << 16,
			channel->cookie_high,
			(uintptr_t) reply, channel->cookie_low,
			&ebx);

		return ebx;
	}

	/* HB port not available. Retrieve the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (reply_len) {
		unsigned int bytes = min_t(unsigned long, reply_len, 4);

		vmware_hypercall7(VMW_PORT_CMD_MSG |
				  (MSG_TYPE_RECVPAYLOAD << 16),
				  MESSAGE_STATUS_SUCCESS,
				  channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ebx, &ecx, &edx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
			break;

		memcpy(reply, &ebx, bytes);
		reply_len -= bytes;
		reply += bytes;
	}

	return ecx;
}

/**
 * vmw_send_msg: Sends a message to the host
 *
 * @channel: RPC channel
 * @msg: NULL-terminated string
 *
 * Returns: 0 on success
 */
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
	u32 ebx, ecx;
	size_t msg_len = strlen(msg);
	int retries = 0;

	while (retries < RETRIES) {
		retries++;

		vmware_hypercall5(VMW_PORT_CMD_SENDSIZE,
				  msg_len, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			/* Sending the message size failed; don't retry. */
			return -EINVAL;
		}

		/* Send msg */
		ebx = vmw_port_hb_out(channel, msg,
				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));

		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
			return 0;
		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
			/* A checkpoint occurred. Retry. */
			continue;
		} else {
			break;
		}
	}

	return -EINVAL;
}
STACK_FRAME_NON_STANDARD(vmw_send_msg);

/**
 * vmw_recv_msg: Receives a message from the host
 *
 * Note: It is the caller's responsibility to call kfree() on msg.
 *
 * @channel: channel opened by vmw_open_channel
 * @msg: [OUT] message received from the host
 * @msg_len: [OUT] length of the message received
 *
 * Returns: 0 on success
 */
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
			size_t *msg_len)
{
	u32 ebx, ecx, edx;
	char *reply;
	size_t reply_len;
	int retries = 0;

	*msg_len = 0;
	*msg = NULL;

	while (retries < RETRIES) {
		retries++;

		vmware_hypercall7(VMW_PORT_CMD_RECVSIZE,
				  0, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ebx, &ecx, &edx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			DRM_ERROR("Failed to get reply size for host message.\n");
			return -EINVAL;
		}

		/* No reply available. This is okay. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
			return 0;

		reply_len = ebx;
		reply     = kzalloc(reply_len + 1, GFP_KERNEL);
		if (!reply) {
			DRM_ERROR("Cannot allocate memory for host message reply.\n");
			return -ENOMEM;
		}

		/* Receive buffer */
		ebx = vmw_port_hb_in(channel, reply, reply_len,
				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		reply[reply_len] = '\0';

		vmware_hypercall5(VMW_PORT_CMD_RECVSTATUS,
				  MESSAGE_STATUS_SUCCESS,
				  channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		break;
	}

	if (!reply)
		return -EINVAL;

	*msg_len = reply_len;
	*msg     = reply;

	return 0;
}
STACK_FRAME_NON_STANDARD(vmw_recv_msg);

/**
 * vmw_host_get_guestinfo: Gets a GuestInfo parameter
 *
 * Gets the value of a GuestInfo.* parameter. The value returned will be in
 * a string, and it is up to the caller to post-process. The host prefixes
 * the reply with a two-character reply code, which is stripped before the
 * value is copied into @buffer.
 *
 * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
 * @buffer: If NULL, *length will contain the reply size.
 * @length: Size of @buffer. Set to size of reply upon return.
 *
 * Returns: 0 on success
 */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length)
{
	struct rpc_channel channel;
	char *msg, *reply = NULL;
	size_t reply_len = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!guest_info_param || !length)
		return -EINVAL;

	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
			  guest_info_param);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg) ||
	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
		goto out_msg;

	vmw_close_channel(&channel);
	if (buffer && reply && reply_len > 0) {
		/* Remove the reply code, which is the first 2 characters
		 * of the reply.
		 */
		reply_len = reply_len >= 2 ? reply_len - 2 : 0;
		reply_len = min(reply_len, *length);

		if (reply_len > 0)
			memcpy(buffer, reply + 2, reply_len);
	}

	*length = reply_len;

	kfree(reply);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
	kfree(reply);
out_open:
	*length = 0;
	kfree(msg);
	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);

	return -EINVAL;
}
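
/*
 * Illustrative call (the parameter name is hypothetical; replies arrive
 * with a status prefix such as "1 ", which vmw_host_get_guestinfo()
 * strips before copying into the caller's buffer):
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	if (!vmw_host_get_guestinfo("svga.gl3", buf, &len))
 *		pr_info("value: %.*s\n", (int)len, buf);
 */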

/**
 * vmw_host_printf: Sends a log message to the host
 *
 * @fmt: Regular printf format string and arguments
 *
 * Returns: 0 on success
 */
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
	va_list ap;
	struct rpc_channel channel;
	char *msg;
	char *log;
	int ret = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!fmt)
		return ret;

	va_start(ap, fmt);
	log = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!log) {
		DRM_ERROR("Cannot allocate memory for the log message.\n");
		return -ENOMEM;
	}

	msg = kasprintf(GFP_KERNEL, "log %s", log);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for host log message.\n");
		kfree(log);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg))
		goto out_msg;

	vmw_close_channel(&channel);
	kfree(msg);
	kfree(log);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);
	kfree(log);
	DRM_ERROR("Failed to send host log message.\n");

	return -EINVAL;
}
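
/*
 * Illustrative call: the formatted string is wrapped in a "log ..." RPC
 * and typically ends up in the host's vmware.log ('fifo_size' is a
 * hypothetical variable):
 *
 *	vmw_host_printf("vmwgfx: fifo size %u", fifo_size);
 */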

/**
 * vmw_msg_ioctl: Sends and receives a message to/from the host
 *
 * Sends a message from user-space to the host.
 * Can also receive a result from the host and return that to user-space.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 */
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_vmw_msg_arg *arg =
			(struct drm_vmw_msg_arg *)data;
	struct rpc_channel channel;
	char *msg;
	int length;

	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for log message.\n");
		return -ENOMEM;
	}

	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
				   MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
		DRM_ERROR("Userspace message access failure.\n");
		kfree(msg);
		return -EINVAL;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
		DRM_ERROR("Failed to open channel.\n");
		goto out_open;
	}

	if (vmw_send_msg(&channel, msg)) {
		DRM_ERROR("Failed to send message to host.\n");
		goto out_msg;
	}

	if (!arg->send_only) {
		char *reply = NULL;
		size_t reply_len = 0;

		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
			DRM_ERROR("Failed to receive message from host.\n");
			goto out_msg;
		}
		if (reply && reply_len > 0) {
			if (copy_to_user((void __user *)((unsigned long)arg->receive),
					 reply, reply_len)) {
				DRM_ERROR("Failed to copy message to userspace.\n");
				kfree(reply);
				goto out_msg;
			}
			arg->receive_len = (__u32)reply_len;
		}
		kfree(reply);
	}

	vmw_close_channel(&channel);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);

	return -EINVAL;
}
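
/*
 * From user-space this is reached through the DRM_VMW_MSG ioctl. A
 * minimal sketch using libdrm's command wrappers (error handling and
 * the sizing of the caller-provided 'reply_buf' are left out):
 *
 *	struct drm_vmw_msg_arg arg = {
 *		.send = (uintptr_t)"info-get guestinfo.ip",
 *		.receive = (uintptr_t)reply_buf,
 *		.send_only = 0,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */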

/**
 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 *
 * @arr: Array to reset.
 * @size: Array length.
 */
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
	size_t i;

	BUG_ON(!arr || size == 0);

	for (i = 0; i < size; ++i)
		arr[i] = INVALID_PPN64;
}

/**
 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 */
static inline void hypervisor_ppn_reset_all(void)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0);
}

/**
 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 * hypervisor. Any related userspace pages should be pinned in advance.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
}

/**
 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)

/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
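
/*
 * With MKSSTAT_KERNEL_PAGES_ORDER == 2 the kernel-internal allocation is
 * four contiguous pages; judging by mksstat_init_kern_id() below and the
 * vmw_mksstat_get_kern_*() helpers from vmwgfx_mksstat.h, the instance
 * descriptor appears to occupy the first page, with the counters, info
 * entries and name/description strings each fitting in one of the
 * following pages.
 */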

/**
 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the names/description sequence.
 */
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;

	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}

/**
 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
 * kernel-internal counters. Adds PFN mapping to the hypervisor.
 *
 * Create a single mksGuestStat instance descriptor and corresponding structures
 * for all kernel-internal counters. The corresponding PFNs are mapped with the
 * hypervisor.
 *
 * @ppage: Output pointer to page containing the instance descriptor.
 * Return: Zero on success, negative error code on error.
 */
static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Allocate pages for the kernel-internal instance descriptor */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Set up all kernel-internal counters and corresponding structures */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);

	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */

	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal instance descriptor */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}

/**
 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
 * mksGuestStat instance descriptor.
 *
 * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
 * If none is present yet, allocate a new one and set up a kernel-internal
 * mksGuestStat instance descriptor in it.
 *
 * @pid: Process for which a slot is sought.
 * @dev_priv: Identifies the drm private device.
 * Return: Non-negative slot on success, negative error code on error.
 */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Check if an instance descriptor for this pid is already present */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Set up a new instance descriptor for this pid */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* Reset top-timer tracking for this slot */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}

#endif

/**
 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
 * mksGuestStat instance-descriptor page and unpins all related user pages.
 *
 * Unpin all user pages related to this instance descriptor and free
 * the instance-descriptor page itself.
 *
 * @page: Page of the instance descriptor.
 */
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}

/**
 * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev_priv: Identifies the drm private device.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PFN mappings with the hypervisor */
	hypervisor_ppn_reset_all();

	/* Discard all userspace-originating instance descriptors and unpin all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Discard all kernel-internal instance descriptors and free all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}
#endif

	return ret;
}

/**
 * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *const dev_priv = vmw_priv(dev);

	return vmw_mksstat_remove_all(dev_priv);
}

/**
 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
 * instance descriptor and registers that with the hypervisor.
 *
 * Create a hypervisor PFN mapping containing a single mksGuestStat instance
 * descriptor and pin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);
	long desc_len;
	long nr_pinned_stat;
	long nr_pinned_info;
	long nr_pinned_strs;
	MKSGuestStatInstanceDescriptor *pdesc;
	struct page *page = NULL;
	struct page **pages_stat = NULL;
	struct page **pages_info = NULL;
	struct page **pages_strs = NULL;
	size_t i, slot;
	int ret_err = -ENOMEM;

	arg->id = -1;

	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
		num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
		num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
	pages_stat = (struct page **)kmalloc_array(
		ARRAY_SIZE(pdesc->statPPNs) +
		ARRAY_SIZE(pdesc->infoPPNs) +
		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);

	if (!pages_stat)
		goto err_nomem;

	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);

	/* Allocate a page for the instance descriptor */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		goto err_nomem;

	/* Set up the instance descriptor */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
		ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		ret_err = -EFAULT;
		goto err_nomem;
	}

	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor */
	nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/*
	 * Send the descriptor to the host via a hypervisor call. The
	 * mksGuestStat pages will remain in use until the user requests a
	 * matching remove stats or a stats reset occurs.
	 */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);

	kfree(pages_stat);
	return 0;

err_pin_strs:
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

err_nomem:
	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
	if (page)
		__free_page(page);
	kfree(pages_stat);

	return ret_err;
}

/**
 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
 * instance descriptor from the hypervisor.
 *
 * Discard a hypervisor PFN mapping containing a single mksGuestStat instance
 * descriptor and unpin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	return -EAGAIN;
}

/**
 * vmw_disable_backdoor: Disables all backdoor communication
 * with the hypervisor.
 */
void vmw_disable_backdoor(void)
{
	vmw_msg_enabled = 0;
}