xref: /linux/arch/x86/kernel/cpu/resctrl/pseudo_lock.c (revision 570d58b12fbf7bae0ba72d929ccf914a4df5ca7c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Resource Director Technology (RDT)
 *
 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Reinette Chatre <reinette.chatre@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/mman.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

/*
 * The bits needed to disable hardware prefetching vary based on the
 * platform. During initialization we will discover which bits to use.
 */
static u64 prefetch_disable_bits;

/*
 * Major number assigned to and shared by all devices exposing
 * pseudo-locked regions.
 */
static unsigned int pseudo_lock_major;
static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);

static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
{
	const struct rdtgroup *rdtgrp;

	rdtgrp = dev_get_drvdata(dev);
	if (mode)
		*mode = 0600;
	guard(mutex)(&rdtgroup_mutex);
	return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn));
}

static const struct class pseudo_lock_class = {
	.name = "pseudo_lock",
	.devnode = pseudo_lock_devnode,
};

/**
 * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
 *                                          platforms
 * @void: It takes no parameters.
 *
 * Capture the list of platforms that have been validated to support
 * pseudo-locking. This includes testing to ensure pseudo-locked regions
 * with low cache miss rates can be created under a variety of load conditions
 * as well as that these pseudo-locked regions can maintain their low cache
 * miss rates under a variety of load conditions for significant lengths of
 * time.
 *
 * After a platform has been validated to support pseudo-locking its
 * hardware prefetch disable bits are included here as they are documented
 * in the SDM.
 *
 * When adding a platform here, also add support for its cache events to
 * resctrl_arch_measure_l*_residency()
 *
 * Return:
 * If the platform is supported, the bits to disable hardware prefetchers, 0
 * if the platform is not supported.
 */
u64 resctrl_arch_get_prefetch_disable_bits(void)
{
	prefetch_disable_bits = 0;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return 0;

	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL_X:
		/*
		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
		 * as:
		 * 0    L2 Hardware Prefetcher Disable (R/W)
		 * 1    L2 Adjacent Cache Line Prefetcher Disable (R/W)
		 * 2    DCU Hardware Prefetcher Disable (R/W)
		 * 3    DCU IP Prefetcher Disable (R/W)
		 * 63:4 Reserved
		 */
		prefetch_disable_bits = 0xF;
		break;
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		/*
		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
		 * as:
		 * 0     L2 Hardware Prefetcher Disable (R/W)
		 * 1     Reserved
		 * 2     DCU Hardware Prefetcher Disable (R/W)
		 * 63:3  Reserved
		 */
		prefetch_disable_bits = 0x5;
		break;
	}

	return prefetch_disable_bits;
}

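/*
 * For illustration only: a minimal sketch of how the returned bits are
 * meant to be consumed, mirroring the real flow in
 * resctrl_arch_pseudo_lock_fn() below. The prefetchers are disabled
 * around the critical section and the original MSR value is restored
 * afterwards:
 *
 *	u64 saved = native_rdmsrq(MSR_MISC_FEATURE_CONTROL);
 *
 *	native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
 *	... access the memory that is being pseudo-locked ...
 *	native_wrmsrq(MSR_MISC_FEATURE_CONTROL, saved);
 */
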
/**
 * pseudo_lock_minor_get - Obtain available minor number
 * @minor: Pointer to where new minor number will be stored
 *
 * A bitmask is used to track available minor numbers. Here the next free
 * minor number is marked as unavailable and returned.
 *
 * Return: 0 on success, <0 on failure.
 */
static int pseudo_lock_minor_get(unsigned int *minor)
{
	unsigned long first_bit;

	first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);

	if (first_bit == MINORBITS)
		return -ENOSPC;

	__clear_bit(first_bit, &pseudo_lock_minor_avail);
	*minor = first_bit;

	return 0;
}

/**
 * pseudo_lock_minor_release - Return minor number to available
 * @minor: The minor number made available
 */
static void pseudo_lock_minor_release(unsigned int minor)
{
	__set_bit(minor, &pseudo_lock_minor_avail);
}

/**
 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
 * @minor: The minor number of the device representing pseudo-locked region
 *
 * When the character device is accessed we need to determine which
 * pseudo-locked region it belongs to. This is done by matching the minor
 * number of the device to the pseudo-locked region to which it belongs.
 *
 * Minor numbers are assigned at the time a pseudo-locked region is associated
 * with a cache instance.
 *
 * Return: On success return pointer to resource group owning the pseudo-locked
 *         region, NULL on failure.
 */
static struct rdtgroup *region_find_by_minor(unsigned int minor)
{
	struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
			rdtgrp_match = rdtgrp;
			break;
		}
	}
	return rdtgrp_match;
}

/**
 * struct pseudo_lock_pm_req - A power management QoS request list entry
 * @list:	Entry within the @pm_reqs list for a pseudo-locked region
 * @req:	PM QoS request
 */
struct pseudo_lock_pm_req {
	struct list_head list;
	struct dev_pm_qos_request req;
};

static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
{
	struct pseudo_lock_pm_req *pm_req, *next;

	list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
		dev_pm_qos_remove_request(&pm_req->req);
		list_del(&pm_req->list);
		kfree(pm_req);
	}
}

/**
 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
 * @plr: Pseudo-locked region
 *
 * To prevent the cache from being affected by power management, entering
 * C6 has to be avoided. This is accomplished by requesting a latency
 * requirement lower than the lowest C6 exit latency of all supported
 * platforms as found in the cpuidle state tables in the intel_idle driver.
 * At this time it is possible to do so with a single latency requirement
 * for all supported platforms.
 *
 * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
 * the ACPI latencies need to be considered while keeping in mind that C2
 * may be set to map to deeper sleep states. In this case the latency
 * requirement needs to prevent entering C2 also.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
	struct pseudo_lock_pm_req *pm_req;
	int cpu;
	int ret;

	for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
		pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
		if (!pm_req) {
			rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
			ret = -ENOMEM;
			goto out_err;
		}
		ret = dev_pm_qos_add_request(get_cpu_device(cpu),
					     &pm_req->req,
					     DEV_PM_QOS_RESUME_LATENCY,
					     30);
		if (ret < 0) {
			rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
					    cpu);
			kfree(pm_req);
			ret = -1;
			goto out_err;
		}
		list_add(&pm_req->list, &plr->pm_reqs);
	}

	return 0;

out_err:
	pseudo_lock_cstates_relax(plr);
	return ret;
}

/**
 * pseudo_lock_region_clear - Reset pseudo-lock region data
 * @plr: pseudo-lock region
 *
 * All content of the pseudo-locked region is reset - any allocated memory
 * is freed.
 *
 * Return: void
 */
static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
{
	plr->size = 0;
	plr->line_size = 0;
	kfree(plr->kmem);
	plr->kmem = NULL;
	plr->s = NULL;
	if (plr->d)
		plr->d->plr = NULL;
	plr->d = NULL;
	plr->cbm = 0;
	plr->debugfs_dir = NULL;
}

/**
 * pseudo_lock_region_init - Initialize pseudo-lock region information
 * @plr: pseudo-lock region
 *
 * Called after the user provided a schemata to be pseudo-locked. On entry
 * the &struct pseudo_lock_region has already been initialized from the
 * schemata with the resource, domain, and capacity bitmask. Here the
 * information required for pseudo-locking is deduced from this data and
 * the &struct pseudo_lock_region is initialized further. This information
 * includes:
 * - size in bytes of the region to be pseudo-locked
 * - cache line size to know the stride with which data needs to be accessed
 *   to be pseudo-locked
 * - a cpu associated with the cache instance on which the pseudo-locking
 *   flow can be executed
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
{
	enum resctrl_scope scope = plr->s->res->ctrl_scope;
	struct cacheinfo *ci;
	int ret;

	if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
		return -ENODEV;

	/* Pick the first cpu we find that is associated with the cache. */
	plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);

	if (!cpu_online(plr->cpu)) {
		rdt_last_cmd_printf("CPU %u associated with cache not online\n",
				    plr->cpu);
		ret = -ENODEV;
		goto out_region;
	}

	ci = get_cpu_cacheinfo_level(plr->cpu, scope);
	if (ci) {
		plr->line_size = ci->coherency_line_size;
		plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
		return 0;
	}

	ret = -1;
	rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
	pseudo_lock_region_clear(plr);
	return ret;
}

/**
 * pseudo_lock_init - Initialize a pseudo-lock region
 * @rdtgrp: resource group to which new pseudo-locked region will belong
 *
 * A pseudo-locked region is associated with a resource group. When this
 * association is created the pseudo-locked region is initialized. The
 * details of the pseudo-locked region are not known at this time so only
 * allocation is done and association established.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_init(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr;

	plr = kzalloc(sizeof(*plr), GFP_KERNEL);
	if (!plr)
		return -ENOMEM;

	init_waitqueue_head(&plr->lock_thread_wq);
	INIT_LIST_HEAD(&plr->pm_reqs);
	rdtgrp->plr = plr;
	return 0;
}

/**
 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
 * @plr: pseudo-lock region
 *
 * Initialize the details required to set up the pseudo-locked region and
 * allocate the contiguous memory that will be pseudo-locked to the cache.
 *
 * Return: 0 on success, <0 on failure.  Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
{
	int ret;

	ret = pseudo_lock_region_init(plr);
	if (ret < 0)
		return ret;

	/*
	 * We do not yet support contiguous regions larger than
	 * KMALLOC_MAX_SIZE.
	 */
	if (plr->size > KMALLOC_MAX_SIZE) {
		rdt_last_cmd_puts("Requested region exceeds maximum size\n");
		ret = -E2BIG;
		goto out_region;
	}

	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
	if (!plr->kmem) {
		rdt_last_cmd_puts("Unable to allocate memory\n");
		ret = -ENOMEM;
		goto out_region;
	}

	ret = 0;
	goto out;
out_region:
	pseudo_lock_region_clear(plr);
out:
	return ret;
}

/**
 * pseudo_lock_free - Free a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-locked region belonged
 *
 * The pseudo-locked region's resources have already been released, or not
 * yet created at this point. Now it can be freed and disassociated from the
 * resource group.
 *
 * Return: void
 */
static void pseudo_lock_free(struct rdtgroup *rdtgrp)
{
	pseudo_lock_region_clear(rdtgrp->plr);
	kfree(rdtgrp->plr);
	rdtgrp->plr = NULL;
}

/**
 * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
 * @_plr: the pseudo-lock region descriptor
 *
 * This is the core pseudo-locking flow.
 *
 * First we ensure that the kernel memory cannot be found in the cache.
 * Then, while taking care that there will be as little interference as
426  * with class of service set to the bitmask of the pseudo-locked region.
427  * After this is complete no future CAT allocations will be allowed to
428  * overlap with this bitmask.
429  *
430  * Local register variables are utilized to ensure that the memory region
431  * to be locked is the only memory access made during the critical locking
432  * loop.
433  *
434  * Return: 0. Waiter on waitqueue will be woken on completion.
435  */
436 int resctrl_arch_pseudo_lock_fn(void *_plr)
437 {
438 	struct pseudo_lock_region *plr = _plr;
439 	u32 rmid_p, closid_p;
440 	unsigned long i;
441 	u64 saved_msr;
442 #ifdef CONFIG_KASAN
443 	/*
444 	 * The registers used for local register variables are also used
445 	 * when KASAN is active. When KASAN is active we use a regular
446 	 * variable to ensure we always use a valid pointer, but the cost
447 	 * is that this variable will enter the cache through evicting the
448 	 * memory we are trying to lock into the cache. Thus expect lower
449 	 * pseudo-locking success rate when KASAN is active.
450 	 */
451 	unsigned int line_size;
452 	unsigned int size;
453 	void *mem_r;
454 #else
455 	register unsigned int line_size asm("esi");
456 	register unsigned int size asm("edi");
457 	register void *mem_r asm(_ASM_BX);
458 #endif /* CONFIG_KASAN */
459 
460 	/*
	 * Make sure none of the allocated memory is cached. If it is, we
	 * will get a cache hit in the below loop from outside of the
	 * pseudo-locked region.
	 * wbinvd (as opposed to clflush/clflushopt) is required to
	 * increase likelihood that allocated cache portion will be filled
	 * with associated memory.
	 */
	wbinvd();

	/*
	 * Always called with interrupts enabled. By disabling interrupts
	 * we ensure that we will not be preempted during this critical section.
	 */
	local_irq_disable();

	/*
	 * Call wrmsr and rdmsr as directly as possible to avoid tracing
	 * clobbering local register variables or affecting cache accesses.
	 *
	 * Disable the hardware prefetcher so that when the end of the memory
	 * being pseudo-locked is reached the hardware will not read beyond
	 * the buffer and evict pseudo-locked memory read earlier from the
	 * cache.
	 */
	saved_msr = native_rdmsrq(MSR_MISC_FEATURE_CONTROL);
	native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
	closid_p = this_cpu_read(pqr_state.cur_closid);
	rmid_p = this_cpu_read(pqr_state.cur_rmid);
	mem_r = plr->kmem;
	size = plr->size;
	line_size = plr->line_size;
	/*
	 * Critical section begin: start by writing the closid associated
	 * with the capacity bitmask of the cache region being
	 * pseudo-locked followed by reading of kernel memory to load it
	 * into the cache.
	 */
	native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);

	/*
	 * Cache was flushed earlier. Now access kernel memory to read it
	 * into cache region associated with just activated plr->closid.
	 * Loop over data twice:
	 * - In first loop the cache region is shared with the page walker
	 *   as it populates the paging structure caches (including TLB).
	 * - In the second loop the paging structure caches are used and
	 *   cache region is populated with the memory being referenced.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			:
			: "r" (mem_r), "r" (i)
			: "%eax", "memory");
	}
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			:
			: "r" (mem_r), "r" (i)
			: "%eax", "memory");
	}
	/*
	 * Critical section end: restore closid with capacity bitmask that
	 * does not overlap with pseudo-locked region.
	 */
	native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);

	/* Re-enable the hardware prefetcher(s) */
	wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);
	local_irq_enable();

	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

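/*
 * For reference, a minimal C sketch of what the inline asm in the
 * critical section above does. The real flow deliberately avoids plain C
 * so that, apart from the buffer being locked, no other data memory is
 * touched:
 *
 *	for (i = 0; i < size; i += line_size)
 *		READ_ONCE(*(char *)(mem_r + i));
 */
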
/**
 * rdtgroup_monitor_in_progress - Test if monitoring in progress
 * @rdtgrp: resource group being queried
 *
 * Return: 1 if monitor groups have been created for this resource
 * group, 0 otherwise.
 */
static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
{
	return !list_empty(&rdtgrp->mon.crdtgrp_list);
}

/**
 * rdtgroup_locksetup_user_restrict - Restrict user access to group
 * @rdtgrp: resource group needing access restricted
 *
 * A resource group used for cache pseudo-locking cannot have cpus or tasks
 * assigned to it. This is communicated to the user by restricting access
 * to all the files that can be used to make such changes.
 *
 * Permissions restored with rdtgroup_locksetup_user_restore()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restriction of access, an attempt will be made to restore permissions,
 * but the mode of these files will then be uncertain.
 */
static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
{
	int ret;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
	if (ret)
		return ret;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
	if (ret)
		goto err_tasks;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
	if (ret)
		goto err_cpus;

	if (resctrl_arch_mon_capable()) {
		ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
		if (ret)
			goto err_cpus_list;
	}

	ret = 0;
	goto out;

err_cpus_list:
	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
err_cpus:
	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
err_tasks:
	rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
out:
	return ret;
}

/**
 * rdtgroup_locksetup_user_restore - Restore user access to group
 * @rdtgrp: resource group needing access restored
 *
 * Restore all file access previously removed using
 * rdtgroup_locksetup_user_restrict()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restoration of access, an attempt will be made to restrict permissions
 * again, but the mode of these files will then be uncertain.
 */
static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
{
	int ret;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
	if (ret)
		return ret;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
	if (ret)
		goto err_tasks;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
	if (ret)
		goto err_cpus;

	if (resctrl_arch_mon_capable()) {
		ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
		if (ret)
			goto err_cpus_list;
	}

	ret = 0;
	goto out;

err_cpus_list:
	rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
err_cpus:
	rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
err_tasks:
	rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
out:
	return ret;
}

/**
 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
 * @rdtgrp: resource group requested to enter locksetup mode
 *
 * A resource group enters locksetup mode to reflect that it would be used
 * to represent a pseudo-locked region and is in the process of being set
 * up to do so. A resource group used for a pseudo-locked region would
 * lose the closid associated with it so we cannot allow it to have any
 * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
 * future. Monitoring of a pseudo-locked region is not allowed either.
 *
 * The above and more restrictions on a pseudo-locked region are checked
 * for and enforced before the resource group enters the locksetup mode.
 *
 * Returns: 0 if the resource group successfully entered locksetup mode, <0
 * on failure. On failure the last_cmd_status buffer is updated with text to
 * communicate details of failure to the user.
 */
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
	int ret;

	/*
	 * The default resource group can neither be removed nor lose the
	 * default closid associated with it.
	 */
	if (rdtgrp == &rdtgroup_default) {
		rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
		return -EINVAL;
	}

	/*
	 * Cache Pseudo-locking not supported when CDP is enabled.
	 *
	 * Some things to consider if you would like to enable this
	 * support (using L3 CDP as example):
	 * - When CDP is enabled two separate resources are exposed,
	 *   L3DATA and L3CODE, but they are actually on the same cache.
	 *   The implication for pseudo-locking is that if a
	 *   pseudo-locked region is created on a domain of one
	 *   resource (e.g. L3CODE), then a pseudo-locked region cannot
	 *   be created on that same domain of the other resource
	 *   (e.g. L3DATA). This is because the creation of a
	 *   pseudo-locked region involves a call to wbinvd that will
	 *   affect all cache allocations on the particular domain.
	 * - Considering the previous, it may be possible to only
	 *   expose one of the CDP resources to pseudo-locking and
	 *   hide the other. For example, we could consider only
	 *   exposing L3DATA and, since the L3 cache is unified, it is
	 *   still possible to place instructions there and execute them.
	 * - If only one region is exposed to pseudo-locking we should
	 *   still keep in mind that availability of a portion of cache
	 *   for pseudo-locking should take into account both resources.
	 *   Similarly, if a pseudo-locked region is created in one
	 *   resource, the portion of cache used by it should be made
	 *   unavailable to all future allocations from both resources.
	 */
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
	    resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
		rdt_last_cmd_puts("CDP enabled\n");
		return -EINVAL;
	}

	/*
	 * Not knowing the bits to disable prefetching implies that this
	 * platform does not support Cache Pseudo-Locking.
	 */
	if (resctrl_arch_get_prefetch_disable_bits() == 0) {
		rdt_last_cmd_puts("Pseudo-locking not supported\n");
		return -EINVAL;
	}

	if (rdtgroup_monitor_in_progress(rdtgrp)) {
		rdt_last_cmd_puts("Monitoring in progress\n");
		return -EINVAL;
	}

	if (rdtgroup_tasks_assigned(rdtgrp)) {
		rdt_last_cmd_puts("Tasks assigned to resource group\n");
		return -EINVAL;
	}

	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
		rdt_last_cmd_puts("CPUs assigned to resource group\n");
		return -EINVAL;
	}

	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
		return -EIO;
	}

	ret = pseudo_lock_init(rdtgrp);
	if (ret) {
		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
		goto out_release;
	}

	/*
	 * If this system is capable of monitoring, an RMID would have been
	 * allocated when the control group was created. This is no longer
	 * needed when this group is used for pseudo-locking. This is safe
	 * to call on platforms not capable of monitoring.
	 */
	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);

	ret = 0;
	goto out;

out_release:
	rdtgroup_locksetup_user_restore(rdtgrp);
out:
	return ret;
}

/**
 * rdtgroup_locksetup_exit - resource group exits locksetup mode
 * @rdtgrp: resource group
 *
 * When a resource group exits locksetup mode the earlier restrictions are
 * lifted.
 *
 * Return: 0 on success, <0 on failure
 */
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
	int ret;

	if (resctrl_arch_mon_capable()) {
		ret = alloc_rmid(rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			return ret;
		}
		rdtgrp->mon.rmid = ret;
	}

	ret = rdtgroup_locksetup_user_restore(rdtgrp);
	if (ret) {
		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
		return ret;
	}

	pseudo_lock_free(rdtgrp);
	return 0;
}

/**
 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
 * @d: RDT domain
 * @cbm: CBM to test
 *
 * @d represents a cache instance and @cbm a capacity bitmask that is
 * considered for it. Determine if @cbm overlaps with any existing
 * pseudo-locked region on @d.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: true if @cbm overlaps with pseudo-locked region on @d, false
 * otherwise.
 */
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
	unsigned int cbm_len;
	unsigned long cbm_b;

	if (d->plr) {
		cbm_len = d->plr->s->res->cache.cbm_len;
		cbm_b = d->plr->cbm;
		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
			return true;
	}
	return false;
}

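/*
 * Worked example: if an existing pseudo-locked region on @d has
 * cbm_b = 0x30 (bits 4-5) then a candidate cbm = 0x38 (bits 3-5) shares
 * bits 4-5 and bitmap_intersects() makes the function return true, while
 * a candidate cbm = 0x0f (bits 0-3) would return false.
 */
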
/**
 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
 * @d: RDT domain under test
 *
 * The setup of a pseudo-locked region affects all cache instances within
 * the hierarchy of the region. It is thus essential to know if any
 * pseudo-locked regions exist within a cache hierarchy to prevent any
 * attempts to create new pseudo-locked regions in the same hierarchy.
 *
 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
 *         if it is not possible to test due to a memory allocation issue,
 *         false otherwise.
 */
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
	struct rdt_ctrl_domain *d_i;
	cpumask_var_t cpu_with_psl;
	struct rdt_resource *r;
	bool ret = false;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
		return true;

	/*
	 * First determine which cpus have pseudo-locked regions
	 * associated with them.
	 */
	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) {
			if (d_i->plr)
				cpumask_or(cpu_with_psl, cpu_with_psl,
					   &d_i->hdr.cpu_mask);
		}
	}

	/*
	 * Next test if new pseudo-locked region would intersect with
	 * existing region.
	 */
	if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
		ret = true;

	free_cpumask_var(cpu_with_psl);
	return ret;
}

/**
 * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
 *                                      pseudo-locked memory
 * @_plr: pseudo-lock region to measure
 *
 * There is no deterministic way to test if a memory region is cached. One
 * way is to measure how long it takes to read the memory; the speed of
 * access is a good way to learn how close to the cpu the data was.
 * Moreover, if the prefetcher is disabled and the memory is read at a stride
 * of half the cache line, then a cache miss will be easy to spot since the
 * read of the first half would be significantly slower than the read of
 * the second half.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
int resctrl_arch_measure_cycles_lat_fn(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	u32 saved_low, saved_high;
	unsigned long i;
	u64 start, end;
	void *mem_r;

	local_irq_disable();
	/*
	 * Disable hardware prefetchers.
	 */
	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
	mem_r = READ_ONCE(plr->kmem);
	/*
	 * Dummy execute of the time measurement to load the needed
	 * instructions into the L1 instruction cache.
	 */
	start = rdtsc_ordered();
	for (i = 0; i < plr->size; i += 32) {
		start = rdtsc_ordered();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
		end = rdtsc_ordered();
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}
	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

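/*
 * Reading the trace output: with a 64 byte cache line the stride of 32
 * used above touches each line twice. On a cache miss the first read of
 * a line is expected to report a clearly higher cycle count than the
 * second; in a well pseudo-locked region both reads should report
 * comparably low latencies via the pseudo_lock_mem_latency tracepoint.
 */
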
/*
 * Create a perf_event_attr for the hit and miss perf events that will
 * be used during the performance measurement. A perf_event maintains
 * a pointer to its perf_event_attr so a unique attribute structure is
 * created for each perf_event.
 *
 * The actual configuration of the event is set right before use in order
 * to use the X86_CONFIG macro.
 */
static struct perf_event_attr perf_miss_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 0,
	.exclude_user	= 1,
};

static struct perf_event_attr perf_hit_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 0,
	.exclude_user	= 1,
};

struct residency_counts {
	u64 miss_before, hits_before;
	u64 miss_after,  hits_after;
};

static int measure_residency_fn(struct perf_event_attr *miss_attr,
				struct perf_event_attr *hit_attr,
				struct pseudo_lock_region *plr,
				struct residency_counts *counts)
{
	u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
	struct perf_event *miss_event, *hit_event;
	int hit_pmcnum, miss_pmcnum;
	u32 saved_low, saved_high;
	unsigned int line_size;
	unsigned int size;
	unsigned long i;
	void *mem_r;
	u64 tmp;

	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
						      NULL, NULL, NULL);
	if (IS_ERR(miss_event))
		goto out;

	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
						     NULL, NULL, NULL);
	if (IS_ERR(hit_event))
		goto out_miss;

	local_irq_disable();
	/*
	 * Check any possible error state of events used by performing
	 * one local read.
	 */
	if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}
	if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}

	/*
	 * Disable hardware prefetchers.
	 */
	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);

	/* Initialize rest of local variables */
	/*
	 * Performance event has been validated right before this with
	 * interrupts disabled - it is thus safe to read the counter index.
	 */
	miss_pmcnum = x86_perf_rdpmc_index(miss_event);
	hit_pmcnum = x86_perf_rdpmc_index(hit_event);
	line_size = READ_ONCE(plr->line_size);
	mem_r = READ_ONCE(plr->kmem);
	size = READ_ONCE(plr->size);

	/*
	 * Read counter variables twice - first to load the instructions
	 * used in L1 cache, second to capture an accurate value that does not
	 * include cache misses incurred because of instruction loads.
	 */
	hits_before = rdpmc(hit_pmcnum);
	miss_before = rdpmc(miss_pmcnum);
	/*
	 * From the SDM: performing back-to-back fast reads is not guaranteed
	 * to be monotonic.
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	hits_before = rdpmc(hit_pmcnum);
	miss_before = rdpmc(miss_pmcnum);
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
	}
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	hits_after = rdpmc(hit_pmcnum);
	miss_after = rdpmc(miss_pmcnum);
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	/* Re-enable hardware prefetchers */
	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
out_hit:
	perf_event_release_kernel(hit_event);
out_miss:
	perf_event_release_kernel(miss_event);
out:
	/*
	 * All counts will be zero on failure.
	 */
	counts->miss_before = miss_before;
	counts->hits_before = hits_before;
	counts->miss_after  = miss_after;
	counts->hits_after  = hits_after;
	return 0;
}

int resctrl_arch_measure_l2_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts = {0};

	/*
	 * Non-architectural event for the Goldmont Microarchitecture
	 * from Intel x86 Architecture Software Developer Manual (SDM):
	 * MEM_LOAD_UOPS_RETIRED D1H (event number)
	 * Umask values:
	 *     L2_HIT   02H
	 *     L2_MISS  10H
	 */
	switch (boot_cpu_data.x86_vfm) {
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
						   .umask = 0x10);
		perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
						  .umask = 0x2);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
	/*
	 * If a failure prevented the measurements from succeeding
	 * tracepoints will still be written and all counts will be zero.
	 */
	trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
			     counts.miss_after - counts.miss_before);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

int resctrl_arch_measure_l3_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts = {0};

	/*
	 * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
	 * has two "no fix" errata associated with it: BDM35 and BDM100. On
	 * this platform the following events are used instead:
	 * LONGEST_LAT_CACHE 2EH (Documented in SDM)
	 *       REFERENCE 4FH
	 *       MISS      41H
	 */

	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL_X:
		/* On BDW the hit event counts references, not hits */
		perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
						  .umask = 0x4f);
		perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
						   .umask = 0x41);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
	/*
	 * If a failure prevented the measurements from succeeding
	 * tracepoints will still be written and all counts will be zero.
	 */

	counts.miss_after -= counts.miss_before;
	if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_X) {
		/*
		 * On BDW references and misses are counted, need to adjust.
		 * Sometimes the "hits" counter is a bit more than the
		 * references, for example, x references but x + 1 hits.
		 * To not report invalid hit values in this case we treat
		 * that as misses equal to references.
		 */
		/* First compute the number of cache references measured */
		counts.hits_after -= counts.hits_before;
		/* Next convert references to cache hits */
		counts.hits_after -= min(counts.miss_after, counts.hits_after);
	} else {
		counts.hits_after -= counts.hits_before;
	}

	trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

/**
 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
 * @rdtgrp: Resource group to which the pseudo-locked region belongs.
 * @sel: Selector of which measurement to perform on a pseudo-locked region.
 *
 * The measurement of latency to access a pseudo-locked region should be
 * done from a cpu that is associated with that pseudo-locked region.
 * Determine which cpu is associated with this region and start a thread on
 * that cpu to perform the measurement, and wait for that thread to complete.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;
	struct task_struct *thread;
	unsigned int cpu;
	int ret = -1;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
		goto out;
	}

	if (!plr->d) {
		ret = -ENODEV;
		goto out;
	}

	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->hdr.cpu_mask);
	if (!cpu_online(cpu)) {
		ret = -ENODEV;
		goto out;
	}

	plr->cpu = cpu;

	if (sel == 1)
		thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
					    plr, cpu, "pseudo_lock_measure/%u");
	else if (sel == 2)
		thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
					    plr, cpu, "pseudo_lock_measure/%u");
	else if (sel == 3)
		thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
					    plr, cpu, "pseudo_lock_measure/%u");
	else
		goto out;

	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		goto out;
	}

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0)
		goto out;

	ret = 0;

out:
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

static ssize_t pseudo_lock_measure_trigger(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct rdtgroup *rdtgrp = file->private_data;
	size_t buf_size;
	char buf[32];
	int ret;
	int sel;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	ret = kstrtoint(buf, 10, &sel);
	if (ret == 0) {
		if (sel != 1 && sel != 2 && sel != 3)
			return -EINVAL;
		ret = debugfs_file_get(file->f_path.dentry);
		if (ret)
			return ret;
		ret = pseudo_lock_measure_cycles(rdtgrp, sel);
		if (ret == 0)
			ret = count;
		debugfs_file_put(file->f_path.dentry);
	}

	return ret;
}

static const struct file_operations pseudo_measure_fops = {
	.write = pseudo_lock_measure_trigger,
	.open = simple_open,
	.llseek = default_llseek,
};

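/*
 * A sketch of the debugfs usage (paths assume debugfs mounted at
 * /sys/kernel/debug, tracefs at /sys/kernel/tracing and a pseudo-locked
 * region named "newlock"):
 *
 *	# echo 1 > /sys/kernel/tracing/events/resctrl/pseudo_lock_mem_latency/enable
 *	# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
 *	# cat /sys/kernel/tracing/trace
 *
 * Writing "1" triggers the latency measurement, "2" the L2 residency
 * measurement and "3" the L3 residency measurement, matching the
 * selectors accepted by pseudo_lock_measure_trigger() above.
 */
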
/**
 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-lock region belongs
 *
 * Called when a resource group in the pseudo-locksetup mode receives a
 * valid schemata that should be pseudo-locked. Since the resource group is
 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
 * allocated and initialized with the essential information. If a failure
 * occurs the resource group remains in the pseudo-locksetup mode with the
 * &struct pseudo_lock_region associated with it, but cleared from all
 * information and ready for the user to re-attempt pseudo-locking by
 * writing the schemata again.
 *
 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
 * on failure. Descriptive error will be written to last_cmd_status buffer.
 */
int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;
	struct task_struct *thread;
	unsigned int new_minor;
	struct device *dev;
	char *kn_name __free(kfree) = NULL;
	int ret;

	ret = pseudo_lock_region_alloc(plr);
	if (ret < 0)
		return ret;

	ret = pseudo_lock_cstates_constrain(plr);
	if (ret < 0) {
		ret = -EINVAL;
		goto out_region;
	}
	kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL);
	if (!kn_name) {
		ret = -ENOMEM;
		goto out_cstates;
	}

	plr->thread_done = 0;

	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, plr,
				    plr->cpu, "pseudo_lock/%u");
	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
		goto out_cstates;
	}

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0) {
		/*
		 * If the thread does not get on the CPU for whatever
		 * reason and the process which sets up the region is
		 * interrupted then this will leave the thread in runnable
		 * state and once it gets on the CPU it will dereference
		 * the cleared, but not freed, plr struct resulting in an
		 * empty pseudo-locking loop.
		 */
		rdt_last_cmd_puts("Locking thread interrupted\n");
		goto out_cstates;
	}

	ret = pseudo_lock_minor_get(&new_minor);
	if (ret < 0) {
		rdt_last_cmd_puts("Unable to obtain a new minor number\n");
		goto out_cstates;
	}

	/*
	 * Unlock access but do not release the reference. The
	 * pseudo-locked region will still be here on return.
	 *
	 * The mutex has to be released temporarily to avoid a potential
	 * deadlock with the mm->mmap_lock which is obtained in the
	 * device_create() and debugfs_create_dir() callpath below as well as
	 * before the mmap() callback is called.
	 */
	mutex_unlock(&rdtgroup_mutex);

	if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
		plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl);
		if (!IS_ERR_OR_NULL(plr->debugfs_dir))
			debugfs_create_file("pseudo_lock_measure", 0200,
					    plr->debugfs_dir, rdtgrp,
					    &pseudo_measure_fops);
	}

	dev = device_create(&pseudo_lock_class, NULL,
			    MKDEV(pseudo_lock_major, new_minor),
			    rdtgrp, "%s", kn_name);

	mutex_lock(&rdtgroup_mutex);

	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		rdt_last_cmd_printf("Failed to create character device: %d\n",
				    ret);
		goto out_debugfs;
	}

	/* We released the mutex - check if group was removed while we did so */
	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
		goto out_device;
	}

	plr->minor = new_minor;

	rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
	closid_free(rdtgrp->closid);
	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);

	ret = 0;
	goto out;

out_device:
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
out_debugfs:
	debugfs_remove_recursive(plr->debugfs_dir);
	pseudo_lock_minor_release(new_minor);
out_cstates:
	pseudo_lock_cstates_relax(plr);
out_region:
	pseudo_lock_region_clear(plr);
out:
	return ret;
}

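/*
 * A sketch of the userspace flow that leads here (group name and
 * schemata are examples):
 *
 *	# mkdir /sys/fs/resctrl/newlock
 *	# echo pseudo-locksetup > /sys/fs/resctrl/newlock/mode
 *	# echo "L2:1=0x3" > /sys/fs/resctrl/newlock/schemata
 *	# cat /sys/fs/resctrl/newlock/mode
 *	pseudo-locked
 *
 * The write of the schemata ends up in rdtgroup_pseudo_lock_create() and,
 * on success, a /dev/pseudo_lock/newlock character device is created.
 */
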
/**
 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
 * @rdtgrp: resource group to which the pseudo-locked region belongs
 *
 * The removal of a pseudo-locked region can be initiated when the resource
 * group is removed via a "rmdir" from userspace or the unmount of the
 * resctrl filesystem. On removal the resource group does
 * not go back to pseudo-locksetup mode before it is removed, instead it is
 * removed directly. There is thus asymmetry with the creation where the
 * &struct pseudo_lock_region is removed here while it was not created in
 * rdtgroup_pseudo_lock_create().
 *
 * Return: void
 */
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * Default group cannot be a pseudo-locked region so we can
		 * free closid here.
		 */
		closid_free(rdtgrp->closid);
		goto free;
	}

	pseudo_lock_cstates_relax(plr);
	debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
	pseudo_lock_minor_release(plr->minor);

free:
	pseudo_lock_free(rdtgrp);
}

static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
{
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);

	rdtgrp = region_find_by_minor(iminor(inode));
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	filp->private_data = rdtgrp;
	atomic_inc(&rdtgrp->waitcount);
	/* Perform a non-seekable open - llseek is not supported */
	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
{
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);
	rdtgrp = filp->private_data;
	WARN_ON(!rdtgrp);
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}
	filp->private_data = NULL;
	atomic_dec(&rdtgrp->waitcount);
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
{
	/* Not supported */
	return -EINVAL;
}

static const struct vm_operations_struct pseudo_mmap_ops = {
	.mremap = pseudo_lock_dev_mremap,
};

static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct pseudo_lock_region *plr;
	struct rdtgroup *rdtgrp;
	unsigned long physical;
	unsigned long psize;

	mutex_lock(&rdtgroup_mutex);

	rdtgrp = filp->private_data;
	WARN_ON(!rdtgrp);
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	plr = rdtgrp->plr;

	if (!plr->d) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	/*
	 * Task is required to run with affinity to the cpus associated
	 * with the pseudo-locked region. If this is not the case the task
	 * may be scheduled elsewhere and invalidate entries in the
	 * pseudo-locked region.
	 */
	if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EINVAL;
	}

	physical = __pa(plr->kmem) >> PAGE_SHIFT;
	psize = plr->size - off;

	if (off > plr->size) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENOSPC;
	}

	/*
	 * Ensure changes are carried directly to the memory being mapped,
	 * do not allow copy-on-write mapping.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EINVAL;
	}

	if (vsize > psize) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENOSPC;
	}

	memset(plr->kmem + off, 0, vsize);

	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
			    vsize, vma->vm_page_prot)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EAGAIN;
	}
	vma->vm_ops = &pseudo_mmap_ops;
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static const struct file_operations pseudo_lock_dev_fops = {
	.owner =	THIS_MODULE,
	.read =		NULL,
	.write =	NULL,
	.open =		pseudo_lock_dev_open,
	.release =	pseudo_lock_dev_release,
	.mmap =		pseudo_lock_dev_mmap,
};

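/*
 * A minimal userspace sketch of consuming a pseudo-locked region (error
 * handling omitted, device name and CPU number are examples). The task
 * must first restrict its affinity to the cpus associated with the
 * region since pseudo_lock_dev_mmap() rejects tasks that could run
 * elsewhere, and the mapping must be MAP_SHARED:
 *
 *	cpu_set_t cpuset;
 *	void *mem;
 *	int fd;
 *
 *	CPU_ZERO(&cpuset);
 *	CPU_SET(2, &cpuset);
 *	sched_setaffinity(0, sizeof(cpuset), &cpuset);
 *	fd = open("/dev/pseudo_lock/newlock", O_RDWR);
 *	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */
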
int rdt_pseudo_lock_init(void)
{
	int ret;

	ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
	if (ret < 0)
		return ret;

	pseudo_lock_major = ret;

	ret = class_register(&pseudo_lock_class);
	if (ret) {
		unregister_chrdev(pseudo_lock_major, "pseudo_lock");
		return ret;
	}

	return 0;
}
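
/*
 * register_chrdev(0, ...) in rdt_pseudo_lock_init() allocates a dynamic
 * major number. Each pseudo-locked region gets its minor number from
 * pseudo_lock_minor_get() and, via pseudo_lock_devnode(), is expected to
 * appear as /dev/pseudo_lock/<group name>.
 */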

void rdt_pseudo_lock_release(void)
{
	class_unregister(&pseudo_lock_class);
	unregister_chrdev(pseudo_lock_major, "pseudo_lock");
	pseudo_lock_major = 0;
}
1602