// SPDX-License-Identifier: GPL-2.0
/*
 * Resource Director Technology (RDT)
 *
 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Reinette Chatre <reinette.chatre@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/mman.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include <asm/perf_event.h>

#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

/*
 * The bits needed to disable hardware prefetching vary based on the
 * platform. During initialization we will discover which bits to use.
 */
static u64 prefetch_disable_bits;

/*
 * Major number assigned to and shared by all devices exposing
 * pseudo-locked regions.
 */
static unsigned int pseudo_lock_major;
static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);

static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
{
	const struct rdtgroup *rdtgrp;

	rdtgrp = dev_get_drvdata(dev);
	if (mode)
		*mode = 0600;
	guard(mutex)(&rdtgroup_mutex);
	return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn));
}

static const struct class pseudo_lock_class = {
	.name = "pseudo_lock",
	.devnode = pseudo_lock_devnode,
};

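/*
 * Editorial note (not from the original source): with the devnode
 * callback above, udev creates the device node for a resource group
 * named, for example, "newlock" as /dev/pseudo_lock/newlock with
 * mode 0600.
 */
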
/**
 * get_prefetch_disable_bits - prefetch disable bits of supported platforms
 * @void: It takes no parameters.
 *
 * Capture the list of platforms that have been validated to support
 * pseudo-locking. This includes testing to ensure pseudo-locked regions
 * with low cache miss rates can be created under a variety of load
 * conditions as well as that these pseudo-locked regions can maintain
 * their low cache miss rates under a variety of load conditions for
 * significant lengths of time.
 *
 * After a platform has been validated to support pseudo-locking its
 * hardware prefetch disable bits are included here as they are documented
 * in the SDM.
 *
 * When adding a platform here also add support for its cache events to
 * measure_cycles_perf_fn()
 *
 * Return:
 * If platform is supported, the bits to disable hardware prefetchers, 0
 * if platform is not supported.
 */
static u64 get_prefetch_disable_bits(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return 0;

	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL_X:
		/*
		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
		 * as:
		 * 0    L2 Hardware Prefetcher Disable (R/W)
		 * 1    L2 Adjacent Cache Line Prefetcher Disable (R/W)
		 * 2    DCU Hardware Prefetcher Disable (R/W)
		 * 3    DCU IP Prefetcher Disable (R/W)
		 * 63:4 Reserved
		 */
		return 0xF;
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		/*
		 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
		 * as:
		 * 0    L2 Hardware Prefetcher Disable (R/W)
		 * 1    Reserved
		 * 2    DCU Hardware Prefetcher Disable (R/W)
		 * 63:3 Reserved
		 */
		return 0x5;
	}

	return 0;
}

/**
 * pseudo_lock_minor_get - Obtain available minor number
 * @minor: Pointer to where new minor number will be stored
 *
 * A bitmask is used to track available minor numbers. Here the next free
 * minor number is marked as unavailable and returned.
 *
 * Return: 0 on success, <0 on failure.
 */
static int pseudo_lock_minor_get(unsigned int *minor)
{
	unsigned long first_bit;

	first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);

	if (first_bit == MINORBITS)
		return -ENOSPC;

	__clear_bit(first_bit, &pseudo_lock_minor_avail);
	*minor = first_bit;

	return 0;
}

/**
 * pseudo_lock_minor_release - Return minor number to available
 * @minor: The minor number made available
 */
static void pseudo_lock_minor_release(unsigned int minor)
{
	__set_bit(minor, &pseudo_lock_minor_avail);
}

/**
 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
 * @minor: The minor number of the device representing pseudo-locked region
 *
 * When the character device is accessed we need to determine which
 * pseudo-locked region it belongs to. This is done by matching the minor
 * number of the device to the pseudo-locked region it belongs to.
 *
 * Minor numbers are assigned at the time a pseudo-locked region is associated
 * with a cache instance.
 *
 * Return: On success return pointer to resource group owning the pseudo-locked
 * region, NULL on failure.
 */
static struct rdtgroup *region_find_by_minor(unsigned int minor)
{
	struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
			rdtgrp_match = rdtgrp;
			break;
		}
	}
	return rdtgrp_match;
}

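/*
 * Editorial example: an open() of /dev/pseudo_lock/newlock arrives in
 * pseudo_lock_dev_open() below with the inode's minor number, and
 * region_find_by_minor() maps that minor back to the resource group
 * that owns the pseudo-locked region.
 */
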
/**
 * struct pseudo_lock_pm_req - A power management QoS request list entry
 * @list:	Entry within the @pm_reqs list for a pseudo-locked region
 * @req:	PM QoS request
 */
struct pseudo_lock_pm_req {
	struct list_head list;
	struct dev_pm_qos_request req;
};

static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
{
	struct pseudo_lock_pm_req *pm_req, *next;

	list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
		dev_pm_qos_remove_request(&pm_req->req);
		list_del(&pm_req->list);
		kfree(pm_req);
	}
}

/**
 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
 * @plr: Pseudo-locked region
 *
 * To prevent the cache from being affected by power management, entering
 * C6 has to be avoided. This is accomplished by requesting a latency
 * requirement lower than the lowest C6 exit latency of all supported
 * platforms as found in the cpuidle state tables in the intel_idle driver.
 * At this time it is possible to do so with a single latency requirement
 * for all supported platforms.
 *
 * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
 * the ACPI latencies need to be considered while keeping in mind that C2
 * may be set to map to deeper sleep states. In this case the latency
 * requirement needs to prevent entering C2 also.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
	struct pseudo_lock_pm_req *pm_req;
	int cpu;
	int ret;

	for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
		pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
		if (!pm_req) {
			rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
			ret = -ENOMEM;
			goto out_err;
		}
		ret = dev_pm_qos_add_request(get_cpu_device(cpu),
					     &pm_req->req,
					     DEV_PM_QOS_RESUME_LATENCY,
					     30);
		if (ret < 0) {
			rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
					    cpu);
			kfree(pm_req);
			ret = -1;
			goto out_err;
		}
		list_add(&pm_req->list, &plr->pm_reqs);
	}

	return 0;

out_err:
	pseudo_lock_cstates_relax(plr);
	return ret;
}

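/*
 * Editorial note: the value 30 above is a resume latency bound in
 * microseconds. The C6 exit latencies listed for the supported
 * platforms in the intel_idle tables are well above 30 usec, so a
 * single 30 usec DEV_PM_QOS_RESUME_LATENCY request per CPU is enough
 * to keep cpuidle from selecting C6 (and any C2 state mapped deeper).
 */
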
/**
 * pseudo_lock_region_clear - Reset pseudo-lock region data
 * @plr: pseudo-lock region
 *
 * All content of the pseudo-locked region is reset - any memory allocated
 * is freed.
 *
 * Return: void
 */
static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
{
	plr->size = 0;
	plr->line_size = 0;
	kfree(plr->kmem);
	plr->kmem = NULL;
	plr->s = NULL;
	if (plr->d)
		plr->d->plr = NULL;
	plr->d = NULL;
	plr->cbm = 0;
	plr->debugfs_dir = NULL;
}

/**
 * pseudo_lock_region_init - Initialize pseudo-lock region information
 * @plr: pseudo-lock region
 *
 * Called after user provided a schemata to be pseudo-locked. From the
 * schemata the &struct pseudo_lock_region is on entry already initialized
 * with the resource, domain, and capacity bitmask. Here the information
 * required for pseudo-locking is deduced from this data and &struct
 * pseudo_lock_region initialized further. This information includes:
 * - size in bytes of the region to be pseudo-locked
 * - cache line size to know the stride with which data needs to be accessed
 *   to be pseudo-locked
 * - a cpu associated with the cache instance on which the pseudo-locking
 *   flow can be executed
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
{
	enum resctrl_scope scope = plr->s->res->ctrl_scope;
	struct cacheinfo *ci;
	int ret;

	if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
		return -ENODEV;

	/* Pick the first cpu we find that is associated with the cache. */
	plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);

	if (!cpu_online(plr->cpu)) {
		rdt_last_cmd_printf("CPU %u associated with cache not online\n",
				    plr->cpu);
		ret = -ENODEV;
		goto out_region;
	}

	ci = get_cpu_cacheinfo_level(plr->cpu, scope);
	if (ci) {
		plr->line_size = ci->coherency_line_size;
		plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
		return 0;
	}

	ret = -1;
	rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
	pseudo_lock_region_clear(plr);
	return ret;
}

/**
 * pseudo_lock_init - Initialize a pseudo-lock region
 * @rdtgrp: resource group to which new pseudo-locked region will belong
 *
 * A pseudo-locked region is associated with a resource group. When this
 * association is created the pseudo-locked region is initialized. The
 * details of the pseudo-locked region are not known at this time so only
 * allocation is done and association established.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_init(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr;

	plr = kzalloc(sizeof(*plr), GFP_KERNEL);
	if (!plr)
		return -ENOMEM;

	init_waitqueue_head(&plr->lock_thread_wq);
	INIT_LIST_HEAD(&plr->pm_reqs);
	rdtgrp->plr = plr;
	return 0;
}

/**
 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
 * @plr: pseudo-lock region
 *
 * Initialize the details required to set up the pseudo-locked region and
 * allocate the contiguous memory that will be pseudo-locked to the cache.
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * to last_cmd_status buffer.
 */
static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
{
	int ret;

	ret = pseudo_lock_region_init(plr);
	if (ret < 0)
		return ret;

	/*
	 * We do not yet support contiguous regions larger than
	 * KMALLOC_MAX_SIZE.
	 */
	if (plr->size > KMALLOC_MAX_SIZE) {
		rdt_last_cmd_puts("Requested region exceeds maximum size\n");
		ret = -E2BIG;
		goto out_region;
	}

	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
	if (!plr->kmem) {
		rdt_last_cmd_puts("Unable to allocate memory\n");
		ret = -ENOMEM;
		goto out_region;
	}

	ret = 0;
	goto out;
out_region:
	pseudo_lock_region_clear(plr);
out:
	return ret;
}

/**
 * pseudo_lock_free - Free a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-locked region belonged
 *
 * The pseudo-locked region's resources have already been released, or not
 * yet created at this point. Now it can be freed and disassociated from the
 * resource group.
 *
 * Return: void
 */
static void pseudo_lock_free(struct rdtgroup *rdtgrp)
{
	pseudo_lock_region_clear(rdtgrp->plr);
	kfree(rdtgrp->plr);
	rdtgrp->plr = NULL;
}

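/*
 * Editorial overview of the pseudo-lock region lifecycle implemented
 * by the helpers above and the functions below:
 *   pseudo_lock_init()         - allocate &struct pseudo_lock_region
 *                                (locksetup mode entered)
 *   pseudo_lock_region_alloc() - size the region and allocate its memory
 *                                (valid schemata written)
 *   pseudo_lock_fn()           - load the memory into the cache
 *   pseudo_lock_free()         - undo all of the above
 */
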
/**
 * pseudo_lock_fn - Load kernel memory into cache
 * @_rdtgrp: resource group to which pseudo-lock region belongs
 *
 * This is the core pseudo-locking flow.
 *
 * First we ensure that the kernel memory cannot be found in the cache.
 * Then, while taking care that there will be as little interference as
 * possible, the memory to be loaded is accessed while the core is running
 * with class of service set to the bitmask of the pseudo-locked region.
 * After this is complete no future CAT allocations will be allowed to
 * overlap with this bitmask.
 *
 * Local register variables are utilized to ensure that the memory region
 * to be locked is the only memory access made during the critical locking
 * loop.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
static int pseudo_lock_fn(void *_rdtgrp)
{
	struct rdtgroup *rdtgrp = _rdtgrp;
	struct pseudo_lock_region *plr = rdtgrp->plr;
	u32 rmid_p, closid_p;
	unsigned long i;
	u64 saved_msr;
#ifdef CONFIG_KASAN
	/*
	 * The registers used for local register variables are also used
	 * when KASAN is active. When KASAN is active we use a regular
	 * variable to ensure we always use a valid pointer, but the cost
	 * is that this variable will enter the cache through evicting the
	 * memory we are trying to lock into the cache. Thus expect lower
	 * pseudo-locking success rate when KASAN is active.
	 */
	unsigned int line_size;
	unsigned int size;
	void *mem_r;
#else
	register unsigned int line_size asm("esi");
	register unsigned int size asm("edi");
	register void *mem_r asm(_ASM_BX);
#endif /* CONFIG_KASAN */

	/*
	 * Make sure none of the allocated memory is cached. If it is we
	 * will get a cache hit in below loop from outside of pseudo-locked
	 * region.
	 * wbinvd (as opposed to clflush/clflushopt) is required to
	 * increase likelihood that allocated cache portion will be filled
	 * with associated memory.
	 */
	wbinvd();

	/*
	 * Always called with interrupts enabled. By disabling interrupts
	 * ensure that we will not be preempted during this critical section.
	 */
	local_irq_disable();

	/*
	 * Call wrmsr and rdmsr as directly as possible to avoid tracing
	 * clobbering local register variables or affecting cache accesses.
	 *
	 * Disable the hardware prefetcher so that when the end of the memory
	 * being pseudo-locked is reached the hardware will not read beyond
	 * the buffer and evict pseudo-locked memory read earlier from the
	 * cache.
	 */
	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
	__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
	closid_p = this_cpu_read(pqr_state.cur_closid);
	rmid_p = this_cpu_read(pqr_state.cur_rmid);
	mem_r = plr->kmem;
	size = plr->size;
	line_size = plr->line_size;
	/*
	 * Critical section begin: start by writing the closid associated
	 * with the capacity bitmask of the cache region being
	 * pseudo-locked followed by reading of kernel memory to load it
	 * into the cache.
	 */
	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
	/*
	 * Cache was flushed earlier. Now access kernel memory to read it
	 * into cache region associated with just activated plr->closid.
	 * Loop over data twice:
	 * - In first loop the cache region is shared with the page walker
	 *   as it populates the paging structure caches (including TLB).
	 * - In the second loop the paging structure caches are used and
	 *   cache region is populated with the memory being referenced.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			:
			: "r" (mem_r), "r" (i)
			: "%eax", "memory");
	}
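	/*
	 * Editorial note on the asm above and below: "mov (%0,%1,1), %%eax"
	 * loads four bytes from mem_r + i (base + index, scale 1) and
	 * discards them in %eax; the "memory" clobber keeps the compiler
	 * from reordering or eliding the reads.
	 */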
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			:
			: "r" (mem_r), "r" (i)
			: "%eax", "memory");
	}
	/*
	 * Critical section end: restore closid with capacity bitmask that
	 * does not overlap with pseudo-locked region.
	 */
	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);

	/* Re-enable the hardware prefetcher(s) */
	wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
	local_irq_enable();

	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

/**
 * rdtgroup_monitor_in_progress - Test if monitoring in progress
 * @rdtgrp: resource group being queried
 *
 * Return: 1 if monitor groups have been created for this resource
 * group, 0 otherwise.
 */
static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
{
	return !list_empty(&rdtgrp->mon.crdtgrp_list);
}

/**
 * rdtgroup_locksetup_user_restrict - Restrict user access to group
 * @rdtgrp: resource group needing access restricted
 *
 * A resource group used for cache pseudo-locking cannot have cpus or tasks
 * assigned to it. This is communicated to the user by restricting access
 * to all the files that can be used to make such changes.
 *
 * Permissions restored with rdtgroup_locksetup_user_restore()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restriction of access an attempt will be made to restore permissions but
 * the state of the mode of these files will be uncertain when a failure
 * occurs.
 */
static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
{
	int ret;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
	if (ret)
		return ret;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
	if (ret)
		goto err_tasks;

	ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
	if (ret)
		goto err_cpus;

	if (resctrl_arch_mon_capable()) {
		ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
		if (ret)
			goto err_cpus_list;
	}

	ret = 0;
	goto out;

err_cpus_list:
	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
err_cpus:
	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
err_tasks:
	rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
out:
	return ret;
}

/**
 * rdtgroup_locksetup_user_restore - Restore user access to group
 * @rdtgrp: resource group needing access restored
 *
 * Restore all file access previously removed using
 * rdtgroup_locksetup_user_restrict()
 *
 * Return: 0 on success, <0 on failure. If a failure occurs during the
 * restoration of access an attempt will be made to restrict permissions
 * again but the state of the mode of these files will be uncertain when
 * a failure occurs.
 */
static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
{
	int ret;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
	if (ret)
		return ret;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
	if (ret)
		goto err_tasks;

	ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
	if (ret)
		goto err_cpus;

	if (resctrl_arch_mon_capable()) {
		ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
		if (ret)
			goto err_cpus_list;
	}

	ret = 0;
	goto out;

err_cpus_list:
	rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
err_cpus:
	rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
err_tasks:
	rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
out:
	return ret;
}

/**
 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
 * @rdtgrp: resource group requested to enter locksetup mode
 *
 * A resource group enters locksetup mode to reflect that it would be used
 * to represent a pseudo-locked region and is in the process of being set
 * up to do so. A resource group used for a pseudo-locked region would
 * lose the closid associated with it so we cannot allow it to have any
 * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
 * future. Monitoring of a pseudo-locked region is not allowed either.
 *
 * The above and more restrictions on a pseudo-locked region are checked
 * for and enforced before the resource group enters the locksetup mode.
 *
 * Returns: 0 if the resource group successfully entered locksetup mode, <0
 * on failure. On failure the last_cmd_status buffer is updated with text to
 * communicate details of failure to the user.
 */
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
{
	int ret;

	/*
	 * The default resource group can neither be removed nor lose the
	 * default closid associated with it.
	 */
	if (rdtgrp == &rdtgroup_default) {
		rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
		return -EINVAL;
	}

	/*
	 * Cache Pseudo-locking not supported when CDP is enabled.
	 *
	 * Some things to consider if you would like to enable this
	 * support (using L3 CDP as example):
	 * - When CDP is enabled two separate resources are exposed,
	 *   L3DATA and L3CODE, but they are actually on the same cache.
	 *   The implication for pseudo-locking is that if a
	 *   pseudo-locked region is created on a domain of one
	 *   resource (eg. L3CODE), then a pseudo-locked region cannot
	 *   be created on that same domain of the other resource
	 *   (eg. L3DATA). This is because the creation of a
	 *   pseudo-locked region involves a call to wbinvd that will
	 *   affect all cache allocations on particular domain.
	 * - Considering the previous, it may be possible to only
	 *   expose one of the CDP resources to pseudo-locking and
	 *   hide the other. For example, we could consider exposing
	 *   only L3DATA, and since the L3 cache is unified it is
	 *   still possible to place instructions there and execute them.
	 * - If only one region is exposed to pseudo-locking we should
	 *   still keep in mind that availability of a portion of cache
	 *   for pseudo-locking should take into account both resources.
	 *   Similarly, if a pseudo-locked region is created in one
	 *   resource, the portion of cache used by it should be made
	 *   unavailable to all future allocations from both resources.
	 */
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
	    resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
		rdt_last_cmd_puts("CDP enabled\n");
		return -EINVAL;
	}

	/*
	 * Not knowing the bits to disable prefetching implies that this
	 * platform does not support Cache Pseudo-Locking.
	 */
	prefetch_disable_bits = get_prefetch_disable_bits();
	if (prefetch_disable_bits == 0) {
		rdt_last_cmd_puts("Pseudo-locking not supported\n");
		return -EINVAL;
	}

	if (rdtgroup_monitor_in_progress(rdtgrp)) {
		rdt_last_cmd_puts("Monitoring in progress\n");
		return -EINVAL;
	}

	if (rdtgroup_tasks_assigned(rdtgrp)) {
		rdt_last_cmd_puts("Tasks assigned to resource group\n");
		return -EINVAL;
	}

	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
		rdt_last_cmd_puts("CPUs assigned to resource group\n");
		return -EINVAL;
	}

	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
		return -EIO;
	}

	ret = pseudo_lock_init(rdtgrp);
	if (ret) {
		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
		goto out_release;
	}

	/*
	 * If this system is capable of monitoring, an RMID would have been
	 * allocated when the control group was created. This is not needed
	 * anymore when this group would be used for pseudo-locking. This
	 * is safe to call on platforms not capable of monitoring.
	 */
	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);

	ret = 0;
	goto out;

out_release:
	rdtgroup_locksetup_user_restore(rdtgrp);
out:
	return ret;
}

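/*
 * Editorial sketch of the user-space flow that exercises the mode
 * transitions above and below (group name and schemata are examples):
 *
 *   # mkdir /sys/fs/resctrl/newlock
 *   # echo pseudo-locksetup > /sys/fs/resctrl/newlock/mode
 *   # echo "L2:1=0x3" > /sys/fs/resctrl/newlock/schemata
 *   # cat /sys/fs/resctrl/newlock/mode
 *   pseudo-locked
 *
 * The first write lands in rdtgroup_locksetup_enter(); the schemata
 * write triggers rdtgroup_pseudo_lock_create() further below.
 */
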
/**
 * rdtgroup_locksetup_exit - Resource group exits locksetup mode
 * @rdtgrp: resource group
 *
 * When a resource group exits locksetup mode the earlier restrictions are
 * lifted.
 *
 * Return: 0 on success, <0 on failure
 */
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
{
	int ret;

	if (resctrl_arch_mon_capable()) {
		ret = alloc_rmid(rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			return ret;
		}
		rdtgrp->mon.rmid = ret;
	}

	ret = rdtgroup_locksetup_user_restore(rdtgrp);
	if (ret) {
		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
		return ret;
	}

	pseudo_lock_free(rdtgrp);
	return 0;
}

/**
 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
 * @d: RDT domain
 * @cbm: CBM to test
 *
 * @d represents a cache instance and @cbm a capacity bitmask that is
 * considered for it. Determine if @cbm overlaps with any existing
 * pseudo-locked region on @d.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: true if @cbm overlaps with pseudo-locked region on @d, false
 * otherwise.
 */
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
{
	unsigned int cbm_len;
	unsigned long cbm_b;

	if (d->plr) {
		cbm_len = d->plr->s->res->cache.cbm_len;
		cbm_b = d->plr->cbm;
		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
			return true;
	}
	return false;
}

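/*
 * Worked example (editorial): with a pseudo-locked region occupying
 * cbm 0x3c (bits 2-5) on @d, a request for cbm 0x0f (bits 0-3) shares
 * bits 2-3 and is reported as overlapping, while cbm 0x03 (bits 0-1)
 * is not.
 */
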
/**
 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
 * @d: RDT domain under test
 *
 * The setup of a pseudo-locked region affects all cache instances within
 * the hierarchy of the region. It is thus essential to know if any
 * pseudo-locked regions exist within a cache hierarchy to prevent any
 * attempts to create new pseudo-locked regions in the same hierarchy.
 *
 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
 *         if it is not possible to test due to a memory allocation issue,
 *         false otherwise.
 */
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
{
	struct rdt_ctrl_domain *d_i;
	cpumask_var_t cpu_with_psl;
	struct rdt_resource *r;
	bool ret = false;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
		return true;

	/*
	 * First determine which cpus have pseudo-locked regions
	 * associated with them.
	 */
	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) {
			if (d_i->plr)
				cpumask_or(cpu_with_psl, cpu_with_psl,
					   &d_i->hdr.cpu_mask);
		}
	}

	/*
	 * Next test if new pseudo-locked region would intersect with
	 * existing region.
	 */
	if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
		ret = true;

	free_cpumask_var(cpu_with_psl);
	return ret;
}

/**
 * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
 * @_plr: pseudo-lock region to measure
 *
 * There is no deterministic way to test if a memory region is cached. One
 * way is to measure how long it takes to read the memory - the speed of
 * access is a good way to learn how close to the cpu the data was. Even
 * more, if the prefetcher is disabled and the memory is read at a stride
 * of half the cache line, then a cache miss will be easy to spot since the
 * read of the first half would be significantly slower than the read of
 * the second half.
 *
 * Return: 0. Waiter on waitqueue will be woken on completion.
 */
static int measure_cycles_lat_fn(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	u32 saved_low, saved_high;
	unsigned long i;
	u64 start, end;
	void *mem_r;

	local_irq_disable();
	/*
	 * Disable hardware prefetchers.
	 */
	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
	mem_r = READ_ONCE(plr->kmem);
	/*
	 * Dummy execute of the time measurement to load the needed
	 * instructions into the L1 instruction cache.
	 */
	start = rdtsc_ordered();
	for (i = 0; i < plr->size; i += 32) {
		start = rdtsc_ordered();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
		end = rdtsc_ordered();
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}
	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

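/*
 * Editorial note: the fixed stride of 32 bytes in the loop above is
 * half of the common 64 byte cache line, matching the "stride of half
 * the cache line" scheme described in the kernel-doc of
 * measure_cycles_lat_fn(). Each emitted latency sample is one
 * rdtsc_ordered() bracketed read, traced via pseudo_lock_mem_latency.
 */
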
/*
 * Create a perf_event_attr for the hit and miss perf events that will
 * be used during the performance measurement. A perf_event maintains
 * a pointer to its perf_event_attr so a unique attribute structure is
 * created for each perf_event.
 *
 * The actual configuration of the event is set right before use in order
 * to use the X86_CONFIG macro.
 */
static struct perf_event_attr perf_miss_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 0,
	.exclude_user	= 1,
};

static struct perf_event_attr perf_hit_attr = {
	.type		= PERF_TYPE_RAW,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 0,
	.exclude_user	= 1,
};

struct residency_counts {
	u64 miss_before, hits_before;
	u64 miss_after, hits_after;
};

static int measure_residency_fn(struct perf_event_attr *miss_attr,
				struct perf_event_attr *hit_attr,
				struct pseudo_lock_region *plr,
				struct residency_counts *counts)
{
	u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
	struct perf_event *miss_event, *hit_event;
	int hit_pmcnum, miss_pmcnum;
	u32 saved_low, saved_high;
	unsigned int line_size;
	unsigned int size;
	unsigned long i;
	void *mem_r;
	u64 tmp;

	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
						      NULL, NULL, NULL);
	if (IS_ERR(miss_event))
		goto out;

	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
						     NULL, NULL, NULL);
	if (IS_ERR(hit_event))
		goto out_miss;

	local_irq_disable();
	/*
	 * Check any possible error state of events used by performing
	 * one local read.
	 */
	if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}
	if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
		local_irq_enable();
		goto out_hit;
	}

	/*
	 * Disable hardware prefetchers.
	 */
	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

	/* Initialize rest of local variables */
	/*
	 * Performance event has been validated right before this with
	 * interrupts disabled - it is thus safe to read the counter index.
	 */
	miss_pmcnum = x86_perf_rdpmc_index(miss_event);
	hit_pmcnum = x86_perf_rdpmc_index(hit_event);
	line_size = READ_ONCE(plr->line_size);
	mem_r = READ_ONCE(plr->kmem);
	size = READ_ONCE(plr->size);

	/*
	 * Read counter variables twice - first to load the instructions
	 * used in L1 cache, second to capture accurate value that does not
	 * include cache misses incurred because of instruction loads.
	 */
	rdpmcl(hit_pmcnum, hits_before);
	rdpmcl(miss_pmcnum, miss_before);
	/*
	 * From SDM: Performing back-to-back fast reads are not guaranteed
	 * to be monotonic.
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	rdpmcl(hit_pmcnum, hits_before);
	rdpmcl(miss_pmcnum, miss_before);
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	for (i = 0; i < size; i += line_size) {
		/*
		 * Add a barrier to prevent speculative execution of this
		 * loop reading beyond the end of the buffer.
		 */
		rmb();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
	}
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	rdpmcl(hit_pmcnum, hits_after);
	rdpmcl(miss_pmcnum, miss_after);
	/*
	 * Use LFENCE to ensure all previous instructions are retired
	 * before proceeding.
	 */
	rmb();
	/* Re-enable hardware prefetchers */
	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
out_hit:
	perf_event_release_kernel(hit_event);
out_miss:
	perf_event_release_kernel(miss_event);
out:
	/*
	 * All counts will be zero on failure.
	 */
	counts->miss_before = miss_before;
	counts->hits_before = hits_before;
	counts->miss_after = miss_after;
	counts->hits_after = hits_after;
	return 0;
}

static int measure_l2_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts = {0};

	/*
	 * Non-architectural event for the Goldmont Microarchitecture
	 * from Intel x86 Architecture Software Developer Manual (SDM):
	 * MEM_LOAD_UOPS_RETIRED D1H (event number)
	 * Umask values:
	 *     L2_HIT   02H
	 *     L2_MISS  10H
	 */
	switch (boot_cpu_data.x86_vfm) {
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
						   .umask = 0x10);
		perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
						  .umask = 0x2);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
	/*
	 * If a failure prevented the measurements from succeeding,
	 * tracepoints will still be written and all counts will be zero.
	 */
	trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
			     counts.miss_after - counts.miss_before);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

static int measure_l3_residency(void *_plr)
{
	struct pseudo_lock_region *plr = _plr;
	struct residency_counts counts = {0};

	/*
	 * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
	 * has two "no fix" errata associated with it: BDM35 and BDM100. On
	 * this platform the following events are used instead:
	 * LONGEST_LAT_CACHE 2EH (Documented in SDM)
	 *       REFERENCE 4FH
	 *       MISS      41H
	 */

	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL_X:
		/* On BDW the hit event counts references, not hits */
		perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
						  .umask = 0x4f);
		perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
						   .umask = 0x41);
		break;
	default:
		goto out;
	}

	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
	/*
	 * If a failure prevented the measurements from succeeding,
	 * tracepoints will still be written and all counts will be zero.
	 */

	counts.miss_after -= counts.miss_before;
	if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_X) {
		/*
		 * On BDW references and misses are counted, need to adjust.
		 * Sometimes the "hits" counter is a bit more than the
		 * references, for example, x references but x + 1 hits.
		 * To not report invalid hit values in this case we treat
		 * that as misses equal to references.
		 */
		/* First compute the number of cache references measured */
		counts.hits_after -= counts.hits_before;
		/* Next convert references to cache hits */
		counts.hits_after -= min(counts.miss_after, counts.hits_after);
	} else {
		counts.hits_after -= counts.hits_before;
	}

	trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
out:
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
	return 0;
}

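/*
 * Worked example (editorial) of the BDX adjustment above: 10000
 * references and 50 misses are reported as 9950 hits and 50 misses.
 * If errata noise makes the misses exceed the references, the hit
 * count is clamped to zero instead of underflowing.
 */
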
/**
 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
 * @rdtgrp: Resource group to which the pseudo-locked region belongs.
 * @sel: Selector of which measurement to perform on a pseudo-locked region.
 *
 * The measurement of latency to access a pseudo-locked region should be
 * done from a cpu that is associated with that pseudo-locked region.
 * Determine which cpu is associated with this region, start a thread on
 * that cpu to perform the measurement, then wait for that thread to complete.
 *
 * Return: 0 on success, <0 on failure
 */
static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;
	struct task_struct *thread;
	unsigned int cpu;
	int ret = -1;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
		goto out;
	}

	if (!plr->d) {
		ret = -ENODEV;
		goto out;
	}

	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->hdr.cpu_mask);
	if (!cpu_online(cpu)) {
		ret = -ENODEV;
		goto out;
	}

	plr->cpu = cpu;

	if (sel == 1)
		thread = kthread_run_on_cpu(measure_cycles_lat_fn, plr,
					    cpu, "pseudo_lock_measure/%u");
	else if (sel == 2)
		thread = kthread_run_on_cpu(measure_l2_residency, plr,
					    cpu, "pseudo_lock_measure/%u");
	else if (sel == 3)
		thread = kthread_run_on_cpu(measure_l3_residency, plr,
					    cpu, "pseudo_lock_measure/%u");
	else
		goto out;

	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		goto out;
	}

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0)
		goto out;

	ret = 0;

out:
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

static ssize_t pseudo_lock_measure_trigger(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct rdtgroup *rdtgrp = file->private_data;
	size_t buf_size;
	char buf[32];
	int ret;
	int sel;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	ret = kstrtoint(buf, 10, &sel);
	if (ret == 0) {
		if (sel != 1 && sel != 2 && sel != 3)
			return -EINVAL;
		ret = debugfs_file_get(file->f_path.dentry);
		if (ret)
			return ret;
		ret = pseudo_lock_measure_cycles(rdtgrp, sel);
		if (ret == 0)
			ret = count;
		debugfs_file_put(file->f_path.dentry);
	}

	return ret;
}

static const struct file_operations pseudo_measure_fops = {
	.write = pseudo_lock_measure_trigger,
	.open = simple_open,
	.llseek = default_llseek,
};

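/*
 * Editorial sketch of triggering a measurement from user space via the
 * debugfs file served by pseudo_measure_fops (group name is an example):
 *
 *   # echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
 *
 * Selector 1 traces per-read latency (measure_cycles_lat_fn), 2 traces
 * L2 hit/miss residency, and 3 traces L3 residency; results appear in
 * the pseudo_lock trace events.
 */
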
/**
 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-lock region belongs
 *
 * Called when a resource group in the pseudo-locksetup mode receives a
 * valid schemata that should be pseudo-locked. Since the resource group is
 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
 * allocated and initialized with the essential information. If a failure
 * occurs the resource group remains in the pseudo-locksetup mode with the
 * &struct pseudo_lock_region associated with it, but cleared from all
 * information and ready for the user to re-attempt pseudo-locking by
 * writing the schemata again.
 *
 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
 * on failure. Descriptive error will be written to last_cmd_status buffer.
 */
int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;
	struct task_struct *thread;
	unsigned int new_minor;
	struct device *dev;
	char *kn_name __free(kfree) = NULL;
	int ret;

	ret = pseudo_lock_region_alloc(plr);
	if (ret < 0)
		return ret;

	ret = pseudo_lock_cstates_constrain(plr);
	if (ret < 0) {
		ret = -EINVAL;
		goto out_region;
	}

	kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL);
	if (!kn_name) {
		ret = -ENOMEM;
		goto out_cstates;
	}

	plr->thread_done = 0;

	thread = kthread_run_on_cpu(pseudo_lock_fn, rdtgrp,
				    plr->cpu, "pseudo_lock/%u");
	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
		goto out_cstates;
	}

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0) {
		/*
		 * If the thread does not get on the CPU for whatever
		 * reason and the process which sets up the region is
		 * interrupted then this will leave the thread in runnable
		 * state and once it gets on the CPU it will dereference
		 * the cleared, but not freed, plr struct resulting in an
		 * empty pseudo-locking loop.
		 */
		rdt_last_cmd_puts("Locking thread interrupted\n");
		goto out_cstates;
	}

	ret = pseudo_lock_minor_get(&new_minor);
	if (ret < 0) {
		rdt_last_cmd_puts("Unable to obtain a new minor number\n");
		goto out_cstates;
	}

	/*
	 * Unlock access but do not release the reference. The
	 * pseudo-locked region will still be here on return.
	 *
	 * The mutex has to be released temporarily to avoid a potential
	 * deadlock with the mm->mmap_lock which is obtained in the
	 * device_create() and debugfs_create_dir() callpath below as well as
	 * before the mmap() callback is called.
	 */
	mutex_unlock(&rdtgroup_mutex);

	if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
		plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl);
		if (!IS_ERR_OR_NULL(plr->debugfs_dir))
			debugfs_create_file("pseudo_lock_measure", 0200,
					    plr->debugfs_dir, rdtgrp,
					    &pseudo_measure_fops);
	}

	dev = device_create(&pseudo_lock_class, NULL,
			    MKDEV(pseudo_lock_major, new_minor),
			    rdtgrp, "%s", kn_name);

	mutex_lock(&rdtgroup_mutex);

	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		rdt_last_cmd_printf("Failed to create character device: %d\n",
				    ret);
		goto out_debugfs;
	}

	/* We released the mutex - check if group was removed while we did so */
	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
		goto out_device;
	}

	plr->minor = new_minor;

	rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
	closid_free(rdtgrp->closid);
	rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
	rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);

	ret = 0;
	goto out;

out_device:
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
out_debugfs:
	debugfs_remove_recursive(plr->debugfs_dir);
	pseudo_lock_minor_release(new_minor);
out_cstates:
	pseudo_lock_cstates_relax(plr);
out_region:
	pseudo_lock_region_clear(plr);
out:
	return ret;
}

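/*
 * Editorial summary of the state after a successful
 * rdtgroup_pseudo_lock_create(): the group mode reads "pseudo-locked",
 * its closid is returned to the allocator, the "cpus"/"cpus_list"
 * files become read-only (0444), a character device named after the
 * group exists under /dev/pseudo_lock/, and, if debugfs is available,
 * a pseudo_lock_measure file is exposed for the measurements above.
 */
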
/**
 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
 * @rdtgrp: resource group to which the pseudo-locked region belongs
 *
 * The removal of a pseudo-locked region can be initiated when the resource
 * group is removed via a "rmdir" from user space or by the unmount of the
 * resctrl filesystem. On removal the resource group does not go back to
 * pseudo-locksetup mode before it is removed, instead it is removed
 * directly. There is thus asymmetry with the creation where the
 * &struct pseudo_lock_region is removed here while it was not created in
 * rdtgroup_pseudo_lock_create().
 *
 * Return: void
 */
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
{
	struct pseudo_lock_region *plr = rdtgrp->plr;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * Default group cannot be a pseudo-locked region so we can
		 * free closid here.
		 */
		closid_free(rdtgrp->closid);
		goto free;
	}

	pseudo_lock_cstates_relax(plr);
	debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
	pseudo_lock_minor_release(plr->minor);

free:
	pseudo_lock_free(rdtgrp);
}

static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
{
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);

	rdtgrp = region_find_by_minor(iminor(inode));
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	filp->private_data = rdtgrp;
	atomic_inc(&rdtgrp->waitcount);
	/* Perform a non-seekable open - llseek is not supported */
	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
{
	struct rdtgroup *rdtgrp;

	mutex_lock(&rdtgroup_mutex);
	rdtgrp = filp->private_data;
	WARN_ON(!rdtgrp);
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}
	filp->private_data = NULL;
	atomic_dec(&rdtgrp->waitcount);
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
{
	/* Not supported */
	return -EINVAL;
}

static const struct vm_operations_struct pseudo_mmap_ops = {
	.mremap = pseudo_lock_dev_mremap,
};

static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct pseudo_lock_region *plr;
	struct rdtgroup *rdtgrp;
	unsigned long physical;
	unsigned long psize;

	mutex_lock(&rdtgroup_mutex);

	rdtgrp = filp->private_data;
	WARN_ON(!rdtgrp);
	if (!rdtgrp) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	plr = rdtgrp->plr;

	if (!plr->d) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENODEV;
	}

	/*
	 * Task is required to run with affinity to the cpus associated
	 * with the pseudo-locked region. If this is not the case the task
	 * may be scheduled elsewhere and invalidate entries in the
	 * pseudo-locked region.
	 */
	if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EINVAL;
	}

	physical = __pa(plr->kmem) >> PAGE_SHIFT;
	psize = plr->size - off;

	if (off > plr->size) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENOSPC;
	}

	/*
	 * Ensure changes are carried directly to the memory being mapped,
	 * do not allow copy-on-write mapping.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EINVAL;
	}

	if (vsize > psize) {
		mutex_unlock(&rdtgroup_mutex);
		return -ENOSPC;
	}

	memset(plr->kmem + off, 0, vsize);

	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
			    vsize, vma->vm_page_prot)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EAGAIN;
	}
	vma->vm_ops = &pseudo_mmap_ops;
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

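/*
 * Editorial sketch (assumed names) of a user-space consumer honoring
 * the affinity check in pseudo_lock_dev_mmap() above:
 *
 *   cpu_set_t cpus;
 *
 *   CPU_ZERO(&cpus);
 *   CPU_SET(cpu_of_cache_domain, &cpus);
 *   sched_setaffinity(0, sizeof(cpus), &cpus);
 *   fd = open("/dev/pseudo_lock/newlock", O_RDWR);
 *   mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Accesses to "mem" then hit the pseudo-locked cache portion for as
 * long as the task stays affine to the region's CPUs.
 */
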
static const struct file_operations pseudo_lock_dev_fops = {
	.owner =	THIS_MODULE,
	.read =		NULL,
	.write =	NULL,
	.open =		pseudo_lock_dev_open,
	.release =	pseudo_lock_dev_release,
	.mmap =		pseudo_lock_dev_mmap,
};

int rdt_pseudo_lock_init(void)
{
	int ret;

	ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
	if (ret < 0)
		return ret;

	pseudo_lock_major = ret;

	ret = class_register(&pseudo_lock_class);
	if (ret) {
		unregister_chrdev(pseudo_lock_major, "pseudo_lock");
		return ret;
	}

	return 0;
}

void rdt_pseudo_lock_release(void)
{
	class_unregister(&pseudo_lock_class);
	unregister_chrdev(pseudo_lock_major, "pseudo_lock");
	pseudo_lock_major = 0;
}