// SPDX-License-Identifier: GPL-2.0
/*
 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
 * ...
 * pseudo-locked regions.
 */

/* in pseudo_lock_devnode() */
	return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn));
/**
 * pseudo_lock_minor_get - Obtain available minor number
 * ...
 */
	return -ENOSPC;

/**
 * pseudo_lock_minor_release - Return minor number to available
 * ...
 */
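/*
 * A minimal sketch, consistent with the -ENOSPC return above, of handing
 * minor numbers out of a bitmap of available minors and returning them on
 * release. The mask name, its width and the function names are
 * illustrative assumptions, not this file's actual declarations.
 */
static unsigned long example_minor_avail = ~0UL;

static int example_minor_get(unsigned int *minor)
{
	unsigned long first_bit;

	first_bit = find_first_bit(&example_minor_avail, BITS_PER_LONG);
	if (first_bit == BITS_PER_LONG)
		return -ENOSPC;	/* every minor number is in use */

	__clear_bit(first_bit, &example_minor_avail);	/* mark as taken */
	*minor = first_bit;
	return 0;
}

static void example_minor_release(unsigned int minor)
{
	__set_bit(minor, &example_minor_avail);	/* available again */
}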
/**
 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
 * @minor: The minor number of the device representing pseudo-locked region
 * ...
 * pseudo-locked region it belongs to. This is done by matching the minor
 * number of the device to the pseudo-locked region it belongs to.
 *
 * Minor numbers are assigned at the time a pseudo-locked region is associated
 * ...
 * Return: On success return pointer to resource group owning the pseudo-locked
 * ...
 */
		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
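/*
 * Sketch of the lookup the comment above describes: walk every resource
 * group and match on the minor of its pseudo-locked region. The
 * rdt_all_groups list is assumed from the surrounding resctrl code and
 * the function body is illustrative, not this file's exact code.
 */
static struct rdtgroup *example_region_find_by_minor(unsigned int minor)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->plr && rdtgrp->plr->minor == minor)
			return rdtgrp;	/* owning resource group found */
	}
	return NULL;	/* no pseudo-locked region uses this minor */
}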
/**
 * struct pseudo_lock_pm_req - A power management QoS request list entry
 * @list: Entry within the @pm_reqs list for a pseudo-locked region
 * ...
 */

/* in pseudo_lock_cstates_relax() */
	list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
		dev_pm_qos_remove_request(&pm_req->req);
		list_del(&pm_req->list);
/**
 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
 * @plr: Pseudo-locked region
 * ...
 */
	for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
		...
			ret = -ENOMEM;
		...
					     &pm_req->req,
		...
			ret = -1;
		...
		list_add(&pm_req->list, &plr->pm_reqs);
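/*
 * A sketch of the full loop body, reconstructing the elided steps around
 * the lines shown above: allocate a request node, install a device PM QoS
 * resume-latency constraint tight enough to keep the core out of deep
 * C-states, and queue the node on plr->pm_reqs so that
 * pseudo_lock_cstates_relax() can undo it. The 30 usec bound and the
 * unwind via break are illustrative assumptions.
 */
	pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
	if (!pm_req) {
		ret = -ENOMEM;
		break;
	}
	ret = dev_pm_qos_add_request(get_cpu_device(cpu),
				     &pm_req->req,
				     DEV_PM_QOS_RESUME_LATENCY,
				     30);
	if (ret < 0) {
		kfree(pm_req);
		ret = -1;
		break;
	}
	list_add(&pm_req->list, &plr->pm_reqs);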
/**
 * pseudo_lock_region_clear - Reset pseudo-lock region data
 * @plr: pseudo-lock region
 *
 * All content of the pseudo-locked region is reset - any memory allocated
 * ...
 */
	plr->size = 0;
	plr->line_size = 0;
	kfree(plr->kmem);
	plr->kmem = NULL;
	plr->s = NULL;
	if (plr->d)
		plr->d->plr = NULL;
	plr->d = NULL;
	plr->cbm = 0;
	plr->debugfs_dir = NULL;
/**
 * pseudo_lock_region_init - Initialize pseudo-lock region information
 * @plr: pseudo-lock region
 *
 * Called after the user provided a schemata to be pseudo-locked. From the
 * ...
 * required for pseudo-locking is deduced from this data and &struct
 * ...
 * - size in bytes of the region to be pseudo-locked
 * - cache line size to know the stride with which data needs to be accessed
 *   to be pseudo-locked (see the access-stride sketch below)
 * - a cpu associated with the cache instance on which the pseudo-locking
 * ...
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * ...
 */
	enum resctrl_scope scope = plr->s->res->ctrl_scope;
	...
		return -ENODEV;
	...
	plr->cpu = cpumask_first(&plr->d->hdr.cpu_mask);

	if (!cpu_online(plr->cpu)) {
		...
			    plr->cpu);
		ret = -ENODEV;
	...
	ci = get_cpu_cacheinfo_level(plr->cpu, scope);
	...
	plr->line_size = ci->coherency_line_size;
	plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
	...
		ret = -1;
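/*
 * Why the cache line size matters, as a sketch (an illustration, not this
 * file's actual locking sequence): once plr->kmem, plr->size and
 * plr->line_size are known, the region can be pulled into the cache by
 * touching one byte per cache line.
 */
static void example_touch_region(struct pseudo_lock_region *plr)
{
	unsigned char *mem = plr->kmem;
	unsigned char val;
	unsigned int i;

	/* Stride by line_size: one read pulls in a whole cache line. */
	for (i = 0; i < plr->size; i += plr->line_size)
		val = READ_ONCE(mem[i]);
	(void)val;
}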
/**
 * pseudo_lock_init - Initialize a pseudo-lock region
 * @rdtgrp: resource group to which new pseudo-locked region will belong
 *
 * A pseudo-locked region is associated with a resource group. When this
 * association is created the pseudo-locked region is initialized. The
 * details of the pseudo-locked region are not known at this time so only
 * ...
 */
		return -ENOMEM;

	init_waitqueue_head(&plr->lock_thread_wq);
	INIT_LIST_HEAD(&plr->pm_reqs);
	rdtgrp->plr = plr;
/**
 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
 * @plr: pseudo-lock region
 *
 * Initialize the details required to set up the pseudo-locked region and
 * allocate the contiguous memory that will be pseudo-locked to the cache.
 *
 * Return: 0 on success, <0 on failure. Descriptive error will be written
 * ...
 */
	if (plr->size > KMALLOC_MAX_SIZE) {
		...
		ret = -E2BIG;
	...
	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
	if (!plr->kmem) {
		...
		ret = -ENOMEM;
/**
 * pseudo_lock_free - Free a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-locked region belonged
 *
 * The pseudo-locked region's resources have already been released, or not
 * ...
 */
	pseudo_lock_region_clear(rdtgrp->plr);
	kfree(rdtgrp->plr);
	rdtgrp->plr = NULL;
/**
 * rdtgroup_monitor_in_progress - Test if monitoring in progress
 * ...
 */
	return !list_empty(&rdtgrp->mon.crdtgrp_list);
/**
 * rdtgroup_locksetup_user_restrict - Restrict user access to group
 * ...
 * A resource group used for cache pseudo-locking cannot have cpus or tasks
 * ...
 */

/**
 * rdtgroup_locksetup_user_restore - Restore user access to group
 * ...
 */
/**
 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
 * ...
 * to represent a pseudo-locked region and is in the process of being set
 * up to do so. A resource group used for a pseudo-locked region would
 * ...
 * future. Monitoring of a pseudo-locked region is not allowed either.
 *
 * The above and more restrictions on a pseudo-locked region are checked
 * ...
 */
		rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
		return -EINVAL;

	/*
	 * Cache Pseudo-locking not supported when CDP is enabled.
	 * ...
	 * - When CDP is enabled two separate resources are exposed,
	 *   ...
	 *   The implication for pseudo-locking is that if a
	 *   pseudo-locked region is created on a domain of one
	 *   resource (eg. L3CODE), then a pseudo-locked region cannot
	 *   ...
	 *   pseudo-locked region involves a call to wbinvd that will
	 *   ...
	 * - Considering the previous, it may be possible to only
	 *   expose one of the CDP resources to pseudo-locking and
	 *   ...
	 * - If only one region is exposed to pseudo-locking we should
	 *   ...
	 *   for pseudo-locking should take into account both resources.
	 *   Similarly, if a pseudo-locked region is created in one
	 *   ...
	 */
		return -EINVAL;

	/*
	 * ...
	 * platform does not support Cache Pseudo-Locking.
	 */
		rdt_last_cmd_puts("Pseudo-locking not supported\n");
		return -EINVAL;
	...
		return -EINVAL;
	...
		return -EINVAL;

	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
		...
		return -EINVAL;
	...
		return -EIO;
	...
		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");

	/*
	 * ...
	 * anymore when this group would be used for pseudo-locking. This
	 * ...
	 */
	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
/**
 * rdtgroup_locksetup_exit - Resource group exits locksetup mode
 * ...
 */
		ret = alloc_rmid(rdtgrp->closid);
		...
		rdtgrp->mon.rmid = ret;
	...
			free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
/**
 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
 * ...
 * pseudo-locked region on @d.
 * ...
 * Return: true if @cbm overlaps with pseudo-locked region on @d, false
 * ...
 */
	if (d->plr) {
		cbm_len = d->plr->s->res->cache.cbm_len;
		cbm_b = d->plr->cbm;
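	/*
	 * The elided tail of the test above plausibly reduces to a bitmap
	 * intersection over the first cbm_len bits; a minimal sketch under
	 * that assumption:
	 */
		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
			return true;	/* requested CBM touches locked ways */
	}
	return false;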
/**
 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
 * ...
 * The setup of a pseudo-locked region affects all cache instances within
 * ...
 * pseudo-locked regions exist within a cache hierarchy to prevent any
 * attempts to create new pseudo-locked regions in the same hierarchy.
 *
 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
 * ...
 */
	/* Walking r->domains, ensure it can't race with cpuhp */
	...
	/*
	 * First determine which cpus have pseudo-locked regions
	 * ...
	 */
		list_for_each_entry(d_i, &r->ctrl_domains, hdr.list) {
			if (d_i->plr)
				...
					   &d_i->hdr.cpu_mask);
	/*
	 * Next test if new pseudo-locked region would intersect with
	 * ...
	 */
	if (cpumask_intersects(&d->hdr.cpu_mask, cpu_with_psl))
/**
 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
 * @rdtgrp: Resource group to which the pseudo-locked region belongs.
 * @sel: Selector of which measurement to perform on a pseudo-locked region.
 *
 * The measurement of latency to access a pseudo-locked region should be
 * done from a cpu that is associated with that pseudo-locked region.
 * ...
 */
	struct pseudo_lock_region *plr = rdtgrp->plr;
	...
	int ret = -1;
	...
	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
	...
	if (!plr->d) {
		ret = -ENODEV;
	...
	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->hdr.cpu_mask);
	...
		ret = -ENODEV;
	...
	plr->cpu = cpu;
	...
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
/* in pseudo_lock_measure_trigger() */
	struct rdtgroup *rdtgrp = file->private_data;
	...
	buf_size = min(count, (sizeof(buf) - 1));
	...
		return -EFAULT;
	...
			return -EINVAL;
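/*
 * User-space view of the trigger above, as a sketch: a measurement is
 * started by writing a selector to the group's pseudo_lock_measure file
 * in debugfs. The group name "p0" and the selector "1" (latency
 * measurement) are illustrative assumptions.
 */
#include <fcntl.h>
#include <unistd.h>

static int example_trigger_measurement(void)
{
	int fd = open("/sys/kernel/debug/resctrl/p0/pseudo_lock_measure",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {	/* "1" selects the latency test */
		close(fd);
		return -1;
	}
	return close(fd);
}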
/**
 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
 * @rdtgrp: resource group to which pseudo-lock region belongs
 *
 * Called when a resource group in the pseudo-locksetup mode receives a
 * valid schemata that should be pseudo-locked. Since the resource group is
 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
 * ...
 * occurs the resource group remains in the pseudo-locksetup mode with the
 * ...
 * information and ready for the user to re-attempt pseudo-locking by
 * ...
 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
 * on failure. Descriptive error will be written to last_cmd_status buffer.
 */
	struct pseudo_lock_region *plr = rdtgrp->plr;
	...
		ret = -EINVAL;
	...
	kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL);
	...
		ret = -ENOMEM;
	...
	plr->thread_done = 0;
	...
				      plr->cpu, "pseudo_lock/%u");
	...
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	/*
	 * ...
	 * empty pseudo-locking loop.
	 */
	/*
	 * ...
	 * pseudo-locked region will still be here on return.
	 * ...
	 * deadlock with the mm->mmap_lock which is obtained in the
	 * ...
	 */
	plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl);
	if (!IS_ERR_OR_NULL(plr->debugfs_dir))
		...
				    plr->debugfs_dir, rdtgrp,
	...
	/* We released the mutex - check if group was removed while we did so */
	if (rdtgrp->flags & RDT_DELETED) {
		ret = -ENODEV;
	...
	plr->minor = new_minor;
	...
	rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
	closid_free(rdtgrp->closid);
	...
	debugfs_remove_recursive(plr->debugfs_dir);
/**
 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
 * @rdtgrp: resource group to which the pseudo-locked region belongs
 *
 * The removal of a pseudo-locked region can be initiated when the resource
 * ...
 * not go back to pseudo-locksetup mode before it is removed, instead it is
 * ...
 */
	struct pseudo_lock_region *plr = rdtgrp->plr;
	...
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * Default group cannot be a pseudo-locked region so we can
		 * ...
		 */
		closid_free(rdtgrp->closid);
	...
	debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
	device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
	pseudo_lock_minor_release(plr->minor);
/* in pseudo_lock_dev_open() */
		return -ENODEV;
	...
	filp->private_data = rdtgrp;
	atomic_inc(&rdtgrp->waitcount);
	/* Perform a non-seekable open - llseek is not supported */
	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

/* in pseudo_lock_dev_release() */
	rdtgrp = filp->private_data;
	...
		return -ENODEV;
	...
	filp->private_data = NULL;
	atomic_dec(&rdtgrp->waitcount);

/* in pseudo_lock_dev_mremap() */
	return -EINVAL;
/* in pseudo_lock_dev_mmap_prepare() */
	unsigned long off = desc->pgoff << PAGE_SHIFT;
	...
	struct file *filp = desc->file;
	...
	rdtgrp = filp->private_data;
	...
		return -ENODEV;
	...
	plr = rdtgrp->plr;

	if (!plr->d) {
		...
		return -ENODEV;
	...
	/*
	 * ...
	 * with the pseudo-locked region. If this is not the case the task
	 * ...
	 * pseudo-locked region.
	 */
	if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) {
		...
		return -EINVAL;
	...
	physical = __pa(plr->kmem) >> PAGE_SHIFT;
	psize = plr->size - off;

	if (off > plr->size) {
		...
		return -ENOSPC;
	...
	/*
	 * ...
	 * do not allow copy-on-write mapping.
	 */
	if (!(desc->vm_flags & VM_SHARED)) {
		...
		return -EINVAL;
	...
		return -ENOSPC;
	...
	memset(plr->kmem + off, 0, vsize);
	...
	desc->vm_ops = &pseudo_mmap_ops;
	mmap_action_remap_full(desc, physical + desc->pgoff);
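/*
 * Sketch of the user-space contract the mmap handler above enforces: the
 * caller must first pin itself to CPUs of the pseudo-locked cache domain
 * (the cpumask_subset() check) and must map the region MAP_SHARED (a
 * private copy-on-write mapping is rejected). The device name "newlock"
 * and the chosen CPU are illustrative assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *example_map_pseudo_locked(size_t size, int cpu)
{
	cpu_set_t cpus;
	void *mem;
	int fd;

	/* Run only on a CPU associated with the pseudo-locked cache. */
	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);
	if (sched_setaffinity(0, sizeof(cpus), &cpus))
		return NULL;

	fd = open("/dev/pseudo_lock/newlock", O_RDWR);
	if (fd < 0)
		return NULL;

	/* MAP_SHARED is required; a private mapping fails with -EINVAL. */
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return mem == MAP_FAILED ? NULL : mem;
}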