/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
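/*
 * For example (userland sketch using the public cpuset_setaffinity(2) and
 * cpuset_getaffinity(2) interfaces described above; the cpu index and error
 * handling are arbitrary):
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_getaffinity");
 */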
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

        refcount_acquire(&set->cs_ref);
        return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

        for (; set->cs_parent != NULL; set = set->cs_parent)
                if (set->cs_flags & CPU_SET_ROOT)
                        break;
        cpuset_ref(set);

        return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

        if (set->cs_id == CPUSET_INVALID)
                set = set->cs_parent;
        cpuset_ref(set);

        return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
        cpusetid_t id;

        if (refcount_release(&set->cs_ref) == 0)
                return;
        mtx_lock_spin(&cpuset_lock);
        LIST_REMOVE(set, cs_siblings);
        id = set->cs_id;
        if (id != CPUSET_INVALID)
                LIST_REMOVE(set, cs_link);
        mtx_unlock_spin(&cpuset_lock);
        cpuset_rel(set->cs_parent);
        uma_zfree(cpuset_zone, set);
        if (id != CPUSET_INVALID)
                free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

        if (refcount_release(&set->cs_ref) == 0)
                return;
        mtx_lock_spin(&cpuset_lock);
        LIST_REMOVE(set, cs_siblings);
        if (set->cs_id != CPUSET_INVALID)
                LIST_REMOVE(set, cs_link);
        LIST_INSERT_HEAD(head, set, cs_link);
        mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
        LIST_REMOVE(set, cs_link);
        cpuset_rel(set->cs_parent);
        uma_zfree(cpuset_zone, set);
}

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
        struct cpuset *set;

        if (setid == CPUSET_INVALID)
                return (NULL);
        mtx_lock_spin(&cpuset_lock);
        LIST_FOREACH(set, &cpuset_ids, cs_link)
                if (set->cs_id == setid)
                        break;
        if (set)
                cpuset_ref(set);
        mtx_unlock_spin(&cpuset_lock);

        KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
        if (set != NULL && jailed(td->td_ucred)) {
                struct cpuset *jset, *tset;

                jset = td->td_ucred->cr_prison->pr_cpuset;
                for (tset = set; tset != NULL; tset = tset->cs_parent)
                        if (tset == jset)
                                break;
                if (tset == NULL) {
                        cpuset_rel(set);
                        set = NULL;
                }
        }

        return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

        if (!CPU_OVERLAP(&parent->cs_mask, mask))
                return (EDEADLK);
        CPU_COPY(mask, &set->cs_mask);
        LIST_INIT(&set->cs_children);
        refcount_init(&set->cs_ref, 1);
        set->cs_flags = 0;
        mtx_lock_spin(&cpuset_lock);
        CPU_AND(&set->cs_mask, &parent->cs_mask);
        set->cs_id = id;
        set->cs_parent = cpuset_ref(parent);
        LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
        if (set->cs_id != CPUSET_INVALID)
                LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
        mtx_unlock_spin(&cpuset_lock);

        return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number cannot be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
        struct cpuset *set;
        cpusetid_t id;
        int error;

        id = alloc_unr(cpuset_unr);
        if (id == -1)
                return (ENFILE);
        *setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
        error = _cpuset_create(set, parent, mask, id);
        if (error == 0)
                return (0);
        free_unr(cpuset_unr, id);
        uma_zfree(cpuset_zone, set);

        return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
        struct cpuset *nset;
        cpuset_t newmask;
        int error;

        mtx_assert(&cpuset_lock, MA_OWNED);
        if (set->cs_flags & CPU_SET_RDONLY)
                return (EPERM);
        if (check_mask) {
                if (!CPU_OVERLAP(&set->cs_mask, mask))
                        return (EDEADLK);
                CPU_COPY(&set->cs_mask, &newmask);
                CPU_AND(&newmask, mask);
        } else
                CPU_COPY(mask, &newmask);
        error = 0;
        LIST_FOREACH(nset, &set->cs_children, cs_siblings)
                if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
                        break;
        return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
        struct cpuset *nset;

        mtx_assert(&cpuset_lock, MA_OWNED);
        CPU_AND(&set->cs_mask, mask);
        LIST_FOREACH(nset, &set->cs_children, cs_siblings)
                cpuset_update(nset, &set->cs_mask);

        return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
        struct cpuset *root;
        int error;

        error = priv_check(curthread, PRIV_SCHED_CPUSET);
        if (error)
                return (error);
        /*
         * In case we are called from within the jail
         * we do not allow modifying the dedicated root
         * cpuset of the jail but may still allow
         * changes to child sets.
         */
        if (jailed(curthread->td_ucred) &&
            set->cs_flags & CPU_SET_ROOT)
                return (EPERM);
        /*
         * Verify that we have access to this set of
         * cpus.
         */
        root = set->cs_parent;
        if (root && !CPU_SUBSET(&root->cs_mask, mask))
                return (EINVAL);
        mtx_lock_spin(&cpuset_lock);
        error = cpuset_testupdate(set, mask, 0);
        if (error)
                goto out;
        CPU_COPY(mask, &set->cs_mask);
        cpuset_update(set, mask);
out:
        mtx_unlock_spin(&cpuset_lock);

        return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
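/*
 * For example, cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set) returns with
 * curproc locked, 'td' pointing at one of its threads, and 'set' left NULL;
 * the caller is responsible for dropping the process lock.
 */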
int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
        struct cpuset *set;
        struct thread *td;
        struct proc *p;
        int error;

        *pp = p = NULL;
        *tdp = td = NULL;
        *setp = set = NULL;
        switch (which) {
        case CPU_WHICH_PID:
                if (id == -1) {
                        PROC_LOCK(curproc);
                        p = curproc;
                        break;
                }
                if ((p = pfind(id)) == NULL)
                        return (ESRCH);
                break;
        case CPU_WHICH_TID:
                if (id == -1) {
                        PROC_LOCK(curproc);
                        p = curproc;
                        td = curthread;
                        break;
                }
                td = tdfind(id, -1);
                if (td == NULL)
                        return (ESRCH);
                p = td->td_proc;
                break;
        case CPU_WHICH_CPUSET:
                if (id == -1) {
                        thread_lock(curthread);
                        set = cpuset_refbase(curthread->td_cpuset);
                        thread_unlock(curthread);
                } else
                        set = cpuset_lookup(id, curthread);
                if (set) {
                        *setp = set;
                        return (0);
                }
                return (ESRCH);
        case CPU_WHICH_JAIL:
        {
                /* Find `set' for prison with given id. */
                struct prison *pr;

                sx_slock(&allprison_lock);
                pr = prison_find_child(curthread->td_ucred->cr_prison, id);
                sx_sunlock(&allprison_lock);
                if (pr == NULL)
                        return (ESRCH);
                cpuset_ref(pr->pr_cpuset);
                *setp = pr->pr_cpuset;
                mtx_unlock(&pr->pr_mtx);
                return (0);
        }
        case CPU_WHICH_IRQ:
        case CPU_WHICH_DOMAIN:
                return (0);
        default:
                return (EINVAL);
        }
        error = p_cansched(curthread, p);
        if (error) {
                PROC_UNLOCK(p);
                return (error);
        }
        if (td == NULL)
                td = FIRST_THREAD_IN_PROC(p);
        *pp = p;
        *tdp = td;
        return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
        struct cpuset *parent;

        if (set->cs_id == CPUSET_INVALID)
                parent = set->cs_parent;
        else
                parent = set;
        if (!CPU_SUBSET(&parent->cs_mask, mask))
                return (EDEADLK);
        return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
        struct setlist freelist;
        struct setlist droplist;
        struct cpuset *tdset;
        struct cpuset *nset;
        struct thread *td;
        struct proc *p;
        int threads;
        int nfree;
        int error;

        /*
         * The algorithm requires two passes due to locking considerations.
         *
         * 1) Lookup the process and acquire the locks in the required order.
         * 2) If enough cpusets have not been allocated release the locks and
         *    allocate them.  Loop.
         */
        LIST_INIT(&freelist);
        LIST_INIT(&droplist);
        nfree = 0;
        for (;;) {
                error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
                if (error)
                        goto out;
                if (nfree >= p->p_numthreads)
                        break;
                threads = p->p_numthreads;
                PROC_UNLOCK(p);
                for (; nfree < threads; nfree++) {
                        nset = uma_zalloc(cpuset_zone, M_WAITOK);
                        LIST_INSERT_HEAD(&freelist, nset, cs_link);
                }
        }
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * Now that the appropriate locks are held and we have enough cpusets,
         * make sure the operation will succeed before applying changes.  The
         * proc lock prevents td_cpuset from changing between calls.
         */
        error = 0;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                tdset = td->td_cpuset;
                /*
                 * Verify that a new mask doesn't specify cpus outside of
                 * the set the thread is a member of.
                 */
                if (mask) {
                        if (tdset->cs_id == CPUSET_INVALID)
                                tdset = tdset->cs_parent;
                        if (!CPU_SUBSET(&tdset->cs_mask, mask))
                                error = EDEADLK;
                /*
                 * Verify that a new set won't leave an existing thread
                 * mask without a cpu to run on.  It can, however, restrict
                 * the set.
                 */
                } else if (tdset->cs_id == CPUSET_INVALID) {
                        if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
                                error = EDEADLK;
                }
                thread_unlock(td);
                if (error)
                        goto unlock_out;
        }
        /*
         * Replace each thread's cpuset while using deferred release.  We
         * must do this because the thread lock must be held while operating
         * on the thread and this limits the type of operations allowed.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                /*
                 * If we presently have an anonymous set or are applying a
                 * mask we must create an anonymous shadow set.  That is
                 * either parented to our existing base or the supplied set.
                 *
                 * If we have a base set with no anonymous shadow we simply
                 * replace it outright.
                 */
                tdset = td->td_cpuset;
                if (tdset->cs_id == CPUSET_INVALID || mask) {
                        nset = LIST_FIRST(&freelist);
                        LIST_REMOVE(nset, cs_link);
                        if (mask)
                                error = cpuset_shadow(tdset, nset, mask);
                        else
                                error = _cpuset_create(nset, set,
                                    &tdset->cs_mask, CPUSET_INVALID);
                        if (error) {
                                LIST_INSERT_HEAD(&freelist, nset, cs_link);
                                thread_unlock(td);
                                break;
                        }
                } else
                        nset = cpuset_ref(set);
                cpuset_rel_defer(&droplist, tdset);
                td->td_cpuset = nset;
                sched_affinity(td);
                thread_unlock(td);
        }
unlock_out:
        PROC_UNLOCK(p);
out:
        while ((nset = LIST_FIRST(&droplist)) != NULL)
                cpuset_rel_complete(nset);
        while ((nset = LIST_FIRST(&freelist)) != NULL) {
                LIST_REMOVE(nset, cs_link);
                uma_zfree(cpuset_zone, nset);
        }
        return (error);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
        char *tbuf;
        size_t i, bytesp, bufsiz;

        tbuf = buf;
        bytesp = 0;
        bufsiz = CPUSETBUFSIZ;

        for (i = 0; i < (_NCPUWORDS - 1); i++) {
                bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
                bufsiz -= bytesp;
                tbuf += bytesp;
        }
        snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
        return (buf);
}
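/*
 * For example, with CPU_MAXSIZE of 128 on a 64-bit machine (two mask words),
 * a set containing only CPUs 0 and 65 prints as "1,2": the low word first,
 * each word in hex, comma separated.  cpusetobj_strscan() below accepts the
 * same layout, optionally with the trailing (high) words omitted.
 */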

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
        u_int nwords;
        int i, ret;

        if (strlen(buf) > CPUSETBUFSIZ - 1)
                return (-1);

        /* Allow passing a shorter version of the mask when necessary. */
        nwords = 1;
        for (i = 0; buf[i] != '\0'; i++)
                if (buf[i] == ',')
                        nwords++;
        if (nwords > _NCPUWORDS)
                return (-1);

        CPU_ZERO(set);
        for (i = 0; i < (nwords - 1); i++) {
                ret = sscanf(buf, "%lx,", &set->__bits[i]);
                if (ret == 0 || ret == -1)
                        return (-1);
                buf = strstr(buf, ",");
                if (buf == NULL)
                        return (-1);
                buf++;
        }
        ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
        if (ret == 0 || ret == -1)
                return (-1);
        return (0);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
        struct cpuset *nset;
        struct cpuset *set;
        struct thread *td;
        struct proc *p;
        int error;

        nset = uma_zalloc(cpuset_zone, M_WAITOK);
        error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
        if (error)
                goto out;
        set = NULL;
        thread_lock(td);
        error = cpuset_shadow(td->td_cpuset, nset, mask);
        if (error == 0) {
                set = td->td_cpuset;
                td->td_cpuset = nset;
                sched_affinity(td);
                nset = NULL;
        }
        thread_unlock(td);
        PROC_UNLOCK(p);
        if (set)
                cpuset_rel(set);
out:
        if (nset)
                uma_zfree(cpuset_zone, nset);
        return (error);
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
        struct cpuset *nset, *rset;
        struct cpuset *parent, *old_set;
        struct thread *td;
        struct proc *p;
        cpusetid_t cs_id;
        cpuset_t mask;
        int error;

        nset = uma_zalloc(cpuset_zone, M_WAITOK);
        rset = uma_zalloc(cpuset_zone, M_WAITOK);
        cs_id = CPUSET_INVALID;

        CPU_ZERO(&mask);
        if (cpu == NOCPU)
                CPU_COPY(cpuset_root, &mask);
        else
                CPU_SET(cpu, &mask);

        error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
        if (error != 0 || ((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID))
                goto out;

        /* cpuset_which() returns with PROC_LOCK held. */
        old_set = td->td_cpuset;

        if (cpu == NOCPU) {
                /*
                 * Roll back to the default set.  We're not using
                 * cpuset_shadow() here because the CPU_SUBSET() check can
                 * fail if the default set does not contain all CPUs.
                 */
                error = _cpuset_create(nset, cpuset_default, &mask,
                    CPUSET_INVALID);

                goto applyset;
        }

        if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
            old_set->cs_parent->cs_id == 1)) {
                /*
                 * Current set is either default (1) or
                 * shadowed version of default set.
                 *
                 * Allocate new root set to be able to shadow it
                 * with any mask.
                 */
                error = _cpuset_create(rset, cpuset_zero,
                    &cpuset_zero->cs_mask, cs_id);
                if (error != 0) {
                        PROC_UNLOCK(p);
                        goto out;
                }
                rset->cs_flags |= CPU_SET_ROOT;
                parent = rset;
                rset = NULL;
                cs_id = CPUSET_INVALID;
        } else {
                /* Assume existing set was already allocated by previous call. */
                parent = old_set;
                old_set = NULL;
        }

        error = cpuset_shadow(parent, nset, &mask);
applyset:
        if (error == 0) {
                thread_lock(td);
                td->td_cpuset = nset;
                sched_affinity(td);
                thread_unlock(td);
                nset = NULL;
        } else
                old_set = NULL;
        PROC_UNLOCK(p);
        if (old_set != NULL)
                cpuset_rel(old_set);
out:
        if (nset != NULL)
                uma_zfree(cpuset_zone, nset);
        if (rset != NULL)
                uma_zfree(cpuset_zone, rset);
        if (cs_id != CPUSET_INVALID)
                free_unr(cpuset_unr, cs_id);
        return (error);
}

/*
 * Creates system-wide cpusets and the cpuset for thread0 including two
 * sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
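/*
 * For instance, an administrator can later shrink the default set (and with
 * it every process still in it) from userland with the cpuset(1) utility,
 * e.g. "cpuset -l 0-3 -s 1", leaving the remaining cpus for explicitly
 * pinned work.
 */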
struct cpuset *
cpuset_thread0(void)
{
        struct cpuset *set;
        int error, i;

        cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
            NULL, NULL, UMA_ALIGN_PTR, 0);
        mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

        /*
         * Create the root system set for the whole machine.  Doesn't use
         * cpuset_create() due to NULL parent.
         */
        set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
        CPU_FILL(&set->cs_mask);
        LIST_INIT(&set->cs_children);
        LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
        set->cs_ref = 1;
        set->cs_flags = CPU_SET_ROOT;
        cpuset_zero = set;
        cpuset_root = &set->cs_mask;

        /*
         * Now derive a default, modifiable set from that to give out.
         */
        set = uma_zalloc(cpuset_zone, M_WAITOK);
        error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
        KASSERT(error == 0, ("Error creating default set: %d\n", error));
        cpuset_default = set;

        /*
         * Initialize the unit allocator.  0 and 1 are allocated above.
         */
        cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

        /*
         * If MD code has not initialized per-domain cpusets, place all
         * CPUs in domain 0.
         */
        for (i = 0; i < MAXMEMDOM; i++)
                if (!CPU_EMPTY(&cpuset_domain[i]))
                        goto domains_set;
        CPU_COPY(&all_cpus, &cpuset_domain[0]);
domains_set:

        return (set);
}

/*
 * Create a cpuset, which would be cpuset_create() but
 * mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
        struct cpuset *set;
        int error;

        KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
        KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

        error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
        if (error)
                return (error);

        KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
            __func__, __LINE__));

        /* Mark the set as root. */
        set = *setp;
        set->cs_flags |= CPU_SET_ROOT;

        return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
        int error;

        KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
        KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

        cpuset_ref(set);
        error = cpuset_setproc(p->p_pid, set, NULL);
        if (error)
                return (error);
        cpuset_rel(set);
        return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
        cpuset_t mask;

        mask = all_cpus;
        if (cpuset_modify(cpuset_zero, &mask))
                panic("Can't set initial cpuset mask.\n");
        cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
        cpusetid_t      *setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
        struct cpuset *root;
        struct cpuset *set;
        int error;

        thread_lock(td);
        root = cpuset_refroot(td->td_cpuset);
        thread_unlock(td);
        error = cpuset_create(&set, root, &root->cs_mask);
        cpuset_rel(root);
        if (error)
                return (error);
        error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
        if (error == 0)
                error = cpuset_setproc(-1, set, NULL);
        cpuset_rel(set);
        return (error);
}
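/*
 * Userland sketch of the above (illustrative only): cpuset(2) places the
 * calling process in a freshly numbered set and reports its id, which can
 * then be handed to cpuset_setaffinity(2) at the CPUSET level:
 *
 *	cpusetid_t id;
 *	cpuset_t mask;
 *
 *	if (cpuset(&id) != 0)
 *		err(1, "cpuset");
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET, id,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */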
995d7f687fcSJeff Roberson */ 996ea2ebdc1SEdward Tomasz Napierala if (which != CPU_WHICH_PID) 997d7f687fcSJeff Roberson return (EINVAL); 998ea2ebdc1SEdward Tomasz Napierala set = cpuset_lookup(setid, td); 999d7f687fcSJeff Roberson if (set == NULL) 1000d7f687fcSJeff Roberson return (ESRCH); 1001ea2ebdc1SEdward Tomasz Napierala error = cpuset_setproc(id, set, NULL); 1002d7f687fcSJeff Roberson cpuset_rel(set); 1003d7f687fcSJeff Roberson return (error); 1004d7f687fcSJeff Roberson } 1005d7f687fcSJeff Roberson 1006d7f687fcSJeff Roberson #ifndef _SYS_SYSPROTO_H_ 1007d7f687fcSJeff Roberson struct cpuset_getid_args { 1008d7f687fcSJeff Roberson cpulevel_t level; 1009d7f687fcSJeff Roberson cpuwhich_t which; 1010d7f687fcSJeff Roberson id_t id; 1011d7f687fcSJeff Roberson cpusetid_t *setid; 10122b69bb1fSKevin Lo }; 1013d7f687fcSJeff Roberson #endif 1014d7f687fcSJeff Roberson int 10158451d0ddSKip Macy sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap) 1016d7f687fcSJeff Roberson { 1017ea2ebdc1SEdward Tomasz Napierala 1018ea2ebdc1SEdward Tomasz Napierala return (kern_cpuset_getid(td, uap->level, uap->which, uap->id, 1019ea2ebdc1SEdward Tomasz Napierala uap->setid)); 1020ea2ebdc1SEdward Tomasz Napierala } 1021ea2ebdc1SEdward Tomasz Napierala 1022ea2ebdc1SEdward Tomasz Napierala int 1023ea2ebdc1SEdward Tomasz Napierala kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, 1024ea2ebdc1SEdward Tomasz Napierala id_t id, cpusetid_t *setid) 1025ea2ebdc1SEdward Tomasz Napierala { 1026d7f687fcSJeff Roberson struct cpuset *nset; 1027d7f687fcSJeff Roberson struct cpuset *set; 1028d7f687fcSJeff Roberson struct thread *ttd; 1029d7f687fcSJeff Roberson struct proc *p; 1030ea2ebdc1SEdward Tomasz Napierala cpusetid_t tmpid; 1031d7f687fcSJeff Roberson int error; 1032d7f687fcSJeff Roberson 1033ea2ebdc1SEdward Tomasz Napierala if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET) 1034d7f687fcSJeff Roberson return (EINVAL); 1035ea2ebdc1SEdward Tomasz Napierala error = cpuset_which(which, id, &p, &ttd, &set); 1036d7f687fcSJeff Roberson if (error) 1037d7f687fcSJeff Roberson return (error); 1038ea2ebdc1SEdward Tomasz Napierala switch (which) { 1039d7f687fcSJeff Roberson case CPU_WHICH_TID: 1040d7f687fcSJeff Roberson case CPU_WHICH_PID: 1041d7f687fcSJeff Roberson thread_lock(ttd); 1042a03ee000SJeff Roberson set = cpuset_refbase(ttd->td_cpuset); 1043d7f687fcSJeff Roberson thread_unlock(ttd); 1044d7f687fcSJeff Roberson PROC_UNLOCK(p); 1045d7f687fcSJeff Roberson break; 1046d7f687fcSJeff Roberson case CPU_WHICH_CPUSET: 1047413628a7SBjoern A. 
Zeeb case CPU_WHICH_JAIL: 1048d7f687fcSJeff Roberson break; 10499b33b154SJeff Roberson case CPU_WHICH_IRQ: 1050c0ae6688SJohn Baldwin case CPU_WHICH_DOMAIN: 10519b33b154SJeff Roberson return (EINVAL); 1052d7f687fcSJeff Roberson } 1053ea2ebdc1SEdward Tomasz Napierala switch (level) { 1054d7f687fcSJeff Roberson case CPU_LEVEL_ROOT: 1055a03ee000SJeff Roberson nset = cpuset_refroot(set); 1056d7f687fcSJeff Roberson cpuset_rel(set); 1057d7f687fcSJeff Roberson set = nset; 1058d7f687fcSJeff Roberson break; 1059d7f687fcSJeff Roberson case CPU_LEVEL_CPUSET: 1060d7f687fcSJeff Roberson break; 1061d7f687fcSJeff Roberson case CPU_LEVEL_WHICH: 1062d7f687fcSJeff Roberson break; 1063d7f687fcSJeff Roberson } 1064ea2ebdc1SEdward Tomasz Napierala tmpid = set->cs_id; 1065d7f687fcSJeff Roberson cpuset_rel(set); 1066d7f687fcSJeff Roberson if (error == 0) 1067a1d0659cSJung-uk Kim error = copyout(&tmpid, setid, sizeof(tmpid)); 1068d7f687fcSJeff Roberson 1069d7f687fcSJeff Roberson return (error); 1070d7f687fcSJeff Roberson } 1071d7f687fcSJeff Roberson 1072d7f687fcSJeff Roberson #ifndef _SYS_SYSPROTO_H_ 1073d7f687fcSJeff Roberson struct cpuset_getaffinity_args { 1074d7f687fcSJeff Roberson cpulevel_t level; 1075d7f687fcSJeff Roberson cpuwhich_t which; 10767f64829aSRuslan Ermilov id_t id; 10777f64829aSRuslan Ermilov size_t cpusetsize; 10787f64829aSRuslan Ermilov cpuset_t *mask; 1079d7f687fcSJeff Roberson }; 1080d7f687fcSJeff Roberson #endif 1081d7f687fcSJeff Roberson int 10828451d0ddSKip Macy sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap) 1083d7f687fcSJeff Roberson { 108496ee4310SEdward Tomasz Napierala 108596ee4310SEdward Tomasz Napierala return (kern_cpuset_getaffinity(td, uap->level, uap->which, 108696ee4310SEdward Tomasz Napierala uap->id, uap->cpusetsize, uap->mask)); 108796ee4310SEdward Tomasz Napierala } 108896ee4310SEdward Tomasz Napierala 108996ee4310SEdward Tomasz Napierala int 109096ee4310SEdward Tomasz Napierala kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, 109196ee4310SEdward Tomasz Napierala id_t id, size_t cpusetsize, cpuset_t *maskp) 109296ee4310SEdward Tomasz Napierala { 1093d7f687fcSJeff Roberson struct thread *ttd; 1094d7f687fcSJeff Roberson struct cpuset *nset; 1095d7f687fcSJeff Roberson struct cpuset *set; 1096d7f687fcSJeff Roberson struct proc *p; 1097d7f687fcSJeff Roberson cpuset_t *mask; 1098d7f687fcSJeff Roberson int error; 10997f64829aSRuslan Ermilov size_t size; 1100d7f687fcSJeff Roberson 110196ee4310SEdward Tomasz Napierala if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY) 1102d7f687fcSJeff Roberson return (ERANGE); 1103f299c47bSAllan Jude /* In Capability mode, you can only get your own CPU set. 
*/ 1104f299c47bSAllan Jude if (IN_CAPABILITY_MODE(td)) { 1105f299c47bSAllan Jude if (level != CPU_LEVEL_WHICH) 1106f299c47bSAllan Jude return (ECAPMODE); 1107f299c47bSAllan Jude if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) 1108f299c47bSAllan Jude return (ECAPMODE); 1109f299c47bSAllan Jude if (id != -1) 1110f299c47bSAllan Jude return (ECAPMODE); 1111f299c47bSAllan Jude } 111296ee4310SEdward Tomasz Napierala size = cpusetsize; 1113d7f687fcSJeff Roberson mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 111496ee4310SEdward Tomasz Napierala error = cpuset_which(which, id, &p, &ttd, &set); 1115d7f687fcSJeff Roberson if (error) 1116d7f687fcSJeff Roberson goto out; 111796ee4310SEdward Tomasz Napierala switch (level) { 1118d7f687fcSJeff Roberson case CPU_LEVEL_ROOT: 1119d7f687fcSJeff Roberson case CPU_LEVEL_CPUSET: 112096ee4310SEdward Tomasz Napierala switch (which) { 1121d7f687fcSJeff Roberson case CPU_WHICH_TID: 1122d7f687fcSJeff Roberson case CPU_WHICH_PID: 1123d7f687fcSJeff Roberson thread_lock(ttd); 1124d7f687fcSJeff Roberson set = cpuset_ref(ttd->td_cpuset); 1125d7f687fcSJeff Roberson thread_unlock(ttd); 1126d7f687fcSJeff Roberson break; 1127d7f687fcSJeff Roberson case CPU_WHICH_CPUSET: 1128413628a7SBjoern A. Zeeb case CPU_WHICH_JAIL: 1129d7f687fcSJeff Roberson break; 11309b33b154SJeff Roberson case CPU_WHICH_IRQ: 113129dfb631SConrad Meyer case CPU_WHICH_INTRHANDLER: 113229dfb631SConrad Meyer case CPU_WHICH_ITHREAD: 1133c0ae6688SJohn Baldwin case CPU_WHICH_DOMAIN: 11349b33b154SJeff Roberson error = EINVAL; 11359b33b154SJeff Roberson goto out; 1136d7f687fcSJeff Roberson } 113796ee4310SEdward Tomasz Napierala if (level == CPU_LEVEL_ROOT) 1138a03ee000SJeff Roberson nset = cpuset_refroot(set); 1139d7f687fcSJeff Roberson else 1140a03ee000SJeff Roberson nset = cpuset_refbase(set); 1141d7f687fcSJeff Roberson CPU_COPY(&nset->cs_mask, mask); 1142d7f687fcSJeff Roberson cpuset_rel(nset); 1143d7f687fcSJeff Roberson break; 1144d7f687fcSJeff Roberson case CPU_LEVEL_WHICH: 114596ee4310SEdward Tomasz Napierala switch (which) { 1146d7f687fcSJeff Roberson case CPU_WHICH_TID: 1147d7f687fcSJeff Roberson thread_lock(ttd); 1148d7f687fcSJeff Roberson CPU_COPY(&ttd->td_cpuset->cs_mask, mask); 1149d7f687fcSJeff Roberson thread_unlock(ttd); 1150d7f687fcSJeff Roberson break; 1151d7f687fcSJeff Roberson case CPU_WHICH_PID: 1152d7f687fcSJeff Roberson FOREACH_THREAD_IN_PROC(p, ttd) { 1153d7f687fcSJeff Roberson thread_lock(ttd); 1154d7f687fcSJeff Roberson CPU_OR(mask, &ttd->td_cpuset->cs_mask); 1155d7f687fcSJeff Roberson thread_unlock(ttd); 1156d7f687fcSJeff Roberson } 1157d7f687fcSJeff Roberson break; 1158d7f687fcSJeff Roberson case CPU_WHICH_CPUSET: 1159413628a7SBjoern A. 
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, maskp, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

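/*
 * Illustrative only: a minimal userland sketch (not part of this file) of
 * pinning the calling thread to a single CPU via cpuset_setaffinity(2),
 * which enters the kernel through sys_cpuset_setaffinity() above.  The
 * helper name and the CPU number (2) are arbitrary example choices:
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *
 *	int
 *	pin_to_cpu2(void)
 *	{
 *		cpuset_t mask;
 *
 *		CPU_ZERO(&mask);
 *		CPU_SET(2, &mask);
 *		return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
 *		    -1, sizeof(mask), &mask));
 *	}
 */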
int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only set your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(maskp, mask, cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask);
			break;
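		/*
		 * For a numbered set or a jail's set, look the set up and
		 * apply the new mask with cpuset_modify(); interrupt targets
		 * are handed to intr_setaffinity().
		 */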
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */
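
/*
 * Usage note: when the kernel is built with DDB, the command defined above
 * is entered at the debugger prompt as "show cpusets"; it walks the
 * cpuset_ids list and prints one line per set (pointer, id, reference
 * count, flags and parent id), with the CPU mask rendered by
 * ddb_display_cpuset().
 */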