// SPDX-License-Identifier: GPL-2.0-only

#include <linux/prctl.h>
#include "sched.h"

/*
 * A simple wrapper around refcount. An allocated sched_core_cookie's
 * address is used to compute the cookie of the task.
 */
struct sched_core_cookie {
	refcount_t refcnt;
};

unsigned long sched_core_alloc_cookie(void)
{
	struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
	if (!ck)
		return 0;

	refcount_set(&ck->refcnt, 1);
	sched_core_get();

	return (unsigned long)ck;
}

void sched_core_put_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
		kfree(ptr);
		sched_core_put();
	}
}

unsigned long sched_core_get_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr)
		refcount_inc(&ptr->refcnt);

	return cookie;
}

/*
 * sched_core_update_cookie - replace the cookie on a task
 * @p: the task to update
 * @cookie: the new cookie
 *
 * Effectively exchange the task cookie; the caller is responsible for
 * lifetimes on both ends.
 *
 * Returns: the old cookie
 */
unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie)
{
	unsigned long old_cookie;
	struct rq_flags rf;
	struct rq *rq;
	bool enqueued;

	rq = task_rq_lock(p, &rf);

	/*
	 * Since creating a cookie implies sched_core_get(), and since we
	 * cannot set a cookie until after we've created it (and, likewise,
	 * cannot destroy a cookie until after we've removed it), core
	 * scheduling must be enabled here.
	 */
	SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));

	enqueued = sched_core_enqueued(p);
	if (enqueued)
		sched_core_dequeue(rq, p);

	old_cookie = p->core_cookie;
	p->core_cookie = cookie;

	if (enqueued)
		sched_core_enqueue(rq, p);

	/*
	 * If the task is currently running, it may not be compatible anymore
	 * after the cookie change, so enter the scheduler on its CPU to
	 * schedule it away.
	 */
	if (task_running(rq, p))
		resched_curr(rq);

	task_rq_unlock(rq, p, &rf);

	return old_cookie;
}

static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
	unsigned long cookie, flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cookie = sched_core_get_cookie(p->core_cookie);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return cookie;
}

void sched_core_fork(struct task_struct *p)
{
	RB_CLEAR_NODE(&p->core_node);
	p->core_cookie = sched_core_clone_cookie(current);
}

void sched_core_free(struct task_struct *p)
{
	sched_core_put_cookie(p->core_cookie);
}

static void __sched_core_set(struct task_struct *p, unsigned long cookie)
{
	cookie = sched_core_get_cookie(cookie);
	cookie = sched_core_update_cookie(p, cookie);
	sched_core_put_cookie(cookie);
}

/* Called from prctl interface: PR_SCHED_CORE */
int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
			 unsigned long uaddr)
{
	unsigned long cookie = 0, id = 0;
	struct task_struct *task, *p;
	struct pid *grp;
	int err = 0;

	if (!static_branch_likely(&sched_smt_present))
		return -ENODEV;

	if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||
	    (cmd != PR_SCHED_CORE_GET && uaddr))
		return -EINVAL;

	rcu_read_lock();
	if (pid == 0) {
		task = current;
	} else {
		task = find_task_by_vpid(pid);
		if (!task) {
			rcu_read_unlock();
			return -ESRCH;
		}
	}
	get_task_struct(task);
	rcu_read_unlock();

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		err = -EPERM;
		goto out;
	}

	switch (cmd) {
	case PR_SCHED_CORE_GET:
		if (type != PIDTYPE_PID || uaddr & 7) {
			err = -EINVAL;
			goto out;
		}
		cookie = sched_core_clone_cookie(task);
		if (cookie) {
			/* XXX improve ? */
			ptr_to_hashval((void *)cookie, &id);
		}
		err = put_user(id, (u64 __user *)uaddr);
		goto out;

	case PR_SCHED_CORE_CREATE:
		cookie = sched_core_alloc_cookie();
		if (!cookie) {
			err = -ENOMEM;
			goto out;
		}
		break;

	case PR_SCHED_CORE_SHARE_TO:
		cookie = sched_core_clone_cookie(current);
		break;

	case PR_SCHED_CORE_SHARE_FROM:
		if (type != PIDTYPE_PID) {
			err = -EINVAL;
			goto out;
		}
		cookie = sched_core_clone_cookie(task);
		__sched_core_set(current, cookie);
		goto out;

	default:
		err = -EINVAL;
		goto out;
	}

	if (type == PIDTYPE_PID) {
		__sched_core_set(task, cookie);
		goto out;
	}

	read_lock(&tasklist_lock);
	grp = task_pid_type(task, type);

	do_each_pid_thread(grp, type, p) {
		if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) {
			err = -EPERM;
			goto out_tasklist;
		}
	} while_each_pid_thread(grp, type, p);

	do_each_pid_thread(grp, type, p) {
		__sched_core_set(p, cookie);
	} while_each_pid_thread(grp, type, p);
out_tasklist:
	read_unlock(&tasklist_lock);

out:
	sched_core_put_cookie(cookie);
	put_task_struct(task);
	return err;
}
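
/*
 * Illustrative userspace sketch (not part of this file and not built with
 * the kernel): one plausible way to drive the PR_SCHED_CORE prctl serviced
 * by sched_core_share_pid() above. It assumes libc/kernel headers that
 * already carry the PR_SCHED_CORE_* and PR_SCHED_CORE_SCOPE_* definitions,
 * and a kernel with core scheduling support running on SMT hardware
 * (otherwise the prctl fails with ENODEV). PR_SCHED_CORE_SCOPE_THREAD
 * corresponds to the PIDTYPE_PID scope handled above.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		// PR_SCHED_CORE_GET writes a u64; the kernel insists on an
 *		// 8-byte aligned destination (the uaddr & 7 check above).
 *		uint64_t id __attribute__((aligned(8))) = 0;
 *
 *		// Allocate a fresh cookie and attach it to this thread; only
 *		// tasks carrying the same cookie may share an SMT core.
 *		if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *			  PR_SCHED_CORE_SCOPE_THREAD, 0)) {
 *			perror("PR_SCHED_CORE_CREATE");
 *			return 1;
 *		}
 *
 *		// Read back the opaque, hashed identifier for the cookie.
 *		if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0,
 *			  PR_SCHED_CORE_SCOPE_THREAD, (unsigned long)&id)) {
 *			perror("PR_SCHED_CORE_GET");
 *			return 1;
 *		}
 *
 *		printf("core sched cookie id: %#llx\n",
 *		       (unsigned long long)id);
 *		return 0;
 *	}
 */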