/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done like so:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(IOPRIO_WHO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 *
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/cred.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/sched/user.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>

/*
 * Set the io priority of @task. The caller must either own @task (its uid
 * matches our uid or euid) or have CAP_SYS_NICE, and the security hook must
 * agree; the new priority is then stored in the task's io_context.
 */
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc->ioprio = ioprio;
		put_io_context(ioc);
	}

	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through, rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!uid_valid(uid))
				break;
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			for_each_process_thread(g, p) {
				if (!uid_eq(task_uid(p), uid) ||
				    !task_pid_vnr(p))
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			}
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}

static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	task_lock(p);
	if (p->io_context)
		ret = p->io_context->ioprio;
	task_unlock(p);
out:
	return ret;
}

/*
 * Return the stronger of two io priorities: the lower-numbered class
 * (RT beats BE beats IDLE) wins, and within the same class the lower
 * prio value (i.e. the higher priority) wins. Invalid values are
 * treated as BE/IOPRIO_NORM.
 */
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass;
	unsigned short bclass;

	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	if (!ioprio_valid(bprio))
		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	aclass = IOPRIO_PRIO_CLASS(aprio);
	bclass = IOPRIO_PRIO_CLASS(bprio);
	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret = -ESRCH;
	int tmpio;

	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = get_task_ioprio(p);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			for_each_process_thread(g, p) {
				if (!uid_eq(task_uid(p), user->uid) ||
				    !task_pid_vnr(p))
					continue;
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			}

			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}
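
/*
 * Illustrative sketch only, not part of this file: glibc does not wrap these
 * syscalls, so a userspace caller typically goes through syscall(2) (with
 * SYS_ioprio_get from <sys/syscall.h>) and unpacks the composite value with
 * the same shift/mask used above. The IOPRIO_* names below come from the
 * kernel headers and would have to be defined on the caller's side, e.g.:
 *
 *	int prio  = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, pid);
 *	int class = IOPRIO_PRIO_CLASS(prio);
 *	int data  = IOPRIO_PRIO_DATA(prio);
 */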