xref: /linux/arch/mips/kernel/mips-mt-fpaff.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * General MIPS MT support routines, usable in AP/SP and SMVP.
 * Copyright (C) 2005 Mips Technologies, Inc
 */
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/security.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

static int fpaff_threshold = -1;
unsigned long mt_fpemul_threshold;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical to the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched/core.c changes.
 */
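
/*
 * Wiring note (hedged; inferred from the syscall entry code rather than
 * stated in this file): arch/mips/kernel/scall*.S is expected to #define
 * sys_sched_setaffinity/sys_sched_getaffinity to the mipsmt_ variants
 * below when CONFIG_MIPS_MT_FPAFF is enabled, so userspace reaches them
 * through the ordinary affinity system calls.
 */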

/*
 * find_process_by_pid - find a process with a matching PID value.
 * Used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
 * cloned here.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * Check whether the current process's effective UID matches the target
 * process's real or effective UID.
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < cpumask_size())
		return -EINVAL;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}

	/*
	 * Copy the user mask only after new_mask has been allocated:
	 * with CONFIG_CPUMASK_OFFSTACK a cpumask_var_t is a pointer, so
	 * copying into &new_mask before allocation would clobber the
	 * pointer itself and the copied data would then be lost when
	 * alloc_cpumask_var() overwrites it.
	 */
	if (copy_from_user(new_mask, user_mask_ptr, cpumask_size())) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
		retval = -EPERM;
		goto out_unlock;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
		cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
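
/*
 * Illustrative userspace sketch (assumes the scall*.S wiring noted
 * above; not part of this file): the override is exercised through the
 * standard syscall, e.g.
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) < 0)
 *		perror("sched_setaffinity");
 *
 * For a task past mt_fpemul_threshold (TIF_FPUBOUND set), the requested
 * mask is intersected with mt_fpu_cpumask as above.
 */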

/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t allowed, mask;
	int retval;
	struct task_struct *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	get_online_cpus();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;
	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
	cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
	read_unlock(&tasklist_lock);
	put_online_cpus();
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}
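
/*
 * Note that, like sys_sched_getaffinity(), success is signalled by
 * returning the size in bytes of the copied mask rather than 0; the C
 * library wrapper is expected to translate this back into the usual
 * 0-on-success convention.
 */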

static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;
}
__setup("fpaff=", fpaff_thresh);
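
/*
 * Usage sketch (inferred from the handler above): booting with
 * "fpaff=<n>" on the kernel command line sets the threshold directly,
 * e.g. "fpaff=100" should mark a task FPU-bound after roughly 100
 * emulated FP operations, bypassing the FPUSEFACTOR heuristic below.
 */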

/*
 * FPU Use Factor empirically derived from experiments on 34K
 */
#define FPUSEFACTOR 2000

static __init int mt_fp_affinity_init(void)
{
	if (fpaff_threshold >= 0) {
		mt_fpemul_threshold = fpaff_threshold;
	} else {
		mt_fpemul_threshold =
			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
	}
	printk(KERN_DEBUG "FPU Affinity set after %lu emulations\n",
	       mt_fpemul_threshold);

	return 0;
}
arch_initcall(mt_fp_affinity_init);
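
/*
 * Worked example with hypothetical numbers: for HZ = 100 and
 * loops_per_jiffy = 250000, the default heuristic gives
 *
 *	(2000 * (250000 / (500000 / 100))) / 100
 *	  = (2000 * 50) / 100
 *	  = 1000 emulations
 *
 * i.e. the threshold scales linearly with CPU speed as measured by
 * loops_per_jiffy.
 */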
219