/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in one of the
 * following states:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2-dimensional bitmap (the first dimension for priority class, the
 * second for CPUs in that class). Therefore a typical application without
 * affinity restrictions can find a suitable CPU with O(1) complexity
 * (e.g. two bit searches). For tasks with affinity restrictions, the
 * algorithm has a worst case complexity of O(min(102, nr_domcpus)), though
 * the scenario that yields the worst case search is fairly contrived.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include "sched.h"

/* Convert between the 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
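
/*
 * Worked example of the mapping (a sketch, assuming the historical
 * constants from cpupri.h: MAX_RT_PRIO == 100, MAX_PRIO == 140,
 * CPUPRI_IDLE == 0, CPUPRI_NORMAL == 1; CPUPRI_INVALID simply passes
 * through unchanged):
 *
 *	task->prio		cpupri
 *	----------		------
 *	140 (MAX_PRIO)		0   (CPUPRI_IDLE)
 *	100..139 (CFS tasks)	1   (CPUPRI_NORMAL)
 *	99 (lowest RT prio)	2
 *	0  (highest RT prio)	101
 *
 * i.e. an RT task of priority p lands in slot MAX_RT_PRIO - p + 1, so a
 * higher RT priority always yields a larger cpupri index.
 */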

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 * Ideally, we only want to look at masks that are set.
		 *
		 * If a mask is not set, then the only thing wrong is that we
		 * did a little more work than necessary.
		 *
		 * If we read a zero count but the mask is set, because of the
		 * memory barriers, that can only happen when the highest prio
		 * task for a run queue has left the run queue, in which case
		 * it will be followed by a pull. If the task we are processing
		 * fails to find a proper place to go, that pull request will
		 * pull this task if the run queue is running at a lower
		 * priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask. If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Return: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
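
/*
 * The barrier pairing above is the classic publish/consume pattern:
 * cpupri_set() writes mask-then-count, cpupri_find() reads
 * count-then-mask. A minimal user-space sketch of the same pattern with
 * C11 atomics follows (compiled out, illustrative only; the struct and
 * function names are hypothetical, not kernel APIs, and the fences are
 * rough analogues of the kernel barriers, not exact equivalents):
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct vec_sketch {
	atomic_int count;	/* how many CPUs are in this class */
	atomic_ulong mask;	/* stands in for the per-class cpumask */
};

/* Writer side, as in cpupri_set(): set the mask bit, then raise count. */
static void vec_sketch_add(struct vec_sketch *v, int cpu)
{
	atomic_fetch_or_explicit(&v->mask, 1UL << cpu, memory_order_relaxed);
	/* Publish the mask before the count (cf. smp_mb__before_atomic()). */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&v->count, 1, memory_order_relaxed);
}

/* Reader side, as in cpupri_find(): check count, then read the mask. */
static bool vec_sketch_test(struct vec_sketch *v, int cpu)
{
	if (!atomic_load_explicit(&v->count, memory_order_relaxed))
		return false;
	/* Order the count read before the mask read (cf. smp_rmb()). */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&v->mask, memory_order_relaxed) &
	       (1UL << cpu);
}
#endif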

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
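
/*
 * Usage sketch: the primary consumer of this API is the RT scheduler's
 * push/pull logic. The hypothetical caller below is loosely modeled on
 * find_lowest_rq() in kernel/sched/rt.c (names and details simplified;
 * compiled out, illustrative only):
 */
#if 0
static int find_lowest_cpu_sketch(struct cpupri *cp, struct task_struct *p,
				  struct cpumask *lowest_mask)
{
	/*
	 * Fill lowest_mask with every CPU whose priority class is below
	 * p's, honoring p's affinity; bail out if no such CPU exists.
	 */
	if (!cpupri_find(cp, p, lowest_mask))
		return -1;

	/* Prefer p's current CPU if it qualifies, to keep its cache warm. */
	if (cpumask_test_cpu(task_cpu(p), lowest_mask))
		return task_cpu(p);

	/* Otherwise any qualifying CPU will do for this sketch. */
	return cpumask_any(lowest_mask);
}
#endif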