/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#ifdef NUMA
/*
 * Iterators are written such that the first nowait pass has as short a
 * codepath as possible to eliminate bloat from the allocator.  It is
 * assumed that most allocations are successful.
 */

static int vm_domainset_default_stride = 64;

/*
 * Determine which policy is to be used for this allocation.
 */
static void
vm_domainset_iter_init(struct vm_domainset_iter *di, struct domainset *ds,
    int *iter, struct vm_object *obj, vm_pindex_t pindex)
{

	di->di_domain = ds;
	di->di_iter = iter;
	di->di_policy = ds->ds_policy;
	if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE) {
#if VM_NRESERVLEVEL > 0
		if (vm_object_reserv(obj)) {
			/*
			 * Color the pindex so we end up on the correct
			 * reservation boundary.
			 */
			pindex += obj->pg_color;
			pindex >>= VM_LEVEL_0_ORDER;
		} else
#endif
			pindex /= vm_domainset_default_stride;
		/*
		 * Offset pindex so the first page of each object does
		 * not end up in domain 0.
		 */
		if (obj != NULL)
			pindex += (((uintptr_t)obj) / sizeof(*obj));
		di->di_offset = pindex;
	}
	/* Skip domains below min on the first pass. */
	di->di_minskip = true;
}
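
/*
 * A worked illustration of the interleave offset computed above (assuming no
 * reservation coloring and the default stride of 64):
 * vm_domainset_iter_interleave() ends up selecting
 *
 *	domain = ds_order[(pindex / 64 + (uintptr_t)obj / sizeof(*obj)) % ds_cnt]
 *
 * so on a two-domain system consecutive 64-page runs of an object alternate
 * between the domains, while the per-object pointer term staggers the
 * starting domain across objects.
 */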

static void
vm_domainset_iter_rr(struct vm_domainset_iter *di, int *domain)
{

	*domain = di->di_domain->ds_order[
	    ++(*di->di_iter) % di->di_domain->ds_cnt];
}

static void
vm_domainset_iter_prefer(struct vm_domainset_iter *di, int *domain)
{
	int d;

	do {
		d = di->di_domain->ds_order[
		    ++(*di->di_iter) % di->di_domain->ds_cnt];
	} while (d == di->di_domain->ds_prefer);
	*domain = d;
}

static void
vm_domainset_iter_interleave(struct vm_domainset_iter *di, int *domain)
{
	int d;

	d = di->di_offset % di->di_domain->ds_cnt;
	*di->di_iter = d;
	*domain = di->di_domain->ds_order[d];
}

static void
vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
{

	KASSERT(di->di_n > 0,
	    ("vm_domainset_iter_next: Invalid n %d", di->di_n));
	switch (di->di_policy) {
	case DOMAINSET_POLICY_FIRSTTOUCH:
		/*
		 * To prevent impossible allocations we convert an invalid
		 * first-touch to round-robin.
		 */
		/* FALLTHROUGH */
	case DOMAINSET_POLICY_INTERLEAVE:
		/* FALLTHROUGH */
	case DOMAINSET_POLICY_ROUNDROBIN:
		vm_domainset_iter_rr(di, domain);
		break;
	case DOMAINSET_POLICY_PREFER:
		vm_domainset_iter_prefer(di, domain);
		break;
	default:
		panic("vm_domainset_iter_next: Unknown policy %d",
		    di->di_policy);
	}
	KASSERT(*domain < vm_ndomains,
	    ("vm_domainset_iter_next: Invalid domain %d", *domain));
}

static void
vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain)
{

	switch (di->di_policy) {
	case DOMAINSET_POLICY_FIRSTTOUCH:
		*domain = PCPU_GET(domain);
		if (DOMAINSET_ISSET(*domain, &di->di_domain->ds_mask)) {
			/*
			 * Add an extra iteration because we will visit the
			 * current domain a second time in the rr iterator.
			 */
			di->di_n = di->di_domain->ds_cnt + 1;
			break;
		}
		/*
		 * To prevent impossible allocations we convert an invalid
		 * first-touch to round-robin.
		 */
		/* FALLTHROUGH */
	case DOMAINSET_POLICY_ROUNDROBIN:
		di->di_n = di->di_domain->ds_cnt;
		vm_domainset_iter_rr(di, domain);
		break;
	case DOMAINSET_POLICY_PREFER:
		*domain = di->di_domain->ds_prefer;
		di->di_n = di->di_domain->ds_cnt;
		break;
	case DOMAINSET_POLICY_INTERLEAVE:
		vm_domainset_iter_interleave(di, domain);
		di->di_n = di->di_domain->ds_cnt;
		break;
	default:
		panic("vm_domainset_iter_first: Unknown policy %d",
		    di->di_policy);
	}
	KASSERT(di->di_n > 0,
	    ("vm_domainset_iter_first: Invalid n %d", di->di_n));
	KASSERT(*domain < vm_ndomains,
	    ("vm_domainset_iter_first: Invalid domain %d", *domain));
}

void
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
    vm_pindex_t pindex, int *domain, int *req)
{
	struct domainset_ref *dr;

	/*
	 * Object policy takes precedence over thread policy.  The policies
	 * are immutable and unsynchronized.  Updates can race but pointer
	 * loads are assumed to be atomic.
	 */
	if (obj != NULL && obj->domain.dr_policy != NULL)
		dr = &obj->domain;
	else
		dr = &curthread->td_domain;
	vm_domainset_iter_init(di, dr->dr_policy, &dr->dr_iter, obj, pindex);
	di->di_flags = *req;
	*req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
	    VM_ALLOC_NOWAIT;
	vm_domainset_iter_first(di, domain);
	if (vm_page_count_min_domain(*domain))
		vm_domainset_iter_page(di, obj, domain);
}
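
/*
 * vm_domainset_iter_page_init() above and vm_domainset_iter_page() below are
 * meant to be driven as a pair: the init strips VM_ALLOC_WAITOK/WAITFAIL from
 * the caller's request so the first pass over the domains never sleeps, and
 * the iterator hands back the next candidate domain, sleeping or failing only
 * once every domain has been tried.  A minimal sketch of a caller, with
 * alloc_one_page() standing in for whatever per-domain NOWAIT allocation the
 * caller performs:
 *
 *	struct vm_domainset_iter di;
 *	vm_page_t m;
 *	int domain, req;
 *
 *	req = VM_ALLOC_NORMAL | VM_ALLOC_WAITOK;
 *	vm_domainset_iter_page_init(&di, obj, pindex, &domain, &req);
 *	do {
 *		m = alloc_one_page(obj, pindex, domain, req);
 *	} while (m == NULL && vm_domainset_iter_page(&di, obj, &domain) == 0);
 */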

int
vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
    int *domain)
{

	/* If there are more domains to visit, run the iterator. */
	while (--di->di_n != 0) {
		vm_domainset_iter_next(di, domain);
		if (!di->di_minskip || !vm_page_count_min_domain(*domain))
			return (0);
	}

	/* If we skipped domains below min, restart the search. */
	if (di->di_minskip) {
		di->di_minskip = false;
		vm_domainset_iter_first(di, domain);
		return (0);
	}

	/* If we visited all domains and this was a NOWAIT request, fail. */
	if ((di->di_flags & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) == 0)
		return (ENOMEM);

	/* Wait for one of the domains to accumulate some free pages. */
	if (obj != NULL)
		VM_OBJECT_WUNLOCK(obj);
	vm_wait_doms(&di->di_domain->ds_mask, 0);
	if (obj != NULL)
		VM_OBJECT_WLOCK(obj);
	if ((di->di_flags & VM_ALLOC_WAITFAIL) != 0)
		return (ENOMEM);

	/* Restart the search. */
	vm_domainset_iter_first(di, domain);

	return (0);
}

static void
_vm_domainset_iter_policy_init(struct vm_domainset_iter *di, int *domain,
    int *flags)
{

	di->di_flags = *flags;
	*flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
	vm_domainset_iter_first(di, domain);
	if (vm_page_count_min_domain(*domain))
		vm_domainset_iter_policy(di, domain);
}

void
vm_domainset_iter_policy_init(struct vm_domainset_iter *di,
    struct domainset *ds, int *domain, int *flags)
{

	vm_domainset_iter_init(di, ds, &curthread->td_domain.dr_iter, NULL, 0);
	_vm_domainset_iter_policy_init(di, domain, flags);
}

void
vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *di,
    struct domainset_ref *dr, int *domain, int *flags)
{

	vm_domainset_iter_init(di, dr->dr_policy, &dr->dr_iter, NULL, 0);
	_vm_domainset_iter_policy_init(di, domain, flags);
}
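
/*
 * vm_domainset_iter_policy() below pairs with the policy iterator init
 * functions above in the same way, but for malloc-style flags: the init
 * rewrites the caller's flags to M_NOWAIT for the first pass, and the
 * iterator sleeps in vm_wait_doms() only if the original request was
 * M_WAITOK.  A minimal sketch, with alloc_from_domain() standing in for the
 * caller's domain-aware NOWAIT allocation and ds/size supplied by the caller:
 *
 *	struct vm_domainset_iter di;
 *	void *p;
 *	int domain, flags;
 *
 *	flags = M_WAITOK;
 *	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 *	do {
 *		p = alloc_from_domain(domain, size, flags);
 *	} while (p == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
 */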

int
vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
{

	/* If there are more domains to visit, run the iterator. */
	while (--di->di_n != 0) {
		vm_domainset_iter_next(di, domain);
		if (!di->di_minskip || !vm_page_count_min_domain(*domain))
			return (0);
	}

	/* If we skipped domains below min, restart the search. */
	if (di->di_minskip) {
		di->di_minskip = false;
		vm_domainset_iter_first(di, domain);
		return (0);
	}

	/* If we visited all domains and this was a NOWAIT request, fail. */
	if ((di->di_flags & M_WAITOK) == 0)
		return (ENOMEM);

	/* Wait for one of the domains to accumulate some free pages. */
	vm_wait_doms(&di->di_domain->ds_mask, 0);

	/* Restart the search. */
	vm_domainset_iter_first(di, domain);

	return (0);
}

#else /* !NUMA */

int
vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
    int *domain)
{

	return (EJUSTRETURN);
}

void
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
    vm_pindex_t pindex, int *domain, int *flags)
{

	*domain = 0;
}

int
vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
{

	return (EJUSTRETURN);
}

void
vm_domainset_iter_policy_init(struct vm_domainset_iter *di,
    struct domainset *ds, int *domain, int *flags)
{

	*domain = 0;
}

void
vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *di,
    struct domainset_ref *dr, int *domain, int *flags)
{

	*domain = 0;
}

#endif /* NUMA */