// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain
 * struct llist_node.
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
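
/* Worked example (illustrative, values derived from the tables above):
 * how request sizes map to indexes in the 'sizes' table used by
 * bpf_mem_alloc_init() below.
 *
 *	bpf_mem_cache_idx(20);   == size_index[2] - 1 == 3  -> 32 byte bucket
 *	bpf_mem_cache_idx(96);   == size_index[11] - 1 == 0 -> 96 byte bucket
 *	bpf_mem_cache_idx(300);  == fls(299) - 2 == 7       -> 512 byte bucket
 *	bpf_mem_cache_idx(5000); == -1, larger than the biggest bucket
 */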
#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;

	struct rcu_head rcu;
	struct llist_head free_by_rcu;
	struct llist_head waiting_for_gp;
	atomic_t call_rcu_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node)
{
	/* Allocate, but don't deplete atomic reserves that typical
	 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
	 * will allocate from the current numa node which is what we
	 * want here.
	 */
	gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;

	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags, node);
}
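
/* Illustrative layout (a sketch derived from __alloc() and free_one(),
 * not separate documentation) of a unit when bpf_mem_alloc_init() was
 * called with percpu == true. The kmalloc-ed chunk only anchors the real
 * per-cpu storage:
 *
 *	obj[0]	space reused as struct llist_node while the unit sits on a
 *		freelist
 *	obj[1]	pptr returned by __alloc_percpu_gfp(unit_size, 8, flags)
 *
 * bpf_mem_cache_alloc() returns the unit + LLIST_NODE_SZ, i.e. &obj[1],
 * so the caller finds the per-cpu pointer at the returned address.
 */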
static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	unsigned long flags;
	void *obj;
	int i;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (i = 0; i < cnt; i++) {
		obj = __alloc(c, node);
		if (!obj)
			break;
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			/* In RT irq_work runs in per-cpu kthread, so disable
			 * interrupts to avoid being preempted or interrupted
			 * and reduce the chance of a bpf prog executing on
			 * this cpu while the active counter is busy.
			 */
			local_irq_save(flags);
		/* alloc_bulk runs from irq_work which will not preempt a bpf
		 * program that does unit_alloc/unit_free since IRQs are
		 * disabled there. There is no race to increment 'active'
		 * counter. It protects free_llist from corruption in case NMI
		 * bpf prog preempted this loop.
		 */
		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
		__llist_add(obj, &c->free_llist);
		c->free_cnt++;
		local_dec(&c->active);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_restore(flags);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(struct bpf_mem_cache *c, void *obj)
{
	if (c->percpu_size) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct llist_node *llnode = llist_del_all(&c->waiting_for_gp);
	struct llist_node *pos, *t;

	llist_for_each_safe(pos, t, llnode)
		free_one(c, pos);
	atomic_set(&c->call_rcu_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu list.
	 */
	__llist_add(llnode, &c->free_by_rcu);
}

static void do_call_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_in_progress, 1))
		return;

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
		/* There is no concurrent __llist_add(waiting_for_gp) access.
		 * It doesn't race with llist_del_all either.
		 * But there could be two concurrent llist_del_all(waiting_for_gp):
		 * from __free_rcu() and from drain_mem_cache().
		 */
		__llist_add(llnode, &c->waiting_for_gp);
	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	do {
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_save(flags);
		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		local_dec(&c->active);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_restore(flags);
		if (llnode)
			enque_to_free(c, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(c, llnode);
	do_call_rcu(c);
}
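
/* A sketch (descriptive only, derived from the functions above) of how a
 * freed unit travels through the lists before memory goes back to the
 * kernel:
 *
 *	unit_free()	-> free_llist (or free_llist_extra if 'active' is busy)
 *	free_bulk()	-> free_by_rcu via enque_to_free()
 *	do_call_rcu()	-> moves free_by_rcu onto waiting_for_gp and arms
 *			   call_rcu_tasks_trace()
 *	grace period(s)	-> __free_rcu() finally free_one()-s everything on
 *			   waiting_for_gp
 */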
static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}
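
/* Worked examples (illustrative) of the watermark heuristic in
 * prefill_mem_cache() above:
 *
 *	unit_size ==   64: low == 32, high == 96,
 *			   batch == (96 - 32) / 4 * 3 == 48
 *	unit_size == 4096: low == max(2, 1) == 2, high == max(6, 3) == 6,
 *			   batch == max((6 - 2) / 4 * 3, 1) == 3
 *	unit_size == 8192: low == max(1, 1) == 1, high == max(3, 3) == 3,
 *			   batch == max((3 - 1) / 4 * 3, 1) == 1 (integer division)
 */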
/* When size != 0 allocate a single bpf_mem_cache per cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (percpu)
			/* room for llist_node and per-cpu pointer */
			percpu_size = LLIST_NODE_SZ + sizeof(void *);
		else
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			prefill_mem_cache(c, cpu);
		}
	}
	ma->caches = pcc;
	return 0;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
		free_one(c, llnode);
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp lists were drained, but __free_rcu might
	 * still execute. Wait for it now before freeing the percpu caches.
	 */
	rcu_barrier_tasks_trace();
	rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmalloc(sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory be freed */
	copy->cache = ma->cache;
	ma->cache = NULL;
	copy->caches = ma->caches;
	ma->caches = NULL;
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			/*
			 * refill_work may be unfinished for PREEMPT_RT kernel
			 * in which irq work is invoked in a per-CPU RT thread.
			 * It is also possible for kernel with
			 * arch_irq_work_has_interrupt() being false and irq
			 * work is invoked in timer interrupt. So wait for the
			 * completion of irq work to ease the handling of
			 * concurrency.
			 */
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}
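
/* Usage sketch (illustrative only; 'elem_size' and error handling are
 * elided assumptions, loosely mirroring how a fixed-size map would use
 * this allocator):
 *
 *	struct bpf_mem_alloc ma;
 *	void *elem;
 *
 *	bpf_mem_alloc_init(&ma, elem_size, false); // prefills per-cpu buckets
 *	elem = bpf_mem_cache_alloc(&ma);           // safe in any context
 *	bpf_mem_cache_free(&ma, elem);             // into this cpu's freelist
 *	bpf_mem_alloc_destroy(&ma);                // drains, waits for RCU frees
 */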
/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *     preemption or irq -> prog_B
	 *       bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}

/* Though 'ptr' object could have been allocated on a different cpu,
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free a few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}
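
/* Usage sketch (illustrative) of the dynamic-size path set up with
 * size == 0 in bpf_mem_alloc_init():
 *
 *	struct bpf_mem_alloc ma;
 *	void *p;
 *
 *	bpf_mem_alloc_init(&ma, 0, false); // 11 buckets per cpu
 *	p = bpf_mem_alloc(&ma, 48);        // 48 + 8 rounds up to the 64 byte bucket
 *	bpf_mem_free(&ma, p);              // bucket re-derived via ksize()
 */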