//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>

DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#ifdef CONFIG_CGROUP_BPF

#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct bpf_prog *prog;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

static int bpf_cgroup_storage_key_cmp(
	const struct bpf_cgroup_storage_key *key1,
	const struct bpf_cgroup_storage_key *key2)
{
	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
		return -1;
	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
		return 1;
	else if (key1->attach_type < key2->attach_type)
		return -1;
	else if (key1->attach_type > key2->attach_type)
		return 1;
	return 0;
}

static struct bpf_cgroup_storage *cgroup_storage_lookup(
	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
	bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}

static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}

static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}

static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (flags != BPF_ANY && flags != BPF_EXIST)
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;
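
	/* Swap in a freshly allocated buffer: concurrent lookups see either
	 * the old or the new buffer, and the old one is freed only after an
	 * RCU grace period (see the xchg()/kfree_rcu() pair below).
	 */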
	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
			   map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}

int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
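
/* Key iteration from user space (get_next_key) walks map->list, while
 * element lookups use the rbtree keyed by (cgroup_inode_id, attach_type).
 * Both structures are protected by map->lock.
 */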
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage_key *next = _next_key;
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list);
	}

	spin_unlock_bh(&map->lock);
	next->attach_type = storage->key.attach_type;
	next->cgroup_inode_id = storage->key.cgroup_inode_id;
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}

static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER, numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
				  PAGE_SIZE) >> PAGE_SHIFT;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}

static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}
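
/* Elements can't be deleted through the map API: a storage element's
 * lifetime is tied to the attachment of its program to a cgroup, and it
 * is released via bpf_cgroup_storage_unlink()/bpf_cgroup_storage_free().
 */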
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	const struct btf_type *t;
	struct btf_member *m;
	u32 id, size;

	/* Key is expected to be of struct bpf_cgroup_storage_key type,
	 * which is:
	 * struct bpf_cgroup_storage_key {
	 *	__u64	cgroup_inode_id;
	 *	__u32	attach_type;
	 * };
	 */

	/*
	 * Key_type must be a structure with two fields.
	 */
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
	    BTF_INFO_VLEN(key_type->info) != 2)
		return -EINVAL;

	/*
	 * The first field must be a 64 bit integer at 0 offset.
	 */
	m = (struct btf_member *)(key_type + 1);
	if (m->offset)
		return -EINVAL;
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
	if (!t || !btf_type_is_reg_int(t, size))
		return -EINVAL;

	/*
	 * The second field must be a 32 bit integer at 64 bit offset.
	 */
	m++;
	if (m->offset != offsetof(struct bpf_cgroup_storage_key, attach_type) *
			 BITS_PER_BYTE)
		return -EINVAL;
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
	if (!t || !btf_type_is_reg_int(t, size))
		return -EINVAL;

	return 0;
}

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
};

int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	int ret = -EBUSY;

	spin_lock_bh(&map->lock);

	if (map->prog && map->prog != prog)
		goto unlock;
	if (prog->aux->cgroup_storage[stype] &&
	    prog->aux->cgroup_storage[stype] != _map)
		goto unlock;

	map->prog = prog;
	prog->aux->cgroup_storage[stype] = _map;
	ret = 0;
unlock:
	spin_unlock_bh(&map->lock);

	return ret;
}

void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	spin_lock_bh(&map->lock);
	if (map->prog == prog) {
		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
		map->prog = NULL;
		prog->aux->cgroup_storage[stype] = NULL;
	}
	spin_unlock_bh(&map->lock);
}

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}
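
/* Allocate the backing storage for @prog's cgroup storage map of type
 * @stype and charge it against the map's memlock limit. The element is
 * later bound to a (cgroup, attach_type) key by bpf_cgroup_storage_link().
 */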
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	gfp_t flags;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	if (bpf_map_charge_memlock(map, pages))
		return ERR_PTR(-EPERM);

	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
			       __GFP_ZERO | GFP_USER, map->numa_node);
	if (!storage)
		goto enomem;

	flags = __GFP_ZERO | GFP_USER;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = kmalloc_node(size, flags, map->numa_node);
		if (!storage->buf)
			goto enomem;
	} else {
		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	bpf_map_uncharge_memlock(map, pages);
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;
	u32 pages;

	if (!storage)
		return;

	map = &storage->map->map;

	bpf_cgroup_storage_calculate_size(map, &pages);
	bpf_map_uncharge_memlock(map, pages);

	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup->kn->id.id;

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list, &map->list);
	spin_unlock_bh(&map->lock);
}

void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list);
	spin_unlock_bh(&map->lock);
}

#endif
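
/*
 * Illustrative sketch only (not part of this file): user space can read a
 * shared cgroup storage element by its (cgroup inode id, attach type) key,
 * e.g. with libbpf's bpf_map_lookup_elem(). This assumes a map created with
 * an 8-byte value; cgrp_ino is a placeholder for the inode number of the
 * cgroup directory.
 *
 *	struct bpf_cgroup_storage_key key = {
 *		.cgroup_inode_id = cgrp_ino,
 *		.attach_type	 = BPF_CGROUP_INET_EGRESS,
 *	};
 *	__u64 value;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &value))
 *		printf("counter: %llu\n", value);
 *
 * From the BPF program side, the storage is reached through the
 * bpf_get_local_storage() helper.
 */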