// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/fs.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/btf_ids.h>

/*
 * Per-seq_file iterator state for the "bpf_map" iterator target:
 * the id of the map the walk is currently positioned at.
 */
struct bpf_iter_seq_map_info {
	u32 map_id;	/* resume point: id of current/next map to visit */
};

/*
 * seq_file ->start: (re)position the walk at the map whose id is
 * info->map_id, or the next existing one.
 *
 * NOTE(review): bpf_map_get_curr_or_next() is defined elsewhere; from the
 * balancing bpf_map_put() calls in ->next/->stop below it evidently returns
 * a map with a reference held (or NULL when no map with id >= map_id exists).
 */
static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *map;

	map = bpf_map_get_curr_or_next(&info->map_id);
	if (!map)
		return NULL;

	/*
	 * Standard seq_file convention: bump *pos past 0 on the first
	 * successful ->start so the core does not treat the session as
	 * having produced nothing.
	 */
	if (*pos == 0)
		++*pos;
	return map;
}

/*
 * seq_file ->next: release the reference on the current map, advance the
 * resume id past it, and fetch the next existing map (if any).
 */
static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;

	++*pos;
	++info->map_id;
	bpf_map_put((struct bpf_map *)v);
	return bpf_map_get_curr_or_next(&info->map_id);
}

/* Context passed to the attached BPF program for each visited map. */
struct bpf_iter__bpf_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
};

DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)

/*
 * Build the iterator context and run the attached program on it.
 *
 * @in_stop: true when called from ->stop, i.e. the final invocation where
 * the program sees a NULL map; bpf_iter_get_info() uses this to decide
 * whether a program should run at all for this call.
 */
static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
	struct bpf_iter__bpf_map ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	ctx.meta = &meta;
	ctx.map = v;
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}

/* seq_file ->show: run the program once for the map at @v. */
static int bpf_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_map_seq_show(seq, v, false);
}

/*
 * seq_file ->stop: if iteration finished (v == NULL), give the program a
 * final callback with a NULL map; otherwise we stopped mid-walk (e.g. the
 * read buffer filled up), so just drop the reference taken in
 * ->start/->next.
 */
static void bpf_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_map_seq_show(seq, v, true);
	else
		bpf_map_put((struct bpf_map *)v);
}

static const struct seq_operations bpf_map_seq_ops = {
	.start	= bpf_map_seq_start,
	.next	= bpf_map_seq_next,
	.stop	= bpf_map_seq_stop,
	.show	= bpf_map_seq_show,
};

/* BTF id of "struct bpf_map", resolved at build time into this list. */
BTF_ID_LIST(btf_bpf_map_id)
BTF_ID(struct, bpf_map)

static const struct bpf_iter_seq_info bpf_map_seq_info = {
	.seq_ops		= &bpf_map_seq_ops,
	.init_seq_private	= NULL,	/* no extra per-session setup needed */
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_map_info),
};

/*
 * Registration info for the "bpf_map" iterator target.  Not const: the
 * ctx_arg_info[0].btf_id is patched in at init time (see
 * bpf_map_iter_init() below).
 */
static struct bpf_iter_reg bpf_map_reg_info = {
	.target			= "bpf_map",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map, map),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &bpf_map_seq_info,
};

/*
 * Attach-time check for the "bpf_map_elem" target: verify the program's
 * verifier-recorded maximum key (read-only) and value (read-write) access
 * sizes fit within the target map's actual key/value sizes.
 *
 * Only hash/LRU-hash/array maps and their percpu variants are supported;
 * anything else is rejected with -EINVAL.
 */
static int bpf_iter_check_map(struct bpf_prog *prog,
			      struct bpf_iter_aux_info *aux)
{
	u32 key_acc_size, value_acc_size, key_size, value_size;
	struct bpf_map *map = aux->map;
	bool is_percpu = false;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		is_percpu = true;
	else if (map->map_type != BPF_MAP_TYPE_HASH &&
		 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
		 map->map_type != BPF_MAP_TYPE_ARRAY)
		return -EINVAL;

	key_acc_size = prog->aux->max_rdonly_access;
	value_acc_size = prog->aux->max_rdwr_access;
	key_size = map->key_size;
	if (!is_percpu)
		value_size = map->value_size;
	else
		/*
		 * Percpu maps expose one 8-byte-aligned value slot per
		 * possible CPU, so the accessible value region is that
		 * much larger.
		 */
		value_size = round_up(map->value_size, 8) * num_possible_cpus();

	if (key_acc_size > key_size || value_acc_size > value_size)
		return -EACCES;

	return 0;
}

DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key, void *value)

/*
 * Registration info for the "bpf_map_elem" target.  No .seq_info here:
 * element iteration is map-type specific, so the seq ops come from the
 * map's own ops at link time (the link must carry a map fd, per
 * BPF_ITER_LINK_MAP_FD).
 */
static const struct bpf_iter_reg bpf_map_elem_reg_info = {
	.target			= "bpf_map_elem",
	.check_target		= bpf_iter_check_map,
	.req_linfo		= BPF_ITER_LINK_MAP_FD,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map_elem, key),
		  PTR_TO_RDONLY_BUF_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_map_elem, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
};

/*
 * Late-initcall: fill in the runtime-resolved BTF id for the map context
 * argument, then register both iterator targets.  If registering "bpf_map"
 * fails, "bpf_map_elem" is intentionally not registered either.
 */
static int __init bpf_map_iter_init(void)
{
	int ret;

	bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id;
	ret = bpf_iter_reg_target(&bpf_map_reg_info);
	if (ret)
		return ret;

	return bpf_iter_reg_target(&bpf_map_elem_reg_info);
}

late_initcall(bpf_map_iter_init);