xref: /linux/kernel/bpf/bpf_insn_array.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Isovalent */

#include <linux/bpf.h>

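/*
 * An instruction array map tracks a set of instruction offsets within
 * a single BPF program across verification and JIT compilation.
 *
 * @used: set while a program owns the map (one program at a time)
 * @ips: jitted addresses of the tracked instructions, stored in the
 *	 same allocation right after values[]
 * @values: per-entry original/xlated/jitted instruction offsets
 */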
struct bpf_insn_array {
	struct bpf_map map;
	atomic_t used;
	long *ips;
	DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
};

#define cast_insn_array(MAP_PTR) \
	container_of((MAP_PTR), struct bpf_insn_array, map)

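/* Marks entries whose target instruction was removed during verification */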
#define INSN_DELETED ((u32)-1)

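/*
 * Everything lives in one allocation:
 *
 *   [struct bpf_insn_array][values[0..max_entries-1]][ips[0..max_entries-1]]
 */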
static inline u64 insn_array_alloc_size(u32 max_entries)
{
	const u64 base_size = sizeof(struct bpf_insn_array);
	const u64 entry_size = sizeof(struct bpf_insn_array_value);

	return base_size + max_entries * (entry_size + sizeof(long));
}

static int insn_array_alloc_check(union bpf_attr *attr)
{
	u32 value_size = sizeof(struct bpf_insn_array_value);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != value_size || attr->map_flags != 0)
		return -EINVAL;

	return 0;
}

static void insn_array_free(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	bpf_map_area_free(insn_array);
}

static struct bpf_map *insn_array_alloc(union bpf_attr *attr)
{
	u64 size = insn_array_alloc_size(attr->max_entries);
	struct bpf_insn_array *insn_array;

	insn_array = bpf_map_area_alloc(size, NUMA_NO_NODE);
	if (!insn_array)
		return ERR_PTR(-ENOMEM);

	/* ips are allocated right after the insn_array->values[] array */
	insn_array->ips = (void *)&insn_array->values[attr->max_entries];

	bpf_map_init_from_attr(&insn_array->map, attr);

	/* BPF programs aren't allowed to write to the map */
	insn_array->map.map_flags |= BPF_F_RDONLY_PROG;

	return &insn_array->map;
}

static void *insn_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= insn_array->map.max_entries))
		return NULL;

	return &insn_array->values[index];
}

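/*
 * Userspace may only set orig_off: xlated_off and jitted_off are
 * maintained by the kernel and must be passed as zero.
 */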
static long insn_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;
	struct bpf_insn_array_value val = {};

	if (unlikely(index >= insn_array->map.max_entries))
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		return -EEXIST;

	copy_map_value(map, &val, value);
	if (val.jitted_off || val.xlated_off)
		return -EINVAL;

	insn_array->values[index].orig_off = val.orig_off;

	return 0;
}

/* All elements always exist, and none can be deleted */
static long insn_array_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int insn_array_check_btf(const struct bpf_map *map,
				const struct btf *btf,
				const struct btf_type *key_type,
				const struct btf_type *value_type)
{
	if (!btf_type_is_i32(key_type))
		return -EINVAL;

	if (!btf_type_is_i64(value_type))
		return -EINVAL;

	return 0;
}

static u64 insn_array_mem_usage(const struct bpf_map *map)
{
	return insn_array_alloc_size(map->max_entries);
}

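/*
 * Used by the verifier to resolve an ldimm64 with BPF_PSEUDO_MAP_VALUE
 * into a constant address: @off must be a long-aligned byte offset into
 * the ips[] array, and *imm becomes the absolute address of that slot.
 */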
static int insn_array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	if ((off % sizeof(long)) != 0 ||
	    (off / sizeof(long)) >= map->max_entries)
		return -EINVAL;

	/* from BPF's point of view, this map is a jump table */
	*imm = (unsigned long)insn_array->ips + off;

	return 0;
}

BTF_ID_LIST_SINGLE(insn_array_btf_ids, struct, bpf_insn_array)

const struct bpf_map_ops insn_array_map_ops = {
	.map_alloc_check = insn_array_alloc_check,
	.map_alloc = insn_array_alloc,
	.map_free = insn_array_free,
	.map_get_next_key = bpf_array_get_next_key,
	.map_lookup_elem = insn_array_lookup_elem,
	.map_update_elem = insn_array_update_elem,
	.map_delete_elem = insn_array_delete_elem,
	.map_check_btf = insn_array_check_btf,
	.map_mem_usage = insn_array_mem_usage,
	.map_direct_value_addr = insn_array_map_direct_value_addr,
	.map_btf_id = &insn_array_btf_ids[0],
};

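/*
 * Typical userspace setup (a sketch, not taken from this file; the
 * libbpf calls are standard, the offset value is made up):
 *
 *	__u32 key = 0;
 *	struct bpf_insn_array_value val = { .orig_off = 42 };
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, NULL,
 *				sizeof(key), sizeof(val), 1, NULL);
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	bpf_map_freeze(map_fd);
 *
 * The map must be frozen before program load (see bpf_insn_array_init()
 * below); after a successful load, lookups return the updated
 * xlated_off/jitted_off for each entry.
 */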
static inline bool is_frozen(struct bpf_map *map)
{
	guard(mutex)(&map->freeze_mutex);

	return map->frozen;
}

static bool is_insn_array(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
}

static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
				 const struct bpf_prog *prog)
{
	u32 off;
	int i;

	for (i = 0; i < insn_array->map.max_entries; i++) {
		off = insn_array->values[i].orig_off;

		if (off >= prog->len)
			return false;

		if (off > 0) {
			/* can't point to the second half of a 16-byte ldimm64 */
			if (prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM))
				return false;
		}
	}

	return true;
}

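/*
 * Claim the map for the program being loaded. The map must have been
 * frozen by userspace, so the tracked offsets can't change under the
 * verifier, and it may be used by at most one program.
 */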
int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	struct bpf_insn_array_value *values = insn_array->values;
	int i;

	if (!is_frozen(map))
		return -EINVAL;

	if (!valid_offsets(insn_array, prog))
		return -EINVAL;

	/* There can be only one program using the map */
	if (atomic_xchg(&insn_array->used, 1))
		return -EBUSY;

	/*
	 * Reset all map indexes to their original values, which is needed,
	 * e.g., when verification is replayed with a different log level.
	 */
	for (i = 0; i < map->max_entries; i++)
		values[i].xlated_off = values[i].orig_off;

	return 0;
}

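/*
 * Last check before the program goes live: every entry that survived
 * verification must have been assigned a jitted address by the JIT.
 */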
int bpf_insn_array_ready(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (!insn_array->ips[i])
			return -EFAULT;
	}

	return 0;
}

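/* Drop the program's claim on the map so that it can be reused */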
void bpf_insn_array_release(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	atomic_set(&insn_array->used, 0);
}

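/*
 * The instruction at xlated offset @off was patched into @len
 * instructions, so all tracked offsets past @off move by len - 1.
 * E.g., with off=10 and len=3, an entry at offset 12 moves to 14,
 * while an entry at offset 10 (the patched instruction) stays put.
 */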
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	if (len <= 1)
		return;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off <= off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		insn_array->values[i].xlated_off += len - 1;
	}
}

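/*
 * The @len instructions starting at xlated offset @off were removed:
 * entries pointing into [off, off + len) are marked INSN_DELETED, and
 * entries pointing past the removed range move down by @len.
 */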
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off < off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (insn_array->values[i].xlated_off < off + len)
			insn_array->values[i].xlated_off = INSN_DELETED;
		else
			insn_array->values[i].xlated_off -= len;
	}
}

/*
 * This function is called by JITs. The image is the real program
 * image, and the offsets array sets up the xlated -> jitted mapping:
 * offsets[xlated] must point to the beginning of the corresponding
 * jitted instruction.
 */
void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
{
	struct bpf_insn_array *insn_array;
	struct bpf_map *map;
	u32 xlated_off;
	int i, j;

	if (!offsets || !image)
		return;

	for (i = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (!is_insn_array(map))
			continue;

		insn_array = cast_insn_array(map);
		for (j = 0; j < map->max_entries; j++) {
			xlated_off = insn_array->values[j].xlated_off;
			if (xlated_off == INSN_DELETED)
				continue;
			/* only touch entries that land inside this (sub)program */
			if (xlated_off < prog->aux->subprog_start)
				continue;
			xlated_off -= prog->aux->subprog_start;
			if (xlated_off >= prog->len)
				continue;

			insn_array->values[j].jitted_off = offsets[xlated_off];
			insn_array->ips[j] = (long)(image + offsets[xlated_off]);
		}
	}
}
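
/*
 * A sketch of the expected call site in an arch JIT (names below are
 * illustrative, not taken from any particular JIT): once the final
 * image is produced, pass the per-instruction offset table and the
 * image base, e.g.:
 *
 *	bpf_prog_update_insn_ptrs(prog, ctx->insn_offsets, jit_image);
 *
 * where ctx->insn_offsets[i] holds the byte offset of xlated
 * instruction i within jit_image.
 */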