// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Isovalent */

#include <linux/bpf.h>

struct bpf_insn_array {
	struct bpf_map map;
	atomic_t used;
	long *ips;
	DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
};

#define cast_insn_array(MAP_PTR) \
	container_of((MAP_PTR), struct bpf_insn_array, map)

#define INSN_DELETED ((u32)-1)

static inline u64 insn_array_alloc_size(u32 max_entries)
{
	const u64 base_size = sizeof(struct bpf_insn_array);
	const u64 entry_size = sizeof(struct bpf_insn_array_value);

	return base_size + max_entries * (entry_size + sizeof(long));
}
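
/*
 * One allocation backs the whole map. Layout (illustration only, for
 * max_entries == N):
 *
 *	struct bpf_insn_array		base_size
 *	values[0] .. values[N-1]	N * entry_size
 *	ips[0] .. ips[N-1]		N * sizeof(long)
 *
 * insn_array_alloc() points insn_array->ips at the trailing region.
 */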

static int insn_array_alloc_check(union bpf_attr *attr)
{
	u32 value_size = sizeof(struct bpf_insn_array_value);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != value_size || attr->map_flags != 0)
		return -EINVAL;

	return 0;
}

static void insn_array_free(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	bpf_map_area_free(insn_array);
}

static struct bpf_map *insn_array_alloc(union bpf_attr *attr)
{
	u64 size = insn_array_alloc_size(attr->max_entries);
	struct bpf_insn_array *insn_array;

	insn_array = bpf_map_area_alloc(size, NUMA_NO_NODE);
	if (!insn_array)
		return ERR_PTR(-ENOMEM);

	/* ips are allocated right after the insn_array->values[] array */
	insn_array->ips = (void *)&insn_array->values[attr->max_entries];

	bpf_map_init_from_attr(&insn_array->map, attr);

	return &insn_array->map;
}

static void *insn_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= insn_array->map.max_entries))
		return NULL;

	return &insn_array->values[index];
}

static long insn_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;
	struct bpf_insn_array_value val = {};

	if (unlikely(index >= insn_array->map.max_entries))
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		return -EEXIST;

	/* userspace may only set the original offset */
	copy_map_value(map, &val, value);
	if (val.jitted_off || val.xlated_off)
		return -EINVAL;

	insn_array->values[index].orig_off = val.orig_off;

	return 0;
}

static long insn_array_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int insn_array_check_btf(const struct bpf_map *map,
				const struct btf *btf,
				const struct btf_type *key_type,
				const struct btf_type *value_type)
{
	if (!btf_type_is_i32(key_type))
		return -EINVAL;

	if (!btf_type_is_i64(value_type))
		return -EINVAL;

	return 0;
}

static u64 insn_array_mem_usage(const struct bpf_map *map)
{
	return insn_array_alloc_size(map->max_entries);
}

static int insn_array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	if ((off % sizeof(long)) != 0 ||
	    (off / sizeof(long)) >= map->max_entries)
		return -EINVAL;

	/* from BPF's point of view, this map is a jump table */
	*imm = (unsigned long)insn_array->ips + off;

	return 0;
}
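
/*
 * Illustration (not part of the original file): with max_entries == 4,
 * a direct value access at off == 2 * sizeof(long) resolves to &ips[2],
 * i.e. the address of the jitted instruction recorded for slot 2. An
 * unaligned off, or off / sizeof(long) >= 4, is rejected with -EINVAL.
 */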

BTF_ID_LIST_SINGLE(insn_array_btf_ids, struct, bpf_insn_array)

const struct bpf_map_ops insn_array_map_ops = {
	.map_alloc_check = insn_array_alloc_check,
	.map_alloc = insn_array_alloc,
	.map_free = insn_array_free,
	.map_get_next_key = bpf_array_get_next_key,
	.map_lookup_elem = insn_array_lookup_elem,
	.map_update_elem = insn_array_update_elem,
	.map_delete_elem = insn_array_delete_elem,
	.map_check_btf = insn_array_check_btf,
	.map_mem_usage = insn_array_mem_usage,
	.map_direct_value_addr = insn_array_map_direct_value_addr,
	.map_btf_id = &insn_array_btf_ids[0],
};
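
/*
 * Sketch of the expected userspace lifecycle (illustrative libbpf code,
 * not part of this file; "jt", nr_slots, slot and insn_idx are made-up
 * names). The map must be populated and frozen before the program that
 * uses it is loaded, since bpf_insn_array_init() below rejects unfrozen
 * maps and insn_array_update_elem() accepts only orig_off:
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, "jt",
 *				sizeof(__u32),
 *				sizeof(struct bpf_insn_array_value),
 *				nr_slots, NULL);
 *	struct bpf_insn_array_value val = { .orig_off = insn_idx };
 *
 *	bpf_map_update_elem(fd, &slot, &val, 0);
 *	bpf_map_freeze(fd);
 *
 * and only then load the program that references the map.
 */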

static inline bool is_frozen(struct bpf_map *map)
{
	guard(mutex)(&map->freeze_mutex);

	return map->frozen;
}

static bool is_insn_array(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
}

static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
				 const struct bpf_prog *prog)
{
	u32 off;
	int i;

	for (i = 0; i < insn_array->map.max_entries; i++) {
		off = insn_array->values[i].orig_off;

		if (off >= prog->len)
			return false;

		if (off > 0) {
			/*
			 * A ldimm64 occupies two instruction slots; an
			 * offset pointing at its second half is invalid.
			 */
			if (prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM))
				return false;
		}
	}

	return true;
}

int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	struct bpf_insn_array_value *values = insn_array->values;
	int i;

	if (!is_frozen(map))
		return -EINVAL;

	if (!valid_offsets(insn_array, prog))
		return -EINVAL;

	/* There can be only one program using the map */
	if (atomic_xchg(&insn_array->used, 1))
		return -EBUSY;

	/*
	 * Reset all the map indexes to the original values. This is
	 * needed, e.g., when verification is replayed with a different
	 * log level.
	 */
	for (i = 0; i < map->max_entries; i++)
		values[i].xlated_off = values[i].orig_off;

	return 0;
}

/* Check that every non-deleted entry was assigned a jitted address */
int bpf_insn_array_ready(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (!insn_array->ips[i])
			return -EFAULT;
	}

	return 0;
}

/* Make the map usable by another program */
void bpf_insn_array_release(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	atomic_set(&insn_array->used, 0);
}

/*
 * A single instruction at xlated offset @off was replaced by @len
 * instructions: shift every pointer past the patch site by len - 1.
 */
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	if (len <= 1)
		return;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off <= off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		insn_array->values[i].xlated_off += len - 1;
	}
}
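
/*
 * Worked example (illustration only): with xlated offsets {1, 5, 9} and
 * a patch that turns the single instruction at off == 5 into len == 3
 * instructions, the entries become {1, 5, 11}: offsets at or before the
 * patch site are untouched, later ones shift by len - 1 == 2.
 */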

/*
 * Instructions in [@off, @off + @len) were removed: mark pointers into
 * the removed range as deleted, shift later ones down by @len.
 */
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off < off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (insn_array->values[i].xlated_off < off + len)
			insn_array->values[i].xlated_off = INSN_DELETED;
		else
			insn_array->values[i].xlated_off -= len;
	}
}
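
/*
 * Worked example (illustration only): with xlated offsets {1, 5, 9},
 * removing len == 2 instructions starting at off == 4 yields
 * {1, INSN_DELETED, 7}: entry 5 pointed into the removed range [4, 6),
 * and entry 9 shifts down by len.
 */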

/*
 * This function is called by JITs. The image is the final program
 * image; the offsets array sets up the xlated -> jitted mapping:
 * offsets[xlated] must point to the beginning of the corresponding
 * jitted instruction.
 */
void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
{
	struct bpf_insn_array *insn_array;
	struct bpf_map *map;
	u32 xlated_off;
	int i, j;

	if (!offsets || !image)
		return;

	for (i = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (!is_insn_array(map))
			continue;

		insn_array = cast_insn_array(map);
		for (j = 0; j < map->max_entries; j++) {
			xlated_off = insn_array->values[j].xlated_off;
			if (xlated_off == INSN_DELETED)
				continue;
			/* skip entries that belong to other subprogs */
			if (xlated_off < prog->aux->subprog_start)
				continue;
			xlated_off -= prog->aux->subprog_start;
			if (xlated_off >= prog->len)
				continue;

			insn_array->values[j].jitted_off = offsets[xlated_off];
			insn_array->ips[j] = (long)(image + offsets[xlated_off]);
		}
	}
}
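
/*
 * Minimal sketch of a call site (illustrative; "ctx" is a hypothetical
 * per-JIT context, real JITs differ by architecture). Once the image is
 * final and ctx->offsets[] maps every xlated index to its byte offset
 * in the image, the JIT publishes the resolved addresses:
 *
 *	bpf_prog_update_insn_ptrs(prog, ctx->offsets, ctx->image);
 */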