xref: /linux/kernel/bpf/bpf_insn_array.c (revision b4ce5923e780d6896d4aaf19de5a27652b8bf1ea)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Isovalent */

#include <linux/bpf.h>

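/*
 * An instruction array map.  The @used flag is non-zero while a program
 * owns the map; @ips holds one jitted instruction address per entry and
 * lives in the same allocation, right after @values.
 */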
struct bpf_insn_array {
	struct bpf_map map;
	atomic_t used;
	long *ips;
	DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
};

#define cast_insn_array(MAP_PTR) \
	container_of((MAP_PTR), struct bpf_insn_array, map)

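/*
 * Sentinel stored in xlated_off once the tracked instruction has been
 * removed from the program (see bpf_insn_array_adjust_after_remove()).
 */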
#define INSN_DELETED ((u32)-1)

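/*
 * The map is backed by a single allocation: the struct itself, followed
 * by max_entries values, followed by max_entries instruction pointers.
 */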
static inline u64 insn_array_alloc_size(u32 max_entries)
{
	const u64 base_size = sizeof(struct bpf_insn_array);
	const u64 entry_size = sizeof(struct bpf_insn_array_value);

	return base_size + max_entries * (entry_size + sizeof(long));
}

static int insn_array_alloc_check(union bpf_attr *attr)
{
	u32 value_size = sizeof(struct bpf_insn_array_value);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != value_size || attr->map_flags != 0)
		return -EINVAL;

	return 0;
}

static void insn_array_free(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	bpf_map_area_free(insn_array);
}

static struct bpf_map *insn_array_alloc(union bpf_attr *attr)
{
	u64 size = insn_array_alloc_size(attr->max_entries);
	struct bpf_insn_array *insn_array;

	insn_array = bpf_map_area_alloc(size, NUMA_NO_NODE);
	if (!insn_array)
		return ERR_PTR(-ENOMEM);

	/* ips are allocated right after the insn_array->values[] array */
	insn_array->ips = (void *)&insn_array->values[attr->max_entries];

	bpf_map_init_from_attr(&insn_array->map, attr);

	return &insn_array->map;
}

static void *insn_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= insn_array->map.max_entries))
		return NULL;

	return &insn_array->values[index];
}

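/*
 * Only orig_off can be set from userspace; xlated_off and jitted_off are
 * maintained by the kernel, so a value carrying either of them is rejected.
 */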
static long insn_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;
	struct bpf_insn_array_value val = {};

	if (unlikely(index >= insn_array->map.max_entries))
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		return -EEXIST;

	copy_map_value(map, &val, value);
	if (val.jitted_off || val.xlated_off)
		return -EINVAL;

	insn_array->values[index].orig_off = val.orig_off;

	return 0;
}

static long insn_array_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int insn_array_check_btf(const struct bpf_map *map,
				const struct btf *btf,
				const struct btf_type *key_type,
				const struct btf_type *value_type)
{
	if (!btf_type_is_i32(key_type))
		return -EINVAL;

	if (!btf_type_is_i64(value_type))
		return -EINVAL;

	return 0;
}

static u64 insn_array_mem_usage(const struct bpf_map *map)
{
	return insn_array_alloc_size(map->max_entries);
}

BTF_ID_LIST_SINGLE(insn_array_btf_ids, struct, bpf_insn_array)

const struct bpf_map_ops insn_array_map_ops = {
	.map_alloc_check = insn_array_alloc_check,
	.map_alloc = insn_array_alloc,
	.map_free = insn_array_free,
	.map_get_next_key = bpf_array_get_next_key,
	.map_lookup_elem = insn_array_lookup_elem,
	.map_update_elem = insn_array_update_elem,
	.map_delete_elem = insn_array_delete_elem,
	.map_check_btf = insn_array_check_btf,
	.map_mem_usage = insn_array_mem_usage,
	.map_btf_id = &insn_array_btf_ids[0],
};

static inline bool is_frozen(struct bpf_map *map)
{
	guard(mutex)(&map->freeze_mutex);

	return map->frozen;
}

static bool is_insn_array(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
}

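/*
 * Every tracked offset must point inside the program and must not point
 * into the second (pseudo) half of a 16-byte ldimm64 instruction.
 */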
static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
				 const struct bpf_prog *prog)
{
	u32 off;
	int i;

	for (i = 0; i < insn_array->map.max_entries; i++) {
		off = insn_array->values[i].orig_off;

		if (off >= prog->len)
			return false;

		if (off > 0) {
			if (prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM))
				return false;
		}
	}

	return true;
}

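/*
 * Bind the map to @prog: the map must be frozen so that userspace can no
 * longer update orig_off, all offsets must be valid, and only one program
 * may claim the map.  On success the xlated offsets are (re)set to the
 * original ones.
 */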
int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	struct bpf_insn_array_value *values = insn_array->values;
	int i;

	if (!is_frozen(map))
		return -EINVAL;

	if (!valid_offsets(insn_array, prog))
		return -EINVAL;

	/*
	 * There can be only one program using the map
	 */
	if (atomic_xchg(&insn_array->used, 1))
		return -EBUSY;

	/*
	 * Reset all map entries to their original offsets.  This is needed,
	 * e.g., when verification is replayed with a different log level.
	 */
	for (i = 0; i < map->max_entries; i++)
		values[i].xlated_off = values[i].orig_off;

	return 0;
}

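/*
 * Check that every entry which is still live (i.e., not INSN_DELETED) has
 * had a jitted address recorded in ips[]; return -EFAULT otherwise.
 */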
int bpf_insn_array_ready(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (!insn_array->ips[i])
			return -EFAULT;
	}

	return 0;
}

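/* Drop the single-user claim taken in bpf_insn_array_init() */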
void bpf_insn_array_release(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	atomic_set(&insn_array->used, 0);
}

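/*
 * The instruction at @off was expanded into a sequence of @len instructions:
 * shift every tracked xlated offset located after @off by @len - 1.
 */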
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	if (len <= 1)
		return;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off <= off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		insn_array->values[i].xlated_off += len - 1;
	}
}

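/*
 * @len instructions starting at @off were removed: entries pointing into
 * the removed range are marked INSN_DELETED, entries after it are shifted
 * down by @len.
 */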
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off < off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (insn_array->values[i].xlated_off < off + len)
			insn_array->values[i].xlated_off = INSN_DELETED;
		else
			insn_array->values[i].xlated_off -= len;
	}
}

/*
 * This function is called by JITs. The image is the real program
 * image, and the offsets array sets up the xlated -> jitted mapping:
 * offsets[xlated_off] must point to the beginning of the corresponding
 * jitted instruction.
 */
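/*
 * Hypothetical example: for an entry with xlated_off == 5 in a subprog
 * whose subprog_start is 3, the relative offset is 2, so with
 * offsets[2] == 16 the entry gets jitted_off = 16 and ips = image + 16.
 */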
void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
{
	struct bpf_insn_array *insn_array;
	struct bpf_map *map;
	u32 xlated_off;
	int i, j;

	if (!offsets || !image)
		return;

	for (i = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (!is_insn_array(map))
			continue;

		insn_array = cast_insn_array(map);
		for (j = 0; j < map->max_entries; j++) {
			xlated_off = insn_array->values[j].xlated_off;
			if (xlated_off == INSN_DELETED)
				continue;
			if (xlated_off < prog->aux->subprog_start)
				continue;
			xlated_off -= prog->aux->subprog_start;
			if (xlated_off >= prog->len)
				continue;

			insn_array->values[j].jitted_off = offsets[xlated_off];
			insn_array->ips[j] = (long)(image + offsets[xlated_off]);
		}
	}
}