Lines Matching +full:reserve +full:- +full:mem +full:- +full:v1

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Linux Socket Filter - Kernel level socket filtering
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
61 #define DST regs[insn->dst_reg]
62 #define SRC regs[insn->src_reg]
67 #define OFF insn->off
68 #define IMM insn->imm
82 ptr = skb_network_header(skb) + k - SKF_NET_OFF; in bpf_internal_load_pointer_neg_helper()
86 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; in bpf_internal_load_pointer_neg_helper()
88 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) in bpf_internal_load_pointer_neg_helper()
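
The negative-offset convention above comes from classic BPF: a load at k = SKF_NET_OFF + n reads byte n of the network header, and SKF_LL_OFF does the same for the link-layer header. A standalone sketch of the translation and bounds check, using the real UAPI offset values but hypothetical buffer pointers standing in for the sk_buff fields:

    #include <stddef.h>

    #define SKF_NET_OFF (-0x100000)    /* UAPI linux/filter.h values */
    #define SKF_LL_OFF  (-0x200000)

    /* head/tail and the two header pointers stand in for the real
     * skb->head, skb_tail_pointer(), skb_network_header() and
     * skb_mac_header(). */
    static void *load_neg(unsigned char *head, unsigned char *tail,
                          unsigned char *net_hdr, unsigned char *mac_hdr,
                          int k, unsigned int size)
    {
        unsigned char *ptr;

        if (k >= SKF_NET_OFF)
            ptr = net_hdr + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
            ptr = mac_hdr + k - SKF_LL_OFF;
        else
            return NULL;

        /* same bounds check as above: inside [head, tail - size] */
        if (ptr >= head && ptr + size <= tail)
            return ptr;
        return NULL;
    }
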
115 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); in bpf_prog_alloc_no_stats()
116 if (!fp->active) { in bpf_prog_alloc_no_stats()
122 fp->pages = size / PAGE_SIZE; in bpf_prog_alloc_no_stats()
123 fp->aux = aux; in bpf_prog_alloc_no_stats()
124 fp->aux->main_prog_aux = aux; in bpf_prog_alloc_no_stats()
125 fp->aux->prog = fp; in bpf_prog_alloc_no_stats()
126 fp->jit_requested = ebpf_jit_enabled(); in bpf_prog_alloc_no_stats()
127 fp->blinding_requested = bpf_jit_blinding_enabled(fp); in bpf_prog_alloc_no_stats()
129 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID; in bpf_prog_alloc_no_stats()
132 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); in bpf_prog_alloc_no_stats()
134 INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode); in bpf_prog_alloc_no_stats()
136 mutex_init(&fp->aux->used_maps_mutex); in bpf_prog_alloc_no_stats()
137 mutex_init(&fp->aux->ext_mutex); in bpf_prog_alloc_no_stats()
138 mutex_init(&fp->aux->dst_mutex); in bpf_prog_alloc_no_stats()
157 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); in bpf_prog_alloc()
158 if (!prog->stats) { in bpf_prog_alloc()
159 free_percpu(prog->active); in bpf_prog_alloc()
160 kfree(prog->aux); in bpf_prog_alloc()
168 pstats = per_cpu_ptr(prog->stats, cpu); in bpf_prog_alloc()
169 u64_stats_init(&pstats->syncp); in bpf_prog_alloc()
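
The per-CPU syncp initialized above guards the u64 counters in struct bpf_prog_stats against torn reads on 32-bit hosts. A kernel-side sketch of the matching read pattern (a sketch only, not a standalone program; cnt is assumed to be one of the u64_stats_t counters in struct bpf_prog_stats):

    static u64 read_cnt_on_cpu(struct bpf_prog_stats __percpu *stats, int cpu)
    {
        struct bpf_prog_stats *st = per_cpu_ptr(stats, cpu);
        unsigned int start;
        u64 cnt;

        do {
            start = u64_stats_fetch_begin(&st->syncp);
            cnt = u64_stats_read(&st->cnt);
        } while (u64_stats_fetch_retry(&st->syncp, start));

        return cnt;
    }
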
177 if (!prog->aux->nr_linfo || !prog->jit_requested) in bpf_prog_alloc_jited_linfo()
180 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo, in bpf_prog_alloc_jited_linfo()
181 sizeof(*prog->aux->jited_linfo), in bpf_prog_alloc_jited_linfo()
183 if (!prog->aux->jited_linfo) in bpf_prog_alloc_jited_linfo()
184 return -ENOMEM; in bpf_prog_alloc_jited_linfo()
191 if (prog->aux->jited_linfo && in bpf_prog_jit_attempt_done()
192 (!prog->jited || !prog->aux->jited_linfo[0])) { in bpf_prog_jit_attempt_done()
193 kvfree(prog->aux->jited_linfo); in bpf_prog_jit_attempt_done()
194 prog->aux->jited_linfo = NULL; in bpf_prog_jit_attempt_done()
197 kfree(prog->aux->kfunc_tab); in bpf_prog_jit_attempt_done()
198 prog->aux->kfunc_tab = NULL; in bpf_prog_jit_attempt_done()
216 * The prog's idx to prog->aux->linfo and jited_linfo
218 * jited_linfo[linfo_idx] = prog->bpf_func
222 * jited_linfo[i] = prog->bpf_func +
223 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
232 if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt) in bpf_prog_fill_jited_linfo()
236 linfo_idx = prog->aux->linfo_idx; in bpf_prog_fill_jited_linfo()
237 linfo = &prog->aux->linfo[linfo_idx]; in bpf_prog_fill_jited_linfo()
239 insn_end = insn_start + prog->len; in bpf_prog_fill_jited_linfo()
241 jited_linfo = &prog->aux->jited_linfo[linfo_idx]; in bpf_prog_fill_jited_linfo()
242 jited_linfo[0] = prog->bpf_func; in bpf_prog_fill_jited_linfo()
244 nr_linfo = prog->aux->nr_linfo - linfo_idx; in bpf_prog_fill_jited_linfo()
250 jited_linfo[i] = prog->bpf_func + in bpf_prog_fill_jited_linfo()
251 insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; in bpf_prog_fill_jited_linfo()
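
The formula in the comment above is easier to see with numbers. In the runnable sketch below (all values invented), a subprog starts at insn 10 with line-info records at insns 10, 12 and 15; record 0 maps to bpf_func itself and the rest index insn_to_jit_off with the documented `- 1`:

    #include <stdio.h>

    /* insn_to_jit_off[] is assumed to hold, per BPF insn, the jit
     * offset at which the next insn begins. */
    int main(void)
    {
        unsigned long bpf_func = 0x1000;
        unsigned int insn_start = 10;
        unsigned int insn_off[] = { 10, 12, 15 };   /* linfo[i].insn_off */
        unsigned int insn_to_jit_off[] = { 4, 12, 16, 24, 28, 36 };

        for (int i = 0; i < 3; i++) {
            unsigned long addr = i == 0 ? bpf_func :
                bpf_func + insn_to_jit_off[insn_off[i] - insn_start - 1];
            printf("linfo[%d] (insn %u) -> 0x%lx\n", i, insn_off[i], addr);
        }
        return 0;   /* 0x1000, 0x100c, 0x101c */
    }
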
263 if (pages <= fp_old->pages) in bpf_prog_realloc()
268 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); in bpf_prog_realloc()
269 fp->pages = pages; in bpf_prog_realloc()
270 fp->aux->prog = fp; in bpf_prog_realloc()
272 /* We keep fp->aux from fp_old around in the new in bpf_prog_realloc()
275 fp_old->aux = NULL; in bpf_prog_realloc()
276 fp_old->stats = NULL; in bpf_prog_realloc()
277 fp_old->active = NULL; in bpf_prog_realloc()
286 if (fp->aux) { in __bpf_prog_free()
287 mutex_destroy(&fp->aux->used_maps_mutex); in __bpf_prog_free()
288 mutex_destroy(&fp->aux->dst_mutex); in __bpf_prog_free()
289 kfree(fp->aux->poke_tab); in __bpf_prog_free()
290 kfree(fp->aux); in __bpf_prog_free()
292 free_percpu(fp->stats); in __bpf_prog_free()
293 free_percpu(fp->active); in __bpf_prog_free()
306 return -ENOMEM; in bpf_prog_calc_tag()
311 for (i = 0, was_ld_map = false; i < fp->len; i++) { in bpf_prog_calc_tag()
312 dst[i] = fp->insnsi[i]; in bpf_prog_calc_tag()
330 sha256((u8 *)dst, size, fp->digest); in bpf_prog_calc_tag()
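
The digest above is taken over a canonicalized copy of the instruction stream: the immediates of BPF_LD_IMM64 map loads carry process-local map fds (or addresses), so they are zeroed first and the tag stays stable across loads. A simplified standalone sketch of that canonicalization (the kernel additionally requires src_reg to be BPF_PSEUDO_MAP_FD/VALUE on the first half and validates the all-zero second half):

    #include <stdbool.h>
    #include <stdint.h>

    struct insn { uint8_t code; uint8_t regs; int16_t off; int32_t imm; };

    #define LD_IMM64 0x18   /* BPF_LD | BPF_IMM | BPF_DW */

    static void canonicalize(struct insn *dst, const struct insn *src, int len)
    {
        bool was_ld_map = false;

        for (int i = 0; i < len; i++) {
            dst[i] = src[i];
            if (!was_ld_map && dst[i].code == LD_IMM64) {
                was_ld_map = true;    /* first half: zero the map fd  */
                dst[i].imm = 0;
            } else if (was_ld_map) {
                was_ld_map = false;   /* second half: zero it as well */
                dst[i].imm = 0;
            }
        }
    }
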
339 s32 delta = end_new - end_old; in bpf_adj_delta_to_imm()
340 s64 imm = insn->imm; in bpf_adj_delta_to_imm()
345 imm -= delta; in bpf_adj_delta_to_imm()
347 return -ERANGE; in bpf_adj_delta_to_imm()
349 insn->imm = imm; in bpf_adj_delta_to_imm()
357 s32 delta = end_new - end_old; in bpf_adj_delta_to_off()
359 if (insn->code == (BPF_JMP32 | BPF_JA)) { in bpf_adj_delta_to_off()
360 off = insn->imm; in bpf_adj_delta_to_off()
364 off = insn->off; in bpf_adj_delta_to_off()
372 off -= delta; in bpf_adj_delta_to_off()
374 return -ERANGE; in bpf_adj_delta_to_off()
376 if (insn->code == (BPF_JMP32 | BPF_JA)) in bpf_adj_delta_to_off()
377 insn->imm = off; in bpf_adj_delta_to_off()
379 insn->off = off; in bpf_adj_delta_to_off()
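
The fixup rule implemented above, extracted into a runnable sketch. Parameter roles follow bpf_adj_branches: the patch sits at pos, and old insns [pos, end_old) are replaced by [pos, end_new); the kernel additionally range-checks the result and returns -ERANGE on overflow:

    #include <stdio.h>

    static int adj_off(int curr, int off, int pos, int end_old, int end_new)
    {
        int delta = end_new - end_old;

        if (curr < pos && curr + off + 1 >= end_old)
            off += delta;   /* forward branch over the patched region */
        else if (curr >= end_new && curr + off + 1 < end_new)
            off -= delta;   /* backward branch back over it           */
        return off;
    }

    int main(void)
    {
        /* replace insn 5 with a 3-insn patch: end_old 6, end_new 8 */
        printf("%d\n", adj_off(3, 4, 5, 6, 8));  /* 3 -> 8 becomes 3 -> 10: 6 */
        printf("%d\n", adj_off(3, 0, 5, 6, 8));  /* target 4 is untouched: 0  */
        printf("%d\n", adj_off(9, -6, 5, 6, 8)); /* back to insn 2: -8        */
        return 0;
    }
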
387 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); in bpf_adj_branches()
388 struct bpf_insn *insn = prog->insnsi; in bpf_adj_branches()
400 insn = prog->insnsi + end_old; in bpf_adj_branches()
409 code = insn->code; in bpf_adj_branches()
416 if (insn->src_reg != BPF_PSEUDO_CALL) in bpf_adj_branches()
436 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo()
440 linfo = prog->aux->linfo; in bpf_adj_linfo()
454 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; in bpf_patch_insn_single()
461 memcpy(prog->insnsi + off, patch, sizeof(*patch)); in bpf_patch_insn_single()
465 insn_adj_cnt = prog->len + insn_delta; in bpf_patch_insn_single()
467 /* Reject anything that would potentially let the insn->off in bpf_patch_insn_single()
483 return ERR_PTR(-ENOMEM); in bpf_patch_insn_single()
485 prog_adj->len = insn_adj_cnt; in bpf_patch_insn_single()
495 insn_rest = insn_adj_cnt - off - len; in bpf_patch_insn_single()
497 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, in bpf_patch_insn_single()
499 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); in bpf_patch_insn_single()
519 memmove(prog->insnsi + off, prog->insnsi + off + cnt, in bpf_remove_insns()
520 sizeof(struct bpf_insn) * (prog->len - off - cnt)); in bpf_remove_insns()
521 prog->len -= cnt; in bpf_remove_insns()
532 for (i = 0; i < fp->aux->real_func_cnt; i++) in bpf_prog_kallsyms_del_subprogs()
533 bpf_prog_kallsyms_del(fp->aux->func[i]); in bpf_prog_kallsyms_del_subprogs()
555 prog->aux->ksym.start = (unsigned long) prog->bpf_func; in bpf_prog_ksym_set_addr()
556 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len; in bpf_prog_ksym_set_addr()
562 char *sym = prog->aux->ksym.name; in bpf_prog_ksym_set_name()
568 sizeof(prog->tag) * 2 + in bpf_prog_ksym_set_name()
576 sizeof(prog->aux->name) > KSYM_NAME_LEN); in bpf_prog_ksym_set_name()
579 sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); in bpf_prog_ksym_set_name()
581 /* prog->aux->name will be ignored if full btf name is available */ in bpf_prog_ksym_set_name()
582 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) { in bpf_prog_ksym_set_name()
583 type = btf_type_by_id(prog->aux->btf, in bpf_prog_ksym_set_name()
584 prog->aux->func_info[prog->aux->func_idx].type_id); in bpf_prog_ksym_set_name()
585 func_name = btf_name_by_offset(prog->aux->btf, type->name_off); in bpf_prog_ksym_set_name()
586 snprintf(sym, (size_t)(end - sym), "_%s", func_name); in bpf_prog_ksym_set_name()
590 if (prog->aux->name[0]) in bpf_prog_ksym_set_name()
591 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); in bpf_prog_ksym_set_name()
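
Assembled as above, the symbol reads bpf_prog_<16-hex-digit-tag>, followed by an optional _<name> suffix taken from BTF or prog->aux->name. A standalone sketch of the same assembly with an invented tag and name:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const unsigned char tag[8] = { 0x5a, 0x2f, 0x00, 0x11,
                                       0x22, 0x33, 0x44, 0xe1 };
        char sym[128] = "bpf_prog_";
        char *p = sym + strlen(sym);

        for (int i = 0; i < 8; i++)
            p += sprintf(p, "%02x", tag[i]);   /* bin2hex() step         */
        sprintf(p, "_%s", "my_prog");          /* BTF or aux->name suffix */
        puts(sym);      /* bpf_prog_5a2f0011223344e1_my_prog */
        return 0;
    }
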
598 return container_of(n, struct bpf_ksym, tnode)->start; in bpf_get_ksym_start()
614 if (val < ksym->start) in bpf_tree_comp()
615 return -1; in bpf_tree_comp()
618 * trace. Therefore, do val > ksym->end instead of val >= ksym->end. in bpf_tree_comp()
620 if (val > ksym->end) in bpf_tree_comp()
638 WARN_ON_ONCE(!list_empty(&ksym->lnode)); in bpf_ksym_add()
639 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms); in bpf_ksym_add()
640 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops); in bpf_ksym_add()
646 if (list_empty(&ksym->lnode)) in __bpf_ksym_del()
649 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops); in __bpf_ksym_del()
650 list_del_rcu(&ksym->lnode); in __bpf_ksym_del()
662 return fp->jited && !bpf_prog_was_classic(fp); in bpf_prog_kallsyms_candidate()
668 !bpf_token_capable(fp->aux->token, CAP_BPF)) in bpf_prog_kallsyms_add()
673 fp->aux->ksym.prog = true; in bpf_prog_kallsyms_add()
675 bpf_ksym_add(&fp->aux->ksym); in bpf_prog_kallsyms_add()
685 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN, in bpf_prog_kallsyms_add()
686 "__cfi_%s", fp->aux->ksym.name); in bpf_prog_kallsyms_add()
688 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16; in bpf_prog_kallsyms_add()
689 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func; in bpf_prog_kallsyms_add()
691 bpf_ksym_add(&fp->aux->ksym_prefix); in bpf_prog_kallsyms_add()
700 bpf_ksym_del(&fp->aux->ksym); in bpf_prog_kallsyms_del()
704 bpf_ksym_del(&fp->aux->ksym_prefix); in bpf_prog_kallsyms_del()
725 unsigned long symbol_start = ksym->start; in __bpf_address_lookup()
726 unsigned long symbol_end = ksym->end; in __bpf_address_lookup()
728 ret = strscpy(sym, ksym->name, KSYM_NAME_LEN); in __bpf_address_lookup()
731 *size = symbol_end - symbol_start; in __bpf_address_lookup()
733 *off = addr - symbol_start; in __bpf_address_lookup()
758 return ksym && ksym->prog ? in bpf_prog_ksym_find()
759 container_of(ksym, struct bpf_prog_aux, ksym)->prog : in bpf_prog_ksym_find()
772 if (!prog->aux->num_exentries) in search_bpf_extables()
775 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr); in search_bpf_extables()
786 int ret = -ERANGE; in bpf_get_kallsym()
796 strscpy(sym, ksym->name, KSYM_NAME_LEN); in bpf_get_kallsym()
798 *value = ksym->start; in bpf_get_kallsym()
812 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in bpf_jit_add_poke_descriptor()
814 u32 slot = prog->aux->size_poke_tab; in bpf_jit_add_poke_descriptor()
818 return -ENOSPC; in bpf_jit_add_poke_descriptor()
819 if (poke->tailcall_target || poke->tailcall_target_stable || in bpf_jit_add_poke_descriptor()
820 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr) in bpf_jit_add_poke_descriptor()
821 return -EINVAL; in bpf_jit_add_poke_descriptor()
823 switch (poke->reason) { in bpf_jit_add_poke_descriptor()
825 if (!poke->tail_call.map) in bpf_jit_add_poke_descriptor()
826 return -EINVAL; in bpf_jit_add_poke_descriptor()
829 return -EINVAL; in bpf_jit_add_poke_descriptor()
834 return -ENOMEM; in bpf_jit_add_poke_descriptor()
837 prog->aux->size_poke_tab = size; in bpf_jit_add_poke_descriptor()
838 prog->aux->poke_tab = tab; in bpf_jit_add_poke_descriptor()
854 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
877 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
897 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE); in alloc_new_pack()
898 if (!pack->ptr) in alloc_new_pack()
900 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE); in alloc_new_pack()
901 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE); in alloc_new_pack()
903 set_vm_flush_reset_perms(pack->ptr); in alloc_new_pack()
904 err = set_memory_rox((unsigned long)pack->ptr, in alloc_new_pack()
908 list_add_tail(&pack->list, &pack_list); in alloc_new_pack()
912 bpf_jit_free_exec(pack->ptr); in alloc_new_pack()
943 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, in bpf_prog_pack_alloc()
956 bitmap_set(pack->bitmap, pos, nbits); in bpf_prog_pack_alloc()
957 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT); in bpf_prog_pack_alloc()
977 if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) { in bpf_prog_pack_free()
987 pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT; in bpf_prog_pack_free()
992 bitmap_clear(pack->bitmap, pos, nbits); in bpf_prog_pack_free()
993 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, in bpf_prog_pack_free()
995 list_del(&pack->list); in bpf_prog_pack_free()
996 bpf_jit_free_exec(pack->ptr); in bpf_prog_pack_free()
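
The pack allocator above hands out 64-byte chunks (BPF_PROG_CHUNK_SIZE) of a shared 2 MB executable region and tracks occupancy in a bitmap: allocations round up to whole chunks and the free path recovers the bitmap position from pointer arithmetic against the pack base. The arithmetic in a runnable sketch, with invented addresses:

    #include <stdio.h>

    #define CHUNK_SHIFT 6                      /* BPF_PROG_CHUNK_SHIFT */
    #define CHUNK_SIZE  (1U << CHUNK_SHIFT)    /* 64-byte chunks       */

    int main(void)
    {
        unsigned long pack_base = 0xffffa000UL; /* hypothetical pack->ptr */
        unsigned long ptr = pack_base + 0x1c0;  /* some prior allocation  */
        unsigned int size = 200;                /* bytes requested        */

        unsigned int nbits = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
        unsigned long pos = (ptr - pack_base) >> CHUNK_SHIFT;

        printf("pos %lu, %u chunks = %u bytes\n",
               pos, nbits, nbits * CHUNK_SIZE); /* pos 7, 4 chunks = 256 */
        return 0;
    }
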
1012 return MODULES_END - MODULES_VADDR; in bpf_jit_alloc_exec_limit()
1014 return VMALLOC_END - VMALLOC_START; in bpf_jit_alloc_exec_limit()
1033 return -EPERM; in bpf_jit_charge_modmem()
1080 /* Fill space with illegal/arch-dep instructions. */ in bpf_jit_binary_alloc()
1083 hdr->size = size; in bpf_jit_binary_alloc()
1084 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), in bpf_jit_binary_alloc()
1085 PAGE_SIZE - sizeof(*hdr)); in bpf_jit_binary_alloc()
1086 start = get_random_u32_below(hole) & ~(alignment - 1); in bpf_jit_binary_alloc()
1089 *image_ptr = &hdr->image[start]; in bpf_jit_binary_alloc()
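
Both binary allocators randomize where the image starts inside the slack ("hole") left after the header, rounded down to the arch's alignment, so the JITed bytes never sit at a predictable offset. A sketch of that computation, with rand() standing in for get_random_u32_below() and all sizes invented; slack_max corresponds to PAGE_SIZE here and to BPF_PROG_CHUNK_SIZE in the pack variant below:

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t pick_start(uint32_t size, uint32_t proglen,
                               uint32_t hdr_sz, uint32_t slack_max,
                               uint32_t alignment)
    {
        uint32_t hole = size - (proglen + hdr_sz);

        if (hole > slack_max - hdr_sz)          /* the min_t() step */
            hole = slack_max - hdr_sz;
        /* random offset in [0, hole), rounded down to alignment */
        return ((uint32_t)rand() % hole) & ~(alignment - 1);
    }
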
1096 u32 size = hdr->size; in bpf_jit_binary_free()
1141 /* Fill space with illegal/arch-dep instructions. */ in bpf_jit_binary_pack_alloc()
1143 (*rw_header)->size = size; in bpf_jit_binary_pack_alloc()
1145 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), in bpf_jit_binary_pack_alloc()
1146 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); in bpf_jit_binary_pack_alloc()
1147 start = get_random_u32_below(hole) & ~(alignment - 1); in bpf_jit_binary_pack_alloc()
1149 *image_ptr = &ro_header->image[start]; in bpf_jit_binary_pack_alloc()
1150 *rw_image = &(*rw_header)->image[start]; in bpf_jit_binary_pack_alloc()
1161 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size); in bpf_jit_binary_pack_finalize()
1166 bpf_prog_pack_free(ro_header, ro_header->size); in bpf_jit_binary_pack_finalize()
1177 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1178 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1185 u32 size = ro_header->size; in bpf_jit_binary_pack_free()
1195 unsigned long real_start = (unsigned long)fp->bpf_func; in bpf_jit_binary_pack_hdr()
1205 unsigned long real_start = (unsigned long)fp->bpf_func; in bpf_jit_binary_hdr()
1214 * implement cBPF JIT, do not set images read-only, etc.
1218 if (fp->jited) { in bpf_jit_free()
1232 s16 off = insn->off; in bpf_jit_get_func_addr()
1233 s32 imm = insn->imm; in bpf_jit_get_func_addr()
1237 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; in bpf_jit_get_func_addr()
1239 /* Place-holder address till the last pass has collected in bpf_jit_get_func_addr()
1241 * can pick them up from prog->aux. in bpf_jit_get_func_addr()
1245 else if (prog->aux->func && in bpf_jit_get_func_addr()
1246 off >= 0 && off < prog->aux->real_func_cnt) in bpf_jit_get_func_addr()
1247 addr = (u8 *)prog->aux->func[off]->bpf_func; in bpf_jit_get_func_addr()
1249 return -EINVAL; in bpf_jit_get_func_addr()
1250 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && in bpf_jit_get_func_addr()
1252 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr); in bpf_jit_get_func_addr()
1270 if (prog->aux->ksym.prog) in bpf_jit_get_prog_name()
1271 return prog->aux->ksym.name; in bpf_jit_get_prog_name()
1272 return prog->aux->name; in bpf_jit_get_prog_name()
1304 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) in bpf_jit_blind_insn()
1307 if (from->imm == 0 && in bpf_jit_blind_insn()
1308 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || in bpf_jit_blind_insn()
1309 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { in bpf_jit_blind_insn()
1310 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); in bpf_jit_blind_insn()
1314 switch (from->code) { in bpf_jit_blind_insn()
1324 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); in bpf_jit_blind_insn()
1326 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); in bpf_jit_blind_insn()
1338 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); in bpf_jit_blind_insn()
1340 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off); in bpf_jit_blind_insn()
1355 off = from->off; in bpf_jit_blind_insn()
1357 off -= 2; in bpf_jit_blind_insn()
1358 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); in bpf_jit_blind_insn()
1360 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); in bpf_jit_blind_insn()
1375 off = from->off; in bpf_jit_blind_insn()
1377 off -= 2; in bpf_jit_blind_insn()
1378 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); in bpf_jit_blind_insn()
1380 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, in bpf_jit_blind_insn()
1402 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); in bpf_jit_blind_insn()
1404 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); in bpf_jit_blind_insn()
1408 return to - to_buff; in bpf_jit_blind_insn()
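
Blinding, as emitted above, replaces each immediate-carrying instruction with a sequence: load imm_rnd ^ imm into the hidden AX register, XOR AX with imm_rnd, then redo the operation register-to-register. It relies only on the involution demonstrated below, so semantics are preserved while the attacker-chosen constant never appears verbatim in the image:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t imm = 0xdeadbeef;      /* attacker-chosen constant */
        uint32_t imm_rnd = 0x12345678;  /* per-insn random blinder  */
        uint32_t ax = imm_rnd ^ imm;    /* what the image embeds    */

        assert((ax ^ imm_rnd) == imm);  /* recovered in AX at runtime */
        return 0;
    }
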
1417 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); in bpf_prog_clone_create()
1419 /* aux->prog still points to the fp_other one, so in bpf_prog_clone_create()
1423 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); in bpf_prog_clone_create()
1438 fp->aux = NULL; in bpf_prog_clone_free()
1439 fp->stats = NULL; in bpf_prog_clone_free()
1440 fp->active = NULL; in bpf_prog_clone_free()
1446 /* We have to repoint aux->prog to self, as we don't in bpf_jit_prog_release_other()
1449 fp->aux->prog = fp; in bpf_jit_prog_release_other()
1461 if (!prog->blinding_requested || prog->blinded) in bpf_jit_blind_constants()
1466 return ERR_PTR(-ENOMEM); in bpf_jit_blind_constants()
1468 insn_cnt = clone->len; in bpf_jit_blind_constants()
1469 insn = clone->insnsi; in bpf_jit_blind_constants()
1491 clone->aux->verifier_zext); in bpf_jit_blind_constants()
1497 /* Patching may have repointed aux->prog during in bpf_jit_blind_constants()
1506 insn_delta = rewritten - 1; in bpf_jit_blind_constants()
1509 insn = clone->insnsi + i + insn_delta; in bpf_jit_blind_constants()
1514 clone->blinded = 1; in bpf_jit_blind_constants()
1520 * therefore keeping it non-static as well; will also be used by JITs
1596 /* 32-bit Jump instructions. */ \
1655 INSN_3(STX, MEM, B), \
1656 INSN_3(STX, MEM, H), \
1657 INSN_3(STX, MEM, W), \
1658 INSN_3(STX, MEM, DW), \
1660 INSN_3(ST, MEM, B), \
1661 INSN_3(ST, MEM, H), \
1662 INSN_3(ST, MEM, W), \
1663 INSN_3(ST, MEM, DW), \
1666 INSN_3(LDX, MEM, B), \
1667 INSN_3(LDX, MEM, H), \
1668 INSN_3(LDX, MEM, W), \
1669 INSN_3(LDX, MEM, DW), \
1682 /* Now overwrite non-defaults ... */ in bpf_opcode_in_insntable()
1684 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ in bpf_opcode_in_insntable()
1700 * ___bpf_prog_run - run eBPF program on a given context
1701 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1714 /* Now overwrite non-defaults ... */ in ___bpf_prog_run()
1716 /* Non-UAPI available opcodes. */ in ___bpf_prog_run()
1736 goto *jumptable[insn->code]; in ___bpf_prog_run()
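
The goto *jumptable[...] dispatch above uses GCC's labels-as-values extension: the opcode indexes a table of handler addresses and each handler tail-jumps straight to the next, avoiding a central switch per instruction. A toy dispatcher in the same style (GCC/Clang only; the two-opcode ISA is invented for illustration):

    #include <stdio.h>

    static int run(const unsigned char *ops, int n)
    {
        static const void *jumptable[256] = {
            [0 ... 255] = &&do_default,
            [0x01] = &&do_inc,
            [0x02] = &&do_dbl,
        };
        int acc = 0, i = 0;

    #define CONT do { if (++i >= n) return acc; goto *jumptable[ops[i]]; } while (0)
        goto *jumptable[ops[i]];
    do_inc: acc += 1; CONT;
    do_dbl: acc *= 2; CONT;
    do_default:
        return -1;      /* unknown opcode */
    #undef CONT
    }

    int main(void)
    {
        const unsigned char prog[] = { 0x01, 0x01, 0x02 };
        printf("%d\n", run(prog, 3));   /* ((0+1)+1)*2 = 4 */
        return 0;
    }
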
1738 /* Explicitly mask the register-based shift amounts with 63 or 31 in ___bpf_prog_run()
1741 * as x86-64 or arm64, the compiler is optimizing the AND away for in ___bpf_prog_run()
1744 * implementation-defined results in such a case; the resulting in ___bpf_prog_run()
1778 ALU(SUB, -) in ___bpf_prog_run()
1788 DST = (u32) -DST; in ___bpf_prog_run()
1791 DST = -DST; in ___bpf_prog_run()
1852 DST = DST - AX * SRC; in ___bpf_prog_run()
1866 DST = (u32)-AX; in ___bpf_prog_run()
1880 DST = DST - AX * IMM; in ___bpf_prog_run()
1894 DST = (u32)-AX; in ___bpf_prog_run()
1923 DST = (u32)-AX; in ___bpf_prog_run()
1950 DST = (u32)-AX; in ___bpf_prog_run()
1996 /* Function call scratches BPF_R1-BPF_R5 registers, in ___bpf_prog_run()
1997 * preserves BPF_R6-BPF_R9, and stores return value in ___bpf_prog_run()
2000 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, in ___bpf_prog_run()
2005 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, in ___bpf_prog_run()
2008 insn + insn->off + 1); in ___bpf_prog_run()
2017 if (unlikely(index >= array->map.max_entries)) in ___bpf_prog_run()
2025 prog = READ_ONCE(array->ptrs[index]); in ___bpf_prog_run()
2034 insn = prog->insnsi; in ___bpf_prog_run()
2040 insn += insn->off; in ___bpf_prog_run()
2043 insn += insn->imm; in ___bpf_prog_run()
2051 insn += insn->off; \ in ___bpf_prog_run()
2057 insn += insn->off; \ in ___bpf_prog_run()
2063 insn += insn->off; \ in ___bpf_prog_run()
2069 insn += insn->off; \ in ___bpf_prog_run()
2088 * Bounds-Check Bypass and Type Confusion. In case of arm64, we in ___bpf_prog_run()
2094 * v1 mitigation that happens to produce the required code on in ___bpf_prog_run()
2101 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ in ___bpf_prog_run()
2104 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ in ___bpf_prog_run()
2107 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ in ___bpf_prog_run()
2111 (const void *)(long) (SRC + insn->off)); \ in ___bpf_prog_run()
2123 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ in ___bpf_prog_run()
2127 (const void *)(long) (SRC + insn->off)); \ in ___bpf_prog_run()
2138 if (BPF_SIZE(insn->code) == BPF_W) \ in ___bpf_prog_run()
2140 (DST + insn->off)); \ in ___bpf_prog_run()
2141 else if (BPF_SIZE(insn->code) == BPF_DW) \ in ___bpf_prog_run()
2143 (DST + insn->off)); \ in ___bpf_prog_run()
2148 if (BPF_SIZE(insn->code) == BPF_W) \ in ___bpf_prog_run()
2151 (atomic_t *)(unsigned long) (DST + insn->off)); \ in ___bpf_prog_run()
2152 else if (BPF_SIZE(insn->code) == BPF_DW) \ in ___bpf_prog_run()
2155 (atomic64_t *)(unsigned long) (DST + insn->off)); \ in ___bpf_prog_run()
2165 /* Atomic read-modify-write instructions support only W and DW in ___bpf_prog_run()
2175 if (BPF_SIZE(insn->code) == BPF_W) in ___bpf_prog_run()
2177 (atomic_t *)(unsigned long) (DST + insn->off), in ___bpf_prog_run()
2179 else if (BPF_SIZE(insn->code) == BPF_DW) in ___bpf_prog_run()
2181 (atomic64_t *)(unsigned long) (DST + insn->off), in ___bpf_prog_run()
2187 if (BPF_SIZE(insn->code) == BPF_W) in ___bpf_prog_run()
2189 (atomic_t *)(unsigned long) (DST + insn->off), in ___bpf_prog_run()
2191 else if (BPF_SIZE(insn->code) == BPF_DW) in ___bpf_prog_run()
2193 (atomic64_t *)(unsigned long) (DST + insn->off), in ___bpf_prog_run()
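
The two special cases above follow the usual semantics: BPF_XCHG unconditionally swaps and returns the old value in SRC, while BPF_CMPXCHG stores SRC only if the memory word equals R0 and always leaves the old value in R0. The same semantics expressed with compiler builtins, as a sketch (dst stands for the memory operand at DST + insn->off):

    #include <stdint.h>

    static uint64_t xchg64(uint64_t *dst, uint64_t src)
    {
        /* BPF_XCHG: SRC receives the old memory value */
        return __atomic_exchange_n(dst, src, __ATOMIC_SEQ_CST);
    }

    static uint64_t cmpxchg64(uint64_t *dst, uint64_t r0, uint64_t src)
    {
        /* BPF_CMPXCHG: store SRC iff *dst == R0; R0 gets the old value */
        __atomic_compare_exchange_n(dst, &r0, src, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return r0;
    }
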
2202 switch (BPF_SIZE(insn->code)) { in ___bpf_prog_run()
2206 (SIZE *)(unsigned long)(SRC + insn->off)); \ in ___bpf_prog_run()
2220 switch (BPF_SIZE(insn->code)) { in ___bpf_prog_run()
2224 (SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC); \ in ___bpf_prog_run()
2251 insn->code, insn->imm); in ___bpf_prog_run()
2325 insn->off = (s16) insn->imm; in bpf_patch_call_args()
2326 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - in bpf_patch_call_args()
2328 insn->code = BPF_JMP | BPF_CALL_ARGS; in bpf_patch_call_args()
2347 struct bpf_prog_aux *aux = fp->aux; in __bpf_prog_map_compatible()
2352 if (fp->kprobe_override) in __bpf_prog_map_compatible()
2355 spin_lock(&map->owner_lock); in __bpf_prog_map_compatible()
2357 if (!map->owner) { in __bpf_prog_map_compatible()
2358 map->owner = bpf_map_owner_alloc(map); in __bpf_prog_map_compatible()
2359 if (!map->owner) in __bpf_prog_map_compatible()
2361 map->owner->type = prog_type; in __bpf_prog_map_compatible()
2362 map->owner->jited = fp->jited; in __bpf_prog_map_compatible()
2363 map->owner->xdp_has_frags = aux->xdp_has_frags; in __bpf_prog_map_compatible()
2364 map->owner->expected_attach_type = fp->expected_attach_type; in __bpf_prog_map_compatible()
2365 map->owner->attach_func_proto = aux->attach_func_proto; in __bpf_prog_map_compatible()
2367 map->owner->storage_cookie[i] = in __bpf_prog_map_compatible()
2368 aux->cgroup_storage[i] ? in __bpf_prog_map_compatible()
2369 aux->cgroup_storage[i]->cookie : 0; in __bpf_prog_map_compatible()
2373 ret = map->owner->type == prog_type && in __bpf_prog_map_compatible()
2374 map->owner->jited == fp->jited && in __bpf_prog_map_compatible()
2375 map->owner->xdp_has_frags == aux->xdp_has_frags; in __bpf_prog_map_compatible()
2377 map->map_type == BPF_MAP_TYPE_PROG_ARRAY && in __bpf_prog_map_compatible()
2378 map->owner->expected_attach_type != fp->expected_attach_type) in __bpf_prog_map_compatible()
2383 cookie = aux->cgroup_storage[i] ? in __bpf_prog_map_compatible()
2384 aux->cgroup_storage[i]->cookie : 0; in __bpf_prog_map_compatible()
2385 ret = map->owner->storage_cookie[i] == cookie || in __bpf_prog_map_compatible()
2389 map->owner->attach_func_proto != aux->attach_func_proto) { in __bpf_prog_map_compatible()
2403 spin_unlock(&map->owner_lock); in __bpf_prog_map_compatible()
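
The scheme above makes the first program added to a prog map stamp map->owner; every later program must match it, which is what keeps tail calls from mixing JITed and interpreted code or incompatible program types. A reduced sketch of that stamp-then-compare shape (field set abridged; the real check also covers xdp_has_frags, the expected attach type for prog arrays, storage cookies and the attach_func_proto):

    #include <stdbool.h>

    struct owner { int type; bool jited; };

    static bool map_compatible(struct owner **ownerp, struct owner *fresh,
                               int type, bool jited)
    {
        if (!*ownerp) {
            fresh->type = type;   /* fresh stands in for bpf_map_owner_alloc() */
            fresh->jited = jited;
            *ownerp = fresh;      /* first prog stamps the map */
            return true;
        }
        return (*ownerp)->type == type && (*ownerp)->jited == jited;
    }
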
2412 * are implemented, prohibit adding dev-bound programs to program maps. in bpf_prog_map_compatible()
2414 if (bpf_prog_is_dev_bound(fp->aux)) in bpf_prog_map_compatible()
2422 struct bpf_prog_aux *aux = fp->aux; in bpf_check_tail_call()
2425 mutex_lock(&aux->used_maps_mutex); in bpf_check_tail_call()
2426 for (i = 0; i < aux->used_map_cnt; i++) { in bpf_check_tail_call()
2427 struct bpf_map *map = aux->used_maps[i]; in bpf_check_tail_call()
2433 ret = -EINVAL; in bpf_check_tail_call()
2439 mutex_unlock(&aux->used_maps_mutex); in bpf_check_tail_call()
2447 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); in bpf_prog_select_interpreter()
2448 u32 idx = (round_up(stack_depth, 32) / 32) - 1; in bpf_prog_select_interpreter()
2450 /* may_goto may cause stack size > 512, leading to idx out-of-bounds. in bpf_prog_select_interpreter()
2451 * But for non-JITed programs, we don't need bpf_func, so no bounds in bpf_prog_select_interpreter()
2455 fp->bpf_func = interpreters[idx]; in bpf_prog_select_interpreter()
2458 fp->bpf_func = __bpf_prog_ret0_warn; in bpf_prog_select_interpreter()
2461 fp->bpf_func = __bpf_prog_ret0_warn; in bpf_prog_select_interpreter()
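
The index computed above selects one interpreter variant per 32 bytes of stack, so depths 1..32 share idx 0 and the 512-byte maximum maps to idx 15. The same computation without round_up(), as a sketch:

    static unsigned int interp_idx(unsigned int stack_depth)
    {
        if (stack_depth < 1)
            stack_depth = 1;                  /* max_t(u32, depth, 1) */
        return (stack_depth + 31) / 32 - 1;   /* 1..32 -> 0, 512 -> 15 */
    }
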
2467 * bpf_prog_select_runtime - select exec runtime for BPF program
2484 if (fp->bpf_func) in bpf_prog_select_runtime()
2500 if (!bpf_prog_is_offloaded(fp->aux)) { in bpf_prog_select_runtime()
2507 if (!fp->jited && jit_needed) { in bpf_prog_select_runtime()
2508 *err = -ENOTSUPP; in bpf_prog_select_runtime()
2589 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb); in bpf_prog_array_free_sleepable()
2597 for (item = array->items; item->prog; item++) in bpf_prog_array_length()
2598 if (item->prog != &dummy_bpf_prog.prog) in bpf_prog_array_length()
2607 for (item = array->items; item->prog; item++) in bpf_prog_array_is_empty()
2608 if (item->prog != &dummy_bpf_prog.prog) in bpf_prog_array_is_empty()
2620 for (item = array->items; item->prog; item++) { in bpf_prog_array_copy_core()
2621 if (item->prog == &dummy_bpf_prog.prog) in bpf_prog_array_copy_core()
2623 prog_ids[i] = item->prog->aux->id; in bpf_prog_array_copy_core()
2630 return !!(item->prog); in bpf_prog_array_copy_core()
2648 return -ENOMEM; in bpf_prog_array_copy_to_user()
2653 return -EFAULT; in bpf_prog_array_copy_to_user()
2655 return -ENOSPC; in bpf_prog_array_copy_to_user()
2664 for (item = array->items; item->prog; item++) in bpf_prog_array_delete_safe()
2665 if (item->prog == old_prog) { in bpf_prog_array_delete_safe()
2666 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); in bpf_prog_array_delete_safe()
2672 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2674 * a dummy no-op program.
2682 * * 0 - Success
2683 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2684 * * -ENOENT - Index out of range
2692 * bpf_prog_array_update_at() - Updates the program at the given index
2702 * * 0 - Success
2703 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2704 * * -ENOENT - Index out of range
2712 return -EINVAL; in bpf_prog_array_update_at()
2714 for (item = array->items; item->prog; item++) { in bpf_prog_array_update_at()
2715 if (item->prog == &dummy_bpf_prog.prog) in bpf_prog_array_update_at()
2718 WRITE_ONCE(item->prog, prog); in bpf_prog_array_update_at()
2721 index--; in bpf_prog_array_update_at()
2723 return -ENOENT; in bpf_prog_array_update_at()
2741 existing = old_array->items; in bpf_prog_array_copy()
2742 for (; existing->prog; existing++) { in bpf_prog_array_copy()
2743 if (existing->prog == exclude_prog) { in bpf_prog_array_copy()
2747 if (existing->prog != &dummy_bpf_prog.prog) in bpf_prog_array_copy()
2749 if (existing->prog == include_prog) in bpf_prog_array_copy()
2750 return -EEXIST; in bpf_prog_array_copy()
2755 return -ENOENT; in bpf_prog_array_copy()
2771 return -ENOMEM; in bpf_prog_array_copy()
2772 new = array->items; in bpf_prog_array_copy()
2776 existing = old_array->items; in bpf_prog_array_copy()
2777 for (; existing->prog; existing++) { in bpf_prog_array_copy()
2778 if (existing->prog == exclude_prog || in bpf_prog_array_copy()
2779 existing->prog == &dummy_bpf_prog.prog) in bpf_prog_array_copy()
2782 new->prog = existing->prog; in bpf_prog_array_copy()
2783 new->bpf_cookie = existing->bpf_cookie; in bpf_prog_array_copy()
2788 new->prog = include_prog; in bpf_prog_array_copy()
2789 new->bpf_cookie = bpf_cookie; in bpf_prog_array_copy()
2792 new->prog = NULL; in bpf_prog_array_copy()
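
The copy above runs in two passes: first count the surviving entries (skipping the excluded program and dummy slots, failing with -ENOENT/-EEXIST as shown earlier), then populate a fresh NULL-terminated array with the optional include_prog appended at the tail. The second pass, reduced to a standalone sketch:

    #include <stddef.h>

    struct pai { void *prog; };   /* stand-in for bpf_prog_array_item */

    /* new_items must have room for the survivors plus a terminator. */
    static size_t copy_filtered(const struct pai *old, struct pai *new_items,
                                const void *exclude, void *include,
                                const void *dummy)
    {
        size_t n = 0;

        for (; old->prog; old++) {
            if (old->prog == exclude || old->prog == dummy)
                continue;                   /* drop excluded prog and dummies */
            new_items[n++].prog = old->prog;
        }
        if (include)
            new_items[n++].prog = include;  /* appended at the tail  */
        new_items[n].prog = NULL;           /* NULL-terminated array */
        return n;
    }
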
2813 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC in bpf_prog_array_copy_info()
2824 sleepable = aux->prog->sleepable; in __bpf_free_used_maps()
2827 if (map->ops->map_poke_untrack) in __bpf_free_used_maps()
2828 map->ops->map_poke_untrack(map, aux); in __bpf_free_used_maps()
2830 atomic64_dec(&map->sleepable_refcnt); in __bpf_free_used_maps()
2837 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt); in bpf_free_used_maps()
2838 kfree(aux->used_maps); in bpf_free_used_maps()
2849 if (btf_mod->module) in __bpf_free_used_btfs()
2850 module_put(btf_mod->module); in __bpf_free_used_btfs()
2851 btf_put(btf_mod->btf); in __bpf_free_used_btfs()
2858 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt); in bpf_free_used_btfs()
2859 kfree(aux->used_btfs); in bpf_free_used_btfs()
2869 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); in bpf_prog_free_deferred()
2870 bpf_prog_stream_free(aux->prog); in bpf_prog_free_deferred()
2873 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID) in bpf_prog_free_deferred()
2874 bpf_cgroup_atype_put(aux->cgroup_atype); in bpf_prog_free_deferred()
2879 bpf_prog_dev_bound_destroy(aux->prog); in bpf_prog_free_deferred()
2881 if (aux->prog->has_callchain_buf) in bpf_prog_free_deferred()
2884 if (aux->dst_trampoline) in bpf_prog_free_deferred()
2885 bpf_trampoline_put(aux->dst_trampoline); in bpf_prog_free_deferred()
2886 for (i = 0; i < aux->real_func_cnt; i++) { in bpf_prog_free_deferred()
2891 aux->func[i]->aux->poke_tab = NULL; in bpf_prog_free_deferred()
2892 bpf_jit_free(aux->func[i]); in bpf_prog_free_deferred()
2894 if (aux->real_func_cnt) { in bpf_prog_free_deferred()
2895 kfree(aux->func); in bpf_prog_free_deferred()
2896 bpf_prog_unlock_free(aux->prog); in bpf_prog_free_deferred()
2898 bpf_jit_free(aux->prog); in bpf_prog_free_deferred()
2904 struct bpf_prog_aux *aux = fp->aux; in bpf_prog_free()
2906 if (aux->dst_prog) in bpf_prog_free()
2907 bpf_prog_put(aux->dst_prog); in bpf_prog_free()
2908 bpf_token_put(aux->token); in bpf_prog_free()
2909 INIT_WORK(&aux->work, bpf_prog_free_deferred); in bpf_prog_free()
2910 schedule_work(&aux->work); in bpf_prog_free()
2926 * this function is called from native eBPF and classic-to-eBPF in BPF_CALL_0()
2996 return -ENOTSUPP; in bpf_event_output()
3000 /* Always built-in helper functions. */
3034 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
3047 /* By default, enable the verifier's mitigations against Spectre v1 and v4 for
3065 * The verifier will not patch the insn->imm for the call to the helper if
3114 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3116 * as atomic_xchg() on pointer-sized words.
3124 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3129 return -EFAULT; in skb_copy_bits()
3135 return -ENOTSUPP; in bpf_arch_text_poke()
3140 return ERR_PTR(-ENOTSUPP); in bpf_arch_text_copy()
3145 return -ENOTSUPP; in bpf_arch_text_invalidate()
3193 if (!p->timestamp) { in bpf_check_timed_may_goto()
3194 p->timestamp = time; in bpf_check_timed_may_goto()
3198 if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) { in bpf_check_timed_may_goto()
3206 /* for configs without MMU or 32-bit */
3244 int idx = -1, insn_start, insn_end, len; in bpf_prog_get_file_line()
3250 btf = prog->aux->btf; in bpf_prog_get_file_line()
3251 linfo = prog->aux->linfo; in bpf_prog_get_file_line()
3252 jited_linfo = prog->aux->jited_linfo; in bpf_prog_get_file_line()
3255 return -EINVAL; in bpf_prog_get_file_line()
3256 len = prog->aux->func ? prog->aux->func[prog->aux->func_idx]->len : prog->len; in bpf_prog_get_file_line()
3258 linfo = &prog->aux->linfo[prog->aux->linfo_idx]; in bpf_prog_get_file_line()
3259 jited_linfo = &prog->aux->jited_linfo[prog->aux->linfo_idx]; in bpf_prog_get_file_line()
3263 nr_linfo = prog->aux->nr_linfo - prog->aux->linfo_idx; in bpf_prog_get_file_line()
3272 if (idx == -1) in bpf_prog_get_file_line()
3273 return -ENOENT; in bpf_prog_get_file_line()
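
The lookup above scans the ascending jited_linfo addresses and keeps the last record at or below the target address, returning -ENOENT when the address precedes the first record. A reduced sketch of that scan (array contents hypothetical):

    static int find_linfo(const unsigned long *jited_linfo, int nr_linfo,
                          unsigned long addr)
    {
        int idx = -1;

        for (int i = 0; i < nr_linfo; i++) {
            if (jited_linfo[i] > addr)
                break;
            idx = i;            /* last record starting at or below addr */
        }
        return idx;             /* -1 maps to -ENOENT in the caller */
    }
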
3306 ctxp->prog = prog->aux->main_prog_aux->prog; in find_from_stack_cb()