verifier.c (93cd6fa6806cb3455e8231578840afb031606352) verifier.c (3c839744b33782b930c5c61df35511ede5e5a574)
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but

--- 5 unchanged lines hidden (view full) ---

14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/bpf.h>
17#include <linux/bpf_verifier.h>
18#include <linux/filter.h>
19#include <net/netlink.h>
20#include <linux/file.h>
21#include <linux/vmalloc.h>
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but

--- 5 unchanged lines hidden (view full) ---

14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/bpf.h>
17#include <linux/bpf_verifier.h>
18#include <linux/filter.h>
19#include <net/netlink.h>
20#include <linux/file.h>
21#include <linux/vmalloc.h>
22#include <linux/stringify.h>
22
23/* bpf_check() is a static code analyzer that walks eBPF program
24 * instruction by instruction and updates register/stack state.
25 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
26 *
27 * The first pass is depth-first-search to check that the program is a DAG.
28 * It rejects the following programs:
29 * - larger than BPF_MAXINSNS insns

--- 155 unchanged lines hidden (view full) ---

185 [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj",
186 [FRAME_PTR] = "fp",
187 [PTR_TO_STACK] = "fp",
188 [CONST_IMM] = "imm",
189 [PTR_TO_PACKET] = "pkt",
190 [PTR_TO_PACKET_END] = "pkt_end",
191};
192
23
24/* bpf_check() is a static code analyzer that walks eBPF program
25 * instruction by instruction and updates register/stack state.
26 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
27 *
28 * The first pass is depth-first-search to check that the program is a DAG.
29 * It rejects the following programs:
30 * - larger than BPF_MAXINSNS insns

--- 155 unchanged lines hidden (view full) ---

186 [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj",
187 [FRAME_PTR] = "fp",
188 [PTR_TO_STACK] = "fp",
189 [CONST_IMM] = "imm",
190 [PTR_TO_PACKET] = "pkt",
191 [PTR_TO_PACKET_END] = "pkt_end",
192};
193
194#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x)
195static const char * const func_id_str[] = {
196 __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN)
197};
198#undef __BPF_FUNC_STR_FN
199
200static const char *func_id_name(int id)
201{
202 BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
203
204 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
205 return func_id_str[id];
206 else
207 return "unknown";
208}
209
193static void print_verifier_state(struct bpf_verifier_state *state)
194{
195 struct bpf_reg_state *reg;
196 enum bpf_reg_type t;
197 int i;
198
199 for (i = 0; i < MAX_BPF_REG; i++) {
200 reg = &state->regs[i];

--- 6 unchanged lines hidden (view full) ---

207 else if (t == PTR_TO_PACKET)
208 verbose("(id=%d,off=%d,r=%d)",
209 reg->id, reg->off, reg->range);
210 else if (t == UNKNOWN_VALUE && reg->imm)
211 verbose("%lld", reg->imm);
212 else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
213 t == PTR_TO_MAP_VALUE_OR_NULL ||
214 t == PTR_TO_MAP_VALUE_ADJ)
210static void print_verifier_state(struct bpf_verifier_state *state)
211{
212 struct bpf_reg_state *reg;
213 enum bpf_reg_type t;
214 int i;
215
216 for (i = 0; i < MAX_BPF_REG; i++) {
217 reg = &state->regs[i];

--- 6 unchanged lines hidden (view full) ---

224 else if (t == PTR_TO_PACKET)
225 verbose("(id=%d,off=%d,r=%d)",
226 reg->id, reg->off, reg->range);
227 else if (t == UNKNOWN_VALUE && reg->imm)
228 verbose("%lld", reg->imm);
229 else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
230 t == PTR_TO_MAP_VALUE_OR_NULL ||
231 t == PTR_TO_MAP_VALUE_ADJ)
215 verbose("(ks=%d,vs=%d)",
232 verbose("(ks=%d,vs=%d,id=%u)",
216 reg->map_ptr->key_size,
233 reg->map_ptr->key_size,
217 reg->map_ptr->value_size);
234 reg->map_ptr->value_size,
235 reg->id);
218 if (reg->min_value != BPF_REGISTER_MIN_RANGE)
219 verbose(",min_value=%lld",
220 (long long)reg->min_value);
221 if (reg->max_value != BPF_REGISTER_MAX_RANGE)
222 verbose(",max_value=%llu",
223 (unsigned long long)reg->max_value);
224 }
225 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {

--- 122 unchanged lines hidden (view full) ---

348 } else {
349 verbose("BUG_ld_%02x\n", insn->code);
350 return;
351 }
352 } else if (class == BPF_JMP) {
353 u8 opcode = BPF_OP(insn->code);
354
355 if (opcode == BPF_CALL) {
236 if (reg->min_value != BPF_REGISTER_MIN_RANGE)
237 verbose(",min_value=%lld",
238 (long long)reg->min_value);
239 if (reg->max_value != BPF_REGISTER_MAX_RANGE)
240 verbose(",max_value=%llu",
241 (unsigned long long)reg->max_value);
242 }
243 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {

--- 122 unchanged lines hidden (view full) ---

366 } else {
367 verbose("BUG_ld_%02x\n", insn->code);
368 return;
369 }
370 } else if (class == BPF_JMP) {
371 u8 opcode = BPF_OP(insn->code);
372
373 if (opcode == BPF_CALL) {
356 verbose("(%02x) call %d\n", insn->code, insn->imm);
374 verbose("(%02x) call %s#%d\n", insn->code,
375 func_id_name(insn->imm), insn->imm);
357 } else if (insn->code == (BPF_JMP | BPF_JA)) {
358 verbose("(%02x) goto pc%+d\n",
359 insn->code, insn->off);
360 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
361 verbose("(%02x) exit\n", insn->code);
362 } else if (BPF_SRC(insn->code) == BPF_X) {
363 verbose("(%02x) if r%d %s r%d goto pc%+d\n",
364 insn->code, insn->dst_reg,

--- 77 unchanged lines hidden (view full) ---

442 /* 1st arg to a function */
443 regs[BPF_REG_1].type = PTR_TO_CTX;
444}
445
446static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
447{
448 BUG_ON(regno >= MAX_BPF_REG);
449 regs[regno].type = UNKNOWN_VALUE;
376 } else if (insn->code == (BPF_JMP | BPF_JA)) {
377 verbose("(%02x) goto pc%+d\n",
378 insn->code, insn->off);
379 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
380 verbose("(%02x) exit\n", insn->code);
381 } else if (BPF_SRC(insn->code) == BPF_X) {
382 verbose("(%02x) if r%d %s r%d goto pc%+d\n",
383 insn->code, insn->dst_reg,

--- 77 unchanged lines hidden (view full) ---

461 /* 1st arg to a function */
462 regs[BPF_REG_1].type = PTR_TO_CTX;
463}
464
465static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
466{
467 BUG_ON(regno >= MAX_BPF_REG);
468 regs[regno].type = UNKNOWN_VALUE;
469 regs[regno].id = 0;
450 regs[regno].imm = 0;
451}
452
453static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
454{
455 regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
456 regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
457}

--- 150 unchanged lines hidden (view full) ---

608 return -EACCES;
609 }
610 return 0;
611}
612
613#define MAX_PACKET_OFF 0xffff
614
615static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
470 regs[regno].imm = 0;
471}
472
473static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
474{
475 regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
476 regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
477}

--- 150 unchanged lines hidden (view full) ---

628 return -EACCES;
629 }
630 return 0;
631}
632
633#define MAX_PACKET_OFF 0xffff
634
635static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
616 const struct bpf_call_arg_meta *meta)
636 const struct bpf_call_arg_meta *meta,
637 enum bpf_access_type t)
617{
618 switch (env->prog->type) {
638{
639 switch (env->prog->type) {
640 case BPF_PROG_TYPE_LWT_IN:
641 case BPF_PROG_TYPE_LWT_OUT:
642 /* dst_input() and dst_output() can't write for now */
643 if (t == BPF_WRITE)
644 return false;
619 case BPF_PROG_TYPE_SCHED_CLS:
620 case BPF_PROG_TYPE_SCHED_ACT:
621 case BPF_PROG_TYPE_XDP:
645 case BPF_PROG_TYPE_SCHED_CLS:
646 case BPF_PROG_TYPE_SCHED_ACT:
647 case BPF_PROG_TYPE_XDP:
648 case BPF_PROG_TYPE_LWT_XMIT:
622 if (meta)
623 return meta->pkt_access;
624
625 env->seen_direct_write = true;
626 return true;
627 default:
628 return false;
629 }

--- 182 unchanged lines hidden (view full) ---

812 verbose("attempt to corrupt spilled pointer on stack\n");
813 return -EACCES;
814 }
815 err = check_stack_write(state, off, size, value_regno);
816 } else {
817 err = check_stack_read(state, off, size, value_regno);
818 }
819 } else if (state->regs[regno].type == PTR_TO_PACKET) {
649 if (meta)
650 return meta->pkt_access;
651
652 env->seen_direct_write = true;
653 return true;
654 default:
655 return false;
656 }

--- 182 unchanged lines hidden (view full) ---

839 verbose("attempt to corrupt spilled pointer on stack\n");
840 return -EACCES;
841 }
842 err = check_stack_write(state, off, size, value_regno);
843 } else {
844 err = check_stack_read(state, off, size, value_regno);
845 }
846 } else if (state->regs[regno].type == PTR_TO_PACKET) {
820 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL)) {
847 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
821 verbose("cannot write into packet\n");
822 return -EACCES;
823 }
824 if (t == BPF_WRITE && value_regno >= 0 &&
825 is_pointer_value(env, value_regno)) {
826 verbose("R%d leaks addr into packet\n", value_regno);
827 return -EACCES;
828 }

--- 116 unchanged lines hidden (view full) ---

945 if (arg_type == ARG_ANYTHING) {
946 if (is_pointer_value(env, regno)) {
947 verbose("R%d leaks addr into helper function\n", regno);
948 return -EACCES;
949 }
950 return 0;
951 }
952
848 verbose("cannot write into packet\n");
849 return -EACCES;
850 }
851 if (t == BPF_WRITE && value_regno >= 0 &&
852 is_pointer_value(env, value_regno)) {
853 verbose("R%d leaks addr into packet\n", value_regno);
854 return -EACCES;
855 }

--- 116 unchanged lines hidden (view full) ---

972 if (arg_type == ARG_ANYTHING) {
973 if (is_pointer_value(env, regno)) {
974 verbose("R%d leaks addr into helper function\n", regno);
975 return -EACCES;
976 }
977 return 0;
978 }
979
953 if (type == PTR_TO_PACKET && !may_access_direct_pkt_data(env, meta)) {
980 if (type == PTR_TO_PACKET &&
981 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
954 verbose("helper access to the packet is not allowed\n");
955 return -EACCES;
956 }
957
958 if (arg_type == ARG_PTR_TO_MAP_KEY ||
959 arg_type == ARG_PTR_TO_MAP_VALUE) {
960 expected_type = PTR_TO_STACK;
961 if (type != PTR_TO_PACKET && type != expected_type)

--- 145 unchanged lines hidden (view full) ---

1107 goto error;
1108 break;
1109 default:
1110 break;
1111 }
1112
1113 return 0;
1114error:
982 verbose("helper access to the packet is not allowed\n");
983 return -EACCES;
984 }
985
986 if (arg_type == ARG_PTR_TO_MAP_KEY ||
987 arg_type == ARG_PTR_TO_MAP_VALUE) {
988 expected_type = PTR_TO_STACK;
989 if (type != PTR_TO_PACKET && type != expected_type)

--- 145 unchanged lines hidden (view full) ---

1135 goto error;
1136 break;
1137 default:
1138 break;
1139 }
1140
1141 return 0;
1142error:
1115 verbose("cannot pass map_type %d into func %d\n",
1116 map->map_type, func_id);
1143 verbose("cannot pass map_type %d into func %s#%d\n",
1144 map->map_type, func_id_name(func_id), func_id);
1117 return -EINVAL;
1118}
1119
1120static int check_raw_mode(const struct bpf_func_proto *fn)
1121{
1122 int count = 0;
1123
1124 if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)

--- 40 unchanged lines hidden (view full) ---

1165 struct bpf_reg_state *regs = state->regs;
1166 struct bpf_reg_state *reg;
1167 struct bpf_call_arg_meta meta;
1168 bool changes_data;
1169 int i, err;
1170
1171 /* find function prototype */
1172 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1145 return -EINVAL;
1146}
1147
1148static int check_raw_mode(const struct bpf_func_proto *fn)
1149{
1150 int count = 0;
1151
1152 if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)

--- 40 unchanged lines hidden (view full) ---

1193 struct bpf_reg_state *regs = state->regs;
1194 struct bpf_reg_state *reg;
1195 struct bpf_call_arg_meta meta;
1196 bool changes_data;
1197 int i, err;
1198
1199 /* find function prototype */
1200 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1173 verbose("invalid func %d\n", func_id);
1201 verbose("invalid func %s#%d\n", func_id_name(func_id), func_id);
1174 return -EINVAL;
1175 }
1176
1177 if (env->prog->aux->ops->get_func_proto)
1178 fn = env->prog->aux->ops->get_func_proto(func_id);
1179
1180 if (!fn) {
1202 return -EINVAL;
1203 }
1204
1205 if (env->prog->aux->ops->get_func_proto)
1206 fn = env->prog->aux->ops->get_func_proto(func_id);
1207
1208 if (!fn) {
1181 verbose("unknown func %d\n", func_id);
1209 verbose("unknown func %s#%d\n", func_id_name(func_id), func_id);
1182 return -EINVAL;
1183 }
1184
1185 /* eBPF programs must be GPL compatible to use GPL-ed functions */
1186 if (!env->prog->gpl_compatible && fn->gpl_only) {
1187 verbose("cannot call GPL only function from proprietary program\n");
1188 return -EINVAL;
1189 }
1190
1191 changes_data = bpf_helper_changes_skb_data(fn->func);
1192
1193 memset(&meta, 0, sizeof(meta));
1194 meta.pkt_access = fn->pkt_access;
1195
1196 /* We only support one arg being in raw mode at the moment, which
1197 * is sufficient for the helper functions we have right now.
1198 */
1199 err = check_raw_mode(fn);
1200 if (err) {
1210 return -EINVAL;
1211 }
1212
1213 /* eBPF programs must be GPL compatible to use GPL-ed functions */
1214 if (!env->prog->gpl_compatible && fn->gpl_only) {
1215 verbose("cannot call GPL only function from proprietary program\n");
1216 return -EINVAL;
1217 }
1218
1219 changes_data = bpf_helper_changes_skb_data(fn->func);
1220
1221 memset(&meta, 0, sizeof(meta));
1222 meta.pkt_access = fn->pkt_access;
1223
1224 /* We only support one arg being in raw mode at the moment, which
1225 * is sufficient for the helper functions we have right now.
1226 */
1227 err = check_raw_mode(fn);
1228 if (err) {
1201 verbose("kernel subsystem misconfigured func %d\n", func_id);
1229 verbose("kernel subsystem misconfigured func %s#%d\n",
1230 func_id_name(func_id), func_id);
1202 return err;
1203 }
1204
1205 /* check args */
1206 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1207 if (err)
1208 return err;
1209 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);

--- 37 unchanged lines hidden (view full) ---

1247 * can check 'value_size' boundary of memory access
1248 * to map element returned from bpf_map_lookup_elem()
1249 */
1250 if (meta.map_ptr == NULL) {
1251 verbose("kernel subsystem misconfigured verifier\n");
1252 return -EINVAL;
1253 }
1254 regs[BPF_REG_0].map_ptr = meta.map_ptr;
1231 return err;
1232 }
1233
1234 /* check args */
1235 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1236 if (err)
1237 return err;
1238 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);

--- 37 unchanged lines hidden (view full) ---

1276 * can check 'value_size' boundary of memory access
1277 * to map element returned from bpf_map_lookup_elem()
1278 */
1279 if (meta.map_ptr == NULL) {
1280 verbose("kernel subsystem misconfigured verifier\n");
1281 return -EINVAL;
1282 }
1283 regs[BPF_REG_0].map_ptr = meta.map_ptr;
1284 regs[BPF_REG_0].id = ++env->id_gen;
1255 } else {
1285 } else {
1256 verbose("unknown return type %d of func %d\n",
1257 fn->ret_type, func_id);
1286 verbose("unknown return type %d of func %s#%d\n",
1287 fn->ret_type, func_id_name(func_id), func_id);
1258 return -EINVAL;
1259 }
1260
1261 err = check_map_func_compatibility(meta.map_ptr, func_id);
1262 if (err)
1263 return err;
1264
1265 if (changes_data)

--- 180 unchanged lines hidden (view full) ---

1446static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1447 struct bpf_insn *insn)
1448{
1449 struct bpf_reg_state *regs = env->cur_state.regs;
1450 struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1451 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1452 u8 opcode = BPF_OP(insn->code);
1453
1288 return -EINVAL;
1289 }
1290
1291 err = check_map_func_compatibility(meta.map_ptr, func_id);
1292 if (err)
1293 return err;
1294
1295 if (changes_data)

--- 180 unchanged lines hidden (view full) ---

1476static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1477 struct bpf_insn *insn)
1478{
1479 struct bpf_reg_state *regs = env->cur_state.regs;
1480 struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1481 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1482 u8 opcode = BPF_OP(insn->code);
1483
1454 /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
1455 * Don't care about overflow or negative values, just add them
1484 /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
1485 * insn. Don't care about overflow or negative values, just add them
1456 */
1457 if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
1458 dst_reg->imm += insn->imm;
1459 else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
1460 src_reg->type == CONST_IMM)
1461 dst_reg->imm += src_reg->imm;
1486 */
1487 if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
1488 dst_reg->imm += insn->imm;
1489 else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
1490 src_reg->type == CONST_IMM)
1491 dst_reg->imm += src_reg->imm;
1492 else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
1493 dst_reg->imm |= insn->imm;
1494 else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
1495 src_reg->type == CONST_IMM)
1496 dst_reg->imm |= src_reg->imm;
1462 else
1463 mark_reg_unknown_value(regs, insn->dst_reg);
1464 return 0;
1465}
1466
1467static void check_reg_overflow(struct bpf_reg_state *reg)
1468{
1469 if (reg->max_value > BPF_REGISTER_MAX_RANGE)

--- 4 unchanged lines hidden (view full) ---

1474}
1475
1476static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1477 struct bpf_insn *insn)
1478{
1479 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1480 s64 min_val = BPF_REGISTER_MIN_RANGE;
1481 u64 max_val = BPF_REGISTER_MAX_RANGE;
1497 else
1498 mark_reg_unknown_value(regs, insn->dst_reg);
1499 return 0;
1500}
1501
1502static void check_reg_overflow(struct bpf_reg_state *reg)
1503{
1504 if (reg->max_value > BPF_REGISTER_MAX_RANGE)

--- 4 unchanged lines hidden (view full) ---

1509}
1510
1511static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1512 struct bpf_insn *insn)
1513{
1514 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1515 s64 min_val = BPF_REGISTER_MIN_RANGE;
1516 u64 max_val = BPF_REGISTER_MAX_RANGE;
1482 bool min_set = false, max_set = false;
1483 u8 opcode = BPF_OP(insn->code);
1484
1485 dst_reg = &regs[insn->dst_reg];
1486 if (BPF_SRC(insn->code) == BPF_X) {
1487 check_reg_overflow(&regs[insn->src_reg]);
1488 min_val = regs[insn->src_reg].min_value;
1489 max_val = regs[insn->src_reg].max_value;
1490

--- 6 unchanged lines hidden (view full) ---

1497 if (regs[insn->src_reg].type != CONST_IMM &&
1498 regs[insn->src_reg].type != UNKNOWN_VALUE) {
1499 min_val = BPF_REGISTER_MIN_RANGE;
1500 max_val = BPF_REGISTER_MAX_RANGE;
1501 }
1502 } else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
1503 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
1504 min_val = max_val = insn->imm;
1517 u8 opcode = BPF_OP(insn->code);
1518
1519 dst_reg = &regs[insn->dst_reg];
1520 if (BPF_SRC(insn->code) == BPF_X) {
1521 check_reg_overflow(&regs[insn->src_reg]);
1522 min_val = regs[insn->src_reg].min_value;
1523 max_val = regs[insn->src_reg].max_value;
1524

--- 6 unchanged lines hidden (view full) ---

1531 if (regs[insn->src_reg].type != CONST_IMM &&
1532 regs[insn->src_reg].type != UNKNOWN_VALUE) {
1533 min_val = BPF_REGISTER_MIN_RANGE;
1534 max_val = BPF_REGISTER_MAX_RANGE;
1535 }
1536 } else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
1537 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
1538 min_val = max_val = insn->imm;
1505 min_set = max_set = true;
1506 }
1507
1508 /* We don't know anything about what was done to this register, mark it
1509 * as unknown.
1510 */
1511 if (min_val == BPF_REGISTER_MIN_RANGE &&
1512 max_val == BPF_REGISTER_MAX_RANGE) {
1513 reset_reg_range_values(regs, insn->dst_reg);

--- 149 unchanged lines hidden (view full) ---

1663 */
1664 regs[insn->dst_reg] = regs[insn->src_reg];
1665 } else {
1666 if (is_pointer_value(env, insn->src_reg)) {
1667 verbose("R%d partial copy of pointer\n",
1668 insn->src_reg);
1669 return -EACCES;
1670 }
1539 }
1540
1541 /* We don't know anything about what was done to this register, mark it
1542 * as unknown.
1543 */
1544 if (min_val == BPF_REGISTER_MIN_RANGE &&
1545 max_val == BPF_REGISTER_MAX_RANGE) {
1546 reset_reg_range_values(regs, insn->dst_reg);

--- 149 unchanged lines hidden (view full) ---

1696 */
1697 regs[insn->dst_reg] = regs[insn->src_reg];
1698 } else {
1699 if (is_pointer_value(env, insn->src_reg)) {
1700 verbose("R%d partial copy of pointer\n",
1701 insn->src_reg);
1702 return -EACCES;
1703 }
1671 regs[insn->dst_reg].type = UNKNOWN_VALUE;
1672 regs[insn->dst_reg].map_ptr = NULL;
1704 mark_reg_unknown_value(regs, insn->dst_reg);
1673 }
1674 } else {
1675 /* case: R = imm
1676 * remember the value we stored into this reg
1677 */
1678 regs[insn->dst_reg].type = CONST_IMM;
1679 regs[insn->dst_reg].imm = insn->imm;
1680 regs[insn->dst_reg].max_value = insn->imm;

--- 245 unchanged lines hidden (view full) ---

1926 default:
1927 break;
1928 }
1929
1930 check_reg_overflow(false_reg);
1931 check_reg_overflow(true_reg);
1932}
1933
1705 }
1706 } else {
1707 /* case: R = imm
1708 * remember the value we stored into this reg
1709 */
1710 regs[insn->dst_reg].type = CONST_IMM;
1711 regs[insn->dst_reg].imm = insn->imm;
1712 regs[insn->dst_reg].max_value = insn->imm;

--- 245 unchanged lines hidden (view full) ---

1958 default:
1959 break;
1960 }
1961
1962 check_reg_overflow(false_reg);
1963 check_reg_overflow(true_reg);
1964}
1965
1966static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
1967 enum bpf_reg_type type)
1968{
1969 struct bpf_reg_state *reg = &regs[regno];
1970
1971 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
1972 reg->type = type;
1973 if (type == UNKNOWN_VALUE)
1974 mark_reg_unknown_value(regs, regno);
1975 }
1976}
1977
1978/* The logic is similar to find_good_pkt_pointers(), both could eventually
1979 * be folded together at some point.
1980 */
1981static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
1982 enum bpf_reg_type type)
1983{
1984 struct bpf_reg_state *regs = state->regs;
1985 int i;
1986
1987 for (i = 0; i < MAX_BPF_REG; i++)
1988 mark_map_reg(regs, i, regs[regno].id, type);
1989
1990 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1991 if (state->stack_slot_type[i] != STACK_SPILL)
1992 continue;
1993 mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE,
1994 regs[regno].id, type);
1995 }
1996}
1997
1934static int check_cond_jmp_op(struct bpf_verifier_env *env,
1935 struct bpf_insn *insn, int *insn_idx)
1936{
1937 struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
1938 struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
1939 u8 opcode = BPF_OP(insn->code);
1940 int err;
1941

--- 71 unchanged lines hidden (view full) ---

2013 reg_set_min_max(&other_branch->regs[insn->dst_reg],
2014 dst_reg, insn->imm, opcode);
2015 }
2016
2017 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2018 if (BPF_SRC(insn->code) == BPF_K &&
2019 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2020 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1998static int check_cond_jmp_op(struct bpf_verifier_env *env,
1999 struct bpf_insn *insn, int *insn_idx)
2000{
2001 struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
2002 struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2003 u8 opcode = BPF_OP(insn->code);
2004 int err;
2005

--- 71 unchanged lines hidden (view full) ---

2077 reg_set_min_max(&other_branch->regs[insn->dst_reg],
2078 dst_reg, insn->imm, opcode);
2079 }
2080
2081 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2082 if (BPF_SRC(insn->code) == BPF_K &&
2083 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2084 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2021 if (opcode == BPF_JEQ) {
2022 /* next fallthrough insn can access memory via
2023 * this register
2024 */
2025 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
 2026			/* branch target cannot access it, since reg == 0 */
2027 mark_reg_unknown_value(other_branch->regs,
2028 insn->dst_reg);
2029 } else {
2030 other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
2031 mark_reg_unknown_value(regs, insn->dst_reg);
2032 }
2085 /* Mark all identical map registers in each branch as either
2086 * safe or unknown depending R == 0 or R != 0 conditional.
2087 */
2088 mark_map_regs(this_branch, insn->dst_reg,
2089 opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
2090 mark_map_regs(other_branch, insn->dst_reg,
2091 opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
2033 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2034 dst_reg->type == PTR_TO_PACKET &&
2035 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2036 find_good_pkt_pointers(this_branch, dst_reg);
2037 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2038 dst_reg->type == PTR_TO_PACKET_END &&
2039 regs[insn->src_reg].type == PTR_TO_PACKET) {
2040 find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);

--- 1207 unchanged lines hidden ---
2092 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2093 dst_reg->type == PTR_TO_PACKET &&
2094 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2095 find_good_pkt_pointers(this_branch, dst_reg);
2096 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2097 dst_reg->type == PTR_TO_PACKET_END &&
2098 regs[insn->src_reg].type == PTR_TO_PACKET) {
2099 find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);

--- 1207 unchanged lines hidden ---