// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

/*
 * Feature probing: detect at runtime which BPF program types, map types
 * and helper functions the running kernel (or an offload device, when a
 * non-zero ifindex is given) supports, by attempting minimal load/create
 * operations and inspecting the outcome.
 */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"

/* Return true if 'pattern' occurs as a substring of 'buffer'. */
static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}

/*
 * Read the PCI vendor ID of the device backing netdev 'ifindex' from
 * sysfs. Returns the vendor ID on success, -1 on any failure (unknown
 * ifindex, unreadable sysfs file, or a value too long for the buffer).
 */
static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	/* Reject values that would not fit with a terminating NUL. */
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}

/*
 * Return the running kernel version packed as (major << 16) |
 * (minor << 8) | patchlevel, i.e. the KERNEL_VERSION() encoding
 * expected in bpf_attr's kern_version field.
 */
static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}

/*
 * Attempt to load a program of 'prog_type' built from 'insns'. Some
 * program types need extra attributes to pass validation (an expected
 * attach type, or a kernel version for kprobes); set those up first.
 * The verifier log, if requested, is captured into 'buf'. The resulting
 * fd, if any, is closed immediately — callers inspect errno and/or the
 * log instead of the fd.
 */
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		/* This type requires an expected attach type to load. */
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	/* All remaining types load with default attributes. */
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}

/*
 * Probe whether 'prog_type' is supported (on the device behind
 * 'ifindex' when non-zero, otherwise on the kernel itself) by loading
 * a trivial "return 0" program. Support is inferred from errno after
 * the attempt: EINVAL (type unknown) and EOPNOTSUPP (type known but
 * unsupported) both mean "not supported"; anything else — including a
 * successful load, which leaves errno at 0 — means "supported".
 */
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}

/*
 * Load a minimal raw BTF blob (header + type section + string section)
 * describing a struct containing a bpf_spin_lock, as required to probe
 * BPF_MAP_TYPE_SK_STORAGE. Returns the BTF fd on success, a negative
 * value on failure.
 */
static int load_btf(void)
{
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) \
	(name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) \
	(name), (type), (bits_offset)

	/* String section; name offsets: 1="bpf_spin_lock", 15="val",
	 * 19="cnt", 23="l".
	 */
	const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 btf_raw_types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};
	struct btf_header btf_hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	__u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
		     sizeof(btf_str_sec)];

	/* Assemble the blob: header, then types, then strings. */
	memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
	memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
	memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
	       btf_str_sec, sizeof(btf_str_sec));

	return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
}

/*
 * Probe whether 'map_type' is supported (on the device behind 'ifindex'
 * when non-zero, otherwise on the kernel) by attempting to create a
 * minimal map of that type. Each map type gets whatever key/value
 * sizes, flags, or auxiliary resources (inner map fd, BTF fd) it needs
 * to pass validation. Returns true if creation succeeded.
 */
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	/* Defaults; overridden below for types with stricter parameters. */
	key_size	= sizeof(__u32);
	value_size	= sizeof(__u32);
	max_entries	= 1;
	map_flags	= 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size	= sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size	= sizeof(__u64);
		value_size	= sizeof(__u64);
		map_flags	= BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size	= sizeof(struct bpf_cgroup_storage_key);
		value_size	= sizeof(__u64);
		max_entries	= 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size	= 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		/* Requires BTF describing a spin-lock-bearing value type;
		 * type ids match the [1]/[3] entries built in load_btf().
		 */
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_btf();
		if (btf_fd < 0)
			return false;
		break;
	/* All remaining types probe fine with the defaults above. */
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		/* Map-in-map types need a template inner map fd. */
		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}

/*
 * Probe whether helper 'id' can be called from programs of 'prog_type'
 * (on the device behind 'ifindex' when non-zero). A two-instruction
 * program calling the helper is loaded and the verifier log is
 * searched: "invalid func"/"unknown func" in the log mean the helper
 * is unavailable. For Netronome offload devices, additional
 * firmware-specific rejection messages are also checked.
 */
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
			      !grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}