// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}

static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}

static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}

static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		xattr.expected_attach_type = BPF_SK_LOOKUP;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}

bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
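/*
 * Usage sketch (illustrative only, not part of this file): callers such
 * as bpftool's "feature probe" subcommand use the probe above with
 * ifindex == 0 to query the host kernel, or a netdev ifindex to query
 * hardware offload support:
 *
 *	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
 *		printf("XDP program type is supported\n");
 */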
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);

	free(raw_btf);
	return btf_fd;
}

static int load_local_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* struct bpf_spin_lock */                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                               /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0),  /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32), /* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
}
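/*
 * Note on the encodings above: the numeric name arguments (1, 15, 19, 23)
 * are byte offsets into strs[], naming "bpf_spin_lock", "val", "cnt" and
 * "l" respectively, and libbpf__load_raw_btf() hands the kernel a single
 * blob laid out as btf_header | type section | string section.
 */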
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;
	map_flags = 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size = sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size = sizeof(struct bpf_cgroup_storage_key);
		value_size = sizeof(__u64);
		max_entries = 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size = 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_local_storage_btf();
		if (btf_fd < 0)
			return false;
		break;
	case BPF_MAP_TYPE_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = 4096;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STRUCT_OPS:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}

bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
				     !grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}

/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
		   ifindex);

	return errno != E2BIG && errno != EINVAL;
}
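/*
 * Usage sketch (illustrative only): probing whether a helper is callable
 * from a given program type, and whether the kernel accepts programs
 * larger than BPF_MAXINSNS:
 *
 *	bool can_lookup = bpf_probe_helper(BPF_FUNC_map_lookup_elem,
 *					   BPF_PROG_TYPE_XDP, 0);
 *	bool big_progs = bpf_probe_large_insn_limit(0);
 */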