/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array *array;
	u32 elem_size, array_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	elem_size = round_up(attr->value_size, 8);

	/* check round_up into zero and u32 overflow */
	if (elem_size == 0 ||
	    attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
		return ERR_PTR(-ENOMEM);

	array_size = sizeof(*array) + attr->max_entries * elem_size;

	/* allocate all map elements and zero-initialize them */
	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
	if (!array) {
		array = vzalloc(array_size);
		if (!array)
			return ERR_PTR(-ENOMEM);
	}

	/* copy mandatory map attributes */
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;

	array->elem_size = elem_size;

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return NULL;

	return array->value + array->elem_size * index;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (map_flags > BPF_EXIST)
		/* unknown flags */
		return -EINVAL;

	if (index >= array->map.max_entries)
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (map_flags == BPF_NOEXIST)
		/* all elements already exist */
		return -EEXIST;

	memcpy(array->value + array->elem_size * index, value, array->elem_size);
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	kvfree(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	/* only bpf_prog file descriptors can be stored in prog_array map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->prog[i] != NULL);
	kvfree(array);
}

static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
static int prog_array_map_update_elem(struct bpf_map *map, void *key,
				      void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog, *old_prog;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	old_prog = xchg(array->prog + index, prog);
	if (old_prog)
		bpf_prog_put_rcu(old_prog);

	return 0;
}

static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *old_prog;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_prog = xchg(array->prog + index, NULL);
	if (old_prog) {
		bpf_prog_put_rcu(old_prog);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		prog_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = prog_array_map_lookup_elem,
	.map_update_elem = prog_array_map_update_elem,
	.map_delete_elem = prog_array_map_delete_elem,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);
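
/*
 * Usage sketch (illustrative only, not part of the kernel build): a minimal
 * userspace program exercising BPF_MAP_TYPE_ARRAY through the bpf(2) syscall
 * UAPI declared in <linux/bpf.h>.  The sys_bpf() wrapper and the chosen
 * key/value sizes are assumptions made for this example, not kernel symbols.
 * Error handling is kept to a minimum.
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *			    unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 *	int main(void)
 *	{
 *		union bpf_attr create = {
 *			.map_type    = BPF_MAP_TYPE_ARRAY,
 *			.key_size    = sizeof(__u32),	// must be 4, see array_map_alloc()
 *			.value_size  = sizeof(long long),
 *			.max_entries = 256,
 *		};
 *		int map_fd = sys_bpf(BPF_MAP_CREATE, &create, sizeof(create));
 *		if (map_fd < 0)
 *			return 1;
 *
 *		__u32 key = 7;
 *		long long value = 42, out = 0;
 *		union bpf_attr update = {
 *			.map_fd = map_fd,
 *			.key    = (__u64)(unsigned long)&key,
 *			.value  = (__u64)(unsigned long)&value,
 *			.flags  = BPF_ANY,	// BPF_NOEXIST fails: slots pre-exist
 *		};
 *		if (sys_bpf(BPF_MAP_UPDATE_ELEM, &update, sizeof(update)))
 *			return 1;
 *
 *		union bpf_attr lookup = {
 *			.map_fd = map_fd,
 *			.key    = (__u64)(unsigned long)&key,
 *			.value  = (__u64)(unsigned long)&out,
 *		};
 *		if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &lookup, sizeof(lookup)) == 0)
 *			printf("index %u -> %lld\n", key, out);
 *		return 0;
 *	}
 */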