--- act_bpf.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2)
+++ act_bpf.c (a85a970af265f156740977168b542234511b28a8)
 /*
  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
--- 20 unchanged lines hidden ---
         struct sock_filter *bpf_ops;
         const char *bpf_name;
         u32 bpf_fd;
         u16 bpf_num_ops;
         bool is_ebpf;
 };
 
 static int bpf_net_id;
+static struct tc_action_ops act_bpf_ops;
 
 static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
                    struct tcf_result *res)
 {
-        struct tcf_bpf *prog = act->priv;
+        struct tcf_bpf *prog = to_bpf(act);
         struct bpf_prog *filter;
         int action, filter_res;
         bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
 
         if (unlikely(!skb_mac_header_was_set(skb)))
                 return TC_ACT_UNSPEC;
 
         tcf_lastuse_update(&prog->tcf_tm);
         bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
 
         rcu_read_lock();
         filter = rcu_dereference(prog->filter);
         if (at_ingress) {
                 __skb_push(skb, skb->mac_len);
+                bpf_compute_data_end(skb);
                 filter_res = BPF_PROG_RUN(filter, skb);
                 __skb_pull(skb, skb->mac_len);
         } else {
+                bpf_compute_data_end(skb);
                 filter_res = BPF_PROG_RUN(filter, skb);
         }
         rcu_read_unlock();
 
         /* A BPF program may overwrite the default action opcode.
          * Similarly as in cls_bpf, if filter_res == -1 we use the
          * default action specified from tc.
          *
--- 60 unchanged lines hidden ---
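The two bpf_compute_data_end() calls are additions on the a85a970af side. Programs using direct packet access compare against a data_end pointer cached in skb->cb, so the cache has to be refreshed after the MAC header is pushed back at ingress (and before BPF_PROG_RUN() on either path), otherwise the program would check against a stale bound. A minimal sketch of what the helper does, assuming the include/linux/filter.h definition of this period (not part of this diff):

        struct bpf_skb_data_end {
                struct qdisc_skb_cb qdisc_cb;
                void *data_end;        /* cached end of linear data */
        };

        static inline void bpf_compute_data_end(struct sk_buff *skb)
        {
                struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

                /* refresh the bound that the program's data_end checks use */
                cb->data_end = skb->data + skb_headlen(skb);
        }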
 
         return 0;
 }
 
 static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
                         int bind, int ref)
 {
         unsigned char *tp = skb_tail_pointer(skb);
-        struct tcf_bpf *prog = act->priv;
+        struct tcf_bpf *prog = to_bpf(act);
         struct tc_act_bpf opt = {
                 .index = prog->tcf_index,
                 .refcnt = prog->tcf_refcnt - ref,
                 .bindcnt = prog->tcf_bindcnt - bind,
                 .action = prog->tcf_action,
         };
         struct tcf_t tm;
         int ret;
 
         if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
                 goto nla_put_failure;
 
         if (tcf_bpf_is_ebpf(prog))
                 ret = tcf_bpf_dump_ebpf_info(prog, skb);
         else
                 ret = tcf_bpf_dump_bpf_info(prog, skb);
         if (ret)
                 goto nla_put_failure;
 
-        tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install);
-        tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
-        tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
-
-        if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
+        tcf_tm_dump(&tm, &prog->tcf_tm);
+        if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
+                          TCA_ACT_BPF_PAD))
                 goto nla_put_failure;
 
         return skb->len;
 
 nla_put_failure:
         nlmsg_trim(skb, tp);
         return -1;
 }
 
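The timestamp dump in tcf_bpf_dump() is the other functional change here: the open-coded jiffies conversions are replaced by tcf_tm_dump(), and the attribute is emitted with nla_put_64bit() plus a TCA_ACT_BPF_PAD pad attribute so the 64-bit members of struct tcf_t stay naturally aligned in the netlink message. A rough sketch of what tcf_tm_dump() is assumed to do, based on the act_api helpers of this period (the firstuse field in particular is an assumption and is not visible in this diff):

        static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
        {
                dtm->install = jiffies_to_clock_t(jiffies - stm->install);
                dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
                dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
                dtm->expires = jiffies_to_clock_t(stm->expires);
        }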
 static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
         [TCA_ACT_BPF_PARMS]   = { .len = sizeof(struct tc_act_bpf) },
         [TCA_ACT_BPF_FD]      = { .type = NLA_U32 },
-        [TCA_ACT_BPF_NAME]    = { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN },
+        [TCA_ACT_BPF_NAME]    = { .type = NLA_NUL_STRING,
+                                  .len = ACT_BPF_NAME_LEN },
         [TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
         [TCA_ACT_BPF_OPS]     = { .type = NLA_BINARY,
                                   .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
 };
 
 static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 {
         struct sock_filter *bpf_ops;
--- 36 unchanged lines hidden ---
 static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 {
         struct bpf_prog *fp;
         char *name = NULL;
         u32 bpf_fd;
 
         bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
 
-        fp = bpf_prog_get(bpf_fd);
+        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
         if (IS_ERR(fp))
                 return PTR_ERR(fp);
 
-        if (fp->type != BPF_PROG_TYPE_SCHED_ACT) {
-                bpf_prog_put(fp);
-                return -EINVAL;
-        }
-
         if (tb[TCA_ACT_BPF_NAME]) {
                 name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
                                nla_len(tb[TCA_ACT_BPF_NAME]),
                                GFP_KERNEL);
                 if (!name) {
                         bpf_prog_put(fp);
                         return -ENOMEM;
                 }
--- 27 unchanged lines hidden ---
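In tcf_bpf_init_from_efd(), bpf_prog_get_type() folds the fd lookup and the program-type check into one call, which is why the explicit fp->type test and its error path disappear on the new side. A functionally equivalent sketch of that behaviour (not the actual implementation in kernel/bpf/syscall.c):

        struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
        {
                struct bpf_prog *prog = bpf_prog_get(ufd);

                if (IS_ERR(prog))
                        return prog;
                /* wrong program type: drop the reference and fail */
                if (prog->type != type) {
                        bpf_prog_put(prog);
                        return ERR_PTR(-EINVAL);
                }
                return prog;
        }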
          */
         cfg->filter = rcu_dereference_protected(prog->filter, 1);
 
         cfg->bpf_ops = prog->bpf_ops;
         cfg->bpf_name = prog->bpf_name;
 }
 
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
-                        struct nlattr *est, struct tc_action *act,
+                        struct nlattr *est, struct tc_action **act,
                         int replace, int bind)
 {
         struct tc_action_net *tn = net_generic(net, bpf_net_id);
         struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
         struct tcf_bpf_cfg cfg, old;
         struct tc_act_bpf *parm;
         struct tcf_bpf *prog;
         bool is_bpf, is_ebpf;
--- 8 unchanged lines hidden ---
 
         if (!tb[TCA_ACT_BPF_PARMS])
                 return -EINVAL;
 
         parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
 
         if (!tcf_hash_check(tn, parm->index, act, bind)) {
                 ret = tcf_hash_create(tn, parm->index, est, act,
-                                      sizeof(*prog), bind, true);
+                                      &act_bpf_ops, bind, true);
                 if (ret < 0)
                         return ret;
 
                 res = ACT_P_CREATED;
         } else {
                 /* Don't override defaults. */
                 if (bind)
                         return 0;
 
-                tcf_hash_release(act, bind);
+                tcf_hash_release(*act, bind);
                 if (!replace)
                         return -EEXIST;
         }
 
         is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
         is_ebpf = tb[TCA_ACT_BPF_FD];
 
         if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                 ret = -EINVAL;
                 goto out;
         }
 
         memset(&cfg, 0, sizeof(cfg));
 
         ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
                        tcf_bpf_init_from_efd(tb, &cfg);
         if (ret < 0)
                 goto out;
 
-        prog = to_bpf(act);
+        prog = to_bpf(*act);
         ASSERT_RTNL();
 
         if (res != ACT_P_CREATED)
                 tcf_bpf_prog_fill_cfg(prog, &old);
 
         prog->bpf_ops = cfg.bpf_ops;
         prog->bpf_name = cfg.bpf_name;
 
         if (cfg.bpf_num_ops)
                 prog->bpf_num_ops = cfg.bpf_num_ops;
         if (cfg.bpf_fd)
                 prog->bpf_fd = cfg.bpf_fd;
 
         prog->tcf_action = parm->action;
         rcu_assign_pointer(prog->filter, cfg.filter);
 
         if (res == ACT_P_CREATED) {
-                tcf_hash_insert(tn, act);
+                tcf_hash_insert(tn, *act);
         } else {
                 /* make sure the program being replaced is no longer executing */
                 synchronize_rcu();
                 tcf_bpf_cfg_cleanup(&old);
         }
 
         return res;
 out:
         if (res == ACT_P_CREATED)
-                tcf_hash_cleanup(act, est);
+                tcf_hash_cleanup(*act, est);
 
         return ret;
 }
 
 static void tcf_bpf_cleanup(struct tc_action *act, int bind)
 {
         struct tcf_bpf_cfg tmp;
 
-        tcf_bpf_prog_fill_cfg(act->priv, &tmp);
+        tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
         tcf_bpf_cfg_cleanup(&tmp);
 }
 
 static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
-                          struct tc_action *a)
+                          const struct tc_action_ops *ops)
 {
         struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
-        return tcf_generic_walker(tn, skb, cb, type, a);
+        return tcf_generic_walker(tn, skb, cb, type, ops);
 }
 
-static int tcf_bpf_search(struct net *net, struct tc_action *a, u32 index)
+static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
 {
         struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
         return tcf_hash_search(tn, a, index);
 }
 
 static struct tc_action_ops act_bpf_ops __read_mostly = {
         .kind           =       "bpf",
         .type           =       TCA_ACT_BPF,
         .owner          =       THIS_MODULE,
         .act            =       tcf_bpf,
         .dump           =       tcf_bpf_dump,
         .cleanup        =       tcf_bpf_cleanup,
         .init           =       tcf_bpf_init,
         .walk           =       tcf_bpf_walker,
         .lookup         =       tcf_bpf_search,
+        .size           =       sizeof(struct tcf_bpf),
 };
 
 static __net_init int bpf_init_net(struct net *net)
 {
         struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
         return tc_action_net_init(tn, &act_bpf_ops, BPF_TAB_MASK);
 }
--- 31 unchanged lines hidden ---
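Most of the remaining churn follows from the tc_action/tcf_common consolidation on the a85a970af side: the action's private struct now embeds struct tc_action as its first member, so to_bpf() becomes a plain cast instead of dereferencing act->priv; the hash layer sizes the allocation from ops->size (hence the new .size member and passing &act_bpf_ops to tcf_hash_create() instead of sizeof(*prog)); and the init/lookup callbacks take struct tc_action ** while the walker receives the ops pointer. A hedged sketch of the shape this relies on (abbreviated, not the exact include/net/tc_act/tc_bpf.h contents):

        struct tcf_bpf {
                struct tc_action        common;  /* must be first: to_bpf() is a cast */
                struct bpf_prog __rcu   *filter;
                /* bpf_ops, bpf_name, bpf_fd, bpf_num_ops elided */
        };
        #define to_bpf(a) ((struct tcf_bpf *)a)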