#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain);
void tcf_block_put(struct tcf_block *block);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int	action;
	int	police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
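/* Illustrative sketch, not part of the original header: how a classifier's
 * ->change() path typically combines tcf_exts_init(), tcf_exts_validate()
 * (declared further below) and tcf_bind_filter(), modeled on in-tree
 * classifiers such as cls_basic. The TCA_FOO_* attributes and the
 * foo_filter type are hypothetical.
 *
 *	static int foo_set_parms(struct net *net, struct tcf_proto *tp,
 *				 struct foo_filter *f, unsigned long base,
 *				 struct nlattr **tb, struct nlattr *est,
 *				 bool ovr)
 *	{
 *		struct tcf_exts e;
 *		int err;
 *
 *		err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *		if (err < 0)
 *			return err;
 *		err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *		if (err < 0)
 *			goto errout;
 *
 *		if (tb[TCA_FOO_CLASSID]) {
 *			f->res.classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *			tcf_bind_filter(tp, &f->res, base);	// bind to class
 *		}
 *
 *		tcf_exts_change(tp, &f->exts, &e);	// commit new actions
 *		return 0;
 *
 *	errout:
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 */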
/**
 * tcf_exts_is_predicative - check if a predicative extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if a predicative extension is present, i.e. an extension which
 * might cause further actions and thus overrule the regular tcf_result.
 */
static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return 0;
#endif
}

/**
 * tcf_exts_is_available - check if at least one extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if at least one extension is present.
 */
static inline int
tcf_exts_is_available(struct tcf_exts *exts)
{
	/* All non-predicative extensions must be added here. */
	return tcf_exts_is_predicative(exts);
}

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns 0 on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->nr_actions)
		return tcf_action_exec(skb, exts->actions, exts->nr_actions,
				       res);
#endif
	return 0;
}
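/* Illustrative sketch, not part of the original header: the usual shape of a
 * classifier's ->classify() callback consuming tcf_exts_exec(), based on the
 * pattern used by in-tree classifiers such as cls_basic. foo_head,
 * foo_filter and foo_match() are hypothetical.
 *
 *	static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *				struct tcf_result *res)
 *	{
 *		struct foo_head *head = rcu_dereference_bh(tp->root);
 *		struct foo_filter *f;
 *		int r;
 *
 *		list_for_each_entry_rcu(f, &head->flist, link) {
 *			if (!foo_match(skb, f))
 *				continue;
 *			*res = f->res;				// bound class
 *			r = tcf_exts_exec(skb, &f->exts, res);	// run actions
 *			if (r < 0)				// unmatched
 *				continue;
 *			return r;				// 0 or TC_ACT_*
 *		}
 *		return -1;					// no match
 *	}
 */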
#ifdef CONFIG_NET_CLS_ACT

#define tc_no_actions(_exts)	((_exts)->nr_actions == 0)
#define tc_single_action(_exts)	((_exts)->nr_actions == 1)

#else /* CONFIG_NET_CLS_ACT */

#define tc_no_actions(_exts)	true
#define tc_single_action(_exts)	false

#endif /* CONFIG_NET_CLS_ACT */

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops *	ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
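/* Illustrative sketch, not part of the original header: how an ematch module
 * fills in struct tcf_ematch_ops and registers itself, following the pattern
 * of in-tree ematches such as em_cmp. TCF_EM_FOO and em_foo_match() are
 * hypothetical placeholders.
 *
 *	static int em_foo_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		// em->data carries the per-match configuration;
 *		// return 1 on match, 0 otherwise.
 *		return 0;
 *	}
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind	= TCF_EM_FOO,		// hypothetical ematch kind
 *		.match	= em_foo_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(em_foo_ops.link),
 *	};
 *
 *	static int __init init_em_foo(void)
 *	{
 *		return tcf_em_register(&em_foo_ops);
 *	}
 *
 *	static void __exit exit_em_foo(void)
 *	{
 *		tcf_em_unregister(&em_foo_ops);
 *	}
 *
 *	module_init(init_em_foo);
 *	module_exit(exit_em_foo);
 *	MODULE_ALIAS_TCF_EMATCH(TCF_EM_FOO);
 */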
/**
 * tcf_em_tree_change - replace ematch tree of a running classifier
 *
 * @tp: classifier kind handle
 * @dst: destination ematch tree variable
 * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
 *
 * This function replaces the ematch tree in @dst with the ematch
 * tree in @src. The classifier in charge of the ematch tree may be
 * running.
 */
static inline void tcf_em_tree_change(struct tcf_proto *tp,
				      struct tcf_ematch_tree *dst,
				      struct tcf_ematch_tree *src)
{
	tcf_tree_lock(tp);
	memcpy(dst, src, sizeof(*dst));
	tcf_tree_unlock(tp);
}

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb->data;
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
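/* Illustrative sketch, not part of the original header: bounds-checked header
 * access inside an ematch ->match() callback, following the pattern used by
 * em_cmp and continuing the hypothetical em_foo example above. struct foo_cfg
 * and its fields are made up for illustration.
 *
 *	static int em_foo_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		struct foo_cfg *cfg = (struct foo_cfg *) em->data;
 *		unsigned char *ptr;
 *
 *		ptr = tcf_get_base_ptr(skb, cfg->layer) + cfg->off;
 *		if (!tcf_valid_offset(skb, ptr, cfg->len))
 *			return 0;		// refuse out-of-bounds reads
 *
 *		return !memcmp(ptr, cfg->pattern, cfg->len);	// 1 == match
 *	}
 */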
#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev,
				  const struct tcf_proto *tp)
{
	const struct Qdisc *sch = tp->q;
	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	if (cops && cops->tcf_cl_offload)
		return cops->tcf_cl_offload(tp->classid);

	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_should_offload(const struct net_device *dev,
				     const struct tcf_proto *tp, u32 flags)
{
	if (tc_skip_hw(flags))
		return false;
	return tc_can_offload(dev, tp);
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	enum tc_fl_command command;
	u32 prio;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* This structure holds the cookie that is passed from userspace
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};
#endif /* __NET_PKT_CLS_H */