/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

bool tcf_queue_work(struct work_struct *work);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain);
void tcf_block_put(struct tcf_block *block);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
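/* Usage sketch (illustrative only, not part of this header): a classifier's
 * ->change() path typically binds a filter's tcf_result to a class when a
 * classid TLV is present, and unbinds it again when the filter is deleted.
 * TCA_FOO_CLASSID and the filter struct 'f' below are hypothetical names.
 *
 *	if (tb[TCA_FOO_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *
 *	// ...and in the delete/destroy path:
 *	tcf_unbind_filter(tp, &f->res);
 */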
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int	action;
	int	police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}
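/* Usage sketch (illustrative only): the typical tcf_exts lifecycle inside a
 * classifier's ->change() callback. TCA_FOO_ACT/TCA_FOO_POLICE and the filter
 * struct 'f' are hypothetical; tcf_exts_validate(), tcf_exts_change() and
 * tcf_exts_destroy() are declared further below.
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, rate_tlv, &e, ovr);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&f->exts, &e);
 */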
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *	ptr;
	int		nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to re-identify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long	data;
	unsigned int	datalen;
	u16		matchid;
	u16		flags;
	struct net	*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
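/* Registration sketch (illustrative only): an ematch module fills in a
 * tcf_ematch_ops table and registers it with tcf_em_register(), declared
 * below. TCF_EM_FOO, foo_em_match() and the module names are hypothetical.
 *
 *	static int foo_em_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		return 1;	// 1 = match, 0 = no match
 *	}
 *
 *	static struct tcf_ematch_ops foo_em_ops = {
 *		.kind	= TCF_EM_FOO,
 *		.match	= foo_em_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(foo_em_ops.link),
 *	};
 *
 *	static int __init foo_em_init(void)
 *	{
 *		return tcf_em_register(&foo_em_ops);
 *	}
 *
 *	static void __exit foo_em_exit(void)
 *	{
 *		tcf_em_unregister(&foo_em_ops);
 *	}
 */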
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by walking
 * through all ematches, respecting their logical relations and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb->data;
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	u32 classid;
};

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	cls_common->classid = tp->classid;
}

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
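/* Offload sketch (illustrative only): a classifier builds an offload request
 * by filling the common fields from its tcf_proto and handing the command to
 * the driver via ndo_setup_tc(). The knode fields, 'n' and the error handling
 * are hypothetical; TC_SETUP_CLSU32 is the u32 setup type from
 * <linux/netdevice.h>.
 *
 *	struct tc_cls_u32_offload cls_u32 = {};
 *	struct net_device *dev = tp->q->dev_queue->dev;
 *
 *	tc_cls_common_offload_init(&cls_u32.common, tp);
 *	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 *	cls_u32.knode.handle = n->handle;
 *
 *	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
 */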
static inline bool tc_can_offload(const struct net_device *dev)
{
	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
{
	if (tc_skip_hw(flags))
		return false;
	return tc_can_offload(dev);
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	bool egress_dev;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* This structure holds the cookie that userspace passes to the kernel for
 * actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};
#endif /* __NET_PKT_CLS_H */