// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/tc_wrapper.h>

/* Number of hash buckets; fw_hash() folds a 32-bit mark into this range. */
#define HTSIZE 256

/*
 * Per-tcf_proto root.  Allocated lazily in fw_change() (fw_init() leaves
 * tp->root NULL so the legacy "classify by raw mark" path keeps working
 * without any per-tp state).
 */
struct fw_head {
	u32 mask;			/* ANDed with skb->mark before lookup */
	struct fw_filter __rcu *ht[HTSIZE]; /* hash buckets keyed by fw_hash(id) */
	struct rcu_head rcu;
};

/* One filter: matches when (skb->mark & head->mask) == id. */
struct fw_filter {
	struct fw_filter __rcu *next;	/* hash-chain link */
	u32 id;				/* masked mark value this filter matches */
	struct tcf_result res;		/* classification result to return */
	int ifindex;			/* optional ingress-dev restriction (0 = any) */
	struct tcf_exts exts;		/* attached actions / policer */
	struct tcf_proto *tp;
	struct rcu_work rwork;		/* deferred (RCU + workqueue) teardown */
};

/* Fold a 32-bit id down to a bucket index by XORing the high bytes in. */
static u32 fw_hash(u32 handle)
{
	handle ^= (handle >> 16);
	handle ^= (handle >> 8);
	return handle % HTSIZE;
}

/*
 * Fast-path classification; runs under RCU-bh.  Returns the exec result of
 * the matching filter's extensions, or -1 when nothing matches.
 */
TC_INDIRECT_SCOPE int fw_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct fw_head *head = rcu_dereference_bh(tp->root);
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;

		for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
		     f = rcu_dereference_bh(f->next)) {
			if (f->id == id) {
				*res = f->res;
				/* indev mismatch: keep scanning the chain */
				if (!tcf_match_indev(skb, f->ifindex))
					continue;
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		/* Old method: classify the packet using its skb mark. */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}

/* Look up a filter by its id (== handle); rtnl-held control path. */
static void *fw_get(struct tcf_proto *tp, u32 handle)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;

	if (head == NULL)
		return NULL;

	f = rtnl_dereference(head->ht[fw_hash(handle)]);
	for (; f; f = rtnl_dereference(f->next)) {
		if (f->id == handle)
			return f;
	}
	return NULL;
}

static int fw_init(struct tcf_proto *tp)
{
	/* We don't allocate fw_head here, because in the old method
	 * we don't need it at all.
	 */
	return 0;
}

/* Final teardown of a filter; must run with rtnl held (see the work fn). */
static void __fw_delete_filter(struct fw_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

/* Deferred teardown, queued via tcf_queue_work() after an RCU grace period. */
static void fw_delete_filter_work(struct work_struct *work)
{
	struct fw_filter *f = container_of(to_rcu_work(work),
					   struct fw_filter,
					   rwork);
	rtnl_lock();
	__fw_delete_filter(f);
	rtnl_unlock();
}

/* Tear down every filter and free the head (rtnl held). */
static void fw_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = rtnl_dereference(head->ht[h])) != NULL) {
			RCU_INIT_POINTER(head->ht[h],
					 rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			/* Defer free only if the netns is still alive. */
			if (tcf_exts_get_net(&f->exts))
				tcf_queue_work(&f->rwork, fw_delete_filter_work);
			else
				__fw_delete_filter(f);
		}
	}
	kfree_rcu(head, rcu);
}

/*
 * Unlink and free one filter.  *last is set when the table is now empty,
 * so the caller may destroy the whole tcf_proto.
 */
static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = arg;
	struct fw_filter __rcu **fp;
	struct fw_filter *pfp;
	int ret = -EINVAL;
	int h;

	if (head == NULL || f == NULL)
		goto out;

	fp = &head->ht[fw_hash(f->id)];

	for (pfp = rtnl_dereference(*fp); pfp;
	     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
		if (pfp == f) {
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, fw_delete_filter_work);
			ret = 0;
			break;
		}
	}

	*last = true;
	for (h = 0; h < HTSIZE; h++) {
		if (rcu_access_pointer(head->ht[h])) {
			*last = false;
			break;
		}
	}

out:
	return ret;
}

static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_FW_MASK]		= { .type = NLA_U32 },
};

/*
 * Validate netlink attributes and apply them to @f (exts, indev, classid).
 * TCA_FW_MASK, if given, must equal the existing head->mask: the mask is a
 * per-head property and cannot differ between filters.
 */
static int fw_set_parms(struct net *net, struct tcf_proto *tp,
			struct fw_filter *f, struct nlattr **tb,
			struct nlattr **tca, unsigned long base, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	u32 mask;
	int err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, flags,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FW_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
		if (ret < 0)
			return ret;
		f->ifindex = ret;
	}

	err = -EINVAL;
	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			return err;
	} else if (head->mask != 0xFFFFFFFF)
		return err;

	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}

/*
 * Create a new filter or replace an existing one (rtnl held).  Replacement
 * is done copy-on-write: a fully initialized fnew is spliced into the hash
 * chain with rcu_assign_pointer() before the old filter is retired, so
 * fw_classify() readers always see a consistent filter.
 */
static int fw_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca, void **arg,
		     u32 flags, struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt) {
		if (handle)
			return -EINVAL;

		if (tcf_block_shared(tp->chain->block)) {
			NL_SET_ERR_MSG(extack,
				       "Must specify mark when attaching fw filter to block");
			return -EINVAL;
		}

		return 0; /* Succeed if it is old method. */
	}

	err = nla_parse_nested_deprecated(tb, TCA_FW_MAX, opt, fw_policy,
					  NULL);
	if (err < 0)
		return err;

	if (f) {
		struct fw_filter *pfp, *fnew;
		struct fw_filter __rcu **fp;

		if (f->id != handle && handle)
			return -EINVAL;

		fnew = kzalloc_obj(struct fw_filter);
		if (!fnew)
			return -ENOBUFS;

		fnew->id = f->id;
		fnew->ifindex = f->ifindex;
		fnew->tp = f->tp;

		err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
				    TCA_FW_POLICE);
		if (err < 0) {
			kfree(fnew);
			return err;
		}

		err = fw_set_parms(net, tp, fnew, tb, tca, base, flags, extack);
		if (err < 0) {
			tcf_exts_destroy(&fnew->exts);
			kfree(fnew);
			return err;
		}

		/* Find the pointer that links to the old filter ... */
		fp = &head->ht[fw_hash(fnew->id)];
		for (pfp = rtnl_dereference(*fp); pfp;
		     fp = &pfp->next, pfp = rtnl_dereference(*fp))
			if (pfp == f)
				break;

		/* ... and atomically swap in the replacement. */
		RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
		rcu_assign_pointer(*fp, fnew);
		tcf_unbind_filter(tp, &f->res);
		tcf_exts_get_net(&f->exts);
		tcf_queue_work(&f->rwork, fw_delete_filter_work);

		*arg = fnew;
		return err;
	}

	if (!handle)
		return -EINVAL;

	if (!head) {
		/* First filter: allocate the head, fixing the mask for good. */
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc_obj(*head);
		if (!head)
			return -ENOBUFS;
		head->mask = mask;

		rcu_assign_pointer(tp->root, head);
	}

	f = kzalloc_obj(struct fw_filter);
	if (f == NULL)
		return -ENOBUFS;

	err = tcf_exts_init(&f->exts, net, TCA_FW_ACT, TCA_FW_POLICE);
	if (err < 0)
		goto errout;
	f->id = handle;
	f->tp = tp;

	err = fw_set_parms(net, tp, f, tb, tca, base, flags, extack);
	if (err < 0)
		goto errout;

	/* Publish: init f->next first, then make f visible to readers. */
	RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
	rcu_assign_pointer(head->ht[fw_hash(handle)], f);

	*arg = f;
	return 0;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

/* Iterate all filters for dump; tc_cls_stats_dump() handles skip/count. */
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = rtnl_dereference(head->ht[h]); f;
		     f = rtnl_dereference(f->next)) {
			if (!tc_cls_stats_dump(tp, arg, f))
				return;
		}
	}
}

/* Dump one filter's attributes (and ext stats) into a netlink message. */
static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	/* Nothing to dump for a bare filter with no classid and no actions. */
	if (!f->res.classid && !tcf_exts_has_actions(&f->exts))
		return skb->len;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (f->ifindex) {
		struct net_device *dev;
		dev = __dev_get_by_index(net, f->ifindex);
		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
			goto nla_put_failure;
	}
	/* The mask is only dumped when it is not the all-ones default. */
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

/* Re-bind a filter's result to a new class (qdisc class graft). */
static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct fw_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}

static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		= "fw",
	.classify	= fw_classify,
	.init		= fw_init,
	.destroy	= fw_destroy,
	.get		= fw_get,
	.change		= fw_change,
	.delete		= fw_delete,
	.walk		= fw_walk,
	.dump		= fw_dump,
	.bind_class	= fw_bind_class,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("fw");

static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_DESCRIPTION("SKB mark based TC classifier");
MODULE_LICENSE("GPL");