/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy : Added Action extension
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 * eventually when the meta match extension is made available
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

#define HTSIZE 256

struct fw_head {
	u32			mask;
	struct fw_filter __rcu	*ht[HTSIZE];
	struct rcu_head		rcu;
};

struct fw_filter {
	struct fw_filter __rcu	*next;
	u32			id;
	struct tcf_result	res;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif /* CONFIG_NET_CLS_IND */
	struct tcf_exts		exts;
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

static u32 fw_hash(u32 handle)
{
	handle ^= (handle >> 16);
	handle ^= (handle >> 8);
	return handle % HTSIZE;
}

static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct fw_head *head = rcu_dereference_bh(tp->root);
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;

		for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
		     f = rcu_dereference_bh(f->next)) {
			if (f->id == id) {
				*res = f->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, f->ifindex))
					continue;
#endif /* CONFIG_NET_CLS_IND */
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		/* Old method: classify the packet using its skb mark. */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}

static void *fw_get(struct tcf_proto *tp, u32 handle)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;

	if (head == NULL)
		return NULL;

	f = rtnl_dereference(head->ht[fw_hash(handle)]);
	for (; f; f = rtnl_dereference(f->next)) {
		if (f->id == handle)
			return f;
	}
	return NULL;
}

static int fw_init(struct tcf_proto *tp)
{
	/* We don't allocate fw_head here, because in the old method
	 * we don't need it at all.
	 */
	return 0;
}

static void fw_delete_filter_work(struct work_struct *work)
{
	struct fw_filter *f = container_of(work, struct fw_filter, work);

	rtnl_lock();
	tcf_exts_destroy(&f->exts);
	kfree(f);
	rtnl_unlock();
}

static void fw_delete_filter(struct rcu_head *head)
{
	struct fw_filter *f = container_of(head, struct fw_filter, rcu);

	INIT_WORK(&f->work, fw_delete_filter_work);
	tcf_queue_work(&f->work);
}

static void fw_destroy(struct tcf_proto *tp)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = rtnl_dereference(head->ht[h])) != NULL) {
			RCU_INIT_POINTER(head->ht[h],
					 rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, fw_delete_filter);
		}
	}
	kfree_rcu(head, rcu);
}

static int fw_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = arg;
	struct fw_filter __rcu **fp;
	struct fw_filter *pfp;
	int ret = -EINVAL;
	int h;

	if (head == NULL || f == NULL)
		goto out;

	fp = &head->ht[fw_hash(f->id)];

	for (pfp = rtnl_dereference(*fp); pfp;
	     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
		if (pfp == f) {
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, fw_delete_filter);
			ret = 0;
			break;
		}
	}

	*last = true;
	for (h = 0; h < HTSIZE; h++) {
		if (rcu_access_pointer(head->ht[h])) {
			*last = false;
			break;
		}
	}

out:
	return ret;
}

static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_FW_MASK]		= { .type = NLA_U32 },
};

static int fw_set_parms(struct net *net, struct tcf_proto *tp,
			struct fw_filter *f, struct nlattr **tb,
			struct nlattr **tca, unsigned long base, bool ovr)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	u32 mask;
	int err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FW_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_FW_INDEV]);
		if (ret < 0)
			return ret;
		f->ifindex = ret;
	}
#endif /* CONFIG_NET_CLS_IND */

	err = -EINVAL;
	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			return err;
	} else if (head->mask != 0xFFFFFFFF)
		return err;

	return 0;
}

static int fw_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca, void **arg,
		     bool ovr)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt)
		return handle ? -EINVAL : 0; /* Succeed if it is old method. */

	err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy, NULL);
	if (err < 0)
		return err;

	if (f) {
		struct fw_filter *pfp, *fnew;
		struct fw_filter __rcu **fp;

		if (f->id != handle && handle)
			return -EINVAL;

		fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
		if (!fnew)
			return -ENOBUFS;

		fnew->id = f->id;
		fnew->res = f->res;
#ifdef CONFIG_NET_CLS_IND
		fnew->ifindex = f->ifindex;
#endif /* CONFIG_NET_CLS_IND */
		fnew->tp = f->tp;

		err = tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE);
		if (err < 0) {
			kfree(fnew);
			return err;
		}

		err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr);
		if (err < 0) {
			tcf_exts_destroy(&fnew->exts);
			kfree(fnew);
			return err;
		}

		fp = &head->ht[fw_hash(fnew->id)];
		for (pfp = rtnl_dereference(*fp); pfp;
		     fp = &pfp->next, pfp = rtnl_dereference(*fp))
			if (pfp == f)
				break;

		RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
		rcu_assign_pointer(*fp, fnew);
		tcf_unbind_filter(tp, &f->res);
		call_rcu(&f->rcu, fw_delete_filter);

		*arg = fnew;
		return err;
	}

	if (!handle)
		return -EINVAL;

	if (!head) {
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOBUFS;
		head->mask = mask;

		rcu_assign_pointer(tp->root, head);
	}

	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	err = tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
	if (err < 0)
		goto errout;
	f->id = handle;
	f->tp = tp;

	err = fw_set_parms(net, tp, f, tb, tca, base, ovr);
	if (err < 0)
		goto errout;

	RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
	rcu_assign_pointer(head->ht[fw_hash(handle)], f);

	*arg = f;
	return 0;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = rtnl_dereference(head->ht[h]); f;
		     f = rtnl_dereference(f->next)) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(tp, f, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	if (!f->res.classid && !tcf_exts_has_actions(&f->exts))
		return skb->len;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
	if (f->ifindex) {
		struct net_device *dev;
		dev = __dev_get_by_index(net, f->ifindex);
		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
			goto nla_put_failure;
	}
#endif /* CONFIG_NET_CLS_IND */
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void fw_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct fw_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		=	"fw",
	.classify	=	fw_classify,
	.init		=	fw_init,
	.destroy	=	fw_destroy,
	.get		=	fw_get,
	.change		=	fw_change,
	.delete		=	fw_delete,
	.walk		=	fw_walk,
	.dump		=	fw_dump,
	.bind_class	=	fw_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_LICENSE("GPL");