/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;
	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;

	return tcf_exts_exec(skb, &head->exts, res);
}

static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_em_tree_destroy(&head->ematches);
	tcf_exts_put_net(&head->exts);
	kfree(head);
}

static void cls_cgroup_destroy_work(struct work_struct *work)
{
	struct cls_cgroup_head *head = container_of(work,
						    struct cls_cgroup_head,
						    work);
	rtnl_lock();
	__cls_cgroup_destroy(head);
	rtnl_unlock();
}

static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
	struct cls_cgroup_head *head = container_of(root,
						    struct cls_cgroup_head,
						    rcu);

	INIT_WORK(&head->work, cls_cgroup_destroy_work);
	tcf_queue_work(&head->work);
}

static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     void **arg, bool ovr,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct cls_cgroup_head *new;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (!head && !handle)
		return -EINVAL;

	if (head && handle != head->handle)
		return -ENOENT;

	new = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	if (err < 0)
		goto errout;
	new->handle = handle;
	new->tp = tp;
	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy, NULL);
	if (err < 0)
		goto errout;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr,
				extack);
	if (err < 0)
		goto errout;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
	if (err < 0)
		goto errout;

	rcu_assign_pointer(tp->root, new);
	if (head) {
		tcf_exts_get_net(&head->exts);
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	}
	return 0;
errout:
	tcf_exts_destroy(&new->exts);
	kfree(new);
	return err;
}

static void cls_cgroup_destroy(struct tcf_proto *tp,
			       struct netlink_ext_ack *extack)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	/* Head can still be NULL due to cls_cgroup_init(). */
	if (head) {
		if (tcf_exts_get_net(&head->exts))
			call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
		else
			__cls_cgroup_destroy(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
			     struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
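
/*
 * Usage sketch: the cgroup classifier is normally attached with tc and
 * keyed off the net_cls cgroup's classid, along the lines of the net_cls
 * cgroup documentation.  The device name, rates, handles and cgroup name
 * below are illustrative values, not part of this file:
 *
 *	tc qdisc add dev eth0 root handle 10: htb
 *	tc class add dev eth0 parent 10: classid 10:1 htb rate 40mbit
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 *	echo 0x100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid
 *
 * cls_cgroup_classify() reads the classid of the sending task via
 * task_get_classid(); 0x100001 encodes major 0x10, minor 0x1, so packets
 * from tasks in the "foo" cgroup are steered to class 10:1.
 */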