/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;

static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);

	/* Ensure 'data' points at mac_header prior to calling vlan
	 * manipulating functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			__vlan_hwaccel_clear_tag(skb);
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
	return TC_ACT_SHOT;
}

static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
};

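/* Control path: parse and validate the netlink attributes, then build a new
 * tcf_vlan_params and swap it in under tcf_lock, freeing the old parameter
 * set via RCU so the datapath in tcf_vlan_act() never sees a partial update.
 */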
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, parm->index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, parm->index);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, parm->index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_vlan_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	v = to_vlan(*a);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_proto = push_proto;

	spin_lock_bh(&v->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;

	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}

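/* Dump the action configuration back to userspace. tcf_lock is held so the
 * reported parameters stay consistent with a concurrent tcf_vlan_init()
 * update.
 */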
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index = v->tcf_index,
		.refcnt = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
			 p->tcfv_push_prio))))
		goto nla_put_failure;

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_vlan_ops = {
	.kind = "vlan",
	.id = TCA_ID_VLAN,
	.owner = THIS_MODULE,
	.act = tcf_vlan_act,
	.dump = tcf_vlan_dump,
	.init = tcf_vlan_init,
	.cleanup = tcf_vlan_cleanup,
	.walk = tcf_vlan_walker,
	.lookup = tcf_vlan_search,
	.size = sizeof(struct tcf_vlan),
};

static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");

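/*
 * Example userspace configuration (illustrative only; the interface name and
 * exact syntax depend on the local setup and the iproute2 version in use):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action vlan push id 100 priority 3 protocol 802.1Q
 *	tc filter add dev eth0 ingress matchall action vlan pop
 */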