act_skbedit.c: diff between commits 2f1e85b1aee459b7d0fd981839042c6a38ffaf0c (old) and 38a6f0865796e26fc38fff4858f681d9ae76fa0f (new)

 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008, Intel Corporation.
  *
  * Author: Alexander Duyck <alexander.h.duyck@intel.com>
  */
 
 #include <linux/module.h>
--- 9 unchanged lines hidden ---
 #include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_skbedit.h>
 
 static unsigned int skbedit_net_id;
 static struct tc_action_ops act_skbedit_ops;
 
+static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
+			    struct sk_buff *skb)
+{
+	u16 queue_mapping = params->queue_mapping;
+
+	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
+		u32 hash = skb_get_hash(skb);
+
+		queue_mapping += hash % params->mapping_mod;
+	}
+
+	return netdev_cap_txqueue(skb->dev, queue_mapping);
+}
+
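
The new tcf_skbedit_hash() above spreads the skb flow hash over the configured queue range, with mapping_mod holding the inclusive width of that range. A minimal user-space sketch of the same arithmetic follows; the helper name pick_txq() and its parameters are illustrative only, not kernel API, and the fallback to queue 0 mirrors the capping that netdev_cap_txqueue() performs for an out-of-range index.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for tcf_skbedit_hash(): spread a flow hash over
 * the inclusive queue range [base, base + mod - 1], then cap the result
 * to the device's real tx queue count (out of range falls back to 0).
 */
static uint16_t pick_txq(uint16_t base, uint16_t mod, uint32_t flow_hash,
			 uint16_t real_num_tx_queues)
{
	uint16_t q = base + flow_hash % mod;

	return q < real_num_tx_queues ? q : 0;
}

int main(void)
{
	/* queue_mapping = 2, queue_mapping_max = 5 => mapping_mod = 4 */
	printf("%u\n", pick_txq(2, 4, 0x12345678u, 8));	/* prints 2 */
	return 0;
}
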
 static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
 			   struct tcf_result *res)
 {
 	struct tcf_skbedit *d = to_skbedit(a);
 	struct tcf_skbedit_params *params;
 	int action;
 
 	tcf_lastuse_update(&d->tcf_tm);
--- 23 unchanged lines hidden ---
 			break;
 		}
 	}
 	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
 	    skb->dev->real_num_tx_queues > params->queue_mapping) {
 #ifdef CONFIG_NET_EGRESS
 		netdev_xmit_skip_txqueue(true);
 #endif
-		skb_set_queue_mapping(skb, params->queue_mapping);
+		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
 	}
 	if (params->flags & SKBEDIT_F_MARK) {
 		skb->mark &= ~params->mask;
 		skb->mark |= params->mark & params->mask;
 	}
 	if (params->flags & SKBEDIT_F_PTYPE)
 		skb->pkt_type = params->ptype;
 	return action;
--- 17 unchanged lines hidden ---
 static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
 	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
 	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
 	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
 	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
 	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
 	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
 	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
+	[TCA_SKBEDIT_QUEUE_MAPPING_MAX]	= { .len = sizeof(u16) },
 };
 
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 			    struct nlattr *est, struct tc_action **a,
 			    struct tcf_proto *tp, u32 act_flags,
 			    struct netlink_ext_ack *extack)
 {
 	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
 	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
 	struct tcf_skbedit_params *params_new;
 	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
 	struct tcf_chain *goto_ch = NULL;
 	struct tc_skbedit *parm;
 	struct tcf_skbedit *d;
 	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
 	u16 *queue_mapping = NULL, *ptype = NULL;
+	u16 mapping_mod = 1;
 	bool exists = false;
 	int ret = 0, err;
 	u32 index;
 
 	if (nla == NULL)
 		return -EINVAL;
 
 	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
--- 29 unchanged lines hidden ---
 	if (tb[TCA_SKBEDIT_MASK] != NULL) {
 		flags |= SKBEDIT_F_MASK;
 		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
 	}
 
 	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
 		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
 
+		if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
+			u16 *queue_mapping_max;
+
+			if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
+			    !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
+				NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
+				return -EINVAL;
+			}
+
+			queue_mapping_max =
+				nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
+			if (*queue_mapping_max < *queue_mapping) {
+				NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
+				return -EINVAL;
+			}
+
+			mapping_mod = *queue_mapping_max - *queue_mapping + 1;
+			flags |= SKBEDIT_F_TXQ_SKBHASH;
+		}
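
The validation added above requires both ends of the range when SKBEDIT_F_TXQ_SKBHASH is requested and rejects max < min; mapping_mod then becomes the inclusive range width used by tcf_skbedit_hash(). A compact sketch of the same checks, with a hypothetical helper name and plain pointers standing in for the parsed netlink attributes:

#include <errno.h>
#include <stdint.h>

/* Hypothetical helper mirroring the init-time checks: both range ends
 * must be present, max must not be below min, and the dispatch-time
 * modulus is the inclusive width of the range.
 */
static int validate_queue_range(const uint16_t *min, const uint16_t *max,
				uint16_t *mapping_mod)
{
	if (!min || !max)
		return -EINVAL;		/* missing required range of queue_mapping */
	if (*max < *min)
		return -EINVAL;		/* max < min is rejected */

	*mapping_mod = *max - *min + 1;	/* e.g. min = 2, max = 5 -> mod = 4 */
	return 0;
}
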
 		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
 			flags |= SKBEDIT_F_INHERITDSFIELD;
 	}
 
 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 	index = parm->index;
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
--- 35 unchanged lines hidden ---
 	if (unlikely(!params_new)) {
 		err = -ENOMEM;
 		goto put_chain;
 	}
 
 	params_new->flags = flags;
 	if (flags & SKBEDIT_F_PRIORITY)
 		params_new->priority = *priority;
-	if (flags & SKBEDIT_F_QUEUE_MAPPING)
+	if (flags & SKBEDIT_F_QUEUE_MAPPING) {
 		params_new->queue_mapping = *queue_mapping;
+		params_new->mapping_mod = mapping_mod;
+	}
 	if (flags & SKBEDIT_F_MARK)
 		params_new->mark = *mark;
 	if (flags & SKBEDIT_F_PTYPE)
 		params_new->ptype = *ptype;
 	/* default behaviour is to use all the bits */
 	params_new->mask = 0xffffffff;
 	if (flags & SKBEDIT_F_MASK)
 		params_new->mask = *mask;
--- 50 unchanged lines hidden ---
 	if ((params->flags & SKBEDIT_F_PTYPE) &&
 	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
 		goto nla_put_failure;
 	if ((params->flags & SKBEDIT_F_MASK) &&
 	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
 		goto nla_put_failure;
 	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
 		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
+	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
+		if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
+				params->queue_mapping + params->mapping_mod - 1))
+			goto nla_put_failure;
+
+		pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
+	}
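
In the dump hunk above, queue_mapping_max is not stored in the action; it is re-derived as queue_mapping + mapping_mod - 1, the inverse of the mapping_mod = max - min + 1 computation done at init time, so a dump round-trips the configured range. A tiny standalone sketch of that round-trip, with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t min = 2, max = 5;
	/* init: store the range as (base, width) */
	uint16_t mapping_mod = max - min + 1;
	/* dump: re-derive the upper bound from the stored pair */
	uint16_t dumped_max = min + mapping_mod - 1;

	assert(dumped_max == max);	/* the range survives a round trip */
	return 0;
}
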
 	if (pure_flags != 0 &&
 	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
 		goto nla_put_failure;
 
 	tcf_tm_dump(&t, &d->tcf_tm);
 	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
 		goto nla_put_failure;
 	spin_unlock_bh(&d->tcf_lock);
--- 33 unchanged lines hidden ---
 	return tcf_idr_search(tn, a, index);
 }
 
 static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
 {
 	return nla_total_size(sizeof(struct tc_skbedit))
 		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
 		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
 		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
 		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
 		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
 		+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
 }
 
 static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
 					 u32 *index_inc, bool bind,
--- 92 unchanged lines hidden ---