// SPDX-License-Identifier: GPL-2.0
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"

/* enic_addfltr_5t - Add ipv4 5tuple filter
 * @enic: enic struct of vnic
 * @keys: flow_keys of ipv4 5tuple
 * @rq: rq number to steer to
 *
 * This function returns filter_id(hardware_id) of the filter
 * added. In case of error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
	int res;
	struct filter data;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP:
		data.u.ipv4.protocol = PROTO_TCP;
		break;
	case IPPROTO_UDP:
		data.u.ipv4.protocol = PROTO_UDP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}
	data.type = FILTER_IPV4_5TUPLE;
	data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
	data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
	data.u.ipv4.src_port = ntohs(keys->ports.src);
	data.u.ipv4.dst_port = ntohs(keys->ports.dst);
	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
	spin_unlock_bh(&enic->devcmd_lock);
	/* on success the firmware writes the filter id back through &rq */
	res = (res == 0) ? rq : res;

	return res;
}

/* enic_delfltr - Delete clsf filter
 * @enic: enic struct of vnic
 * @filter_id: filter_id(hardware_id) of filter to be deleted
 *
 * This function returns zero in case of success, a negative number in case
 * of error.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
	int ret;

	spin_lock_bh(&enic->devcmd_lock);
	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
	spin_unlock_bh(&enic->devcmd_lock);

	return ret;
}

/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
 * @enic: enic data
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
	int i;

	spin_lock_init(&enic->rfs_h.lock);
	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
	enic->rfs_h.max = enic->config.num_arfs;
	enic->rfs_h.free = enic->rfs_h.max;
	enic->rfs_h.toclean = 0;
}

/* enic_rfs_flw_tbl_free - stop the expiry timer and free every filter
 * @enic: enic data
 */
void enic_rfs_flw_tbl_free(struct enic *enic)
{
	int i;

	enic_rfs_timer_stop(enic);
	spin_lock_bh(&enic->rfs_h.lock);
	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			enic_delfltr(enic, n->fltr_id);
			hlist_del(&n->node);
			kfree(n);
			enic->rfs_h.free++;
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
}

/* htbl_fltr_search - scan all buckets for a filter id
 * @enic: enic data
 * @fltr_id: filter_id(hardware_id) to look up
 *
 * Returns the matching node, or NULL if @fltr_id is not in the table.
 */
struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
{
	int i;

	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node)
			if (n->fltr_id == fltr_id)
				return n;
	}

	return NULL;
}
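/* Illustrative sketch (not part of the driver): removing one known filter
 * by id would pair the two helpers above under rfs_h.lock, the same
 * sequence enic_rfs_flw_tbl_free() runs for every bucket:
 *
 *	spin_lock_bh(&enic->rfs_h.lock);
 *	n = htbl_fltr_search(enic, fltr_id);
 *	if (n && !enic_delfltr(enic, n->fltr_id)) {
 *		hlist_del(&n->node);
 *		kfree(n);
 *		enic->rfs_h.free++;
 *	}
 *	spin_unlock_bh(&enic->rfs_h.lock);
 */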
#ifdef CONFIG_RFS_ACCEL
/* enic_flow_may_expire - expiry timer callback for the aRFS flow table
 * @t: timer embedded in enic->rfs_h
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT buckets per run, starting where the previous
 * run stopped, frees every filter the stack says may expire, then re-arms
 * itself.
 */
void enic_flow_may_expire(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

/* htbl_key_search - find the node in bucket @h whose 5tuple matches @k */
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
						  struct flow_keys *k)
{
	struct enic_rfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
		    tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
		    tpos->keys.ports.ports == k->ports.ports &&
		    tpos->keys.basic.ip_proto == k->basic.ip_proto &&
		    tpos->keys.basic.n_proto == k->basic.n_proto)
			return tpos;
	return NULL;
}
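/* enic_rx_flow_steer - add a steering filter for a flow (aRFS callback)
 * @dev: net device of vnic
 * @skb: skb of the flow to be steered
 * @rxq_index: rq the flow should be steered to
 * @flow_id: flow id assigned by the stack's RFS code
 *
 * Returns the hardware filter id on success, a negative number otherwise.
 * The core remembers that id and hands it back through
 * rps_may_expire_flow() when enic_flow_may_expire() probes for stale
 * entries.
 *
 * Illustrative sketch (not part of this file): the callback is wired up
 * through net_device_ops when CONFIG_RFS_ACCEL is set, along the lines of:
 *
 *	static const struct net_device_ops enic_netdev_ops = {
 *		...
 *	#ifdef CONFIG_RFS_ACCEL
 *		.ndo_rx_flow_steer = enic_rx_flow_steer,
 *	#endif
 *	};
 */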
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
	    (keys.basic.ip_proto != IPPROTO_TCP &&
	     keys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* The desired rq changed for the flow: we need to delete the
		 * old filter and add a new one.
		 *
		 * The moment we delete the filter, incoming packets land on
		 * the default rq chosen by RSS; once the new filter is added,
		 * they land on the desired queue. This window can reorder
		 * packets.
		 *
		 * So first try adding the new filter, then delete the old
		 * one.
		 */
		i = --enic->rfs_h.free;
		if (unlikely(i < 0)) {
			/* clsf tbl is full: we have to delete the old filter
			 * first
			 */
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
				goto ret_unlock;
			}
		} else {
			/* there is room: add the new filter first, then
			 * delete the old one
			 */
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			if (unlikely(ret < 0)) {
				/* deleting the old filter failed: keep its id
				 * on the list so enic_flow_may_expire() can
				 * retry the delete later
				 */
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	} else { /* entry not present */
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}

#endif /* CONFIG_RFS_ACCEL */
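/* Timer lifecycle note: the expiry timer driving enic_flow_may_expire() is
 * armed and torn down by the enic_rfs_timer_start()/enic_rfs_timer_stop()
 * helpers declared in enic_clsf.h. A minimal open-path sketch, assuming
 * those helpers:
 *
 *	enic_rfs_flw_tbl_init(enic);
 *	enic_rfs_timer_start(enic);	// callback re-arms itself every HZ/4
 *
 * On teardown, enic_rfs_flw_tbl_free() above stops the timer before
 * deleting the remaining filters, so the callback cannot race with the
 * table being emptied.
 */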