/* drivers/net/xen-netback/hash.c */
/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

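/* Defining XEN_NETIF_DEFINE_TOEPLITZ before including common.h (and,
 * through it, the netif protocol header) makes that header emit its
 * reference Toeplitz implementation, xen_netif_toeplitz_hash().
 */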
#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>

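/* Add an entry to the flow hash cache. If the cache would grow beyond
 * xenvif_hash_cache_size, the entry with the oldest sequence number
 * (i.e. the least recently used one) is evicted. Allocation is
 * GFP_ATOMIC since this runs on the packet path; on failure the value
 * is simply not cached and will be recomputed on demand.
 */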
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
{
	struct xenvif_hash_cache_entry *new, *entry, *oldest;
	unsigned long flags;
	bool found;

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return;

	memcpy(new->tag, tag, len);
	new->len = len;
	new->val = val;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	found = false;
	oldest = NULL;
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
				lockdep_is_held(&vif->hash.cache.lock)) {
		/* Make sure we don't add duplicate entries */
		if (entry->len == len &&
		    memcmp(entry->tag, tag, len) == 0)
			found = true;
		if (!oldest || entry->seq < oldest->seq)
			oldest = entry;
	}

	if (!found) {
		new->seq = atomic_inc_return(&vif->hash.cache.seq);
		list_add_rcu(&new->link, &vif->hash.cache.list);

		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
			kfree_rcu(oldest, rcu);
		}
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

	if (found)
		kfree(new);
}

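/* Compute the Toeplitz hash of @data with the key programmed by the
 * frontend, caching the result when the cache is enabled.
 */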
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
			   unsigned int len)
{
	u32 val;

	val = xen_netif_toeplitz_hash(vif->hash.key,
				      sizeof(vif->hash.key),
				      data, len);

	if (xenvif_hash_cache_size != 0)
		xenvif_add_hash(vif, data, len, val);

	return val;
}

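/* Empty the flow hash cache, dropping every entry. Used when the key
 * changes and on teardown, since cached values are only valid for the
 * key they were computed with.
 */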
static void xenvif_flush_hash(struct xenvif *vif)
{
	struct xenvif_hash_cache_entry *entry;
	unsigned long flags;

	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
				lockdep_is_held(&vif->hash.cache.lock)) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
		kfree_rcu(entry, rcu);
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}

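/* Look @data up in the flow hash cache, computing (and caching) a
 * fresh hash on a miss. A hit bumps the entry's sequence number so
 * that it survives LRU eviction. Tags too long to fit in a cache
 * entry hash to 0.
 */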
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
			    unsigned int len)
{
	struct xenvif_hash_cache_entry *entry;
	u32 val;
	bool found;

	if (len >= XEN_NETBK_HASH_TAG_SIZE)
		return 0;

	if (xenvif_hash_cache_size == 0)
		return xenvif_new_hash(vif, data, len);

	rcu_read_lock();

	found = false;

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		if (entry->len == len &&
		    memcmp(entry->tag, data, len) == 0) {
			val = entry->val;
			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
			found = true;
			break;
		}
	}

	rcu_read_unlock();

	if (!found)
		val = xenvif_new_hash(vif, data, len);

	return val;
}

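/* Set the hash on an skb bound for the frontend, honoring the hash
 * types the frontend enabled. For TCP packets that are not fragments
 * an L4 hash over addresses and ports is preferred, falling back to
 * an L3 hash over addresses only. As an illustration, the 12-octet
 * input hashed for IPv4 + TCP is laid out as:
 *
 *	| src addr (4) | dst addr (4) | src port (2) | dst port (2) |
 */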
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash = 0;
	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
	u32 flags = vif->hash.flags;
	bool has_tcp_hdr;

	/* Quick rejection test: If the network protocol doesn't
	 * correspond to any enabled hash type then there's no point
	 * in parsing the packet header.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
			break;

		goto done;

	case htons(ETH_P_IPV6):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
			break;

		goto done;

	default:
		goto done;
	}

	memset(&flow, 0, sizeof(flow));
	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		goto done;

	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
			u8 data[12];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
			memcpy(&data[8], &flow.ports.src, 2);
			memcpy(&data[10], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
			u8 data[8];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;

	case htons(ETH_P_IPV6):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
			u8 data[36];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
			memcpy(&data[32], &flow.ports.src, 2);
			memcpy(&data[34], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
			u8 data[32];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;
	}

done:
	if (type == PKT_HASH_TYPE_NONE)
		skb_clear_hash(skb);
	else
		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}

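/* Control ring handler: select the hash algorithm. Only NONE and
 * TOEPLITZ are recognized.
 */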
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
	switch (alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		break;

	default:
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	vif->hash.alg = alg;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

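/* Control ring handler: report the hash types the backend supports.
 * Reported as unsupported until an algorithm has been selected.
 */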
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

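/* Control ring handler: enable a set of hash types. Unknown flags,
 * or any flags while the algorithm is still NONE, are rejected.
 */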
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.flags = flags;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

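/* Control ring handler: grant-copy a new hash key from the frontend.
 * A key shorter than XEN_NETBK_MAX_HASH_KEY_SIZE is zero-padded, and
 * the hash cache is flushed because its entries were computed with
 * the old key.
 */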
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
	u8 *key = vif->hash.key;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(key),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(key),
		.len = len,
		.flags = GNTCOPY_source_gref
	};

	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Clear any remaining key octets */
	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

	xenvif_flush_hash(vif);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

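/* Control ring handler: set the size of the hash-to-queue mapping
 * table and zero the currently selected mapping.
 */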
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.size = size;
	memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
	       sizeof(u32) * size);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

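/* Control ring handler: grant-copy an update into the hash-to-queue
 * mapping table. The update is staged in the inactive copy of the
 * table (it is double-buffered via mapping_sel), a copy that crosses
 * a Xen page boundary is split into two grant copy operations, and
 * the staged table only goes live once the copy has succeeded and
 * every updated entry has been validated against num_queues.
 */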
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off)
{
	u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
	unsigned int nr = 1;
	struct gnttab_copy copy_op[2] = {{
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.domid = DOMID_SELF,
		.len = len * sizeof(*mapping),
		.flags = GNTCOPY_source_gref
	}};

	if ((off + len < off) || (off + len > vif->hash.size) ||
	    len > XEN_PAGE_SIZE / sizeof(*mapping))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
	copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
	if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
		copy_op[1] = copy_op[0];
		copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
		copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
		copy_op[1].dest.offset = 0;
		copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
		copy_op[0].len = copy_op[1].source.offset;
		nr = 2;
	}

	memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
	       vif->hash.size * sizeof(*mapping));

	if (copy_op[0].len != 0) {
		gnttab_batch_copy(copy_op, nr);

		if (copy_op[0].status != GNTST_okay ||
		    copy_op[nr - 1].status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	while (len-- != 0)
		if (mapping[off++] >= vif->num_queues)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.mapping_sel = !vif->hash.mapping_sel;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

#ifdef CONFIG_DEBUG_FS
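/* Dump the hash configuration (algorithm, enabled types, key and
 * mapping table) to a debugfs seq_file for diagnostics.
 */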
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		fallthrough;
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

		seq_puts(m, "\nHash Mapping:\n");

		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */

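/* Initialize the flow hash cache for a vif. A no-op when caching is
 * disabled (xenvif_hash_cache_size == 0).
 */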
void xenvif_init_hash(struct xenvif *vif)
{
	if (xenvif_hash_cache_size == 0)
		return;

	BUG_ON(vif->hash.cache.count);

	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}

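/* Tear down the flow hash cache, freeing any cached entries. */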
void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}
469